hexsha
stringlengths 40
40
| size
int64 1
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
239
| max_stars_repo_name
stringlengths 5
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
239
| max_issues_repo_name
stringlengths 5
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
239
| max_forks_repo_name
stringlengths 5
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.03M
| avg_line_length
float64 1
958k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a00db21ab1a50fcc1218202d645edd09a25be1a
| 766
|
py
|
Python
|
factory-ai-vision/EdgeSolution/modules/WebModule/backend/vision_on_edge/images/tests/conftest.py
|
lisongshan007/azure-intelligent-edge-patterns
|
d95561d96625b4f0a8e69cc210149fe61c9f8b5c
|
[
"MIT"
] | null | null | null |
factory-ai-vision/EdgeSolution/modules/WebModule/backend/vision_on_edge/images/tests/conftest.py
|
lisongshan007/azure-intelligent-edge-patterns
|
d95561d96625b4f0a8e69cc210149fe61c9f8b5c
|
[
"MIT"
] | null | null | null |
factory-ai-vision/EdgeSolution/modules/WebModule/backend/vision_on_edge/images/tests/conftest.py
|
lisongshan007/azure-intelligent-edge-patterns
|
d95561d96625b4f0a8e69cc210149fe61c9f8b5c
|
[
"MIT"
] | null | null | null |
"""Conftest
"""
from unittest import mock
import pytest
from ...azure_projects.models import Project
from ...azure_settings.models import Setting
from ...cameras.models import Camera
@pytest.fixture(scope="function", autouse=True)
def mock_validate(monkeypatch):
    """Automatically stub out Azure-facing model methods for every test.

    Replaces Setting validation/lookup and Project validation/retrieval with
    MagicMocks so tests in this package never reach external services.
    """

    class _StubProject:
        """Minimal stand-in for the object returned by Project.get_project_obj."""

        def __init__(self):
            self.name = "Fake Project"

    monkeypatch.setattr(Setting, "validate", mock.MagicMock(return_value=True))
    monkeypatch.setattr(
        Setting, "get_domain_id", mock.MagicMock(return_value="Fake_id")
    )
    monkeypatch.setattr(
        Project, "get_project_obj", mock.MagicMock(return_value=_StubProject())
    )
    monkeypatch.setattr(Project, "validate", mock.MagicMock(return_value=True))
| 26.413793
| 79
| 0.721932
|
4a00dbb5e2505ac01f119da49238cff0fe9cd380
| 513
|
py
|
Python
|
gaussianize.py
|
frodre/LMR
|
4c00d3f9db96447e69bd3f426d59524f7b5f3ef5
|
[
"BSD-3-Clause"
] | 17
|
2018-08-27T18:50:36.000Z
|
2021-03-17T22:48:55.000Z
|
gaussianize.py
|
mingsongli/LMR
|
4c00d3f9db96447e69bd3f426d59524f7b5f3ef5
|
[
"BSD-3-Clause"
] | 5
|
2018-10-15T22:13:27.000Z
|
2019-04-26T11:45:58.000Z
|
gaussianize.py
|
mingsongli/LMR
|
4c00d3f9db96447e69bd3f426d59524f7b5f3ef5
|
[
"BSD-3-Clause"
] | 11
|
2018-10-11T19:35:34.000Z
|
2021-08-17T12:08:11.000Z
|
import numpy as np
from scipy import special
import copy
def gaussianize(X):
    """Map the values of ``X`` onto a standard normal distribution by rank.

    Each non-NaN value is replaced by the standard-normal quantile of its
    empirical CDF position, so the output is N(0, 1)-distributed regardless
    of the input distribution. NaN entries are ignored for ranking and
    remain NaN in the output.

    Args:
        X: 1-D array-like of floats (must support fancy indexing and
           NaN assignment, e.g. a float numpy array).

    Returns:
        Array of the same type/shape as ``X`` with gaussianized values and
        NaNs preserved.
    """
    # Count only elements with data so the empirical CDF uses the true
    # sample size.
    nz = np.logical_not(np.isnan(X))
    n = X[nz].shape[0]
    # Retain the data type (and shape) of the original data variable.
    Xn = copy.deepcopy(X)
    Xn[:] = np.nan  # np.NAN alias was removed in NumPy 2.0; use np.nan
    if n == 0:
        # All-NaN (or empty) input: avoid the 1/(2*n) division by zero.
        return Xn
    # rank[i] is the position of X[nz][i] in ascending sorted order.
    index = np.argsort(X[nz])
    rank = np.argsort(index)
    # Empirical CDF with a half-bin offset so values stay strictly in (0, 1).
    CDF = 1. * (rank + 1) / (1. * n) - 1. / (2 * n)
    # Inverse standard-normal transform: Phi^{-1}(CDF) = sqrt(2)*erfinv(2*CDF-1).
    Xn[nz] = np.sqrt(2) * special.erfinv(2 * CDF - 1)
    return Xn
| 25.65
| 91
| 0.608187
|
4a00dcb4c499eb40e1236e17aa987f40f973e2ba
| 883
|
py
|
Python
|
zigzag_conversion/zigzag_conversion_test.py
|
kevinzen/learning
|
148129a1ec48e86e74c6ed244ba50ab682ebf00b
|
[
"MIT"
] | null | null | null |
zigzag_conversion/zigzag_conversion_test.py
|
kevinzen/learning
|
148129a1ec48e86e74c6ed244ba50ab682ebf00b
|
[
"MIT"
] | null | null | null |
zigzag_conversion/zigzag_conversion_test.py
|
kevinzen/learning
|
148129a1ec48e86e74c6ed244ba50ab682ebf00b
|
[
"MIT"
] | null | null | null |
import unittest
from zigzag_conversion.solution import Solution
class ZigzagConversionTest(unittest.TestCase):
    """Table-driven unit tests for Solution.zigzag_conversion."""

    def test_zigzag_conversion(self):
        solver = Solution()
        # Each case: (input string, number of rows, expected zigzag output).
        cases = [
            ("ABCD", 2, "ACBD"),
            ("ABABABABABABABABA", 2, "AAAAAAAAABBBBBBBB"),
            ('PAYPALISHIRING', 3, 'PAHNAPLSIIGYIR'),
            ('acfimqnjgdbehkorpl', 6, 'abcdefghijklmnopqr'),
            ('012345678901234567', 6, '001912823737464655'),
            ("", 1, ""),
            ("A", 1, "A"),
            ("AB", 1, "AB"),
        ]
        for text, num_rows, expected in cases:
            actual = solver.zigzag_conversion(text, num_rows)
            print("input = " + str(text) + ", actual = " + str(actual) + ", expected = " + str(expected))
            self.assertEqual(expected, actual)
| 29.433333
| 106
| 0.524349
|
4a00ddcb1891ffb069866ff46d073623de4d9269
| 1,311
|
py
|
Python
|
webhook_trigger_service/basic/app_inputs.py
|
GShepherdTC/tcex-app-templates
|
fae927965563f98eed0bd7716afa3bf4d4fda3bf
|
[
"Apache-2.0"
] | 1
|
2022-02-23T16:04:16.000Z
|
2022-02-23T16:04:16.000Z
|
webhook_trigger_service/basic/app_inputs.py
|
GShepherdTC/tcex-app-templates
|
fae927965563f98eed0bd7716afa3bf4d4fda3bf
|
[
"Apache-2.0"
] | null | null | null |
webhook_trigger_service/basic/app_inputs.py
|
GShepherdTC/tcex-app-templates
|
fae927965563f98eed0bd7716afa3bf4d4fda3bf
|
[
"Apache-2.0"
] | 3
|
2022-02-16T18:13:58.000Z
|
2022-03-31T18:46:20.000Z
|
"""App Inputs"""
# standard library
from typing import List, Union
# third-party
from pydantic import BaseModel
from tcex.input.field_types import String
from tcex.input.models import CreateConfigModel
class ServiceConfigModel(BaseModel):
    """Base model for the App containing any common inputs.

    Trigger Service App inputs do not take playbookDataType.

    This is the configuration input that is sent to the Service
    on startup, i.e. the inputs that are configured in the Service
    configuration in the Platform.
    """

    # Service-level input; accepts either a single String or a list of them.
    service_input: Union[List[String], String]
class TriggerConfigModel(CreateConfigModel):
    """Base model for Trigger (playbook) config.

    Trigger Playbook inputs do not take playbookDataType.

    This is the configuration input that gets sent to the service
    when a Playbook is enabled (createConfig).
    """

    # Per-Playbook input, supplied each time a Playbook trigger is enabled.
    playbook_input: String
class AppInputs:
    """Registers this App's custom input models on the TcEx inputs object."""

    def __init__(self, inputs: 'BaseModel') -> None:
        """Store the inputs object and immediately register custom models."""
        self.inputs = inputs
        # Registering the models also triggers input validation.
        self.update_inputs()

    def update_inputs(self) -> None:
        """Attach the custom App model; validation runs as part of add_model."""
        self.inputs.add_model(ServiceConfigModel)
| 26.755102
| 84
| 0.714722
|
4a00de3b774e8cb9b96e6fd91f4d387de3145d78
| 1,690
|
py
|
Python
|
policies/named_policies/ElspethV9.py
|
TeamJumpstart/InformatiCup2021
|
a4d07992f772d3a1e9ef715fa8e9ce2234cd47a4
|
[
"MIT"
] | 10
|
2021-04-18T17:54:02.000Z
|
2021-07-26T19:58:41.000Z
|
policies/named_policies/ElspethV9.py
|
DiddiZ/InformatiCup2021
|
a4d07992f772d3a1e9ef715fa8e9ce2234cd47a4
|
[
"MIT"
] | 1
|
2021-04-21T15:13:41.000Z
|
2021-04-21T15:13:41.000Z
|
policies/named_policies/ElspethV9.py
|
DiddiZ/InformatiCup2021
|
a4d07992f772d3a1e9ef715fa8e9ce2234cd47a4
|
[
"MIT"
] | 1
|
2021-04-20T09:42:50.000Z
|
2021-04-20T09:42:50.000Z
|
from heuristics import (
CompositeHeuristic, OpponentDistanceHeuristic, PathLengthHeuristic, RandomProbingHeuristic, RegionHeuristic,
VoronoiHeuristic
)
from policies import HeuristicPolicy
# Variant of ElspethV7 with closing_iterations working.
# NOTE(review): the `weights` list below pairs positionally with the six
# heuristics in the CompositeHeuristic list — keep both in sync when editing.
pol = HeuristicPolicy(
    CompositeHeuristic(
        [
            # longest path search - longer path is always better
            PathLengthHeuristic(20),
            # prefers bigger regions - more space to fill cells and survive longer
            RandomProbingHeuristic(
                RegionHeuristic(closing_iterations=1),
                n_steps=6,
                n_probes=20,
            ),
            # same region preference, shallower probing (no closing_iterations)
            RandomProbingHeuristic(
                RegionHeuristic(),
                n_steps=3,
                n_probes=10,
            ),
            # kill near opponents and minimize their regions
            RandomProbingHeuristic(
                CompositeHeuristic(
                    [
                        VoronoiHeuristic(max_steps=12, minimize_opponents=True),
                        RegionHeuristic(closing_iterations=1),
                        RegionHeuristic(),
                        OpponentDistanceHeuristic(dist_threshold=6)
                    ]
                ),
                n_steps=2,
                n_probes=10,
            ),
            # supports the endgame
            VoronoiHeuristic(),
            # single-step, single-probe region check as a low-weight tiebreaker
            RandomProbingHeuristic(
                RegionHeuristic(),
                n_steps=1,
                n_probes=1,
            ),
        ],
        weights=[20, 5, 5, 4, 1, 1]
    ),
    # defines how aggressive our policy is (bigger value - avoids enemies more)
    occupancy_map_depth=3
)
| 33.8
| 112
| 0.540237
|
4a00dee2299b3547ef05740cc257a4567cd77250
| 4,573
|
py
|
Python
|
pytorch3d/implicitron/tools/utils.py
|
ParikhKadam/pytorch3d
|
5cd70067e2ba642d98fd36e8e31273366d1599cb
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
pytorch3d/implicitron/tools/utils.py
|
ParikhKadam/pytorch3d
|
5cd70067e2ba642d98fd36e8e31273366d1599cb
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
pytorch3d/implicitron/tools/utils.py
|
ParikhKadam/pytorch3d
|
5cd70067e2ba642d98fd36e8e31273366d1599cb
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import collections
import dataclasses
import time
from contextlib import contextmanager
from typing import Any, Callable, Dict
import torch
@contextmanager
def evaluating(net: torch.nn.Module):
    """Context manager that puts `net` in eval mode, restoring its mode on exit."""
    was_training = net.training
    try:
        net.eval()
        yield net
    finally:
        # Only re-enable training if the module was training when we entered.
        if was_training:
            net.train()
def try_to_cuda(t: Any) -> Any:
    """
    Try to move the input variable `t` to a cuda device.

    Args:
        t: Input.

    Returns:
        `t` moved to a cuda device if it supports `.cuda()`, else `t` unchanged.
    """
    try:
        return t.cuda()
    except AttributeError:
        # `t` has no .cuda() method (e.g. ints, strings); return it as-is.
        return t
def try_to_cpu(t: Any) -> Any:
    """
    Try to move the input variable `t` to a cpu device.

    Args:
        t: Input.

    Returns:
        `t` moved to a cpu device if it supports `.cpu()`, else `t` unchanged.
    """
    try:
        return t.cpu()
    except AttributeError:
        # `t` has no .cpu() method; pass it through untouched.
        return t
def dict_to_cuda(batch: Dict[Any, Any]) -> Dict[Any, Any]:
    """
    Move all values in a dictionary to cuda if supported.

    Args:
        batch: Input dict.

    Returns:
        New dict with each value moved to a cuda device, if supported.
    """
    moved = {}
    for key, value in batch.items():
        moved[key] = try_to_cuda(value)
    return moved
def dict_to_cpu(batch):
    """
    Move all values in a dictionary to cpu if supported.

    Args:
        batch: Input dict.

    Returns:
        New dict with each value moved to a cpu device, if supported.
    """
    moved = {}
    for key, value in batch.items():
        moved[key] = try_to_cpu(value)
    return moved
def dataclass_to_cuda_(obj):
    """
    Move all fields of a dataclass to cuda in place, if supported.

    Args:
        obj: Input dataclass instance.

    Returns:
        The same `obj`, with each field moved to a cuda device if supported.
    """
    for field in dataclasses.fields(obj):
        current = getattr(obj, field.name)
        setattr(obj, field.name, try_to_cuda(current))
    return obj
def dataclass_to_cpu_(obj):
    """
    Move all fields of a dataclass to cpu in place, if supported.

    Args:
        obj: Input dataclass instance.

    Returns:
        The same `obj`, with each field moved to a cpu device if supported.
    """
    for field in dataclasses.fields(obj):
        current = getattr(obj, field.name)
        setattr(obj, field.name, try_to_cpu(current))
    return obj
# TODO: test it
def cat_dataclass(batch, tensor_collator: Callable):
    """
    Concatenate all fields of a list of dataclasses `batch` into a single
    dataclass object using `tensor_collator`.

    Args:
        batch: Input list of dataclasses (all of the same type).
        tensor_collator: The function used to concatenate tensor fields
            (e.g. ``torch.cat`` or ``torch.stack``).

    Returns:
        A single dataclass of the same type with all elements of `batch`
        concatenated field-by-field.

    Raises:
        ValueError: If a field holds a type that cannot be concatenated.
    """
    first = batch[0]
    merged = {}
    for field in dataclasses.fields(first):
        value = getattr(first, field.name)
        if value is None:
            # None fields stay None (the first element decides).
            merged[field.name] = None
        elif torch.is_tensor(value):
            merged[field.name] = tensor_collator([getattr(e, field.name) for e in batch])
        elif dataclasses.is_dataclass(value):
            # Recurse into nested dataclasses.
            merged[field.name] = cat_dataclass(
                [getattr(e, field.name) for e in batch], tensor_collator
            )
        elif isinstance(value, collections.abc.Mapping):
            # Collate mapping fields key-by-key; None entries stay None.
            merged[field.name] = {
                k: tensor_collator([getattr(e, field.name)[k] for e in batch])
                if value[k] is not None
                else None
                for k in value
            }
        else:
            raise ValueError("Unsupported field type for concatenation")
    return type(first)(**merged)
def setattr_if_hasattr(obj, name, value):
    """
    Like ``setattr(obj, name, value)``, except it is a no-op when `name`
    is not an existing attribute of `obj`.
    """
    if not hasattr(obj, name):
        return
    setattr(obj, name, value)
class Timer:
    """
    A simple context manager for timing execution.

    Example:
        ```
        with Timer():
            print("This print statement is timed.")
        ```
    """

    def __init__(self, name="timer", quiet=False):
        self.name = name
        self.quiet = quiet

    def __enter__(self):
        self.start = time.time()
        return self

    def __exit__(self, *exc_info):
        self.end = time.time()
        self.interval = self.end - self.start
        if self.quiet:
            return
        print("%20s: %1.6f sec" % (self.name, self.interval))
| 23.694301
| 83
| 0.598513
|
4a00df057c42a539e5b9d7f74cc1ff61a679d53b
| 6,701
|
py
|
Python
|
piquery/piq_feature.py
|
JaradC42/pyapi
|
80b9c4419251343603c84b8054413bab013083a8
|
[
"MIT"
] | 1
|
2019-10-18T00:55:10.000Z
|
2019-10-18T00:55:10.000Z
|
piquery/piq_feature.py
|
JaradC42/pyapi
|
80b9c4419251343603c84b8054413bab013083a8
|
[
"MIT"
] | 65
|
2019-10-21T07:16:51.000Z
|
2021-07-26T05:34:41.000Z
|
piquery/piq_feature.py
|
JaradC42/pyapi
|
80b9c4419251343603c84b8054413bab013083a8
|
[
"MIT"
] | 1
|
2019-10-17T22:13:01.000Z
|
2019-10-17T22:13:01.000Z
|
import cv2 as cv
import numpy as np
import pandas as pd
import os
def parseKeypoints(kp):
    """Convert a sequence of OpenCV KeyPoint objects into plain dicts."""
    parsed = []
    for point in kp:
        parsed.append({
            'center': point.pt,
            'diameter': point.size,
            'angle': point.angle,
            'class_id': point.class_id,
            'octave': point.octave,
            'response': point.response,
        })
    return parsed
def kpdfsort(kp):
    """Return keypoints as a DataFrame sorted by descending detector response."""
    frame = pd.DataFrame(parseKeypoints(kp))
    frame = frame.sort_values(by=['response'], ascending=False)
    return frame[['center', 'diameter', 'angle', 'response']]
def orbparams(orb):
    """Snapshot the configuration of an ORB feature detector as a dict."""
    return {
        'DefaultName': orb.getDefaultName(),
        'EdgeThreshold': orb.getEdgeThreshold(),
        'FastThreshold': orb.getFastThreshold(),
        'FirstLevel': orb.getFirstLevel(),
        'MaxFeatures': orb.getMaxFeatures(),
        'NLevels': orb.getNLevels(),
        'PatchSize': orb.getPatchSize(),
        'ScaleFactor': orb.getScaleFactor(),
        'ScoreType': orb.getScoreType(),
        'WTA_K': orb.getWTA_K(),
    }
def byte2hex(bt):
    """Format a byte value as a two-character, zero-padded lowercase hex string.

    Args:
        bt: Integer byte value; assumed to be in [0, 255] (descriptor bytes).

    Returns:
        Two-character hex string, e.g. 10 -> '0a', 255 -> 'ff'.
    """
    # format() with the '02x' spec replaces the manual hex()-split-and-pad logic.
    return format(bt, '02x')
# Work around cv.imread failing on non-ASCII (e.g. Chinese) file paths:
# chdir into the directory and read by bare file name.
def cv_imread(file_path):
    """Read an image with OpenCV, tolerating non-ASCII characters in the path.

    Args:
        file_path: Path to the image file.

    Returns:
        The decoded image as returned by cv.imread (None if the read fails).
    """
    root_dir, file_name = os.path.split(file_path)
    pwd = os.getcwd()
    if root_dir:
        os.chdir(root_dir)
    try:
        cv_img = cv.imread(file_name)
    finally:
        # Always restore the working directory, even if imread raises,
        # so a failed read cannot leave the process chdir'd elsewhere.
        os.chdir(pwd)
    return cv_img
class ImFeature:
    """Image feature extraction/matching wrapper around OpenCV detectors.

    Supports 'sift', 'surf', or (default) ORB keypoint detectors, and lazily
    creates brute-force and FLANN matchers tuned to the chosen algorithm.
    """

    def __init__(self, alg=None, k=500):
        # Pick the detector; `k` is only used as ORB's max feature count.
        if alg == 'sift':
            self.algf = cv.xfeatures2d.SIFT_create()
        elif alg == 'surf':
            self.algf = cv.xfeatures2d.SURF_create()
        else:
            self.algf = cv.ORB_create(k)
        self.alg = alg
        # Matchers are created lazily on first use and then cached.
        self.matcher = None
        self.flann_matcher = None
        # NOTE(review): `store` looks like a leftover of the commented-out
        # caching below; it is never read in this class.
        self.store = dict()

    def read(self, img_path):
        """Read an image from disk; return (BGR image, grayscale image)."""
        # if not img_path in self.store:
        # store = self.store
        # store[img_path] = dict()
        bgr = cv_imread(img_path)
        gray= cv.cvtColor(bgr, cv.COLOR_BGR2GRAY)
        # store[img_path]['bgr'] = bgr
        # store[img_path]['gray'] = gray
        return bgr, gray

    def keypoint(self, im):
        """Detect keypoints for a path or ndarray; returns (image, keypoints)."""
        if isinstance(im, str):
            bgr, gray = self.read(im)
            return gray, self.algf.detect(gray, None)
        elif isinstance(im, np.ndarray):
            return im, self.algf.detect(im, None)
        # Unsupported input type.
        return None, None

    def descriptor(self, img, kp):
        """Compute descriptors for precomputed keypoints `kp` on `img`."""
        return self.algf.compute(img, kp)

    def fingerprint(self, descriptor):
        """Serialize a descriptor matrix into one lowercase hex string."""
        return ''.join([''.join([byte2hex(d) for d in dps]) for dps in descriptor])

    def feature(self, im):
        """Detect keypoints AND compute descriptors; returns (keypoints, descriptors)."""
        if isinstance(im, str):
            bgr, gray = self.read(im)
            return self.algf.detectAndCompute(gray, None)
        elif isinstance(im, np.ndarray):
            return self.algf.detectAndCompute(im, None)
        return None, None

    def fastFeature(self, im):
        """Detect keypoints with the FAST corner detector (path input only)."""
        bgr, gray = self.read(im)
        fast = cv.FastFeatureDetector_create()
        kp = fast.detect(gray, None)
        return kp

    def match(self, im1, im2, k=None):
        """Brute-force match two images; k selects knnMatch over plain match."""
        kp1, des1 = self.feature(im1)
        kp2, des2 = self.feature(im2)
        alg = self.alg
        if self.matcher is None:
            # SIFT/SURF use float descriptors (default L2 norm); ORB uses
            # binary descriptors, so Hamming distance with cross-checking.
            if alg == 'sift':
                self.matcher = cv.BFMatcher()
            elif alg == 'surf':
                self.matcher = cv.BFMatcher()
            else:
                self.matcher = cv.BFMatcher(cv.NORM_HAMMING, crossCheck=True)
        if k is None:
            return self.matcher.match(des1, des2)
        else:
            return self.matcher.knnMatch(des1,des2, k)

    def flannMatch(self, im1, im2):
        """FLANN-based match; accepts image paths or precomputed descriptors."""
        if isinstance(im1, str):
            kp1, des1 = self.feature(im1)
            kp2, des2 = self.feature(im2)
        else:
            # Inputs are already descriptor matrices.
            des1 = im1
            des2 = im2
        alg = self.alg
        if self.flann_matcher is None:
            if alg == 'sift' or alg == 'surf':
                # KD-tree index for float descriptors.
                FLANN_INDEX_KDTREE = 1
                index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
                search_params = dict(checks=50)
                self.flann_matcher = cv.FlannBasedMatcher(index_params,search_params)
            else:
                # Locality-sensitive hashing index for binary (ORB) descriptors.
                FLANN_INDEX_LSH = 6
                index_params= dict(algorithm = FLANN_INDEX_LSH,
                                   table_number = 6, # 12
                                   key_size = 12, # 20
                                   multi_probe_level = 1) #2
                search_params = dict(checks=50)
                self.flann_matcher = cv.FlannBasedMatcher(index_params,search_params)
        if alg == 'sift' or alg == 'surf':
            return self.flann_matcher.knnMatch(des1,des2,k=2)
        else:
            return self.flann_matcher.match(des1, des2)
class CropImFeature(ImFeature):
    """ImFeature variant that center-crops images to a square before detection."""

    def crop(self, img):
        """Return the largest centered square region of `img` (no copy)."""
        h, w = img.shape[:2]
        if h == w:
            return img
        if h > w:
            # Taller than wide: trim rows symmetrically.
            start = int((h - w) / 2)
            stop = min(start + w, h)
            return img[start:stop, :]
        # Wider than tall: trim columns symmetrically.
        start = int((w - h) / 2)
        stop = min(start + h, w)
        return img[:, start:stop]

    def read(self, img_path, size=100):
        """Read the image, then square-crop the grayscale channel."""
        bgr, gray = super().read(img_path)
        return bgr, self.crop(gray)
class ResizeImFeature(CropImFeature):
    """CropImFeature variant that also scales the cropped image to a fixed width."""

    def resize(self, img, size=128):
        """Scale `img` so its width equals `size`, preserving the aspect ratio."""
        h, w = img.shape[:2]
        new_w = size
        new_h = int(h * new_w / w)
        return cv.resize(img, (new_w, new_h), interpolation = cv.INTER_CUBIC)

    def read(self, img_path, size=100):
        """Read and square-crop via the parent, then resize to `size` wide."""
        bgr, gray = super().read(img_path)
        return bgr, self.resize(gray, size=size)
class ImSim:
    """Image similarity scorer built on FLANN feature matching."""

    def __init__(self, k=500, resize=False, crop=True):
        self.k = k
        # Select the extractor variant; resize takes priority over crop.
        if resize == True:
            self.feature = ResizeImFeature(k=k)
        elif crop == True:
            self.feature = CropImFeature(k=k)
        else:
            self.feature = ImFeature(k=k)

    def match(self, img1, img2):
        """Return FLANN matches with distance < 10, sorted ascending by distance."""
        candidates = self.feature.flannMatch(img1, img2)
        close = [m for m in candidates if m.distance < 10]
        return sorted(close, key=lambda m: m.distance)

    def getFeature(self, img):
        """Extract (keypoints, descriptors) for `img` via the configured extractor."""
        return self.feature.feature(img)

    def calcSim(self, img1, img2):
        """Similarity ratio: close-match count over the smaller feature count.

        Accepts either two image paths or two precomputed descriptor arrays.
        """
        k = self.k
        if isinstance(img1, str):
            kp1, des1 = self.feature.feature(img1)
            kp2, des2 = self.feature.feature(img2)
            # Normalize by the smaller keypoint count.
            k = min(len(kp1), len(kp2))
            matches = self.match(des1, des2)
        else:
            k = min(len(img1), len(img2))
            matches = self.match(img1, img2)
        # NOTE(review): raises ZeroDivisionError when no features are found.
        return len(matches) / k
| 34.541237
| 134
| 0.55335
|
4a00df2d783cf79f563acda01b609263f0582a85
| 15,579
|
py
|
Python
|
venv/Lib/site-packages/pandas/tests/arrays/categorical/test_operators.py
|
ajayiagbebaku/NFL-Model
|
afcc67a85ca7138c58c3334d45988ada2da158ed
|
[
"MIT"
] | 28,899
|
2016-10-13T03:32:12.000Z
|
2022-03-31T21:39:05.000Z
|
venv/Lib/site-packages/pandas/tests/arrays/categorical/test_operators.py
|
ajayiagbebaku/NFL-Model
|
afcc67a85ca7138c58c3334d45988ada2da158ed
|
[
"MIT"
] | 31,004
|
2016-10-12T23:22:27.000Z
|
2022-03-31T23:17:38.000Z
|
venv/Lib/site-packages/pandas/tests/arrays/categorical/test_operators.py
|
ajayiagbebaku/NFL-Model
|
afcc67a85ca7138c58c3334d45988ada2da158ed
|
[
"MIT"
] | 15,149
|
2016-10-13T03:21:31.000Z
|
2022-03-31T18:46:47.000Z
|
import operator
import warnings
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Series,
date_range,
)
import pandas._testing as tm
from pandas.tests.arrays.categorical.common import TestCategorical
class TestCategoricalOpsWithFactor(TestCategorical):
    """Comparison-operator tests that use the shared `self.factor` fixture."""

    def test_categories_none_comparisons(self):
        # A Categorical built from the same values compares equal to the fixture.
        factor = Categorical(["a", "b", "b", "a", "a", "c", "c", "c"], ordered=True)
        tm.assert_categorical_equal(factor, self.factor)

    def test_comparisons(self):
        """Boolean masking with ==, !=, <, >, >=, <= matches ndarray semantics."""
        result = self.factor[self.factor == "a"]
        expected = self.factor[np.asarray(self.factor) == "a"]
        tm.assert_categorical_equal(result, expected)
        result = self.factor[self.factor != "a"]
        expected = self.factor[np.asarray(self.factor) != "a"]
        tm.assert_categorical_equal(result, expected)
        result = self.factor[self.factor < "c"]
        expected = self.factor[np.asarray(self.factor) < "c"]
        tm.assert_categorical_equal(result, expected)
        result = self.factor[self.factor > "a"]
        expected = self.factor[np.asarray(self.factor) > "a"]
        tm.assert_categorical_equal(result, expected)
        result = self.factor[self.factor >= "b"]
        expected = self.factor[np.asarray(self.factor) >= "b"]
        tm.assert_categorical_equal(result, expected)
        result = self.factor[self.factor <= "b"]
        expected = self.factor[np.asarray(self.factor) <= "b"]
        tm.assert_categorical_equal(result, expected)
        # Element-wise equality against a shuffled copy of itself.
        n = len(self.factor)
        other = self.factor[np.random.permutation(n)]
        result = self.factor == other
        expected = np.asarray(self.factor) == np.asarray(other)
        tm.assert_numpy_array_equal(result, expected)
        # Comparing to a value absent from the categories is all-False.
        result = self.factor == "d"
        expected = np.zeros(len(self.factor), dtype=bool)
        tm.assert_numpy_array_equal(result, expected)
        # comparisons with categoricals
        cat_rev = Categorical(["a", "b", "c"], categories=["c", "b", "a"], ordered=True)
        cat_rev_base = Categorical(
            ["b", "b", "b"], categories=["c", "b", "a"], ordered=True
        )
        cat = Categorical(["a", "b", "c"], ordered=True)
        cat_base = Categorical(["b", "b", "b"], categories=cat.categories, ordered=True)
        # comparisons need to take categories ordering into account
        res_rev = cat_rev > cat_rev_base
        exp_rev = np.array([True, False, False])
        tm.assert_numpy_array_equal(res_rev, exp_rev)
        res_rev = cat_rev < cat_rev_base
        exp_rev = np.array([False, False, True])
        tm.assert_numpy_array_equal(res_rev, exp_rev)
        res = cat > cat_base
        exp = np.array([False, False, True])
        tm.assert_numpy_array_equal(res, exp)
        # Only categories with same categories can be compared
        msg = "Categoricals can only be compared if 'categories' are the same"
        with pytest.raises(TypeError, match=msg):
            cat > cat_rev
        cat_rev_base2 = Categorical(["b", "b", "b"], categories=["c", "b", "a", "d"])
        with pytest.raises(TypeError, match=msg):
            cat_rev > cat_rev_base2
        # Only categories with same ordering information can be compared
        cat_unorderd = cat.set_ordered(False)
        assert not (cat > cat).any()
        with pytest.raises(TypeError, match=msg):
            cat > cat_unorderd
        # comparison (in both directions) with Series will raise
        s = Series(["b", "b", "b"])
        msg = (
            "Cannot compare a Categorical for op __gt__ with type "
            r"<class 'numpy\.ndarray'>"
        )
        with pytest.raises(TypeError, match=msg):
            cat > s
        with pytest.raises(TypeError, match=msg):
            cat_rev > s
        with pytest.raises(TypeError, match=msg):
            s < cat
        with pytest.raises(TypeError, match=msg):
            s < cat_rev
        # comparison with numpy.array will raise in both direction, but only on
        # newer numpy versions
        a = np.array(["b", "b", "b"])
        with pytest.raises(TypeError, match=msg):
            cat > a
        with pytest.raises(TypeError, match=msg):
            cat_rev > a
        # Make sure that unequal comparison take the categories order in
        # account
        cat_rev = Categorical(list("abc"), categories=list("cba"), ordered=True)
        exp = np.array([True, False, False])
        res = cat_rev > "b"
        tm.assert_numpy_array_equal(res, exp)
        # check that zero-dim array gets unboxed
        res = cat_rev > np.array("b")
        tm.assert_numpy_array_equal(res, exp)
class TestCategoricalOps:
def test_compare_frame(self):
# GH#24282 check that Categorical.__cmp__(DataFrame) defers to frame
data = ["a", "b", 2, "a"]
cat = Categorical(data)
df = DataFrame(cat)
result = cat == df.T
expected = DataFrame([[True, True, True, True]])
tm.assert_frame_equal(result, expected)
result = cat[::-1] != df.T
expected = DataFrame([[False, True, True, False]])
tm.assert_frame_equal(result, expected)
def test_compare_frame_raises(self, all_compare_operators):
# alignment raises unless we transpose
op = getattr(operator, all_compare_operators)
cat = Categorical(["a", "b", 2, "a"])
df = DataFrame(cat)
msg = "Unable to coerce to Series, length must be 1: given 4"
with pytest.raises(ValueError, match=msg):
op(cat, df)
def test_datetime_categorical_comparison(self):
dt_cat = Categorical(date_range("2014-01-01", periods=3), ordered=True)
tm.assert_numpy_array_equal(dt_cat > dt_cat[0], np.array([False, True, True]))
tm.assert_numpy_array_equal(dt_cat[0] < dt_cat, np.array([False, True, True]))
def test_reflected_comparison_with_scalars(self):
# GH8658
cat = Categorical([1, 2, 3], ordered=True)
tm.assert_numpy_array_equal(cat > cat[0], np.array([False, True, True]))
tm.assert_numpy_array_equal(cat[0] < cat, np.array([False, True, True]))
def test_comparison_with_unknown_scalars(self):
# https://github.com/pandas-dev/pandas/issues/9836#issuecomment-92123057
# and following comparisons with scalars not in categories should raise
# for unequal comps, but not for equal/not equal
cat = Categorical([1, 2, 3], ordered=True)
msg = "Invalid comparison between dtype=category and int"
with pytest.raises(TypeError, match=msg):
cat < 4
with pytest.raises(TypeError, match=msg):
cat > 4
with pytest.raises(TypeError, match=msg):
4 < cat
with pytest.raises(TypeError, match=msg):
4 > cat
tm.assert_numpy_array_equal(cat == 4, np.array([False, False, False]))
tm.assert_numpy_array_equal(cat != 4, np.array([True, True, True]))
def test_comparison_with_tuple(self):
cat = Categorical(np.array(["foo", (0, 1), 3, (0, 1)], dtype=object))
result = cat == "foo"
expected = np.array([True, False, False, False], dtype=bool)
tm.assert_numpy_array_equal(result, expected)
result = cat == (0, 1)
expected = np.array([False, True, False, True], dtype=bool)
tm.assert_numpy_array_equal(result, expected)
result = cat != (0, 1)
tm.assert_numpy_array_equal(result, ~expected)
def test_comparison_of_ordered_categorical_with_nan_to_scalar(
self, compare_operators_no_eq_ne
):
# https://github.com/pandas-dev/pandas/issues/26504
# BUG: fix ordered categorical comparison with missing values (#26504 )
# and following comparisons with scalars in categories with missing
# values should be evaluated as False
cat = Categorical([1, 2, 3, None], categories=[1, 2, 3], ordered=True)
scalar = 2
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
expected = getattr(np.array(cat), compare_operators_no_eq_ne)(scalar)
actual = getattr(cat, compare_operators_no_eq_ne)(scalar)
tm.assert_numpy_array_equal(actual, expected)
def test_comparison_of_ordered_categorical_with_nan_to_listlike(
self, compare_operators_no_eq_ne
):
# https://github.com/pandas-dev/pandas/issues/26504
# and following comparisons of missing values in ordered Categorical
# with listlike should be evaluated as False
cat = Categorical([1, 2, 3, None], categories=[1, 2, 3], ordered=True)
other = Categorical([2, 2, 2, 2], categories=[1, 2, 3], ordered=True)
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
expected = getattr(np.array(cat), compare_operators_no_eq_ne)(2)
actual = getattr(cat, compare_operators_no_eq_ne)(other)
tm.assert_numpy_array_equal(actual, expected)
@pytest.mark.parametrize(
"data,reverse,base",
[(list("abc"), list("cba"), list("bbb")), ([1, 2, 3], [3, 2, 1], [2, 2, 2])],
)
def test_comparisons(self, data, reverse, base):
cat_rev = Series(Categorical(data, categories=reverse, ordered=True))
cat_rev_base = Series(Categorical(base, categories=reverse, ordered=True))
cat = Series(Categorical(data, ordered=True))
cat_base = Series(
Categorical(base, categories=cat.cat.categories, ordered=True)
)
s = Series(base)
a = np.array(base)
# comparisons need to take categories ordering into account
res_rev = cat_rev > cat_rev_base
exp_rev = Series([True, False, False])
tm.assert_series_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = Series([False, False, True])
tm.assert_series_equal(res_rev, exp_rev)
res = cat > cat_base
exp = Series([False, False, True])
tm.assert_series_equal(res, exp)
scalar = base[1]
res = cat > scalar
exp = Series([False, False, True])
exp2 = cat.values > scalar
tm.assert_series_equal(res, exp)
tm.assert_numpy_array_equal(res.values, exp2)
res_rev = cat_rev > scalar
exp_rev = Series([True, False, False])
exp_rev2 = cat_rev.values > scalar
tm.assert_series_equal(res_rev, exp_rev)
tm.assert_numpy_array_equal(res_rev.values, exp_rev2)
# Only categories with same categories can be compared
msg = "Categoricals can only be compared if 'categories' are the same"
with pytest.raises(TypeError, match=msg):
cat > cat_rev
# categorical cannot be compared to Series or numpy array, and also
# not the other way around
msg = (
"Cannot compare a Categorical for op __gt__ with type "
r"<class 'numpy\.ndarray'>"
)
with pytest.raises(TypeError, match=msg):
cat > s
with pytest.raises(TypeError, match=msg):
cat_rev > s
with pytest.raises(TypeError, match=msg):
cat > a
with pytest.raises(TypeError, match=msg):
cat_rev > a
with pytest.raises(TypeError, match=msg):
s < cat
with pytest.raises(TypeError, match=msg):
s < cat_rev
with pytest.raises(TypeError, match=msg):
a < cat
with pytest.raises(TypeError, match=msg):
a < cat_rev
@pytest.mark.parametrize(
"ctor",
[
lambda *args, **kwargs: Categorical(*args, **kwargs),
lambda *args, **kwargs: Series(Categorical(*args, **kwargs)),
],
)
def test_unordered_different_order_equal(self, ctor):
# https://github.com/pandas-dev/pandas/issues/16014
c1 = ctor(["a", "b"], categories=["a", "b"], ordered=False)
c2 = ctor(["a", "b"], categories=["b", "a"], ordered=False)
assert (c1 == c2).all()
c1 = ctor(["a", "b"], categories=["a", "b"], ordered=False)
c2 = ctor(["b", "a"], categories=["b", "a"], ordered=False)
assert (c1 != c2).all()
c1 = ctor(["a", "a"], categories=["a", "b"], ordered=False)
c2 = ctor(["b", "b"], categories=["b", "a"], ordered=False)
assert (c1 != c2).all()
c1 = ctor(["a", "a"], categories=["a", "b"], ordered=False)
c2 = ctor(["a", "b"], categories=["b", "a"], ordered=False)
result = c1 == c2
tm.assert_numpy_array_equal(np.array(result), np.array([True, False]))
def test_unordered_different_categories_raises(self):
c1 = Categorical(["a", "b"], categories=["a", "b"], ordered=False)
c2 = Categorical(["a", "c"], categories=["c", "a"], ordered=False)
with pytest.raises(TypeError, match=("Categoricals can only be compared")):
c1 == c2
def test_compare_different_lengths(self):
c1 = Categorical([], categories=["a", "b"])
c2 = Categorical([], categories=["a"])
msg = "Categoricals can only be compared if 'categories' are the same."
with pytest.raises(TypeError, match=msg):
c1 == c2
def test_compare_unordered_different_order(self):
# https://github.com/pandas-dev/pandas/issues/16603#issuecomment-
# 349290078
a = Categorical(["a"], categories=["a", "b"])
b = Categorical(["b"], categories=["b", "a"])
assert not a.equals(b)
def test_numeric_like_ops(self):
    """Arithmetic and numeric reductions on categorical data must raise."""
    frame = DataFrame({"value": np.random.randint(0, 10000, 100)})
    bin_names = [f"{i} - {i + 499}" for i in range(0, 10000, 500)]
    bin_cats = Categorical(bin_names, bin_names)
    frame = frame.sort_values(by=["value"], ascending=True)
    frame["value_group"] = pd.cut(
        frame.value, range(0, 10500, 500), right=False, labels=bin_cats
    )

    # numeric ops should not succeed
    arith_ops = [
        ("__add__", r"\+"),
        ("__sub__", "-"),
        ("__mul__", r"\*"),
        ("__truediv__", "/"),
    ]
    for op, str_rep in arith_ops:
        msg = f"Series cannot perform the operation {str_rep}|unsupported operand"
        with pytest.raises(TypeError, match=msg):
            getattr(frame, op)(frame)

    # reduction ops should not succeed (unless specifically defined, e.g.
    # min/max)
    ser = frame["value_group"]
    for op in ["kurt", "skew", "var", "std", "mean", "sum", "median"]:
        msg = f"'Categorical' does not implement reduction '{op}'"
        with pytest.raises(TypeError, match=msg):
            getattr(ser, op)(numeric_only=False)

    # mad technically works because it takes always the numeric data

    # numpy reductions dispatch to the same error.
    ser = Series(Categorical([1, 2, 3, 4]))
    with pytest.raises(
        TypeError, match="'Categorical' does not implement reduction 'sum'"
    ):
        np.sum(ser)

    # the same numeric ops should fail on a Series as well
    for op, str_rep in arith_ops:
        msg = f"Series cannot perform the operation {str_rep}|unsupported operand"
        with pytest.raises(TypeError, match=msg):
            getattr(ser, op)(2)

    # invalid ufunc
    msg = "Object with dtype category cannot perform the numpy op log"
    with pytest.raises(TypeError, match=msg):
        np.log(ser)
| 38.657568
| 88
| 0.601451
|
4a00e01088bb0b81dfa997df094fe47ca1c5a0de
| 2,975
|
py
|
Python
|
tests/test_country_flags_unused.py
|
Pelmen323/Kaiserreich_Autotests
|
3ad60ae09bb6d15922c5f581c0b1d80c3e8f08de
|
[
"MIT"
] | null | null | null |
tests/test_country_flags_unused.py
|
Pelmen323/Kaiserreich_Autotests
|
3ad60ae09bb6d15922c5f581c0b1d80c3e8f08de
|
[
"MIT"
] | null | null | null |
tests/test_country_flags_unused.py
|
Pelmen323/Kaiserreich_Autotests
|
3ad60ae09bb6d15922c5f581c0b1d80c3e8f08de
|
[
"MIT"
] | null | null | null |
##########################
# Test script to check for unused country flags
# If flag is not used via "has_country_flag" at least once, it will appear in test results
# By Pelmen, https://github.com/Pelmen323
##########################
import glob
import re
import os
from ..test_classes.generic_test_class import FileOpener, DataCleaner, ResultsReporter
import logging
FALSE_POSITIVES = ('is_han_chinese_tag', # Currently unused flags
'is_non_han_chinese_tag',
'saf_antagonise_maf',
'saf_antagonise_nmb')
def test_check_unused_country_flags(test_runner: object):
    """Report every country flag that is set but never queried.

    Walks all .txt files under the mod, collects every flag created via
    'set_country_flag' (plain and block syntax), then counts how often each
    is read via 'has_country_flag'. Flags read zero times are reported.
    """
    filepath = test_runner.full_path_to_mod
    country_flags = {}  # flag name -> number of 'has_country_flag' usages found
    paths = {}  # flag name -> basename of a file that sets it (for reporting)
    # Part 1 - get the dict of entities
    for filename in glob.iglob(filepath + '**/*.txt', recursive=True):
        text_file = FileOpener.open_text_file(filename)
        if 'set_country_flag =' in text_file:
            # Plain form: set_country_flag = some_flag
            pattern_matches = re.findall("set_country_flag = \\b[\\w']*\\b", text_file)
            if len(pattern_matches) > 0:
                for match in pattern_matches:
                    # [19:] drops the 'set_country_flag = ' prefix (19 chars)
                    match = match[19:].strip()
                    country_flags[match] = 0
                    paths[match] = os.path.basename(filename)
            # Block form: set_country_flag = { flag = some_flag ... }
            pattern_matches2 = re.findall("set_country_flag = \\{ flag = \\b[\\w']*\\b", text_file)
            if len(pattern_matches2) > 0:
                for match in pattern_matches2:
                    # [27:] leaves ' some_flag'; strip() removes the blank
                    match = match[27:].strip()
                    country_flags[match] = 0
                    paths[match] = os.path.basename(filename)
    # Part 2 - clear false positives and flags with variables:
    country_flags = DataCleaner.clear_false_positives(input_iter=country_flags, false_positives=FALSE_POSITIVES)
    # Part 3 - count the number of entity occurrences
    logging.debug(f'{len(country_flags)} set country flags found')
    for filename in glob.iglob(filepath + '**/*.txt', recursive=True):
        text_file = FileOpener.open_text_file(filename)
        # Only re-check flags not seen so far; shrinks the inner loop per file.
        not_encountered_flags = [i for i in country_flags.keys() if country_flags[i] == 0]
        if 'has_country_flag =' in text_file:
            for flag in not_encountered_flags:
                if flag in text_file:
                    # Count both the inline and the single-line block form.
                    country_flags[flag] += text_file.count(f'has_country_flag = {flag}')
                    country_flags[flag] += text_file.count(f'has_country_flag = {{ flag = {flag}')
                    if country_flags[flag] == 0:  # Performance optimization
                        # Multi-line block form: flag on its own (tab-indented) line.
                        pattern = 'has_country_flag = \\{\\n\\t*flag = ' + flag
                        country_flags[flag] += len(re.findall(pattern, text_file))
    # Part 4 - throw the error if entity is not used
    results = [i for i in country_flags if country_flags[i] == 0]
    ResultsReporter.report_results(results=results, paths=paths, message="Unused country flags were encountered - they are not used via 'has_country_flag' at least once. Check console output")
| 47.983871
| 192
| 0.627563
|
4a00e0a0cc7e6870328d694b16f1a5cbd3c03121
| 6,018
|
py
|
Python
|
src/visualization/draw_patterns.py
|
chrhenning/snn_global_pattern_induction
|
3ee4037a4568c393378eaec74483696d5281f376
|
[
"Apache-2.0"
] | 9
|
2018-06-21T14:16:12.000Z
|
2021-06-22T15:52:35.000Z
|
src/visualization/draw_patterns.py
|
Arthas1121/snn_global_pattern_induction
|
3ee4037a4568c393378eaec74483696d5281f376
|
[
"Apache-2.0"
] | null | null | null |
src/visualization/draw_patterns.py
|
Arthas1121/snn_global_pattern_induction
|
3ee4037a4568c393378eaec74483696d5281f376
|
[
"Apache-2.0"
] | 4
|
2018-05-08T13:51:09.000Z
|
2019-04-11T13:56:06.000Z
|
#!/usr/bin/env python3
# Copyright 2017 Christian Henning
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
@title :visualization/draw_patterns.py
@author :ch
@contact :christian@ini.ethz.ch
@created :04/05/2017
@version :1.0
@python_version :3.5.2
Visualizing a PatternGeneration instance.
This module can be used to investigate the different patterns, generated by the
class PatternGeneration. To evaluate their quality the module computes and
plots several measures additional to the plotted pattern activation per neuron
for all classes.
"""
import configuration as config
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.cm as cm
import matplotlib.colors as mcolors
import numpy as np
import os
import math
from util import lin_alg as la
import logging
logger = logging.getLogger(config.logging_name)
def draw_patterns(network, pattern):
    """Draw the pattern for each output class in a different plot. The
    activation per neuron is color coded. Only excitatory neurons are drawn, as
    the patterns are only going to influence those.

    Args:
        network: An instance of the class NetworkModel.
        pattern: An instance of the class PatternGeneration.

    Returns:
    """
    w = network.num_layers
    # Maximum number of neurons per layer.
    max_n = max([network.layer_size(i) for i in range(w)])
    # A scaling factor, to scale the whole plot to a higher resolution (outside
    # the box 0..1).
    # Do not change! Not implemented.
    scaling = 1.0
    # Width of a single layer.
    layer_width = 1./w * scaling
    # The maximum height a single neuron can demand.
    neuron_height = 1./max_n * scaling
    # Circle radius (radius of a single neuron).
    radius = 0.8 * min(layer_width, .5*neuron_height)

    # Arrange the per-class subplots in a near-square grid.
    num_plots = pattern.num_patterns
    plot_rows = int(math.floor(math.sqrt(num_plots)))
    plot_cols = int(math.ceil(math.sqrt(num_plots)))

    plt.close('all')
    # NOTE(review): axes[r,c] indexing assumes plt.subplots returns a 2-D
    # array, i.e. plot_rows > 1 and plot_cols > 1; with num_plots == 1 a bare
    # Axes is returned — confirm callers never pass a single pattern.
    fig, axes = plt.subplots(plot_rows,plot_cols)
    plt.suptitle('Pattern: %s' % config.pattern_type)
    fig.subplots_adjust(hspace=0.4)
    # Title the subplots that are in use and hide their tick marks.
    for i in range(num_plots):
        r = i // plot_cols
        c = i % plot_cols
        axes[r,c].set_title('Output class %d' % i)
        axes[r,c].get_xaxis().set_visible(False)
        axes[r,c].get_yaxis().set_visible(False)
    # Hide the surplus subplots at the end of the grid.
    for i in range(num_plots, plot_rows*plot_cols):
        r = i // plot_cols
        c = i % plot_cols
        axes[r,c].set_axis_off()

    # Store position of each neuron (index: layer, neuron).
    positions = dict()

    ### Draw neurons of each layer.
    # The circle should be centered within a box of size layer_width x
    # neuron_height. Therefore, we need the offsets within this box.
    cox = layer_width / 2.
    coy = neuron_height / 2.

    # Map activation values onto the 'seismic' colormap; x/2 + 0.5 shifts the
    # assumed [-1, 1] activation range into the colormap's [0, 1] domain
    # (matches the colorbar limits set below).
    cmap = cm.get_cmap('seismic')
    color = lambda x : cmap(x/2. + .5)

    for i in range(num_plots):
        r = i // plot_cols
        c = i % plot_cols
        ax = axes[r,c]

        x_offset = 0
        for l in range(w):
            nn = network.layer_size(l)
            # Center this layer's column of neurons vertically.
            y_offset = (scaling - nn * neuron_height) / 2.
            # The input layer (l == 0) carries no pattern; draw it neutral.
            if l > 0:
                pat = pattern.get_pattern(l,i)
            else:
                pat = np.zeros(nn)

            for n in range(nn):
                ind = (l, n)
                positions[ind] = (x_offset+cox, y_offset+coy)
                ax.add_patch(patches.Circle(positions[ind], radius,
                                            facecolor=color(pat[n]),
                                            edgecolor='k'))
                y_offset += neuron_height
            x_offset += layer_width

    # Setup colorbar as legend.
    normalizer = mcolors.Normalize(vmin=-1, vmax=1)
    cbar = cm.ScalarMappable(norm=normalizer, cmap=cmap)
    cbar_values = np.linspace(-1,1,3)
    cbar.set_array(cbar_values)

    # Adjust figure, just that colorbar has enough space on the left.
    fig.subplots_adjust(right=0.8)
    cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
    plt.colorbar(cbar, cax=cbar_ax)

    # Add textbox with additional information.
    fig.subplots_adjust(top=0.8)
    text_ax = fig.add_axes([0.0, 0.0, 1.0, 0.1])
    text_ax.get_xaxis().set_visible(False)
    text_ax.get_yaxis().set_visible(False)

    # Summarize pattern-quality measures per hidden layer (mean, std. dev.).
    orth_msg = 'Orthogonality [layer = mean (std. dev.)]: '
    eucl_msg = 'Euclidean Distance [layer = mean (std. dev.)]: '
    spars_msg = 'Sparsity (rel. percentage) [layer = mean (std. dev.)]: '
    for l in range(1, network.num_layers-1):
        pat = pattern.get_pattern(l)
        m, sd = la.check_orthogonality(pat)
        orth_msg += '%d = %g (%g), ' % (l, m, sd)
        m, sd = la.check_euclidean_distances(pat)
        eucl_msg += '%d = %g (%g), ' % (l, m, sd)
        m, sd = la.evaluate_sparsity(pat)
        spars_msg += '%d = %g (%g), ' % (l, m, sd)
    msg = orth_msg + '\n' + eucl_msg + '\n' + spars_msg
    text_ax.text(0.5, 0.5, msg, horizontalalignment='center',
                 verticalalignment='center')

    # FIXME depends on the backend
    manager = plt.get_current_fig_manager()
    manager.resize(*manager.window.maxsize())

    if config.save_pattern_plot:
        # Check if directory of plot already exists.
        fn = config.patterns_plot_filename
        if not os.path.isdir(os.path.dirname(fn)):
            os.mkdir(os.path.dirname(fn))
        plt.savefig(fn, format='svg')

    if config.plot_patterns:
        plt.show()


if __name__ == '__main__':
    pass
| 31.673684
| 79
| 0.636756
|
4a00e0dda512b3211fe99784417d80db8643ecd2
| 11,666
|
py
|
Python
|
vrchatapi/model/info_push_data_clickable.py
|
vrchatapi/vrchatapi-python
|
afe5ec9fda298723e7408358473aafe343e27d18
|
[
"MIT"
] | 8
|
2021-08-25T02:35:30.000Z
|
2022-03-28T18:11:58.000Z
|
vrchatapi/model/info_push_data_clickable.py
|
vrchatapi/vrchatapi-python
|
afe5ec9fda298723e7408358473aafe343e27d18
|
[
"MIT"
] | 1
|
2022-03-18T20:29:30.000Z
|
2022-03-18T20:35:05.000Z
|
vrchatapi/model/info_push_data_clickable.py
|
vrchatapi/vrchatapi-python
|
afe5ec9fda298723e7408358473aafe343e27d18
|
[
"MIT"
] | 1
|
2022-01-11T10:49:12.000Z
|
2022-01-11T10:49:12.000Z
|
"""
VRChat API Documentation
The version of the OpenAPI document: 1.6.8
Contact: me@ruby.js.org
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from vrchatapi.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from ..model_utils import OpenApiModel
from vrchatapi.exceptions import ApiAttributeError
class InfoPushDataClickable(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    # Enum values accepted for 'command' (UPPERCASE key -> wire value).
    allowed_values = {
        ('command',): {
            'OPENURL': "OpenURL",
            'OPENVRCPLUSMENU': "OpenVRCPlusMenu",
            'OPENSAFETYMENU': "OpenSafetyMenu",
            'CANNEDWORLDSEARCH': "CannedWorldSearch",
        },
    }

    # Per-attribute validation constraints enforced by the model machinery.
    validations = {
        ('command',): {
            'min_length': 1,
        },
    }

    @cached_property
    def additional_properties_type():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        return (bool, date, datetime, dict, float, int, list, str, none_type,)  # noqa: E501

    # Instances of this model may never be serialized as JSON null.
    _nullable = False

    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        return {
            'command': (str,),  # noqa: E501
            'parameters': ([str],),  # noqa: E501
        }

    @cached_property
    def discriminator():
        # No polymorphic discriminator is defined for this schema.
        return None

    # Python attribute name -> JSON key (identical here).
    attribute_map = {
        'command': 'command',  # noqa: E501
        'parameters': 'parameters',  # noqa: E501
    }

    read_only_vars = {
    }

    _composed_schemas = {}

    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, command, *args, **kwargs):  # noqa: E501
        """InfoPushDataClickable - a model defined in OpenAPI

        Args:
            command (str):

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            parameters ([str]): In case of OpenURL, this would contain the link.. [optional]  # noqa: E501
        """

        # Pop the framework-internal keyword arguments before treating the
        # remaining kwargs as model properties.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        self = super(OpenApiModel, cls).__new__(cls)

        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        self.command = command
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
        return self

    # Internal attribute names excluded from the property machinery.
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])

    @convert_js_args_to_python_args
    def __init__(self, command, *args, **kwargs):  # noqa: E501
        """InfoPushDataClickable - a model defined in OpenAPI

        Args:
            command (str):

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            parameters ([str]): In case of OpenURL, this would contain the link.. [optional]  # noqa: E501
        """

        # Pop the framework-internal keyword arguments before treating the
        # remaining kwargs as model properties.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        self.command = command
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
            # Unlike _from_openapi_data, direct construction rejects
            # read-only attributes (generated pattern; read_only_vars is
            # empty for this model, so this never fires here).
            if var_name in self.read_only_vars:
                raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                                     f"class with read only attributes.")
| 42.421818
| 121
| 0.566347
|
4a00e1294430918de4e285e4c00374ebdb2da252
| 7,014
|
py
|
Python
|
assignments/assignment-6/src/GOT_classification.py
|
PeterThramkrongart/cds-language-portfolio
|
9af745bdc42ed6b13121d2ef0382bcf7b888fbb3
|
[
"MIT"
] | 1
|
2021-05-20T07:39:44.000Z
|
2021-05-20T07:39:44.000Z
|
assignments/assignment-6/src/GOT_classification.py
|
PeterThramkrongart/cds-language-portfolio
|
9af745bdc42ed6b13121d2ef0382bcf7b888fbb3
|
[
"MIT"
] | null | null | null |
assignments/assignment-6/src/GOT_classification.py
|
PeterThramkrongart/cds-language-portfolio
|
9af745bdc42ed6b13121d2ef0382bcf7b888fbb3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# system tools
import os
import sys
sys.path.append(os.path.join(".."))
# pandas, numpy
import pandas as pd
import numpy as np
# import my classifier utility functions
import utils.classifier_utils as clf
from sklearn.preprocessing import LabelBinarizer, LabelEncoder
# Machine learning stuff from sklearn
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.linear_model import LogisticRegression, SGDClassifier
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn import metrics
# tools from tensorflow
import tensorflow as tf
from tensorflow.random import set_seed
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (Dense,
Dropout,
BatchNormalization,
)
from tensorflow.keras.optimizers import SGD
from tensorflow.keras import backend as K
from tensorflow.keras.utils import plot_model
from tensorflow.keras.regularizers import L2
# matplotlib
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use("Agg")
# Fixed seed shared by train/test splitting, TensorFlow, and NumPy so that
# repeated runs of this script are reproducible.
random_state = 42

#set seed for reproducibility
set_seed(random_state)
np.random.seed(random_state)
def plot_history(H, epochs):
    """
    Utility function for plotting model history using matplotlib
    H: model history
    epochs: number of epochs for which the model was trained
    """
    epoch_axis = np.arange(0, epochs)
    plt.style.use("fivethirtyeight")
    plt.figure()
    # One curve per tracked metric: training/validation loss and accuracy.
    for history_key, curve_label in [
        ("loss", "train_loss"),
        ("val_loss", "val_loss"),
        ("accuracy", "train_acc"),
        ("val_accuracy", "val_acc"),
    ]:
        plt.plot(epoch_axis, H.history[history_key], label=curve_label)
    plt.title("Training Loss and Accuracy")
    plt.xlabel("Epoch #")
    plt.ylabel("Loss/Accuracy")
    plt.legend()
    plt.tight_layout()
    plt.draw()
    plt.savefig(os.path.join("..", "models", "nn_training_history.png"))
def main():
    """
    A function for running text classification of GoT texts from the terminal

    Trains and evaluates three models that predict the season a line of
    dialogue belongs to:
      1. A logistic-regression baseline on tf-idf features.
      2. A small dense neural network.
      3. An SGD classifier tuned via grid search with cross-validation.
    Classification reports are written to ../models/.
    """
    # loading data
    data = pd.read_csv(os.path.join("..", "data", "raw", "Game_of_Thrones_Script.csv"))

    # gathering all lines from a given character by a season and episode to
    # give the models more context per sample and improve accuracy
    data = data.groupby(["Season", "Episode", "Name"])
    data = data["Sentence"].agg(lambda x: " ".join(x)).to_frame()
    data = data.reset_index().rename(columns={"Sentence": "Text"})  # resetting index

    # train and test split using sklearn
    X_train, X_test, y_train, y_test = train_test_split(data.Text,
                                                        data["Season"],
                                                        test_size=0.1,
                                                        random_state=random_state)
    print("Data loaded and split")

    ### a baseline model of a logistic regression ###
    print("fitting baseline LogReg model")
    pipe = Pipeline(steps=[
        ('tfidf', TfidfVectorizer()),
        ('clf', LogisticRegression(solver="liblinear", random_state=random_state))
    ])

    # report model metrics
    classifier = pipe.fit(X_train, y_train)
    y_pred = classifier.predict(X_test)
    classifier_metrics_lr = metrics.classification_report(y_test, y_pred)
    print(classifier_metrics_lr)

    # save the classification report; 'with' guarantees the file is closed
    filepath = os.path.join("..", "models", "LG_metrics.txt")
    with open(filepath, "w") as text_file:
        text_file.write(classifier_metrics_lr)

    ### Building network ###
    # integers to one-hot vectors: fit on the training labels only, then
    # reuse the fitted binarizer for the test labels. Refitting on y_test
    # (as before) could yield a different column order / count if a class
    # were missing from the test split, corrupting validation metrics.
    lb = LabelBinarizer()
    y_train_bin = lb.fit_transform(y_train)
    y_test_bin = lb.transform(y_test)

    # the nn will have a vocabulary size of 15000
    maxlen = 15000
    vectorizer = TfidfVectorizer(ngram_range=(1, 2), max_features=maxlen)
    X_train_feats = vectorizer.fit_transform(X_train).toarray()
    X_test_feats = vectorizer.transform(X_test).toarray()

    # l2 regularization
    l2 = L2(0.00001)

    # a new neural network
    model = Sequential()
    model.add(Dense(64, activation='relu', kernel_regularizer=l2, input_shape=(maxlen,)))
    model.add(BatchNormalization())
    model.add(Dropout(0.3))
    model.add(Dense(8, activation='softmax'))

    # compiler
    model.compile(loss='categorical_crossentropy',
                  optimizer=SGD(learning_rate=.01),
                  metrics=['accuracy'])
    epochs = 10
    print(model.summary())

    achitecture_path = os.path.join("..", "models", "nn_model_architecture.png")
    # plot model
    plot_model(model, to_file=achitecture_path, show_shapes=True, show_layer_names=True)
    print(f"Image of model architecture saved in {achitecture_path}")

    print("fitting nn-model")
    # a fit history of the network
    history = model.fit(X_train_feats, y_train_bin,
                        epochs=epochs,
                        verbose=True,
                        validation_data=(X_test_feats, y_test_bin))
    # plot history
    plot_history(history, epochs=epochs)

    predictions = model.predict(X_test_feats, verbose=True)
    # get the class with highest probability for each sample
    y_pred = np.argmax(predictions, axis=1)

    le = LabelEncoder()
    y_test_int = le.fit_transform(y_test)  # encode labels for the classification report

    # get the classification report
    metrics_nn = metrics.classification_report(y_test_int, y_pred, target_names=y_test.sort_values().unique())
    print(metrics_nn)

    # save metrics
    filepath = os.path.join("..", "models", "NN_metrics.txt")
    with open(filepath, "w") as text_file:
        text_file.write(metrics_nn)

    print("We will now use grid search and crossvalidation to find a better model using an SGD-classifier")
    # Grid Search for SGD Classifier (stochastic gradient classifier)
    ## making a pipeline where we use two embedding methods to find out the best one
    pipe = Pipeline(steps=[
        ('tfidf', TfidfVectorizer()),
        ('clf', SGDClassifier(random_state=random_state))
    ])
    ## specifying the hyper-parameter search space
    parameters = {
        'tfidf__ngram_range': [(1, 1), (1, 2), (1, 3)],
        'tfidf__max_df': [1.0, 0.95, 0.9, 0.85],
        'tfidf__min_df': [0.0, 0.05],
        'clf__alpha': [1e-3, 1e-2, 1e-1],  # learning rate
        'clf__penalty': ['l2'],
    }

    search = GridSearchCV(pipe, parameters, n_jobs=-1, verbose=1, refit=True)
    gs_clf = search.fit(X_train, y_train)
    # (message previously read "The best<score>" with no label/space)
    print(f"The best score: {gs_clf.best_score_}")
    print(f"The best model hyper parameters: {gs_clf.best_params_}")

    y_pred = gs_clf.predict(X_test)
    classifier_metrics_sgd = metrics.classification_report(y_test, y_pred)
    print(classifier_metrics_sgd)
    # save the classification report
    filepath = os.path.join("..", "models", "SGD_metrics.txt")
    with open(filepath, "w") as text_file:
        text_file.write(classifier_metrics_sgd)


if __name__ == "__main__":
    main()
| 32.174312
| 111
| 0.689621
|
4a00e158d69333c8c3b60948551b2d35dff83e34
| 1,038
|
py
|
Python
|
kubernetes/test/test_v1_weighted_pod_affinity_term.py
|
jashandeep-sohi/kubernetes-python
|
e057f273069de445a2d5a250ac5fe37d79671f3b
|
[
"Apache-2.0"
] | 1
|
2020-05-08T12:41:04.000Z
|
2020-05-08T12:41:04.000Z
|
kubernetes/test/test_v1_weighted_pod_affinity_term.py
|
jashandeep-sohi/kubernetes-python
|
e057f273069de445a2d5a250ac5fe37d79671f3b
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/test/test_v1_weighted_pod_affinity_term.py
|
jashandeep-sohi/kubernetes-python
|
e057f273069de445a2d5a250ac5fe37d79671f3b
|
[
"Apache-2.0"
] | 2
|
2021-07-09T08:49:05.000Z
|
2021-08-03T18:08:36.000Z
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.10.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_weighted_pod_affinity_term import V1WeightedPodAffinityTerm
class TestV1WeightedPodAffinityTerm(unittest.TestCase):
    """Unit-test stubs for the V1WeightedPodAffinityTerm model."""

    def setUp(self):
        """No fixtures are required for these stubs."""

    def tearDown(self):
        """Nothing to clean up."""

    def testV1WeightedPodAffinityTerm(self):
        """
        Test V1WeightedPodAffinityTerm
        """
        # FIXME: construct object with mandatory attributes with example values
        # model = kubernetes.client.models.v1_weighted_pod_affinity_term.V1WeightedPodAffinityTerm()


if __name__ == '__main__':
    unittest.main()
| 23.066667
| 105
| 0.73025
|
4a00e3aac4d18ead908d5ca44b928b6205df6c84
| 1,349
|
py
|
Python
|
sampling_methods/sampling_def.py
|
sarmadt/active-learning
|
e8c3ad15d8960a90debb7334ade236d914b12556
|
[
"Apache-2.0"
] | null | null | null |
sampling_methods/sampling_def.py
|
sarmadt/active-learning
|
e8c3ad15d8960a90debb7334ade236d914b12556
|
[
"Apache-2.0"
] | null | null | null |
sampling_methods/sampling_def.py
|
sarmadt/active-learning
|
e8c3ad15d8960a90debb7334ade236d914b12556
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Abstract class for sampling methods.
Provides interface to sampling methods that allow same signature
for select_batch. Each subclass implements select_batch_ with the desired
signature for readability.
"""
import abc
import numpy as np
class SamplingMethod(object, metaclass=abc.ABCMeta):
    """Abstract base class for active-learning batch samplers.

    Subclasses implement ``select_batch_`` with whatever keyword signature
    they need; callers always go through ``select_batch``, which forwards
    its keyword arguments unchanged.
    """

    @abc.abstractmethod
    def __init__(self, X, y, seed, **kwargs):
        # Subclasses must override __init__, but may reuse this attribute
        # setup via super().__init__().
        self.X = X
        self.y = y
        self.seed = seed

    def flatten_X(self):
        """Return self.X reshaped to 2-D (n_samples, n_features).

        Arrays that are already 1-D or 2-D are returned unchanged; higher
        ranks have their trailing dimensions collapsed into one axis.
        """
        shape = self.X.shape
        flat_X = self.X
        if len(shape) > 2:
            # np.prod collapses all trailing dims into a single feature
            # axis. (np.product was deprecated and removed in NumPy 2.0.)
            flat_X = np.reshape(self.X, (shape[0], np.prod(shape[1:])))
        return flat_X

    @abc.abstractmethod
    def select_batch_(self):
        """Subclass hook that performs the actual batch selection."""
        return

    def select_batch(self, **kwargs):
        """Uniform public entry point; forwards to select_batch_."""
        return self.select_batch_(**kwargs)

    def to_dict(self):
        """Optional serialization hook; base implementation returns None."""
        return None
| 25.942308
| 74
| 0.722758
|
4a00e4eb7f20c912003179858cb4ea2b28aa25e8
| 3,099
|
py
|
Python
|
src/pyobo/sources/biogrid.py
|
ddomingof/pyobo
|
057a76e729fe063d8cb58ddb38076e19c4336725
|
[
"MIT"
] | null | null | null |
src/pyobo/sources/biogrid.py
|
ddomingof/pyobo
|
057a76e729fe063d8cb58ddb38076e19c4336725
|
[
"MIT"
] | null | null | null |
src/pyobo/sources/biogrid.py
|
ddomingof/pyobo
|
057a76e729fe063d8cb58ddb38076e19c4336725
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Extract and convert BioGRID identifiers."""
from typing import Mapping
import pandas as pd
from pyobo.cache_utils import cached_mapping
from pyobo.extract import get_name_id_mapping
from pyobo.path_utils import ensure_df, prefix_directory_join
# Resource prefix and pinned BioGRID release used to build the download URL.
PREFIX = 'biogrid'
VERSION = '3.5.186'
BASE_URL = 'https://downloads.thebiogrid.org/Download/BioGRID/Release-Archive'
URL = f'{BASE_URL}/BIOGRID-{VERSION}/BIOGRID-IDENTIFIERS-{VERSION}.tab.zip'

# Manual fixes for organism names in the BioGRID dump that do not match the
# official NCBI taxonomy names. Keys are the names as they appear in the
# BioGRID file; values are NCBI taxonomy identifiers (as strings).
taxonomy_remapping = {  # so much for official names
    "Canis familiaris": "9615",  # Canis lupus familiaris
    "Human Herpesvirus 1": "10298",  # Human alphaherpesvirus 1
    "Human Herpesvirus 3": "10335",  # Human alphaherpesvirus 3
    "Murid Herpesvirus 1": "10366",  # Murid betaherpesvirus 1
    "Human Herpesvirus 4": "10376",  # Human gammaherpesvirus 4
    "Hepatitus C Virus": "11103",  # Hepacivirus C
    "Human Immunodeficiency Virus 1": "11676",  # Human immunodeficiency virus 1
    "Human Immunodeficiency Virus 2": "11709",  # Human immunodeficiency virus 2
    "Human Herpesvirus 2": "10310",  # Human alphaherpesvirus 2
    "Human Herpesvirus 5": "10359",  # Human betaherpesvirus 5
    "Human Herpesvirus 6A": "32603",  # Human betaherpesvirus 6A
    "Human Herpesvirus 6B": "32604",  # Human betaherpesvirus 6B
    "Human Herpesvirus 7": "10372",  # Human betaherpesvirus 7
    "Human Herpesvirus 8": "37296",  # Human gammaherpesvirus 8
    "Emericella nidulans": "162425",  # Aspergillus nidulans
    "Bassica campestris": "145471",  # Brassica rapa subsp. oleifera (was a typo)
    "Tarsius syrichta": "1868482",  # Carlito syrichta
    "Felis Catus": "9685",  # Felis catus
    "Vaccinia Virus": "10245",  # Vaccinia virus
    "Simian Virus 40": "1891767",  # Macaca mulatta polyomavirus 1
    "Simian Immunodeficiency Virus": "11723",  # Simian immunodeficiency virus
    "Tobacco Mosaic Virus": "12242",  # Tobacco mosaic virus
    # Not in my current dump, but definitely there!
    "Severe acute respiratory syndrome coronavirus 2": "2697049",  # Severe acute respiratory syndrome coronavirus 2
    'Middle-East Respiratory Syndrome-related Coronavirus': '1335626',
}
def _lookup(name):
    """Resolve an organism name to an NCBI taxonomy identifier.

    Names with known non-standard spellings are resolved through
    ``taxonomy_remapping``; everything else is looked up in the canonical
    ncbitaxon name-to-identifier mapping.
    """
    try:
        return taxonomy_remapping[name]
    except KeyError:
        return get_name_id_mapping('ncbitaxon')[name]
def get_df() -> pd.DataFrame:
    """Get the BioGRID identifiers mapping dataframe.

    Loads the BioGRID identifiers dump (via the cached downloader) and adds
    a ``taxonomy_id`` column resolved from the official organism name.
    """
    identifiers = ensure_df(PREFIX, URL, skiprows=28, dtype=str)
    identifiers['taxonomy_id'] = identifiers['ORGANISM_OFFICIAL_NAME'].map(_lookup)
    return identifiers
@cached_mapping(
    path=prefix_directory_join(PREFIX, 'cache', 'xrefs', 'ncbigene.tsv'),
    header=['biogrid_id', 'ncbigene_id'],
)
def get_ncbigene_mapping() -> Mapping[str, str]:
    """Get BioGRID to NCBIGENE mapping.

    Is basically equivalent to:

    .. code-block:: python

        from pyobo import get_filtered_xrefs
        biogrid_ncbigene_mapping = get_filtered_xrefs('biogrid', 'ncbigene')
    """
    identifiers = get_df()
    # Keep only the Entrez gene rows and pair BioGRID ids with gene ids.
    entrez_rows = identifiers[identifiers['IDENTIFIER_TYPE'] == 'ENTREZ_GENE']
    return dict(zip(entrez_rows['BIOGRID_ID'], entrez_rows['IDENTIFIER_VALUE']))
| 39.730769
| 116
| 0.704098
|
4a00e5fc6979b93e7a097cfd1a2d33ba3df1ab07
| 484
|
py
|
Python
|
data/data_loader.py
|
sthadas/Perceptual_similarity
|
5d21660e66e1ea31d72462db35aa4a8c850d6016
|
[
"BSD-2-Clause"
] | 1
|
2018-10-03T13:32:09.000Z
|
2018-10-03T13:32:09.000Z
|
data/data_loader.py
|
sthadas/Perceptual_similarity
|
5d21660e66e1ea31d72462db35aa4a8c850d6016
|
[
"BSD-2-Clause"
] | null | null | null |
data/data_loader.py
|
sthadas/Perceptual_similarity
|
5d21660e66e1ea31d72462db35aa4a8c850d6016
|
[
"BSD-2-Clause"
] | null | null | null |
def CreateDataLoader(datafolder, dataroot='./scripts/dataset', dataset_mode='2afc', load_size=64, batch_size=1, serial_batches=True):
    """Build and initialize a CustomDatasetDataLoader for the given dataset folder."""
    from data.custom_dataset_data_loader import CustomDatasetDataLoader
    loader = CustomDatasetDataLoader()
    loader.initialize(
        datafolder,
        dataroot=dataroot + '/' + dataset_mode,
        dataset_mode=dataset_mode,
        load_size=load_size,
        batch_size=batch_size,
        serial_batches=serial_batches,
        nThreads=1,
    )
    return loader
| 69.142857
| 183
| 0.82438
|
4a00e821dc026bd8158348004682853b4db7283b
| 2,884
|
py
|
Python
|
homeschool/schools/models.py
|
sabrisay/homeschool
|
a48cbb8483627e7f74af518397f591fb73abe709
|
[
"MIT"
] | null | null | null |
homeschool/schools/models.py
|
sabrisay/homeschool
|
a48cbb8483627e7f74af518397f591fb73abe709
|
[
"MIT"
] | null | null | null |
homeschool/schools/models.py
|
sabrisay/homeschool
|
a48cbb8483627e7f74af518397f591fb73abe709
|
[
"MIT"
] | null | null | null |
import uuid
from typing import Optional
from django.conf import settings
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.utils import timezone
from homeschool.core.models import DaysOfWeekModel
from homeschool.users.models import User
class School(models.Model):
    """A school to hold students"""

    admin = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE,
        help_text="The school administrator",
    )

    def get_current_grade_levels(self):
        """Get the grade levels for the current school year."""
        today = timezone.localdate()
        current_year = SchoolYear.objects.filter(
            school=self, start_date__lte=today, end_date__gte=today
        ).first()
        if current_year is None:
            # No school year covers today, so there are no grade levels.
            return GradeLevel.objects.none()
        return GradeLevel.objects.filter(school_year=current_year)
@receiver(post_save, sender=User)
def create_school(sender, instance, created, **kwargs):
    """A new user gets an associated school."""
    if not created:
        return
    School.objects.create(admin=instance)
class SchoolYear(DaysOfWeekModel):
    """A school year to bound start and end dates of the academic year"""

    school = models.ForeignKey("schools.School", on_delete=models.CASCADE)
    start_date = models.DateField()
    end_date = models.DateField()
    uuid = models.UUIDField(default=uuid.uuid4, db_index=True)

    @classmethod
    def get_current_year_for(cls, user: User) -> Optional["SchoolYear"]:
        """Get a current school year for the user.

        Prefer a school year containing today's date. When none does (e.g.,
        a new user still planning the upcoming year), fall back to the next
        school year starting in the future.
        """
        today = user.get_local_today()
        user_years = SchoolYear.objects.filter(school__admin=user)
        current = user_years.filter(
            start_date__lte=today, end_date__gte=today
        ).first()
        if current:
            return current
        # Look ahead so a new user building next year's plan still gets one.
        return user_years.filter(start_date__gt=today).first()

    def __str__(self):
        start_year, end_year = self.start_date.year, self.end_date.year
        if start_year == end_year:
            return str(start_year)
        return f"{start_year}–{end_year}"
class GradeLevel(models.Model):
    """A student is in a grade level in a given school year"""
    # Display name of the grade level.
    name = models.CharField(max_length=128)
    # Each grade level belongs to exactly one school year; reverse access
    # via SchoolYear.grade_levels.
    school_year = models.ForeignKey(
        "schools.SchoolYear", on_delete=models.CASCADE, related_name="grade_levels"
    )
    # Stable external identifier, indexed for lookup.
    uuid = models.UUIDField(default=uuid.uuid4, db_index=True)
    def __str__(self):
        return self.name
| 32.404494
| 83
| 0.682039
|
4a00e83251dc2e41f571f6253b49bc870c5f99cf
| 2,107
|
py
|
Python
|
ui/server.py
|
TihonkovSergey/twitch_speech
|
ca916c49832f30ebe3f5ff7caa1e4ca69d6ad379
|
[
"Unlicense"
] | null | null | null |
ui/server.py
|
TihonkovSergey/twitch_speech
|
ca916c49832f30ebe3f5ff7caa1e4ca69d6ad379
|
[
"Unlicense"
] | null | null | null |
ui/server.py
|
TihonkovSergey/twitch_speech
|
ca916c49832f30ebe3f5ff7caa1e4ca69d6ad379
|
[
"Unlicense"
] | null | null | null |
from flask import Flask, request, jsonify
from flask_cors import CORS
import sys
sys.path.append("/home/sergey/Documents/homeworks/twitch_speech/")
from utils.main_server import TwitchSpeechServer
from utils.db_connector import DBConnector
import config as cf
pipeline_server = TwitchSpeechServer()
db_connector = DBConnector(cf.DATABASE_NAME)
app = Flask(__name__)
CORS(app)
# {
# 'status': 'Ищем видяшку',
# 'progress': int,
# 'download_speed': str,
# }
def check_id(video_id):
    """Return the processing status of a video, starting processing if unknown.

    :param video_id: identifier of the video to check.
    :return: dict with ``status`` (str), ``progress`` (int percent) and
        ``download_speed`` (str).
    """
    status_info = db_connector.get_status(video_id)
    if not status_info:
        # Unknown video: kick off the processing pipeline for it.
        pipeline_server.process_videos(ids=[video_id])
        return {'status': 'Процесс запущен',
                'progress': 0,
                'download_speed': "0MB",
                }
    status = status_info['status']
    try:
        downloaded_count = int(status_info['info']['downloaded_count'])
        total_count = int(status_info['info']['total_count'])
        # Multiply BEFORE truncating to int. The previous code applied
        # int() to the raw ratio first, so progress was always 0 or 100.
        progress = int(downloaded_count / total_count * 100)
    except (KeyError, TypeError, ValueError, ZeroDivisionError):
        # Counts may be missing or not yet populated; report no progress.
        progress = 0
    try:
        download_speed = status_info['info']['speed']
    except (KeyError, TypeError):
        download_speed = '-'
    return {
        'status': status,
        'progress': progress,
        'download_speed': download_speed,
    }
def search_text(video_id, input_text):
    """Return subtitle parts for a video, filtered by ``input_text`` when given.

    Maps a string index to ``{'timecode': part['start'] // 1000, 'text': ...}``
    for each matching part.
    """
    if input_text:
        parts = db_connector.find_text(video_id, input_text)
    else:
        parts = db_connector.get_parts(video_id)
    return {
        str(index): {
            'timecode': int(part['start']) // 1000,
            'text': part['text'],
        }
        for index, part in enumerate(parts)
    }
@app.route('/', methods=['GET'])
def check():
    """Status endpoint: report processing state for the ``video_id`` query parameter."""
    return check_id(request.args.get('video_id'))
@app.route('/search', methods=['GET'])
def search():
    """Search endpoint: find ``input_text`` in the subtitles of ``video_id``."""
    video_id, input_text = request.args.get('video_id'), request.args.get('input_text')
    return search_text(video_id, input_text)
if __name__ == "__main__":
app.run(debug=True)
| 25.083333
| 87
| 0.636925
|
4a00e855e4e626b04009286c3e525d5e41b63b5e
| 1,161
|
py
|
Python
|
pyleecan/Methods/Slot/SlotW16/build_geometry.py
|
ajpina/pyleecan
|
f8d1fce7d108cf443f5767e35d59ff15905fb49f
|
[
"Apache-2.0"
] | 2
|
2020-08-28T14:54:55.000Z
|
2021-03-13T19:34:45.000Z
|
pyleecan/Methods/Slot/SlotW16/build_geometry.py
|
ajpina/pyleecan
|
f8d1fce7d108cf443f5767e35d59ff15905fb49f
|
[
"Apache-2.0"
] | null | null | null |
pyleecan/Methods/Slot/SlotW16/build_geometry.py
|
ajpina/pyleecan
|
f8d1fce7d108cf443f5767e35d59ff15905fb49f
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from ....Classes.Arc1 import Arc1
from ....Classes.Segment import Segment
def build_geometry(self):
"""Compute the curve (Line) needed to plot the Slot.
The ending point of a curve is the starting point of the next curve in
the list
Parameters
----------
self : SlotW16
A SlotW16 object
Returns
-------
curve_list: llist
A list of 4 Segment and 5 Arc
"""
Rbo = self.get_Rbo()
[Z1, Z2, Z3, Z4, Z5, Z6, Z7, Z8, Z9, Z10] = self._comp_point_coordinate()
# Creation of curve
curve_list = list()
curve_list.append(Segment(Z1, Z2))
curve_list.append(Arc1(Z2, Z3, -Rbo + self.H0, is_trigo_direction=False))
curve_list.append(Arc1(Z3, Z4, -self.R1, is_trigo_direction=False))
curve_list.append(Segment(Z4, Z5))
curve_list.append(Arc1(Z5, Z6, Rbo - self.H0 - self.H2, is_trigo_direction=True))
curve_list.append(Segment(Z6, Z7))
curve_list.append(Arc1(Z7, Z8, -self.R1, is_trigo_direction=False))
curve_list.append(Arc1(Z8, Z9, -Rbo + self.H0, is_trigo_direction=False))
curve_list.append(Segment(Z9, Z10))
return curve_list
| 28.317073
| 85
| 0.655469
|
4a00ea14c09904e4cc6e9d56fac1938f20d324cc
| 20,518
|
py
|
Python
|
fence/__init__.py
|
grugna/fence
|
8cf0bb4160d82c03b58a176637df73458bd694e4
|
[
"Apache-2.0"
] | null | null | null |
fence/__init__.py
|
grugna/fence
|
8cf0bb4160d82c03b58a176637df73458bd694e4
|
[
"Apache-2.0"
] | null | null | null |
fence/__init__.py
|
grugna/fence
|
8cf0bb4160d82c03b58a176637df73458bd694e4
|
[
"Apache-2.0"
] | 1
|
2022-02-10T20:21:35.000Z
|
2022-02-10T20:21:35.000Z
|
from collections import OrderedDict
import os
import tempfile
from urllib.parse import urljoin
import flask
from flask_cors import CORS
from flask_sqlalchemy_session import flask_scoped_session, current_session
from authutils.oauth2.client import OAuthClient
from cdislogging import get_logger
from gen3authz.client.arborist.client import ArboristClient
from flask_wtf.csrf import validate_csrf
from userdatamodel.driver import SQLAlchemyDriver
from werkzeug.middleware.dispatcher import DispatcherMiddleware
from azure.storage.blob import BlobServiceClient
from azure.core.exceptions import ResourceNotFoundError
from fence.auth import logout, build_redirect_url
from fence.blueprints.data.indexd import S3IndexedFileLocation
from fence.blueprints.login.utils import allowed_login_redirects, domain
from fence.errors import UserError
from fence.jwt import keys
from fence.models import migrate
from fence.oidc.client import query_client
from fence.oidc.server import server
from fence.resources.audit.client import AuditServiceClient
from fence.resources.aws.boto_manager import BotoManager
from fence.resources.openid.idp_oauth2 import Oauth2ClientBase
from fence.resources.openid.cilogon_oauth2 import CilogonOauth2Client
from fence.resources.openid.cognito_oauth2 import CognitoOauth2Client
from fence.resources.openid.google_oauth2 import GoogleOauth2Client
from fence.resources.openid.microsoft_oauth2 import MicrosoftOauth2Client
from fence.resources.openid.okta_oauth2 import OktaOauth2Client
from fence.resources.openid.orcid_oauth2 import OrcidOauth2Client
from fence.resources.openid.synapse_oauth2 import SynapseOauth2Client
from fence.resources.openid.ras_oauth2 import RASOauth2Client
from fence.resources.storage import StorageManager
from fence.resources.user.user_session import UserSessionInterface
from fence.error_handler import get_error_response
from fence.utils import random_str
from fence.config import config
from fence.settings import CONFIG_SEARCH_FOLDERS
import fence.blueprints.admin
import fence.blueprints.data
import fence.blueprints.login
import fence.blueprints.oauth2
import fence.blueprints.misc
import fence.blueprints.storage_creds
import fence.blueprints.user
import fence.blueprints.well_known
import fence.blueprints.link
import fence.blueprints.google
import fence.blueprints.privacy
import fence.blueprints.register
import fence.blueprints.ga4gh
# for some reason the temp dir does not get created properly if we move
# this statement to `_setup_prometheus()`
PROMETHEUS_TMP_COUNTER_DIR = tempfile.TemporaryDirectory()
# Can't read config yet. Just set to debug for now, else no handlers.
# Later, in app_config(), will actually set level based on config
logger = get_logger(__name__, log_level="debug")
app = flask.Flask(__name__)
CORS(app=app, headers=["content-type", "accept"], expose_headers="*")
def warn_about_logger():
    """Raise on any use of ``app.logger``.

    Assigned over ``app.logger`` in ``app_init`` so that accidental use
    fails loudly instead of letting Flask replace the log handlers.
    """
    raise Exception(
        "Flask 0.12 will remove and replace all of our log handlers if you call "
        "app.logger anywhere. Use get_logger from cdislogging instead."
    )
def app_init(
    app,
    settings="fence.settings",
    root_dir=None,
    config_path=None,
    config_file_name=None,
):
    """Initialize the Flask app: config, DB sessions, blueprints, OAuth2 server.

    :param app: the Flask application to initialize.
    :param settings: dotted path of the settings module loaded first.
    :param root_dir: directory containing the signing ``keys`` folder;
        resolved to the package parent directory when None (in app_config).
    :param config_path: explicit path to the configuration file.
    :param config_file_name: configuration file name to search for.
    """
    # Fail loudly if anything touches app.logger (see warn_about_logger).
    app.__dict__["logger"] = warn_about_logger
    app_config(
        app,
        settings=settings,
        root_dir=root_dir,
        config_path=config_path,
        file_name=config_file_name,
    )
    app_sessions(app)
    app_register_blueprints(app)
    # Wire the authlib OAuth2 provider into the app.
    server.init_app(app, query_client=query_client)
def app_sessions(app):
    """Set up the database driver, optional DB migration, and session handling."""
    app.url_map.strict_slashes = False
    app.db = SQLAlchemyDriver(config["DB"])
    # TODO: we will make a more robust migration system external from the application
    # initialization soon
    if config["ENABLE_DB_MIGRATION"]:
        logger.info("Running database migration...")
        migrate(app.db)
        logger.info("Done running database migration.")
    else:
        logger.info("NOT running database migration.")
    # flask_scoped_session ties DB sessions to the request lifecycle; the
    # return value is only kept to make the registration explicit.
    session = flask_scoped_session(app.db.Session, app)  # noqa
    app.session_interface = UserSessionInterface()
def app_register_blueprints(app):
    """Register all fence blueprints plus the root, /logout and /jwt/keys routes."""
    app.register_blueprint(fence.blueprints.oauth2.blueprint, url_prefix="/oauth2")
    app.register_blueprint(fence.blueprints.user.blueprint, url_prefix="/user")
    creds_blueprint = fence.blueprints.storage_creds.make_creds_blueprint()
    app.register_blueprint(creds_blueprint, url_prefix="/credentials")
    app.register_blueprint(fence.blueprints.admin.blueprint, url_prefix="/admin")
    app.register_blueprint(
        fence.blueprints.well_known.blueprint, url_prefix="/.well-known"
    )
    login_blueprint = fence.blueprints.login.make_login_blueprint()
    app.register_blueprint(login_blueprint, url_prefix="/login")
    link_blueprint = fence.blueprints.link.make_link_blueprint()
    app.register_blueprint(link_blueprint, url_prefix="/link")
    google_blueprint = fence.blueprints.google.make_google_blueprint()
    app.register_blueprint(google_blueprint, url_prefix="/google")
    app.register_blueprint(
        fence.blueprints.privacy.blueprint, url_prefix="/privacy-policy"
    )
    app.register_blueprint(fence.blueprints.register.blueprint, url_prefix="/register")
    app.register_blueprint(fence.blueprints.ga4gh.blueprint, url_prefix="/ga4gh")
    fence.blueprints.misc.register_misc(app)

    @app.route("/")
    def root():
        """
        Register the root URL.
        """
        endpoints = {
            "oauth2 endpoint": "/oauth2",
            "user endpoint": "/user",
            "keypair endpoint": "/credentials",
        }
        return flask.jsonify(endpoints)

    @app.route("/logout")
    def logout_endpoint():
        """Log the user out and redirect to a validated ``next`` URL."""
        root = config.get("BASE_URL", "")
        request_next = flask.request.args.get("next", root)
        force_era_global_logout = (
            flask.request.args.get("force_era_global_logout") == "true"
        )
        # Absolute URLs are used as-is; relative paths are joined to ROOT_URL.
        if request_next.startswith("https") or request_next.startswith("http"):
            next_url = request_next
        else:
            next_url = build_redirect_url(config.get("ROOT_URL", ""), request_next)
        # Only allow redirects to whitelisted domains (open-redirect guard).
        if domain(next_url) not in allowed_login_redirects():
            raise UserError("invalid logout redirect URL: {}".format(next_url))
        return logout(
            next_url=next_url, force_era_global_logout=force_era_global_logout
        )

    @app.route("/jwt/keys")
    def public_keys():
        """
        Return the public keys which can be used to verify JWTs signed by fence.

        The return value should look like this:

            {
                "keys": [
                    {
                        "key-01": " ... [public key here] ... "
                    }
                ]
            }
        """
        return flask.jsonify(
            {"keys": [(keypair.kid, keypair.public_key) for keypair in app.keypairs]}
        )
def _check_azure_storage(app):
    """
    Confirm access to Azure Storage Account and Containers.

    Best-effort startup check: problems are logged at debug level rather
    than raised, since an inaccessible container only affects resolution of
    indexed files pointing at it.
    """
    azure_creds = config.get("AZ_BLOB_CREDENTIALS", None)

    # if this is a public bucket, Fence will not try to sign the URL
    if azure_creds == "*":
        return

    if not azure_creds or azure_creds.strip() == "":
        # Azure Blob credentials are not configured.
        # If you're using Azure Blob Storage set AZ_BLOB_CREDENTIALS to your Azure Blob Storage Connection String.
        logger.debug(
            "Azure Blob credentials are not configured. If you're using Azure Blob Storage, please set AZ_BLOB_CREDENTIALS to your Azure Blob Storage Connection String."
        )
        return

    blob_service_client = BlobServiceClient.from_connection_string(azure_creds)
    for c in blob_service_client.list_containers():
        container_client = blob_service_client.get_container_client(c.name)

        # check if container exists. If it doesn't exist, log a warning.
        # (idiomatic truthiness check instead of `... is False`)
        if not container_client.exists():
            logger.debug(
                f"Unable to access Azure Blob Storage Container {c.name}. You may run into issues resolving orphaned indexed files pointing to this container."
            )
            continue

        # verify that you can check the container properties
        try:
            container_properties = container_client.get_container_properties()
            public_access = container_properties["public_access"]
            # check container properties
            logger.debug(
                f"Azure Blob Storage Container {c.name} has public access {public_access}"
            )
        except ResourceNotFoundError as err:
            logger.debug(
                f"Unable to access Azure Blob Storage Container {c.name}. You may run into issues resolving orphaned indexed files pointing to this container."
            )
            logger.debug(err)
def _check_aws_creds_and_region(app):
    """
    Function to ensure that all s3_buckets have a valid credential.
    Additionally, if there is no region it will produce a warning
    then try to fetch and cache the region.

    :raises ValueError: when a bucket (or the audit SQS config) references a
        credential that is not defined in AWS_CREDENTIALS.
    """
    buckets = config.get("S3_BUCKETS") or {}
    aws_creds = config.get("AWS_CREDENTIALS") or {}

    for bucket_name, bucket_details in buckets.items():
        cred = bucket_details.get("cred")
        region = bucket_details.get("region")
        if not cred:
            raise ValueError(
                "No cred for S3_BUCKET: {}. cred is required.".format(bucket_name)
            )

        # if this is a public bucket, Fence will not try to sign the URL
        # so it won't need to know the region.
        if cred == "*":
            continue

        if cred not in aws_creds:
            raise ValueError(
                "Credential {} for S3_BUCKET {} is not defined in AWS_CREDENTIALS".format(
                    cred, bucket_name
                )
            )

        # only require region when we're not specifying an
        # s3-compatible endpoint URL (ex: no need for region when using cleversafe)
        if not region and not bucket_details.get("endpoint_url"):
            logger.warning(
                "WARNING: no region for S3_BUCKET: {}. Providing the region will reduce"
                " response time and avoid a call to GetBucketLocation which you make lack the AWS ACLs for.".format(
                    bucket_name
                )
            )
            credential = S3IndexedFileLocation.get_credential_to_access_bucket(
                bucket_name,
                aws_creds,
                config.get("MAX_PRESIGNED_URL_TTL", 3600),
                app.boto,
            )
            # Use a default so a missing attribute is reported as a warning
            # instead of raising AttributeError (getattr without a default
            # raises when app.boto was never set up).
            if not getattr(app, "boto", None):
                logger.warning(
                    "WARNING: boto not setup for app, probably b/c "
                    "nothing in AWS_CREDENTIALS. Cannot attempt to get bucket "
                    "bucket regions."
                )
                return
            region = app.boto.get_bucket_region(bucket_name, credential)
            config["S3_BUCKETS"][bucket_name]["region"] = region

    cred = config["PUSH_AUDIT_LOGS_CONFIG"].get("aws_sqs_config", {}).get("aws_cred")
    if cred and cred not in aws_creds:
        raise ValueError(
            "Credential {} for PUSH_AUDIT_LOGS_CONFIG.aws_sqs_config.aws_cred is not defined in AWS_CREDENTIALS".format(
                cred
            )
        )
def app_config(
    app,
    settings="fence.settings",
    root_dir=None,
    config_path=None,
    file_name=None,
):
    """
    Set up the config for the Flask app.

    Loads settings and the configuration file into both the fence config
    singleton and the Flask app config, then initializes everything that
    depends on configuration (arborist, audit client, boto, keys, authlib,
    metrics, storage, OIDC clients) and sanity-checks cloud storage access.
    """
    if root_dir is None:
        root_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))

    logger.info("Loading settings...")
    # not using app.config.from_object because we don't want all the extra flask cfg
    # vars inside our singleton when we pass these through in the next step
    settings_cfg = flask.Config(app.config.root_path)
    settings_cfg.from_object(settings)

    # dump the settings into the config singleton before loading a configuration file
    config.update(dict(settings_cfg))

    # load the configuration file, this overwrites anything from settings/local_settings
    config.load(
        config_path=config_path,
        search_folders=CONFIG_SEARCH_FOLDERS,
        file_name=file_name,
    )

    # load all config back into flask app config for now, we should PREFER getting config
    # directly from the fence config singleton in the code though.
    app.config.update(**config._configs)

    _setup_arborist_client(app)
    _setup_audit_service_client(app)
    _setup_data_endpoint_and_boto(app)
    _load_keys(app, root_dir)
    _set_authlib_cfgs(app)

    app.prometheus_counters = {}
    if config["ENABLE_PROMETHEUS_METRICS"]:
        logger.info("Enabling Prometheus metrics...")
        _setup_prometheus(app)
    else:
        logger.info("Prometheus metrics are NOT enabled.")

    app.storage_manager = StorageManager(config["STORAGE_CREDENTIALS"], logger=logger)

    app.debug = config["DEBUG"]
    # Following will update logger level, propagate, and handlers
    get_logger(__name__, log_level="debug" if config["DEBUG"] is True else "info")

    _setup_oidc_clients(app)

    # Startup sanity checks require an app context for config access.
    with app.app_context():
        _check_aws_creds_and_region(app)
        _check_azure_storage(app)
def _setup_data_endpoint_and_boto(app):
    """Attach a BotoManager (when AWS credentials exist) and register /data."""
    aws_credentials = config.get("AWS_CREDENTIALS")
    if aws_credentials:
        # Any credential works here; use the first configured one.
        first_credential = next(iter(aws_credentials.values()))
        app.boto = BotoManager(first_credential, logger=logger)
    app.register_blueprint(fence.blueprints.data.blueprint, url_prefix="/data")
def _load_keys(app, root_dir):
    """Load signing keypairs from ``<root_dir>/keys`` and expose the public keys."""
    if root_dir is None:
        root_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
    keys_dir = os.path.join(root_dir, "keys")
    app.keypairs = keys.load_keypairs(keys_dir)
    # Public keys are published per issuer (BASE_URL), keyed by kid,
    # preserving keypair order.
    public_keys = OrderedDict(
        (str(keypair.kid), str(keypair.public_key)) for keypair in app.keypairs
    )
    app.jwt_public_keys = {config["BASE_URL"]: public_keys}
def _set_authlib_cfgs(app):
    """Configure authlib OAuth2/JWT settings on the app and the config singleton."""
    # authlib OIDC settings
    # the signing key must always be (re)injected explicitly
    settings = {"OAUTH2_JWT_KEY": keys.default_private_key(app)}
    app.config.update(settings)
    config.update(settings)

    # only fill in the following defaults when not already provided
    defaults = {
        "OAUTH2_JWT_ENABLED": True,
        "OAUTH2_JWT_ALG": "RS256",
        "OAUTH2_JWT_ISS": app.config["BASE_URL"],
        "OAUTH2_PROVIDER_ERROR_URI": "/api/oauth2/errors",
    }
    for key, value in defaults.items():
        config.setdefault(key, value)
        app.config.setdefault(key, value)
def _setup_oidc_clients(app):
    """Instantiate one OIDC client per identity provider in OPENID_CONNECT config.

    Known providers get a dedicated attribute (e.g. ``app.google_client``);
    any other provider gets a generic ``Oauth2ClientBase`` bound to
    ``app.<normalized idp name>_client``.
    """
    configured_idps = config.get("OPENID_CONNECT", {})

    # Generic clients are stored on attributes derived from the lowercased,
    # space-stripped idp name, so those normalized names must be unique.
    clean_idps = [idp.lower().replace(" ", "") for idp in configured_idps]
    if len(clean_idps) != len(set(clean_idps)):
        raise ValueError(
            f"Some IDPs configured in OPENID_CONNECT are not unique once they are lowercased and spaces are removed: {clean_idps}"
        )

    for idp in set(configured_idps.keys()):
        logger.info(f"Setting up OIDC client for {idp}")
        settings = configured_idps[idp]
        if idp == "google":
            app.google_client = GoogleOauth2Client(
                settings,
                HTTP_PROXY=config.get("HTTP_PROXY"),
                logger=logger,
            )
        elif idp == "orcid":
            app.orcid_client = OrcidOauth2Client(
                settings,
                HTTP_PROXY=config.get("HTTP_PROXY"),
                logger=logger,
            )
        elif idp == "ras":
            app.ras_client = RASOauth2Client(
                settings,
                HTTP_PROXY=config.get("HTTP_PROXY"),
                logger=logger,
            )
        elif idp == "synapse":
            app.synapse_client = SynapseOauth2Client(
                settings, HTTP_PROXY=config.get("HTTP_PROXY"), logger=logger
            )
        elif idp == "microsoft":
            app.microsoft_client = MicrosoftOauth2Client(
                settings,
                HTTP_PROXY=config.get("HTTP_PROXY"),
                logger=logger,
            )
        elif idp == "okta":
            app.okta_client = OktaOauth2Client(
                settings,
                HTTP_PROXY=config.get("HTTP_PROXY"),
                logger=logger,
            )
        elif idp == "cognito":
            app.cognito_client = CognitoOauth2Client(
                settings, HTTP_PROXY=config.get("HTTP_PROXY"), logger=logger
            )
        elif idp == "cilogon":
            app.cilogon_client = CilogonOauth2Client(
                settings,
                HTTP_PROXY=config.get("HTTP_PROXY"),
                logger=logger,
            )
        elif idp == "fence":
            # fence-as-client uses a plain authutils OAuth client.
            app.fence_client = OAuthClient(**settings)
        else:  # generic OIDC implementation
            client = Oauth2ClientBase(
                settings=settings,
                logger=logger,
                HTTP_PROXY=config.get("HTTP_PROXY"),
                idp=settings.get("name") or idp.title(),
            )
            clean_idp = idp.lower().replace(" ", "")
            setattr(app, f"{clean_idp}_client", client)
def _setup_arborist_client(app):
    """Create the arborist (authz) client when an ARBORIST URL is configured."""
    if not app.config.get("ARBORIST"):
        return
    app.arborist = ArboristClient(arborist_base_url=config["ARBORIST"])
def _setup_audit_service_client(app):
    """Create the audit service client on the app.

    The client is initialized regardless of whether audit logs are enabled,
    so ``app.audit_service_client.create_x_log()`` can be called from
    anywhere without checking; the client itself checks whether audit logs
    are enabled.
    """
    configured_url = app.config.get("AUDIT_SERVICE")
    if not configured_url:
        configured_url = urljoin(app.config["BASE_URL"], "/audit")
    app.audit_service_client = AuditServiceClient(
        service_url=configured_url, logger=logger
    )
def _setup_prometheus(app):
    """Configure multiprocess Prometheus collection, the /metrics route and counters."""
    # This environment variable MUST be declared before importing the
    # prometheus modules (or unit tests fail)
    # More details on this awkwardness: https://github.com/prometheus/client_python/issues/250
    os.environ["prometheus_multiproc_dir"] = PROMETHEUS_TMP_COUNTER_DIR.name

    # Imports are deliberately local: they must happen AFTER the env var above.
    from prometheus_client import (
        CollectorRegistry,
        multiprocess,
        make_wsgi_app,
    )
    from prometheus_flask_exporter import Counter
    from prometheus_flask_exporter.multiprocess import (
        UWsgiPrometheusMetrics,
    )

    app.prometheus_registry = CollectorRegistry()
    multiprocess.MultiProcessCollector(app.prometheus_registry)
    UWsgiPrometheusMetrics(app)

    # Add prometheus wsgi middleware to route /metrics requests
    app.wsgi_app = DispatcherMiddleware(
        app.wsgi_app, {"/metrics": make_wsgi_app(registry=app.prometheus_registry)}
    )

    # set up counters
    app.prometheus_counters["pre_signed_url_req"] = Counter(
        "pre_signed_url_req",
        "tracking presigned url requests",
        ["requested_protocol"],
    )
@app.errorhandler(Exception)
def handle_error(error):
    """
    Register an error handler for general exceptions.
    """
    # Delegate status code and response body to the shared error formatter.
    return get_error_response(error)
@app.before_request
def check_csrf():
    """Reject state-changing session requests that lack a valid CSRF token.

    Skipped for token-authenticated requests (Authorization header), for
    requests without a session username, and when CSRF protection is
    disabled in config. Raises UserError on verification failure.
    """
    has_auth = "Authorization" in flask.request.headers
    no_username = not flask.session.get("username")
    if has_auth or no_username:
        return
    if not config.get("ENABLE_CSRF_PROTECTION", True):
        return
    if flask.request.method != "GET":
        try:
            csrf_header = flask.request.headers.get("x-csrf-token")
            csrf_formfield = flask.request.form.get("csrf_token")
            # validate_csrf checks the input (a signed token) against the raw
            # token stored in session["csrf_token"] (managed by flask-wtf),
            # returning None on success and raising when invalid.
            # To pass the CSRF check there must exist EITHER an x-csrf-token
            # header OR a csrf_token form field matching the session token.
            # NOTE: explicit raises replace the previous ``assert`` so the
            # check is NOT stripped when Python runs with -O.
            header_valid = bool(csrf_header) and validate_csrf(csrf_header) is None
            if not header_valid:
                form_valid = (
                    bool(csrf_formfield) and validate_csrf(csrf_formfield) is None
                )
                if not form_valid:
                    raise ValueError("missing or invalid CSRF token")
            referer = flask.request.headers.get("referer")
            if not referer:
                raise ValueError("Referer header missing")
            logger.debug("HTTP REFERER " + str(referer))
        except Exception as e:
            raise UserError("CSRF verification failed: {}. Request aborted".format(e))
| 36.770609
| 170
| 0.669705
|
4a00eade946fd3aa60a69dea9c4e5df3f8aa3da8
| 809
|
py
|
Python
|
pythonAnimations/pyOpenGLChess/engineDirectory/oglc-env/lib/python2.7/site-packages/vrml/vrml97/script.py
|
alexus37/AugmentedRealityChess
|
7f600ad153270feff12aa7aa86d7ed0a49ebc71c
|
[
"MIT"
] | 1
|
2015-07-12T07:24:17.000Z
|
2015-07-12T07:24:17.000Z
|
pythonAnimations/pyOpenGLChess/engineDirectory/oglc-env/lib/python2.7/site-packages/vrml/vrml97/script.py
|
alexus37/AugmentedRealityChess
|
7f600ad153270feff12aa7aa86d7ed0a49ebc71c
|
[
"MIT"
] | null | null | null |
pythonAnimations/pyOpenGLChess/engineDirectory/oglc-env/lib/python2.7/site-packages/vrml/vrml97/script.py
|
alexus37/AugmentedRealityChess
|
7f600ad153270feff12aa7aa86d7ed0a49ebc71c
|
[
"MIT"
] | 1
|
2016-02-19T21:55:53.000Z
|
2016-02-19T21:55:53.000Z
|
"""VRML97 Script-node stub"""
from vrml import node, fieldtypes
from vrml.vrml97 import nodetypes
class _Script( nodetypes.Children, node.Node ):
    """A sub-type of node with scripting/pseudo-proto support

    The class here just handles basic node-like functionality,
    a special constructor factory takes care of the PROTO-like
    functionality.
    """
    # Standard VRML97 Script-node fields; per-instance fields are added by
    # the generated prototype (see the Script factory function).
    url = fieldtypes.MFString(
        'url', 1,
    )
    # VRML97 directOutput flag; defaults to false here.
    directOutput = fieldtypes.SFBool(
        'directOutput', default = 0,
    )
    # VRML97 mustEvaluate flag; defaults to false here.
    mustEvaluate = fieldtypes.SFBool(
        'mustEvaluate', default = 0,
    )
def Script(fields, **namedarguments):
    """Create a new Script prototype and return an instance of it.

    Each call builds a fresh prototype derived from _Script with the given
    fields (PROTO-like behaviour), then instantiates it with the named
    arguments.
    """
    prototype_class = node.prototype('Script', fields, baseClasses=(_Script,))
    return prototype_class(**namedarguments)
| 31.115385
| 73
| 0.674907
|
4a00ebd015718505b6d5730203993796904e2f85
| 56,125
|
py
|
Python
|
requests_auth/authentication.py
|
bottoy/requests_auth
|
f95ecd833d52341ebe0e2c974d133577ae124dd9
|
[
"MIT"
] | 1
|
2020-04-02T02:29:56.000Z
|
2020-04-02T02:29:56.000Z
|
requests_auth/authentication.py
|
bottoy/requests_auth
|
f95ecd833d52341ebe0e2c974d133577ae124dd9
|
[
"MIT"
] | null | null | null |
requests_auth/authentication.py
|
bottoy/requests_auth
|
f95ecd833d52341ebe0e2c974d133577ae124dd9
|
[
"MIT"
] | null | null | null |
import base64
import os
import uuid
from hashlib import sha256, sha512
from urllib.parse import parse_qs, urlsplit, urlunsplit, urlencode
from typing import Optional
import requests
import requests.auth
import warnings
from requests_auth import oauth2_authentication_responses_server, oauth2_tokens
from requests_auth.errors import InvalidGrantRequest, GrantNotProvided
def _add_parameters(initial_url: str, extra_parameters: dict) -> str:
"""
Add parameters to an URL and return the new URL.
:param initial_url:
:param extra_parameters: dictionary of parameters name and value.
:return: the new URL containing parameters.
"""
scheme, netloc, path, query_string, fragment = urlsplit(initial_url)
query_params = parse_qs(query_string)
for parameter_name in extra_parameters.keys():
# TODO Handle parameters with a list as a value and submit PR to requests or Python
query_params[parameter_name] = [extra_parameters[parameter_name]]
new_query_string = urlencode(query_params, doseq=True)
return urlunsplit((scheme, netloc, path, new_query_string, fragment))
def _pop_parameter(url: str, query_parameter_name: str) -> (str, Optional[str]):
"""
Remove and return parameter of an URL.
:param url: The URL containing (or not) the parameter.
:param query_parameter_name: The query parameter to pop.
:return: The new URL (without this parameter) and the parameter value (None if not found).
"""
scheme, netloc, path, query_string, fragment = urlsplit(url)
query_params = parse_qs(query_string)
parameter_value = query_params.pop(query_parameter_name, None)
new_query_string = urlencode(query_params, doseq=True)
return (
urlunsplit((scheme, netloc, path, new_query_string, fragment)),
parameter_value,
)
def _get_query_parameter(url: str, param_name: str) -> Optional[str]:
scheme, netloc, path, query_string, fragment = urlsplit(url)
query_params = parse_qs(query_string)
all_values = query_params.get(param_name)
return all_values[0] if all_values else None
def request_new_grant_with_post(
    url: str, data, grant_name: str, timeout: float, auth=None
) -> (str, int):
    """Request a new OAuth2 grant via an HTTP POST to the token endpoint.

    :param url: token endpoint URL.
    :param data: form data sent in the POST body.
    :param grant_name: JSON field holding the grant in the response.
    :param timeout: maximum number of seconds to wait for the response.
    :param auth: optional requests authentication for the POST itself.
    :return: tuple of (grant value, ``expires_in`` from the response —
        None when absent).
    :raises InvalidGrantRequest: when the response indicates failure.
    :raises GrantNotProvided: when the response JSON lacks the grant field.
    """
    response = requests.post(url, data=data, timeout=timeout, auth=auth)
    # Relies on requests.Response truthiness: falsy when status code >= 400.
    if not response:
        # As described in https://tools.ietf.org/html/rfc6749#section-5.2
        raise InvalidGrantRequest(response)
    content = response.json()
    token = content.get(grant_name)
    if not token:
        raise GrantNotProvided(grant_name, content)
    return token, content.get("expires_in")
class OAuth2:
    # Process-wide token cache (class attribute shared by everything
    # referencing OAuth2.token_cache).
    token_cache = oauth2_tokens.TokenMemoryCache()
class SupportMultiAuth:
    """Inherit from this class to be able to use your class with requests_auth provided authentication classes."""

    def _merge_with(self, other):
        # Flatten an existing _MultiAuth so modes stay a single level deep.
        if isinstance(other, _MultiAuth):
            return _MultiAuth(self, *other.authentication_modes)
        return _MultiAuth(self, other)

    def __add__(self, other):
        return self._merge_with(other)

    def __and__(self, other):
        return self._merge_with(other)
class BrowserAuth:
    def __init__(self, kwargs):
        """
        :param redirect_uri_endpoint: Custom endpoint that will be used as redirect_uri the following way:
        http://localhost:<redirect_uri_port>/<redirect_uri_endpoint>. Default value is to redirect on / (root).
        :param redirect_uri_port: The port on which the server listening for the OAuth 2 code will be started.
        Listen on port 5000 by default.
        :param timeout: Maximum amount of seconds to wait for a code or a token to be received once requested.
        Wait for 1 minute (60 seconds) by default.
        :param success_display_time: In case a code is successfully received,
        this is the maximum amount of milliseconds the success page will be displayed in your browser.
        Display the page for 1 millisecond by default.
        :param failure_display_time: In case received code is not valid,
        this is the maximum amount of milliseconds the failure page will be displayed in your browser.
        Display the page for 5 seconds by default.
        """
        endpoint = kwargs.pop("redirect_uri_endpoint", None) or ""
        port = kwargs.pop("redirect_uri_port", None) or 5000
        self.redirect_uri_port = int(port)
        self.redirect_uri = "http://localhost:{}/{}".format(
            self.redirect_uri_port, endpoint
        )
        # Seconds to wait for the authentication response.
        self.timeout = float(kwargs.pop("timeout", None) or 60)
        # Milliseconds the success page stays visible.
        self.success_display_time = int(kwargs.pop("success_display_time", None) or 1)
        # Milliseconds the failure page stays visible.
        self.failure_display_time = int(
            kwargs.pop("failure_display_time", None) or 5000
        )
class OAuth2ResourceOwnerPasswordCredentials(requests.auth.AuthBase, SupportMultiAuth):
    """
    Resource Owner Password Credentials Grant
    Describes an OAuth 2 resource owner password credentials (also called password) flow requests authentication.
    More details can be found in https://tools.ietf.org/html/rfc6749#section-4.3
    """
    def __init__(self, token_url: str, username: str, password: str, **kwargs):
        """
        :param token_url: OAuth 2 token URL.
        :param username: Resource owner user name.
        :param password: Resource owner password.
        :param timeout: Maximum amount of seconds to wait for a token to be received once requested.
        Wait for 1 minute by default.
        :param header_name: Name of the header field used to send token.
        Token will be sent in Authorization header field by default.
        :param header_value: Format used to send the token value.
        "{token}" must be present as it will be replaced by the actual token.
        Token will be sent as "Bearer {token}" by default.
        :param scope: Scope parameter sent to token URL as body. Can also be a list of scopes. Not sent by default.
        :param token_field_name: Field name containing the token. access_token by default.
        :param kwargs: all additional authorization parameters that should be put as body parameters in the token URL.
        """
        self.token_url = token_url
        if not self.token_url:
            raise Exception("Token URL is mandatory.")
        self.username = username
        if not self.username:
            raise Exception("User name is mandatory.")
        self.password = password
        if not self.password:
            raise Exception("Password is mandatory.")
        # Keep the received parameters untouched: pops below are performed on a copy.
        self.kwargs = kwargs
        extra_parameters = dict(kwargs)
        self.header_name = extra_parameters.pop("header_name", None) or "Authorization"
        self.header_value = (
            extra_parameters.pop("header_value", None) or "Bearer {token}"
        )
        if "{token}" not in self.header_value:
            raise Exception("header_value parameter must contains {token}.")
        self.token_field_name = (
            extra_parameters.pop("token_field_name", None) or "access_token"
        )
        # Time is expressed in seconds
        self.timeout = int(extra_parameters.pop("timeout", None) or 60)
        # As described in https://tools.ietf.org/html/rfc6749#section-4.3.2
        self.data = {
            "grant_type": "password",
            "username": self.username,
            "password": self.password,
        }
        scope = extra_parameters.pop("scope", None)
        if scope:
            self.data["scope"] = " ".join(scope) if isinstance(scope, list) else scope
        self.data.update(extra_parameters)
        # Cache key for this authentication: hash of the token URL with every
        # body parameter appended (insertion order of self.data matters here).
        all_parameters_in_url = _add_parameters(self.token_url, self.data)
        self.state = sha512(all_parameters_in_url.encode("unicode_escape")).hexdigest()
    def __call__(self, r):
        # Reuse the cached token, requesting a new one only when absent or expired.
        token = OAuth2.token_cache.get_token(self.state, self.request_new_token)
        r.headers[self.header_name] = self.header_value.format(token=token)
        return r
    def request_new_token(self):
        # As described in https://tools.ietf.org/html/rfc6749#section-4.3.3
        # The resource owner credentials are also sent as basic authentication.
        token, expires_in = request_new_grant_with_post(
            self.token_url,
            self.data,
            self.token_field_name,
            self.timeout,
            auth=(self.username, self.password),
        )
        # Handle both Access and Bearer tokens
        return (self.state, token, expires_in) if expires_in else (self.state, token)
class OAuth2ClientCredentials(requests.auth.AuthBase, SupportMultiAuth):
    """
    Client Credentials Grant
    Describes an OAuth 2 client credentials (also called application) flow requests authentication.
    More details can be found in https://tools.ietf.org/html/rfc6749#section-4.4
    """
    def __init__(self, token_url: str, client_id: str, client_secret: str, **kwargs):
        """
        :param token_url: OAuth 2 token URL.
        :param client_id: OAuth 2 client identifier.
        :param client_secret: OAuth 2 client secret.
        :param timeout: Maximum amount of seconds to wait for a token to be received once requested.
        Wait for 1 minute by default.
        :param header_name: Name of the header field used to send token.
        Token will be sent in Authorization header field by default.
        :param header_value: Format used to send the token value.
        "{token}" must be present as it will be replaced by the actual token.
        Token will be sent as "Bearer {token}" by default.
        :param scope: Scope parameter sent to token URL as body. Can also be a list of scopes. Not sent by default.
        :param token_field_name: Field name containing the token. access_token by default.
        :param kwargs: all additional authorization parameters that should be put as body parameters in the token request.
        """
        self.token_url = token_url
        if not self.token_url:
            raise Exception("Token URL is mandatory.")
        self.client_id = client_id
        if not self.client_id:
            raise Exception("client_id is mandatory.")
        self.client_secret = client_secret
        if not self.client_secret:
            raise Exception("client_secret is mandatory.")
        # Keep the received parameters untouched: pops below are performed on a copy.
        self.kwargs = kwargs
        extra_parameters = dict(kwargs)
        self.header_name = extra_parameters.pop("header_name", None) or "Authorization"
        self.header_value = (
            extra_parameters.pop("header_value", None) or "Bearer {token}"
        )
        if "{token}" not in self.header_value:
            raise Exception("header_value parameter must contains {token}.")
        self.token_field_name = (
            extra_parameters.pop("token_field_name", None) or "access_token"
        )
        # Time is expressed in seconds
        self.timeout = int(extra_parameters.pop("timeout", None) or 60)
        # As described in https://tools.ietf.org/html/rfc6749#section-4.4.2
        self.data = {"grant_type": "client_credentials"}
        scope = extra_parameters.pop("scope", None)
        if scope:
            self.data["scope"] = " ".join(scope) if isinstance(scope, list) else scope
        self.data.update(extra_parameters)
        # Cache key for this authentication: hash of the token URL with every
        # body parameter appended (insertion order of self.data matters here).
        all_parameters_in_url = _add_parameters(self.token_url, self.data)
        self.state = sha512(all_parameters_in_url.encode("unicode_escape")).hexdigest()
    def __call__(self, r):
        # Reuse the cached token, requesting a new one only when absent or expired.
        token = OAuth2.token_cache.get_token(self.state, self.request_new_token)
        r.headers[self.header_name] = self.header_value.format(token=token)
        return r
    def request_new_token(self) -> tuple:
        # As described in https://tools.ietf.org/html/rfc6749#section-4.4.3
        # Client credentials are sent as basic authentication.
        token, expires_in = request_new_grant_with_post(
            self.token_url,
            self.data,
            self.token_field_name,
            self.timeout,
            auth=(self.client_id, self.client_secret),
        )
        # Handle both Access and Bearer tokens
        return (self.state, token, expires_in) if expires_in else (self.state, token)
class OAuth2AuthorizationCode(requests.auth.AuthBase, SupportMultiAuth, BrowserAuth):
    """
    Authorization Code Grant
    Describes an OAuth 2 authorization code (also called access code) flow requests authentication.
    Request a code with client browser, then request a token using this code.
    Store the token and use it for subsequent valid requests.
    More details can be found in https://tools.ietf.org/html/rfc6749#section-4.1
    """
    def __init__(self, authorization_url: str, token_url: str, **kwargs):
        """
        :param authorization_url: OAuth 2 authorization URL.
        :param token_url: OAuth 2 token URL.
        :param redirect_uri_endpoint: Custom endpoint that will be used as redirect_uri the following way:
        http://localhost:<redirect_uri_port>/<redirect_uri_endpoint>. Default value is to redirect on / (root).
        :param redirect_uri_port: The port on which the server listening for the OAuth 2 code will be started.
        Listen on port 5000 by default.
        :param timeout: Maximum amount of seconds to wait for a code or a token to be received once requested.
        Wait for 1 minute by default.
        :param success_display_time: In case a code is successfully received,
        this is the maximum amount of milliseconds the success page will be displayed in your browser.
        Display the page for 1 millisecond by default.
        :param failure_display_time: In case received code is not valid,
        this is the maximum amount of milliseconds the failure page will be displayed in your browser.
        Display the page for 5 seconds by default.
        :param header_name: Name of the header field used to send token.
        Token will be sent in Authorization header field by default.
        :param header_value: Format used to send the token value.
        "{token}" must be present as it will be replaced by the actual token.
        Token will be sent as "Bearer {token}" by default.
        :param response_type: Value of the response_type query parameter if not already provided in authorization URL.
        code by default.
        :param token_field_name: Field name containing the token. access_token by default.
        :param code_field_name: Field name containing the code. code by default.
        :param username: User name in case basic authentication should be used to retrieve token.
        :param password: User password in case basic authentication should be used to retrieve token.
        :param kwargs: all additional authorization parameters that should be put as query parameter
        in the authorization URL and as body parameters in the token URL.
        Usual parameters are:
        * client_id: Corresponding to your Application ID (in Microsoft Azure app portal)
        * client_secret: If client is not authenticated with the authorization server
        * nonce: Refer to http://openid.net/specs/openid-connect-core-1_0.html#IDToken for more details
        """
        self.authorization_url = authorization_url
        if not self.authorization_url:
            raise Exception("Authorization URL is mandatory.")
        self.token_url = token_url
        if not self.token_url:
            raise Exception("Token URL is mandatory.")
        # Pops every browser-flow related parameter (redirect_uri_*, timeout, *_display_time).
        BrowserAuth.__init__(self, kwargs)
        self.header_name = kwargs.pop("header_name", None) or "Authorization"
        self.header_value = kwargs.pop("header_value", None) or "Bearer {token}"
        if "{token}" not in self.header_value:
            raise Exception("header_value parameter must contains {token}.")
        self.token_field_name = kwargs.pop("token_field_name", None) or "access_token"
        username = kwargs.pop("username", None)
        password = kwargs.pop("password", None)
        self.auth = (username, password) if username and password else None
        # As described in https://tools.ietf.org/html/rfc6749#section-4.1.2
        code_field_name = kwargs.pop("code_field_name", "code")
        if _get_query_parameter(self.authorization_url, "response_type"):
            # Ensure provided value will not be overridden
            kwargs.pop("response_type", None)
        else:
            # As described in https://tools.ietf.org/html/rfc6749#section-4.1.1
            kwargs.setdefault("response_type", "code")
        authorization_url_without_nonce = _add_parameters(
            self.authorization_url, kwargs
        )
        # nonce is removed before hashing so that it does not impact the cache key;
        # it is re-added to the code request parameters below.
        authorization_url_without_nonce, nonce = _pop_parameter(
            authorization_url_without_nonce, "nonce"
        )
        self.state = sha512(
            authorization_url_without_nonce.encode("unicode_escape")
        ).hexdigest()
        custom_code_parameters = {
            "state": self.state,
            "redirect_uri": self.redirect_uri,
        }
        if nonce:
            custom_code_parameters["nonce"] = nonce
        code_grant_url = _add_parameters(
            authorization_url_without_nonce, custom_code_parameters
        )
        self.code_grant_details = oauth2_authentication_responses_server.GrantDetails(
            code_grant_url,
            code_field_name,
            self.timeout,
            self.success_display_time,
            self.failure_display_time,
            self.redirect_uri_port,
        )
        # As described in https://tools.ietf.org/html/rfc6749#section-4.1.3
        self.token_data = {
            "grant_type": "authorization_code",
            "redirect_uri": self.redirect_uri,
        }
        self.token_data.update(kwargs)
    def __call__(self, r):
        # Reuse the cached token, requesting a new one only when absent or expired.
        token = OAuth2.token_cache.get_token(self.state, self.request_new_token)
        r.headers[self.header_name] = self.header_value.format(token=token)
        return r
    def request_new_token(self):
        # Request code
        # The state received alongside the code is not used: self.state is the cache key.
        state, code = oauth2_authentication_responses_server.request_new_grant(
            self.code_grant_details
        )
        # As described in https://tools.ietf.org/html/rfc6749#section-4.1.3
        self.token_data["code"] = code
        # As described in https://tools.ietf.org/html/rfc6749#section-4.1.4
        token, expires_in = request_new_grant_with_post(
            self.token_url,
            self.token_data,
            self.token_field_name,
            self.timeout,
            auth=self.auth,
        )
        # Handle both Access and Bearer tokens
        return (self.state, token, expires_in) if expires_in else (self.state, token)
class OAuth2AuthorizationCodePKCE(
    requests.auth.AuthBase, SupportMultiAuth, BrowserAuth
):
    """
    Proof Key for Code Exchange
    Describes an OAuth 2 Proof Key for Code Exchange (PKCE) flow requests authentication.
    Request a code with client browser, then request a token using this code.
    Store the token and use it for subsequent valid requests.
    More details can be found in https://tools.ietf.org/html/rfc7636
    """
    def __init__(self, authorization_url: str, token_url: str, **kwargs):
        """
        :param authorization_url: OAuth 2 authorization URL.
        :param token_url: OAuth 2 token URL.
        :param redirect_uri_endpoint: Custom endpoint that will be used as redirect_uri the following way:
        http://localhost:<redirect_uri_port>/<redirect_uri_endpoint>. Default value is to redirect on / (root).
        :param redirect_uri_port: The port on which the server listening for the OAuth 2 code will be started.
        Listen on port 5000 by default.
        :param timeout: Maximum amount of seconds to wait for a code or a token to be received once requested.
        Wait for 1 minute by default.
        :param success_display_time: In case a code is successfully received,
        this is the maximum amount of milliseconds the success page will be displayed in your browser.
        Display the page for 1 millisecond by default.
        :param failure_display_time: In case received code is not valid,
        this is the maximum amount of milliseconds the failure page will be displayed in your browser.
        Display the page for 5 seconds by default.
        :param header_name: Name of the header field used to send token.
        Token will be sent in Authorization header field by default.
        :param header_value: Format used to send the token value.
        "{token}" must be present as it will be replaced by the actual token.
        Token will be sent as "Bearer {token}" by default.
        :param response_type: Value of the response_type query parameter if not already provided in authorization URL.
        code by default.
        :param token_field_name: Field name containing the token. access_token by default.
        :param code_field_name: Field name containing the code. code by default.
        :param kwargs: all additional authorization parameters that should be put as query parameter
        in the authorization URL and as body parameters in the token URL.
        Usual parameters are:
        * client_id: Corresponding to your Application ID (in Microsoft Azure app portal)
        * client_secret: If client is not authenticated with the authorization server
        * nonce: Refer to http://openid.net/specs/openid-connect-core-1_0.html#IDToken for more details
        """
        self.authorization_url = authorization_url
        if not self.authorization_url:
            raise Exception("Authorization URL is mandatory.")
        self.token_url = token_url
        if not self.token_url:
            raise Exception("Token URL is mandatory.")
        # Pops every browser-flow related parameter (redirect_uri_*, timeout, *_display_time).
        BrowserAuth.__init__(self, kwargs)
        self.header_name = kwargs.pop("header_name", None) or "Authorization"
        self.header_value = kwargs.pop("header_value", None) or "Bearer {token}"
        if "{token}" not in self.header_value:
            raise Exception("header_value parameter must contains {token}.")
        self.token_field_name = kwargs.pop("token_field_name", None) or "access_token"
        # As described in https://tools.ietf.org/html/rfc6749#section-4.1.2
        code_field_name = kwargs.pop("code_field_name", "code")
        authorization_url_without_response_type, response_type = _pop_parameter(
            self.authorization_url, "response_type"
        )
        if response_type:
            # Ensure provided value will not be overridden
            kwargs["response_type"] = response_type
        else:
            # As described in https://tools.ietf.org/html/rfc6749#section-4.1.1
            kwargs.setdefault("response_type", "code")
        authorization_url_without_nonce = _add_parameters(
            authorization_url_without_response_type, kwargs
        )
        # nonce is removed before hashing so that it does not impact the cache key;
        # it is re-added to the code request parameters below.
        authorization_url_without_nonce, nonce = _pop_parameter(
            authorization_url_without_nonce, "nonce"
        )
        self.state = sha512(
            authorization_url_without_nonce.encode("unicode_escape")
        ).hexdigest()
        custom_code_parameters = {
            "state": self.state,
            "redirect_uri": self.redirect_uri,
        }
        if nonce:
            custom_code_parameters["nonce"] = nonce
        # generate PKCE code verifier and challenge
        code_verifier = self.generate_code_verifier()
        code_challenge = self.generate_code_challenge(code_verifier)
        # add code challenge parameters to the authorization_url request
        custom_code_parameters["code_challenge"] = code_challenge
        custom_code_parameters["code_challenge_method"] = "S256"
        code_grant_url = _add_parameters(
            authorization_url_without_nonce, custom_code_parameters
        )
        self.code_grant_details = oauth2_authentication_responses_server.GrantDetails(
            code_grant_url,
            code_field_name,
            self.timeout,
            self.success_display_time,
            self.failure_display_time,
            self.redirect_uri_port,
        )
        # As described in https://tools.ietf.org/html/rfc6749#section-4.1.3
        # include the PKCE code verifier used in the second part of the flow
        self.token_data = {
            "code_verifier": code_verifier,
            "grant_type": "authorization_code",
            "redirect_uri": self.redirect_uri,
        }
        self.token_data.update(kwargs)
    def __call__(self, r):
        # Reuse the cached token, requesting a new one only when absent or expired.
        token = OAuth2.token_cache.get_token(self.state, self.request_new_token)
        r.headers[self.header_name] = self.header_value.format(token=token)
        return r
    def request_new_token(self) -> tuple:
        # Request code
        # The state received alongside the code is not used: self.state is the cache key.
        state, code = oauth2_authentication_responses_server.request_new_grant(
            self.code_grant_details
        )
        # As described in https://tools.ietf.org/html/rfc6749#section-4.1.3
        self.token_data["code"] = code
        # As described in https://tools.ietf.org/html/rfc6749#section-4.1.4
        token, expires_in = request_new_grant_with_post(
            self.token_url, self.token_data, self.token_field_name, self.timeout
        )
        # Handle both Access and Bearer tokens
        return (self.state, token, expires_in) if expires_in else (self.state, token)
    @staticmethod
    def generate_code_verifier() -> bytes:
        """
        Source: https://github.com/openstack/deb-python-oauth2client/blob/master/oauth2client/_pkce.py
        Generates a 'code_verifier' as described in section 4.1 of RFC 7636.
        This is a 'high-entropy cryptographic random string' that will be
        impractical for an attacker to guess.
        https://tools.ietf.org/html/rfc7636#section-4.1
        :return: urlsafe base64-encoded random data.
        """
        return base64.urlsafe_b64encode(os.urandom(64)).rstrip(b"=")
    @staticmethod
    def generate_code_challenge(verifier: bytes) -> bytes:
        """
        Source: https://github.com/openstack/deb-python-oauth2client/blob/master/oauth2client/_pkce.py
        Creates a 'code_challenge' as described in section 4.2 of RFC 7636
        by taking the sha256 hash of the verifier and then urlsafe
        base64-encoding it.
        https://tools.ietf.org/html/rfc7636#section-4.1
        :param verifier: code_verifier as generated by generate_code_verifier()
        :return: urlsafe base64-encoded sha256 hash digest, without '=' padding.
        """
        digest = sha256(verifier).digest()
        return base64.urlsafe_b64encode(digest).rstrip(b"=")
class OAuth2Implicit(requests.auth.AuthBase, SupportMultiAuth, BrowserAuth):
    """
    Implicit Grant
    Describes an OAuth 2 implicit flow requests authentication.
    Request a token with client browser.
    Store the token and use it for subsequent valid requests.
    More details can be found in https://tools.ietf.org/html/rfc6749#section-4.2
    """
    def __init__(self, authorization_url: str, **kwargs):
        """
        :param authorization_url: OAuth 2 authorization URL.
        :param response_type: Value of the response_type query parameter if not already provided in authorization URL.
        token by default.
        :param token_field_name: Name of the expected field containing the token.
        id_token by default if response_type is id_token, else access_token.
        :param redirect_uri_endpoint: Custom endpoint that will be used as redirect_uri the following way:
        http://localhost:<redirect_uri_port>/<redirect_uri_endpoint>. Default value is to redirect on / (root).
        :param redirect_uri_port: The port on which the server listening for the OAuth 2 token will be started.
        Listen on port 5000 by default.
        :param timeout: Maximum amount of seconds to wait for a token to be received once requested.
        Wait for 1 minute by default.
        :param success_display_time: In case a token is successfully received,
        this is the maximum amount of milliseconds the success page will be displayed in your browser.
        Display the page for 1 millisecond by default.
        :param failure_display_time: In case received token is not valid,
        this is the maximum amount of milliseconds the failure page will be displayed in your browser.
        Display the page for 5 seconds by default.
        :param header_name: Name of the header field used to send token.
        Token will be sent in Authorization header field by default.
        :param header_value: Format used to send the token value.
        "{token}" must be present as it will be replaced by the actual token.
        Token will be sent as "Bearer {token}" by default.
        :param kwargs: all additional authorization parameters that should be put as query parameter
        in the authorization URL.
        Usual parameters are:
        * client_id: Corresponding to your Application ID (in Microsoft Azure app portal)
        * nonce: Refer to http://openid.net/specs/openid-connect-core-1_0.html#IDToken for more details
        * prompt: none to avoid prompting the user if a session is already opened.
        """
        self.authorization_url = authorization_url
        if not self.authorization_url:
            raise Exception("Authorization URL is mandatory.")
        # Pops every browser-flow related parameter (redirect_uri_*, timeout, *_display_time).
        BrowserAuth.__init__(self, kwargs)
        self.header_name = kwargs.pop("header_name", None) or "Authorization"
        self.header_value = kwargs.pop("header_value", None) or "Bearer {token}"
        if "{token}" not in self.header_value:
            raise Exception("header_value parameter must contains {token}.")
        response_type = _get_query_parameter(self.authorization_url, "response_type")
        if response_type:
            # Ensure provided value will not be overridden
            kwargs.pop("response_type", None)
        else:
            # As described in https://tools.ietf.org/html/rfc6749#section-4.2.1
            response_type = kwargs.setdefault("response_type", "token")
        # As described in https://tools.ietf.org/html/rfc6749#section-4.2.2
        token_field_name = kwargs.pop("token_field_name", None)
        if not token_field_name:
            # OpenID Connect id_token responses store the token in the id_token field.
            token_field_name = (
                "id_token" if "id_token" == response_type else "access_token"
            )
        authorization_url_without_nonce = _add_parameters(
            self.authorization_url, kwargs
        )
        # nonce is removed before hashing so that it does not impact the cache key;
        # it is re-added to the grant request parameters below.
        authorization_url_without_nonce, nonce = _pop_parameter(
            authorization_url_without_nonce, "nonce"
        )
        self.state = sha512(
            authorization_url_without_nonce.encode("unicode_escape")
        ).hexdigest()
        custom_parameters = {"state": self.state, "redirect_uri": self.redirect_uri}
        if nonce:
            custom_parameters["nonce"] = nonce
        grant_url = _add_parameters(authorization_url_without_nonce, custom_parameters)
        self.grant_details = oauth2_authentication_responses_server.GrantDetails(
            grant_url,
            token_field_name,
            self.timeout,
            self.success_display_time,
            self.failure_display_time,
            self.redirect_uri_port,
        )
    def __call__(self, r):
        # The browser grant returns the token directly: there is no token URL in this flow.
        token = OAuth2.token_cache.get_token(
            self.state,
            oauth2_authentication_responses_server.request_new_grant,
            self.grant_details,
        )
        r.headers[self.header_name] = self.header_value.format(token=token)
        return r
class AzureActiveDirectoryImplicit(OAuth2Implicit):
    """
    Azure Active Directory (OAuth 2) "Access Token" implicit flow authentication.
    https://docs.microsoft.com/en-us/azure/active-directory/develop/access-tokens
    """
    def __init__(self, tenant_id: str, client_id: str, **kwargs):
        """
        :param tenant_id: Microsoft Tenant Identifier (formatted as an Universal Unique Identifier).
        :param client_id: Microsoft Application Identifier (formatted as an Universal Unique Identifier).
        :param response_type: Value of the response_type query parameter. token by default.
        :param token_field_name: Name of the expected field containing the token. access_token by default.
        :param nonce: Refer to http://openid.net/specs/openid-connect-core-1_0.html#IDToken for more details
        (formatted as an Universal Unique Identifier - UUID). A newly generated UUID by default.
        :param redirect_uri_endpoint: Custom endpoint used as redirect_uri the following way:
        http://localhost:<redirect_uri_port>/<redirect_uri_endpoint>. Redirect on / (root) by default.
        :param redirect_uri_port: Port of the local server waiting for the OAuth 2 token. 5000 by default.
        :param timeout: Maximum amount of seconds to wait for a token once requested. 60 by default.
        :param success_display_time: Maximum amount of milliseconds the success page is displayed
        in your browser. 1 by default.
        :param failure_display_time: Maximum amount of milliseconds the failure page is displayed
        in your browser. 5000 by default.
        :param header_name: Name of the header field used to send the token. Authorization by default.
        :param header_value: Format used to send the token value; "{token}" must be present.
        "Bearer {token}" by default.
        :param kwargs: all additional authorization parameters that should be put as query parameter
        in the authorization URL (e.g. prompt="none" to avoid prompting the user if a session
        is already opened).
        """
        # Always send a nonce: use the caller's or generate a fresh UUID.
        nonce = kwargs.pop("nonce", None) or str(uuid.uuid4())
        authorization_url = (
            f"https://login.microsoftonline.com/{tenant_id}/oauth2/authorize"
        )
        super().__init__(
            authorization_url,
            client_id=client_id,
            nonce=nonce,
            **kwargs,
        )
class AzureActiveDirectoryImplicitIdToken(OAuth2Implicit):
    """
    Azure Active Directory (OpenID Connect) "ID Token" implicit flow authentication.
    https://docs.microsoft.com/en-us/azure/active-directory/develop/id-tokens
    """
    def __init__(self, tenant_id: str, client_id: str, **kwargs):
        """
        :param tenant_id: Microsoft Tenant Identifier (formatted as an Universal Unique Identifier).
        :param client_id: Microsoft Application Identifier (formatted as an Universal Unique Identifier).
        :param response_type: Value of the response_type query parameter. id_token by default.
        :param token_field_name: Name of the expected field containing the token. id_token by default.
        :param nonce: Refer to http://openid.net/specs/openid-connect-core-1_0.html#IDToken for more details
        (formatted as an Universal Unique Identifier - UUID). A newly generated UUID by default.
        :param redirect_uri_endpoint: Custom endpoint used as redirect_uri the following way:
        http://localhost:<redirect_uri_port>/<redirect_uri_endpoint>. Redirect on / (root) by default.
        :param redirect_uri_port: Port of the local server waiting for the OAuth 2 token. 5000 by default.
        :param timeout: Maximum amount of seconds to wait for a token once requested. 60 by default.
        :param success_display_time: Maximum amount of milliseconds the success page is displayed
        in your browser. 1 by default.
        :param failure_display_time: Maximum amount of milliseconds the failure page is displayed
        in your browser. 5000 by default.
        :param header_name: Name of the header field used to send the token. Authorization by default.
        :param header_value: Format used to send the token value; "{token}" must be present.
        "Bearer {token}" by default.
        :param kwargs: all additional authorization parameters that should be put as query parameter
        in the authorization URL (e.g. prompt="none" to avoid prompting the user if a session
        is already opened).
        """
        # Default to the OpenID Connect id_token response, but let the caller override.
        response_type = kwargs.pop("response_type", "id_token")
        token_field_name = kwargs.pop("token_field_name", "id_token")
        nonce = kwargs.pop("nonce", None) or str(uuid.uuid4())
        super().__init__(
            f"https://login.microsoftonline.com/{tenant_id}/oauth2/authorize",
            client_id=client_id,
            response_type=response_type,
            token_field_name=token_field_name,
            nonce=nonce,
            **kwargs,
        )
class OktaImplicit(OAuth2Implicit):
    """
    OKTA (OAuth 2) "Access Token" implicit flow authentication.
    https://developer.okta.com/docs/guides/implement-implicit/overview/
    """
    def __init__(self, instance: str, client_id: str, **kwargs):
        """
        :param instance: OKTA instance (like "testserver.okta-emea.com").
        :param client_id: OKTA Application Identifier (formatted as an Universal Unique Identifier).
        :param response_type: Value of the response_type query parameter. token by default.
        :param token_field_name: Name of the expected field containing the token. access_token by default.
        :param nonce: Refer to http://openid.net/specs/openid-connect-core-1_0.html#IDToken for more details
        (formatted as an Universal Unique Identifier - UUID). A newly generated UUID by default.
        :param authorization_server: OKTA authorization server. default by default.
        :param scope: Scope parameter sent in query. Can also be a list of scopes.
        ['openid', 'profile', 'email'] by default.
        :param redirect_uri_endpoint: Custom endpoint used as redirect_uri the following way:
        http://localhost:<redirect_uri_port>/<redirect_uri_endpoint>. Redirect on / (root) by default.
        :param redirect_uri_port: Port of the local server waiting for the OAuth 2 token. 5000 by default.
        :param timeout: Maximum amount of seconds to wait for a token once requested. 60 by default.
        :param success_display_time: Maximum amount of milliseconds the success page is displayed
        in your browser. 1 by default.
        :param failure_display_time: Maximum amount of milliseconds the failure page is displayed
        in your browser. 5000 by default.
        :param header_name: Name of the header field used to send the token. Authorization by default.
        :param header_value: Format used to send the token value; "{token}" must be present.
        "Bearer {token}" by default.
        :param kwargs: all additional authorization parameters that should be put as query parameter
        in the authorization URL (e.g. prompt="none" to avoid prompting the user if a session
        is already opened).
        """
        auth_server = kwargs.pop("authorization_server", None) or "default"
        # Scopes are sent as a single space-separated query value.
        requested_scopes = kwargs.pop("scope", None) or ["openid", "profile", "email"]
        if isinstance(requested_scopes, list):
            requested_scopes = " ".join(requested_scopes)
        kwargs["scope"] = requested_scopes
        super().__init__(
            f"https://{instance}/oauth2/{auth_server}/v1/authorize",
            client_id=client_id,
            nonce=kwargs.pop("nonce", None) or str(uuid.uuid4()),
            **kwargs,
        )
class OktaImplicitIdToken(OAuth2Implicit):
    """
    Describes an OKTA (OpenID Connect) "ID Token" implicit flow requests authentication.
    """

    def __init__(self, instance: str, client_id: str, **kwargs):
        """
        :param instance: OKTA instance (like "testserver.okta-emea.com")
        :param client_id: OKTA Application Identifier (formatted as an Universal Unique Identifier)
        :param response_type: Value of the response_type query parameter. id_token by default.
        :param token_field_name: Name of the expected field containing the token. id_token by default.
        :param nonce: Refer to http://openid.net/specs/openid-connect-core-1_0.html#IDToken for more details
            (formatted as an Universal Unique Identifier - UUID). Use a newly generated UUID by default.
        :param authorization_server: OKTA authorization server. default by default.
        :param scope: Scope parameter sent in query. Can also be a list of scopes.
            Request ['openid', 'profile', 'email'] by default.
        :param redirect_uri_endpoint: Custom endpoint that will be used as redirect_uri the following way:
            http://localhost:<redirect_uri_port>/<redirect_uri_endpoint>. Default value is to redirect on / (root).
        :param redirect_uri_port: Port of the local server awaiting the OAuth 2 token. 5000 by default.
        :param timeout: Maximum amount of seconds to wait for a token once requested. 1 minute by default.
        :param success_display_time: Maximum amount of milliseconds the success page will be displayed
            in your browser. 1 millisecond by default.
        :param failure_display_time: Maximum amount of milliseconds the failure page will be displayed
            in your browser. 5 seconds by default.
        :param header_name: Name of the header field used to send token. Authorization by default.
        :param header_value: Format used to send the token value. "{token}" must be present as it will
            be replaced by the actual token. "Bearer {token}" by default.
        :param kwargs: all additional authorization parameters that should be put as query parameter
            in the authorization URL. Usual parameters are:
            * prompt: none to avoid prompting the user if a session is already opened.
        """
        server = kwargs.pop("authorization_server", None) or "default"
        # Scope may be a list; the query parameter must be space-separated.
        scope = kwargs.pop("scope", None) or ["openid", "profile", "email"]
        kwargs["scope"] = " ".join(scope) if isinstance(scope, list) else scope
        # Defaults specific to the "ID Token" flavor of the implicit flow.
        kwargs["response_type"] = kwargs.pop("response_type", "id_token")
        kwargs["token_field_name"] = kwargs.pop("token_field_name", "id_token")
        kwargs["nonce"] = kwargs.pop("nonce", None) or str(uuid.uuid4())
        OAuth2Implicit.__init__(
            self,
            f"https://{instance}/oauth2/{server}/v1/authorize",
            client_id=client_id,
            **kwargs,
        )
class OktaAuthorizationCode(OAuth2AuthorizationCode):
    """
    Describes an OKTA (OAuth 2) "Access Token" authorization code flow requests authentication.
    """

    def __init__(self, instance: str, client_id: str, **kwargs):
        """
        :param instance: OKTA instance (like "testserver.okta-emea.com")
        :param client_id: OKTA Application Identifier (formatted as an Universal Unique Identifier)
        :param response_type: Value of the response_type query parameter. token by default.
        :param token_field_name: Name of the expected field containing the token. access_token by default.
        :param nonce: Refer to http://openid.net/specs/openid-connect-core-1_0.html#IDToken for more details
            (formatted as an Universal Unique Identifier - UUID). Use a newly generated UUID by default.
        :param authorization_server: OKTA authorization server. default by default.
        :param scope: Scope parameter sent in query. Can also be a list of scopes. 'openid' by default.
        :param redirect_uri_endpoint: Custom endpoint that will be used as redirect_uri the following way:
            http://localhost:<redirect_uri_port>/<redirect_uri_endpoint>. Default value is to redirect on / (root).
        :param redirect_uri_port: Port of the local server awaiting the OAuth 2 token. 5000 by default.
        :param timeout: Maximum amount of seconds to wait for a token once requested. 1 minute by default.
        :param success_display_time: Maximum amount of milliseconds the success page will be displayed
            in your browser. 1 millisecond by default.
        :param failure_display_time: Maximum amount of milliseconds the failure page will be displayed
            in your browser. 5 seconds by default.
        :param header_name: Name of the header field used to send token. Authorization by default.
        :param header_value: Format used to send the token value. "{token}" must be present as it will
            be replaced by the actual token. "Bearer {token}" by default.
        :param kwargs: all additional authorization parameters that should be put as query parameter
            in the authorization URL. Usual parameters are:
            * prompt: none to avoid prompting the user if a session is already opened.
        """
        server = kwargs.pop("authorization_server", None) or "default"
        # Normalize the scope to a single space-separated string.
        raw_scope = kwargs.pop("scope", "openid")
        if isinstance(raw_scope, list):
            raw_scope = " ".join(raw_scope)
        kwargs["scope"] = raw_scope
        OAuth2AuthorizationCode.__init__(
            self,
            f"https://{instance}/oauth2/{server}/v1/authorize",
            f"https://{instance}/oauth2/{server}/v1/token",
            client_id=client_id,
            **kwargs,
        )
class OktaAuthorizationCodePKCE(OAuth2AuthorizationCodePKCE):
    """
    Describes an OKTA (OAuth 2) "Access Token" Proof Key for Code Exchange (PKCE) flow requests authentication.
    """

    def __init__(self, instance: str, client_id: str, **kwargs):
        """
        :param instance: OKTA instance (like "testserver.okta-emea.com")
        :param client_id: OKTA Application Identifier (formatted as an Universal Unique Identifier)
        :param response_type: Value of the response_type query parameter. code by default.
        :param token_field_name: Name of the expected field containing the token. access_token by default.
        :param code_field_name: Field name containing the code. code by default.
        :param nonce: Refer to http://openid.net/specs/openid-connect-core-1_0.html#IDToken for more details
            (formatted as an Universal Unique Identifier - UUID). Use a newly generated UUID by default.
        :param authorization_server: OKTA authorization server. default by default.
        :param scope: Scope parameter sent in query. Can also be a list of scopes. 'openid' by default.
        :param redirect_uri_endpoint: Custom endpoint that will be used as redirect_uri the following way:
            http://localhost:<redirect_uri_port>/<redirect_uri_endpoint>. Default value is to redirect on / (root).
        :param redirect_uri_port: Port of the local server awaiting the OAuth 2 token. 5000 by default.
        :param timeout: Maximum amount of seconds to wait for a token once requested. 1 minute by default.
        :param success_display_time: Maximum amount of milliseconds the success page will be displayed
            in your browser. 1 millisecond by default.
        :param failure_display_time: Maximum amount of milliseconds the failure page will be displayed
            in your browser. 5 seconds by default.
        :param header_name: Name of the header field used to send token. Authorization by default.
        :param header_value: Format used to send the token value. "{token}" must be present as it will
            be replaced by the actual token. "Bearer {token}" by default.
        :param kwargs: all additional authorization parameters that should be put as query parameter
            in the authorization URL and as body parameters in the token URL. Usual parameters are:
            * client_secret: If client is not authenticated with the authorization server
            * nonce: Refer to http://openid.net/specs/openid-connect-core-1_0.html#IDToken for more details
        """
        server = kwargs.pop("authorization_server", None) or "default"
        # Normalize the scope to a single space-separated string.
        raw_scope = kwargs.pop("scope", "openid")
        if isinstance(raw_scope, list):
            raw_scope = " ".join(raw_scope)
        kwargs["scope"] = raw_scope
        OAuth2AuthorizationCodePKCE.__init__(
            self,
            f"https://{instance}/oauth2/{server}/v1/authorize",
            f"https://{instance}/oauth2/{server}/v1/token",
            client_id=client_id,
            **kwargs,
        )
class OktaClientCredentials(OAuth2ClientCredentials):
    """
    Describes an OKTA (OAuth 2) client credentials (also called application) flow requests authentication.
    """

    def __init__(self, instance: str, client_id: str, client_secret: str, **kwargs):
        """
        :param instance: OKTA instance (like "testserver.okta-emea.com")
        :param client_id: OKTA Application Identifier (formatted as an Universal Unique Identifier)
        :param client_secret: Secret associated with the OKTA Application, used to authenticate the client.
        :param authorization_server: OKTA authorization server. default by default.
        :param timeout: Maximum amount of seconds to wait for a token once requested. 1 minute by default.
        :param header_name: Name of the header field used to send token. Authorization by default.
        :param header_value: Format used to send the token value. "{token}" must be present as it will
            be replaced by the actual token. "Bearer {token}" by default.
        :param scope: Scope parameter sent to token URL as body. Can also be a list of scopes.
            'openid' by default.
        :param token_field_name: Field name containing the token. access_token by default.
        :param kwargs: all additional authorization parameters that should be put as query parameter in the token URL.
        """
        server = kwargs.pop("authorization_server", None) or "default"
        # Normalize the scope to a single space-separated string.
        raw_scope = kwargs.pop("scope", "openid")
        if isinstance(raw_scope, list):
            raw_scope = " ".join(raw_scope)
        kwargs["scope"] = raw_scope
        OAuth2ClientCredentials.__init__(
            self,
            f"https://{instance}/oauth2/{server}/v1/token",
            client_id=client_id,
            client_secret=client_secret,
            **kwargs,
        )
class HeaderApiKey(requests.auth.AuthBase, SupportMultiAuth):
    """Sends an API key in a request header on every request."""

    def __init__(self, api_key: str, header_name: str = None):
        """
        :param api_key: The API key that will be sent.
        :param header_name: Name of the header field. "X-API-Key" by default.
        """
        self.api_key = api_key
        if not self.api_key:
            raise Exception("API Key is mandatory.")
        self.header_name = header_name or "X-API-Key"

    def __call__(self, r):
        # Inject the key into the outgoing request headers.
        r.headers[self.header_name] = self.api_key
        return r
class QueryApiKey(requests.auth.AuthBase, SupportMultiAuth):
    """Sends an API key as a URL query parameter on every request."""

    def __init__(self, api_key: str, query_parameter_name: str = None):
        """
        :param api_key: The API key that will be sent.
        :param query_parameter_name: Name of the query parameter. "api_key" by default.
        """
        self.api_key = api_key
        if not self.api_key:
            raise Exception("API Key is mandatory.")
        self.query_parameter_name = query_parameter_name or "api_key"

    def __call__(self, r):
        # Append the key to the request URL's query string.
        r.url = _add_parameters(r.url, {self.query_parameter_name: self.api_key})
        return r
class Basic(requests.auth.HTTPBasicAuth, SupportMultiAuth):
    """Describes a basic requests authentication."""

    def __init__(self, username: str, password: str):
        # Credential storage and header emission are handled entirely by
        # requests' built-in HTTP Basic implementation.
        super().__init__(username, password)
class NTLM(requests.auth.AuthBase, SupportMultiAuth):
    """Describes a NTLM requests authentication."""

    def __init__(self, username: str = None, password: str = None):
        """
        :param username: Mandatory if requests_negotiate_sspi module is not installed.
        :param password: Mandatory if requests_negotiate_sspi module is not installed.
        """
        self.username = username
        self.password = password
        if username or password:
            # Explicit credentials were supplied: both parts are required.
            if not username:
                raise Exception(
                    'NTLM authentication requires "username" to be provided in security_details.'
                )
            if not password:
                raise Exception(
                    'NTLM authentication requires "password" to be provided in security_details.'
                )
            try:
                import requests_ntlm

                self.auth = requests_ntlm.HttpNtlmAuth(username, password)
            except ImportError:
                raise Exception("NTLM authentication requires requests_ntlm module.")
        else:
            # No credentials: fall back to the current Windows session via SSPI.
            try:
                import requests_negotiate_sspi

                self.auth = requests_negotiate_sspi.HttpNegotiateAuth()
            except ImportError:
                raise Exception(
                    "NTLM authentication requires requests_negotiate_sspi module."
                )

    def __call__(self, r):
        # Delegate header generation to the underlying NTLM/SSPI implementation.
        self.auth.__call__(r)
        return r
class _MultiAuth(requests.auth.AuthBase):
    """Authentication using multiple authentication methods."""

    def __init__(self, *authentication_modes):
        self.authentication_modes = authentication_modes

    def __call__(self, r):
        # Apply every authentication mode to the request, in registration order.
        for mode in self.authentication_modes:
            mode(r)
        return r

    def _merged_with(self, other):
        # Keep the modes flat: merging two _MultiAuth instances concatenates
        # their tuples rather than nesting one inside the other.
        if isinstance(other, _MultiAuth):
            return _MultiAuth(*self.authentication_modes, *other.authentication_modes)
        return _MultiAuth(*self.authentication_modes, other)

    def __add__(self, other):
        return self._merged_with(other)

    def __and__(self, other):
        return self._merged_with(other)
class Auths(_MultiAuth):
    # Deprecated alias kept for backward compatibility; combine authentication
    # instances with the + (or &) operator instead of constructing this class.
    def __init__(self, *authentication_modes):
        warnings.warn(
            "Auths class will be removed in the future. Use + instead.",
            DeprecationWarning,
        )
        super().__init__(*authentication_modes)
| 48.467185
| 118
| 0.680659
|
4a00ec1734ef4694de51af68f2cd2a5950ad71e4
| 9,536
|
py
|
Python
|
CIFAR/C100P1.2DropOutPatched.py
|
kice/SeqResNet
|
b9663b5835b1605c4a6bd79f0f5bbefa0a95e8b4
|
[
"MIT"
] | 1
|
2019-04-06T17:33:38.000Z
|
2019-04-06T17:33:38.000Z
|
CIFAR/C100P1.2DropOutPatched.py
|
kice/SeqResNet
|
b9663b5835b1605c4a6bd79f0f5bbefa0a95e8b4
|
[
"MIT"
] | null | null | null |
CIFAR/C100P1.2DropOutPatched.py
|
kice/SeqResNet
|
b9663b5835b1605c4a6bd79f0f5bbefa0a95e8b4
|
[
"MIT"
] | null | null | null |
import os
import time
import torch
from torchvision import datasets, transforms
from ModelDropOutPatched import ResNetCIFAR
import numpy as np
import random
# Seed every RNG in play (Python, NumPy, PyTorch CPU and all GPUs) so that
# repeated runs of this script are reproducible.
np.random.seed(0)
random.seed(0)
torch.manual_seed(0)
torch.cuda.manual_seed(0)
torch.cuda.manual_seed_all(0)
# Force deterministic cuDNN kernels and disable auto-tuning; this trades some
# speed for run-to-run repeatability.
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
class AverageMeter(object):
    """
    Tracks the latest value and a running (weighted) average of a series.
    Copied from: https://github.com/pytorch/examples/blob/master/imagenet/main.py
    """

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics back to zero."""
        self.val = 0
        self.sum = 0
        self.count = 0
        self.avg = 0

    def update(self, val, n=1):
        """Record `val`, observed `n` times, and refresh the running average."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
def train_epoch(model, loader, optimizer, epoch, n_epochs, print_freq=1, info=''):
    """Run one optimization pass over `loader`.

    Returns (avg_batch_time, avg_loss, avg_error) for the epoch.
    """
    timer = AverageMeter()
    loss_meter = AverageMeter()
    err_meter = AverageMeter()

    # Enable training-mode behavior (dropout, batch-norm updates).
    model.train()

    tic = time.time()
    for step, (input, target) in enumerate(loader):
        # Move the batch onto the GPU when one is available.
        if torch.cuda.is_available():
            input_var = torch.autograd.Variable(input.cuda(non_blocking=True))
            target_var = torch.autograd.Variable(target.cuda(non_blocking=True))
        else:
            input_var = torch.autograd.Variable(input)
            target_var = torch.autograd.Variable(target)

        # Forward pass and loss.
        output = model(input_var)
        loss = torch.nn.functional.cross_entropy(output, target_var)

        # Record top-1 error and loss, weighted by the batch size.
        batch_size = target.size(0)
        _, pred = output.data.cpu().topk(1, dim=1)
        err_meter.update(torch.ne(pred.squeeze(), target.cpu()).float().sum() / batch_size, batch_size)
        loss_meter.update(loss.item(), batch_size)

        # Backward pass and parameter update.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Track per-batch wall time.
        timer.update(time.time() - tic)
        tic = time.time()

        if step % print_freq == 0:
            print('\t'.join([
                info + ' Epoch: [%d/%d]' % (epoch + 1, n_epochs),
                'Iter: [%d/%d]' % (step + 1, len(loader)),
                'Time %.3f (%.3f)' % (timer.val, timer.avg),
                'Loss %.4f (%.4f)' % (loss_meter.val, loss_meter.avg),
                'Error %.4f (%.4f)' % (err_meter.val, err_meter.avg)
            ]))

    return timer.avg, loss_meter.avg, err_meter.avg
def test_epoch(model, loader, print_freq=1, is_test=True):
    """Evaluate `model` over `loader` without gradient computation.

    Returns (avg_batch_time, avg_loss, avg_error).
    """
    timer = AverageMeter()
    loss_meter = AverageMeter()
    err_meter = AverageMeter()

    # Evaluation-mode behavior (dropout off, batch-norm uses running stats).
    model.eval()

    tic = time.time()
    with torch.no_grad():
        for step, (input, target) in enumerate(loader):
            # Move the batch onto the GPU when one is available.
            if torch.cuda.is_available():
                input_var = torch.autograd.Variable(input.cuda(non_blocking=True))
                target_var = torch.autograd.Variable(target.cuda(non_blocking=True))
            else:
                input_var = torch.autograd.Variable(input)
                target_var = torch.autograd.Variable(target)

            # Forward pass and loss.
            output = model(input_var)
            loss = torch.nn.functional.cross_entropy(output, target_var)

            # Record top-1 error and loss, weighted by the batch size.
            batch_size = target.size(0)
            _, pred = output.data.cpu().topk(1, dim=1)
            err_meter.update(torch.ne(pred.squeeze(), target.cpu()).float().sum() / batch_size, batch_size)
            loss_meter.update(loss.item(), batch_size)

            # Track per-batch wall time.
            timer.update(time.time() - tic)
            tic = time.time()

            if step % print_freq == 0:
                print('\t'.join([
                    'Test' if is_test else 'Valid',
                    'Iter: [%d/%d]' % (step + 1, len(loader)),
                    'Time %.3f (%.3f)' % (timer.val, timer.avg),
                    'Loss %.4f (%.4f)' % (loss_meter.val, loss_meter.avg),
                    'Error %.4f (%.4f)' % (err_meter.val, err_meter.avg),
                ]))

    return timer.avg, loss_meter.avg, err_meter.avg
def train(model, train_set, test_set, save, n_epochs=300, valid_size=5000,
          batch_size=64, lr=0.1, wd=0.0001, momentum=0.9, seed=None, info=''):
    """Train `model` with SGD, checkpointing the best model under `save` + `info`.

    Writes per-epoch statistics to results.csv, saves the lowest-error model to
    model.dat and the latest model to current.dat, then reports the final test
    error of the best checkpoint.

    NOTE(review): despite its name, `valid_size` is never used — evaluation each
    epoch runs on the test set, so model selection leaks test data. Confirm
    whether a real validation split was intended.
    """
    save += info
    if not os.path.exists(save):
        os.makedirs(save)
    if not os.path.isdir(save):
        raise Exception('%s is not a dir' % save)
    best_error = 1
    if seed is not None:
        torch.manual_seed(seed)
    # Data loaders
    test_loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size, shuffle=False,
                                              pin_memory=(torch.cuda.is_available()), num_workers=0)
    train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=True,
                                               pin_memory=(torch.cuda.is_available()), num_workers=0)
    # Model on cuda
    if torch.cuda.is_available():
        model = model.cuda()
    # Wrap model for multi-GPUs, if necessary
    model_wrapper = model
    if torch.cuda.is_available() and torch.cuda.device_count() > 1:
        model_wrapper = torch.nn.DataParallel(model).cuda()
    # Optimizer: SGD with Nesterov momentum; LR drops by 10x at 50% and 75% of training.
    optimizer = torch.optim.SGD(model_wrapper.parameters(), lr=lr, momentum=momentum, nesterov=True, weight_decay=wd)
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[0.5 * n_epochs, 0.75 * n_epochs],
                                                     gamma=0.1)
    # Start log
    with open(os.path.join(save, 'results.csv'), 'w') as f:
        f.write('epoch,train_loss,train_error,valid_loss,valid_error,test_error\n')
    # Train model
    for epoch in range(n_epochs):
        # NOTE(review): scheduler.step() runs before the epoch's optimizer steps;
        # on PyTorch >= 1.1 the scheduler should step after training the epoch,
        # and milestones are floats rather than ints — confirm the intended
        # learning-rate schedule.
        scheduler.step()
        _, train_loss, train_error = train_epoch(
            model=model_wrapper,
            loader=train_loader,
            optimizer=optimizer,
            epoch=epoch,
            n_epochs=n_epochs,
            info=info
        )
        # Per-epoch evaluation (on the test set — see NOTE above).
        _, valid_loss, valid_error = test_epoch(
            model=model,
            loader=test_loader,
            is_test=True
        )
        # Determine if model is the best
        if valid_error < best_error:
            best_error = valid_error
            print('New best error: %.4f' % best_error)
            torch.save(model.state_dict(), os.path.join(save, 'model.dat'))
        # Log results
        with open(os.path.join(save, 'results.csv'), 'a') as f:
            f.write('%03d,%0.6f,%0.6f,%0.5f,%0.5f,\n' % (
                (epoch + 1),
                train_loss,
                train_error,
                valid_loss,
                valid_error,
            ))
        torch.save(model.state_dict(), os.path.join(save, 'current.dat'))
    # Final test of model on test set: reload the best checkpoint, evaluate,
    # append its error to the log, then restore the latest weights.
    model.load_state_dict(torch.load(os.path.join(save, 'model.dat')))
    test_results = test_epoch(
        model=model,
        loader=test_loader,
        is_test=True
    )
    _, _, test_error = test_results
    with open(os.path.join(save, 'results.csv'), 'a') as f:
        f.write(',,,,,%0.5f\n' % (test_error))
    print('Final test error: %.4f' % test_error)
    model.load_state_dict(torch.load(os.path.join(save, 'current.dat')))
def demo(data, save, valid_size=0, n_epochs=300, batch_size=64, seed=None):
    """
    Trains and evaluates a ResNetCIFAR model on CIFAR-100.
    Args:
        data (str) - path to directory where data should be loaded from/downloaded
        save (str) - path to save the model to
        valid_size (int) - size of validation set (passed through to `train`)
        n_epochs (int) - number of epochs for training (default 300)
        batch_size (int) - size of minibatch (default 64)
        seed (int) - manually set the random seed (default None)
    """
    # Data transforms: per-channel CIFAR-100 statistics (0-255 values scaled to [0, 1]).
    mean = [129.3 / 255, 124.1 / 255, 112.4 / 255]
    stdv = [68.2 / 255, 65.4 / 255, 70.4 / 255]
    # Standard CIFAR augmentation: pad-and-crop plus horizontal flips (train only).
    train_transforms = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean=mean, std=stdv),
    ])
    test_transforms = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=mean, std=stdv),
    ])
    # Datasets (training set download also fetches the test split).
    train_set = datasets.CIFAR100(data, train=True, transform=train_transforms, download=True)
    test_set = datasets.CIFAR100(data, train=False, transform=test_transforms, download=False)
    # Models — ResNetCIFAR hyper-parameters are hard-coded for this experiment;
    # their semantics live in ModelDropOutPatched.
    model = ResNetCIFAR(Classes=100, BlocksPerStage=[1,1,1], PyramidFactor=[1,2,3], Widening=4, Granularity=8, BatchNormalization=True, WeightScale=0)
    train(model=model, train_set=train_set, test_set=test_set, save=save,
          valid_size=valid_size, n_epochs=n_epochs, batch_size=batch_size, lr=0.1, seed=seed, info='C100P1.2DropOutPatched')
    # Train the model
    print('Done!')
demo('./data', './weights')
| 33.226481
| 150
| 0.6013
|
4a00ec98739ca575b465bf9e7b34486eb4bdc5d1
| 17,770
|
py
|
Python
|
pymc3/step_methods/hmc/nuts.py
|
mcnoat/pymc3
|
8b1f64cce32db3357301b88bbe9f7108733ac70a
|
[
"Apache-2.0"
] | 4
|
2021-03-26T10:13:46.000Z
|
2021-05-21T15:45:20.000Z
|
pymc3/step_methods/hmc/nuts.py
|
mcnoat/pymc3
|
8b1f64cce32db3357301b88bbe9f7108733ac70a
|
[
"Apache-2.0"
] | 2
|
2017-11-27T00:11:53.000Z
|
2017-11-27T00:42:36.000Z
|
pymc3/step_methods/hmc/nuts.py
|
mcnoat/pymc3
|
8b1f64cce32db3357301b88bbe9f7108733ac70a
|
[
"Apache-2.0"
] | 6
|
2017-11-03T01:15:11.000Z
|
2022-03-17T12:26:45.000Z
|
# Copyright 2020 The PyMC Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import namedtuple
import numpy as np
from pymc3.backends.report import SamplerWarning, WarningType
from pymc3.distributions import BART
from pymc3.math import logbern, logdiffexp_numpy
from pymc3.step_methods.arraystep import Competence
from pymc3.step_methods.hmc.base_hmc import BaseHMC, DivergenceInfo, HMCStepData
from pymc3.step_methods.hmc.integration import IntegrationError
from pymc3.theanof import floatX
from pymc3.vartypes import continuous_types
__all__ = ["NUTS"]
class NUTS(BaseHMC):
    r"""A sampler for continuous variables based on Hamiltonian mechanics.

    NUTS automatically tunes the step size and the number of steps per
    sample. A detailed description can be found at [1], "Algorithm 6:
    Efficient No-U-Turn Sampler with Dual Averaging".

    NUTS provides a number of statistics that can be accessed with
    `trace.get_sampler_stats`:

    - `mean_tree_accept`: The mean acceptance probability for the tree
      that generated this sample. The mean of these values across all
      samples but the burn-in should be approximately `target_accept`
      (the default for this is 0.8).
    - `diverging`: Whether the trajectory for this sample diverged. If
      there are any divergences after burnin, this indicates that
      the results might not be reliable. Reparametrization can
      often help, but you can also try to increase `target_accept` to
      something like 0.9 or 0.95.
    - `energy`: The energy at the point in phase-space where the sample
      was accepted. This can be used to identify posteriors with
      problematically long tails. See below for an example.
    - `energy_change`: The difference in energy between the start and
      the end of the trajectory. For a perfect integrator this would
      always be zero.
    - `max_energy_change`: The maximum difference in energy along the
      whole trajectory.
    - `depth`: The depth of the tree that was used to generate this sample
    - `tree_size`: The number of leafs of the sampling tree, when the
      sample was accepted. This is usually a bit less than
      `2 ** depth`. If the tree size is large, the sampler is
      using a lot of leapfrog steps to find the next sample. This can for
      example happen if there are strong correlations in the posterior,
      if the posterior has long tails, if there are regions of high
      curvature ("funnels"), or if the variance estimates in the mass
      matrix are inaccurate. Reparametrisation of the model or estimating
      the posterior variances from past samples might help.
    - `tune`: This is `True`, if step size adaptation was turned on when
      this sample was generated.
    - `step_size`: The step size used for this sample.
    - `step_size_bar`: The current best known step-size. After the tuning
      samples, the step size is set to this value. This should converge
      during tuning.
    - `model_logp`: The model log-likelihood for this sample.
    - `process_time_diff`: The time it took to draw the sample, as defined
      by the python standard library `time.process_time`. This counts all
      the CPU time, including worker processes in BLAS and OpenMP.
    - `perf_counter_diff`: The time it took to draw the sample, as defined
      by the python standard library `time.perf_counter` (wall time).
    - `perf_counter_start`: The value of `time.perf_counter` at the beginning
      of the computation of the draw.

    References
    ----------
    .. [1] Hoffman, Matthew D., & Gelman, Andrew. (2011). The No-U-Turn
       Sampler: Adaptively Setting Path Lengths in Hamiltonian Monte Carlo.
    """

    name = "nuts"

    default_blocked = True
    generates_stats = True
    stats_dtypes = [
        {
            "depth": np.int64,
            "step_size": np.float64,
            # Use the builtin `bool` here: the `np.bool` alias was deprecated in
            # NumPy 1.20 and removed in 1.24, so referencing it crashes at import.
            "tune": bool,
            "mean_tree_accept": np.float64,
            "step_size_bar": np.float64,
            "tree_size": np.float64,
            "diverging": bool,
            "energy_error": np.float64,
            "energy": np.float64,
            "max_energy_error": np.float64,
            "model_logp": np.float64,
            "process_time_diff": np.float64,
            "perf_counter_diff": np.float64,
            "perf_counter_start": np.float64,
        }
    ]

    def __init__(self, vars=None, max_treedepth=10, early_max_treedepth=8, **kwargs):
        r"""Set up the No-U-Turn sampler.

        Parameters
        ----------
        vars: list of Theano variables, default all continuous vars
        Emax: float, default 1000
            Maximum energy change allowed during leapfrog steps. Larger
            deviations will abort the integration.
        target_accept: float, default .8
            Adapt the step size such that the average acceptance
            probability across the trajectories are close to target_accept.
            Higher values for target_accept lead to smaller step sizes.
            Setting this to higher values like 0.9 or 0.99 can help
            with sampling from difficult posteriors. Valid values are
            between 0 and 1 (exclusive).
        step_scale: float, default 0.25
            Size of steps to take, automatically scaled down by `1/n**(1/4)`.
            If step size adaptation is switched off, the resulting step size
            is used. If adaptation is enabled, it is used as initial guess.
        gamma: float, default .05
        k: float, default .75
            Parameter for dual averaging for step size adaptation. Values
            between 0.5 and 1 (exclusive) are admissible. Higher values
            correspond to slower adaptation.
        t0: int, default 10
            Parameter for dual averaging. Higher values slow initial
            adaptation.
        adapt_step_size: bool, default=True
            Whether step size adaptation should be enabled. If this is
            disabled, `k`, `t0`, `gamma` and `target_accept` are ignored.
        max_treedepth: int, default=10
            The maximum tree depth. Trajectories are stopped when this
            depth is reached.
        early_max_treedepth: int, default=8
            The maximum tree depth during the first 200 tuning samples.
        scaling: array_like, ndim = {1,2}
            The inverse mass, or precision matrix. One dimensional arrays are
            interpreted as diagonal matrices. If `is_cov` is set to True,
            this will be interpreded as the mass or covariance matrix.
        is_cov: bool, default=False
            Treat the scaling as mass or covariance matrix.
        potential: Potential, optional
            An object that represents the Hamiltonian with methods `velocity`,
            `energy`, and `random` methods. It can be specified instead
            of the scaling matrix.
        model: pymc3.Model
            The model
        kwargs: passed to BaseHMC

        Notes
        -----
        The step size adaptation stops when `self.tune` is set to False.
        This is usually achieved by setting the `tune` parameter if
        `pm.sample` to the desired number of tuning steps.
        """
        super().__init__(vars, **kwargs)

        self.max_treedepth = max_treedepth
        self.early_max_treedepth = early_max_treedepth
        # Counts post-tuning draws that hit the depth cap; used by warnings().
        self._reached_max_treedepth = 0

    def _hamiltonian_step(self, start, p0, step_size):
        # During the first 200 tuning iterations use a shallower tree to keep
        # early, badly-tuned trajectories cheap.
        if self.tune and self.iter_count < 200:
            max_treedepth = self.early_max_treedepth
        else:
            max_treedepth = self.max_treedepth

        tree = _Tree(len(p0), self.integrator, start, step_size, self.Emax)

        for _ in range(max_treedepth):
            # Choose a random direction (-1 or 1) and double the trajectory.
            direction = logbern(np.log(0.5)) * 2 - 1
            divergence_info, turning = tree.extend(direction)

            if divergence_info or turning:
                break
        else:
            # Loop exhausted without a U-turn or divergence: the depth cap was
            # the stopping reason. Only count it once tuning is over.
            if not self.tune:
                self._reached_max_treedepth += 1

        stats = tree.stats()
        accept_stat = stats["mean_tree_accept"]
        return HMCStepData(tree.proposal, accept_stat, divergence_info, stats)

    @staticmethod
    def competence(var, has_grad):
        """Check how appropriate this class is for sampling a random variable."""
        # NUTS needs gradients and continuous support; BART variables have
        # their own specialized sampler.
        if var.dtype in continuous_types and has_grad and not isinstance(var.distribution, BART):
            return Competence.IDEAL
        return Competence.INCOMPATIBLE

    def warnings(self):
        """Return sampler warnings, adding one if the depth cap was hit often."""
        warnings = super().warnings()
        n_samples = self._samples_after_tune
        n_treedepth = self._reached_max_treedepth

        # Warn when more than 5% of post-tuning draws were truncated by the cap.
        if n_samples > 0 and n_treedepth / float(n_samples) > 0.05:
            msg = (
                "The chain reached the maximum tree depth. Increase "
                "max_treedepth, increase target_accept or reparameterize."
            )
            warn = SamplerWarning(WarningType.TREEDEPTH, msg, "warn")
            warnings.append(warn)
        return warnings
# A proposal for the next position in phase space.
#   q / q_grad: proposed position and the gradient there (taken from a State)
#   energy: Hamiltonian energy at the proposal point
#   log_p_accept_weighted: log of the size-weighted acceptance probability
#   logp: model log-probability at the proposal (State.model_logp)
Proposal = namedtuple("Proposal", "q, q_grad, energy, log_p_accept_weighted, logp")
# A subtree of the binary tree built by nuts.
#   left / right: outermost integrator states of the subtree
#   p_sum: sum of momenta over the subtree (used for the U-turn check)
#   proposal: the Proposal currently selected from this subtree
#   log_size / log_weighted_accept_sum / n_proposals: multinomial-sampling
#   weight, accumulated acceptance statistic, and leapfrog-step count
Subtree = namedtuple(
    "Subtree",
    "left, right, p_sum, proposal, log_size, log_weighted_accept_sum, n_proposals",
)
class _Tree:
    def __init__(self, ndim, integrator, start, step_size, Emax):
        """Binary tree from the NUTS algorithm.

        Parameters
        ----------
        ndim: int
            Dimensionality of the position/momentum space.
        integrator: object
            Performs the leapfrog integration steps when the tree is extended.
        start: integration.State
            The starting point of the trajectory.
        step_size: float
            The step size to use in this tree
        Emax: float
            The maximum energy change to accept before aborting the
            transition as diverging.
        """
        self.ndim = ndim
        self.integrator = integrator
        self.start = start
        self.step_size = step_size
        self.Emax = Emax
        self.start_energy = np.array(start.energy)

        # Both ends of the trajectory begin at the start state; the initial
        # proposal is the start point itself.
        self.left = self.right = start
        self.proposal = Proposal(start.q, start.q_grad, start.energy, 1.0, start.model_logp)
        self.depth = 0
        # log_size starts at log(1) = 0; the acceptance accumulator starts
        # empty (log(0) = -inf) and is filled in as leaves are added.
        self.log_size = 0
        self.log_weighted_accept_sum = -np.inf
        self.mean_tree_accept = 0.0
        # Number of leapfrog evaluations performed so far.
        self.n_proposals = 0
        # Running sum of momenta across the trajectory (for the U-turn check).
        self.p_sum = start.p.copy()
        self.max_energy_change = 0
def extend(self, direction):
"""Double the treesize by extending the tree in the given direction.
If direction is larger than 0, extend it to the right, otherwise
extend it to the left.
Return a tuple `(diverging, turning)` of type (DivergenceInfo, bool).
`diverging` indicates, that the tree extension was aborted because
the energy change exceeded `self.Emax`. `turning` indicates that
the tree extension was stopped because the termination criterior
was reached (the trajectory is turning back).
"""
if direction > 0:
tree, diverging, turning = self._build_subtree(
self.right, self.depth, floatX(np.asarray(self.step_size))
)
leftmost_begin, leftmost_end = self.left, self.right
rightmost_begin, rightmost_end = tree.left, tree.right
leftmost_p_sum = self.p_sum
rightmost_p_sum = tree.p_sum
self.right = tree.right
else:
tree, diverging, turning = self._build_subtree(
self.left, self.depth, floatX(np.asarray(-self.step_size))
)
leftmost_begin, leftmost_end = tree.right, tree.left
rightmost_begin, rightmost_end = self.left, self.right
leftmost_p_sum = tree.p_sum
rightmost_p_sum = self.p_sum
self.left = tree.right
self.depth += 1
self.n_proposals += tree.n_proposals
if diverging or turning:
return diverging, turning
size1, size2 = self.log_size, tree.log_size
if logbern(size2 - size1):
self.proposal = tree.proposal
self.log_size = np.logaddexp(self.log_size, tree.log_size)
self.log_weighted_accept_sum = np.logaddexp(
self.log_weighted_accept_sum, tree.log_weighted_accept_sum
)
self.p_sum[:] += tree.p_sum
# Additional turning check only when tree depth > 0 to avoid redundant work
if self.depth > 0:
left, right = self.left, self.right
p_sum = self.p_sum
turning = (p_sum.dot(left.v) <= 0) or (p_sum.dot(right.v) <= 0)
p_sum1 = leftmost_p_sum + rightmost_begin.p
turning1 = (p_sum1.dot(leftmost_begin.v) <= 0) or (p_sum1.dot(rightmost_begin.v) <= 0)
p_sum2 = leftmost_end.p + rightmost_p_sum
turning2 = (p_sum2.dot(leftmost_end.v) <= 0) or (p_sum2.dot(rightmost_end.v) <= 0)
turning = turning | turning1 | turning2
return diverging, turning
def _single_step(self, left, epsilon):
"""Perform a leapfrog step and handle error cases."""
try:
right = self.integrator.step(epsilon, left)
except IntegrationError as err:
error_msg = str(err)
error = err
right = None
else:
# h - H0
energy_change = right.energy - self.start_energy
if np.isnan(energy_change):
energy_change = np.inf
if np.abs(energy_change) > np.abs(self.max_energy_change):
self.max_energy_change = energy_change
if np.abs(energy_change) < self.Emax:
# Acceptance statistic
# e^{H(q_0, p_0) - H(q_n, p_n)} max(1, e^{H(q_0, p_0) - H(q_n, p_n)})
# Saturated Metropolis accept probability with Boltzmann weight
# if h - H0 < 0
log_p_accept_weighted = -energy_change + min(0.0, -energy_change)
log_size = -energy_change
proposal = Proposal(
right.q,
right.q_grad,
right.energy,
log_p_accept_weighted,
right.model_logp,
)
tree = Subtree(right, right, right.p, proposal, log_size, log_p_accept_weighted, 1)
return tree, None, False
else:
error_msg = "Energy change in leapfrog step is too large: %s." % energy_change
error = None
tree = Subtree(None, None, None, None, -np.inf, -np.inf, 1)
divergance_info = DivergenceInfo(error_msg, error, left, right)
return tree, divergance_info, False
def _build_subtree(self, left, depth, epsilon):
if depth == 0:
return self._single_step(left, epsilon)
tree1, diverging, turning = self._build_subtree(left, depth - 1, epsilon)
if diverging or turning:
return tree1, diverging, turning
tree2, diverging, turning = self._build_subtree(tree1.right, depth - 1, epsilon)
left, right = tree1.left, tree2.right
if not (diverging or turning):
p_sum = tree1.p_sum + tree2.p_sum
turning = (p_sum.dot(left.v) <= 0) or (p_sum.dot(right.v) <= 0)
# Additional U turn check only when depth > 1 to avoid redundant work.
if depth - 1 > 0:
p_sum1 = tree1.p_sum + tree2.left.p
turning1 = (p_sum1.dot(tree1.left.v) <= 0) or (p_sum1.dot(tree2.left.v) <= 0)
p_sum2 = tree1.right.p + tree2.p_sum
turning2 = (p_sum2.dot(tree1.right.v) <= 0) or (p_sum2.dot(tree2.right.v) <= 0)
turning = turning | turning1 | turning2
log_size = np.logaddexp(tree1.log_size, tree2.log_size)
log_weighted_accept_sum = np.logaddexp(
tree1.log_weighted_accept_sum, tree2.log_weighted_accept_sum
)
if logbern(tree2.log_size - log_size):
proposal = tree2.proposal
else:
proposal = tree1.proposal
else:
p_sum = tree1.p_sum
log_size = tree1.log_size
log_weighted_accept_sum = tree1.log_weighted_accept_sum
proposal = tree1.proposal
n_proposals = tree1.n_proposals + tree2.n_proposals
tree = Subtree(left, right, p_sum, proposal, log_size, log_weighted_accept_sum, n_proposals)
return tree, diverging, turning
def stats(self):
# Update accept stat if any subtrees were accepted
if self.log_size > 0:
# Remove contribution from initial state which is always a perfect
# accept
log_sum_weight = logdiffexp_numpy(self.log_size, 0.0)
self.mean_tree_accept = np.exp(self.log_weighted_accept_sum - log_sum_weight)
return {
"depth": self.depth,
"mean_tree_accept": self.mean_tree_accept,
"energy_error": self.proposal.energy - self.start.energy,
"energy": self.proposal.energy,
"tree_size": self.n_proposals,
"max_energy_error": self.max_energy_change,
"model_logp": self.proposal.logp,
}
| 42.511962
| 100
| 0.632696
|
4a00ecb2516a935447f54d97356c81c106bbc17a
| 3,520
|
py
|
Python
|
code/tf_string_utils.py
|
zkk995/NonSeqDIN
|
a71dfe615b782c70138eb1e7571b2f781858700a
|
[
"Apache-2.0"
] | null | null | null |
code/tf_string_utils.py
|
zkk995/NonSeqDIN
|
a71dfe615b782c70138eb1e7571b2f781858700a
|
[
"Apache-2.0"
] | 1
|
2021-05-25T09:40:12.000Z
|
2021-05-25T09:40:12.000Z
|
code/tf_string_utils.py
|
zkk995/NonSeqDIN
|
a71dfe615b782c70138eb1e7571b2f781858700a
|
[
"Apache-2.0"
] | 1
|
2021-05-25T09:37:45.000Z
|
2021-05-25T09:37:45.000Z
|
import tensorflow as tf
def string_seq_to_tensor(lines, max_len=64, sep=',', out_type=tf.float32, mode=1):
    '''Parse delimiter-separated number strings into a dense 2-D tensor.

    Example::

        lines = ['1,2,3', '3,4']
        # returns
        # array([[1., 2., 3.],
        #        [3., 4., 0.]], dtype=float32)

    Equivalent to PAITF's ``trans_csv_id2dense(records, max_id, id_as_value=True)``.

    Args:
        lines: 1-D string tensor (one record per row).
        max_len: maximum number of values kept per row; extras are truncated.
        sep: delimiter between values within one record.
        out_type: numeric dtype of the result.
        mode: if 1 the second dimension is the longest (truncated) row;
            otherwise it is fixed to exactly ``max_len`` columns.
    '''
    sps = tf.string_split(lines, delimiter=sep)
    # Keep at most max_len columns of the sparse split result.
    spss = tf.sparse_slice(sps, [0, 0], [sps.dense_shape[0], int(max_len)])
    dense_shape = tf.stack((spss.dense_shape[0], int(max_len)))
    sp_x = tf.SparseTensor(indices=spss.indices,
                           values=tf.string_to_number(spss.values, out_type=out_type),
                           dense_shape=spss.dense_shape if mode == 1 else dense_shape)
    return tf.sparse.to_dense(sp_x)
def string_kv_to_tensor(lines, max_len=64, sep='|', kv_sep=':', val_sep='^', num_vals=3, out_type=tf.float32):
    '''Parse key:value records into dense tensors, one tensor per field.

    Example::

        lines = ["12:4^3^3|1:5^3^3|88:6^3^3|1:3^3^3|2:100^3^3", "12:4^3^3|1:5^3^3"]
        key, v1, v2, v3 = string_kv_to_tensor(lines, 100)

    Args:
        lines: 1-D string tensor, pairs separated by ``sep``.
        max_len: maximum number of key/value pairs kept per row.
        kv_sep: separator between a key and its packed values.
        val_sep: separator between the individual values of one key.
        num_vals: how many values are packed per key.
        out_type: numeric dtype of the resulting dense tensors.

    Returns:
        A list of dense tensors: the keys first, followed by ``num_vals``
        value tensors (a single one when ``num_vals == 1``).
    '''
    sps = tf.string_split(lines, delimiter=sep)
    spss = tf.sparse_slice(sps, [0, 0], [sps.dense_shape[0], int(max_len)])
    splits = tf.string_split(spss.values, kv_sep)
    id_vals = tf.reshape(splits.values, splits.dense_shape)
    col_ids, vals = tf.split(id_vals, num_or_size_splits=2, axis=1)
    def to_dense(vs):
        # Re-attach the parsed numbers to the original row/column layout.
        _vs = tf.string_to_number(vs, out_type=out_type)
        _vs = tf.SparseTensor(indices=spss.indices, values=_vs,
                              dense_shape=spss.dense_shape)
        return tf.sparse.to_dense(_vs)
    vv = []
    # key
    vv.append(to_dense(col_ids[:, 0]))
    # values
    if num_vals > 1:
        _vals = tf.string_split(vals[:, 0], val_sep)
        _vals = tf.reshape(_vals.values, _vals.dense_shape)
        _vals = tf.split(_vals, num_or_size_splits=num_vals, axis=1)
        _list = [to_dense(v[:, 0]) for v in _vals]
        vv.extend(_list)
    if num_vals == 1:
        vv.append(to_dense(vals[:, 0]))
    return vv
def string_kv_to_sparse(lines, num_cols, sep='|', kv_sep=':', val_sep='^', num_vals=3, hash_key=False, is_list=False):
    '''Parse key:value records into the components of a sparse tensor.

    Example::

        lines = ["12:4^3^3|1:5^3^3|88:6^3^3|1:3^3^3|2:100^3^3", "12:4^3^3|1:5^3^3"]
        indices, (v1, v2, v3), dense_shape = string_kv_to_sparse(lines, 100)

    The keys become column indices (hashed into ``num_cols`` buckets when
    ``hash_key`` is set, otherwise parsed as integers).
    NOTE(review): values are parsed as int64 when ``num_vals > 1`` but as
    float32 when ``num_vals == 1`` — confirm this asymmetry is intended.
    Returns ``None`` if ``num_vals`` is neither 1 nor > 1.
    '''
    if isinstance(lines, tf.SparseTensor):
        columns = tf.string_split(lines.values, sep)
        num_rows = lines.dense_shape[0]
    else:
        columns = tf.string_split(lines, sep)
        num_rows = columns.dense_shape[0]
    splits = tf.string_split(columns.values, kv_sep)
    id_vals = tf.reshape(splits.values, splits.dense_shape)
    col_ids, vals = tf.split(id_vals, num_or_size_splits=2, axis=1)
    if hash_key:
        col_ids = tf.string_to_hash_bucket_fast(col_ids[:, 0], num_cols)
    else:
        col_ids = tf.string_to_number(col_ids[:, 0], out_type=tf.int64)
    # Sparse indices: (row of the original record, parsed/hashed key).
    indices = tf.stack((columns.indices[:, 0], col_ids), axis=-1)
    dense_shape = tf.stack([num_rows, num_cols])
    if num_vals > 1:
        _vals = tf.string_split(vals[:, 0], val_sep)
        _vals = tf.reshape(_vals.values, _vals.dense_shape)
        _vals = tf.split(_vals, num_or_size_splits=num_vals, axis=1)
        values_list = [tf.string_to_number(v[:, 0], out_type=tf.int64) for v in _vals]
        return indices, values_list, dense_shape
    if num_vals == 1:
        values = tf.string_to_number(vals[:, 0], out_type=tf.float32)
        if is_list:
            values = [values]
        return indices, values, dense_shape
    return None
| 39.550562
| 118
| 0.625852
|
4a00ecf7085d8d4484978bce42812cb0255b3cc1
| 946
|
py
|
Python
|
kubernetes_asyncio/test/test_v1_affinity.py
|
olitheolix/kubernetes_asyncio
|
344426793e4e4b653bcd8e4a29c6fa4766e1fff7
|
[
"Apache-2.0"
] | 1
|
2020-03-25T01:24:27.000Z
|
2020-03-25T01:24:27.000Z
|
kubernetes_asyncio/test/test_v1_affinity.py
|
olitheolix/kubernetes_asyncio
|
344426793e4e4b653bcd8e4a29c6fa4766e1fff7
|
[
"Apache-2.0"
] | null | null | null |
kubernetes_asyncio/test/test_v1_affinity.py
|
olitheolix/kubernetes_asyncio
|
344426793e4e4b653bcd8e4a29c6fa4766e1fff7
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v1.10.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import kubernetes_asyncio.client
from kubernetes_asyncio.client.models.v1_affinity import V1Affinity # noqa: E501
from kubernetes_asyncio.client.rest import ApiException
class TestV1Affinity(unittest.TestCase):
    """Unit test stubs for the V1Affinity model."""

    def setUp(self):
        """No fixtures are required for this stub."""

    def tearDown(self):
        """Nothing to clean up."""

    def testV1Affinity(self):
        """Test V1Affinity (construction stub)."""
        # FIXME: construct object with mandatory attributes with example values
        # model = kubernetes_asyncio.client.models.v1_affinity.V1Affinity()  # noqa: E501
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| 23.073171
| 119
| 0.709302
|
4a00ed6eabe972eab7011fffd07ae26b7e98de05
| 3,004
|
py
|
Python
|
figure_depth.py
|
idiap/psfestimation
|
1f3cbf594fd5a36675ff2941fa6e292a935f1aae
|
[
"BSD-3-Clause"
] | 17
|
2020-06-16T20:58:48.000Z
|
2022-01-18T10:53:57.000Z
|
figure_depth.py
|
idiap/psfestimation
|
1f3cbf594fd5a36675ff2941fa6e292a935f1aae
|
[
"BSD-3-Clause"
] | 2
|
2020-06-17T20:30:54.000Z
|
2021-09-17T10:44:33.000Z
|
figure_depth.py
|
idiap/psfestimation
|
1f3cbf594fd5a36675ff2941fa6e292a935f1aae
|
[
"BSD-3-Clause"
] | 1
|
2021-03-20T12:59:36.000Z
|
2021-03-20T12:59:36.000Z
|
'''
Code for the implementation of
"Spatially-Variant CNN-based Point Spread Function Estimation for Blind Deconvolution and Depth Estimation in Optical Microscopy"
Copyright (c) 2020 Idiap Research Institute, https://www.idiap.ch/
Written by Adrian Shajkofci <adrian.shajkofci@idiap.ch>,
All rights reserved.
This file is part of Spatially-Variant CNN-based Point Spread Function Estimation.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of mosquitto nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
'''
import numpy as np
from matplotlib import pyplot as plt
from sklearn.metrics import r2_score
import math
# Parameters of the microscope
degrees_inclination = 6
radians_inclination = degrees_inclination * math.pi / 180.
resolution = 0.32
size = 2048
num_cases = 16
max_view = resolution*size #micrometers 0.32 micron per pixel * 2048 pixel
# Depth spanned across the field of view by the inclined sample plane.
max_depth = max_view * math.tan(radians_inclination)
print('Max depth : {} um'.format(max_depth))
# This file is generated by test_plane_stats
table = np.load('feat13.npy')
x = np.linspace(0,num_cases,num_cases)
# Sub-sample one column per case (every 128th column, starting at 1).
table = table[:,1::128]
y = table.mean(axis=0)
# Rescale the mean feature so its range matches the physical depth range.
alpha = max_depth / (y.max()-y.min())
y = y*alpha
x = x*max_view/num_cases
print('Y range :{}'.format((y.max()-y.min())))
var = table.var(axis=0)
# Linear fit of depth vs image position; R^2 and mean absolute error.
fit = np.polyfit(x,y,1)
fit_fn = np.poly1d(fit)
fitted_x = fit_fn(x)
rsquare = r2_score(y, fitted_x)
error = ((y - fitted_x).__abs__()).mean()
plt.figure(dpi=300, figsize=(6,3))
plt.plot(x,y, '.', x, fitted_x, '--k')
plt.title(r'$R^2=$' + '{:0.3f} / error = {:0.5f}'.format(rsquare, error))
plt.xlabel('Y direction in the input image [microns]')
plt.ylabel('Depth [microns]')
plt.errorbar(x,y,yerr=var*max_view)
plt.show()
| 38.512821
| 129
| 0.76731
|
4a00efbee0cacc553d689c9c435952e3264eb2e6
| 870
|
py
|
Python
|
clase 2/algoritmos.py
|
cardenasG1238/computacion_para_ingenieria
|
f57ccd2c72d50fb34c9d8acdce75f6ba3f323f9c
|
[
"Apache-2.0"
] | null | null | null |
clase 2/algoritmos.py
|
cardenasG1238/computacion_para_ingenieria
|
f57ccd2c72d50fb34c9d8acdce75f6ba3f323f9c
|
[
"Apache-2.0"
] | null | null | null |
clase 2/algoritmos.py
|
cardenasG1238/computacion_para_ingenieria
|
f57ccd2c72d50fb34c9d8acdce75f6ba3f323f9c
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 1 10:19:05 2022
@author: HP
"""
# Teaching exercises: counting, summing, multiplying, and parity from 1 to 10.
# 1.- Count the numbers from 1 to 10 and print each one.
cont = 0
while cont < 10:
    cont = cont + 1
    print(cont)
# 2.- Sum the numbers from 1 to 10 (renamed from `sum`, which shadowed the builtin).
total = 0
for num in range(1, 11):
    total = total + num
print(f'la suma total de 1 al 10 es {total}')
# 3.- Multiply the numbers from 1 to 10 (i.e. 10!).
mult = 1
for num in range(1, 11):
    mult = mult * num
print("la multi total es ", mult)
# 4.- Print which numbers from 1 to 10 are even and which are odd.
# Bug fix: the test used `num % 22`, which is never 0 for 1..10, so every
# number was reported as odd. Even/odd parity is `num % 2`.
for num in range(1, 11):
    if num % 2 == 0:
        print("numeros pares:", num)
    else:
        print("numeros impares:", num)
# Remainder example. The message previously claimed "10 % 2" while the code
# computes 3 % 2; the message now matches the computation.
resto = 3 % 2
print("el resto 3 % 2 es", resto)
| 20.232558
| 58
| 0.56092
|
4a00eff388ca18422c010785c6300842e65a170a
| 293
|
py
|
Python
|
deliverable3/ui/functions/task_hyperbolic_sine.py
|
rmanaem/eternity
|
e99910747ee3ff7c1342f8ad9802e9a060ca8fc5
|
[
"MIT"
] | null | null | null |
deliverable3/ui/functions/task_hyperbolic_sine.py
|
rmanaem/eternity
|
e99910747ee3ff7c1342f8ad9802e9a060ca8fc5
|
[
"MIT"
] | null | null | null |
deliverable3/ui/functions/task_hyperbolic_sine.py
|
rmanaem/eternity
|
e99910747ee3ff7c1342f8ad9802e9a060ca8fc5
|
[
"MIT"
] | null | null | null |
# Exercise driver for the HyperbolicSine class: evaluates sinh at real and
# complex arguments and combines the results.
from hyperbolic_sine import HyperbolicSine
#Checking Sinh(2.5)
sinh = HyperbolicSine(2.5)
x = sinh.calculate_sinh()
#Checking Sinh(3)
sinh.set_value(3)
y = sinh.calculate_sinh()
#Checking Sinh(2+2j)
sinh.set_value(2+2j)
z = sinh.calculate_sinh()
print(x)
print(y)
print(z)
# Combined expression sinh(2.5)*sinh(3) + sinh(2+2j).
print((x*y) + z)
| 16.277778
| 42
| 0.730375
|
4a00f0bc5f715049f165f987f4a8c5f2e1c9eb56
| 9,312
|
py
|
Python
|
analysis_collection/tracing_sim/results_exponential_withoutQ_NMEAS_100_ONLYSAVETIME_False/simulation.py
|
benmaier/DigCT
|
62fc3fddb7600e2a43761e08618b2e3df423569c
|
[
"MIT"
] | null | null | null |
analysis_collection/tracing_sim/results_exponential_withoutQ_NMEAS_100_ONLYSAVETIME_False/simulation.py
|
benmaier/DigCT
|
62fc3fddb7600e2a43761e08618b2e3df423569c
|
[
"MIT"
] | null | null | null |
analysis_collection/tracing_sim/results_exponential_withoutQ_NMEAS_100_ONLYSAVETIME_False/simulation.py
|
benmaier/DigCT
|
62fc3fddb7600e2a43761e08618b2e3df423569c
|
[
"MIT"
] | 1
|
2021-07-12T13:50:35.000Z
|
2021-07-12T13:50:35.000Z
|
import epipack
import numpy as np
from epipack.stochastic_epi_models import StochasticEpiModel
from math import exp
from numpy import random
import networkx as nx
#from smallworld import get_smallworld_graph
from scipy.stats import expon
import numpy as np
import networkx as nx
def _edge(i,j):
if i > j:
return (j,i)
elif j > i:
return (i,j)
else:
raise ValueError('self-loop')
def get_expon_small_world(N,k0,more_lattice_like=False,node_creation_order='random'):
    """Build a ring-like graph with an exponential degree distribution.

    Each node draws a target degree from Expon(scale=k0); stubs are then
    wired to the nearest ring neighbors (alternating right/left), which
    yields local clustering. ``node_creation_order`` controls which nodes
    claim their stubs first ('random', 'desc', or 'asc').
    NOTE(review): ``more_lattice_like`` is accepted but never used here.
    """
    G = nx.empty_graph(N)
    degree_seq = [ int(k) for k in expon.rvs(scale=k0,size=N)]
    stubs = list(degree_seq)
    # Degree sum must be even for all stubs to be pairable.
    if sum(stubs) % 2 == 1:
        stubs[np.random.randint(0,N-1)] += 1
    if node_creation_order == 'random':
        # generates small world but locally clustered
        order = np.random.permutation(N)
    elif node_creation_order == 'desc':
        # generates locally clustered
        order = np.argsort(stubs)[::-1]
    elif node_creation_order == 'asc':
        # generates locally clustered with short paths
        order = np.argsort(stubs)
    else:
        raise ValueError("`node_creation_order` must be 'random', 'desc', or 'asc', not " + node_creation_order)
    edges = []
    cnt = 0
    for i in order:
        d = 1
        up = True
        # Alternate right/left around the ring at growing distance d until
        # node i's stubs are used up or half the ring has been scanned.
        while stubs[i] > 0:
            if up:
                j = (i+d) % N
            else:
                j = (i-d) % N
                d += 1
            if i == j:
                break
            if stubs[j] > 0:#and not G.has_edge(i,j):
                edges.append(_edge(int(i),int(j)))
                #G.add_edge(i,j)
                stubs[i] -= 1
                stubs[j] -= 1
            up = not up
            if d >= N//2:
                break
            #f d > N // 2:
            #    print(stubs[i], np.mean(stubs), np.min(stubs),np.max(stubs),cnt)
            #    raise ValueError('Couldn''t find stub')
        cnt += 1
    #print("leftover stubs:",sum(stubs))
    #print("number of nodes with leftover stubs:",np.count_nonzero(stubs))
    #print("len(edges) = ", len(edges), "len(set(edges)) = ", len(set(edges)), "difference = ", len(edges) - len(set(edges)))
    G.add_edges_from(edges)
    return G
def confignetwork(N, parameter,**kwargs):
    """Build a configuration-model network with exponential degree distribution.

    Returns ``(edge_weight_tuples, k_norm)``: the unit-weight edge list and
    the resulting mean degree.
    """
    p = parameter
    k0 = p['number_of_contacts']
    def expodegree(x):
        # Exponential degree density with mean k0.
        return 1/k0*exp(-x/k0)
    P = []
    k_i = []
    for i in range(N-1):
        p_k = expodegree(i)
        P.append(p_k)
        k_i.append(i)
    P = np.array(P)
    P /= P.sum()
    def seq(k_i,P):
        # Redraw the degree sequence until its sum is even (required by the
        # configuration model). The initial linspace sums to 1 (odd), which
        # forces at least one draw.
        expected_degree_sequence = np.linspace(0,1,2)
        while sum(expected_degree_sequence) % 2 != 0:
            expected_degree_sequence = np.random.choice(
                k_i,
                N,
                p = P
            )
        return expected_degree_sequence
    expected_degree_sequence = seq(k_i,P)
    G = nx.configuration_model(expected_degree_sequence,create_using = nx.Graph())
    G.remove_edges_from(nx.selfloop_edges(G))
    edge_weight_tuples = [ (e[0], e[1], 1.0) for e in G.edges() ]
    k_norm = 2*len(edge_weight_tuples) / N
    del G
    return edge_weight_tuples, k_norm
def swnetwork(N, parameter,**kwargs):
    """Build a Watts-Strogatz-style small-world network.

    NOTE(review): ``get_smallworld_graph`` comes from the `smallworld`
    package whose import is commented out at the top of this file, so
    calling this function raises NameError unless that import is restored.
    """
    p = parameter
    k_over_2 = int(p['number_of_contacts']/2)
    #beta = 10e-4 #for k = 50, N = 10_000
    #beta = 10e-5 #for k = 20, N = 10_000
    #beta = 10e-6 #for k = 20, N = 20_000
    beta = 10e-7 #for k = 20, N = 200_000 or k0=10
    #beta = 1
    G = get_smallworld_graph(N,k_over_2,beta)
    edge_weight_tuples = [ (e[0], e[1], 1.0) for e in G.edges() ]
    k_norm = 2*len(edge_weight_tuples) / N
    del G
    print(k_norm)
    return edge_weight_tuples, k_norm
def exp_sw_network(N, parameter, **kwargs):
    """Build an exponential-degree small-world network.

    Returns the unit-weight edge list and the resulting mean degree,
    which is also printed.
    """
    mean_contacts = parameter['number_of_contacts']
    graph = get_expon_small_world(N, mean_contacts, node_creation_order='random')
    edge_weight_tuples = [(u, v, 1.0) for u, v in graph.edges()]
    k_norm = 2 * len(edge_weight_tuples) / N
    del graph
    print(k_norm)
    return edge_weight_tuples, k_norm
def simulation_code(kwargs):
    """Run one stochastic SEIR-like contact-tracing simulation.

    ``kwargs`` supplies the arguments of the inner ``mixed`` function.
    Returns the peak values of (R, Ra, X, Xa, C) over the run.
    """
    def mixed(N, parameter, time, sampling_dt,quarantiningS, a, q, y, **kwargs):
        """Simulate a population of app participants ('a'-suffixed
        compartments) mixed with non-participants on a configuration-model
        network. ``a`` is the app participation fraction, ``q`` the
        detection probability, ``y`` the tracing success probability, and
        ``quarantiningS`` toggles quarantining of traced susceptibles.
        """
        p = parameter
        edge_weight_tuples, k_norm = confignetwork(N,parameter)
        # Rate from symptomatic to tested such that a fraction q is detected.
        kappa = (q*p['recovery_rate'])/(1-q)
        # Split the initial infecteds and susceptibles into app
        # participants (a) and non-participants, binomially with fraction a.
        IPa0 = int(random.binomial(p['I_0'], a, 1))
        IP0 = int(p['I_0'] - IPa0)
        Sa0 = int(random.binomial(N-p['I_0'], a, 1))
        S0 = int(N - p['I_0'] - Sa0)
        if quarantiningS == True:
            # Variant with a quarantine compartment Qa for traced susceptibles.
            model = epipack.StochasticEpiModel(['S','E','I_P','I_S','I_A','R','T','X','Sa','Ea','I_Pa','I_Sa','I_Aa','Ra','Ta','Xa','Qa','C'],N, edge_weight_tuples ,directed=False)
            model.set_conditional_link_transmission_processes({
                # When a tested participant isolates (Ta -> Xa), each traced
                # neighbor is isolated with probability y, otherwise counted in C.
                ("Ta", "->", "Xa") : [
                    ("Xa", "I_Pa", y, "Xa", "Ta" ),
                    ("Xa", "I_Sa", y, "Xa", "Ta" ),
                    ("Xa", "I_Aa", y, "Xa", "Ta" ),
                    ("Xa", "Ea", y, "Xa", "Ta" ),
                    ("Xa", "Sa", "->", "Xa", "Qa" ),
                    ("Xa", "I_Pa", (1-y), "Xa", "C" ),
                    ("Xa", "I_Sa", (1-y), "Xa", "C" ),
                    ("Xa", "I_Aa", (1-y), "Xa", "C" ),
                    ("Xa", "Ea", (1-y), "Xa", "C" )]
                    })
            model.set_node_transition_processes([
                    ('E',p['alpha'],'I_P'),
                    ('I_P',(1-p['x'])*p['beta'],'I_S'),
                    ('I_P',p['x']*p['beta'],'I_A'),
                    ('I_A',p['recovery_rate'],'R'),
                    ('I_S',p['recovery_rate'],'R'),
                    ('I_S',kappa,'T'),
                    ('T',p['chi'],'X'),
                    ('Qa',p['omega'],'Sa'),
                    ('Ea',p['alpha'],'I_Pa'),
                    ('I_Pa',(1-p['x'])*p['beta'],'I_Sa'),
                    ('I_Pa',p['x']*p['beta'],'I_Aa'),
                    ('I_Aa',p['recovery_rate'],'Ra'),
                    ('I_Sa',p['recovery_rate'],'Ra'),
                    ('I_Sa',kappa,'Ta'),
                    ('Ta',p["z"]*p['chi'],'Xa'),
                    ('Ta',(1-p["z"])*p['chi'],'X')])
        elif quarantiningS == False:
            # Same model without the Qa quarantine compartment.
            model = epipack.StochasticEpiModel(['S','E','I_P','I_S','I_A','R','T','X','Sa','Ea','I_Pa','I_Sa','I_Aa','Ra','Ta','Xa','C'],N, edge_weight_tuples ,directed=False)
            model.set_conditional_link_transmission_processes({
                ("Ta", "->", "Xa") : [
                    ("Xa", "I_Pa", y, "Xa", "Ta" ),
                    ("Xa", "I_Sa", y, "Xa", "Ta" ),
                    ("Xa", "I_Aa", y, "Xa", "Ta" ),
                    ("Xa", "Ea", y, "Xa", "Ta" ),
                    ("Xa", "I_Pa", (1-y), "Xa", "C" ),
                    ("Xa", "I_Sa", (1-y), "Xa", "C" ),
                    ("Xa", "I_Aa", (1-y), "Xa", "C" ),
                    ("Xa", "Ea", (1-y), "Xa", "C" )]
                    })
            model.set_node_transition_processes([
                    ('E',p['alpha'],'I_P'),
                    ('I_P',(1-p['x'])*p['beta'],'I_S'),
                    ('I_P',p['x']*p['beta'],'I_A'),
                    ('I_A',p['recovery_rate'],'R'),
                    ('I_S',p['recovery_rate'],'R'),
                    ('I_S',kappa,'T'),
                    ('T',p['chi'],'X'),
                    ('Ea',p['alpha'],'I_Pa'),
                    ('I_Pa',(1-p['x'])*p['beta'],'I_Sa'),
                    ('I_Pa',p['x']*p['beta'],'I_Aa'),
                    ('I_Aa',p['recovery_rate'],'Ra'),
                    ('I_Sa',p['recovery_rate'],'Ra'),
                    ('I_Sa',kappa,'Ta'),
                    ('Ta',p["z"]*p['chi'],'Xa'),
                    ('Ta',(1-p["z"])*p['chi'],'X')])
        # Transmission along network links between all infectious/susceptible
        # combinations of participants and non-participants.
        model.set_link_transmission_processes([
                    ('I_Pa','S',p["R0"]/k_norm*p['beta']/2,'I_Pa','E'),
                    ('I_Aa','S',p["R0"]/k_norm*p['recovery_rate']/2,'I_Aa','E'),
                    ('I_Sa','S',p["R0"]/k_norm*p['recovery_rate']/2,'I_Sa','E'),
                    ('I_P','Sa',p["R0"]/k_norm*p['beta']/2,'I_P','Ea'),
                    ('I_A','Sa',p["R0"]/k_norm*p['recovery_rate']/2,'I_A','Ea'),
                    ('I_S','Sa',p["R0"]/k_norm*p['recovery_rate']/2,'I_S','Ea'),
                    ('I_Pa','Sa',p["R0"]/k_norm*p['beta']/2,'I_Pa','Ea'),
                    ('I_Aa','Sa',p["R0"]/k_norm*p['recovery_rate']/2,'I_Aa','Ea'),
                    ('I_Sa','Sa',p["R0"]/k_norm*p['recovery_rate']/2,'I_Sa','Ea'),
                    ('I_P','S',p["R0"]/k_norm*p['beta']/2,'I_P','E'),
                    ('I_A','S',p["R0"]/k_norm*p['recovery_rate']/2,'I_A','E'),
                    ('I_S','S',p["R0"]/k_norm*p['recovery_rate']/2,'I_S','E')])
        model.set_network(N, edge_weight_tuples)
        del edge_weight_tuples
        model.set_random_initial_conditions({ 'Sa': Sa0, 'S': S0, 'I_P': IP0, 'I_Pa': IPa0})
        # Aggressively free references to keep memory low during batch runs.
        del p
        del a
        del q
        del N
        t, result = model.simulate(tmax = time , sampling_dt = sampling_dt)
        del model
        del t
        del time
        del sampling_dt
        results = max(result['R']),max(result['Ra']),max(result['X']),max(result['Xa']),max(result['C'])
        del result
        return results
    results = mixed(**kwargs)
    return results
| 38.320988
| 180
| 0.459837
|
4a00f19273c169b7b7a7d81f38abc2e0d17034bd
| 7,123
|
py
|
Python
|
src/azure-cli/azure/cli/command_modules/redis/_help.py
|
AnshulBhl/azure-cli
|
24f7efbe23a9c874ae06e4bfda216d9aa6cbf3a3
|
[
"MIT"
] | 1
|
2021-08-23T19:53:54.000Z
|
2021-08-23T19:53:54.000Z
|
src/azure-cli/azure/cli/command_modules/redis/_help.py
|
amisi01/azure-cli
|
49206c5b12312985b417f557857fa409bf8aed2e
|
[
"MIT"
] | null | null | null |
src/azure-cli/azure/cli/command_modules/redis/_help.py
|
amisi01/azure-cli
|
49206c5b12312985b417f557857fa409bf8aed2e
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from knack.help_files import helps # pylint: disable=unused-import
# pylint: disable=line-too-long, too-many-lines
helps['redis'] = """
type: group
short-summary: Manage dedicated Redis caches for your Azure applications.
"""
helps['redis create'] = """
type: command
short-summary: Create new Redis Cache instance.
examples:
- name: Create new Redis Cache instance. (autogenerated)
text: az redis create --location westus2 --name MyRedisCache --resource-group MyResourceGroup --sku Basic --vm-size c0
crafted: true
- name: Configure the multiple zones for new Premium Azure Cache for Redis
text: az redis create --location westus2 --name MyRedisCache --resource-group MyResourceGroup --sku Premium --vm-size p1 --zones 1 2
crafted: true
- name: Create a Premium Azure Cache for Redis with clustering enabled
text: az redis create --location westus2 --name MyRedisCache --resource-group MyResourceGroup --sku Premium --vm-size p1 --shard-count 2
crafted: true
- name: Deploying a Premium Azure Cache for Redis inside an existing Azure Virtual Network
text: az redis create --location westus2 --name MyRedisCache --resource-group MyResourceGroup --sku Premium --vm-size p1 --subnet-id "/subscriptions/{subid}/resourceGroups/{resourceGroupName}/providers/Microsoft.{Network|ClassicNetwork}/virtualNetworks/vnet1/subnets/subnet1"
crafted: true
"""
helps['redis export'] = """
type: command
short-summary: Export data stored in a Redis cache.
"""
helps['redis firewall-rules'] = """
type: group
short-summary: Manage Redis firewall rules.
"""
helps['redis firewall-rules create'] = """
type: command
short-summary: Create a redis cache firewall rule.
long-summary: Usage example - az redis firewall-rules create --name testCacheName --resource-group testResourceGroup --start-ip 10.10.10.10 --end-ip 20.20.20.20 --rule-name 10to20
"""
helps['redis firewall-rules update'] = """
type: command
short-summary: Update a redis cache firewall rule.
"""
helps['redis import'] = """
type: command
short-summary: Import data into a Redis cache.
"""
helps['redis list'] = """
type: command
short-summary: List Redis Caches.
long-summary: Lists details about all caches within current Subscription or provided Resource Group.
"""
helps['redis patch-schedule'] = """
type: group
short-summary: Manage Redis patch schedules.
"""
helps['redis patch-schedule create'] = """
type: command
short-summary: Create patching schedule for Redis cache.
long-summary: Usage example - az redis patch-schedule create --name testCacheName --resource-group testResourceGroup --schedule-entries '[{"dayOfWeek":"Tuesday","startHourUtc":"00","maintenanceWindow":"PT5H"}]'
"""
helps['redis patch-schedule update'] = """
type: command
short-summary: Update the patching schedule for Redis cache.
long-summary: Usage example - az redis patch-schedule update --name testCacheName --resource-group testResourceGroup --schedule-entries '[{"dayOfWeek":"Tuesday","startHourUtc":"00","maintenanceWindow":"PT5H"}]'
"""
helps['redis server-link'] = """
type: group
short-summary: Manage Redis server links.
"""
helps['redis server-link create'] = """
type: command
short-summary: Adds a server link to the Redis cache (requires Premium SKU).
long-summary: Usage example - az redis server-link create --name testCacheName --resource-group testResourceGroup --cache-to-link secondTestCacheName --replication-role Secondary
"""
helps['redis update'] = """
type: command
short-summary: Update a Redis cache.
long-summary: Scale or update settings of a Redis cache.
examples:
- name: Update the maxmemory-policy for your Azure Cache for Redis named MyRedisCache
text: az redis update --name MyRedisCache --resource-group MyResourceGroup --set "redisConfiguration.maxmemory-policy"="allkeys-lru"
crafted: true
- name: Disable the RDB back up data persistence for Premium Azure Cache for Redis
text: az redis update --name MyRedisCache --resource-group MyResourceGroup --set "redisConfiguration.rdb-backup-enabled"="false"
crafted: true
- name: Configure the RDB back up enabled data persistence for already created Premium Azure Cache for Redis
text: az redis update --name MyRedisCache --resource-group MyResourceGroup --set "redisConfiguration.rdb-storage-connection-string"="DefaultEndpointsProtocol=https;AccountName=mystorageaccount;AccountKey=myAccountKey;EndpointSuffix=core.windows.net" "redisConfiguration.rdb-backup-enabled"="true" "redisConfiguration.rdb-backup-frequency"="15" "redisConfiguration.rdb-backup-max-snapshot-count"="1"
crafted: true
- name: Scale an Azure Cache for Redis Instance - Update to different size (An example to scale from c0 to c1).
text: az redis update --name MyRedisCache --resource-group MyResourceGroup --set "sku.capacity"="2"
crafted: true
- name: Scale an Azure Cache for Redis Instance - Update to different tier (From Basic to Standard or Standard to Premium).
text: az redis update --name MyRedisCache --resource-group MyResourceGroup --set "sku.name"="Premium" "sku.capacity"="1" "sku.family"="P"
crafted: true
- name: Scale an Azure Cache for Redis Instance - Enable Clustering.
text: az redis update --name MyRedisCache --resource-group MyResourceGroup --set "shardCount"="1"
crafted: true
- name: Scale an Azure Cache for Redis Instance in/out using Redis Cluster.
text: az redis update --name MyRedisCache --resource-group MyResourceGroup --set "shardCount"="2"
crafted: true
"""
helps['redis force-reboot'] = """
type: command
short-summary: Reboot specified Redis node(s).
long-summary: Usage example - az redis force-reboot --name testCacheName --resource-group testResourceGroup --reboot-type {AllNodes, PrimaryNode, SecondaryNode} [--shard-id]
"""
helps['redis import-method'] = """
type: command
short-summary: Import data into Redis cache.
long-summary: Usage example - az redis import-method --name testCacheName --resource-group testResourceGroup --files [--file-format]
"""
helps['redis patch-schedule delete'] = """
type: command
short-summary: Deletes the patching schedule of a redis cache.
long-summary: Usage example - az redis patch-schedule delete --name testCacheName --resource-group testResourceGroup
"""
helps['redis patch-schedule show'] = """
type: command
short-summary: Gets the patching schedule of a redis cache.
long-summary: Usage example - az redis patch-schedule show --name testCacheName --resource-group testResourceGroup [--query-examples]
"""
helps['redis regenerate-keys'] = """
type: command
short-summary: Regenerate Redis cache's access keys.
long-summary: Usage example - az redis regenerate-keys --name testCacheName --resource-group testResourceGroup --key-type {Primary, Secondary}
"""
| 47.486667
| 402
| 0.732557
|
4a00f1a697718e3fc1454210854ebcfed0fb02c1
| 192
|
py
|
Python
|
nyamuk/utils.py
|
MasterScott/nyamuk
|
ac4c6028de288a4c8e0b332ae16eae889deb643d
|
[
"BSD-2-Clause"
] | 49
|
2015-01-27T15:06:31.000Z
|
2022-02-18T13:51:48.000Z
|
nyamuk/utils.py
|
MasterScott/nyamuk
|
ac4c6028de288a4c8e0b332ae16eae889deb643d
|
[
"BSD-2-Clause"
] | 10
|
2015-03-19T13:24:33.000Z
|
2019-03-01T10:06:23.000Z
|
nyamuk/utils.py
|
MasterScott/nyamuk
|
ac4c6028de288a4c8e0b332ae16eae889deb643d
|
[
"BSD-2-Clause"
] | 19
|
2015-01-27T15:13:29.000Z
|
2021-05-23T13:43:52.000Z
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# encode unicode string to utf8
#
def utf8encode(unistr):
    """Encode a Python 2 ``unicode`` string to UTF-8 bytes.

    Any other value (already-encoded ``str``, numbers, None, ...) is
    returned unchanged, so the helper is safe to call unconditionally.
    """
    # isinstance is preferred over `type(...) is ...`: it also accepts
    # unicode subclasses, which encode identically.
    if isinstance(unistr, unicode):
        return unistr.encode('utf8')
    return unistr
| 17.454545
| 36
| 0.645833
|
4a00f1e4516292a987ecf992719b07f404334f17
| 4,537
|
py
|
Python
|
painless_redirects/middleware-legacy-dry.py
|
benzkji/django-painless-redirects
|
03c6623f11794f7dff6a4a996daca96c8775094c
|
[
"MIT"
] | 1
|
2020-05-30T09:10:50.000Z
|
2020-05-30T09:10:50.000Z
|
painless_redirects/middleware-legacy-dry.py
|
bnzk/django-painless-redirects
|
03c6623f11794f7dff6a4a996daca96c8775094c
|
[
"MIT"
] | 26
|
2017-03-21T10:30:29.000Z
|
2022-03-23T14:36:15.000Z
|
painless_redirects/middleware-legacy-dry.py
|
benzkji/django-painless-redirects
|
03c6623f11794f7dff6a4a996daca96c8775094c
|
[
"MIT"
] | 2
|
2017-03-18T10:48:44.000Z
|
2017-10-10T07:18:54.000Z
|
# coding: utf-8
from __future__ import unicode_literals
from django.conf import settings
from django import http
from django.utils.encoding import force_text
from django.utils.http import urlquote
from django.contrib.sites.models import Site
from .models import Redirect
class ForceSiteDomainRedirectMiddleware(object):
    """
    Permanently redirect every request whose host differs from the
    current Site's domain onto that domain, preserving path and query
    string. Does nothing when settings.DEBUG is on.
    """
    def process_request(self, request):
        if settings.DEBUG:
            return None
        current_site = Site.objects.get_current()
        if request.get_host() == current_site.domain:
            return None
        # Rebuild the URL on the canonical domain, keeping scheme,
        # (quoted) path and any query string.
        scheme = 'https' if request.is_secure() else 'http'
        query = '?%s' % request.GET.urlencode() if len(request.GET) > 0 else ''
        new_uri = '%s://%s%s%s' % (
            scheme,
            current_site.domain,
            urlquote(request.path),
            query,
        )
        return http.HttpResponsePermanentRedirect(new_uri)
class ManualRedirectMiddleware(object):
    # Combines two redirect mechanisms backed by the Redirect model:
    # - process_request: domain-level redirects applied to every request
    #   (used by "domain collector" sites);
    # - process_response: path-level redirects (exact or wildcard,
    #   site-specific or global) applied only when the view 404s.
    def process_request(self, request):
        """
        if a domain redirect is found...redirect
        mostly used by "domain collectors"
        """
        host = request.get_host()
        current_site = Site.objects.get_current()
        # Requests already on the canonical domain are left alone.
        if host == current_site.domain:
            return None
        redirect = None
        # better match?  (domain AND exact path)
        redirect = Redirect.objects.filter(domain=host, old_path=request.path)
        # only domain. redirect anyway!
        if not redirect.count():
            redirect = Redirect.objects.filter(domain=host)
        if redirect.count():
            new_uri = redirect[0].redirect_value(request.scheme)
            return http.HttpResponsePermanentRedirect(new_uri)
        # No matching Redirect row: fall through (implicit None).
    def process_response(self, request, response):
        """
        if 404, and there is a redirect...

        Lookup order (first hit wins):
        1. exact path, current site
        2. wildcard prefix, current site
        3. exact path, site=None (global)
        4. wildcard prefix, site=None (global)
        """
        if response.status_code != 404:
            # No need to check for a redirect for non-404 responses.
            return response
        # TODO: this code looks like debt. not DRY at all
        # TODO: force_text ok like this?
        current_site = Site.objects.get_current()
        current_path = force_text(request.path)
        # The query string is part of the lookup key for exact matches.
        if request.META.get('QUERY_STRING', None):
            current_path += '?' + force_text(request.META.get('QUERY_STRING'))
        redirect = None
        # exact match of path and site. yay.
        redirect = Redirect.objects.filter(
            old_path=current_path, site=current_site)
        # wildcard match, with matching site
        if not redirect.count():
            # Walk up the path one segment at a time, looking for a
            # Redirect whose old_path is a wildcard prefix of it.
            # NOTE(review): assumes the path contains at least one "/"
            # (Django request paths always start with one).
            remaining_path, rubbish = current_path.rsplit("/", 1)
            right_path = ""
            while remaining_path:
                redirect = Redirect.objects.filter(
                    old_path=remaining_path + "/", wildcard_match=True,
                    site=current_site)
                if redirect.count():
                    break
                remaining_path, right_side = remaining_path.rsplit("/", 1)
                right_path = "%s/%s" % (right_side, right_path)
        # exact path match
        if not redirect.count():
            redirect = Redirect.objects.filter(old_path=current_path, site=None)
        # wildcard match
        if not redirect.count():
            # Same prefix walk as above, but against global redirects.
            remaining_path, rubbish = current_path.rsplit("/", 1)
            right_path = ""
            while remaining_path:
                redirect = Redirect.objects.filter(
                    old_path=remaining_path + "/", wildcard_match=True, site=None)
                if redirect.count():
                    break
                remaining_path, right_side = remaining_path.rsplit("/", 1)
                right_path = '%s/%s' % (right_side, right_path)
        if redirect.count():
            new_uri = redirect[0].redirect_value(request.scheme)
            return http.HttpResponsePermanentRedirect(new_uri)
        return response
    def _check_for_redirect(self, path, field, **kwargs):
        # NOTE(review): apparently an extracted helper for the duplicated
        # wildcard walk above, but it is never called and `field` is
        # unused — candidate for completing the DRY refactor or removal.
        redirect = Redirect.objects.filter(old_path=path, **kwargs)
        # wildcard match
        if not redirect.count():
            remaining_path, rubbish = path.rsplit("/", 1)
            right_path = ""
            while remaining_path:
                redirect = Redirect.objects.filter(
                    old_path=remaining_path + "/", wildcard_match=True, **kwargs)
                if redirect.count():
                    break
                remaining_path, right_side = remaining_path.rsplit("/", 1)
                right_path = '%s/%s' % (right_side, right_path)
        return redirect
| 39.112069
| 82
| 0.592242
|
4a00f237cafb3642648392a5f7a5b1d8f51fe3c7
| 4,229
|
py
|
Python
|
web/tests/functional/skip/__init__.py
|
ryankurte/codechecker
|
737424ee77c181304f242d5a2adef3e6d9369998
|
[
"Apache-2.0"
] | 1,601
|
2015-07-22T20:01:32.000Z
|
2022-03-31T03:04:36.000Z
|
web/tests/functional/skip/__init__.py
|
ryankurte/codechecker
|
737424ee77c181304f242d5a2adef3e6d9369998
|
[
"Apache-2.0"
] | 2,056
|
2015-07-20T09:39:25.000Z
|
2022-03-31T10:18:08.000Z
|
web/tests/functional/skip/__init__.py
|
ryankurte/codechecker
|
737424ee77c181304f242d5a2adef3e6d9369998
|
[
"Apache-2.0"
] | 288
|
2015-07-15T18:57:18.000Z
|
2022-03-30T13:40:13.000Z
|
# coding=utf-8
# -------------------------------------------------------------------------
#
# Part of the CodeChecker project, under the Apache License v2.0 with
# LLVM Exceptions. See LICENSE for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
# -------------------------------------------------------------------------
"""Setup for the package tests."""
import os
import shutil
import sys
import uuid
from libtest import codechecker
from libtest import env
from libtest import project
TEST_WORKSPACE = None
def setup_package():
    """Setup the environment for the tests.

    Creates a private workspace, generates a skip-list file, runs the
    analysis twice (without and with the skip file) and stores the
    results under a unique run name on a CodeChecker server. Exits the
    process on any analysis/store failure.
    """
    global TEST_WORKSPACE
    TEST_WORKSPACE = env.get_workspace('skip')
    # Child tests discover the workspace through this variable.
    os.environ['TEST_WORKSPACE'] = TEST_WORKSPACE
    test_project = 'cpp'
    test_config = {}
    project_info = project.get_info(test_project)
    test_config['test_project'] = project_info
    suppress_file = None
    # Generate skip list file for the tests.
    skip_list_file = os.path.join(TEST_WORKSPACE, 'skip_file')
    if os.path.isfile(skip_list_file):
        os.remove(skip_list_file)
    _generate_skip_list_file(skip_list_file)
    test_env = env.test_env(TEST_WORKSPACE)
    codechecker_cfg = {
        'suppress_file': suppress_file,
        'skip_file': skip_list_file,
        'check_env': test_env,
        'workspace': TEST_WORKSPACE,
        'checkers': []
    }
    ret = project.clean(test_project, test_env)
    if ret:
        sys.exit(ret)
    # Start or connect to the running CodeChecker server and get connection
    # details.
    print("This test uses a CodeChecker server... connecting...")
    server_access = codechecker.start_or_get_server()
    server_access['viewer_product'] = 'skip'
    codechecker.add_test_package_product(server_access, TEST_WORKSPACE)
    # Extend the checker configuration with the server access.
    codechecker_cfg.update(server_access)
    test_project_name = project_info['name'] + '_' + uuid.uuid4().hex
    # Temporarily drop the skip file so the first analysis runs without it.
    skip_file = codechecker_cfg.pop('skip_file')
    output_dir = codechecker_cfg['reportdir'] \
        if 'reportdir' in codechecker_cfg \
        else os.path.join(codechecker_cfg['workspace'], 'reports')
    codechecker_cfg['reportdir'] = output_dir
    # Analyze without skip.
    ret = codechecker.log_and_analyze(codechecker_cfg,
                                      project.path(test_project))
    if ret:
        print("Analyzing the test project without a skip file failed.")
        sys.exit(1)
    codechecker_cfg['skip_file'] = skip_file
    # Analyze with skip.
    ret = codechecker.log_and_analyze(codechecker_cfg,
                                      project.path(test_project))
    if ret:
        print("Analyzing the test project with a skip file failed.")
        sys.exit(1)
    ret = codechecker.store(codechecker_cfg,
                            test_project_name)
    if ret:
        print("Storing the results failed.")
        sys.exit(1)
    codechecker_cfg['run_names'] = [test_project_name]
    test_config['codechecker_cfg'] = codechecker_cfg
    # Persist the configuration for the tests and teardown_package().
    env.export_test_cfg(TEST_WORKSPACE, test_config)
def teardown_package():
    """Clean up after the test."""
    # TODO: If environment variable is set keep the workspace
    # and print out the path.
    global TEST_WORKSPACE
    cfg = env.import_test_cfg(TEST_WORKSPACE)['codechecker_cfg']
    codechecker.remove_test_package_product(TEST_WORKSPACE, cfg['check_env'])
    print("Removing: " + TEST_WORKSPACE)
    shutil.rmtree(TEST_WORKSPACE, ignore_errors=True)
def _generate_skip_list_file(skip_list_file):
"""
Generate skip list file.
file_to_be_skipped.cpp is a valid file in the cpp project
with bugs in it.
"""
skip_list_content = ["-*randtable.c", "-*blocksort.c", "-*huffman.c",
"-*decompress.c", "-*crctable.c",
"-*file_to_be_skipped.cpp", "-*path_end.h",
"-*skip.h"]
print('Skip list file content: ' + skip_list_file)
print('\n'.join(skip_list_content))
s_file = open(skip_list_file, 'w', encoding="utf-8", errors="ignore")
for k in skip_list_content:
s_file.write(k + '\n')
s_file.close()
| 28.768707
| 75
| 0.641996
|
4a00f36bdd723f4abaf0c2dafd240e7ad0ffe2fd
| 115
|
py
|
Python
|
app/orders/apps.py
|
Zed-chi/Shultais_Django_introduction
|
875c7f309083dc6c92eba275d86779e83a34830e
|
[
"MIT"
] | 1
|
2020-05-28T17:04:56.000Z
|
2020-05-28T17:04:56.000Z
|
app/orders/apps.py
|
Zed-chi/Shultais_Django_introduction
|
875c7f309083dc6c92eba275d86779e83a34830e
|
[
"MIT"
] | null | null | null |
app/orders/apps.py
|
Zed-chi/Shultais_Django_introduction
|
875c7f309083dc6c92eba275d86779e83a34830e
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class OrdersConfig(AppConfig):
    """Django application configuration for the "orders" app."""
    name = "orders"
    # Human-readable label shown in the admin; "Заказы" is Russian
    # for "Orders" (runtime string, deliberately left untranslated).
    verbose_name = "Заказы"
| 16.428571
| 33
| 0.721739
|
4a00f42812f589d83777f0fb6367f01335a2e638
| 1,935
|
py
|
Python
|
setup.py
|
fredhallgren/nystrompca
|
7f8923af08551ad477a446c383822b555326f4bf
|
[
"Apache-2.0"
] | 4
|
2021-09-14T08:46:10.000Z
|
2021-10-31T09:44:06.000Z
|
setup.py
|
fredhallgren/nystrompca
|
7f8923af08551ad477a446c383822b555326f4bf
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
fredhallgren/nystrompca
|
7f8923af08551ad477a446c383822b555326f4bf
|
[
"Apache-2.0"
] | null | null | null |
from setuptools import setup, find_packages
with open("requirements.txt") as f:
    # Drop blank entries: split('\n') on a file with a trailing newline
    # yields an empty string, which is not a valid requirement specifier
    # and can break installation.
    requirements_list = [r for r in f.read().split('\n') if r.strip()]

# Package metadata; keep in sync with the repository README.
setup(name             = "nystrompca",
      version          = "1.0.2",
      description      = "Kernel PCA with the Nyström method",
      author           = "Fredrik Hallgren",
      author_email     = "fredrik.hallgren@ucl.ac.uk",
      url              = "https://github.com/fredhallgren/nystrompca",
      packages         = find_packages(),
      long_description = """This package implements an efficient non-linear PCA by combining kernel PCA with the Nyström randomized subsampling method, as well as a confidence bound on the accuracy of the method.\n\nPlease see www.github.com/fredhallgren/nystrompca for further details. """,
      long_description_content_type="text/plain",
      classifiers      = [
          "Programming Language :: Python :: 3",
          "License :: OSI Approved :: Apache Software License",
          "Operating System :: OS Independent",
          "Development Status :: 5 - Production/Stable",
          "Topic :: Scientific/Engineering :: Artificial Intelligence"],
      python_requires  = ">=3.6",
      install_requires = requirements_list,
      include_package_data = True,
      data_files = [('data', ['nystrompca/data/magic_gamma_telescope.dat',
                              'nystrompca/data/yeast.dat',
                              'nystrompca/data/cardiotocography.dat',
                              'nystrompca/data/segmentation.dat',
                              'nystrompca/data/nips.dat',
                              'nystrompca/data/drug.dat',
                              'nystrompca/data/dailykos.dat',
                              'nystrompca/data/airfoil.dat'])],
      entry_points = {
          'console_scripts': [
              'nystrompca=nystrompca.experiments.console_script:main']})
| 47.195122
| 295
| 0.57261
|
4a00f51c296577000f07d8ff66f0b05e1d95e269
| 1,161
|
py
|
Python
|
bayesian/examples/gaussian_bayesian_networks/river.py
|
Ecotrust/bayesian-belief-networks
|
9bf4a5c75789204ea8d83e83bd410f1716cd81e0
|
[
"Apache-2.0"
] | 1
|
2015-10-18T22:45:39.000Z
|
2015-10-18T22:45:39.000Z
|
bayesian/examples/gaussian_bayesian_networks/river.py
|
Ecotrust/bayesian-belief-networks
|
9bf4a5c75789204ea8d83e83bd410f1716cd81e0
|
[
"Apache-2.0"
] | null | null | null |
bayesian/examples/gaussian_bayesian_networks/river.py
|
Ecotrust/bayesian-belief-networks
|
9bf4a5c75789204ea8d83e83bd410f1716cd81e0
|
[
"Apache-2.0"
] | null | null | null |
'''Simple Example Using Gaussian Variables'''
from bayesian.gaussian_bayesian_network import gaussian, conditional_gaussian
from bayesian.gaussian_bayesian_network import build_graph
from bayesian.utils import shrink_matrix
'''
This example comes from page 3 of
http://people.cs.aau.dk/~uk/papers/castillo-kjaerulff-03.pdf
Note that to create a Guassian Node
we supply mean and standard deviation,
this differs from the example in the
above paper which uses variance (=std. dev.) ** 2
Note in the paper they specify variance,
wheres as this example we are using std. dev.
instead hence for A the variance is 4 and std_dev is 2.
'''
# Each network node is declared as a stub function: the decorator
# attaches the distribution parameters, and the argument names encode
# the graph structure (parents listed before the node's own variable).
@gaussian(3, 2)
def f_a(a):
    '''represents point A in the river system'''
    pass
@conditional_gaussian(1, 1, 1)
def f_b(a, b):
    '''Point b is a conditional Gaussian
    with parent a.
    '''
    pass
@conditional_gaussian(3, 2, 2)
def f_c(a, c):
    '''Point c is a conditional Gaussian
    with parent a'''
    pass
@conditional_gaussian(1, 1, betas=dict(b=1, c=1))
def f_d(b, c, d):
    '''Point d is a conditional Gaussian with parents b and c;
    the betas give the regression weight of each parent.'''
    pass
if __name__ == '__main__':
    # Assemble the Bayesian network from the node functions and start
    # the interactive query loop.
    g = build_graph(
        f_a,
        f_b,
        f_c,
        f_d)
    g.q()
| 20.732143
| 77
| 0.6882
|
4a00f786e0478c7f51698ee68a355e4f9712b062
| 920
|
py
|
Python
|
gkmerge/data_tools/data_analysis.py
|
ISCOTTYI/gk-merge
|
d12421c23ea2d28f5f2bf7985c54b1e3f062cdbc
|
[
"MIT"
] | null | null | null |
gkmerge/data_tools/data_analysis.py
|
ISCOTTYI/gk-merge
|
d12421c23ea2d28f5f2bf7985c54b1e3f062cdbc
|
[
"MIT"
] | null | null | null |
gkmerge/data_tools/data_analysis.py
|
ISCOTTYI/gk-merge
|
d12421c23ea2d28f5f2bf7985c54b1e3f062cdbc
|
[
"MIT"
] | null | null | null |
from gkmerge.data_tools.util import *
def contagion_extend(df_or_af_lst, cascade_threshold):
    """
    For a given list of defaulted fractions from various simulation runs
    calculates the mean contagion extend where a global cascade is defined
    accoring to cadcade_threshold.
    """
    def is_global_cascade(df):
        return df > cascade_threshold
    return filtered_mean(df_or_af_lst, is_global_cascade)
def cascade_steps(steps_lst, df_or_af_lst, cascade_threshold):
    """
    Mean number of cascade steps, averaged only over the runs whose
    defaulted fraction exceeds cascade_threshold (global cascades).
    """
    def is_global_cascade(df):
        return df > cascade_threshold
    return externally_filtered_mean(steps_lst, df_or_af_lst, is_global_cascade)
def contagion_frequency(df_or_af_lst, cascade_threshold):
    """
    For a given list of defaulted fractions from various simulation runs
    calculates the contagion frequency among the runs, where a global cascade
    is defined accoring to cadcade_threshold.
    """
    def is_global_cascade(df):
        return df > cascade_threshold
    return fraction(df_or_af_lst, is_global_cascade)
def mean_degree(z_lst):
    """Return the arithmetic mean of the sampled mean degrees."""
    return mean(z_lst)
| 32.857143
| 95
| 0.769565
|
4a00f7baab9fc54a99bfd240dd2b34442dc73e6e
| 16,153
|
py
|
Python
|
nobrainer/dataset.py
|
kaczmarj/nobrainer
|
c1b17831a0e816d19ed79dbf620401f989f13bc2
|
[
"Apache-2.0"
] | 17
|
2018-03-19T03:13:53.000Z
|
2019-03-27T11:10:55.000Z
|
nobrainer/dataset.py
|
kaczmarj/nobrainer
|
c1b17831a0e816d19ed79dbf620401f989f13bc2
|
[
"Apache-2.0"
] | 29
|
2018-02-08T14:49:06.000Z
|
2019-03-19T21:03:58.000Z
|
nobrainer/dataset.py
|
kaczmarj/nobrainer
|
c1b17831a0e816d19ed79dbf620401f989f13bc2
|
[
"Apache-2.0"
] | 12
|
2018-01-29T20:36:31.000Z
|
2019-03-25T22:52:09.000Z
|
"""Methods for creating `tf.data.Dataset` objects."""
import math
import os
from pathlib import Path
import fsspec
import nibabel as nb
import numpy as np
import tensorflow as tf
from .io import _is_gzipped, verify_features_labels
from .tfrecord import _labels_all_scalar, parse_example_fn, write
from .volume import binarize, replace, standardize, to_blocks
AUTOTUNE = tf.data.experimental.AUTOTUNE
def tfrecord_dataset(
    file_pattern,
    volume_shape,
    shuffle,
    scalar_label,
    compressed=True,
    num_parallel_calls=AUTOTUNE,
):
    """Return `tf.data.Dataset` from TFRecord files.

    file_pattern: str, glob pattern matching the TFRecord shards.
    volume_shape: tuple, shape of every serialized volume.
    shuffle: bool, whether to shuffle the list of shard files.
    scalar_label: bool, whether labels are scalars (vs. volumes).
    compressed: bool, if True every shard is treated as GZIP-compressed.
    num_parallel_calls: int or AUTOTUNE, parallelism for interleave/parse.
    """
    dataset = tf.data.Dataset.list_files(file_pattern, shuffle=shuffle)
    # Read each of these files as a TFRecordDataset.
    # Assume all files have same compression type as the first file.
    compression_type = "GZIP" if compressed else None
    cycle_length = 1 if num_parallel_calls is None else num_parallel_calls
    parse_fn = parse_example_fn(volume_shape=volume_shape, scalar_label=scalar_label)
    if not shuffle:
        # Determine examples_per_shard from the first TFRecord shard
        # Then set block_length to equal the number of examples per shard
        # so that the interleave method does not inadvertently shuffle data.
        first_shard = (
            dataset.take(1)
            .flat_map(
                lambda x: tf.data.TFRecordDataset(x, compression_type=compression_type)
            )
            .map(map_func=parse_fn, num_parallel_calls=num_parallel_calls)
        )
        # Counting requires one eager pass over the first shard's records.
        block_length = len([0 for _ in first_shard])
    else:
        # If the dataset is being shuffled, then we don't care if interleave
        # further shuffles that data even further
        block_length = None
    dataset = dataset.interleave(
        map_func=lambda x: tf.data.TFRecordDataset(
            x, compression_type=compression_type
        ),
        cycle_length=cycle_length,
        block_length=block_length,
        num_parallel_calls=num_parallel_calls,
    )
    dataset = dataset.map(map_func=parse_fn, num_parallel_calls=num_parallel_calls)
    return dataset
def get_dataset(
    file_pattern,
    n_classes,
    batch_size,
    volume_shape,
    scalar_label=False,
    block_shape=None,
    n_epochs=None,
    mapping=None,
    augment=None,
    normalizer=standardize,
    shuffle_buffer_size=None,
    num_parallel_calls=AUTOTUNE,
):
    """Return `tf.data.Dataset` that preprocesses data for training or prediction.

    Labels are preprocessed for binary or multiclass segmentation according to
    `n_classes`.

    Parameters
    ----------
    file_pattern: str, expression that can be globbed to get TFRecords files
        for this dataset. For example 'data/training_*.tfrecords'.
    n_classes: int, number of classes to segment. Values of 1 and 2 indicate
        binary segmentation (foreground vs background), and values greater than
        2 indicate multiclass segmentation.
    batch_size: int, number of elements per batch.
    volume_shape: tuple of at least length 3, the shape of every volume in the TFRecords
        files. Every volume must have the same shape.
    scalar_label: boolean, if `True`, labels are scalars.
    block_shape: tuple of at least length 3, the shape of the non-overlapping sub-volumes
        to take from the full volumes. If None, do not separate the full volumes
        into sub-volumes. Separating into non-overlapping sub-volumes is useful
        (sometimes even necessary) to overcome memory limitations depending on
        the number of model parameters.
    n_epochs: int, number of epochs for the dataset to repeat. If None, the
        dataset will be repeated indefinitely.
    mapping: dict, mapping to replace label values. Values equal to a key in
        the mapping are replaced with the corresponding values in the mapping.
        Values not in `mapping.keys()` are replaced with zeros.
    augment: None, or list of different transforms in the executable sequence
        the corresponding arguments in tuple as e.g.:
        [(addGaussianNoise, {'noise_mean':0.1,'noise_std':0.5}), (...)]
    normalizer: callable, applies this normalization function when creating the
        dataset. to maintain compatibility with prior nobrainer release, this is
        set to standardize by default.
    shuffle_buffer_size: int, buffer of full volumes to shuffle. If this is not
        None, then the list of files found by 'file_pattern' is also shuffled
        at every iteration.
    num_parallel_calls: int, number of parallel calls to make for data loading
        and processing.

    Returns
    -------
    `tf.data.Dataset` of features and labels. If block_shape is not None, the
    shape of features is `(batch_size, *block_shape, 1)` and the shape of labels
    is `(batch_size, *block_shape, n_classes)`. If block_shape is None, then
    the shape of features is `(batch_size, *volume_shape, 1)` and the shape of
    labels is `(batch_size, *volume_shape, n_classes)`. If `scalar_label` is `True,
    the shape of labels is always `(batch_size,)`.
    """
    fs, _, _ = fsspec.get_fs_token_paths(file_pattern)
    files = fs.glob(file_pattern)
    if not files:
        raise ValueError("no files found for pattern '{}'".format(file_pattern))
    # Create dataset of all TFRecord files. After this point, the dataset will have
    # two value per iteration: (feature, label).
    shuffle = bool(shuffle_buffer_size)
    # Compression of the first shard is assumed representative of all shards.
    compressed = _is_gzipped(files[0], filesys=fs)
    dataset = tfrecord_dataset(
        file_pattern=file_pattern,
        volume_shape=volume_shape,
        shuffle=shuffle,
        scalar_label=scalar_label,
        compressed=compressed,
        num_parallel_calls=num_parallel_calls,
    )
    if normalizer is not None:
        # Standard-score the features.
        dataset = dataset.map(lambda x, y: (normalizer(x), y))
    # Augment examples if requested.
    if isinstance(augment, bool):
        raise ValueError("Augment no longer supports a boolean expression")
    if augment is not None:
        # Each transform is applied with probability 0.5, independently.
        for transform, kwargs in augment:
            dataset = dataset.map(
                lambda x, y: tf.cond(
                    tf.random.uniform((1,)) > 0.5,
                    true_fn=lambda: transform(x, y, **kwargs),
                    false_fn=lambda: (x, y),
                ),
                num_parallel_calls=num_parallel_calls,
            )
    # Separate into blocks, if requested.
    if block_shape is not None:
        if not scalar_label:
            dataset = dataset.map(
                lambda x, y: (to_blocks(x, block_shape), to_blocks(y, block_shape)),
                num_parallel_calls=num_parallel_calls,
            )
            # This step is necessary because separating into blocks adds a dimension.
            dataset = dataset.unbatch()
        if scalar_label:
            # Every block inherits its parent volume's scalar label.
            def _f(x, y):
                x = to_blocks(x, block_shape)
                n_blocks = x.shape[0]
                y = tf.repeat(y, n_blocks)
                return (x, y)
            dataset = dataset.map(_f, num_parallel_calls=num_parallel_calls)
            # This step is necessary because separating into blocks adds a dimension.
            dataset = dataset.unbatch()
    else:
        if scalar_label:
            dataset = dataset.map(lambda x, y: (x, tf.squeeze(y)))
    # Binarize or replace labels according to mapping.
    if not scalar_label:
        if n_classes < 1:
            raise ValueError("n_classes must be > 0.")
        elif n_classes == 1:
            dataset = dataset.map(lambda x, y: (x, tf.expand_dims(binarize(y), -1)))
        elif n_classes == 2:
            dataset = dataset.map(lambda x, y: (x, tf.one_hot(binarize(y), n_classes)))
        elif n_classes > 2:
            if mapping is not None:
                dataset = dataset.map(lambda x, y: (x, replace(y, mapping=mapping)))
            dataset = dataset.map(lambda x, y: (x, tf.one_hot(y, n_classes)))
    # If volume_shape is only three dims, add grayscale channel to features.
    # Otherwise, assume that the channels are already in the features.
    if len(volume_shape) == 3:
        dataset = dataset.map(lambda x, y: (tf.expand_dims(x, -1), y))
    # Prefetch data to overlap data production with data consumption. The
    # TensorFlow documentation suggests prefetching `batch_size` elements.
    dataset = dataset.prefetch(buffer_size=batch_size)
    # Batch the dataset, so each iteration gives `batch_size` elements. We drop
    # the remainder so that when training on multiple GPUs, the batch will
    # always be evenly divisible by the number of GPUs. Otherwise, the last
    # batch might have fewer than `batch_size` elements and will cause errors.
    if batch_size is not None:
        dataset = dataset.batch(batch_size=batch_size, drop_remainder=True)
    # Optionally shuffle. We also optionally shuffle the list of files.
    # The TensorFlow recommend shuffling and then repeating.
    if shuffle_buffer_size:
        dataset = dataset.shuffle(buffer_size=shuffle_buffer_size)
    # Repeat the dataset for n_epochs. If n_epochs is None, then repeat
    # indefinitely. If n_epochs is 1, then the dataset will only be iterated
    # through once.
    dataset = dataset.repeat(n_epochs)
    return dataset
def get_steps_per_epoch(n_volumes, volume_shape, block_shape, batch_size):
    """Return the number of steps in one epoch.

    Each volume of shape `volume_shape` is partitioned into
    non-overlapping blocks of shape `block_shape`; one step consumes
    `batch_size` blocks. Raises ValueError if the blocks do not tile
    the volume exactly.
    """
    blocks_per_axis = [(dim - blk) / blk + 1
                       for dim, blk in zip(volume_shape, block_shape)]
    if any((not b.is_integer()) or b < 1 for b in blocks_per_axis):
        raise ValueError(
            "cannot create non-overlapping blocks with the given parameters."
        )
    blocks_per_volume = int(np.prod(blocks_per_axis))
    return math.ceil(blocks_per_volume * n_volumes / batch_size)
def write_multi_resolution(
    paths,
    tfrecdir=None,
    resolutions=None,
    shard_size=3,
    n_processes=1,
):
    """Write multi-resolution TFRecord shards and describe the datasets.

    paths: sequence of (features, labels) filepath pairs to serialize.
    tfrecdir: output directory; defaults to '<cwd>/data'. The previous
        default was evaluated at import time, silently pinning the
        directory to whatever the working directory was when the module
        was loaded; it is now resolved at call time.
    resolutions: list of int resolutions to write; defaults to
        [8, 16, 32, 64, 128, 256].
    shard_size: int, examples per TFRecord shard.
    n_processes: int, worker processes used for writing.

    Returns a dict mapping each resolution to get_dataset-style kwargs
    (file_pattern, batch_size, normalizer).
    """
    if tfrecdir is None:
        tfrecdir = Path(os.getcwd()) / "data"
    resolutions = resolutions or [8, 16, 32, 64, 128, 256]
    tfrecdir = Path(tfrecdir)
    tfrecdir.mkdir(exist_ok=True, parents=True)
    template = tfrecdir / "data-train_shard-{shard:03d}.tfrec"
    write(
        features_labels=paths,
        filename_template=str(template),
        examples_per_shard=shard_size,  # change for larger dataset
        multi_resolution=True,
        resolutions=resolutions,
        processes=n_processes,
    )
    datasets = {}
    for resolution in resolutions:
        datasets[resolution] = dict(
            file_pattern=str(tfrecdir / f"*res-{resolution:03d}.tfrec"),
            batch_size=1,
            normalizer=None,
        )
    return datasets
class Dataset:
    """Represent datasets for training, and validation.

    Holds the segmentation/batching configuration and builds nobrainer
    `tf.data.Dataset` objects either from existing TFRecord shards
    (`nbd_from_tfrec`) or from raw (features, labels) file pairs
    (`to_nbd`, which writes the shards first).
    """
    def __init__(
        self, n_classes, batch_size, block_shape, volume_shape=None, n_epochs: int = 1
    ):
        self.n_classes = n_classes
        # May be None; inferred from the first features file in to_nbd().
        self.volume_shape = volume_shape
        self.block_shape = block_shape
        self.batch_size = batch_size
        self.n_epochs = n_epochs
    def nbd_from_tfrec(
        self,
        volume_shape,
        scalar_labels,
        n_volumes,
        template="data/data-train_shard-*.tfrec",
        augment=None,
        shuffle_buffer_size=None,
        num_parallel_calls=1,
    ):
        """Function to retrieve a saved tf record

        template: str, the path to which TFRecord files should be written.
        num_parallel_calls: int, number of processes to use for multiprocessing. If
            None, will use all available processes.

        Returns the `tf.data.Dataset` with nobrainer-specific attributes
        (scalar_labels, n_volumes, volume_shape) attached to it.
        """
        self.volume_shape = volume_shape
        # replace shard formatting code with * for globbing
        dataset = get_dataset(
            file_pattern=template,
            n_classes=self.n_classes,
            batch_size=self.batch_size,
            volume_shape=self.volume_shape,
            block_shape=self.block_shape,
            n_epochs=self.n_epochs,
            augment=augment,
            shuffle_buffer_size=shuffle_buffer_size,
            num_parallel_calls=num_parallel_calls,
        )
        # Add nobrainer specific attributes
        dataset.scalar_labels = scalar_labels
        dataset.n_volumes = n_volumes
        dataset.volume_shape = self.volume_shape
        return dataset
    def to_nbd(
        self,
        paths,
        eval_size=0.1,
        # NOTE(review): this default is evaluated once at import time, so
        # it is pinned to the cwd at module load — confirm intended.
        tfrecdir=Path(os.getcwd()) / "data",
        shard_size=3,
        augment=None,
        shuffle_buffer_size=None,
        num_parallel_calls=1,
        check_shape=True,
        check_labels_int=False,
        check_labels_gte_zero=False,
    ):
        """Write train/eval TFRecord shards from (features, labels) pairs
        and return the corresponding (train, eval) datasets; eval is None
        when eval_size rounds to zero volumes.

        template: str, the path to which TFRecord files should be written. A string
        formatting key `shard` should be included to indicate the unique TFRecord file
        when writing to multiple TFRecord files. For example,
        `data_shard-{shard:03d}.tfrec`.
        shard_size: int, number of pairs of `(feature, label)` per TFRecord file.
        check_shape: boolean, if true, validate that the shape of both volumes is
        equal to 'volume_shape'.
        check_labels_int: boolean, if true, validate that every labels volume is an
        integer type or can be safely converted to an integer type.
        check_labels_gte_zero: boolean, if true, validate that every labels volume
        has values greater than or equal to zero.
        num_parallel_calls: int, number of processes to use for multiprocessing. If
        None, will use all available processes.

        Raises ValueError when the paths fail validation.
        """
        # Test that the `filename_template` has a `shard` formatting key.
        template = str(Path(tfrecdir) / "data-{intent}")
        shard_ext = "shard-{shard:03d}.tfrec"
        # Split point: the last Neval pairs become the eval set.
        Neval = np.ceil(len(paths) * eval_size).astype(int)
        Ntrain = len(paths) - Neval
        verify_result = verify_features_labels(
            paths,
            check_shape=check_shape,
            check_labels_int=check_labels_int,
            check_labels_gte_zero=check_labels_gte_zero,
        )
        # Empty result means every pair passed validation.
        if len(verify_result) == 0:
            Path(tfrecdir).mkdir(exist_ok=True, parents=True)
            if self.volume_shape is None:
                # Infer the volume shape from the first features file.
                self.volume_shape = nb.load(paths[0][0]).shape
            write(
                features_labels=paths[:Ntrain],
                filename_template=template.format(intent=f"train_{shard_ext}"),
                examples_per_shard=shard_size,
                processes=num_parallel_calls,
            )
            if Neval > 0:
                write(
                    features_labels=paths[Ntrain:],
                    filename_template=template.format(intent=f"eval_{shard_ext}"),
                    examples_per_shard=shard_size,
                    processes=num_parallel_calls,
                )
            labels = (y for _, y in paths)
            scalar_labels = _labels_all_scalar(labels)
            # replace shard formatting code with * for globbing
            template_train = template.format(intent="train_*.tfrec")
            ds_train = self.nbd_from_tfrec(
                self.volume_shape,
                scalar_labels,
                len(paths[:Ntrain]),
                template=template_train,
                augment=augment,
                shuffle_buffer_size=shuffle_buffer_size,
                num_parallel_calls=num_parallel_calls,
            )
            ds_eval = None
            if Neval > 0:
                template_eval = template.format(intent="eval_*.tfrec")
                # Evaluation data is never augmented or shuffled.
                ds_eval = self.nbd_from_tfrec(
                    self.volume_shape,
                    scalar_labels,
                    len(paths[Ntrain:]),
                    template=template_eval,
                    augment=None,
                    shuffle_buffer_size=None,
                    num_parallel_calls=num_parallel_calls,
                )
            return ds_train, ds_eval
        raise ValueError(
            "Provided paths did not pass validation. Please "
            "check that they have the same shape, and the "
            "targets have appropriate labels"
        )
| 38.922892
| 90
| 0.642977
|
4a00f853b1d03eb38845603319bf1209e0320898
| 3,669
|
py
|
Python
|
tests/test_stream2.py
|
dfeeley/alpaca-trade-api-python
|
392436697231f971d1dc83d4ee5c07ac84c23ff3
|
[
"Apache-2.0"
] | 2
|
2020-02-15T22:03:20.000Z
|
2021-04-09T16:30:14.000Z
|
tests/test_stream2.py
|
dfeeley/alpaca-trade-api-python
|
392436697231f971d1dc83d4ee5c07ac84c23ff3
|
[
"Apache-2.0"
] | null | null | null |
tests/test_stream2.py
|
dfeeley/alpaca-trade-api-python
|
392436697231f971d1dc83d4ee5c07ac84c23ff3
|
[
"Apache-2.0"
] | null | null | null |
from alpaca_trade_api.stream2 import StreamConn
from alpaca_trade_api.entity import Account
from alpaca_trade_api.polygon.entity import Entity as PolyEntity
import asyncio
import json
import pytest
from unittest import mock
def AsyncMock(*args, **kwargs):
    """Create an async function mock.

    Returns a coroutine function that forwards its call to an inner
    MagicMock (exposed as the `.mock` attribute for assertions).
    """
    inner = mock.MagicMock(*args, **kwargs)

    async def coro(*call_args, **call_kwargs):
        return inner(*call_args, **call_kwargs)

    coro.mock = inner
    return coro
@pytest.fixture
def websockets():
    # Patch the websockets module as seen by stream2 so no real network
    # connection is attempted; yields the mock for per-test configuration.
    with mock.patch('alpaca_trade_api.stream2.websockets') as websockets:
        yield websockets
def _run(coro):
return asyncio.get_event_loop().run_until_complete(coro)
def test_stream(websockets):
    """Exercise StreamConn internals one method at a time, with all
    network I/O replaced by AsyncMock stubs. Each section below mirrors
    the private method it covers."""
    # _connect: a successful auth handshake fires the 'authorized'
    # handler and starts the message consumer.
    connect = AsyncMock()
    websockets.connect = connect
    ws = connect.mock()
    ws.send = AsyncMock()
    ws.recv = AsyncMock(return_value=json.dumps({
        'stream': 'authentication',
        'data': {
            'status': 'authorized',
        }
    }).encode())
    conn = StreamConn('key-id', 'secret-key')
    conn._consume_msg = AsyncMock()
    @conn.on('authorized')
    async def on_auth(conn, stream, msg):
        on_auth.msg = msg
    _run(conn._connect())
    assert on_auth.msg.status == 'authorized'
    assert conn._consume_msg.mock.called
    # Handler registry: deregister removes the handler; registering a
    # non-coroutine callback is rejected.
    conn.deregister('authorized')
    assert len(conn._handlers) == 0
    with pytest.raises(ValueError):
        conn.register('nonasync', lambda x: x)
    # _consume_msg: an exception raised by a handler propagates and the
    # websocket is closed.
    conn = StreamConn('key-id', 'secret-key')
    ws = mock.Mock()
    conn._ws = ws
    ws.recv = AsyncMock(return_value=json.dumps({
        'stream': 'raise',
        'data': {
            'key': 'value',
        }
    }))
    ws.close = AsyncMock()
    class TestException(Exception):
        pass
    @conn.on('raise')
    async def on_raise(conn, stream, msg):
        raise TestException()
    with pytest.raises(TestException):
        _run(conn._consume_msg())
    assert ws.close.mock.called
    # _ensure_nats: lazily creates and connects the polygon stream.
    conn = StreamConn('key-id', 'secret-key')
    with mock.patch('alpaca_trade_api.stream2.polygon') as polygon:
        polygon.Stream().connect = AsyncMock()
        _run(conn._ensure_nats())
        assert conn.polygon is not None
        assert conn.polygon.connect.mock.called
    # _ensure_ws: lazily opens the websocket connection.
    conn = StreamConn('key-id', 'secret-key')
    conn._connect = AsyncMock()
    _run(conn._ensure_ws())
    assert conn._connect.mock.called
    assert conn._ws is not None
    # subscribe: polygon channels (e.g. 'Q.*') go to the nats stream,
    # alpaca channels (e.g. 'account_updates') go over the websocket.
    conn = StreamConn('key-id', 'secret-key')
    conn._ensure_ws = AsyncMock()
    conn._ws = mock.Mock()
    conn._ws.send = AsyncMock()
    conn._ensure_nats = AsyncMock()
    conn.polygon = mock.Mock()
    conn.polygon.subscribe = AsyncMock()
    _run(conn.subscribe(['Q.*', 'account_updates']))
    assert conn._ws.send.mock.called
    assert conn.polygon.subscribe.mock.called
    # close: both transports are shut down.
    conn = StreamConn('key-id', 'secret-key')
    conn._ws = mock.Mock()
    conn._ws.close = AsyncMock()
    conn.polygon = mock.Mock()
    conn.polygon.close = AsyncMock()
    _run(conn.close())
    assert conn._ws.close.mock.called
    assert conn.polygon.close.mock.called
    # _cast: known streams map to their entity class, unknown streams
    # fall back to a generic attribute-access entity.
    conn = StreamConn('key-id', 'secret-key')
    ent = conn._cast('account_updates', {})
    assert isinstance(ent, Account)
    ent = conn._cast('other', {'key': 'value'})
    assert ent.key == 'value'
    # _dispatch_nats: regex-registered handlers receive polygon messages.
    conn = StreamConn('key-id', 'secret-key')
    @conn.on('^Q.')
    async def on_q(conn, subject, data):
        on_q.data = data
    _run(conn._dispatch_nats(conn, 'Q.SPY', PolyEntity({'key': 'value'})))
    assert on_q.data.key == 'value'
| 26.586957
| 74
| 0.635323
|
4a00f92a4ec8400b3719c9cc287ee960c480b0a9
| 5,462
|
py
|
Python
|
Wrappers/Python/ccpi/optimisation/operators/MaskOperator.py
|
KrisThielemans/CCPi-Framework
|
fed2e86a65fa67aa89d0a690438bf76f4828266e
|
[
"Apache-2.0"
] | null | null | null |
Wrappers/Python/ccpi/optimisation/operators/MaskOperator.py
|
KrisThielemans/CCPi-Framework
|
fed2e86a65fa67aa89d0a690438bf76f4828266e
|
[
"Apache-2.0"
] | null | null | null |
Wrappers/Python/ccpi/optimisation/operators/MaskOperator.py
|
KrisThielemans/CCPi-Framework
|
fed2e86a65fa67aa89d0a690438bf76f4828266e
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# CCP in Tomographic Imaging (CCPi) Core Imaging Library (CIL).
# Copyright 2017-2020 UKRI-STFC
# Copyright 2017-2020 University of Manchester
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from ccpi.optimisation.operators import DiagonalOperator
class MaskOperator(DiagonalOperator):
    r'''MaskOperator: D: X -> X.

    Takes a DataContainer (or subclass thereof) ``mask`` whose entries are
    True/1.0 for values to keep and False/0.0 for values to be lost/set to
    zero. Maps an element :math:`x \in X` onto :math:`y \in X, y = mask*x`,
    where * denotes elementwise multiplication.

    :param mask: DataContainer of datatype bool or with 1/0 elements
    '''

    def __init__(self, mask):
        # A MaskOperator is exactly a DiagonalOperator whose diagonal is the
        # mask, so delegate all the work to the superclass constructor.
        super(MaskOperator, self).__init__(mask)
        # Alias so callers can refer to the diagonal by its domain name.
        self.mask = self.diagonal
if __name__ == '__main__':

    # Demo: restoration of a noisy, partially masked image using TV
    # regularisation solved with PDHG. A MaskOperator zeroes out four
    # rectangular regions; the reconstruction inpaints them while denoising.
    import matplotlib.pyplot as plt

    from ccpi.optimisation.algorithms import PDHG

    from ccpi.optimisation.operators import BlockOperator, Gradient
    from ccpi.optimisation.functions import ZeroFunction, L1Norm, \
        MixedL21Norm, BlockFunction, L2NormSquared, \
        KullbackLeibler
    from ccpi.framework import TestData
    import os
    import sys

    # Specify which type of noise to use.
    which_noise = 0
    print("which_noise ", which_noise)

    # Load in test image
    loader = TestData(data_dir=os.path.join(sys.prefix, 'share', 'ccpi'))
    data = loader.load(TestData.SHAPES)
    ig = data.geometry
    ag = ig

    # Create mask with four rectangles to be masked, set up MaskOperator.
    # NOTE: dtype must be the builtin bool — the np.bool alias was deprecated
    # in NumPy 1.20 and removed in NumPy 1.24.
    mask = ig.allocate(True, dtype=bool)
    amask = mask.as_array()
    amask[140:160, 10:90] = False
    amask[70:130, 140:160] = False
    amask[15:50, 180:240] = False
    amask[95:105, 180:295] = False

    MO = MaskOperator(mask)

    # Create noisy and masked data: first add noise, then mask the image
    # with MaskOperator.
    noises = ['gaussian', 'poisson', 's&p']
    noise = noises[which_noise]
    if noise == 's&p':
        n1 = TestData.random_noise(data.as_array(), mode=noise, salt_vs_pepper=0.9, amount=0.2)
    elif noise == 'poisson':
        scale = 5
        n1 = TestData.random_noise(data.as_array() / scale, mode=noise, seed=10) * scale
    elif noise == 'gaussian':
        n1 = TestData.random_noise(data.as_array(), mode=noise, seed=10)
    else:
        raise ValueError('Unsupported Noise ', noise)
    noisy_data = ig.allocate()
    noisy_data.fill(n1)
    noisy_data = MO.direct(noisy_data)

    # Regularisation parameter, depending on the noise distribution.
    if noise == 's&p':
        alpha = 0.8
    elif noise == 'poisson':
        alpha = 1.0
    elif noise == 'gaussian':
        alpha = .3

    # Choose data fidelity dependent on noise type.
    if noise == 's&p':
        f2 = L1Norm(b=noisy_data)
    elif noise == 'poisson':
        f2 = KullbackLeibler(noisy_data)
    elif noise == 'gaussian':
        f2 = 0.5 * L2NormSquared(b=noisy_data)

    # Create operators: spatial gradient (for TV) and the mask.
    op1 = Gradient(ig, correlation=Gradient.CORRELATION_SPACE)
    op2 = MO

    # Create BlockOperator combining both terms.
    operator = BlockOperator(op1, op2, shape=(2, 1))

    # Create functions: TV prior + data fidelity, no extra proximal term.
    f = BlockFunction(alpha * MixedL21Norm(), f2)
    g = ZeroFunction()

    # Compute operator norm (needed for convergent step sizes).
    normK = operator.norm()

    # Primal & dual stepsizes, chosen so that sigma * tau * ||K||^2 <= 1.
    sigma = 1
    tau = 1 / (sigma * normK ** 2)

    # Setup and run the PDHG algorithm.
    pdhg = PDHG(f=f, g=g, operator=operator, tau=tau, sigma=sigma)
    pdhg.max_iteration = 2000
    pdhg.update_objective_interval = 100
    pdhg.run(2000)

    # Show results: truth, corrupted data, reconstruction, line profiles.
    plt.figure(figsize=(20, 5))
    plt.subplot(1, 4, 1)
    plt.imshow(data.as_array())
    plt.title('Ground Truth')
    plt.colorbar()
    plt.subplot(1, 4, 2)
    plt.imshow(noisy_data.as_array())
    plt.title('Noisy Data')
    plt.colorbar()
    plt.subplot(1, 4, 3)
    plt.imshow(pdhg.get_output().as_array())
    plt.title('TV Reconstruction')
    plt.colorbar()
    plt.subplot(1, 4, 4)
    plt.plot(np.linspace(0, ig.shape[1], ig.shape[1]), data.as_array()[int(ig.shape[0] / 2), :], label='GTruth')
    plt.plot(np.linspace(0, ig.shape[1], ig.shape[1]), noisy_data.as_array()[int(ig.shape[0] / 2), :], label='Noisy and masked')
    plt.plot(np.linspace(0, ig.shape[1], ig.shape[1]), pdhg.get_output().as_array()[int(ig.shape[0] / 2), :], label='TV reconstruction')
    plt.legend()
    plt.title('Middle Line Profiles')
    plt.show()
| 34.352201
| 133
| 0.641706
|
4a00f9509e7fcd255bdc819a8a8c71fafe79d788
| 18,536
|
py
|
Python
|
demisto_sdk/tests/constants_test.py
|
kfirstri/demisto-sdk
|
59d99cf4b5016be8a4a333c2541418e1612549e1
|
[
"MIT"
] | null | null | null |
demisto_sdk/tests/constants_test.py
|
kfirstri/demisto-sdk
|
59d99cf4b5016be8a4a333c2541418e1612549e1
|
[
"MIT"
] | null | null | null |
demisto_sdk/tests/constants_test.py
|
kfirstri/demisto-sdk
|
59d99cf4b5016be8a4a333c2541418e1612549e1
|
[
"MIT"
] | null | null | null |
import demisto_sdk.commands.common.constants as constants
import pytest
from demisto_sdk.commands.common.legacy_git_tools import git_path
GIT_ROOT = "{}".format(git_path())
INVALID_PLAYBOOK_PATH = f"{GIT_ROOT}/demisto_sdk/tests/test_files/Playbooks.playbook-invalid.yml"
VALID_TEST_PLAYBOOK_PATH = f"{GIT_ROOT}/demisto_sdk/tests/test_files/Playbooks.playbook-test.yml"
VALID_BETA_PLAYBOOK_PATH = f"{GIT_ROOT}/demisto_sdk/tests/test_files/beta-playbook-valid.yml"
VALID_PLAYBOOK_ARCSIGHT_ADD_DOMAIN_PATH = f"{GIT_ROOT}/demisto_sdk/tests/test_files/Playbooks." \
f"playbook-ArcSight_Add_Domain_Indicators.yml"
INVALID_INTEGRATION_NO_TESTS = f'{GIT_ROOT}/demisto_sdk/tests/test_files/non-valid-integration-no-test-playbooks.yml'
INVALID_INTEGRATION_NON_CONFIGURED_TESTS = f'{GIT_ROOT}/demisto_sdk/tests/test_files/' \
f'non-valid-integration-test-not-configured.yml'
TEST_PLAYBOOK = f'{GIT_ROOT}/demisto_sdk/tests/test_files/playbook-TestPlaybooks.yml'
VALID_PYTHON_INTEGRATION_TEST_PATH = f"{GIT_ROOT}/demisto_sdk/tests/test_files/integration_test.py"
VALID_PYTHON_INTEGRATION_PATH = f"{GIT_ROOT}/demisto_sdk/tests/test_files/integration-test.py"
VALID_METADATA1_PATH = f'{GIT_ROOT}/demisto_sdk/tests/test_files/1.pack_metadata.json'
VALID_METADATA2_PATH = f'{GIT_ROOT}/demisto_sdk/tests/test_files/2.pack_metadata.json'
VALID_DESCRIPTION_PATH = f'{GIT_ROOT}/demisto_sdk/tests/test_files/integration-test_description.md'
VALID_README_PATH = f'{GIT_ROOT}/demisto_sdk/tests/test_files/integration-test_README.md'
VALID_IMAGE_PATH = f'{GIT_ROOT}/demisto_sdk/tests/test_files/integration-test_image.png'
NOT_VALID_IMAGE_PATH = f'{GIT_ROOT}/demisto_sdk/tests/test_files/default.png'
VALID_PIPEFILE_PATH = f'{GIT_ROOT}/demisto_sdk/tests/test_files/Pipfile'
VALID_PIPEFILE_LOCK_PATH = f'{GIT_ROOT}/demisto_sdk/tests/test_files/Pipfile.lock'
VALID_PACK_IGNORE_PATH = f'{GIT_ROOT}/demisto_sdk/tests/test_files/.pack-ignore'
VALID_SECRETS_IGNORE_PATH = f'{GIT_ROOT}/demisto_sdk/tests/test_files/.secrets-ignore'
VALID_CLASSIFIER_PATH = f'{GIT_ROOT}/demisto_sdk/tests/test_files/classifier.json'
VALID_JSON_FILE_FOR_UNIT_TESTING = f'{GIT_ROOT}/demisto_sdk/tests/test_files/fake_pack/Integrations/' \
f'test_data/results.json'
VALID_DOC_FILES_PATH_FOR_UNIT_TESTING = f"{GIT_ROOT}/demisto_sdk/tests/test_files/content_slim/Packs/Sample01/" \
f"doc_files/sample_packs.png"
VALID_INTEGRATION_TEST_PATH = f"{GIT_ROOT}/demisto_sdk/tests/test_files/integration-test.yml"
INVALID_INTEGRATION_WITH_NO_TEST_PLAYBOOK = 'demisto_sdk/tests/test_files/integration-test-with-no-test-playbook.yml'
VALID_INTEGRATION_ID_PATH = f"{GIT_ROOT}/demisto_sdk/tests/test_files/integration-valid-id-test.yml"
INVALID_INTEGRATION_ID_PATH = f"{GIT_ROOT}/demisto_sdk/tests/test_files/integration-invalid-id-test.yml"
VALID_BETA_INTEGRATION_PATH = f"{GIT_ROOT}/demisto_sdk/tests/test_files/integration-test-beta.yml"
INVALID_PLAYBOOK_PATH_FROM_ROOT = f"{GIT_ROOT}/demisto_sdk/tests/test_files/playbook-disconnected_from_root.yml"
VALID_PLAYBOOK_ID_PATH = f"{GIT_ROOT}/demisto_sdk/tests/test_files/playbook-valid-id-test.yml"
INVALID_PLAYBOOK_ID_PATH = f"{GIT_ROOT}/demisto_sdk/tests/test_files/playbook-invalid-id-test.yml"
INVALID_PLAYBOOK_CONDITION_1 = f"{GIT_ROOT}/demisto_sdk/tests/test_files/playbook-Invalid_condition_unhandled_" \
f"branch.yml"
INVALID_IGNORED_UNIFIED_INTEGRATION = f'{GIT_ROOT}/demisto_sdk/tests/test_files/integration_ignored_invalid_unified.yml'
IGNORED_PNG = f'{GIT_ROOT}/demisto_sdk/tests/test_files/docs_test/closing_params.png'
SCRIPT_WITH_PLAYBOOK = 'demisto_sdk/tests/test_files/script-with-test-playbook.yml'
INVALID_PLAYBOOK_CONDITION_2 = f"{GIT_ROOT}/demisto_sdk/tests/test_files/playbook-Invalid_condition_unhandled_" \
f"branch_and_unhandled_condition.yml"
VALID_PLAYBOOK_CONDITION = f"{GIT_ROOT}/demisto_sdk/tests/test_files/playbook-valid_condition.yml"
VALID_REPUTATION_PATH = f"{GIT_ROOT}/demisto_sdk/tests/test_files/reputations-valid.json"
INVALID_REPUTATION_PATH = f"{GIT_ROOT}/demisto_sdk/tests/test_files/reputations-invalid.json"
VALID_LAYOUT_PATH = f"{GIT_ROOT}/demisto_sdk/tests/test_files/layout-valid.json"
INVALID_LAYOUT_PATH = f"{GIT_ROOT}/demisto_sdk/tests/test_files/layout-invalid.json"
VALID_LAYOUT_CONTAINER_PATH = f"{GIT_ROOT}/demisto_sdk/tests/test_files/layoutscontainer_valid.json"
INVALID_LAYOUT_CONTAINER_PATH = f"{GIT_ROOT}/demisto_sdk/tests/test_files/layoutscontainer_invalid.json"
VALID_INCIDENT_TYPE_PATH = f"{GIT_ROOT}/demisto_sdk/tests/test_files/incidenttype-valid.json"
VALID_WIDGET_PATH = f"{GIT_ROOT}/demisto_sdk/tests/test_files/widget-valid.json"
INVALID_WIDGET_PATH = f"{GIT_ROOT}/demisto_sdk/tests/test_files/widget-invalid.json"
VALID_DASHBOARD_PATH = f"{GIT_ROOT}/demisto_sdk/tests/test_files/dashboard-valid.json"
INVALID_DASHBOARD_PATH = f"{GIT_ROOT}/demisto_sdk/tests/test_files/dashboard-invalid.json"
VALID_INCIDENT_FIELD_PATH = f"{GIT_ROOT}/demisto_sdk/tests/test_files/incidentfield-valid.json"
INVALID_INCIDENT_FIELD_PATH = f"{GIT_ROOT}/demisto_sdk/tests/test_files/incidentfield-invalid.json"
VALID_INDICATOR_FIELD_PATH = f"{GIT_ROOT}/demisto_sdk/tests/test_files/indicatorfield-valid.json"
INVALID_WIDGET_VERSION_PATH = f"{GIT_ROOT}/demisto_sdk/tests/test_files/widget-invalid-version.json"
VALID_SCRIPT_PATH = f"{GIT_ROOT}/demisto_sdk/tests/test_files/script-valid.yml"
INVALID_SCRIPT_PATH = f"{GIT_ROOT}/demisto_sdk/tests/test_files/script-invalid.yml"
VALID_ONE_LINE_CHANGELOG_PATH = f"{GIT_ROOT}/demisto_sdk/tests/test_files/valid-one-line_CHANGELOG.md"
VALID_ONE_LINE_LIST_CHANGELOG_PATH = f"{GIT_ROOT}/demisto_sdk/tests/test_files/valid-one-line-list_CHANGELOG.md"
VALID_MULTI_LINE_CHANGELOG_PATH = f"{GIT_ROOT}/demisto_sdk/tests/test_files/valid-multi-line_CHANGELOG.md"
VALID_MULTI_LINE_LIST_CHANGELOG_PATH = f"{GIT_ROOT}/demisto_sdk/tests/test_files/valid-multi-line-list_CHANGELOG.md"
INVALID_ONE_LINE_1_CHANGELOG_PATH = f"{GIT_ROOT}/demisto_sdk/tests/test_files/invalid-one-line_1_CHANGELOG.md"
INVALID_ONE_LINE_2_CHANGELOG_PATH = f"{GIT_ROOT}/demisto_sdk/tests/test_files/invalid-one-line_2_CHANGELOG.md"
INVALID_ONE_LINE_LIST_1_CHANGELOG_PATH = f"{GIT_ROOT}/demisto_sdk/tests/test_files/invalid-one-line-list_1_CHANGELOG.md"
INVALID_ONE_LINE_LIST_2_CHANGELOG_PATH = f"{GIT_ROOT}/demisto_sdk/tests/test_files/invalid-one-line-list_2_CHANGELOG.md"
INVALID_MULTI_LINE_1_CHANGELOG_PATH = f"{GIT_ROOT}/demisto_sdk/tests/test_files/invalid-multi-line_1_CHANGELOG.md"
INVALID_MULTI_LINE_2_CHANGELOG_PATH = f"{GIT_ROOT}/demisto_sdk/tests/test_files/invalid-multi-line_2_CHANGELOG.md"
PACK_TARGET = "Packs/TestPack"
LAYOUT_TARGET = f"{PACK_TARGET}/Layouts/layout-mock.json"
LAYOUTS_CONTAINER_TARGET = f"{PACK_TARGET}/Layouts/layoutscontainer-mock.json"
INDICATOR_TYPE_TARGET = f"{PACK_TARGET}/IndicatorTypes/reputations-valid.json"
WIDGET_TARGET = f"{PACK_TARGET}/Widgets/widget-mocks.json"
DASHBOARD_TARGET = f"{PACK_TARGET}/Dashboards/dashboard-mocks.json"
PLAYBOOK_TARGET = f"{PACK_TARGET}/Playbooks/playbook-test.yml"
INTEGRATION_TARGET = f"{PACK_TARGET}/Integrations/integration-test.yml"
INCIDENT_FIELD_TARGET = f"{PACK_TARGET}/IncidentFields/incidentfield-test.json"
INCIDENT_TYPE_TARGET = f"{PACK_TARGET}/IncidentTypes/incidenttype-valid.json"
PLAYBOOK_PACK_TARGET = "Packs/Int/Playbooks/playbook-test.yml"
INVALID_TEST_PLAYBOOK_UNHANDLED_CONDITION = f'{GIT_ROOT}/demisto_sdk/tests/test_files/content_repo_example/Packs/' \
f'FeedAzure/TestPlaybooks/playbook-FeedAzure_test_copy_no_prefix.yml'
INVALID_PLAYBOOK_UNHANDLED_CONDITION = f'{GIT_ROOT}/demisto_sdk/tests/test_files/content_repo_example/Packs/' \
f'FeedAzure/Playbooks/FeedAzure_test.yml'
SCRIPT_TARGET = f"{PACK_TARGET}/Scripts/script-test.yml"
SCRIPT_RELEASE_NOTES_TARGET = f"{PACK_TARGET}/Scripts/script-test_CHANGELOG.md"
INTEGRATION_RELEASE_NOTES_TARGET = f"{PACK_TARGET}/Integrations/integration-test_CHANGELOG.md"
SOURCE_FORMAT_INTEGRATION_COPY = f"{GIT_ROOT}/demisto_sdk/tests/test_files/format_New_Integration_copy.yml"
DESTINATION_FORMAT_INTEGRATION_COPY = "new_format_New_Integration_copy.yml"
SOURCE_FORMAT_SCRIPT_COPY = f"{GIT_ROOT}/demisto_sdk/tests/test_files/format_New_script_copy.yml"
DESTINATION_FORMAT_SCRIPT_COPY = "new_format_New_script_copy.yml"
SOURCE_FORMAT_PLAYBOOK_COPY = f"{GIT_ROOT}/demisto_sdk/tests/test_files/format_new_playbook_copy.yml"
DESTINATION_FORMAT_PLAYBOOK_COPY = "playbook-new_format_new_playbook_copy.yml"
INTEGRATION_WITH_TEST_PLAYBOOKS = f'{GIT_ROOT}/demisto_sdk/tests/test_files/format_Integration_with_test_playbooks.yml'
PLAYBOOK_WITH_TEST_PLAYBOOKS = f'{GIT_ROOT}/demisto_sdk/tests/test_files/format_playbook_with_test_playbooks.yml'
PLAYBOOK_WITH_INCIDENT_INDICATOR_SCRIPTS = f"{GIT_ROOT}/demisto_sdk/tests/test_files/playbook-with-incidnet-" \
f"indicator-fields.yml"
SCRIPT_WITH_TEST_PLAYBOOKS = f'{GIT_ROOT}/demisto_sdk/tests/test_files/format_script_with_test_playbooks.yml'
INDICATORFIELD_EXTRA_FIELDS = f"{GIT_ROOT}/demisto_sdk/tests/test_files/indicatorfield-extra-fields.json"
INDICATORFIELD_EXACT_SCHEME = f"{GIT_ROOT}/demisto_sdk/tests/test_files/indicator-field-exact-scheme.json"
INDICATORFIELD_MISSING_FIELD = f"{GIT_ROOT}/demisto_sdk/tests/test_files/indicator-field-missing-field.json"
INDICATORFIELD_MISSING_AND_EXTRA_FIELDS = f"{GIT_ROOT}/demisto_sdk/tests/test_files/" \
f"indicatorfield-missing-and-extra-fields.json"
INVALID_INTEGRATION_YML_1 = f"{GIT_ROOT}/demisto_sdk/tests/test_files/integration-invalid-yml1.yml"
INVALID_INTEGRATION_YML_2 = f"{GIT_ROOT}/demisto_sdk/tests/test_files/integration-invalid-yml2.yml"
INVALID_INTEGRATION_YML_3 = f"{GIT_ROOT}/demisto_sdk/tests/test_files/integration-invalid-yml3.yml"
INVALID_INTEGRATION_YML_4 = f"{GIT_ROOT}/demisto_sdk/tests/test_files/integration-invalid-yml4.yml"
VALID_REPUTATION_FILE = f"{GIT_ROOT}/demisto_sdk/tests/test_files/reputation-cidr-valid.json"
INVALID_REPUTATION_FILE = f"{GIT_ROOT}/demisto_sdk/tests/test_files/reputation-cidr-invalid.json"
EQUAL_VAL_FORMAT_PLAYBOOK_SOURCE = f"{GIT_ROOT}/demisto_sdk/tests/test_files/playbook-invalid-equal.yml"
EQUAL_VAL_FORMAT_PLAYBOOK_DESTINATION = "Playbooks/playbook-invalid-equal.yml"
EQUAL_VAL_PATH = 'Playbooks'
INVALID_NO_HIDDEN_PARAMS = f"{GIT_ROOT}/demisto_sdk/tests/test_files/integration-invalid-no-hidden-params.yml"
VALID_NO_HIDDEN_PARAMS = f"{GIT_ROOT}/demisto_sdk/tests/test_files/integration-valid-no-unallowed-hidden-params.yml"
GIT_HAVE_MODIFIED_AND_NEW_FILES = f"{GIT_ROOT}/demisto_sdk/tests/test_files/git_have_modified_and_new_files.json"
SOURCE_FORMAT_INCIDENTFIELD_COPY = f"{GIT_ROOT}/demisto_sdk/tests/test_files/format_incidentfield-copy.json"
DESTINATION_FORMAT_INCIDENTFIELD_COPY = "IncidentFields/incidentfield-copy.json"
INCIDENTFIELD_PATH = "IncidentFields"
SOURCE_FORMAT_INCIDENTTYPE_COPY = f"{GIT_ROOT}/demisto_sdk/tests/test_files/format_incidenttype-copy.json"
SOURCE_DESCRIPTION_WITH_CONTRIB_DETAILS = f"{GIT_ROOT}/demisto_sdk/tests/test_files/description_with_contrib_details.md"
SOURCE_DESCRIPTION_FORMATTED_CONTRIB_DETAILS = f"{GIT_ROOT}/demisto_sdk/tests/test_files/" \
f"description_formatted_contrib_details.md"
DESTINATION_FORMAT_DESCRIPTION_COPY = "Description/formatted_description-test.md"
DESCRIPTION_PATH = "Description"
DESTINATION_FORMAT_INCIDENTTYPE_COPY = "IncidentTypes/incidenttype-copy.json"
INCIDENTTYPE_PATH = "IncidentTypes"
SOURCE_FORMAT_INDICATORFIELD_COPY = f"{GIT_ROOT}/demisto_sdk/tests/test_files/format_indicatorfield-copy.json"
DESTINATION_FORMAT_INDICATORFIELD_COPY = "IndicatorFields/incidentfield-copy.json"
INDICATORFIELD_PATH = "IndicatorFields"
SOURCE_FORMAT_INDICATORTYPE_COPY = f"{GIT_ROOT}/demisto_sdk/tests/test_files/format_indicatortype-copy.json"
DESTINATION_FORMAT_INDICATORTYPE_COPY = "Packs/Base/Misc/reputation-copy.json"
INDICATORTYPE_PATH = "Packs/Base/Misc"
SOURCE_FORMAT_LAYOUT_COPY = f"{GIT_ROOT}/demisto_sdk/tests/test_files/format_layout-copy.json"
DESTINATION_FORMAT_LAYOUT_COPY = "Layouts/layout-copy.json"
DESTINATION_FORMAT_LAYOUT_INVALID_NAME_COPY = "Layouts/layoutt-copy.json"
LAYOUT_PATH = "Layouts"
LAYOUT_SCHEMA_PATH = f"{GIT_ROOT}/demisto_sdk/commands/common/schemas/layout.yml"
SOURCE_FORMAT_LAYOUTS_CONTAINER = f"{GIT_ROOT}/demisto_sdk/tests/test_files/format_layoutscontainer-for-class-test.json"
SOURCE_FORMAT_LAYOUTS_CONTAINER_COPY = f"{GIT_ROOT}/demisto_sdk/tests/test_files/format_layoutscontainer-test.json"
DESTINATION_FORMAT_LAYOUTS_CONTAINER_COPY = "Layouts/formatted_layoutscontainer-test.json"
LAYOUTS_CONTAINER_PATH = "Layouts"
LAYOUTS_CONTAINER_SCHEMA_PATH = f"{GIT_ROOT}/demisto_sdk/commands/common/schemas/layoutscontainer.yml"
SOURCE_FORMAT_CLASSIFIER_5_9_9 = f"{GIT_ROOT}/demisto_sdk/tests/test_files/format_classifier_5_9_9.json"
DESTINATION_FORMAT_CLASSIFIER_5_9_9 = "Classifiers/formatted_classifier_5_9_9.json"
CLASSIFIER_5_9_9_SCHEMA_PATH = f"{GIT_ROOT}/demisto_sdk/commands/common/schemas/classifier_5_9_9.yml"
SOURCE_FORMAT_CLASSIFIER = f"{GIT_ROOT}/demisto_sdk/tests/test_files/format_new_classifier.json"
DESTINATION_FORMAT_CLASSIFIER = "Classifiers/formatted_classifier.json"
CLASSIFIER_SCHEMA_PATH = f"{GIT_ROOT}/demisto_sdk/commands/common/schemas/classifier.yml"
CLASSIFIER_PATH = "Classifiers"
SOURCE_FORMAT_MAPPER = f"{GIT_ROOT}/demisto_sdk/tests/test_files/format_mapper.json"
DESTINATION_FORMAT_MAPPER = "Classifiers/formatted_mapper.json"
MAPPER_SCHEMA_PATH = f"{GIT_ROOT}/demisto_sdk/commands/common/schemas/mapper.yml"
MAPPER_PATH = "Classifiers"
SOURCE_FORMAT_DASHBOARD_COPY = f"{GIT_ROOT}/demisto_sdk/tests/test_files/format_dashboard-copy.json"
DESTINATION_FORMAT_DASHBOARD_COPY = "Dashboards/dashboard-copy.json"
DASHBOARD_PATH = "Dashboards"
SOURCE_FORMAT_WIDGET = f"{GIT_ROOT}/demisto_sdk/tests/test_files/format_widget.json"
DESTINATION_FORMAT_WIDGET = "Widgets/formatted-widget.json"
WIDGET_PATH = "Widgets"
SOURCE_FORMAT_REPORT = f"{GIT_ROOT}/demisto_sdk/tests/test_files/format_report.json"
DESTINATION_FORMAT_REPORT = "Reports/formatted-Reports.json"
REPORT_PATH = "Reports"
SOURCE_FORMAT_PLAYBOOK = f"{GIT_ROOT}/demisto_sdk/tests/test_files/format_playbook.yml"
DESTINATION_FORMAT_PLAYBOOK = "Playbook/playbook.yml"
PLAYBOOK_PATH = "Playbook"
SOURCE_FORMAT_TEST_PLAYBOOK = f"{GIT_ROOT}/demisto_sdk/tests/test_files/format_test_playbook.yml"
DESTINATION_FORMAT_TEST_PLAYBOOK = "TestPlaybook/test-playbook.yml"
TEST_PLAYBOOK_PATH = "TestPlaybook"
VALID_MD = f'{git_path()}/demisto_sdk/tests/test_files/README-valid.md'
INVALID_MD = f'{git_path()}/demisto_sdk/tests/test_files/README-invalid.md'
DEFAULT_IMAGE = f'{git_path()}/demisto_sdk/tests/test_files/default_image.png'
VALID_PACK = f'{git_path()}/demisto_sdk/tests/test_files/content_repo_example/Packs/FeedAzure'
VALID_PACK_RELATIVE_PATH = 'Packs/FeedAzure'
VALID_BETA_INTEGRATION = f'{git_path()}/demisto_sdk/tests/test_files/valid-beta-integration.yml'
INVALID_BETA_INTEGRATION = f'{git_path()}/demisto_sdk/tests/test_files/invalid-beta-integration.yml'
INVALID_OUTPUT_PATH = f"{GIT_ROOT}/demisto_sdk/tests/test_files"
CONF_JSON_MOCK_PATH = f'{GIT_ROOT}/demisto_sdk/tests/test_files/conf.json'
SOURCE_FORMAT_INTEGRATION_VALID = f"{GIT_ROOT}/demisto_sdk/tests/test_files/integration-fetch-valid.yml"
SOURCE_FORMAT_INTEGRATION_INVALID = f"{GIT_ROOT}/demisto_sdk/tests/test_files/integration-fetch-invalid.yml"
FEED_INTEGRATION_VALID = f"{GIT_ROOT}/demisto_sdk/tests/test_files/integration-feedvalid.yml"
FEED_INTEGRATION_EMPTY_VALID = f"{GIT_ROOT}/demisto_sdk/tests/test_files/integration-feed-empty-valid.yml"
FEED_INTEGRATION_INVALID = f"{GIT_ROOT}/demisto_sdk/tests/test_files/integration-feed-invalid.yml"
XSOAR_LINTER_PY3_VALID = f"{GIT_ROOT}/demisto_sdk/tests/test_files/valid_py3_XSOARLinter.py"
XSOAR_LINTER_PY3_INVALID = f"{GIT_ROOT}/demisto_sdk/tests/test_files/invalid_py3_XSOARLinter.py"
XSOAR_LINTER_PY3_INVALID_WARNINGS = f"{GIT_ROOT}/demisto_sdk/tests/test_files/invalid_py3_XSOARLinter_warnings.py"
XSOAR_LINTER_PY3_INVALID_WARNINGS_PARTNER = f"{GIT_ROOT}/demisto_sdk/tests/test_files/invalid_py3_XSOARLinter_warnings_partner.py"
DESTINATION_FORMAT_INTEGRATION = "Integrations/integration.yml"
INTEGRATION_PATH = "Integrations"
CONNECTION_SCHEMA_PATH = f"{GIT_ROOT}/demisto_sdk/commands/common/schemas/canvas-context-connections.yml"
# Expected directory skeleton of a content pack; tests use this list to
# create or verify pack layouts on disk. All entries are relative paths
# rooted at PACK_TARGET, except the shared top-level tests directory.
DIR_LIST = [
    f'{PACK_TARGET}/{constants.INTEGRATIONS_DIR}',
    f'{PACK_TARGET}/{constants.SCRIPTS_DIR}',
    f'{PACK_TARGET}/{constants.PLAYBOOKS_DIR}',
    f'{PACK_TARGET}/{constants.REPORTS_DIR}',
    f'{PACK_TARGET}/{constants.DASHBOARDS_DIR}',
    f'{PACK_TARGET}/{constants.WIDGETS_DIR}',
    f'{PACK_TARGET}/{constants.INCIDENT_TYPES_DIR}',
    f'{PACK_TARGET}/{constants.INCIDENT_FIELDS_DIR}',
    f'{PACK_TARGET}/{constants.LAYOUTS_DIR}',
    f'{PACK_TARGET}/{constants.CLASSIFIERS_DIR}',
    f'{PACK_TARGET}/{constants.INDICATOR_TYPES_DIR}',
    f'{PACK_TARGET}/{constants.CONNECTIONS_DIR}',
    f'{PACK_TARGET}/{constants.INDICATOR_FIELDS_DIR}',
    constants.TESTS_DIR
]
class TestGithubContentConfig:
    """Tests for GithubContentConfig repository-name resolution."""

    @pytest.mark.parametrize(
        'url',
        [
            'ssh://git@github.com/demisto/content-dist.git',
            'git@github.com:demisto/content-dist.git',  # clone using github ssh example
            'https://github.com/demisto/content-dist.git',  # clone using github https example
            'https://github.com/demisto/content-dist'
        ]
    )
    def test_get_repo_name(self, url: str):
        """
        Given:
            No repository (not running in git)
        When:
            A known output of git.Repo().remotes().url
        Then:
            Validate the correct repo got back (demisto/content)
        """
        config = constants.GithubContentConfig()
        repo_name = config._get_repository_name([url])
        assert repo_name == 'demisto/content-dist'

    def test_get_repo_name_empty_case(self):
        """
        Given:
            No repository (not running in git)
        When:
            Searching for repository name
        Then:
            Validate the correct repo got back - demisto/content
        """
        config = constants.GithubContentConfig()
        assert config._get_repository_name([]) == config.OFFICIAL_CONTENT_REPO_NAME
| 69.423221
| 130
| 0.81107
|
4a00f973ba48cb6d3ca1345fc8bff3b36f18c329
| 2,989
|
py
|
Python
|
hw1_fit_a_function/main.py
|
ybs985/DeepLearning
|
d38b682be054f5c997bbcefc7c9056e9da9ddf4b
|
[
"MIT"
] | null | null | null |
hw1_fit_a_function/main.py
|
ybs985/DeepLearning
|
d38b682be054f5c997bbcefc7c9056e9da9ddf4b
|
[
"MIT"
] | null | null | null |
hw1_fit_a_function/main.py
|
ybs985/DeepLearning
|
d38b682be054f5c997bbcefc7c9056e9da9ddf4b
|
[
"MIT"
] | null | null | null |
import paddle
from paddle.nn import Linear
import paddle.nn.functional as F
import numpy as np
import math
import random
from matplotlib import pyplot as plt
# Target function to fit (standard normal probability density) and the
# number of data points to sample from it.
sample_point = 1000


def f(x):
    """Return the standard normal pdf evaluated at x."""
    return math.exp(-0.5 * x ** 2) / math.sqrt(2.0 * math.pi)
# Draw sample_point inputs uniformly from [-2, 2] and evaluate the target
# function at each one.
x = np.zeros(sample_point)
y = np.zeros(sample_point)
for i in range(sample_point):
    x[i] = random.uniform(-2.0, 2.0)
    y[i] = f(x[i])
# Convert to paddle Tensors; feeding raw numpy arrays to the network
# raises an error.
x = paddle.to_tensor(x, dtype='float32')
y = paddle.to_tensor(y, dtype='float32')
#建立多层感知机
class Net(paddle.nn.Layer):
    """Two-layer perceptron: linear -> ReLU -> linear."""

    def __init__(self, n_feature, n_hidden, n_output):
        super(Net, self).__init__()
        # Hidden projection and output head.
        self.hidden = Linear(n_feature, n_hidden)
        self.predict = Linear(n_hidden, n_output)

    def forward(self, x):
        hidden_out = F.relu(self.hidden(x))
        return self.predict(hidden_out)
# Network dimensions: each mini-batch of 2 scalar samples is fed as one
# flat length-2 vector, hence 2 inputs and 2 outputs.
net = Net(2, 32, 2)

# Train the model.
epochs = 200
batch_cnt = 400
x_traingraph = []
y_traingraph = []  # inputs/predictions of the final epoch, kept for plotting
optimizer = paddle.optimizer.SGD(learning_rate=0.1, parameters=net.parameters())
for i in range(epochs):
    for j in range(batch_cnt):
        x_train = x[j * 2:j * 2 + 2]  # slice of 2 to match the network input size
        y_train = y[j * 2:j * 2 + 2]
        prediction = net(x_train)
        if i == (epochs - 1):
            x_traingraph.append(x_train)
            y_traingraph.append(prediction)
        loss = F.square_error_cost(prediction, y_train)  # elementwise squared difference of the two tensors
        loss_avg = paddle.mean(loss)
        if i % 10 == 0:
            print("epoch:{},loss:{}".format(i, loss_avg.numpy()))
        loss_avg.backward()
        optimizer.step()
        optimizer.clear_grad()
paddle.save(net.state_dict(), 'MLP.pdparams')  # save the trained parameters
plt.clf()

# Plot the predictions recorded during the final training epoch (red).
x_traingraph = np.array(x_traingraph)
y_traingraph = np.array(y_traingraph)
plt.figure(1)
plt.plot(x_traingraph, y_traingraph, 'r.')
# Original samples (green): the first 800 generated points.
x_gauss = x[0:800]
y_gauss = y[0:800]
x_gauss = np.array(x_gauss)
y_gauss = np.array(y_gauss)
plt.plot(x_gauss, y_gauss, 'g.')
plt.show()

# Test the model on the held-out samples (indices 800-999).
x_testgraph = []
y_testgraph = []
params_file_path = 'MLP.pdparams'
param_dict = paddle.load(params_file_path)  # load the saved model parameters
net.load_dict(param_dict)
net.eval()  # switch to inference mode, then feed the test data
for j in range(100):
    x_test = x[800 + j * 2:800 + j * 2 + 2]
    y_test = y[800 + j * 2:800 + j * 2 + 2]
    test = net(x_test)
    x_testgraph.append(x_test)
    y_testgraph.append(test)
    loss = F.square_error_cost(test, y_test)
    loss_avg = paddle.mean(loss)
    print("loss:{}".format(loss_avg.numpy()))
# Plot test predictions (red) against the original samples (green).
x_testgraph = np.array(x_testgraph)
y_testgraph = np.array(y_testgraph)
plt.figure(2)
plt.plot(x_testgraph, y_testgraph, 'r.')
# Original samples
x_gauss = x[800:1000]
y_gauss = y[800:1000]
x_gauss = np.array(x_gauss)
y_gauss = np.array(y_gauss)
plt.plot(x_gauss, y_gauss, 'g.')
plt.show()

# Draw the fitted function as a curve against the true pdf.
# True function, sorted by x so it draws as a single line.
plt.figure(3)
x_gauss = x_gauss[np.argsort(x_gauss)]
for i in range(np.size(x_gauss)):
    y_gauss[i] = f(x_gauss[i])
plt.plot(x_gauss, y_gauss, ls='-', color='green')
# Fitted function, also sorted by its x coordinates.
arrIndex = np.argsort(x_testgraph[:, 0], axis=0)
x_testgraph = x_testgraph[:, 0][arrIndex]
y_testgraph = y_testgraph[:, 0][arrIndex]
plt.plot(x_testgraph, y_testgraph, ls='-', color='red')
plt.legend(["Primitive Function", "Fitted Function"])
plt.show()
| 26.451327
| 77
| 0.71094
|
4a00f9a38bc6588836dbf7f78612191b750b1219
| 2,249
|
py
|
Python
|
Pipelines/general_ner.py
|
FredHutch/HutchNER
|
567f48b2ea283ba302c9246eb5bb494d6b19b34c
|
[
"Apache-2.0"
] | null | null | null |
Pipelines/general_ner.py
|
FredHutch/HutchNER
|
567f48b2ea283ba302c9246eb5bb494d6b19b34c
|
[
"Apache-2.0"
] | null | null | null |
Pipelines/general_ner.py
|
FredHutch/HutchNER
|
567f48b2ea283ba302c9246eb5bb494d6b19b34c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2016-2017 Fred Hutchinson Cancer Research Center
#
# Licensed under the Apache License, Version 2.0: http://www.apache.org/licenses/LICENSE-2.0
#
from flask import json
from DataLoading.JSONDataLoader import JSONDataLoader
def docs2json(docs):
    """Serialise NER-annotated documents to a JSON string.

    :param docs: mapping of id -> document object; each document exposes
        ``document_id``, ``text`` and an iterable of spaCy-style ``tokens``
        (each with ``idx``, ``orth_`` and ``ent_type_``).
    :return: JSON string mapping document_id -> {'text', 'NER_labels'},
        where NER_labels holds one span dict per token.
    """
    doc_dict = dict()
    # Iterate values only: the mapping keys are unused, and the original
    # loop variable shadowed the builtin `id`.
    for doc in docs.values():
        labels = list()
        for tok in doc.tokens:
            # spaCy reports an empty entity type for non-entity tokens;
            # emit the conventional "O" (outside) tag instead.
            entity = "O" if tok.ent_type_ == "" else tok.ent_type_
            labels.append({
                'start': tok.idx,
                'stop': tok.idx + len(tok.orth_),
                'text': tok.orth_,
                'confidence': 1,
                'label': entity
            })
        doc_dict[doc.document_id] = {'text': doc.text, 'NER_labels': labels}
    return json.dumps(doc_dict, ensure_ascii=False)
def main(documents, model):
    """Preprocess *documents* with the given spaCy model and return the
    NER annotations serialised as a JSON string."""
    loader = JSONDataLoader(documents=documents)
    processed_docs = loader.preprocess(spacy_model=model)
    return docs2json(processed_docs)
if __name__ == '__main__':
    # initialize large models on server startup
    import en_core_web_sm
    spacy_model = en_core_web_sm.load()
    # Sample clinical note used as a smoke test for the NER pipeline.
    documents = {"1": "HISTORY OF PRESENT ILLNESS: Mr. Bob is a 1000000-year-old gentleman with coronary artery disease, hypertension, hypercholesterolemia, COPD and tobacco abuse. He reports doing well. He did have some more knee pain for a few weeks, but this has resolved. He is having more trouble with his sinuses. I had started him on Flonase back in December. He says this has not really helped. Over the past couple weeks he has had significant congestion and thick discharge. No fevers or headaches but does have diffuse upper right-sided teeth pain. He denies any chest pains, nausea, PND, orthopnea, edema or syncope. His breathing is doing fine. No cough. He continues to smoke about half-a-pack per day. He plans on trying the patches again.\
\
CURRENT MEDICATIONS: Updated on CIS. They include aspirin, atenolol, Lipitor, Advair, Spiriva, albuterol and will add Singulair today.\
\
ALLERGIES: Sulfa caused a rash."}
    response = main(documents=documents, model=spacy_model)
| 46.854167
| 754
| 0.691418
|
4a00fb7c0d63aa461b25ddd615d1c2292099b314
| 13,833
|
py
|
Python
|
python/GafferUITest/WidgetTest.py
|
timlehr/gaffer
|
354acd6af7500e0bd1ce19d7c417929e2f0a919e
|
[
"BSD-3-Clause"
] | null | null | null |
python/GafferUITest/WidgetTest.py
|
timlehr/gaffer
|
354acd6af7500e0bd1ce19d7c417929e2f0a919e
|
[
"BSD-3-Clause"
] | null | null | null |
python/GafferUITest/WidgetTest.py
|
timlehr/gaffer
|
354acd6af7500e0bd1ce19d7c417929e2f0a919e
|
[
"BSD-3-Clause"
] | null | null | null |
##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2011-2015, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import weakref
import sys
import imath
import IECore
import Gaffer
import GafferTest
import GafferUI
import GafferUITest
from Qt import QtCore
from Qt import QtGui
from Qt import QtWidgets
class TestWidget( GafferUI.Widget ) :
    """Trivial concrete Widget wrapping a QLabel, for use in the tests below."""

    def __init__( self, **kw ) :

        label = QtWidgets.QLabel( "hello" )
        GafferUI.Widget.__init__( self, label, **kw )
class TestWidget2( GafferUI.Widget ) :
    """Widget whose wrapped Qt widget is itself owned by another Gaffer widget."""

    def __init__( self ) :

        child = TestWidget()
        # Keep a public reference so tests can inspect the inner widget.
        self.topLevelGafferWidget = child
        GafferUI.Widget.__init__( self, child )
class WidgetTest( GafferUITest.TestCase ) :
def testOwner( self ) :
    """Widget._owner() must map a wrapped Qt widget back to its Gaffer owner."""
    w = TestWidget()
    # assertIs replaces the long-deprecated TestCase.assert_ alias,
    # which was removed in Python 3.12.
    self.assertIs( GafferUI.Widget._owner( w._qtWidget() ), w )
def testParent( self ) :
    """A freshly constructed widget has no parent."""
    w = TestWidget()
    # assertIsNone replaces the deprecated assert_( ... is None ) idiom.
    self.assertIsNone( w.parent() )
def testCanDie( self ) :
    """Dropping the last reference must free both the Gaffer widget and its Qt widget."""
    w = TestWidget()
    wr1 = weakref.ref( w )
    wr2 = weakref.ref( w._qtWidget() )
    del w
    # assertIsNone replaces the deprecated assert_ alias (removed in
    # Python 3.12); a dead weakref resolves to None.
    self.assertIsNone( wr1() )
    self.assertIsNone( wr2() )
def testAncestor( self ) :
    """ancestor( type ) walks up the hierarchy and returns the first match, or None."""
    w = GafferUI.Window( "test" )
    l = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Vertical )
    p = GafferUI.SplitContainer()
    l.append( p )
    w.setChild( l )
    # assertIs/assertIsNone replace the deprecated assert_ alias
    # (removed in Python 3.12) and give clearer failure messages.
    self.assertIs( p.ancestor( GafferUI.ListContainer ), l )
    self.assertIs( p.ancestor( GafferUI.Window ), w )
    self.assertIsNone( p.ancestor( GafferUI.Menu ) )
def testIsAncestorOf( self ) :
    """isAncestorOf() is true only for widgets on the direct parent chain."""
    # Window -> SplitContainer -> two sibling ListContainers, one button each.
    with GafferUI.Window( "test" ) as w :
        with GafferUI.SplitContainer() as p :
            with GafferUI.ListContainer() as l1 :
                b1 = GafferUI.Button()
            with GafferUI.ListContainer() as l2 :
                b2 = GafferUI.Button()

    # b2's ancestors are l2, p and w — but not the sibling container l1.
    self.assertTrue( l2.isAncestorOf( b2 ) )
    self.assertFalse( l1.isAncestorOf( b2 ) )
    self.assertTrue( p.isAncestorOf( b2 ) )
    self.assertTrue( w.isAncestorOf( b2 ) )

    # The relation is not symmetric: a leaf is nobody's ancestor.
    self.assertFalse( b2.isAncestorOf( b1 ) )
    self.assertFalse( b2.isAncestorOf( l1 ) )
    self.assertFalse( b2.isAncestorOf( l2 ) )
    self.assertFalse( b2.isAncestorOf( p ) )
    self.assertFalse( b2.isAncestorOf( w ) )

    # Mirror checks for b1, whose chain goes through l1 rather than l2.
    self.assertTrue( l1.isAncestorOf( b1 ) )
    self.assertFalse( l2.isAncestorOf( b1 ) )
    self.assertTrue( p.isAncestorOf( b1 ) )
    self.assertTrue( w.isAncestorOf( b1 ) )
def testGafferWidgetAsTopLevel( self ) :
w = TestWidget2()
self.assert_( GafferUI.Widget._owner( w._qtWidget() ) is w )
self.assert_( w.topLevelGafferWidget.parent() is w )
self.assert_( GafferUI.Widget._owner( w.topLevelGafferWidget._qtWidget() ) is not w )
def testToolTip( self ) :
w = TestWidget()
self.assertEqual( w.getToolTip(), "" )
w = TestWidget( toolTip="hi" )
self.assertEqual( w.getToolTip(), "hi" )
w.setToolTip( "a" )
self.assertEqual( w.getToolTip(), "a" )
def testMarkdownToolTips( self ) :
markdownToolTip = "# header\n\n- list 1\nlist 2"
w = TestWidget()
w.setToolTip( markdownToolTip )
# We don't want any conversion to HTML to be "baked in" - we expect
# to get back exactly the same thing as we saved.
self.assertEqual( w.getToolTip(), markdownToolTip )
def testEnabledState( self ) :
w = TestWidget()
self.assertEqual( w.getEnabled(), True )
self.assertEqual( w.enabled(), True )
w.setEnabled( False )
self.assertEqual( w.getEnabled(), False )
self.assertEqual( w.enabled(), False )
w.setEnabled( True )
self.assertEqual( w.getEnabled(), True )
self.assertEqual( w.enabled(), True )
def testDisabledWidgetsDontGetSignals( self ) :
w = TestWidget()
def f( w, event ) :
WidgetTest.signalsEmitted += 1
c = w.buttonPressSignal().connect( f )
WidgetTest.signalsEmitted = 0
event = QtGui.QMouseEvent( QtCore.QEvent.MouseButtonPress, QtCore.QPoint( 0, 0 ), QtCore.Qt.LeftButton, QtCore.Qt.LeftButton, QtCore.Qt.NoModifier )
QtWidgets.QApplication.instance().sendEvent( w._qtWidget(), event )
self.assertEqual( WidgetTest.signalsEmitted, 1 )
w.setEnabled( False )
QtWidgets.QApplication.instance().sendEvent( w._qtWidget(), event )
self.assertEqual( WidgetTest.signalsEmitted, 1 )
w.setEnabled( True )
QtWidgets.QApplication.instance().sendEvent( w._qtWidget(), event )
self.assertEqual( WidgetTest.signalsEmitted, 2 )
def testCanDieAfterUsingSignals( self ) :
w = TestWidget()
wr1 = weakref.ref( w )
wr2 = weakref.ref( w._qtWidget() )
w.buttonPressSignal()
w.buttonReleaseSignal()
w.mouseMoveSignal()
w.wheelSignal()
del w
self.assert_( wr1() is None )
self.assert_( wr2() is None )
def testVisibility( self ) :
with GafferUI.Window() as w :
with GafferUI.ListContainer() as l :
t = TestWidget()
self.assertEqual( w.getVisible(), False )
self.assertEqual( l.getVisible(), True )
self.assertEqual( t.getVisible(), True )
self.assertEqual( w.visible(), False )
self.assertEqual( l.visible(), False )
self.assertEqual( t.visible(), False )
w.setVisible( True )
self.assertEqual( w.getVisible(), True )
self.assertEqual( l.getVisible(), True )
self.assertEqual( t.getVisible(), True )
self.assertEqual( w.visible(), True )
self.assertEqual( l.visible(), True )
self.assertEqual( t.visible(), True )
w.setVisible( False )
self.assertEqual( w.getVisible(), False )
self.assertEqual( l.getVisible(), True )
self.assertEqual( t.getVisible(), True )
self.assertEqual( w.visible(), False )
self.assertEqual( l.visible(), False )
self.assertEqual( t.visible(), False )
self.assertEqual( t.visible( relativeTo = l ), True )
self.assertEqual( t.visible( relativeTo = w ), True )
w.setVisible( True )
t.setVisible( False )
self.assertEqual( t.getVisible(), False )
self.assertEqual( t.visible(), False )
self.assertEqual( t.visible( relativeTo = l ), False )
def testGetVisibleForNewWidgets( self ) :
w = TestWidget()
self.assertEqual( w.getVisible(), True )
def testVisibilityOfParentlessWidgets( self ) :
w = GafferUI.Window()
t = TestWidget()
# windows must be explicitly shown
self.assertEqual( w.getVisible(), False )
self.assertEqual( w.visible(), False )
# widgets don't need to be explicitly shown but
# must not be visible on screen until parented
# to a window
self.assertEqual( t.getVisible(), True )
self.assertEqual( t.visible(), False )
w.setVisible( True )
self.assertEqual( w.getVisible(), True )
self.assertEqual( w.visible(), True )
w.setChild( t )
self.assertEqual( t.getVisible(), True )
self.assertEqual( t.visible(), True )
# removing a widget from its parent must not
# leave it visible on screen.
w.removeChild( t )
self.assertEqual( t.parent(), None )
self.assertEqual( t.getVisible(), True )
self.assertEqual( t.visible(), False )
def testVisibilityWhenTransferringWidgets( self ) :
w1 = GafferUI.Window()
w1.setVisible( True )
w2 = GafferUI.Window()
w2.setVisible( True )
v = TestWidget()
self.assertEqual( v.getVisible(), True )
self.assertEqual( v.visible(), False )
h = TestWidget()
self.assertEqual( h.getVisible(), True )
h.setVisible( False )
self.assertEqual( h.getVisible(), False )
self.assertEqual( h.visible(), False )
w1.setChild( v )
self.assertEqual( v.getVisible(), True )
self.assertEqual( v.visible(), True )
self.assertEqual( h.getVisible(), False )
self.assertEqual( h.visible(), False )
w2.setChild( v )
self.assertEqual( v.getVisible(), True )
self.assertEqual( v.visible(), True )
self.assertEqual( h.getVisible(), False )
self.assertEqual( h.visible(), False )
w1.setChild( h )
self.assertEqual( v.getVisible(), True )
self.assertEqual( v.visible(), True )
self.assertEqual( h.getVisible(), False )
self.assertEqual( h.visible(), False )
w2.setChild( h )
self.assertEqual( v.getVisible(), True )
self.assertEqual( v.visible(), False )
self.assertEqual( h.getVisible(), False )
self.assertEqual( h.visible(), False )
def testSignals( self ) :
w = TestWidget()
for s in [
( "keyPressSignal", GafferUI.WidgetEventSignal ),
( "keyReleaseSignal", GafferUI.WidgetEventSignal ),
( "buttonPressSignal", GafferUI.WidgetEventSignal ),
( "buttonReleaseSignal", GafferUI.WidgetEventSignal ),
( "buttonDoubleClickSignal", GafferUI.WidgetEventSignal ),
( "mouseMoveSignal", GafferUI.WidgetEventSignal ),
( "enterSignal", GafferUI.WidgetSignal ),
( "leaveSignal", GafferUI.WidgetSignal ),
( "wheelSignal", GafferUI.WidgetEventSignal ),
( "visibilityChangedSignal", GafferUI.WidgetSignal ),
( "contextMenuSignal", GafferUI.WidgetSignal ),
( "parentChangedSignal", GafferUI.WidgetSignal ),
] :
self.failUnless( isinstance( getattr( w, s[0] )(), s[1] ) )
self.failUnless( getattr( w, s[0] )() is getattr( w, s[0] )() )
def testBound( self ) :
w = GafferUI.Window( borderWidth = 8 )
b = GafferUI.Button()
w.setChild( b )
w.setVisible( True )
w.setPosition( imath.V2i( 100 ) )
self.waitForIdle( 1000 )
wb = w.bound()
bb = b.bound()
bbw = b.bound( relativeTo = w )
self.failUnless( isinstance( wb, imath.Box2i ) )
self.failUnless( isinstance( bb, imath.Box2i ) )
self.failUnless( isinstance( bbw, imath.Box2i ) )
self.assertEqual( bb.size(), bbw.size() )
self.assertEqual( bbw.min(), bb.min() - wb.min() )
self.assertEqual( b.size(), bb.size() )
def testParentChangedSignal( self ) :
w = TestWidget()
window = GafferUI.Window()
cs = GafferTest.CapturingSlot( w.parentChangedSignal() )
self.assertEqual( len( cs ), 0 )
window.setChild( w )
self.assertEqual( len( cs ), 1 )
self.assertEqual( cs[0], ( w, ) )
window.setChild( None )
self.assertEqual( len( cs ), 2 )
self.assertEqual( cs[1], ( w, ) )
def testHighlighting( self ) :
w = TestWidget()
self.assertEqual( w.getHighlighted(), False )
w.setHighlighted( True )
self.assertEqual( w.getHighlighted(), True )
w.setHighlighted( False )
self.assertEqual( w.getHighlighted(), False )
def testWidgetAt( self ) :
with GafferUI.Window() as w1 :
t1 = GafferUI.TextWidget( "hello" )
with GafferUI.Window() as w2 :
t2 = GafferUI.TextWidget( "hello" )
w1.setVisible( True )
w2.setVisible( True )
w1.setPosition( imath.V2i( 100 ) )
w2.setPosition( imath.V2i( 300 ) )
self.waitForIdle( 1000 )
self.assertTrue( GafferUI.Widget.widgetAt( w1.bound().center() ) is t1 )
self.assertTrue( GafferUI.Widget.widgetAt( w2.bound().center() ) is t2 )
self.assertTrue( GafferUI.Widget.widgetAt( w1.bound().center(), widgetType=GafferUI.Window ) is w1 )
self.assertTrue( GafferUI.Widget.widgetAt( w2.bound().center(), widgetType=GafferUI.Window ) is w2 )
def testMousePosition( self ) :
w = GafferUI.Window( borderWidth = 8 )
b = GafferUI.Button()
w.setChild( b )
w.setVisible( True )
w.setPosition( imath.V2i( 100 ) )
self.waitForIdle( 1000 )
mouseGlobal = GafferUI.Widget.mousePosition()
mouseLocal = GafferUI.Widget.mousePosition( relativeTo = b )
self.assertEqual( mouseGlobal, mouseLocal + b.bound().min() )
def testAddressAndObject( self ) :
button = GafferUI.Button()
address = GafferUI._qtAddress( button._qtWidget() )
self.assertTrue( isinstance( address, int ) )
widget = GafferUI._qtObject( address, QtWidgets.QPushButton )
self.assertTrue( isinstance( widget, QtWidgets.QPushButton ) )
def testSetVisibleWithNonBool( self ) :
w = TestWidget()
self.assertTrue( w.getVisible() is True )
w.setVisible( 0 )
self.assertTrue( w.getVisible() is False )
w.setVisible( 1 )
self.assertTrue( w.getVisible() is True )
def testStyleProperties( self ) :
w = GafferUI.Widget( QtWidgets.QLabel( "base" ))
self.assertEqual( w._qtWidget().property( 'gafferClass' ), 'GafferUI.Widget' )
w = TestWidget()
self.assertEqual( w._qtWidget().property( 'gafferClass' ), 'GafferUITest.WidgetTest.TestWidget' )
class TestWidgetChild( TestWidget ) :
pass
w = TestWidgetChild()
self.assertEqual( w._qtWidget().property( 'gafferClasses' ), [
'GafferUITest.WidgetTest.TestWidgetChild',
'GafferUITest.WidgetTest.TestWidget',
'GafferUI.Widget'
] )
if __name__ == "__main__":
	# Allow the suite to be run directly as a script.
	unittest.main()
| 28.81875
| 150
| 0.686908
|
4a00fbba470573f32094f91aa94882f1d83d815b
| 46
|
py
|
Python
|
seleniumcrawler/__init__.py
|
Goku0858756/selenium-crawler
|
e49df4c1f40a330af19d90b7c59d49ac1acee86c
|
[
"MIT"
] | 83
|
2015-10-07T06:34:45.000Z
|
2022-03-09T06:26:54.000Z
|
seleniumcrawler/__init__.py
|
corywalker/selenium-crawler
|
e49df4c1f40a330af19d90b7c59d49ac1acee86c
|
[
"MIT"
] | null | null | null |
seleniumcrawler/__init__.py
|
corywalker/selenium-crawler
|
e49df4c1f40a330af19d90b7c59d49ac1acee86c
|
[
"MIT"
] | 29
|
2015-12-04T13:08:16.000Z
|
2021-07-15T23:15:36.000Z
|
from seleniumcrawler.handle import handle_url
| 23
| 45
| 0.891304
|
4a00fbdf02d1563797801d28bbfc1b5455dce41d
| 16,041
|
py
|
Python
|
disnake/sticker.py
|
Kraots/disnake
|
9eb9ab81915dae7249ffb2b757dd6dee6090341e
|
[
"MIT"
] | null | null | null |
disnake/sticker.py
|
Kraots/disnake
|
9eb9ab81915dae7249ffb2b757dd6dee6090341e
|
[
"MIT"
] | null | null | null |
disnake/sticker.py
|
Kraots/disnake
|
9eb9ab81915dae7249ffb2b757dd6dee6090341e
|
[
"MIT"
] | null | null | null |
"""
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
from typing import Literal, TYPE_CHECKING, List, Optional, Tuple, Type, Union
import unicodedata
from .mixins import Hashable
from .asset import Asset, AssetMixin
from .utils import cached_slot_property, find, snowflake_time, get, MISSING
from .errors import InvalidData
from .enums import StickerType, StickerFormatType, try_enum
__all__ = (
"StickerPack",
"StickerItem",
"Sticker",
"StandardSticker",
"GuildSticker",
)
if TYPE_CHECKING:
import datetime
from .state import ConnectionState
from .user import User
from .guild import Guild
from .types.sticker import (
StickerPack as StickerPackPayload,
StickerItem as StickerItemPayload,
Sticker as StickerPayload,
StandardSticker as StandardStickerPayload,
GuildSticker as GuildStickerPayload,
ListPremiumStickerPacks as ListPremiumStickerPacksPayload,
EditGuildSticker,
)
class StickerPack(Hashable):
    """Represents a sticker pack.

    .. versionadded:: 2.0

    .. container:: operations

        .. describe:: str(x)

            Returns the name of the sticker pack.

        .. describe:: x == y

            Checks if the sticker pack is equal to another sticker pack.

        .. describe:: x != y

            Checks if the sticker pack is not equal to another sticker pack.

    Attributes
    -----------
    name: :class:`str`
        The name of the sticker pack.
    description: :class:`str`
        The description of the sticker pack.
    id: :class:`int`
        The id of the sticker pack.
    stickers: List[:class:`StandardSticker`]
        The stickers of this sticker pack.
    sku_id: :class:`int`
        The SKU ID of the sticker pack.
    cover_sticker_id: :class:`int`
        The ID of the sticker used for the cover of the sticker pack.
    cover_sticker: :class:`StandardSticker`
        The sticker used for the cover of the sticker pack.
    """

    __slots__ = (
        "_state",
        "id",
        "stickers",
        "name",
        "sku_id",
        "cover_sticker_id",
        "cover_sticker",
        "description",
        "_banner",
    )

    def __init__(self, *, state: ConnectionState, data: StickerPackPayload) -> None:
        self._state: ConnectionState = state
        self._from_data(data)

    def _from_data(self, data: StickerPackPayload) -> None:
        # Populate all attributes from the raw API payload.
        self.id: int = int(data["id"])
        stickers = data["stickers"]
        self.stickers: List[StandardSticker] = [
            StandardSticker(state=self._state, data=sticker) for sticker in stickers
        ]
        self.name: str = data["name"]
        self.sku_id: int = int(data["sku_id"])
        # NOTE(review): this assumes "cover_sticker_id" and "banner_asset_id"
        # are always present in the payload - TODO confirm against the API
        # type definition; a missing key would raise KeyError here.
        self.cover_sticker_id: int = int(data["cover_sticker_id"])
        # `get` matches by id within the stickers just built above.
        self.cover_sticker: StandardSticker = get(self.stickers, id=self.cover_sticker_id)  # type: ignore
        self.description: str = data["description"]
        self._banner: int = int(data["banner_asset_id"])

    @property
    def banner(self) -> Asset:
        """:class:`Asset`: The banner asset of the sticker pack."""
        return Asset._from_sticker_banner(self._state, self._banner)

    def __repr__(self) -> str:
        return f"<StickerPack id={self.id} name={self.name!r} description={self.description!r}>"

    def __str__(self) -> str:
        return self.name
class _StickerTag(Hashable, AssetMixin):
    # Internal mixin shared by sticker classes: combines hashability with
    # asset downloading. Subclasses must provide `id` and `format`.

    __slots__ = ()

    id: int
    format: StickerFormatType

    async def read(self) -> bytes:
        """|coro|

        Retrieves the content of this sticker as a :class:`bytes` object.

        .. note::

            Stickers that use the :attr:`StickerFormatType.lottie` format cannot be read.

        Raises
        ------
        HTTPException
            Downloading the asset failed.
        NotFound
            The asset was deleted.
        TypeError
            The sticker is a lottie type.

        Returns
        -------
        :class:`bytes`
            The content of the asset.
        """
        # Lottie stickers are JSON animations, not downloadable images.
        if self.format is StickerFormatType.lottie:
            raise TypeError('Cannot read stickers of format "lottie".')
        return await super().read()
class StickerItem(_StickerTag):
    """Represents a sticker item.

    .. versionadded:: 2.0

    .. container:: operations

        .. describe:: str(x)

            Returns the name of the sticker item.

        .. describe:: x == y

            Checks if the sticker item is equal to another sticker item.

        .. describe:: x != y

            Checks if the sticker item is not equal to another sticker item.

    Attributes
    -----------
    name: :class:`str`
        The sticker's name.
    id: :class:`int`
        The id of the sticker.
    format: :class:`StickerFormatType`
        The format for the sticker's image.
    url: :class:`str`
        The URL for the sticker's image.
    """

    __slots__ = ("_state", "name", "id", "format", "url")

    def __init__(self, *, state: ConnectionState, data: StickerItemPayload):
        self._state: ConnectionState = state
        self.name: str = data["name"]
        self.id: int = int(data["id"])
        self.format: StickerFormatType = try_enum(StickerFormatType, data["format_type"])
        # CDN URL is derived locally; the file extension follows the format.
        self.url: str = f"{Asset.BASE}/stickers/{self.id}.{self.format.file_extension}"

    def __repr__(self) -> str:
        return f"<StickerItem id={self.id} name={self.name!r} format={self.format}>"

    def __str__(self) -> str:
        return self.name

    async def fetch(self) -> Union[Sticker, StandardSticker, GuildSticker]:
        """|coro|

        Attempts to retrieve the full sticker data of the sticker item.

        Raises
        --------
        HTTPException
            Retrieving the sticker failed.

        Returns
        --------
        Union[:class:`StandardSticker`, :class:`GuildSticker`]
            The retrieved sticker.
        """
        data: StickerPayload = await self._state.http.get_sticker(self.id)
        # The factory maps the payload's "type" field to the right subclass.
        cls, _ = _sticker_factory(data["type"])  # type: ignore
        return cls(state=self._state, data=data)
class Sticker(_StickerTag):
    """Represents a sticker.

    .. versionadded:: 1.6

    .. container:: operations

        .. describe:: str(x)

            Returns the name of the sticker.

        .. describe:: x == y

            Checks if the sticker is equal to another sticker.

        .. describe:: x != y

            Checks if the sticker is not equal to another sticker.

    Attributes
    ----------
    name: :class:`str`
        The sticker's name.
    id: :class:`int`
        The id of the sticker.
    description: :class:`str`
        The description of the sticker.
    format: :class:`StickerFormatType`
        The format for the sticker's image.
    url: :class:`str`
        The URL for the sticker's image.
    """

    # Note: an earlier docstring also listed a ``pack_id`` attribute, but this
    # base class never defines one (see __slots__ / _from_data); only
    # StandardSticker carries pack_id.

    __slots__ = ("_state", "id", "name", "description", "format", "url")

    def __init__(self, *, state: ConnectionState, data: StickerPayload) -> None:
        self._state: ConnectionState = state
        self._from_data(data)

    def _from_data(self, data: StickerPayload) -> None:
        # Populate the common sticker attributes from the raw payload.
        self.id: int = int(data["id"])
        self.name: str = data["name"]
        self.description: str = data["description"]
        self.format: StickerFormatType = try_enum(StickerFormatType, data["format_type"])
        self.url: str = f"{Asset.BASE}/stickers/{self.id}.{self.format.file_extension}"

    def __repr__(self) -> str:
        return f"<Sticker id={self.id} name={self.name!r}>"

    def __str__(self) -> str:
        return self.name

    @property
    def created_at(self) -> datetime.datetime:
        """:class:`datetime.datetime`: Returns the sticker's creation time in UTC."""
        return snowflake_time(self.id)
class StandardSticker(Sticker):
    """Represents a sticker that is found in a standard sticker pack.

    .. versionadded:: 2.0

    .. container:: operations

        .. describe:: str(x)

            Returns the name of the sticker.

        .. describe:: x == y

            Checks if the sticker is equal to another sticker.

        .. describe:: x != y

            Checks if the sticker is not equal to another sticker.

    Attributes
    ----------
    name: :class:`str`
        The sticker's name.
    id: :class:`int`
        The id of the sticker.
    description: :class:`str`
        The description of the sticker.
    pack_id: :class:`int`
        The id of the sticker's pack.
    format: :class:`StickerFormatType`
        The format for the sticker's image.
    tags: List[:class:`str`]
        A list of tags for the sticker.
    sort_value: :class:`int`
        The sticker's sort order within its pack.
    """

    __slots__ = ("sort_value", "pack_id", "type", "tags")

    def _from_data(self, data: StandardStickerPayload) -> None:
        super()._from_data(data)
        self.sort_value: int = data["sort_value"]
        self.pack_id: int = int(data["pack_id"])
        self.type: StickerType = StickerType.standard

        # The API sends tags as a single comma-separated string; "tags" may
        # be absent, in which case the list is left empty.
        try:
            self.tags: List[str] = [tag.strip() for tag in data["tags"].split(",")]
        except KeyError:
            self.tags = []

    def __repr__(self) -> str:
        return f"<StandardSticker id={self.id} name={self.name!r} pack_id={self.pack_id}>"

    async def pack(self) -> StickerPack:
        """|coro|

        Retrieves the sticker pack that this sticker belongs to.

        Raises
        --------
        InvalidData
            The corresponding sticker pack was not found.
        HTTPException
            Retrieving the sticker pack failed.

        Returns
        --------
        :class:`StickerPack`
            The retrieved sticker pack.
        """
        # There is no "fetch single pack" endpoint: list all premium packs
        # and search for this sticker's pack_id.
        data: ListPremiumStickerPacksPayload = await self._state.http.list_premium_sticker_packs()
        packs = data["sticker_packs"]
        pack = find(lambda d: int(d["id"]) == self.pack_id, packs)

        if pack:
            return StickerPack(state=self._state, data=pack)
        raise InvalidData(f"Could not find corresponding sticker pack for {self!r}")
class GuildSticker(Sticker):
    """Represents a sticker that belongs to a guild.

    .. versionadded:: 2.0

    .. container:: operations

        .. describe:: str(x)

            Returns the name of the sticker.

        .. describe:: x == y

            Checks if the sticker is equal to another sticker.

        .. describe:: x != y

            Checks if the sticker is not equal to another sticker.

    Attributes
    ----------
    name: :class:`str`
        The sticker's name.
    id: :class:`int`
        The id of the sticker.
    description: :class:`str`
        The description of the sticker.
    format: :class:`StickerFormatType`
        The format for the sticker's image.
    available: :class:`bool`
        Whether this sticker is available for use.
    guild_id: :class:`int`
        The ID of the guild that this sticker is from.
    user: Optional[:class:`User`]
        The user that created this sticker. This can only be retrieved using :meth:`Guild.fetch_sticker` and
        having the :attr:`~Permissions.manage_emojis_and_stickers` permission.
    emoji: :class:`str`
        The name of a unicode emoji that represents this sticker.
    """

    __slots__ = ("available", "guild_id", "user", "emoji", "type", "_cs_guild")

    def _from_data(self, data: GuildStickerPayload) -> None:
        super()._from_data(data)
        self.available: bool = data["available"]
        self.guild_id: int = int(data["guild_id"])
        # "user" is only present when fetched with manage_emojis_and_stickers.
        user = data.get("user")
        self.user: Optional[User] = self._state.store_user(user) if user else None
        # For guild stickers the "tags" field carries a single emoji name.
        self.emoji: str = data["tags"]
        self.type: StickerType = StickerType.guild

    def __repr__(self) -> str:
        return f"<GuildSticker name={self.name!r} id={self.id} guild_id={self.guild_id} user={self.user!r}>"

    @cached_slot_property("_cs_guild")
    def guild(self) -> Optional[Guild]:
        """Optional[:class:`Guild`]: The guild that this sticker is from.
        Could be ``None`` if the bot is not in the guild.

        .. versionadded:: 2.0
        """
        return self._state._get_guild(self.guild_id)

    async def edit(
        self,
        *,
        name: str = MISSING,
        description: str = MISSING,
        emoji: str = MISSING,
        reason: Optional[str] = None,
    ) -> GuildSticker:
        """|coro|

        Edits a :class:`GuildSticker` for the guild.

        Parameters
        -----------
        name: :class:`str`
            The sticker's new name. Must be at least 2 characters.
        description: Optional[:class:`str`]
            The sticker's new description. Can be ``None``.
        emoji: :class:`str`
            The name of a unicode emoji that represents the sticker's expression.
        reason: :class:`str`
            The reason for editing this sticker. Shows up on the audit log.

        Raises
        -------
        Forbidden
            You are not allowed to edit stickers.
        HTTPException
            An error occurred editing the sticker.

        Returns
        --------
        :class:`GuildSticker`
            The newly modified sticker.
        """
        # Only fields explicitly passed by the caller go into the payload;
        # MISSING marks "leave unchanged".
        payload: EditGuildSticker = {}

        if name is not MISSING:
            payload["name"] = name

        if description is not MISSING:
            payload["description"] = description

        if emoji is not MISSING:
            try:
                # If an actual emoji character was passed, convert it to its
                # canonical unicode name; a plain name raises TypeError and
                # is used as-is.
                emoji = unicodedata.name(emoji)
            except TypeError:
                pass
            else:
                emoji = emoji.replace(" ", "_")

            payload["tags"] = emoji

        data: GuildStickerPayload = await self._state.http.modify_guild_sticker(
            self.guild_id, self.id, payload, reason
        )
        return GuildSticker(state=self._state, data=data)

    async def delete(self, *, reason: Optional[str] = None) -> None:
        """|coro|

        Deletes the custom :class:`Sticker` from the guild.

        You must have :attr:`~Permissions.manage_emojis_and_stickers` permission to
        do this.

        Parameters
        -----------
        reason: Optional[:class:`str`]
            The reason for deleting this sticker. Shows up on the audit log.

        Raises
        -------
        Forbidden
            You are not allowed to delete stickers.
        HTTPException
            An error occurred deleting the sticker.
        """
        await self._state.http.delete_guild_sticker(self.guild_id, self.id, reason)
def _sticker_factory(
    sticker_type: Literal[1, 2]
) -> Tuple[Type[Union[StandardSticker, GuildSticker, Sticker]], StickerType]:
    """Map a raw sticker type value to the matching Sticker subclass.

    Returns the class to construct together with the resolved
    :class:`StickerType`; unknown values fall back to the plain
    :class:`Sticker` base class.
    """
    resolved = try_enum(StickerType, sticker_type)
    dispatch = {
        StickerType.standard: StandardSticker,
        StickerType.guild: GuildSticker,
    }
    return dispatch.get(resolved, Sticker), resolved
| 30.266038
| 108
| 0.614737
|
4a00ffce90d53e6873bf27daeff3eb374749fb17
| 136
|
py
|
Python
|
src/ltdconveyor/s3/exceptions.py
|
lsst-sqre/ltd-conveyor
|
713cdd3325a45a96261eea07933df3ce37998d85
|
[
"MIT"
] | null | null | null |
src/ltdconveyor/s3/exceptions.py
|
lsst-sqre/ltd-conveyor
|
713cdd3325a45a96261eea07933df3ce37998d85
|
[
"MIT"
] | 8
|
2017-02-02T21:50:24.000Z
|
2022-03-11T18:45:43.000Z
|
src/ltdconveyor/s3/exceptions.py
|
lsst-sqre/ltd-conveyor
|
713cdd3325a45a96261eea07933df3ce37998d85
|
[
"MIT"
] | null | null | null |
__all__ = ("S3Error",)
from ..exceptions import ConveyorError
class S3Error(ConveyorError):
    """Error related to AWS S3 usage.

    Subclasses the package-wide :class:`ConveyorError`, so callers can catch
    all conveyor errors with a single ``except`` clause.
    """
| 17
| 40
| 0.705882
|
4a00ffcf0101591a0de613ec9c5e3bfced22d4e0
| 415
|
py
|
Python
|
SleekSecurity/layers/plugins/fingerprint/framework/play.py
|
GitInitDev/ZohoUniv
|
966704837e65f58b52492b56d08e7958df3d220a
|
[
"Unlicense"
] | null | null | null |
SleekSecurity/layers/plugins/fingerprint/framework/play.py
|
GitInitDev/ZohoUniv
|
966704837e65f58b52492b56d08e7958df3d220a
|
[
"Unlicense"
] | null | null | null |
SleekSecurity/layers/plugins/fingerprint/framework/play.py
|
GitInitDev/ZohoUniv
|
966704837e65f58b52492b56d08e7958df3d220a
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
#
# @name: Wascan - Web Application Scanner
# @repo: https://github.com/m4ll0k/Wascan
# @author: Momo Outaadi (M4ll0k)
# @license: See the file 'LICENSE.txt
from re import search,I
def play(headers, content):
    """Fingerprint the Play! Java web framework from HTTP response headers.

    Parameters
    ----------
    headers : dict
        Response headers as a name -> value mapping.
    content : str
        Response body. Unused here, but kept to match the common
        fingerprint-plugin interface.

    Returns
    -------
    str or None
        ``"Play - Java Framework"`` if a header value advertises Play!,
        otherwise ``None``.
    """
    for value in headers.values():
        # Header values vary in capitalisation between servers, so match
        # case-insensitively (the I flag was imported but previously unused).
        if search(r"play! framework;", value, I):
            return "Play - Java Framework"
    return None
| 25.9375
| 55
| 0.662651
|
4a01000b82d47dbd71f1eba8a1e1aba0f1d97d9b
| 14,249
|
py
|
Python
|
src/Testing/ZopeTestCase/testBaseTestCase.py
|
dek4nice/Zope
|
ec4765fc0007c4e78aafcbeef510077444f8551a
|
[
"ZPL-2.1"
] | 1
|
2018-11-30T12:39:27.000Z
|
2018-11-30T12:39:27.000Z
|
src/Testing/ZopeTestCase/testBaseTestCase.py
|
dek4nice/Zope
|
ec4765fc0007c4e78aafcbeef510077444f8551a
|
[
"ZPL-2.1"
] | null | null | null |
src/Testing/ZopeTestCase/testBaseTestCase.py
|
dek4nice/Zope
|
ec4765fc0007c4e78aafcbeef510077444f8551a
|
[
"ZPL-2.1"
] | 1
|
2018-11-30T12:39:34.000Z
|
2018-11-30T12:39:34.000Z
|
##############################################################################
#
# Copyright (c) 2005 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Tests the base.TestCase class
NOTE: This is *not* an example TestCase. Do not
use this file as a blueprint for your own tests!
See testPythonScript.py and testShoppingCart.py for
example test cases. See testSkeleton.py for a quick
way of getting started.
"""
import gc
import transaction
from Testing.ZopeTestCase import base
from Testing.ZopeTestCase import utils
from Testing.ZopeTestCase import connections
from Testing.ZopeTestCase import sandbox
from Acquisition import aq_base
from AccessControl import getSecurityManager
from AccessControl.SecurityManagement import newSecurityManager
class HookTest(base.TestCase):
    """Records the order in which the ZopeTestCase lifecycle hooks fire.

    Each overridden hook appends its name to ``self._called`` before
    delegating to the base class, so subclasses can assert on the exact
    call sequence via :meth:`assertHooks`.
    """

    def setUp(self):
        # Reset the recorded sequence before base setUp runs the hooks.
        self._called = []
        base.TestCase.setUp(self)

    def beforeSetUp(self):
        self._called.append('beforeSetUp')
        base.TestCase.beforeSetUp(self)

    def _setup(self):
        self._called.append('_setup')
        base.TestCase._setup(self)

    def afterSetUp(self):
        self._called.append('afterSetUp')
        base.TestCase.afterSetUp(self)

    def beforeTearDown(self):
        self._called.append('beforeTearDown')
        base.TestCase.beforeTearDown(self)

    def beforeClose(self):
        self._called.append('beforeClose')
        base.TestCase.beforeClose(self)

    def afterClear(self):
        self._called.append('afterClear')
        base.TestCase.afterClear(self)

    def assertHooks(self, sequence):
        # Assert the exact hook call order recorded so far.
        self.assertEqual(self._called, sequence)
class TestTestCase(HookTest):
    """Exercises hook ordering, connection handling, transaction aborting
    and security cleanup of the base.TestCase implementation."""

    def testSetUp(self):
        self.assertHooks(['beforeSetUp', '_setup', 'afterSetUp'])

    def testTearDown(self):
        self._called = []
        self.tearDown()
        self.assertHooks(['beforeTearDown', 'beforeClose', 'afterClear'])

    def testAppOpensConnection(self):
        # Each _app() call opens an additional registered connection.
        self.assertEqual(connections.count(), 1)
        self._app()
        self.assertEqual(connections.count(), 2)

    def testClearCallsCloseHook(self):
        self._called = []
        self._clear(1)
        self.assertHooks(['beforeClose', 'afterClear'])

    def testClearSkipsCloseHook(self):
        self._called = []
        self._clear()
        self.assertHooks(['afterClear'])

    def testClearAbortsTransaction(self):
        self.assertEqual(len(self.getObjectsInTransaction()), 0)
        self.app.foo = 1
        self.assertEqual(len(self.getObjectsInTransaction()), 1)
        self._clear()
        self.assertEqual(len(self.getObjectsInTransaction()), 0)

    def testClearClosesConnection(self):
        self.assertEqual(connections.count(), 1)
        self._clear()
        self.assertEqual(connections.count(), 0)

    def testClearClosesAllConnections(self):
        self._app()
        self.assertEqual(connections.count(), 2)
        self._clear()
        self.assertEqual(connections.count(), 0)

    def testClearLogsOut(self):
        # _clear() must drop any logged-in security context.
        uf = self.app.acl_users
        uf.userFolderAddUser('user_1', '', [], [])
        newSecurityManager(None, uf.getUserById('user_1').__of__(uf))
        self.assertEqual(
            getSecurityManager().getUser().getUserName(), 'user_1')
        self._clear()
        self.assertEqual(
            getSecurityManager().getUser().getUserName(), 'Anonymous User')

    def testClearSurvivesDoubleCall(self):
        self._called = []
        self._clear()
        self._clear()
        self.assertHooks(['afterClear', 'afterClear'])

    def testClearSurvivesClosedConnection(self):
        self._called = []
        self._close()
        self._clear()
        self.assertHooks(['afterClear'])

    def testClearSurvivesBrokenApp(self):
        self._called = []
        self.app = None
        self._clear()
        self.assertHooks(['afterClear'])

    def testClearSurvivesMissingApp(self):
        self._called = []
        delattr(self, 'app')
        self._clear()
        self.assertHooks(['afterClear'])

    def testClearSurvivesMissingRequest(self):
        self._called = []
        # aq_base strips the acquisition wrapper, hiding the REQUEST.
        self.app = aq_base(self.app)
        self._clear()
        self.assertHooks(['afterClear'])

    def testCloseAbortsTransaction(self):
        self.assertEqual(len(self.getObjectsInTransaction()), 0)
        self.app.foo = 1
        self.assertEqual(len(self.getObjectsInTransaction()), 1)
        self._close()
        self.assertEqual(len(self.getObjectsInTransaction()), 0)

    def testCloseClosesConnection(self):
        self.assertEqual(connections.count(), 1)
        self._close()
        self.assertEqual(connections.count(), 0)

    def testCloseClosesAllConnections(self):
        self._app()
        self.assertEqual(connections.count(), 2)
        self._close()
        self.assertEqual(connections.count(), 0)

    def testLogoutLogsOut(self):
        uf = self.app.acl_users
        uf.userFolderAddUser('user_1', '', [], [])
        newSecurityManager(None, uf.getUserById('user_1').__of__(uf))
        self.assertEqual(
            getSecurityManager().getUser().getUserName(), 'user_1')
        self.logout()
        self.assertEqual(
            getSecurityManager().getUser().getUserName(), 'Anonymous User')

    def getObjectsInTransaction(self):
        # Lets us spy into the transaction; the attribute name depends on
        # the Zope/transaction version in use.
        t = transaction.get()
        if hasattr(t, '_objects'):  # Zope < 2.8
            return t._objects
        elif hasattr(t, '_resources'):  # Zope >= 2.8
            return t._resources
        else:
            raise Exception('Unknown version')
class TestSetUpRaises(HookTest):
    """Checks that an exception raised from _setup() still runs afterClear()
    and closes the connection."""

    class Error(Exception):
        pass

    def setUp(self):
        try:
            HookTest.setUp(self)
        except self.Error:
            # afterSetUp must be skipped; afterClear must still run.
            self.assertHooks(['beforeSetUp', '_setup', 'afterClear'])
            # Connection has been closed
            self.assertEqual(connections.count(), 0)

    def _setup(self):
        HookTest._setup(self)
        raise self.Error

    def testTrigger(self):
        # Intentionally empty; the assertions happen in setUp() above.
        pass
class TestTearDownRaises(HookTest):
    """Checks that an exception raised from beforeClose() still runs
    afterClear() and closes the connection."""

    class Error(Exception):
        pass

    def tearDown(self):
        self._called = []
        try:
            HookTest.tearDown(self)
        except self.Error:
            self.assertHooks(['beforeTearDown', 'beforeClose', 'afterClear'])
            # Connection has been closed
            self.assertEqual(connections.count(), 0)

    def beforeClose(self):
        HookTest.beforeClose(self)
        raise self.Error

    def testTrigger(self):
        # Intentionally empty; the assertions happen in tearDown() above.
        pass
class TestConnectionRegistry(base.TestCase):
    '''Test the registry with Connection-like objects.

    Plain ``assert`` statements are stripped when Python runs with ``-O``,
    silently disabling the checks, so the unittest assertion API is used
    instead.
    '''

    class Conn(object):
        # Stand-in for a ZODB connection: records whether close() was called.
        _closed = 0

        def close(self):
            self._closed = 1

        def closed(self):
            return self._closed

    Klass = Conn

    def afterSetUp(self):
        # A fresh registry pre-populated with three registered connections.
        self.reg = connections.ConnectionRegistry()
        self.conns = [self.Klass(), self.Klass(), self.Klass()]
        for conn in self.conns:
            self.reg.register(conn)

    def testRegister(self):
        # Should be able to register connections
        self.assertEqual(len(self.reg), 3)
        self.assertEqual(self.reg.count(), 3)

    def testCloseConnection(self):
        # Should be able to close a single registered connection
        self.assertEqual(len(self.reg), 3)
        self.reg.close(self.conns[0])
        self.assertEqual(len(self.reg), 2)
        self.assertEqual(self.conns[0].closed(), 1)
        self.assertEqual(self.conns[1].closed(), 0)
        self.assertEqual(self.conns[2].closed(), 0)

    def testCloseSeveralConnections(self):
        # Should be able to close all registered connections one-by-one
        self.assertEqual(len(self.reg), 3)
        self.reg.close(self.conns[0])
        self.assertEqual(len(self.reg), 2)
        self.assertEqual(self.conns[0].closed(), 1)
        self.assertEqual(self.conns[1].closed(), 0)
        self.assertEqual(self.conns[2].closed(), 0)
        self.reg.close(self.conns[2])
        self.assertEqual(len(self.reg), 1)
        self.assertEqual(self.conns[0].closed(), 1)
        self.assertEqual(self.conns[1].closed(), 0)
        self.assertEqual(self.conns[2].closed(), 1)
        self.reg.close(self.conns[1])
        self.assertEqual(len(self.reg), 0)
        self.assertEqual(self.conns[0].closed(), 1)
        self.assertEqual(self.conns[1].closed(), 1)
        self.assertEqual(self.conns[2].closed(), 1)

    def testCloseForeignConnection(self):
        # Should be able to close a connection that has not been registered
        self.assertEqual(len(self.reg), 3)
        conn = self.Klass()
        self.reg.close(conn)
        # The registry itself must be untouched.
        self.assertEqual(len(self.reg), 3)
        self.assertEqual(self.conns[0].closed(), 0)
        self.assertEqual(self.conns[1].closed(), 0)
        self.assertEqual(self.conns[2].closed(), 0)
        self.assertEqual(conn.closed(), 1)

    def testCloseAllConnections(self):
        # Should be able to close all registered connections at once
        self.assertEqual(len(self.reg), 3)
        self.reg.closeAll()
        self.assertEqual(len(self.reg), 0)
        self.assertEqual(self.conns[0].closed(), 1)
        self.assertEqual(self.conns[1].closed(), 1)
        self.assertEqual(self.conns[2].closed(), 1)

    def testContains(self):
        # Should be able to check if a connection is registered
        self.assertEqual(len(self.reg), 3)
        self.assertTrue(self.reg.contains(self.conns[0]))
        self.assertTrue(self.reg.contains(self.conns[1]))
        self.assertTrue(self.reg.contains(self.conns[2]))
class TestApplicationRegistry(TestConnectionRegistry):
    '''Test the registry with Application-like objects'''
    # Reruns every inherited test with App instances, which count as
    # closed only once both their REQUEST and their _p_jar are closed.
    class App(object):
        class Conn(object):
            _closed = 0
            def close(self):
                self._closed = 1
            def closed(self):
                return self._closed
        def __init__(self):
            self.REQUEST = self.Conn()
            self._p_jar = self.Conn()
        def closed(self):
            if self.REQUEST.closed() and self._p_jar.closed():
                return 1
            return 0
    Klass = App
class TestListConverter(base.TestCase):
    '''Test utils.makelist'''
    def testList0(self):
        self.assertEqual(utils.makelist([]), [])
    def testList1(self):
        self.assertEqual(utils.makelist(['foo']), ['foo'])
    def testList2(self):
        self.assertEqual(utils.makelist(['foo', 'bar']), ['foo', 'bar'])
    def testTuple0(self):
        self.assertEqual(utils.makelist(()), [])
    def testTuple1(self):
        self.assertEqual(utils.makelist(('foo',)), ['foo'])
    def testTuple2(self):
        self.assertEqual(utils.makelist(('foo', 'bar')), ['foo', 'bar'])
    def testString0(self):
        # Empty string converts to an empty list.
        self.assertEqual(utils.makelist(''), [])
    def testString1(self):
        self.assertEqual(utils.makelist('foo'), ['foo'])
    def testString2(self):
        # A comma-separated string is NOT split; it stays one element.
        self.assertEqual(utils.makelist('foo, bar'), ['foo, bar'])
    def testInteger(self):
        # Non-sequence inputs must be rejected.
        self.assertRaises(ValueError, utils.makelist, 0)
    def testObject(self):
        class Dummy(object):
            pass
        self.assertRaises(ValueError, utils.makelist, Dummy())
class TestRequestVariables(base.TestCase):
    '''Makes sure the REQUEST contains required variables'''
    # Keys every properly set-up REQUEST must carry with non-empty values.
    _required_keys = (
        'SERVER_NAME', 'SERVER_PORT', 'REQUEST_METHOD', 'URL',
        'SERVER_URL', 'URL0', 'URL1', 'BASE0', 'BASE1', 'BASE2',
        'ACTUAL_URL',
    )
    def testRequestVariables(self):
        request = self.app.REQUEST
        for key in self._required_keys:
            self.assertNotEqual(request.get(key, ''), '')
# Module-level sentinels: the TestRequestGarbage* Held.__del__ finalizers
# append to these so the tests can observe when held objects are collected.
_sentinel1 = []
_sentinel2 = []
_sentinel3 = []
class TestRequestGarbage1(base.TestCase):
    '''Make sure base.app + base.close does not leak REQUEST._held'''
    class Held(object):
        # Finalizer records itself in the module-level sentinel so the
        # test can detect that the held object was garbage-collected.
        def __del__(self):
            _sentinel1.append('__del__')
    def afterSetUp(self):
        _sentinel1[:] = []
        self.anApp = base.app()
        self.anApp.REQUEST._hold(self.Held())
    def testBaseCloseClosesRequest(self):
        # Closing the app must drop the REQUEST's held references,
        # allowing Held to be collected.
        base.close(self.anApp)
        gc.collect()
        self.assertEqual(_sentinel1, ['__del__'])
class TestRequestGarbage2(base.TestCase):
    '''Make sure self._app + self._clear does not leak REQUEST._held'''
    class Held(object):
        # Finalizer records itself in the module-level sentinel.
        def __del__(self):
            _sentinel2.append('__del__')
    def afterSetUp(self):
        _sentinel2[:] = []
        self.app.REQUEST._hold(self.Held())
    def testClearClosesRequest(self):
        # _clear() must drop the REQUEST's held references.
        self._clear()
        gc.collect()
        self.assertEqual(_sentinel2, ['__del__'])
class TestRequestGarbage3(sandbox.Sandboxed, base.TestCase):
    '''Make sure self._app + self._clear does not leak REQUEST._held'''
    # Same as TestRequestGarbage2, but run inside a sandboxed test case.
    class Held(object):
        # Finalizer records itself in the module-level sentinel.
        def __del__(self):
            _sentinel3.append('__del__')
    def afterSetUp(self):
        _sentinel3[:] = []
        self.app.REQUEST._hold(self.Held())
    def testClearClosesRequest(self):
        self._clear()
        gc.collect()
        self.assertEqual(_sentinel3, ['__del__'])
def test_suite():
    # Assemble the module's test classes into one suite (order preserved).
    from unittest import TestSuite, makeSuite
    suite = TestSuite()
    for test_class in (TestTestCase,
                       TestSetUpRaises,
                       TestTearDownRaises,
                       TestConnectionRegistry,
                       TestApplicationRegistry,
                       TestListConverter,
                       TestRequestVariables,
                       TestRequestGarbage1,
                       TestRequestGarbage2,
                       TestRequestGarbage3):
        suite.addTest(makeSuite(test_class))
    return suite
| 30.381663
| 78
| 0.620605
|
4a0101542bbb8664c38e58e93c45789ef70af82c
| 8,048
|
py
|
Python
|
sdk/python/pulumi_azure_nextgen/storagepool/v20200315preview/get_disk_pool.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 31
|
2020-09-21T09:41:01.000Z
|
2021-02-26T13:21:59.000Z
|
sdk/python/pulumi_azure_nextgen/storagepool/v20200315preview/get_disk_pool.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 231
|
2020-09-21T09:38:45.000Z
|
2021-03-01T11:16:03.000Z
|
sdk/python/pulumi_azure_nextgen/storagepool/v20200315preview/get_disk_pool.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 4
|
2020-09-29T14:14:59.000Z
|
2021-02-10T20:38:16.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetDiskPoolResult',
'AwaitableGetDiskPoolResult',
'get_disk_pool',
]
@pulumi.output_type
class GetDiskPoolResult:
    """
    Response for Disk pool request.
    """
    # NOTE: generated code (see file header) — prefer regenerating over
    # hand edits; only comments have been added here.
    def __init__(__self__, additional_capabilities=None, availability_zones=None, disks=None, id=None, location=None, name=None, provisioning_state=None, status=None, subnet_id=None, system_data=None, tags=None, tier=None, type=None):
        # Each argument is type-checked (when truthy) and stored via
        # pulumi.set so the @pulumi.getter properties below can read it.
        if additional_capabilities and not isinstance(additional_capabilities, list):
            raise TypeError("Expected argument 'additional_capabilities' to be a list")
        pulumi.set(__self__, "additional_capabilities", additional_capabilities)
        if availability_zones and not isinstance(availability_zones, list):
            raise TypeError("Expected argument 'availability_zones' to be a list")
        pulumi.set(__self__, "availability_zones", availability_zones)
        if disks and not isinstance(disks, list):
            raise TypeError("Expected argument 'disks' to be a list")
        pulumi.set(__self__, "disks", disks)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if provisioning_state and not isinstance(provisioning_state, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if status and not isinstance(status, str):
            raise TypeError("Expected argument 'status' to be a str")
        pulumi.set(__self__, "status", status)
        if subnet_id and not isinstance(subnet_id, str):
            raise TypeError("Expected argument 'subnet_id' to be a str")
        pulumi.set(__self__, "subnet_id", subnet_id)
        if system_data and not isinstance(system_data, dict):
            raise TypeError("Expected argument 'system_data' to be a dict")
        pulumi.set(__self__, "system_data", system_data)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
        if tier and not isinstance(tier, str):
            raise TypeError("Expected argument 'tier' to be a str")
        pulumi.set(__self__, "tier", tier)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter(name="additionalCapabilities")
    def additional_capabilities(self) -> Optional[Sequence[str]]:
        """
        List of additional capabilities for Disk pool.
        """
        return pulumi.get(self, "additional_capabilities")
    @property
    @pulumi.getter(name="availabilityZones")
    def availability_zones(self) -> Sequence[str]:
        """
        Logical zone for Disk pool resource; example: ["1"].
        """
        return pulumi.get(self, "availability_zones")
    @property
    @pulumi.getter
    def disks(self) -> Optional[Sequence['outputs.DiskResponse']]:
        """
        List of Azure Managed Disks to attach to a Disk pool. Can attach 8 disks at most.
        """
        return pulumi.get(self, "disks")
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def location(self) -> str:
        """
        The geo-location where the resource lives.
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the resource
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        State of the operation on the resource.
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter
    def status(self) -> str:
        """
        Operational status of the Disk pool.
        """
        return pulumi.get(self, "status")
    @property
    @pulumi.getter(name="subnetId")
    def subnet_id(self) -> str:
        """
        Azure Resource ID of a Subnet for the Disk pool.
        """
        return pulumi.get(self, "subnet_id")
    @property
    @pulumi.getter(name="systemData")
    def system_data(self) -> 'outputs.SystemMetadataResponse':
        """
        Resource metadata required by ARM RPC
        """
        return pulumi.get(self, "system_data")
    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter
    def tier(self) -> str:
        """
        Determines the SKU of VM deployed for Disk pool
        """
        return pulumi.get(self, "tier")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts.
        """
        return pulumi.get(self, "type")
class AwaitableGetDiskPoolResult(GetDiskPoolResult):
    # Awaitable wrapper: the unreachable `yield` makes __await__ a
    # generator without ever suspending, so `await` simply produces a
    # plain GetDiskPoolResult copy of this object.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetDiskPoolResult(
            additional_capabilities=self.additional_capabilities,
            availability_zones=self.availability_zones,
            disks=self.disks,
            id=self.id,
            location=self.location,
            name=self.name,
            provisioning_state=self.provisioning_state,
            status=self.status,
            subnet_id=self.subnet_id,
            system_data=self.system_data,
            tags=self.tags,
            tier=self.tier,
            type=self.type)
def get_disk_pool(disk_pool_name: Optional[str] = None,
                  resource_group_name: Optional[str] = None,
                  opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDiskPoolResult:
    """
    Response for Disk pool request.
    :param str disk_pool_name: The name of the Disk pool.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    """
    # Build the invoke payload (camelCase keys per the provider API).
    __args__ = dict()
    __args__['diskPoolName'] = disk_pool_name
    __args__['resourceGroupName'] = resource_group_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Synchronous provider invoke; the result is re-wrapped as awaitable.
    __ret__ = pulumi.runtime.invoke('azure-nextgen:storagepool/v20200315preview:getDiskPool', __args__, opts=opts, typ=GetDiskPoolResult).value
    return AwaitableGetDiskPoolResult(
        additional_capabilities=__ret__.additional_capabilities,
        availability_zones=__ret__.availability_zones,
        disks=__ret__.disks,
        id=__ret__.id,
        location=__ret__.location,
        name=__ret__.name,
        provisioning_state=__ret__.provisioning_state,
        status=__ret__.status,
        subnet_id=__ret__.subnet_id,
        system_data=__ret__.system_data,
        tags=__ret__.tags,
        tier=__ret__.tier,
        type=__ret__.type)
| 36.089686
| 234
| 0.643141
|
4a0101be93bb230faa996574c6c56de034575603
| 7,071
|
py
|
Python
|
code/model.py
|
Run542968/Self_Attention_Pytorch
|
35b5c520904c709f35a9d3e44c0cdba92a35d10b
|
[
"Apache-2.0"
] | null | null | null |
code/model.py
|
Run542968/Self_Attention_Pytorch
|
35b5c520904c709f35a9d3e44c0cdba92a35d10b
|
[
"Apache-2.0"
] | null | null | null |
code/model.py
|
Run542968/Self_Attention_Pytorch
|
35b5c520904c709f35a9d3e44c0cdba92a35d10b
|
[
"Apache-2.0"
] | null | null | null |
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
import numpy as np
class SelfBiLSTMAttention(nn.Module):
    """Self-attentive sentence classifier: embedding -> BiLSTM -> r-hop
    self-attention -> hop-averaged sentence embedding -> linear layer.
    """
    def __init__(self,embedding_dim,lstm_hide_dim,max_time_len,batch_size,da,r,class_num,word2id,use_pre_embeddings=False,pre_embeddings_path=None):
        super(SelfBiLSTMAttention, self).__init__()
        ## initial embedding (random, or loaded from a pretrained-vector file)
        self.word2id=word2id
        self.embeddings,embeddings_dim=self.load_embedding(use_pre_embeddings,pre_embeddings_path,word2id,embedding_dim)
        ## BiLSTM hyper-parameters and construction
        self.num_layer=1
        self.batch_first=True
        self.bidirectional=True
        self.max_time_len = max_time_len
        self.lstm_hide_dim=lstm_hide_dim
        self.lstm_input=embedding_dim
        self.batch_size=batch_size
        # NOTE(review): the hidden state is created once here and reused
        # across forward() calls (state carries over between batches) —
        # confirm this is intended.
        self.hidden_state=self.init_hidden()
        self.bilstm=nn.LSTM(input_size=embedding_dim,hidden_size=lstm_hide_dim,num_layers=self.num_layer,batch_first=self.batch_first,bidirectional=self.bidirectional)
        # self-attention hyper-parameters and construction
        self.da=da
        self.r=r
        self.class_num=class_num
        self.Ws1=nn.Linear(in_features=2*lstm_hide_dim,out_features=da)
        self.Ws2=nn.Linear(in_features=da,out_features=r)
        self.fc=nn.Linear(in_features=lstm_hide_dim*2,out_features=class_num)
    def load_embedding(self,use_pre_embeddings,pre_embeddings_path,word2id,embeddings_dim):
        # Build the embedding layer: randomly initialised, or filled from a
        # whitespace-separated "<word> <vec...>" file (GloVe/word2vec style).
        if not use_pre_embeddings:
            word_embeddings = torch.nn.Embedding(len(word2id), embeddings_dim, padding_idx=0)
        elif use_pre_embeddings:
            embeddings = np.zeros((len(word2id), embeddings_dim))
            with open(pre_embeddings_path,encoding="utf-8") as f:
                for line in f.readlines():
                    values = line.split()
                    word = values[0]
                    index = word2id.get(word)
                    # NOTE(review): `if index:` also skips index 0 — words
                    # mapped to id 0 never receive a pretrained vector.
                    if index:
                        vector = np.array(values[1:], dtype='float32')
                        if vector.shape[-1] != embeddings_dim:
                            raise Exception('Dimension not matching.')
                        embeddings[index] = vector
            pre_embeddings=torch.from_numpy(embeddings).float()
            word_embeddings = torch.nn.Embedding(pre_embeddings.size(0), pre_embeddings.size(1))
            word_embeddings.weight = torch.nn.Parameter(pre_embeddings)
        return word_embeddings, embeddings_dim
    def init_hidden(self):
        # Zero (h0, c0) pair: [num_layers*num_directions, batch, hidden].
        bidirection=2 if self.bidirectional else 1
        return (Variable(torch.zeros(self.num_layer*bidirection,self.batch_size,self.lstm_hide_dim)),
                Variable(torch.zeros(self.num_layer*bidirection,self.batch_size,self.lstm_hide_dim)))
    def attention(self,H):#H:[bs,n,2u]
        # A = softmax(Ws2(tanh(Ws1(H)))) transposed: r attention hops,
        # softmax taken over the n timesteps (dim=1 before transpose).
        x = torch.tanh(self.Ws1(H))#x:[bs,n,da]
        x = self.Ws2(x)#x:[bs,n,r]
        # x = self.softmax(x, 1)#x:[512,200,10]
        x=F.softmax(x,dim=1)
        # print("aftersoftmax:",x)
        # print("aftersoftmax.shape",x.shape)
        A = x.transpose(1, 2)#A:[bs,r,n]
        return A
    def forward(self,x):#x:[bs,n]
        embeddings=self.embeddings(x)#embeddings[bs,n,embeddings_dim]
        #print(embeddings.size())
        # NOTE(review): the stored hidden state from the previous call is
        # fed back in and the new one kept, linking batches together.
        H,self.hidden_state=self.bilstm(embeddings.view(self.batch_size, self.max_time_len, -1),self.hidden_state)#H:[bs,n,2u]
        A=self.attention(H)#A:[bs,r,n]
        M = A @ H # '@' is matrix multiplication, M:[bs,r,2u]
        avg_sentence_embeddings = torch.sum(M, 1) / self.r#[bs,2u]
        output=self.fc(avg_sentence_embeddings)
        return output, A
    def l2_matrix_norm(self, m):
        # Sum of the L2 norms of the rows along dim 1, accumulated in
        # double precision.
        return torch.sum(torch.sum(torch.sum(m ** 2, 1) ** 0.5).type(torch.DoubleTensor))
class CNN_Pooling_MLP(nn.Module):
    """Text classifier: embedding -> 1-D convolution (kernel 3) ->
    global max-pool over time -> Linear(3000) -> dropout -> Linear(class_num).
    No activation between the linear layers (matches the original design).
    """
    def __init__(self, word2id, embeddings_dim, batch_size, class_num=1, hide_dim=300, sqe_len=100):
        super(CNN_Pooling_MLP, self).__init__()
        self.embeddings = torch.nn.Embedding(len(word2id), embeddings_dim, padding_idx=0)
        self.hide_dim = hide_dim
        self.sqe_len = sqe_len
        self.class_num = class_num
        self.batch_size = batch_size
        # Conv output length is sqe_len - 2; the pooling kernel
        # (sqe_len - 3) + 1 covers it entirely (global max-pool).
        self.conv = nn.Conv1d(in_channels=embeddings_dim, out_channels=hide_dim, kernel_size=3, stride=1)
        self.max_pooling = nn.MaxPool1d((sqe_len - 3) + 1)
        self.fc1 = nn.Sequential(
            nn.Linear(in_features=hide_dim, out_features=3000),
        )
        self.dropout = nn.Dropout(0.5)
        self.fc2 = nn.Sequential(
            nn.Linear(in_features=3000, out_features=class_num),
        )
    def forward(self, x):
        # x: [batch, sqe_len] token ids.
        embedded = self.embeddings(x)                # [batch, sqe_len, emb_dim]
        channel_first = embedded.permute(0, 2, 1)    # [batch, emb_dim, sqe_len]
        conv_out = self.conv(channel_first)          # [batch, hide_dim, sqe_len-2]
        pooled = self.max_pooling(conv_out).squeeze(2)  # [batch, hide_dim]
        hidden = self.dropout(self.fc1(pooled))      # [batch, 3000]
        return self.fc2(hidden)                      # [batch, class_num]
class BiLSTM_Pooling(nn.Module):
    """Text classifier: embedding -> BiLSTM -> max-pool over time -> MLP."""
    def __init__(self,word2id,embedding_dim,lstm_hide_dim,max_time_len,batch_size,class_num):
        super(BiLSTM_Pooling, self).__init__()
        ## initial embedding
        self.embeddings= torch.nn.Embedding(len(word2id), embedding_dim, padding_idx=0)
        self.embeddings_dim=embedding_dim
        ## BiLSTM hyper-parameters and construction
        self.num_layer=1
        self.batch_first=True
        self.bidirectional=True
        self.max_time_len = max_time_len
        self.lstm_hide_dim=lstm_hide_dim
        self.batch_size=batch_size
        # NOTE(review): the hidden state is created once and reused across
        # forward() calls, carrying state between batches — confirm intended.
        self.hidden_state=self.init_hidden()
        self.bilstm=nn.LSTM(input_size=embedding_dim,hidden_size=lstm_hide_dim,num_layers=self.num_layer,batch_first=self.batch_first,bidirectional=self.bidirectional)
        self.max_pooling=nn.MaxPool1d(kernel_size=max_time_len)
        self.fc1 = nn.Sequential(
            nn.Linear(in_features=2*lstm_hide_dim, out_features=3000),
        )
        self.dropout = nn.Dropout(0.5)
        self.fc2 = nn.Sequential(
            nn.Linear(in_features=3000, out_features=class_num),
        )
    def init_hidden(self):
        # Zero (h0, c0) pair: [num_layers*num_directions, batch, hidden].
        bidirection=2 if self.bidirectional else 1
        return (Variable(torch.zeros(self.num_layer*bidirection,self.batch_size,self.lstm_hide_dim)),
                Variable(torch.zeros(self.num_layer*bidirection,self.batch_size,self.lstm_hide_dim)))
    def forward(self,x):#x:[bs,n],n=sqe_len
        embeddings=self.embeddings(x)
        H,self.hidden_state=self.bilstm(embeddings.view(self.batch_size, self.max_time_len, -1),self.hidden_state)#H:[bs,n,2u]
        H=torch.transpose(H,1,2)#H:[bs,2u,n]
        x=self.max_pooling(H)#x:[bs,2u,1]
        x=x.squeeze(2)#x:[bs,2u]
        x=self.fc1(x)#x:[bs,3000]
        x=self.dropout(x)#x:[bs,3000]
        out=self.fc2(x)#out:[bs,class_num]
        return out
| 43.115854
| 167
| 0.652949
|
4a010229eb52d5d2f95527995a27c711d2df0eb5
| 196
|
py
|
Python
|
Codeforces/contest636#d3/A.py
|
shivammehta007/Fun-Coding
|
1ecfd682b336f5f9bc50364375de7d4c67714cbd
|
[
"Vim",
"Fair",
"MIT"
] | null | null | null |
Codeforces/contest636#d3/A.py
|
shivammehta007/Fun-Coding
|
1ecfd682b336f5f9bc50364375de7d4c67714cbd
|
[
"Vim",
"Fair",
"MIT"
] | null | null | null |
Codeforces/contest636#d3/A.py
|
shivammehta007/Fun-Coding
|
1ecfd682b336f5f9bc50364375de7d4c67714cbd
|
[
"Vim",
"Fair",
"MIT"
] | null | null | null |
def solve(n):
    """Return n // (2**k - 1) for the smallest k >= 2 dividing n evenly.

    The original used float division (`n / (2**k - 1)` with `int(x) == x`),
    which loses precision for large n; this uses exact integer arithmetic.
    """
    k = 2
    while True:
        divisor = 2 ** k - 1
        if n % divisor == 0:
            return n // divisor
        k += 1


if __name__ == "__main__":
    # One integer per test case; print the answer for each.
    for _ in range(int(input())):
        print(solve(int(input())))
| 14
| 26
| 0.331633
|
4a0102650003e8cfba1730dab29068b5c08148bf
| 15,080
|
py
|
Python
|
simple_romp/romp/smpl.py
|
jjandnn/ROMP
|
653a0c9de13c7e242bc304147ae6559d1c6ff283
|
[
"MIT"
] | null | null | null |
simple_romp/romp/smpl.py
|
jjandnn/ROMP
|
653a0c9de13c7e242bc304147ae6559d1c6ff283
|
[
"MIT"
] | null | null | null |
simple_romp/romp/smpl.py
|
jjandnn/ROMP
|
653a0c9de13c7e242bc304147ae6559d1c6ff283
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import os,sys
import os.path as osp
import pickle
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
#from .utils import time_cost
class VertexJointSelector(nn.Module):
    """Assembles the full joint set from mesh vertices and regressed joints.

    Output layout along dim 1: the joints passed in, then vertices picked
    by `extra_joints_idxs`, then joints regressed by `J_regressor_extra9`,
    then joints regressed by `J_regressor_h36m17`.
    """
    def __init__(self, extra_joints_idxs, J_regressor_extra9, J_regressor_h36m17, dtype=torch.float32):
        super(VertexJointSelector, self).__init__()
        self.register_buffer('extra_joints_idxs', extra_joints_idxs)
        self.register_buffer('J_regressor_extra9', J_regressor_extra9)
        self.register_buffer('J_regressor_h36m17', J_regressor_h36m17)
    def forward(self, vertices, joints):
        # Direct vertex picks: [B, len(idxs), 3].
        picked = torch.index_select(vertices, 1, self.extra_joints_idxs)
        # Linear regressions ([J, V] @ [B, V, 3] -> [B, J, 3], broadcast).
        regressed9 = torch.matmul(self.J_regressor_extra9, vertices)
        regressed_h36m = torch.matmul(self.J_regressor_h36m17, vertices)
        return torch.cat((joints, picked, regressed9, regressed_h36m), dim=1)
class SMPL(nn.Module):
    # SMPL body model loaded from a pre-packed .pth file; produces posed
    # mesh vertices and an extended joint set via VertexJointSelector.
    def __init__(self, model_path, model_type='smpl', dtype=torch.float32):
        super(SMPL, self).__init__()
        self.dtype = dtype
        # model_info is a dict of tensors packed offline (template mesh,
        # blend shapes, regressors, kinematic tree, skinning weights).
        model_info = torch.load(model_path)
        self.vertex_joint_selector = VertexJointSelector(model_info['extra_joints_index'], \
            model_info['J_regressor_extra9'], model_info['J_regressor_h36m17'], dtype=self.dtype)
        self.register_buffer('faces_tensor', model_info['f'])
        # The vertices of the template model
        self.register_buffer('v_template', model_info['v_template'])
        # The shape components, take the top 10 PCA componence.
        if model_type == 'smpl':
            self.register_buffer('shapedirs', model_info['shapedirs'])
        elif model_type == 'smpla':
            self.register_buffer('shapedirs', model_info['smpla_shapedirs'])
        self.register_buffer('J_regressor', model_info['J_regressor'])
        # Pose blend shape basis: 6890 x 3 x 207, reshaped to 6890*3 x 207, then transpose to 207 x 6890*3
        self.register_buffer('posedirs', model_info['posedirs'])
        # indices of parents for each joints
        self.register_buffer('parents', model_info['kintree_table'])
        self.register_buffer('lbs_weights',model_info['weights'])
    #@time_cost('SMPL')
    def forward(self, betas=None, poses=None):
        ''' Forward pass for the SMPL model
            Parameters
            ----------
            betas: np.ndarray or torch.tensor, shape Bx10
                SMPL shape (PCA) coefficients.
            poses: np.ndarray or torch.tensor, shape Bx(J*3)
                Pose parameters in axis-angle format, one 3-vector per
                joint of the packed model's kinematic tree.
            Return
            ----------
            vertices: torch.tensor, B x 6890 x 3, posed mesh vertices
            joints54: torch.tensor, regressed joints (SMPL joints followed
                by the extra and H36M-style joints; see VertexJointSelector)
            faces_tensor: mesh face indices
        '''
        # Accept numpy inputs and move everything onto the buffers' device.
        if isinstance(betas,np.ndarray):
            betas = torch.from_numpy(betas).type(self.dtype)
        if isinstance(poses,np.ndarray):
            poses = torch.from_numpy(poses).type(self.dtype)
        default_device = self.shapedirs.device
        betas, poses = betas.to(default_device), poses.to(default_device)
        vertices, joints = lbs(betas, poses, self.v_template,
                               self.shapedirs, self.posedirs,
                               self.J_regressor, self.parents,
                               self.lbs_weights, dtype=self.dtype)
        joints54 = self.vertex_joint_selector(vertices, joints)
        return vertices, joints54, self.faces_tensor
def lbs(betas, pose, v_template, shapedirs, posedirs, J_regressor, parents,
        lbs_weights, dtype=torch.float32):
    ''' Performs Linear Blend Skinning with the given shape and pose parameters
        Parameters
        ----------
        betas : torch.tensor BxNB
            The tensor of shape parameters
        pose : torch.tensor Bx(J + 1) * 3
            The pose parameters in axis-angle format
        v_template torch.tensor BxVx3
            The template mesh that will be deformed
        shapedirs : torch.tensor 1xNB
            The tensor of PCA shape displacements
        posedirs : torch.tensor Px(V * 3)
            The pose PCA coefficients
        J_regressor : torch.tensor JxV
            The regressor array that is used to calculate the joints from
            the position of the vertices
        parents: torch.tensor J
            The array that describes the kinematic tree for the model
        lbs_weights: torch.tensor N x V x (J + 1)
            The linear blend skinning weights that represent how much the
            rotation matrix of each part affects each vertex
        dtype: torch.dtype, optional
        Returns
        -------
        verts: torch.tensor BxVx3
            The vertices of the mesh after applying the shape and pose
            displacements.
        joints: torch.tensor BxJx3
            The joints of the model
    '''
    batch_size = betas.shape[0]
    # Add shape contribution
    v_shaped = v_template + torch.einsum('bl,mkl->bmk', [betas, shapedirs])
    # Get the joints
    # NxJx3 array
    J = torch.einsum('bik,ji->bjk', [v_shaped, J_regressor])
    # From here on compute in the pose tensor's dtype (overrides the kwarg).
    dtype = pose.dtype
    posedirs = posedirs.type(dtype)
    # 3. Add pose blend shapes
    # N x J x 3 x 3
    ident = torch.eye(3, dtype=dtype, device=J_regressor.device)
    rot_mats = batch_rodrigues(
        pose.view(-1, 3), dtype=dtype).view([batch_size, -1, 3, 3]).type(dtype)
    # Pose feature excludes the global (root) rotation at index 0.
    pose_feature = (rot_mats[:, 1:, :, :] - ident).view([batch_size, -1]).type(dtype)
    # (N x P) x (P, V * 3) -> N x V x 3
    pose_offsets = torch.matmul(pose_feature, posedirs.type(dtype)) \
        .view(batch_size, -1, 3)
    v_posed = pose_offsets + v_shaped
    # 4. Get the global joint location
    J_transformed, A = batch_rigid_transform(rot_mats, J, parents, dtype=dtype)
    # 5. Do skinning:
    # W is N x V x (J + 1)
    W = lbs_weights.unsqueeze(dim=0).expand([batch_size, -1, -1])
    # (N x V x (J + 1)) x (N x (J + 1) x 16)
    num_joints = J_regressor.shape[0]
    T = torch.matmul(W, A.view(batch_size, num_joints, 16)) \
        .view(batch_size, -1, 4, 4)
    # Skin vertices in homogeneous coordinates and drop the w component.
    homogen_coord = torch.ones([batch_size, v_posed.shape[1], 1],
                               dtype=dtype, device=J_regressor.device)
    v_posed_homo = torch.cat([v_posed, homogen_coord], dim=2)
    v_homo = torch.matmul(T, torch.unsqueeze(v_posed_homo, dim=-1))
    verts = v_homo[:, :, :3, 0]
    return verts, J_transformed
def batch_rodrigues(rot_vecs, epsilon=1e-8, dtype=torch.float32):
    ''' Calculates the rotation matrices for a batch of rotation vectors
        Parameters
        ----------
        rot_vecs: torch.tensor Nx3
            array of N axis-angle vectors
        epsilon: float
            small offset added before the norm so the division below stays
            finite for (near-)zero rotation vectors
        dtype: torch.dtype
            dtype of the temporaries created here
        Returns
        -------
        R: torch.tensor Nx3x3
            The rotation matrices for the given axis-angle parameters
    '''
    batch_size = rot_vecs.shape[0]
    device = rot_vecs.device
    # Fix: use the epsilon parameter (previously ignored in favour of a
    # hard-coded 1e-8; the default keeps behavior identical).
    angle = torch.norm(rot_vecs + epsilon, dim=1, keepdim=True)
    rot_dir = rot_vecs / angle
    cos = torch.unsqueeze(torch.cos(angle), dim=1)
    sin = torch.unsqueeze(torch.sin(angle), dim=1)
    # Bx1 arrays
    rx, ry, rz = torch.split(rot_dir, 1, dim=1)
    zeros = torch.zeros((batch_size, 1), dtype=dtype, device=device)
    # Skew-symmetric cross-product matrix K of the (unit) rotation axis.
    # (A dead `K = torch.zeros(...)` that was immediately overwritten here
    # has been removed.)
    K = torch.cat([zeros, -rz, ry, rz, zeros, -rx, -ry, rx, zeros], dim=1) \
        .view((batch_size, 3, 3))
    ident = torch.eye(3, dtype=dtype, device=device).unsqueeze(dim=0)
    # Rodrigues' formula: R = I + sin(a) * K + (1 - cos(a)) * K^2.
    rot_mat = ident + sin * K + (1 - cos) * torch.bmm(K, K)
    return rot_mat
def transform_mat(R, t):
    ''' Creates a batch of transformation matrices
        Args:
            - R: Bx3x3 array of a batch of rotation matrices
            - t: Bx3x1 array of a batch of translation vectors
        Returns:
            - T: Bx4x4 Transformation matrix
    '''
    # No padding left or right, only add an extra row: R -> Bx4x3 (zeros
    # row below), t -> Bx4x1 (1 below), then concatenate into Bx4x4.
    return torch.cat([F.pad(R, [0, 0, 0, 1]),
                      F.pad(t, [0, 0, 0, 1], value=1)], dim=2)
def batch_rigid_transform(rot_mats, joints, parents, dtype=torch.float32):
    """
    Applies a batch of rigid transformations to the joints
    Parameters
    ----------
    rot_mats : torch.tensor BxNx3x3
        Tensor of rotation matrices
    joints : torch.tensor BxNx3
        Locations of joints
    parents : torch.tensor BxN
        The kinematic tree of each object
    dtype : torch.dtype, optional:
        The data type of the created tensors, the default is torch.float32
    Returns
    -------
    posed_joints : torch.tensor BxNx3
        The locations of the joints after applying the pose rotations
    rel_transforms : torch.tensor BxNx4x4
        The relative (with respect to the root joint) rigid transformations
        for all the joints
    """
    joints = torch.unsqueeze(joints, dim=-1)
    # Joint offsets relative to each joint's parent (root stays absolute).
    rel_joints = joints.clone()
    rel_joints[:, 1:] -= joints[:, parents[1:]]
    transforms_mat = transform_mat(
        rot_mats.reshape(-1, 3, 3),
        rel_joints.reshape(-1, 3, 1)).reshape(-1, joints.shape[1], 4, 4)
    # Compose each joint's transform onto its parent's, walking the tree.
    transform_chain = [transforms_mat[:, 0]]
    for i in range(1, parents.shape[0]):
        # Subtract the joint location at the rest pose
        # No need for rotation, since it's identity when at rest
        curr_res = torch.matmul(transform_chain[parents[i]],
                                transforms_mat[:, i])
        transform_chain.append(curr_res)
    transforms = torch.stack(transform_chain, dim=1)
    # The last column of the transformations contains the posed joints.
    # (Fix: this statement was duplicated in the original; computed once now.)
    posed_joints = transforms[:, :, :3, 3]
    joints_homogen = F.pad(joints, [0, 0, 0, 1])
    rel_transforms = transforms - F.pad(
        torch.matmul(transforms, joints_homogen), [3, 0, 0, 0, 0, 0, 0, 0])
    return posed_joints, rel_transforms
def export_smpl_to_onnx_dynamic(smpl_model, save_file, bs=1):
    """Export the SMPL model to ONNX with a dynamic batch dimension.

    Supports variable batch sizes at inference time at some speed cost
    (the original note: 'support dynamics batch size but slow').
    NOTE(review): requires a CUDA device — sample inputs use .cuda().
    """
    a = torch.rand([bs, 10]).cuda()
    b = torch.rand([bs, 72]).cuda()
    # Mark dim 0 of every input/output as dynamic.
    dynamic_axes = {'smpl_betas':[0], 'smpl_thetas':[0], 'verts':[0], 'joints':[0]}
    torch.onnx.export(smpl_model, (a, b),
                      save_file,
                      input_names=['smpl_betas', 'smpl_thetas'],
                      output_names=['verts', 'joints', 'faces'],
                      export_params=True,
                      opset_version=12,
                      do_constant_folding=True,
                      dynamic_axes=dynamic_axes)
    print('SMPL onnx saved into: ', save_file)
def export_smpl_to_onnx_static(smpl_model, save_file, bs=1):
    """Export the SMPL model to ONNX with a fixed batch size `bs`.

    NOTE(review): requires a CUDA device — sample inputs use .cuda().
    """
    a = torch.rand([bs, 10]).cuda()
    b = torch.rand([bs, 72]).cuda()
    torch.onnx.export(smpl_model, (a, b),
                      save_file,
                      input_names=['smpl_betas', 'smpl_thetas'],
                      output_names=['verts', 'joints', 'faces'],
                      export_params=True,
                      opset_version=12,
                      do_constant_folding=True)
    print('SMPL onnx saved into: ', save_file)
def test_smpl(smpl_model, dtype=torch.float32):
    """Smoke-test: run SMPL with zero shape/pose 200 times, render each
    mesh with OpenCV and report mean per-iteration wall time.

    NOTE(review): needs OpenCV and the project-local `visualization`
    module; not runnable standalone.
    """
    import time, cv2
    from visualization import render_human_mesh
    cost_time = []
    batch_size = 1
    a = torch.zeros([batch_size, 10]).type(dtype) #.cuda()
    b = torch.zeros([batch_size, 72]).type(dtype) #.cuda()
    image_length = 1024
    bg_image = np.ones((image_length, image_length,3), dtype=np.uint8)*255
    for _ in range(200):
        start_time = time.time()
        outputs = smpl_model(a,b)
        # Scale/offset vertices into image space.
        # NOTE(review): the `+ +` below is a harmless unary plus, but it
        # looks like a typo — confirm intent.
        verts_np = (outputs[0].cpu().numpy() * image_length/2).astype(np.float32) + + np.array([[[.5,.5,0]]]).astype(np.float32) * image_length
        faces_np = outputs[2].cpu().numpy().astype(np.int32)
        rendered_image = render_human_mesh(bg_image, verts_np, faces_np)
        print(rendered_image.shape)
        cv2.imshow('rendering', rendered_image)
        cv2.waitKey(1)
        end_time = time.time()
        cost_time.append(end_time - start_time)
    print('cost time ', np.mean(cost_time))
    print(cost_time[:10])
    #for key, item in outputs.items():
    #    print(key, item.shape)
    return
def test_onnx(dtype=np.float32, batch_size=1):
    """Benchmark the exported smpl.onnx with onnxruntime: run 200 inferences
    on random inputs and report mean per-iteration wall time.

    NOTE(review): needs onnxruntime and a previously exported smpl.onnx
    in the working directory.
    """
    smpl_onnx_path = "smpl.onnx"
    import onnx, onnxruntime
    #onnx_model = onnx.load(smpl_onnx_path)
    #onnx.checker.check_model(onnx_model)
    ort_session = onnxruntime.InferenceSession(smpl_onnx_path)
    import time
    cost_time = []
    a = np.random.random([batch_size, 10]).astype(dtype)
    b = np.random.random([batch_size, 72]).astype(dtype)
    ort_inputs = {'smpl_betas':a, 'smpl_thetas':b}
    for _ in range(200):
        start_time = time.time()
        ort_outs = ort_session.run(None, ort_inputs)
        end_time = time.time()
        cost_time.append(end_time - start_time)
    print('cost time ', np.mean(cost_time))
    print(cost_time[:10])
def prepare_smpl_model(dtype, model_path='/home/yusun/CenterMesh/model_data/parameters/smpl_packed_info.pth'):
    """Build an eval-mode SMPL model.

    Args:
        dtype: torch dtype used by the model.
        model_path: path to the packed SMPL parameter file. Parameterized
            (previously hard-coded); the default preserves the original
            location for backward compatibility.

    Returns:
        The SMPL model in eval mode.
    """
    smpl_model = SMPL(model_path, dtype=dtype).eval()  # .cuda()
    return smpl_model
# Entry point: build the SMPL model and run the interactive rendering benchmark.
# The ONNX export/benchmark paths are kept but disabled.
if __name__ == '__main__':
    #test_onnx(batch_size=1)
    dtype = torch.float32
    smpl_model = prepare_smpl_model(dtype)
    test_smpl(smpl_model)
    #export_smpl_to_onnx_static(smpl_model, 'smpl.onnx', bs=1)
| 40.537634
| 143
| 0.628979
|
4a01035e5eb007f9b50ca0aedae1e22efe9304df
| 3,115
|
py
|
Python
|
code/auto_download/auto-download-ucla.py
|
Stochastik-TU-Ilmenau/covid19-forecast-hub-de
|
384d9ed2d84e9733f630973fb1a43b079f2b57a1
|
[
"MIT"
] | 31
|
2020-05-20T15:38:57.000Z
|
2022-02-13T01:31:33.000Z
|
code/auto_download/auto-download-ucla.py
|
Stochastik-TU-Ilmenau/covid19-forecast-hub-de
|
384d9ed2d84e9733f630973fb1a43b079f2b57a1
|
[
"MIT"
] | 777
|
2020-05-18T14:55:53.000Z
|
2022-03-29T20:43:17.000Z
|
code/auto_download/auto-download-ucla.py
|
Stochastik-TU-Ilmenau/covid19-forecast-hub-de
|
384d9ed2d84e9733f630973fb1a43b079f2b57a1
|
[
"MIT"
] | 65
|
2020-05-20T07:42:36.000Z
|
2021-11-20T21:25:23.000Z
|
# Auto-download forecasts of UCLA-Team
# Jakob Ketterer, November 2020
import os
import urllib.error
import urllib.request
from datetime import datetime, timedelta
if __name__ == "__main__":
    # ---- Determine which forecast files still need to be downloaded ----
    data_raw_dir = "./data-raw/UCLA-SuEIR"
    files = os.listdir(data_raw_dir)

    # Determine latest forecast date already present in our repo.
    prefix = "pred_world_"
    # get mm-dd strings from file names (the file names carry no year)
    dates_wo_year = sorted([f.replace(prefix, "").strip(".csv") for f in files if f.startswith(prefix)], reverse=True)

    # Re-attach years based on the month of submission: months >= 9 are
    # assumed to be 2020 (first UCLA submissions), the rest 2021.
    # NOTE(review): this heuristic mislabels any forecast from 2022 onwards.
    dates_w_year = []
    for date in dates_wo_year:
        if int(date[:2]) >= 9:
            date_w_year = "2020-" + date
        else:
            date_w_year = "2021-" + date
        dates_w_year.append(date_w_year)
    dates_w_year = sorted(dates_w_year)

    # Latest forecast present in our repo.
    latest_fc_date_str = dates_w_year[-1]
    latest_fc_date = datetime.strptime(latest_fc_date_str, "%Y-%m-%d")

    # Date up to which files should be downloaded.
    download_up_to_date = datetime.today()
    print(download_up_to_date, latest_fc_date)
    assert download_up_to_date > latest_fc_date, "Required forecasts already exists in the repo!"

    # Candidate dates after the latest present forecast, restricted to
    # Sundays (UCLA forecasts are usually generated on Sundays).
    date_list = [latest_fc_date + timedelta(days=x) for x in range(1, (download_up_to_date-latest_fc_date).days+1)]
    date_list = [date for date in date_list if date.weekday() == 6]
    print("Trying to download forecasts for the following dates: ", ["".join(str(d.date())) for d in date_list])

    # ---- URL generation and download of files ----
    root = "https://raw.githubusercontent.com/uclaml/ucla-covid19-forecasts/master/projection_result/"
    # Reformat each date into the repo's file-name pattern.
    file_names = [prefix + date.strftime("%m-%d") + ".csv" for date in date_list]
    urls = [root + name for name in file_names]
    dir_names = [os.path.join(data_raw_dir, name) for name in file_names]

    # Download and save the CSV files.  A date that has no file in the UCLA
    # repo raises a URLError; catch it so one gap does not abort the whole
    # run (restores the intent of the previously commented-out handling).
    errors = False
    for url, dir_name, date in zip(urls, dir_names, date_list):
        try:
            urllib.request.urlretrieve(url, dir_name)
            print(f"Downloaded forecast from {date.date()} and saved it to", dir_name)
        except urllib.error.URLError:
            print(f"URL-ERROR: Download failed for {date.date()}. The file probably doesn't exist in the UCLA repo.")
            errors = True
    if errors:
        print("\n↯ Errors occured while downloading UCLA forecasts! See download history for details!\n")
    else:
        print("\n✓ No errors occured\n")
| 38.45679
| 119
| 0.668058
|
4a01037c39a0f35eceb6456a8be604c283ed5f75
| 8,108
|
py
|
Python
|
twopi_flask_utils/testing/__init__.py
|
TwoPiCode/twopi-flask-utils
|
fd6ae7077060661f743873f72059a14687fa0c5e
|
[
"MIT"
] | 4
|
2016-08-11T01:18:39.000Z
|
2022-03-12T00:28:22.000Z
|
twopi_flask_utils/testing/__init__.py
|
TwoPiCode/twopi-flask-utils
|
fd6ae7077060661f743873f72059a14687fa0c5e
|
[
"MIT"
] | 4
|
2016-10-13T23:34:51.000Z
|
2019-10-08T01:43:22.000Z
|
twopi_flask_utils/testing/__init__.py
|
TwoPiCode/twopi-flask-utils
|
fd6ae7077060661f743873f72059a14687fa0c5e
|
[
"MIT"
] | 2
|
2016-09-15T11:17:32.000Z
|
2017-06-15T09:21:24.000Z
|
import json
class AppReqTestHelper(object):
    """
    Adds convenience request methods on the testcase object.
    Assumes a flask app client is defined on ``self.client``
    """
    def _req(self, meth, *args, **kwargs):
        # Everything except GET defaults to a JSON content type.
        if kwargs.get('content_type') is None and meth != 'get':
            kwargs['content_type'] = 'application/json'
        # A non-None ``json=`` kwarg is serialized into ``data``.
        if kwargs.get('json') is not None:
            kwargs['data'] = json.dumps(kwargs.pop('json'))
        # Take provided headers, falling back to ``self.client_headers``.
        headers = kwargs.pop('headers', getattr(self, 'client_headers', {}))
        response = getattr(self.client, meth)(*args, headers=headers, **kwargs)

        # Attach a JSON decoder so tests can write ``rv.get_json()``.
        def get_json():
            return json.loads(response.data.decode('UTF-8'))
        response.get_json = get_json
        return response

    def post(self, *args, **kwargs):
        """Perform a post request to the application."""
        return self._req('post', *args, **kwargs)

    def get(self, *args, **kwargs):
        """Perform a get request to the application."""
        return self._req('get', *args, **kwargs)

    def put(self, *args, **kwargs):
        """Perform a put request to the application."""
        return self._req('put', *args, **kwargs)

    def delete(self, *args, **kwargs):
        """Perform a delete request to the application."""
        return self._req('delete', *args, **kwargs)

    def patch(self, *args, **kwargs):
        """Perform a patch request to the application."""
        return self._req('patch', *args, **kwargs)
class PrivilegeTestHelper(AppReqTestHelper):
    """
    Adds a helper to test endpoint privileges in an application.
    """
    def do_test_privileges(self, endpoint, data, object_id, expected_codes):
        """
        Test privileges on a specific endpoint.
        :param endpoint: The endpoint to test. e.g. `/api/v1/classes`
        :param data: The data to use when performing a `PUT`/`POST` (dict)
        :param object_id: The id of the singular object to test permissions on `PUT`/`DELETE`/`GET`
        :param expected_codes: The expected response codes when performing each
                               endpoint request. E.g.
        .. code::
            {
                'plural-get': 200,
                'get': 200,
                'delete': 403,
                'post': 200,
                'put': 200,
            }
        """
        # ``expected_codes`` is documented (and shown above) as a dict;
        # iterating it directly would try to unpack its string *keys*.
        # Fix: iterate ``.items()`` to get (method, expected status) pairs.
        for meth, expected_code in expected_codes.items():
            assert meth in ['plural-get', 'get', 'delete', 'put', 'post']
            kwargs = {}
            endp = endpoint
            _meth = meth
            if meth in ['put', 'post']:
                kwargs['json'] = data
            if meth in ['put', 'delete', 'get']:
                # Singular operations target a specific object.
                endp = endpoint + '/{}'.format(object_id)
            if meth == 'plural-get':
                _meth = 'get'

            rv = self._req(_meth, endp, **kwargs)
            self.assertEqual(rv.status_code, expected_code,
                             "Expected {} for method {} but got {}. {}".format(
                                 expected_code, meth, rv.status_code, rv.get_json()))
class CRUDTestHelper(AppReqTestHelper):
    """
    A helper to test generic CRUD operations on an endpoint.
    """
    # NOTE(review): ``check_keys=[]`` and ``keys_from_prev=[]`` are mutable
    # defaults; they are only read (never mutated) here, so behavior is safe,
    # but ``()`` or ``None`` would be the conventional choice.
    def do_crud_test(self, endpoint, data_1=None, data_2=None, key='id',
                     check_keys=[], keys_from_prev=[], create=True, delete=True,
                     update=True, read=True, initial_count=0):
        """
        Begins the CRUD test.
        :param endpoint: ``string``: The endpoint to test
        :param data1: ``dict``: Data to create the initial entity with (POST)
        :param data2: ``dict``: Data to update the entity with (PUT)
        :param key: ``string``: The key field in the response returned when performing
                    a create.
        :param check_keys: ``list``: A list of keys to compare ``data_1`` and
                           ``data_2`` to returned API responses. (To
                           ensure expected response data)
        :param keys_from_prev: ``list``: A list of keys to check that they persisted
                               after a create/update.
        :param create: ``bool``: Should create a new object and test it's existence
        :param delete: ``bool``: Should delete the newly created object and test
                       that it has been deleted.
        :param update: ``bool``: Should performs PUT (update)
        :param read: ``bool``: Should perform a plural read
        :param initial_count: ``int``: The initial number of entities in the endpoint's
                              dataset
        """
        if read:
            # Plural read on initial set.
            rv = self.get(endpoint)
            self.assertEqual(rv.status_code, 200)
            self.assertEqual(len(rv.get_json()), initial_count)

        if create:
            # Create
            rv = self.post(endpoint, json=data_1)
            self.assertEqual(rv.status_code, 200)
            self.assertEqualDicts(check_keys, rv.get_json(), data_1)
            key_id = rv.get_json()[key]

        if read:
            # Plural read
            rv = self.get(endpoint)
            self.assertEqual(rv.status_code, 200)
            if create:
                self.assertEqual(len(rv.get_json()), initial_count + 1)
                # Ensure that at least one of the items is equal to the one we made
                for item in rv.get_json():
                    if self.equalDicts(check_keys, item, data_1):
                        break
                else:  # nobreak
                    self.fail("Could not find the object that was created in the response.")
            else:
                # No object should have been created
                self.assertEqual(len(rv.get_json()), initial_count)

        if read and create:
            # Singular Read
            rv = self.get(endpoint + '/' + str(key_id))
            self.assertEqual(rv.status_code, 200)
            prev_data = rv.get_json()  # Keep this data so we can use it after update.
            self.assertEqualDicts(check_keys, prev_data, data_1)

        if update and create:
            # Singular Update
            # NOTE(review): ``prev_data`` is only bound in the ``read and
            # create`` branch above, so ``update=True, read=False`` would hit
            # a NameError on the last assert here — confirm intended usage.
            rv = self.put(endpoint + '/' + str(key_id), json=data_2)
            self.assertEqual(rv.status_code, 200)
            self.assertEqualDicts(check_keys, rv.get_json(), data_2)
            self.assertEqualDicts(keys_from_prev, rv.get_json(), prev_data)

        if read and create:
            # Singular Read to confirm persisted.
            rv = self.get(endpoint + '/' + str(key_id))
            self.assertEqual(rv.status_code, 200)
            self.assertEqualDicts(check_keys, rv.get_json(), data_2)
            self.assertEqualDicts(keys_from_prev, rv.get_json(), prev_data)

        if delete and create:
            # Singular Deletion
            rv = self.delete(endpoint + '/' + str(key_id))
            self.assertEqual(rv.status_code, 200)

        if read and create:
            # Singular read on object which doesn't exist.
            rv = self.get(endpoint + '/' + str(key_id))
            self.assertEqual(rv.status_code, 404)

        if read:
            # Plural read on empty set
            rv = self.get(endpoint)
            self.assertEqual(rv.status_code, 200)
            self.assertEqual(len(rv.get_json()), initial_count)

    def filteredDicts(self, keys, *dicts):
        # Project each dict down to ``keys`` so comparisons ignore
        # server-generated fields.
        ret = []
        for d in dicts:
            d_filtered = dict((k, v) for k, v in d.items()
                              if k in keys)
            ret.append(d_filtered)
        return ret

    def assertEqualDicts(self, keys, d1, d2):
        # Assert equality of the two dicts restricted to ``keys``.
        self.assertEqual(*self.filteredDicts(keys, d1, d2))

    def equalDicts(self, keys, d1, d2):
        # Non-asserting variant of assertEqualDicts; returns a bool.
        d1, d2 = self.filteredDicts(keys, d1, d2)
        return d1 == d2
| 38.980769
| 99
| 0.536877
|
4a0103eacaa9bcd7839172a5be7d4dbbbd41b29c
| 653
|
py
|
Python
|
python/Guazina/ex109/__init__.py
|
FaunoGuazina/X_very_old_algorithm_exercises
|
c9b9ec78e8b82f2e23ef85ba9a5e7fd6e0deaea6
|
[
"MIT"
] | null | null | null |
python/Guazina/ex109/__init__.py
|
FaunoGuazina/X_very_old_algorithm_exercises
|
c9b9ec78e8b82f2e23ef85ba9a5e7fd6e0deaea6
|
[
"MIT"
] | null | null | null |
python/Guazina/ex109/__init__.py
|
FaunoGuazina/X_very_old_algorithm_exercises
|
c9b9ec78e8b82f2e23ef85ba9a5e7fd6e0deaea6
|
[
"MIT"
] | null | null | null |
from Guazina import ex108, ex107
def METADE(x, exibir=False):
    """Half of ``x``; formatted as euros when ``exibir`` is True."""
    valor = ex107.moedaMETADE(x)
    return ex108.formaEURO(valor) if exibir else valor
def DOBRO(x, exibir=False):
    """Double of ``x``; formatted as euros when ``exibir`` is True."""
    valor = ex107.moedaDOBRO(x)
    return ex108.formaEURO(valor) if exibir else valor
def AUMENTA(x, y, exibir=False):
    """``x`` increased by ``y`` percent; formatted as euros when ``exibir`` is True."""
    valor = ex107.moedaAUMENTA(x, y)
    return ex108.formaEURO(valor) if exibir else valor
def DIMINUI(x, y, exibir=False):
    """``x`` decreased by ``y`` percent; formatted as euros when ``exibir`` is True."""
    valor = ex107.moedaDIMINUI(x, y)
    return ex108.formaEURO(valor) if exibir else valor
| 19.787879
| 39
| 0.630934
|
4a010402ecb357d296a574c2c89a93ec116051ad
| 5,189
|
py
|
Python
|
mhcflurry/predict_command.py
|
juvejones/mhcflurry_pan
|
08b6fd3116230f954db37a1917e70107f1ffe9d9
|
[
"Apache-2.0"
] | 1
|
2020-08-06T06:53:46.000Z
|
2020-08-06T06:53:46.000Z
|
mhcflurry/predict_command.py
|
juvejones/mhcflurry_pan
|
08b6fd3116230f954db37a1917e70107f1ffe9d9
|
[
"Apache-2.0"
] | null | null | null |
mhcflurry/predict_command.py
|
juvejones/mhcflurry_pan
|
08b6fd3116230f954db37a1917e70107f1ffe9d9
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2016. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Run MHCflurry predictor on specified peptide/allele pairs.
Examples:
Write a CSV file containing the contents of INPUT.csv plus an
additional column giving MHCflurry binding affinity predictions:
mhcflurry-predict INPUT.csv --out RESULT.csv
The input CSV file is expected to contain columns 'allele' and 'peptide'.
The predictions are written to a column called 'mhcflurry_prediction'.
These default column names may be changed with the --allele-column,
--peptide-column, and --prediction-column options.
If --out is not specified, results are written to standard out.
You can also run on alleles and peptides specified on the commandline, in
which case predictions are written for all combinations of alleles and
peptides:
mhcflurry-predict --alleles HLA-A0201 H-2Kb --peptides SIINFEKL DENDREKLLL
'''
from __future__ import (
print_function,
division,
absolute_import,
)
import sys
import argparse
import logging
import pandas
import itertools
from .downloads import get_path
from . import class1_allele_specific
# Command-line interface. ``description=__doc__`` reuses the module
# docstring as the program help text.
parser = argparse.ArgumentParser(
    description=__doc__,
    formatter_class=argparse.RawDescriptionHelpFormatter)

# Optional positional input CSV (mutually exclusive with --alleles/--peptides,
# enforced in run()).
parser.add_argument(
    "input",
    metavar="FILE.csv",
    nargs="?",
    help="Input CSV")
# Output path; results go to stdout when omitted.
parser.add_argument(
    "--out",
    metavar="FILE.csv",
    help="Output CSV")
parser.add_argument(
    "--alleles",
    metavar="ALLELE",
    nargs="+",
    help="Alleles to predict (exclusive with --input)")
parser.add_argument(
    "--peptides",
    metavar="PEPTIDE",
    nargs="+",
    help="Peptides to predict (exclusive with --input)")
# Column-name overrides for CSV input/output.
parser.add_argument(
    "--allele-column",
    metavar="NAME",
    default="allele",
    help="Input column name for alleles. Default: '%(default)s'")
parser.add_argument(
    "--peptide-column",
    metavar="NAME",
    default="peptide",
    help="Input column name for peptides. Default: '%(default)s'")
parser.add_argument(
    "--prediction-column",
    metavar="NAME",
    default="mhcflurry_prediction",
    help="Output column name for predictions. Default: '%(default)s'")
# Location of the downloaded single-allele models.
parser.add_argument(
    "--models-class1-allele-specific-single",
    metavar="DIR",
    default=get_path("models_class1_allele_specific_single"),
    help="Directory containing class1 allele specific single models. "
    "Default: '%(default)s'")
def run(argv=sys.argv[1:]):
    """Entry point: parse arguments, run predictions, write CSV output."""
    args = parser.parse_args(argv)

    if args.input:
        # CSV-input mode is mutually exclusive with --alleles/--peptides.
        if args.alleles or args.peptides:
            parser.error(
                "If an input file is specified, do not specify --alleles "
                "or --peptides")
        df = pandas.read_csv(args.input)
        print("Read input CSV with %d rows, columns are: %s" % (
            len(df), ", ".join(df.columns)))
        missing = [
            col for col in (args.allele_column, args.peptide_column)
            if col not in df.columns
        ]
        if missing:
            raise ValueError(
                "No such column '%s' in CSV. Columns are: %s" % (
                    missing[0], ", ".join(["'%s'" % c for c in df.columns])))
    else:
        if not (args.alleles and args.peptides):
            parser.error(
                "Specify either an input CSV file or both the "
                "--alleles and --peptides arguments")
        # Predict for the full cartesian product of alleles x peptides.
        pairs = list(itertools.product(args.alleles, args.peptides))
        df = pandas.DataFrame({
            "allele": [allele for (allele, _) in pairs],
            "peptide": [peptide for (_, peptide) in pairs],
        })
        print("Predicting for %d alleles and %d peptides = %d predictions" % (
            len(args.alleles), len(args.peptides), len(df)))

    loader = (
        class1_allele_specific.load.Class1AlleleSpecificPredictorLoader(
            args.models_class1_allele_specific_single))

    # allele -> {peptide: predicted value}
    predictions = {}
    for (allele, sub_df) in df.groupby(args.allele_column):
        logging.info("Running %d predictions for allele %s" % (
            len(sub_df), allele))
        model = loader.from_allele_name(allele)
        peptides = sub_df[args.peptide_column].values
        predictions[allele] = {
            peptide: value
            for (peptide, value) in zip(peptides, model.predict(peptides))
        }

    logging.info("Collecting result")
    df[args.prediction_column] = [
        predictions[row[args.allele_column]][row[args.peptide_column]]
        for (_, row) in df.iterrows()
    ]

    if args.out:
        df.to_csv(args.out, index=False)
        print("Wrote: %s" % args.out)
    else:
        df.to_csv(sys.stdout, index=False)
| 32.43125
| 78
| 0.66429
|
4a01062b5f3905951dee211a14fbab089df07dc4
| 1,340
|
py
|
Python
|
RecoTauTag/TauTagTools/python/TauTruthProduction_cfi.py
|
nistefan/cmssw
|
ea13af97f7f2117a4f590a5e654e06ecd9825a5b
|
[
"Apache-2.0"
] | 3
|
2018-08-24T19:10:26.000Z
|
2019-02-19T11:45:32.000Z
|
RecoTauTag/TauTagTools/python/TauTruthProduction_cfi.py
|
nistefan/cmssw
|
ea13af97f7f2117a4f590a5e654e06ecd9825a5b
|
[
"Apache-2.0"
] | 3
|
2018-08-23T13:40:24.000Z
|
2019-12-05T21:16:03.000Z
|
RecoTauTag/TauTagTools/python/TauTruthProduction_cfi.py
|
nistefan/cmssw
|
ea13af97f7f2117a4f590a5e654e06ecd9825a5b
|
[
"Apache-2.0"
] | 5
|
2018-08-21T16:37:52.000Z
|
2020-01-09T13:33:17.000Z
|
import FWCore.ParameterSet.Config as cms
from PhysicsTools.JetMCAlgos.TauGenJets_cfi import tauGenJets
from PhysicsTools.HepMCCandAlgos.genParticles_cfi import genParticles
from RecoJets.JetProducers.ak4GenJets_cfi import ak4GenJets
from RecoJets.Configuration.GenJetParticles_cff import genParticlesForJets
# Select generator-level tau jets decaying to any hadronic mode (incl. rare).
trueHadronicTaus = cms.EDFilter(
    "TauGenJetDecayModeSelector",
    src = cms.InputTag("tauGenJets"),
    select = cms.vstring(
        'oneProng0Pi0', 'oneProng1Pi0', 'oneProng2Pi0', 'oneProngOther',
        'threeProng0Pi0', 'threeProng1Pi0', 'threeProngOther', 'rare'),
    filter = cms.bool(False)
)

# Same selector restricted to the five "common" hadronic decay modes.
trueCommonHadronicTaus = trueHadronicTaus.clone(
    src = cms.InputTag("tauGenJets"),
    select = cms.vstring(
        'oneProng0Pi0', 'oneProng1Pi0', 'oneProng2Pi0',
        'threeProng0Pi0', 'threeProng1Pi0'),
    filter = cms.bool(False)
)

# Taus decaying to muons.
trueMuonicTaus = trueHadronicTaus.clone(
    src = cms.InputTag("tauGenJets"),
    select = cms.vstring('muon'),
    filter = cms.bool(False)
)

# Taus decaying to electrons.
# NOTE(review): the name is misspelled ('Elecronic'); kept as-is because the
# sequence below (and possibly external configs) reference this exact label.
trueElecronicTaus = trueHadronicTaus.clone(
    src = cms.InputTag("tauGenJets"),
    select = cms.vstring('electron'),
    filter = cms.bool(False)
)

# Full truth chain: gen particles -> gen jets -> tau gen jets -> selectors.
tauTruthSequence = cms.Sequence(
    genParticles *
    genParticlesForJets *
    ak4GenJets *
    tauGenJets *
    trueHadronicTaus *
    trueMuonicTaus *
    trueElecronicTaus)
| 29.777778
| 74
| 0.728358
|
4a0106b403f8bbe977bf0354d22cf1136f9d64f6
| 1,245
|
py
|
Python
|
student_core/consumers.py
|
michaelchen-lab/LMS_Backend
|
f8727398c66b94926e625ebd194e8330481727eb
|
[
"MIT"
] | null | null | null |
student_core/consumers.py
|
michaelchen-lab/LMS_Backend
|
f8727398c66b94926e625ebd194e8330481727eb
|
[
"MIT"
] | null | null | null |
student_core/consumers.py
|
michaelchen-lab/LMS_Backend
|
f8727398c66b94926e625ebd194e8330481727eb
|
[
"MIT"
] | null | null | null |
import json
from asgiref.sync import async_to_sync
from channels.generic.websocket import AsyncWebsocketConsumer
from channels.db import database_sync_to_async
class StudentConsumer(AsyncWebsocketConsumer):
    """Websocket consumer for student users.

    On connect, joins the classroom-wide group and a per-student group;
    group events of type ``send_task`` / ``send_submission`` are relayed
    to the socket as JSON.
    """

    async def connect(self):
        self.user = self.scope['user']
        # Only students (user_type == 1) may connect.
        if self.user.user_type != 1:
            await self.close()
            # Bug fix: previously execution fell through after close(),
            # joining the groups and accepting the rejected connection.
            return

        ## Classroom group
        code = await self.get_class_code()
        await self.channel_layer.group_add(
            'student_{}'.format(code),
            self.channel_name
        )

        ## Individual student group
        await self.channel_layer.group_add(
            'student_{}'.format(self.user.id),
            self.channel_name
        )

        await self.accept()

    @database_sync_to_async
    def get_class_code(self):
        # ORM access must be wrapped for use from the async consumer.
        return self.user.studentprofile.assigned_class_code

    async def disconnect(self, close_code):
        pass

    async def send_task(self, event):
        # Relay a 'task' group event to the websocket as JSON.
        task = event['task']
        await self.send(text_data=json.dumps({
            'task': task
        }))

    async def send_submission(self, event):
        # Relay a 'submission' group event to the websocket as JSON.
        submission = event['submission']
        await self.send(text_data=json.dumps({
            'submission': submission
        }))
| 28.295455
| 61
| 0.629719
|
4a01076d0d61016f95e93354c1de2473952b70f2
| 4,252
|
py
|
Python
|
openshift/test/test_v1_local_subject_access_review.py
|
flaper87/openshift-restclient-python
|
13d5d86ca89035b9f596032e7a34f3cc33bf8f18
|
[
"Apache-2.0"
] | null | null | null |
openshift/test/test_v1_local_subject_access_review.py
|
flaper87/openshift-restclient-python
|
13d5d86ca89035b9f596032e7a34f3cc33bf8f18
|
[
"Apache-2.0"
] | null | null | null |
openshift/test/test_v1_local_subject_access_review.py
|
flaper87/openshift-restclient-python
|
13d5d86ca89035b9f596032e7a34f3cc33bf8f18
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
OpenShift API (with Kubernetes)
OpenShift provides builds, application lifecycle, image content management, and administrative policy on top of Kubernetes. The API allows consistent management of those objects. All API operations are authenticated via an Authorization bearer token that is provided for service accounts as a generated secret (in JWT form) or via the native OAuth endpoint located at /oauth/authorize. Core infrastructure components may use openshift.client certificates that require no authentication. All API operations return a 'resourceVersion' string that represents the version of the object in the underlying storage. The standard LIST operation performs a snapshot read of the underlying objects, returning a resourceVersion representing a consistent version of the listed objects. The WATCH operation allows all updates to a set of objects after the provided resourceVersion to be observed by a openshift.client. By listing and beginning a watch from the returned resourceVersion, openshift.clients may observe a consistent view of the state of one or more objects. Note that WATCH always returns the update after the provided resourceVersion. Watch may be extended a limited time in the past - using etcd 2 the watch window is 1000 events (which on a large cluster may only be a few tens of seconds) so openshift.clients must explicitly handle the \"watch to old error\" by re-listing. Objects are divided into two rough categories - those that have a lifecycle and must reflect the state of the cluster, and those that have no state. Objects with lifecycle typically have three main sections: * 'metadata' common to all objects * a 'spec' that represents the desired state * a 'status' that represents how much of the desired state is reflected on the cluster at the current time Objects that have no state have 'metadata' but may lack a 'spec' or 'status' section. 
Objects are divided into those that are namespace scoped (only exist inside of a namespace) and those that are cluster scoped (exist outside of a namespace). A namespace scoped resource will be deleted when the namespace is deleted and cannot be created if the namespace has not yet been created or is in the process of deletion. Cluster scoped resources are typically only accessible to admins - resources like nodes, persistent volumes, and cluster policy. All objects have a schema that is a combination of the 'kind' and 'apiVersion' fields. This schema is additive only for any given version - no backwards incompatible changes are allowed without incrementing the apiVersion. The server will return and accept a number of standard responses that share a common schema - for instance, the common error type is 'unversioned.Status' (described below) and will be returned on any error from the API server. The API is available in multiple serialization formats - the default is JSON (Accept: application/json and Content-Type: application/json) but openshift.clients may also use YAML (application/yaml) or the native Protobuf schema (application/vnd.kubernetes.protobuf). Note that the format of the WATCH API call is slightly different - for JSON it returns newline delimited objects while for Protobuf it returns length-delimited frames (4 bytes in network-order) that contain a 'versioned.Watch' Protobuf object. See the OpenShift documentation at https://docs.openshift.org for more information.
OpenAPI spec version: v3.6.0-alpha.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import openshift.client
from kubernetes.client.rest import ApiException
from openshift.client.models.v1_local_subject_access_review import V1LocalSubjectAccessReview
class TestV1LocalSubjectAccessReview(unittest.TestCase):
    """ V1LocalSubjectAccessReview unit test stubs """

    def setUp(self):
        # No fixtures needed for this generated stub.
        pass

    def tearDown(self):
        pass

    def testV1LocalSubjectAccessReview(self):
        """
        Test V1LocalSubjectAccessReview
        """
        # Auto-generated smoke test: only verifies that the model's
        # no-argument constructor does not raise; no attributes are checked.
        model = openshift.client.models.v1_local_subject_access_review.V1LocalSubjectAccessReview()
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| 98.883721
| 3,380
| 0.793274
|
4a0107ac02c8363e4608f6178fbf89cc1b337304
| 344
|
py
|
Python
|
django/core/management/commands/sql.py
|
rawwell/django
|
6b3264671ead4604f26cbd2b71e8d6a02945bf0c
|
[
"BSD-3-Clause"
] | 1
|
2016-05-08T12:24:22.000Z
|
2016-05-08T12:24:22.000Z
|
django/core/management/commands/sql.py
|
rawwell/django
|
6b3264671ead4604f26cbd2b71e8d6a02945bf0c
|
[
"BSD-3-Clause"
] | null | null | null |
django/core/management/commands/sql.py
|
rawwell/django
|
6b3264671ead4604f26cbd2b71e8d6a02945bf0c
|
[
"BSD-3-Clause"
] | 1
|
2015-11-19T14:45:16.000Z
|
2015-11-19T14:45:16.000Z
|
from django.core.management.base import AppCommand
class Command(AppCommand):
    """Management command emitting CREATE TABLE SQL for the named app(s)."""

    help = "Prints the CREATE TABLE SQL statements for the given app name(s)."
    output_transaction = True

    def handle_app(self, app, **options):
        # Imported here (matching the original) to avoid import-time cycles.
        from django.core.management.sql import sql_create
        statements = sql_create(app, self.style)
        return '\n'.join(statements)
| 31.272727
| 78
| 0.718023
|
4a010949b409025eb0d91dab893929f60ea27c9b
| 387
|
py
|
Python
|
telly/wsgi.py
|
el-Joft/telly
|
0c97471e46ee491bd1f6019250ce7a3f21de67af
|
[
"MIT"
] | null | null | null |
telly/wsgi.py
|
el-Joft/telly
|
0c97471e46ee491bd1f6019250ce7a3f21de67af
|
[
"MIT"
] | null | null | null |
telly/wsgi.py
|
el-Joft/telly
|
0c97471e46ee491bd1f6019250ce7a3f21de67af
|
[
"MIT"
] | null | null | null |
"""
WSGI config for telly project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before the application is built.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'telly.settings')

# WSGI callable used by servers (gunicorn, mod_wsgi, ...).
application = get_wsgi_application()
| 22.764706
| 78
| 0.782946
|
4a0109dc35081817faa810b2ee473aabf742daf6
| 42,352
|
py
|
Python
|
src/networks/net_places365.py
|
sklcg/SHLight
|
6ac575055070fb83b9deb74e4ccb03d47a547883
|
[
"MIT"
] | 13
|
2018-10-10T06:17:54.000Z
|
2022-01-06T02:34:50.000Z
|
src/networks/net_places365.py
|
sklcg/SHLight
|
6ac575055070fb83b9deb74e4ccb03d47a547883
|
[
"MIT"
] | 11
|
2018-11-21T06:44:00.000Z
|
2021-08-08T04:22:31.000Z
|
src/networks/net_places365.py
|
sklcg/SHLight
|
6ac575055070fb83b9deb74e4ccb03d47a547883
|
[
"MIT"
] | 1
|
2019-03-05T11:42:57.000Z
|
2019-03-05T11:42:57.000Z
|
import tensorflow as tf
import numpy as np
from .net_caffee2tf import C2T_Network
class PretrainNetwork(C2T_Network):
    """Network base that can initialize variables from pretrained weights.

    ``weight`` is a path to an ``.npy`` file holding a nested
    {layer_scope: {var_name: array}} mapping; when omitted, variables are
    created from scratch.
    """

    def __init__(self, inputs, weight=None, trainable=True):
        # PEP 8 fix: compare against None with ``is``/``is not``, not ==/!=.
        if weight is not None:
            self.var = np.load(weight).tolist()
            # for key in self.var.keys():
            #     print(key, self.var[key].keys())
        self.from_scratch = weight is None
        super(PretrainNetwork, self).__init__(inputs, trainable)

    def make_var(self, name, shape):
        """Create a variable, fresh or initialized from the loaded weights."""
        if self.from_scratch:
            return tf.get_variable(name, shape)
        else:
            # The last component of the current variable scope names the
            # layer inside self.var.
            scope = tf.get_variable_scope().name.split('/')[-1]
            return tf.get_variable(name, initializer=tf.constant(self.var[scope][name]), trainable=self.trainable)
class VGG_Places365(PretrainNetwork):
    # auto generated by caffe2tensorflow_python3
    # https://github.com/GZHermit/caffe2tensorflow_python3
    def setup(self):
        """Build the VGG-16 convolutional trunk (conv1_1 through pool4).

        The conv5 block, pooling, fully-connected layers and the 365-way
        Places classifier are deliberately left commented out — only the
        convolutional features up to pool4 are used here.
        """
        (self.feed('data')
         .conv(3, 3, 64, 1, 1, name='conv1_1')
         .conv(3, 3, 64, 1, 1, name='conv1_2')
         .max_pool(2, 2, 2, 2, name='pool1')
         .conv(3, 3, 128, 1, 1, name='conv2_1')
         .conv(3, 3, 128, 1, 1, name='conv2_2')
         .max_pool(2, 2, 2, 2, name='pool2')
         .conv(3, 3, 256, 1, 1, name='conv3_1')
         .conv(3, 3, 256, 1, 1, name='conv3_2')
         .conv(3, 3, 256, 1, 1, name='conv3_3')
         .max_pool(2, 2, 2, 2, name='pool3')
         .conv(3, 3, 512, 1, 1, name='conv4_1')
         .conv(3, 3, 512, 1, 1, name='conv4_2')
         .conv(3, 3, 512, 1, 1, name='conv4_3')
         .max_pool(2, 2, 2, 2, name='pool4')
         # .conv(3, 3, 512, 1, 1, name='conv5_1')
         # .conv(3, 3, 512, 1, 1, name='conv5_2')
         # .conv(3, 3, 512, 1, 1, name='conv5_3')
         # .max_pool(2, 2, 2, 2, name='pool5')
         # .fc(4096, name='fc6')
         # .fc(4096, name='fc7')
         # .fc(365, relu=False, name='fc8a')
         # .softmax(name='prob')
         )
class Alex_Places365(PretrainNetwork):
    # auto generated by caffe2tensorflow_python3
    # https://github.com/GZHermit/caffe2tensorflow_python3
    def setup(self):
        """Build the AlexNet convolutional trunk (conv1 through pool5).

        The fully-connected layers and the 365-way Places classifier are
        deliberately left commented out — only convolutional features are
        used here.
        """
        (self.feed('data')
         .conv(11, 11, 96, 4, 4, padding='VALID', name='conv1')
         .max_pool(3, 3, 2, 2, padding='VALID', name='pool1')
         .lrn(2, 1.9999999494757503e-05, 0.75, name='norm1')
         .conv(5, 5, 256, 1, 1, group=2, name='conv2')
         .max_pool(3, 3, 2, 2, padding='VALID', name='pool2')
         .lrn(2, 1.9999999494757503e-05, 0.75, name='norm2')
         .conv(3, 3, 384, 1, 1, name='conv3')
         .conv(3, 3, 384, 1, 1, group=2, name='conv4')
         .conv(3, 3, 256, 1, 1, group=2, name='conv5')
         .max_pool(3, 3, 2, 2, padding='VALID', name='pool5')
         # .fc(4096, name='fc6')
         # .fc(4096, name='fc7')
         # .fc(365, relu=False, name='fc8')
         # .softmax(name='prob')
         )
class Googlenet_Places365(PretrainNetwork):
    """GoogLeNet (Inception v1) backbone pretrained on Places365.

    Converted from Caffe with caffe2tensorflow_python3
    (https://github.com/GZHermit/caffe2tensorflow_python3).  The originally
    machine-generated, fully unrolled layer list is expressed here as a
    loop over the per-module channel configuration; layer names and graph
    topology are identical to the generated version.

    The network is truncated: the four branches of inception_5b are built,
    but their concat, the global average pooling and the classifier head
    are not (they were commented out in the generated code as well).
    """

    # Per-module channel configuration:
    # name -> (1x1, 3x3_reduce, 3x3, 5x5_reduce, 5x5, pool_proj),
    # plus the name of the max-pool layer inserted after the module, if any.
    _INCEPTION_MODULES = [
        ('3a', (64, 96, 128, 16, 32, 32), None),
        ('3b', (128, 128, 192, 32, 96, 64), 'pool3_3x3_s2'),
        ('4a', (192, 96, 208, 16, 48, 64), None),
        ('4b', (160, 112, 224, 24, 64, 64), None),
        ('4c', (128, 128, 256, 24, 64, 64), None),
        ('4d', (112, 144, 288, 32, 64, 64), None),
        ('4e', (256, 160, 320, 32, 128, 128), 'pool4_3x3_s2'),
        ('5a', (256, 160, 320, 32, 128, 128), None),
        # inception_5b: branches only -- its concat (and everything behind
        # it) is intentionally not built; see the class docstring.
        ('5b', (384, 192, 384, 48, 128, 128), None),
    ]

    def _inception(self, bottom, name, channels, concat=True):
        """Build one inception module on top of the layer named `bottom`.

        `channels` is (c1x1, c3x3_reduce, c3x3, c5x5_reduce, c5x5,
        c_pool_proj).  When `concat` is False only the four parallel
        branches are created.  Returns the name of the concat output layer.
        """
        c1, c3r, c3, c5r, c5, cp = channels
        prefix = 'inception_' + name
        # Branch 1: plain 1x1 conv.
        (self.feed(bottom)
         .conv(1, 1, c1, 1, 1, name=prefix + '_1x1'))
        # Branch 2: 1x1 reduction followed by a 3x3 conv.
        (self.feed(bottom)
         .conv(1, 1, c3r, 1, 1, name=prefix + '_3x3_reduce')
         .conv(3, 3, c3, 1, 1, name=prefix + '_3x3'))
        # Branch 3: 1x1 reduction followed by a 5x5 conv.
        (self.feed(bottom)
         .conv(1, 1, c5r, 1, 1, name=prefix + '_5x5_reduce')
         .conv(5, 5, c5, 1, 1, name=prefix + '_5x5'))
        # Branch 4: 3x3 stride-1 max pooling followed by a 1x1 projection.
        (self.feed(bottom)
         .max_pool(3, 3, 1, 1, name=prefix + '_pool')
         .conv(1, 1, cp, 1, 1, name=prefix + '_pool_proj'))
        if concat:
            # Concatenate the branches along the channel axis.
            (self.feed(prefix + '_1x1',
                       prefix + '_3x3',
                       prefix + '_5x5',
                       prefix + '_pool_proj')
             .concat(3, name=prefix + '_output'))
        return prefix + '_output'

    def setup(self):
        """Register the truncated GoogLeNet layer stack on top of 'data'."""
        # Stem: conv / pool / LRN layers preceding the inception modules.
        (self.feed('data')
         .conv(7, 7, 64, 2, 2, name='conv1_7x7_s2')
         .max_pool(3, 3, 2, 2, name='pool1_3x3_s2')
         .lrn(2, 1.9999999494757503e-05, 0.75, name='pool1_norm1')
         .conv(1, 1, 64, 1, 1, name='conv2_3x3_reduce')
         .conv(3, 3, 192, 1, 1, name='conv2_3x3')
         .lrn(2, 1.9999999494757503e-05, 0.75, name='conv2_norm2')
         .max_pool(3, 3, 2, 2, name='pool2_3x3_s2'))
        bottom = 'pool2_3x3_s2'
        for name, channels, pool_after in self._INCEPTION_MODULES:
            last = (name == '5b')
            output = self._inception(bottom, name, channels, concat=not last)
            if last:
                break
            bottom = output
            if pool_after is not None:
                # Stride-2 max pooling between inception groups.
                self.feed(bottom).max_pool(3, 3, 2, 2, name=pool_after)
                bottom = pool_after
class Res152_Places365(PretrainNetwork):
    """ResNet-152 backbone pretrained on Places365.

    Converted from Caffe with caffe2tensorflow_python3
    (https://github.com/GZHermit/caffe2tensorflow_python3).  The originally
    machine-generated, fully unrolled list of 50 bottleneck blocks is
    expressed here as a loop over the per-stage configuration; layer names
    and graph topology are identical to the generated version.

    The network is truncated: both branches of the last bottleneck (res5c)
    are built, but their elementwise add, the final ReLU, the global
    average pooling and the classifier head are not (they were commented
    out in the generated code as well).
    """

    # Per-stage configuration: (stage number, bottleneck channels,
    # stride of the first block, names of the blocks in the stage).
    # Output channels of each block are 4 * bottleneck channels.
    _STAGES = [
        (2, 64, 1, ['a', 'b', 'c']),
        (3, 128, 2, ['a'] + ['b%d' % i for i in range(1, 8)]),
        (4, 256, 2, ['a'] + ['b%d' % i for i in range(1, 36)]),
        (5, 512, 2, ['a', 'b', 'c']),
    ]

    def _branch2(self, bottom, name, channels, stride):
        """Build the residual branch (1x1 -> 3x3 -> 1x1 convs, each with
        batch norm) of bottleneck block `name` on top of layer `bottom`."""
        (self.feed(bottom)
         .conv(1, 1, channels, stride, stride, biased=False, relu=False,
               name='res%s_branch2a' % name)
         .batch_normalization(relu=True, name='bn%s_branch2a' % name)
         .conv(3, 3, channels, 1, 1, biased=False, relu=False,
               name='res%s_branch2b' % name)
         .batch_normalization(relu=True, name='bn%s_branch2b' % name)
         .conv(1, 1, 4 * channels, 1, 1, biased=False, relu=False,
               name='res%s_branch2c' % name)
         .batch_normalization(name='bn%s_branch2c' % name))

    def setup(self):
        """Register the truncated ResNet-152 layer stack on top of 'data'."""
        # Stem: 7x7 stride-2 conv + BN + 3x3 stride-2 max pooling.
        (self.feed('data')
         .conv(7, 7, 64, 2, 2, biased=False, relu=False, name='conv1')
         .batch_normalization(relu=True, name='bn_conv1')
         .max_pool(3, 3, 2, 2, name='pool1'))
        bottom = 'pool1'
        for stage, channels, stride, blocks in self._STAGES:
            for block in blocks:
                name = '%d%s' % (stage, block)
                first = (block == 'a')
                if first:
                    # Projection shortcut: 1x1 conv + BN, strided at the
                    # start of stages 3-5 (the original applies stride 1
                    # for res2a, stride 2 elsewhere).
                    (self.feed(bottom)
                     .conv(1, 1, 4 * channels, stride, stride, biased=False,
                           relu=False, name='res%s_branch1' % name)
                     .batch_normalization(name='bn%s_branch1' % name))
                # Residual branch; only the first block of a stage strides.
                self._branch2(bottom, name, channels, stride if first else 1)
                if stage == 5 and block == 'c':
                    # The generated code stops here: res5c is never merged
                    # and the classifier head is not built.
                    return
                # Merge shortcut and residual branch, then ReLU.
                shortcut = ('bn%s_branch1' % name) if first else bottom
                (self.feed(shortcut, 'bn%s_branch2c' % name)
                 .add(name='res%s' % name)
                 .relu(name='res%s_relu' % name))
                bottom = 'res%s_relu' % name
| 50.599761
| 116
| 0.56446
|
4a010a06c8f708bb02acca31f057c80f0784a529
| 1,625
|
py
|
Python
|
Projekt Zaliczeniowy - Okienko/Kite.py
|
Paarzivall/Programowanie-obiektowe-2
|
b658a659dbf039048173a63975de73c3b07c8b70
|
[
"MIT"
] | null | null | null |
Projekt Zaliczeniowy - Okienko/Kite.py
|
Paarzivall/Programowanie-obiektowe-2
|
b658a659dbf039048173a63975de73c3b07c8b70
|
[
"MIT"
] | null | null | null |
Projekt Zaliczeniowy - Okienko/Kite.py
|
Paarzivall/Programowanie-obiektowe-2
|
b658a659dbf039048173a63975de73c3b07c8b70
|
[
"MIT"
] | null | null | null |
import math
from ConvexQuadrilateral import ConvexQuadrilateral
class Kite(ConvexQuadrilateral):
    """A kite: a convex quadrilateral with two pairs of equal adjacent sides.

    Side lengths and angles are read interactively (prompts are in Polish)
    by take_parameters(); area() and draw() then work on those values.
    """

    def __init__(self, fill_colour, outline_colour):
        super().__init__(fill_colour, outline_colour)

    def take_parameters(self):
        """Read the two distinct side lengths and two distinct angles.

        The remaining sides/angles follow from the kite's symmetry.  Inputs
        are converted to float: the original code stored the raw input()
        strings, which made every computation on them (including the
        ``360 - ...`` expression below) raise TypeError.
        """
        # NOTE: attribute names keep the original 'lenght' spelling because
        # code outside this class (e.g. the base class) may rely on them.
        self.lenght_of_side_a = float(input("Podaj długość pierwszej krawędzi:\t"))
        self.lenght_of_side_b = float(input("Podaj długość drugiej krawędzi:\t"))
        self.lenght_of_side_c = self.lenght_of_side_b
        self.lenght_of_side_d = self.lenght_of_side_a
        self.angle1 = float(input("Podaj pierwszy kąt:\t"))
        self.angle2 = float(input("Podaj drugi kąt (Jeden z dwóch tych samych):\t"))
        self.angle3 = self.angle2
        # Interior angles of a quadrilateral sum to 360 degrees.
        self.angle4 = 360 - self.angle1 - 2 * self.angle2

    def area(self):
        """Return the kite's area as the sum of two isosceles triangles.

        The diagonal between the two equal-angle vertices splits the kite
        into a triangle with legs a and apex angle1, and a triangle with
        legs b and apex 360 - angle1 - 2*angle2 (= angle4).
        """
        a = self.lenght_of_side_a
        b = self.lenght_of_side_b
        return ((math.pow(a, 2) * math.sin(math.radians(self.angle1))) / 2) + ((math.pow(b, 2) * math.sin(math.radians(360 - (self.angle1 + 2 * self.angle2)))) / 2)

    def draw(self):
        """Return the scaled integer screen coordinates of the four vertices.

        Uses self.wektor (offset) and self.skala (scale), presumably set by
        the ConvexQuadrilateral base class -- TODO confirm.
        """
        # p1: cross diagonal length (2*a*sin(angle1/2), written via cos of
        # the complement); p2: main diagonal length.
        p1 = 2 * self.lenght_of_side_a * math.cos(math.radians((180 - self.angle1) / 2))
        p2 = self.lenght_of_side_a * math.cos(math.radians(self.angle1 / 2)) + self.lenght_of_side_b * math.cos(math.radians((180 - self.angle1) / 2))
        h = math.sqrt(math.pow(self.lenght_of_side_a, 2) - (math.pow(p1, 2)) / 4)
        A = (self.wektor, self.wektor)
        B = (p2 + self.wektor, self.wektor)
        S = (p2 + self.wektor - h, self.wektor)
        C = (S[0], self.wektor - p1 / 2)
        D = (S[0], self.wektor + p1 / 2)
        # Vertex order A, C, B, D traces the kite outline; shift by 1 unit
        # and scale to pixel coordinates.
        coords = [int((x + 1) * self.skala) for x in A + C + B + D]
        return coords
| 39.634146
| 162
| 0.626462
|
4a010a4d4d667a661064f57ddba3822f28a37c33
| 2,083
|
py
|
Python
|
lib/model/faster_rcnn/vgg16.py
|
sumiya-NJU/da-faster-rcnn-PyTorch
|
62a7286d8e40c6625f32de8d49039c7f623909bd
|
[
"MIT"
] | 122
|
2018-12-18T16:43:18.000Z
|
2022-03-25T03:21:11.000Z
|
lib/model/faster_rcnn/vgg16.py
|
sumiya-NJU/da-faster-rcnn-PyTorch
|
62a7286d8e40c6625f32de8d49039c7f623909bd
|
[
"MIT"
] | 34
|
2019-02-27T19:35:46.000Z
|
2021-12-26T06:23:40.000Z
|
lib/model/faster_rcnn/vgg16.py
|
sumiya-NJU/da-faster-rcnn-PyTorch
|
62a7286d8e40c6625f32de8d49039c7f623909bd
|
[
"MIT"
] | 44
|
2019-02-20T08:15:24.000Z
|
2022-03-22T15:06:52.000Z
|
# --------------------------------------------------------
# Tensorflow Faster R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Xinlei Chen
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import math
import torchvision.models as models
from model.faster_rcnn.faster_rcnn import _fasterRCNN
import pdb
class vgg16(_fasterRCNN):
def __init__(self, classes, pretrained=False, class_agnostic=False):
self.model_path = '/data/ztc/detectionModel/vgg16_caffe.pth'
self.dout_base_model = 512
self.pretrained = pretrained
self.class_agnostic = class_agnostic
_fasterRCNN.__init__(self, classes, class_agnostic)
def _init_modules(self):
vgg = models.vgg16()
if self.pretrained:
print("Loading pretrained weights from %s" %(self.model_path))
state_dict = torch.load(self.model_path)
vgg.load_state_dict({k:v for k,v in state_dict.items() if k in vgg.state_dict()})
vgg.classifier = nn.Sequential(*list(vgg.classifier._modules.values())[:-1])
# not using the last maxpool layer
self.RCNN_base = nn.Sequential(*list(vgg.features._modules.values())[:-1])
# Fix the layers before conv3:
for layer in range(10):
for p in self.RCNN_base[layer].parameters(): p.requires_grad = False
# self.RCNN_base = _RCNN_base(vgg.features, self.classes, self.dout_base_model)
self.RCNN_top = vgg.classifier
# not using the last maxpool layer
self.RCNN_cls_score = nn.Linear(4096, self.n_classes)
if self.class_agnostic:
self.RCNN_bbox_pred = nn.Linear(4096, 4)
else:
self.RCNN_bbox_pred = nn.Linear(4096, 4 * self.n_classes)
def _head_to_tail(self, pool5):
pool5_flat = pool5.view(pool5.size(0), -1)
fc7 = self.RCNN_top(pool5_flat)
return fc7
| 33.063492
| 90
| 0.663946
|
4a010a7da40dd479f996ae445397f3c9b0ccf487
| 4,854
|
py
|
Python
|
test/SConsGnu/GVars/GVarDecls/sconstest-gvardecls2.py
|
ptomulik/scons-gnu-build
|
9c46908eed50679d7aaaaf472e324c97545ac837
|
[
"Unlicense"
] | null | null | null |
test/SConsGnu/GVars/GVarDecls/sconstest-gvardecls2.py
|
ptomulik/scons-gnu-build
|
9c46908eed50679d7aaaaf472e324c97545ac837
|
[
"Unlicense"
] | 1
|
2015-02-13T04:30:45.000Z
|
2015-02-13T04:30:45.000Z
|
test/SConsGnu/GVars/GVarDecls/sconstest-gvardecls2.py
|
ptomulik/scons-gnu-build
|
9c46908eed50679d7aaaaf472e324c97545ac837
|
[
"Unlicense"
] | null | null | null |
#
# Copyright (c) 2012-2014 by Pawel Tomulik
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE
__docformat__ = "restructuredText"
"""
Tests declaring variables with SConsGnu.GVar.GVarDecls() factory method.
"""
import TestSCons
##############################################################################
# GVarDecls(): Test 2 - using instances of _GVarDecl as entries
##############################################################################
test = TestSCons.TestSCons()
test.dir_fixture('../../../../SConsGnu', 'site_scons/SConsGnu')
test.write('SConstruct',
"""
# SConstruct
from SConsGnu.GVars import GVarDecls, GVarDeclU, ENV, VAR, OPT
x = GVarDeclU('env_x', 'var_x', 'opt_x', default = 'x default', option = '-x')
y = GVarDeclU('env_y', 'var_y', 'opt_y', default = 'y default', option = '-y')
list = []
list.append( GVarDecls(x = x, y = y) )
list.append( GVarDecls({'x' : x, 'y' : y}) )
i = 0
for v in list:
for c in ['x', 'y']:
print "GVARS[%d][%r].has_xxx_decl(ENV): %r" % (i, c, v[c].has_xxx_decl(ENV))
print "GVARS[%d][%r].has_xxx_decl(VAR): %r" % (i, c, v[c].has_xxx_decl(VAR))
print "GVARS[%d][%r].has_xxx_decl(OPT): %r" % (i, c, v[c].has_xxx_decl(OPT))
print "GVARS[%d][%r].get_xxx_key(ENV): %r" % (i, c, v[c].get_xxx_key(ENV))
print "GVARS[%d][%r].get_xxx_key(VAR): %r" % (i, c, v[c].get_xxx_key(VAR))
print "GVARS[%d][%r].get_xxx_key(OPT): %r" % (i, c, v[c].get_xxx_key(OPT))
print "GVARS[%d][%r].get_xxx_default(ENV): %r" % (i, c, v[c].get_xxx_default(ENV))
print "GVARS[%d][%r].get_xxx_default(VAR): %r" % (i, c, v[c].get_xxx_default(VAR))
print "GVARS[%d][%r].get_xxx_default(OPT): %r" % (i, c, v[c].get_xxx_default(OPT))
i += 1
""")
test.run()
lines = [
"GVARS[0]['x'].has_xxx_decl(ENV): True",
"GVARS[0]['x'].has_xxx_decl(VAR): True",
"GVARS[0]['x'].has_xxx_decl(OPT): True",
"GVARS[0]['x'].get_xxx_key(ENV): 'env_x'",
"GVARS[0]['x'].get_xxx_key(VAR): 'var_x'",
"GVARS[0]['x'].get_xxx_key(OPT): 'opt_x'",
"GVARS[0]['x'].get_xxx_default(ENV): 'x default'",
"GVARS[0]['x'].get_xxx_default(VAR): 'x default'",
"GVARS[0]['x'].get_xxx_default(OPT): None",
"GVARS[0]['y'].has_xxx_decl(ENV): True",
"GVARS[0]['y'].has_xxx_decl(VAR): True",
"GVARS[0]['y'].has_xxx_decl(OPT): True",
"GVARS[0]['y'].get_xxx_key(ENV): 'env_y'",
"GVARS[0]['y'].get_xxx_key(VAR): 'var_y'",
"GVARS[0]['y'].get_xxx_key(OPT): 'opt_y'",
"GVARS[0]['y'].get_xxx_default(ENV): 'y default'",
"GVARS[0]['y'].get_xxx_default(VAR): 'y default'",
"GVARS[0]['y'].get_xxx_default(OPT): None",
"GVARS[1]['x'].has_xxx_decl(ENV): True",
"GVARS[1]['x'].has_xxx_decl(VAR): True",
"GVARS[1]['x'].has_xxx_decl(OPT): True",
"GVARS[1]['x'].get_xxx_key(ENV): 'env_x'",
"GVARS[1]['x'].get_xxx_key(VAR): 'var_x'",
"GVARS[1]['x'].get_xxx_key(OPT): 'opt_x'",
"GVARS[1]['x'].get_xxx_default(ENV): 'x default'",
"GVARS[1]['x'].get_xxx_default(VAR): 'x default'",
"GVARS[1]['x'].get_xxx_default(OPT): None",
"GVARS[1]['y'].has_xxx_decl(ENV): True",
"GVARS[1]['y'].has_xxx_decl(VAR): True",
"GVARS[1]['y'].has_xxx_decl(OPT): True",
"GVARS[1]['y'].get_xxx_key(ENV): 'env_y'",
"GVARS[1]['y'].get_xxx_key(VAR): 'var_y'",
"GVARS[1]['y'].get_xxx_key(OPT): 'opt_y'",
"GVARS[1]['y'].get_xxx_default(ENV): 'y default'",
"GVARS[1]['y'].get_xxx_default(VAR): 'y default'",
"GVARS[1]['y'].get_xxx_default(OPT): None",
]
test.must_contain_all_lines(test.stdout(), lines)
test.pass_test()
# Local Variables:
# # tab-width:4
# # indent-tabs-mode:nil
# # End:
# vim: set syntax=python expandtab tabstop=4 shiftwidth=4:
| 43.72973
| 90
| 0.600124
|
4a010ab8260eb832c5e67bf369eb87cd4c4f8ebe
| 7,709
|
py
|
Python
|
alex/corpustools/vctk2ufal-audio.py
|
cifkao/alex
|
9573ef5d24919b2b368b35f4dd02aa98f35f0f59
|
[
"Apache-2.0"
] | 184
|
2015-02-11T04:14:41.000Z
|
2022-03-24T21:43:58.000Z
|
alex/corpustools/vctk2ufal-audio.py
|
cifkao/alex
|
9573ef5d24919b2b368b35f4dd02aa98f35f0f59
|
[
"Apache-2.0"
] | 69
|
2015-01-11T04:57:22.000Z
|
2019-04-24T10:25:56.000Z
|
alex/corpustools/vctk2ufal-audio.py
|
cifkao/alex
|
9573ef5d24919b2b368b35f4dd02aa98f35f0f59
|
[
"Apache-2.0"
] | 61
|
2015-03-04T10:52:13.000Z
|
2022-03-04T12:14:06.000Z
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# This code is mostly PEP8-compliant. See
# http://www.python.org/dev/peps/pep-0008.
"""
This program processes a VoxForge corpus and copies all WAVs into
a destination directory along with their transcriptions.
In each subdirectory of the given directory, it looks for audio files in
the 'wav48' directory and for transcriptions in 'txt'.
"""
from __future__ import unicode_literals
import argparse
import collections
import os
import os.path
import shutil
import sys
import codecs
import pysox
import random
import glob
from xml.etree import ElementTree
# Make sure the alex package is visible.
if __name__ == '__main__':
import autopath
from alex.utils.fs import find
_LANG2NORMALISATION_MOD = {
'cs': 'alex.corpustools.text_norm_cs',
'en': 'alex.corpustools.text_norm_en',
'es': 'alex.corpustools.text_norm_es',
}
def save_transcription(trs_fname, trs):
"""
Echoes `trs' into `trs_fname'. Returns True iff the
output file already existed.
"""
existed = os.path.exists(trs_fname)
if not trs.endswith('\n'):
trs += '\n'
with codecs.open(trs_fname, 'w', 'UTF-8') as trs_file:
trs_file.write(trs)
return existed
def to_wav(src_path, wav_path):
sox_in = pysox.CSoxStream(src_path)
sox_out = pysox.CSoxStream(wav_path, 'w', pysox.CSignalInfo(16000, 1, 16), fileType='wav')
sox_chain = pysox.CEffectsChain(sox_in, sox_out)
sox_chain.add_effect(pysox.CEffect('rate', ['16000']))
sox_chain.flow_effects()
sox_out.close()
def convert(args):
"""
Looks for recordings and transcriptions under the `args.infname'
directory. Converts audio files to WAVs and copies the .wav files
and their transcriptions to `args.outdir' using the `extract_wavs_trns'
function. `args.dictionary' may refer to an open file listing the only
words to be allowed in transcriptions in the first whitespace-separated column.
Returns a tuple of:
number of collisions (files at different paths with same basename)
number of overwrites (files with the same basename as previously
present in `args.outdir')
number of missing files (file basenames referred in transcription logs
but missing in the file system)
"""
# Unpack the arguments.
infname = args.infname
outdir = args.outdir
lang = args.language
verbose = args.verbose
dict_file = args.dictionary
size = 0
n_overwrites = 0
n_missing_audio = 0
# Import the appropriate normalisation module.
norm_mod_name = _LANG2NORMALISATION_MOD[lang]
norm_mod = __import__(norm_mod_name, fromlist=( 'normalise_text', 'exclude_asr', 'exclude_by_dict'))
# Read in the dictionary.
if dict_file:
known_words = set(line.split()[0] for line in dict_file)
dict_file.close()
else:
known_words = None
wavs = glob.glob(os.path.join(infname, 'wav48', '*.wav'))
wavs.extend(glob.glob(os.path.join(infname, 'wav48', '*', '*.wav')))
wavs.extend(glob.glob(os.path.join(infname, 'wav48', '*', '*', '*.wav')))
fnames = []
for fn_w in wavs:
fn_t = fn_w.replace('wav48', 'txt').replace('wav', 'txt')
if not os.path.exists(fn_t):
continue
with codecs.open(fn_t, 'r', 'UTF-8') as txt_file:
trs = txt_file.readline().strip()
wav_name = fn_w
if len(wav_name) < 3:
continue
fname = os.path.basename(wav_name).replace('.wav', '')
# Copy or convert audio file
src_wav_path = wav_name
tgt_wav_path = os.path.join(outdir, "{r:02}".format(r=random.randint(0, 99)), "{r:02}".format(r=random.randint(0, 99)), fname + '.wav')
if not os.path.exists(os.path.dirname(tgt_wav_path)):
os.makedirs(os.path.dirname(tgt_wav_path))
# copy and convert the audio
to_wav(src_wav_path, tgt_wav_path)
fnames += [fname]
size += os.path.getsize(tgt_wav_path)
# Write transcription
if verbose:
print
print "# f:", wav_name + '.wav'
print "orig transcription:", trs.upper().strip()
trs = norm_mod.normalise_text(trs)
if verbose:
print "normalised trans: ", trs
if known_words is not None:
excluded = norm_mod.exclude_by_dict(trs, known_words)
else:
excluded = norm_mod.exclude_asr(trs)
if excluded:
if verbose:
print "... excluded"
continue
wc.update(trs.split())
if save_transcription(tgt_wav_path + '.trn', trs):
n_overwrites += 1
n_collisions = len(fnames) - len(set(fnames))
return size, n_collisions, n_overwrites, n_missing_audio
if __name__ == '__main__':
wc = collections.Counter() # word counter
from alex.utils.ui import getTerminalSize
# Parse arguments.
arger = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="""
This program processes a VoxForge corpus and copies all WAVs into
a destination directory along with their transcriptions.
In each subdirectory of the given directory, it looks for audio files in
the 'wav' or 'flac' directory and for transcriptions in 'etc/PROMPTS'.
""")
arger.add_argument('infname', action="store",
help="the directory containing the corpus")
arger.add_argument('outdir', action="store",
help='an output directory for files with audio and '
'their transcription')
arger.add_argument('-v',
action="store_true",
dest="verbose",
help='set verbose output')
arger.add_argument('-d', '--dictionary',
type=argparse.FileType('r'),
metavar='FILE',
help='Path towards a phonetic dictionary constraining '
'what words should be allowed in transcriptions. '
'The dictionary is expected to contain the words '
'in the first whitespace-separated column.')
arger.add_argument('-l', '--language',
default='en',
metavar='CODE',
help='Code of the language (e.g., "cs", "en") of the transcriptions.')
arger.add_argument('-w', '--word-list',
default='word_list',
metavar='FILE',
help='Path towards an output file to contain a list '
'of words that appeared in the transcriptions, '
'one word per line.')
args = arger.parse_args()
# Do the copying.
size, n_collisions, n_overwrites, n_missing_audio = convert(args)
# Report.
print "Size of copied audio data:", size
msg = ("# collisions: {0}; # overwrites: {1}; # missing recordings: {2}").format(n_collisions, n_overwrites, n_missing_audio)
print msg
sec = size / (16000 * 2)
hour = sec / 3600.0
print "Length of audio data in hours (for 16kHz 16b WAVs):", hour
# Print out the contents of the word counter to 'word_list'.
with codecs.open(args.word_list, 'w', 'UTF-8') as word_list_file:
for word in sorted(wc):
word_list_file.write(u"{word}\t{count}\n".format(word=word, count=wc[word]))
| 34.110619
| 147
| 0.605785
|
4a010b6775d25f4ead28daed37450bd3214b7fb9
| 8,347
|
py
|
Python
|
tests/unit/python/foglamp/common/test_statistics.py
|
ashwinscale/FogLAMP
|
dac6f286d31978b6ce00303df8398ea5b2031d79
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/python/foglamp/common/test_statistics.py
|
ashwinscale/FogLAMP
|
dac6f286d31978b6ce00303df8398ea5b2031d79
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/python/foglamp/common/test_statistics.py
|
ashwinscale/FogLAMP
|
dac6f286d31978b6ce00303df8398ea5b2031d79
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# FOGLAMP_BEGIN
# See: http://foglamp.readthedocs.io/
# FOGLAMP_END
import asyncio
import json
from unittest.mock import MagicMock, patch
import pytest
from foglamp.common.statistics import Statistics, _logger
from foglamp.common.storage_client.storage_client import StorageClient
__author__ = "Ashish Jabble, Mark Riddoch, Vaibhav Singhal"
__copyright__ = "Copyright (c) 2017 OSIsoft, LLC"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
@pytest.allure.feature("unit")
@pytest.allure.story("common", "statistics")
class TestStatistics:
def test_init_with_no_storage(self):
storage_client_mock = None
with pytest.raises(TypeError) as excinfo:
Statistics(storage_client_mock)
assert str(excinfo.value) == 'Must be a valid Storage object'
def test_init_with_storage(self):
storage_client_mock = MagicMock(spec=StorageClient)
s = Statistics(storage_client_mock)
assert isinstance(s, Statistics)
assert isinstance(s._storage, StorageClient)
def test_singleton(self):
""" Test that two statistics instance share the same state """
storageMock1 = MagicMock(spec=StorageClient)
s1 = Statistics(storageMock1)
storageMock2 = MagicMock(spec=StorageClient)
s2 = Statistics(storageMock2)
assert s1._storage == s2._storage
def test_register(self):
""" Test that register results in a database insert """
storageMock = MagicMock(spec=StorageClient)
stats = Statistics(storageMock)
loop = asyncio.get_event_loop()
loop.run_until_complete(stats.register('T1Stat', 'Test stat'))
args, kwargs = stats._storage.insert_into_tbl.call_args
assert args[0] == 'statistics'
expected_storage_args = json.loads(args[1])
assert expected_storage_args['key'] == 'T1Stat'
assert expected_storage_args['value'] == 0
assert expected_storage_args['previous_value'] == 0
assert expected_storage_args['description'] == 'Test stat'
stats._storage.insert_into_tbl.reset_mock()
def test_register_twice(self):
""" Test that register results in a database insert only once for same key"""
storageMock = MagicMock(spec=StorageClient)
stats = Statistics(storageMock)
loop = asyncio.get_event_loop()
loop.run_until_complete(stats.register('T2Stat', 'Test stat'))
loop.run_until_complete(stats.register('T2Stat', 'Test stat'))
assert stats._storage.insert_into_tbl.called
assert stats._storage.insert_into_tbl.call_count == 1
stats._storage.insert_into_tbl.reset_mock()
async def test_register_exception(self):
storage_client_mock = MagicMock(spec=StorageClient)
s = Statistics(storage_client_mock)
with patch.object(_logger, 'exception') as logger_exception:
with patch.object(s._storage, 'insert_into_tbl', side_effect=Exception):
with pytest.raises(Exception):
await s.register('T3Stat', 'Test stat')
args, kwargs = logger_exception.call_args
assert args[0] == 'Unable to create new statistic %s, error %s'
assert args[1] == 'T3Stat'
def test_load_keys(self):
"""Test the load key"""
storage_client_mock = MagicMock(spec=StorageClient)
s = Statistics(storage_client_mock)
storage_return = {'rows': [{"previous_value": 0, "value": 1,
"key": "K1", "description": "desc1"}]}
with patch.object(s._storage, 'query_tbl_with_payload', return_value=storage_return) as patch_query_tbl:
s._load_keys()
assert "K1" in s._registered_keys
patch_query_tbl.assert_called_once_with('statistics', '{"return": ["key"]}')
async def test_load_keys_exception(self):
"""Test the load key exception"""
storage_client_mock = MagicMock(spec=StorageClient)
s = Statistics(storage_client_mock)
with patch.object(_logger, 'exception') as logger_exception:
with patch.object(s._storage, 'query_tbl_with_payload', side_effect=Exception):
with pytest.raises(Exception):
await s._load_keys()
args, kwargs = logger_exception.call_args
assert args[0] == 'Failed to retrieve statistics keys, %s'
async def test_update(self):
storage_client_mock = MagicMock(spec=StorageClient)
s = Statistics(storage_client_mock)
payload = '{"where": {"column": "key", "condition": "=", "value": "READING"}, ' \
'"expressions": [{"column": "value", "operator": "+", "value": 5}]}'
expected_result = {"response": "updated", "rows_affected": 1}
with patch.object(s._storage, 'update_tbl', return_value=expected_result) as stat_update:
await s.update('READING', 5)
assert expected_result['response'] == "updated"
stat_update.assert_called_once_with('statistics', payload)
@pytest.mark.parametrize("key, value_increment, exception_name, exception_message", [
(123456, 120, TypeError, "key must be a string"),
('PURGED', '120', ValueError, "value must be an integer"),
(None, '120', TypeError, "key must be a string"),
('123456', '120', ValueError, "value must be an integer"),
('READINGS', None, ValueError, "value must be an integer")
])
async def test_update_with_invalid_params(self, key, value_increment, exception_name, exception_message):
storage_client_mock = MagicMock(spec=StorageClient)
s = Statistics(storage_client_mock)
with pytest.raises(exception_name) as excinfo:
await s.update(key, value_increment)
assert exception_message == str(excinfo.value)
async def test_update_exception(self):
storage_client_mock = MagicMock(spec=StorageClient)
s = Statistics(storage_client_mock)
msg = 'Unable to update statistics value based on statistics_key %s and value_increment %d,' \
' error %s', 'BUFFERED', 5, ''
with patch.object(s._storage, 'update_tbl', side_effect=Exception()):
with pytest.raises(Exception):
with patch.object(_logger, 'exception') as logger_exception:
await s.update('BUFFERED', 5)
logger_exception.assert_called_once_with(*msg)
async def test_add_update(self):
stat_dict = {'FOGBENCH/TEMPERATURE': 1}
storage_client_mock = MagicMock(spec=StorageClient)
s = Statistics(storage_client_mock)
payload = '{"where": {"column": "key", "condition": "=", "value": "FOGBENCH/TEMPERATURE"}, ' \
'"expressions": [{"column": "value", "operator": "+", "value": 1}]}'
expected_result = {"response": "updated", "rows_affected": 1}
with patch.object(s._storage, 'update_tbl', return_value=expected_result) as stat_update:
await s.add_update(stat_dict)
assert expected_result['response'] == "updated"
stat_update.assert_called_once_with('statistics', payload)
async def test_insert_when_key_error(self):
stat_dict = {'FOGBENCH/TEMPERATURE': 1}
storage_client_mock = MagicMock(spec=StorageClient)
s = Statistics(storage_client_mock)
with patch.object(_logger, 'exception') as logger_exception:
with pytest.raises(KeyError):
await s.add_update(stat_dict)
args, kwargs = logger_exception.call_args
assert args[0] == 'Statistics key %s has not been registered'
assert args[1] == 'FOGBENCH/TEMPERATURE'
async def test_add_update_exception(self):
stat_dict = {'FOGBENCH/TEMPERATURE': 1}
storage_client_mock = MagicMock(spec=StorageClient)
s = Statistics(storage_client_mock)
msg = 'Unable to update statistics value based on statistics_key %s and value_increment' \
' %s, error %s', "FOGBENCH/TEMPERATURE", 1, ''
with patch.object(s._storage, 'update_tbl', side_effect=Exception()):
with pytest.raises(Exception):
with patch.object(_logger, 'exception') as logger_exception:
await s.add_update(stat_dict)
logger_exception.assert_called_once_with(*msg)
| 47.158192
| 112
| 0.661555
|
4a010ba74b79d37fe5e4f3cfa62eac911788054f
| 8,048
|
py
|
Python
|
orbitdbapi/asyncDB.py
|
phillmac/py-orbit-db-http-client-dev
|
5f806572cf78feda7faa43ce9025415518183921
|
[
"MIT"
] | 1
|
2019-06-24T08:35:41.000Z
|
2019-06-24T08:35:41.000Z
|
orbitdbapi/asyncDB.py
|
phillmac/py-orbit-db-http-client-dev
|
5f806572cf78feda7faa43ce9025415518183921
|
[
"MIT"
] | null | null | null |
orbitdbapi/asyncDB.py
|
phillmac/py-orbit-db-http-client-dev
|
5f806572cf78feda7faa43ce9025415518183921
|
[
"MIT"
] | 1
|
2019-06-24T08:38:51.000Z
|
2019-06-24T08:38:51.000Z
|
import json
import logging
from collections.abc import Hashable, Iterable
from copy import deepcopy
from urllib.parse import quote as urlquote
from sseclient import SSEClient
class DB ():
def __init__(self, client, params, **kwargs):
self.__cache = {}
self.__client = client
self.__params = params
self.__db_options = params.get('options', {})
self.__dbname = params['dbname']
self.__id = params['id']
self.__id_safe = urlquote(self.__id, safe='')
self.__type = params['type']
self.__use_cache = kwargs.get('use_db_cache', client.use_db_cache)
self.__enforce_caps = kwargs.get('enforce_caps', True)
self.__enforce_indexby = kwargs.get('enforce_indexby', True)
self.__index_by = self.__db_options.get('indexBy')
self.__sseClients = []
self.logger = logging.getLogger(__name__)
def clear_cache(self):
self.__cache = {}
def cache_get(self, item):
item = str(item)
return deepcopy(self.__cache.get(item))
def cache_remove(self, item):
item = str(item)
if item in self.__cache:
del self.__cache[item]
@property
def cached(self):
return self.__use_cache
@property
def index_by(self):
return self.__index_by
@property
def cache(self):
return deepcopy(self.__cache)
@property
def params(self):
return deepcopy(self.__params)
@property
def dbname(self):
return self.__dbname
@property
def id(self):
return self.__id
@property
def dbtype(self):
return self.__type
@property
def capabilities(self):
return deepcopy(self.__params.get('capabilities', []))
@property
def queryable(self):
return 'query' in self.__params.get('capabilities', [])
@property
def putable(self):
return 'put' in self.__params.get('capabilities', [])
@property
def removeable(self):
return 'remove' in self.__params.get('capabilities', [])
@property
def iterable(self):
return 'iterator' in self.__params.get('capabilities', [])
@property
def addable(self):
return 'add' in self.__params.get('capabilities', [])
@property
def valuable(self):
return 'value' in self.__params.get('capabilities', [])
@property
def incrementable(self):
return 'inc' in self.__params.get('capabilities', [])
@property
def indexed(self):
return 'indexBy' in self.__db_options
@property
def can_append(self):
return self.__params.get('canAppend')
@property
def write_access(self):
return deepcopy(self.__params.get('write'))
def close(self):
for sseClient in self.__sseClients:
sseClient.close()
self.__client._remove_db(self)
def info(self):
endpoint = '/'.join(['db', self.__id_safe])
return self.__client._call('GET', endpoint)
async def get(self, item, cache=None, unpack=False):
if cache is None: cache = self.__use_cache
item = str(item)
if cache and item in self.__cache:
result = self.__cache[item]
else:
endpoint = '/'.join(['db', self.__id_safe, item])
result = await self.__client._call('GET', endpoint)
if cache: self.__cache[item] = result
if isinstance(result, Hashable): return deepcopy(result)
if isinstance(result, Iterable): return deepcopy(result)
if unpack:
if isinstance(result, Iterable): return deepcopy(next(result, {}))
if isinstance(result, list): return deepcopy(next(iter(result), {}))
return result
async def get_raw(self, item):
endpoint = '/'.join(['db', self.__id_safe, 'raw', str(item)])
return (await self.__client._call('GET', endpoint))
async def put(self, item, cache=None):
if self.__enforce_caps and not self.putable:
raise CapabilityError(f'Db {self.__dbname} does not have put capability')
if self.indexed and (not self.__index_by in item) and self.__enforce_indexby:
raise MissingIndexError(f"The provided document {item} doesn't contain field '{self.__index_by}'")
if cache is None: cache = self.__use_cache
if cache:
if self.indexed and hasattr(item, self.__index_by):
index_val = getattr(item, self.__index_by)
else:
index_val = item.get('key')
if index_val:
self.__cache[index_val] = item
endpoint = '/'.join(['db', self.__id_safe, 'put'])
entry_hash = (await self.__client._call('POST', endpoint, json=item)).get('hash')
if cache and entry_hash: self.__cache[entry_hash] = item
return entry_hash
def add(self, item, cache=None):
if self.__enforce_caps and not self.addable:
raise CapabilityError(f'Db {self.__dbname} does not have add capability')
if cache is None: cache = self.__use_cache
endpoint = '/'.join(['db', self.__id_safe, 'add'])
entry_hash = self.__client._call('POST', endpoint, json=item).get('hash')
if cache and entry_hash: self.__cache[entry_hash] = item
return entry_hash
def inc(self, val):
val = int(val)
endpoint = '/'.join(['db', self.__id_safe, 'inc'])
return self.__client._call('POST', endpoint, json={'val':val})
def value(self):
endpoint = '/'.join(['db', self.__id_safe, 'value'])
return self.__client._call('GET', endpoint)
def iterator_raw(self, **kwargs):
if self.__enforce_caps and not self.iterable:
raise CapabilityError(f'Db {self.__dbname} does not have iterator capability')
endpoint = '/'.join(['db', self.__id_safe, 'rawiterator'])
return self.__client._call('GET', endpoint, json=kwargs)
def iterator(self, **kwargs):
if self.__enforce_caps and not self.iterable:
raise CapabilityError(f'Db {self.__dbname} does not have iterator capability')
endpoint = '/'.join(['db', self.__id_safe, 'iterator'])
return self.__client._call('GET', endpoint, json=kwargs)
def index(self):
endpoint = '/'.join(['db', self.__id_safe, 'index'])
result = self.__client._call('GET', endpoint)
return result
async def all(self):
endpoint = '/'.join(['db', self.__id_safe, 'all'])
result = await self.__client._call('GET', endpoint)
if isinstance(result, Hashable):
self.__cache = result
return result
def remove(self, item):
if self.__enforce_caps and not self.removeable:
raise CapabilityError(f'Db {self.__dbname} does not have remove capability')
item = str(item)
endpoint = '/'.join(['db', self.__id_safe, item])
return self.__client._call('DELETE', endpoint)
async def unload(self):
self.close()
endpoint = '/'.join(['db', self.__id_safe])
return await self.__client._call('DELETE', endpoint)
def events(self, eventnames):
endpoint = '/'.join(['db', self.__id_safe, 'events', urlquote(eventnames, safe='')])
res = self.__client._call_raw('GET', endpoint, stream=True)
res.raise_for_status()
sseClient = SSEClient(res.stream())
self.__sseClients.append(sseClient)
for event in sseClient.events():
event.json = json.loads(event.data)
yield event
del self.__sseClients[self.__sseClients.index(sseClient)]
def find_peers(self, **kwargs):
endpoint = '/'.join(['peers','searches','db', self.__id_safe])
return self.__client._call('POST', endpoint, json=kwargs)
def get_peers(self):
endpoint = '/'.join(['db', self.__id_safe, 'peers'])
return self.__client._call('GET', endpoint)
class CapabilityError(Exception):
pass
class MissingIndexError(Exception):
pass
| 33.67364
| 110
| 0.619657
|
4a010beb9643f85e85e8fadb26a41c1fba09518d
| 2,194
|
py
|
Python
|
tooz/drivers/zake.py
|
mail2nsrajesh/tooz
|
e6274bdd5b36f7b114e255c258659223ba921773
|
[
"Apache-2.0"
] | null | null | null |
tooz/drivers/zake.py
|
mail2nsrajesh/tooz
|
e6274bdd5b36f7b114e255c258659223ba921773
|
[
"Apache-2.0"
] | null | null | null |
tooz/drivers/zake.py
|
mail2nsrajesh/tooz
|
e6274bdd5b36f7b114e255c258659223ba921773
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2013-2014 Mirantis Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from zake import fake_client
from zake import fake_storage
from tooz import coordination
from tooz.drivers import zookeeper
class ZakeDriver(zookeeper.KazooDriver):
    """Coordination driver backed by the in-memory `zake`_ fake client.

    Rather than talking to a live `zookeeper`_ ensemble, this driver uses
    ``zake`` to emulate one entirely in memory.  It **should** therefore
    only be used for testing and integration, where real `zookeeper`_
    servers are typically not available.

    .. _zake: https://pypi.python.org/pypi/zake
    .. _zookeeper: http://zookeeper.apache.org/
    """

    CHARACTERISTICS = (
        coordination.Characteristics.NON_TIMEOUT_BASED,
        coordination.Characteristics.DISTRIBUTED_ACROSS_THREADS,
    )
    """
    Tuple of :py:class:`~tooz.coordination.Characteristics` introspectable
    enum member(s) that can be used to interrogate how this driver works.
    """

    # Shared storage standing in for a real zookeeper server's backing
    # store: zake has no server process, so every client created without an
    # explicit 'storage' option is pointed at this one thread-safe instance
    # to simulate zookeeper's consistent view of the data.
    fake_storage = fake_storage.FakeStorage(
        fake_client.k_threading.SequentialThreadingHandler())

    @classmethod
    def _make_client(cls, parsed_url, options):
        """Build a fake client, honouring an optional 'storage' override."""
        return fake_client.FakeClient(
            storage=options.get('storage', cls.fake_storage))
| 37.186441
| 79
| 0.726983
|
4a010c52874aa5d6f3b9031e884a03ed455fe9a8
| 1,604
|
py
|
Python
|
SR_attack/data/base_dataset.py
|
Hwihuni/Deep-Model-Watermarking
|
73ea2286ace0aac3d55f6056da38ea2bc38ed00d
|
[
"MIT"
] | 32
|
2021-03-29T08:34:31.000Z
|
2022-03-13T07:41:42.000Z
|
SR_attack/data/base_dataset.py
|
Hwihuni/Deep-Model-Watermarking
|
73ea2286ace0aac3d55f6056da38ea2bc38ed00d
|
[
"MIT"
] | 4
|
2021-09-29T12:13:34.000Z
|
2022-03-29T03:41:33.000Z
|
SR_attack/data/base_dataset.py
|
Hwihuni/Deep-Model-Watermarking
|
73ea2286ace0aac3d55f6056da38ea2bc38ed00d
|
[
"MIT"
] | 5
|
2021-06-21T08:01:29.000Z
|
2022-01-27T05:42:45.000Z
|
import torch.utils.data as data
from PIL import Image
import torchvision.transforms as transforms
class BaseDataset(data.Dataset):
    """Common base class for the project's dataset implementations."""

    def __init__(self):
        super(BaseDataset, self).__init__()

    def name(self):
        """Return the identifying name of this dataset."""
        return 'BaseDataset'
def get_transform(opt):
    """Build the torchvision preprocessing pipeline described by ``opt``.

    ``opt.resize_or_crop`` selects the geometric step; training runs
    additionally get a random horizontal flip unless ``opt.no_flip`` is set.
    Output tensors are normalized to [-1, 1] per channel.
    """
    steps = []
    mode = opt.resize_or_crop
    if mode == 'resize_and_crop':
        steps.append(
            transforms.Scale([opt.loadSizeX, opt.loadSizeY], Image.BICUBIC))
        steps.append(transforms.RandomCrop(opt.fineSize))
    elif mode == 'crop':
        steps.append(transforms.RandomCrop(opt.fineSize))
    elif mode == 'scale_width':
        steps.append(transforms.Lambda(
            lambda img: __scale_width(img, opt.fineSize)))
    elif mode == 'scale_width_and_crop':
        steps.append(transforms.Lambda(
            lambda img: __scale_width(img, opt.loadSizeX)))
        steps.append(transforms.RandomCrop(opt.fineSize))
    if opt.isTrain and not opt.no_flip:
        steps.append(transforms.RandomHorizontalFlip())
    steps.append(transforms.ToTensor())
    steps.append(transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)))
    return transforms.Compose(steps)
def __scale_width(img, target_width):
    """Resize ``img`` to ``target_width`` wide, preserving aspect ratio."""
    width, height = img.size
    if width == target_width:
        # Already the requested width -- hand the image back untouched.
        return img
    new_height = int(target_width * height / width)
    return img.resize((target_width, new_height), Image.BICUBIC)
| 34.869565
| 69
| 0.658354
|
4a010c5bc4d794027666014e28dc152cfe213361
| 2,114
|
py
|
Python
|
memote/experimental/tabular.py
|
matthiaskoenig/memote
|
7c14cd304523dda83eaf4835ee007243e8673f85
|
[
"Apache-2.0"
] | null | null | null |
memote/experimental/tabular.py
|
matthiaskoenig/memote
|
7c14cd304523dda83eaf4835ee007243e8673f85
|
[
"Apache-2.0"
] | null | null | null |
memote/experimental/tabular.py
|
matthiaskoenig/memote
|
7c14cd304523dda83eaf4835ee007243e8673f85
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2017 Novo Nordisk Foundation Center for Biosustainability,
# Technical University of Denmark.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modules related to reading and writing files."""
from __future__ import absolute_import
import pandas as pd
def read_tabular(filename, dtype_conversion=None):
    """
    Read a tabular data file which can be CSV, TSV, XLS or XLSX.

    Parameters
    ----------
    filename : str or pathlib.Path
        The full file path. May be a compressed file.
    dtype_conversion : dict
        Column names as keys and corresponding type for loading the data.
        Please take a look at the `pandas documentation
        <https://pandas.pydata.org/pandas-docs/stable/io.html#specifying-column-data-types>`__
        for detailed explanations. The mapping is not modified.

    Returns
    -------
    pandas.DataFrame
        The data table.

    Raises
    ------
    ValueError
        If the filename has no extension or an unrecognized one.
    """
    # Copy so the caller's mapping is never mutated by the fix-up below.
    dtype_conversion = {} if dtype_conversion is None else dict(dtype_conversion)
    # str() makes pathlib.Path inputs work as the docstring promises.
    parts = str(filename).split(".", 1)
    if len(parts) < 2:
        raise ValueError("Missing file extension in '{}'.".format(filename))
    ext = parts[1].lower()
    # Completely empty columns are interpreted as float by default.
    dtype_conversion["comment"] = str
    if "csv" in ext:
        df = pd.read_csv(filename, dtype=dtype_conversion, encoding="utf-8")
    elif "tsv" in ext:
        df = pd.read_csv(filename, dtype=dtype_conversion, encoding="utf-8",
                         sep="\t")
    elif "xls" in ext or "xlsx" in ext:
        # ``encoding`` is not a valid read_excel argument (pandas removed
        # it); the engine determines the encoding from the file itself.
        df = pd.read_excel(filename, dtype=dtype_conversion)
    # TODO: Add a function to parse ODS data into a pandas data frame.
    else:
        raise ValueError("Unknown file format '{}'.".format(ext))
    return df
| 34.655738
| 94
| 0.690634
|
4a010d5eb9728f306b5bd98d5980124ddda29790
| 3,728
|
py
|
Python
|
staramr/blast/results/BlastHitPartitions.py
|
JeffreyThiessen/staramr
|
8550f231b7dc528b91a2c3665a5f99f0fa3d350b
|
[
"Apache-2.0"
] | 53
|
2018-03-14T01:22:40.000Z
|
2022-03-24T17:09:02.000Z
|
staramr/blast/results/BlastHitPartitions.py
|
JeffreyThiessen/staramr
|
8550f231b7dc528b91a2c3665a5f99f0fa3d350b
|
[
"Apache-2.0"
] | 114
|
2018-05-02T18:49:13.000Z
|
2022-03-21T16:27:34.000Z
|
staramr/blast/results/BlastHitPartitions.py
|
JeffreyThiessen/staramr
|
8550f231b7dc528b91a2c3665a5f99f0fa3d350b
|
[
"Apache-2.0"
] | 13
|
2018-11-14T20:12:35.000Z
|
2022-03-03T04:06:51.000Z
|
import logging
from collections import OrderedDict
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
logger = logging.getLogger('BlastHits')
from staramr.blast.results.AMRHitHSP import AMRHitHSP
from staramr.exceptions.InvalidPositionException import InvalidPositionException
"""
Class for partitioning up blast hits into non-overlapping regions.
"""
class BlastHitPartitions:
    """Groups BLAST hits into sets of mutually overlapping contig regions."""

    def __init__(self):
        """
        Creates a new object to store BLAST hit partitions.
        """
        self._partitions = OrderedDict()

    def append(self, hit: AMRHitHSP) -> None:
        """
        Adds a new blast hit to the set of partitions.
        :param hit: The hit to add.
        :return: None
        """
        if hit.get_genome_contig_start() > hit.get_genome_contig_end() and hit.get_genome_contig_strand() == 'plus':
            raise InvalidPositionException(
                "Unsupported condition: strand=plus and contig start > contig end for hit (contig=" + hit.get_genome_contig_id() + ", start=" +
                str(hit.get_genome_contig_start()) + ", end=" + str(hit.get_genome_contig_end()) + ")")
        existing = self._get_existing_partition(hit)
        if existing is None:
            self._create_new_partition(hit)
        else:
            self._add_hit_partition(hit, existing)

    def _add_hit_partition(self, hit: AMRHitHSP, partition: Dict[str, Any]) -> None:
        """Widens ``partition`` to cover ``hit`` and records the hit in it."""
        lo, hi = self._stranded_ends(hit)
        partition['start'] = min(partition['start'], lo)
        partition['end'] = max(partition['end'], hi)
        partition['hits'].append(hit)

    def _get_existing_partition(self, hit: AMRHitHSP) -> Optional[Dict[str, Any]]:
        """Returns the first partition on the hit's contig that overlaps it, or None."""
        for partition in self._partitions.get(hit.get_genome_contig_id(), []):
            if self._hit_in_partition(hit, partition):
                return partition
        return None

    def _hit_in_partition(self, hit: AMRHitHSP, partition: Dict[str, Any]) -> bool:
        """True when ``hit`` overlaps the region spanned by ``partition``."""
        pstart, pend = partition['start'], partition['end']
        lo, hi = self._stranded_ends(hit)
        return (pstart < lo < pend) or (pstart < hi < pend) or (lo <= pstart and hi >= pend)

    def _create_new_partition(self, hit: AMRHitHSP) -> None:
        """Starts a fresh partition on the hit's contig containing only ``hit``."""
        lo, hi = self._stranded_ends(hit)
        fresh = {
            'start': lo,
            'end': hi,
            'hits': [hit],
        }
        self._partitions.setdefault(hit.get_genome_contig_id(), []).append(fresh)

    def get_hits_nonoverlapping_regions(self) -> List[List[AMRHitHSP]]:
        """
        Gets BLAST hits divided up into separate lists for non-overlapping regions.
        :return: A list of BLAST hits divided up into non-overlapping regions.
        """
        return [p['hits'] for name in self._partitions for p in self._partitions[name]]

    def _stranded_ends(self, hit: AMRHitHSP) -> Tuple[int, int]:
        """
        Gets the start/end coordinates, taking into account the strand.
        :param hit: The hit.
        :return: The (start,end) as a tuple.
        """
        if hit.get_genome_contig_strand() == 'plus':
            return hit.get_genome_contig_start(), hit.get_genome_contig_end()
        return hit.get_genome_contig_end(), hit.get_genome_contig_start()
| 36.194175
| 143
| 0.643777
|
4a010ee84039cefdd597f90a1344ae71f039b46e
| 5,293
|
py
|
Python
|
src/domain_park/cli.py
|
nhairs/domain-park
|
b43f61e4f4cb1c5f69676e7297174e4c2ca184e0
|
[
"MIT"
] | 3
|
2020-04-13T07:19:44.000Z
|
2021-03-16T10:27:54.000Z
|
src/domain_park/cli.py
|
nhairs/domain-park
|
b43f61e4f4cb1c5f69676e7297174e4c2ca184e0
|
[
"MIT"
] | null | null | null |
src/domain_park/cli.py
|
nhairs/domain-park
|
b43f61e4f4cb1c5f69676e7297174e4c2ca184e0
|
[
"MIT"
] | 2
|
2020-04-13T08:24:47.000Z
|
2021-01-20T07:24:49.000Z
|
### IMPORTS
### ============================================================================
## Standard Library
import argparse
import logging
import sys
from typing import List
## Installed
import netifaces # type: ignore
import nserver
## Application
from . import _version
from .server import server as nserver_server
### CONSTANTS
### ============================================================================
DESCRIPTION = (
    "domain-park is a DNS Name Server that can be used to prevent spoofed emails on parked domains."
)
EPILOG = """For full information including licence see https://github.com/nhairs/domain-park
Copyright (c) 2020 Nicholas Hairs
"""
# Module-level handle on the running Application instance; populated by main()
# so the application can be inspected after startup (e.g. from tests).
_APP = None
### FUNCTIONS
### ============================================================================
def get_available_ips():
    """List every IPv4 address on this machine, one entry per interface link."""
    # Source: https://stackoverflow.com/a/274644
    addresses = [
        f"{link['addr']} ({iface})"
        for iface in netifaces.interfaces()
        for link in netifaces.ifaddresses(iface).get(netifaces.AF_INET, [])
    ]
    # Convenience entry meaning "bind to everything listed above".
    addresses.append("0.0.0.0 (all above)")
    return addresses
def main(argv=None):
    """Console entry point: build the Application and run it.

    The instance is stored in the module-level ``_APP`` so it remains
    reachable afterwards (e.g. from tests or a debugger).
    """
    global _APP  # pylint: disable=global-statement
    app = Application(argv)
    _APP = app
    app.run()
### CLASSES
### ============================================================================
class Application:
    """domain-park application.

    Handles reading config and instantiating nserver instance.
    """

    def __init__(self, argv: List[str] = None):
        # Fall back to the process arguments (minus the program name)
        # when no explicit argv is supplied.
        self.argv = argv if argv is not None else sys.argv[1:]
        self.parser = self.get_parser()
        self.args = self.parser.parse_args(self.argv)
        self.server = self.get_server()
        return

    def run(self) -> None:
        """Run application."""
        # --ips is a query-and-exit mode: print bindable addresses
        # instead of starting the name server.
        if self.args.ips:
            print("\n".join(get_available_ips()))
            return
        self.server.run()
        return

    @staticmethod
    def get_parser() -> argparse.ArgumentParser:
        """Get argument parser."""
        parser = argparse.ArgumentParser(
            formatter_class=argparse.RawDescriptionHelpFormatter,
            description=DESCRIPTION,
            epilog=EPILOG,
        )
        parser.add_argument("--version", action="version", version=_version.get_version_info_full())
        # Server settings
        parser.add_argument(
            "--host",
            action="store",
            default="localhost",
            help="Host (IP) to bind to. Use --ips to see available. Defaults to localhost.",
        )
        parser.add_argument(
            "--port",
            action="store",
            default=9953,
            type=int,
            help="Port to bind to. Defaults to 9953.",
        )
        # --tcp / --udp are two mutually exclusive ways of setting the same
        # `transport` destination via store_const.
        transport_group = parser.add_mutually_exclusive_group()
        transport_group.add_argument(
            "--tcp",
            action="store_const",
            const="TCPv4",
            dest="transport",
            help="Use TCPv4 socket for transport.",
        )
        transport_group.add_argument(
            "--udp",
            action="store_const",
            const="UDPv4",
            dest="transport",
            help="Use UDPv4 socket for transport. (default)",
        )
        # DNS settings
        parser.add_argument(
            "-n",
            "--nameserver",
            required=True,
            action="append",
            help="Add NameServer to list returned on NS lookups. This should be equal to the NS records available publicly running domain-park. Must be supplied at least once, and has no limit. Reccomended to have 2-4 Name Servers. Expected to be in the format of either 'FQDN:IP' or 'HOST'",
            dest="nameservers",
            metavar="NAMESERVER",
        )
        parser.add_argument(
            "--rua",
            action="store",
            help="Email address to use for DMARC aggregate repots.",
            metavar="EMAIL",
        )
        parser.add_argument(
            "--ruf",
            action="store",
            help="Email address to use for DMARC forensic reports.",
            metavar="EMAIL",
        )
        parser.add_argument("--ips", action="store_true", help="Print available IPs and exit")
        # Default transport when neither --tcp nor --udp is given.
        parser.set_defaults(transport="UDPv4")
        return parser

    def get_server(self) -> nserver.NameServer:
        """Get NameServer instance."""
        # Configure the shared nserver instance from the parsed arguments.
        server = nserver_server
        server.settings.SERVER_TYPE = self.args.transport
        server.settings.SERVER_ADDRESS = self.args.host
        server.settings.SERVER_PORT = self.args.port
        server.settings.CONSOLE_LOG_LEVEL = logging.WARNING
        nameservers = []
        for nameserver in self.args.nameservers:
            # Each entry is either 'FQDN:IP' or a bare host/IP.
            if ":" in nameserver:
                host, ip = nameserver.split(":")
            else:
                # assume IP
                host = None
                ip = nameserver
            # TODO: IP validation
            nameservers.append((host, ip))
        server.settings.NAME_SERVERS = nameservers
        server.settings.RUA = self.args.rua
        server.settings.RUF = self.args.ruf
        return server
| 29.903955
| 292
| 0.555451
|
4a010f015009c64ec6ffe7b5ef0b8d8a5fb3a367
| 17,638
|
py
|
Python
|
im_caption_full.py
|
ukyh/unsupervised_captioning_fast
|
97e50c871a71c3133962b2a4d3822bf186ca9310
|
[
"MIT"
] | 229
|
2019-03-08T12:55:45.000Z
|
2022-03-30T06:37:37.000Z
|
im_caption_full.py
|
ukyh/unsupervised_captioning_fast
|
97e50c871a71c3133962b2a4d3822bf186ca9310
|
[
"MIT"
] | 38
|
2019-04-08T08:50:17.000Z
|
2021-11-04T07:53:10.000Z
|
im_caption_full.py
|
ukyh/unsupervised_captioning_fast
|
97e50c871a71c3133962b2a4d3822bf186ca9310
|
[
"MIT"
] | 57
|
2019-03-14T11:14:04.000Z
|
2022-01-16T17:17:14.000Z
|
"""Train the full model.
python im_caption_full.py --multi_gpu --batch_size 512 --save_checkpoint_steps\
1000 --gen_lr 0.001 --dis_lr 0.001
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import sys
import numpy as np
import tensorflow as tf
import tensorflow.contrib.gan as tfgan
import tensorflow.contrib.slim as slim
from tensorflow.contrib.framework import nest
from tensorflow.contrib.gan.python.losses.python.losses_impl import modified_discriminator_loss
from tensorflow.contrib.gan.python.train import get_sequential_train_hooks
from config import TF_MODELS_PATH
from input_pipeline import input_fn
from misc_fn import crop_sentence
from misc_fn import get_len
from misc_fn import obj_rewards
from misc_fn import transform_grads_fn
from misc_fn import validate_batch_size_for_multi_gpu
from misc_fn import variable_summaries
sys.path.append(TF_MODELS_PATH + '/research/slim')
from nets import inception_v4
tf.logging.set_verbosity(tf.logging.INFO)
# -- Session / hardware flags ------------------------------------------------
tf.flags.DEFINE_integer('intra_op_parallelism_threads', 0, 'Number of threads')
tf.flags.DEFINE_integer('inter_op_parallelism_threads', 0, 'Number of threads')
tf.flags.DEFINE_bool('multi_gpu', False, 'use multi gpus')
# -- Model dimensions and regularization -------------------------------------
tf.flags.DEFINE_integer('emb_dim', 512, 'emb dim')
tf.flags.DEFINE_integer('mem_dim', 512, 'mem dim')
tf.flags.DEFINE_float('keep_prob', 0.8, 'keep prob')
# -- Training schedule -------------------------------------------------------
tf.flags.DEFINE_string('job_dir', 'saving', 'job dir')
tf.flags.DEFINE_integer('batch_size', 64, 'batch size')
tf.flags.DEFINE_integer('max_steps', 1000000, 'maximum training steps')
tf.flags.DEFINE_float('gen_lr', 0.0001, 'learning rate')
tf.flags.DEFINE_float('dis_lr', 0.0001, 'learning rate')
tf.flags.DEFINE_integer('save_summary_steps', 100, 'save summary steps')
tf.flags.DEFINE_integer('save_checkpoint_steps', 5000, 'save ckpt')
tf.flags.DEFINE_integer('max_caption_length', 20, 'max len')
# -- GAN options -------------------------------------------------------------
tf.flags.DEFINE_bool('wass', False, 'use wass')
tf.flags.DEFINE_bool('use_pool', False, 'use pool')
tf.flags.DEFINE_integer('pool_size', 512, 'pool size')
# -- Checkpoints used for initialization -------------------------------------
tf.flags.DEFINE_string('inc_ckpt', None, 'path to InceptionV4 checkpoint')
tf.flags.DEFINE_string('imcap_ckpt', None, 'initialization checkpoint')
tf.flags.DEFINE_string('sae_ckpt', None, 'initialization checkpoint')
# -- Reward weights for the RL objective -------------------------------------
tf.flags.DEFINE_float('w_obj', 10, 'object weight')
tf.flags.DEFINE_float('w_mse', 100, 'object weight')
FLAGS = tf.flags.FLAGS
def generator(inputs, is_training=True):
  """The sentence generator.

  Builds an LSTM language model conditioned on an image feature and samples
  a caption from it, along with the quantities needed for the REINFORCE
  update in rl_loss().

  Args:
    inputs: Tuple whose first element is the image feature batch
      (InceptionV4 features from model_fn; presumably [batch, feat_dim]).
    is_training: Whether to wrap the LSTM cell with dropout.

  Returns:
    Tuple (sequence, logits, log_probs, baseline, feat):
      sequence: sampled word ids, stacked over time.
      logits: per-step vocabulary logits for the sampled decode.
      log_probs: log-probabilities of the sampled words.
      baseline: greedy (argmax) decode used as the self-critic baseline.
      feat: L2-normalized projection of the image feature.
  """
  embedding = tf.get_variable(
      name='embedding',
      shape=[FLAGS.vocab_size, FLAGS.emb_dim],
      initializer=tf.random_uniform_initializer(-0.08, 0.08))
  # Output projection is tied to the input embedding (transposed weights).
  softmax_w = tf.matrix_transpose(embedding)
  softmax_b = tf.get_variable('softmax_b', [FLAGS.vocab_size])
  inputs = inputs[0]
  feat = slim.fully_connected(inputs, FLAGS.mem_dim, activation_fn=None)
  feat = tf.nn.l2_normalize(feat, axis=1)
  batch_size = tf.shape(feat)[0]
  cell = tf.nn.rnn_cell.BasicLSTMCell(FLAGS.mem_dim)
  if is_training:
    cell = tf.nn.rnn_cell.DropoutWrapper(cell, FLAGS.keep_prob, FLAGS.keep_prob)
  zero_state = cell.zero_state(batch_size, tf.float32)
  sequence, logits, log_probs, rnn_outs = [], [], [], []
  # Feed the image feature once to seed the LSTM state; remember it so the
  # baseline decode below starts from the identical state.
  _, state = cell(feat, zero_state)
  state_bl = state
  tf.get_variable_scope().reuse_variables()
  # Stochastic decode: sample each word from the categorical distribution.
  for t in range(FLAGS.max_caption_length):
    if t == 0:
      rnn_inp = tf.zeros([batch_size], tf.int32) + FLAGS.start_id
    rnn_inp = tf.nn.embedding_lookup(embedding, rnn_inp)
    rnn_out, state = cell(rnn_inp, state)
    rnn_outs.append(rnn_out)
    logit = tf.nn.bias_add(tf.matmul(rnn_out, softmax_w), softmax_b)
    categorical = tf.contrib.distributions.Categorical(logits=logit)
    fake = categorical.sample()
    log_prob = categorical.log_prob(fake)
    sequence.append(fake)
    log_probs.append(log_prob)
    logits.append(logit)
    rnn_inp = fake
  sequence = tf.stack(sequence, axis=1)
  log_probs = tf.stack(log_probs, axis=1)
  logits = tf.stack(logits, axis=1)
  # Computes the baseline for self-critic: a greedy decode from the same
  # initial state, used to reduce variance of the policy gradient.
  baseline = []
  state = state_bl
  for t in range(FLAGS.max_caption_length):
    if t == 0:
      rnn_inp = tf.zeros([batch_size], tf.int32) + FLAGS.start_id
    rnn_inp = tf.nn.embedding_lookup(embedding, rnn_inp)
    rnn_out, state = cell(rnn_inp, state)
    logit = tf.nn.bias_add(tf.matmul(rnn_out, softmax_w), softmax_b)
    fake = tf.argmax(logit, axis=1, output_type=tf.int32)
    baseline.append(fake)
    rnn_inp = fake
  baseline = tf.stack(baseline, axis=1)
  return sequence, logits, log_probs, baseline, feat
def discriminator(generated_data, generator_inputs, is_training=True):
  """The discriminator.

  Scores each time step of a sentence with an LSTM and also projects the
  final hidden state back into the image-feature space (used by rl_loss
  and sentence_ae for the reconstruction term).

  Args:
    generated_data: Either the generator's output tuple (first element is
      the sampled word-id sequence) or a real sentence tensor.
    generator_inputs: For real data, a sequence whose last element holds
      the sentence lengths; unused for generated data.
    is_training: Whether to apply dropout to the LSTM cell.

  Returns:
    Tuple (pred, mask, feat): per-step scores, the valid-position mask,
    and the L2-normalized reconstruction of the input feature.
  """
  if type(generated_data) is tuple:
    # When the sentences are generated, we need to compute their length.
    sequence = generated_data[0]
    length = get_len(sequence, FLAGS.end_id)
  else:
    # We already know the length of the sentences from the input pipeline.
    sequence = generated_data
    length = generator_inputs[-1]
  embedding = tf.get_variable(
      name='embedding',
      shape=[FLAGS.vocab_size, FLAGS.emb_dim],
      initializer=tf.random_uniform_initializer(-0.08, 0.08))
  cell = tf.nn.rnn_cell.BasicLSTMCell(FLAGS.mem_dim)
  if is_training:
    cell = tf.nn.rnn_cell.DropoutWrapper(cell, FLAGS.keep_prob, FLAGS.keep_prob)
  rnn_inputs = tf.nn.embedding_lookup(embedding, sequence)
  rnn_out, state = tf.nn.dynamic_rnn(cell, rnn_inputs, length, dtype=tf.float32)
  # One real/fake score per time step.
  pred = slim.fully_connected(rnn_out, 1, activation_fn=None, scope='fc')
  pred = tf.squeeze(pred, 2)
  mask = tf.sequence_mask(length, tf.shape(sequence)[1])
  # Gather the last valid hidden state of each sentence and project it back
  # into the image-feature space for the reconstruction reward.
  idx = tf.transpose(tf.stack([tf.range(tf.shape(length)[0]), length - 1]))
  state_h = tf.gather_nd(rnn_out, idx)
  feat = slim.fully_connected(state_h, FLAGS.mem_dim, activation_fn=None,
                              scope='recon')
  feat = tf.nn.l2_normalize(feat, axis=1)
  return pred, mask, feat
def rl_loss(gan_model, gan_loss, classes, scores, num, add_summaries):
  """Reinforcement learning loss.

  Replaces the generator loss with a self-critical REINFORCE objective
  whose reward combines: the discriminator's per-step scores, an object
  reward from detected image objects, and a sentence-to-image
  reconstruction (MSE) term.  The greedy baseline decode plays the critic.

  Args:
    gan_model: tfgan GANModel from model_fn.
    gan_loss: tfgan GANLoss to be updated.
    classes, scores, num: detected object classes/confidences/counts used
      by obj_rewards.
    add_summaries: Whether to emit TensorBoard summaries.

  Returns:
    gan_loss with generator_loss replaced by the RL objective and the MSE
    reconstruction loss added to discriminator_loss.
  """
  eps = 1e-7
  gamma = 0.9  # Discount factor for cumulative rewards.
  sequence, _, log_probs, seq_bl, pca = gan_model.generated_data
  # Score the greedy baseline sequence with the same discriminator weights;
  # stop_gradient so the baseline does not receive gradients.
  with tf.variable_scope(gan_model.discriminator_scope, reuse=True):
    baselines, _, feat_bl = discriminator((seq_bl, None, None, None, pca), None)
  baselines, feat_bl = nest.map_structure(
      tf.stop_gradient, (baselines, feat_bl))
  logits, mask, feat = gan_model.discriminator_gen_outputs
  # Reconstruction reward: distance between the image feature and the
  # discriminator's reconstruction of it from the sampled sentence.
  dist = tf.reduce_mean(tf.squared_difference(pca, feat), axis=1,
                        keepdims=True) * FLAGS.w_mse
  loss_mse = tf.reduce_mean(dist)
  l_rewards = -dist
  l_rewards = tf.tile(l_rewards, [1, sequence.shape[1]])
  l_rewards = tf.where(mask, l_rewards, tf.zeros_like(l_rewards))
  l_rewards_mat = l_rewards
  l_rewards = tf.unstack(l_rewards, axis=1)
  # Adversarial reward: log of the discriminator's per-step probability.
  dis_predictions = tf.nn.sigmoid(logits)
  d_rewards = tf.log(dis_predictions + eps)
  o_rewards = obj_rewards(sequence, mask, classes, scores, num) * FLAGS.w_obj
  rewards = d_rewards + o_rewards
  rewards = tf.where(mask, rewards, tf.zeros_like(rewards))
  # Same three reward terms computed for the greedy baseline sequence.
  l_bl = -tf.reduce_mean(tf.squared_difference(pca, feat_bl), axis=1,
                         keepdims=True) * FLAGS.w_mse
  l_bl = tf.tile(l_bl, [1, seq_bl.shape[1]])
  l_bl = tf.where(mask, l_bl, tf.zeros_like(l_bl))
  l_bl = tf.unstack(l_bl, axis=1)
  baselines = tf.nn.sigmoid(baselines)
  baselines = tf.log(baselines + eps)
  baselines += obj_rewards(seq_bl, mask, classes, scores, num) * FLAGS.w_obj
  baselines = tf.where(mask, baselines, tf.zeros_like(baselines))
  log_prob_list = tf.unstack(log_probs, axis=1)
  rewards_list = tf.unstack(rewards, axis=1)
  cumulative_rewards = []
  baseline_list = tf.unstack(baselines, axis=1)
  cumulative_baseline = []
  # Discounted reward-to-go for both the sample and the baseline.
  for t in range(FLAGS.max_caption_length):
    cum_value = l_rewards[t]
    for s in range(t, FLAGS.max_caption_length):
      cum_value += np.power(gamma, s - t) * rewards_list[s]
    cumulative_rewards.append(cum_value)
    cum_value = l_bl[t]
    for s in range(t, FLAGS.max_caption_length):
      cum_value += np.power(gamma, s - t) * baseline_list[s]
    cumulative_baseline.append(cum_value)
  c_rewards = tf.stack(cumulative_rewards, axis=1)
  c_baseline = tf.stack(cumulative_baseline, axis=1)
  advantages = []
  final_gen_objective = []
  # Self-critical policy gradient: advantage = reward-to-go minus baseline,
  # clipped for stability; gradients flow only through log_probability.
  for t in range(FLAGS.max_caption_length):
    log_probability = log_prob_list[t]
    cum_advantage = cumulative_rewards[t] - cumulative_baseline[t]
    cum_advantage = tf.clip_by_value(cum_advantage, -5.0, 5.0)
    advantages.append(cum_advantage)
    final_gen_objective.append(
        log_probability * tf.stop_gradient(cum_advantage))
  final_gen_objective = tf.stack(final_gen_objective, axis=1)
  final_gen_objective = tf.losses.compute_weighted_loss(final_gen_objective,
                                                        tf.to_float(mask))
  # Negate: the optimizer minimizes, but the objective is to be maximized.
  final_gen_objective = -final_gen_objective
  advantages = tf.stack(advantages, axis=1)
  if add_summaries:
    tf.summary.scalar('losses/mse', loss_mse)
    tf.summary.scalar('losses/gen_obj', final_gen_objective)
    with tf.name_scope('rewards'):
      variable_summaries(c_rewards, mask, 'rewards')
    with tf.name_scope('advantages'):
      variable_summaries(advantages, mask, 'advantages')
    with tf.name_scope('baselines'):
      variable_summaries(c_baseline, mask, 'baselines')
    with tf.name_scope('log_probs'):
      variable_summaries(log_probs, mask, 'log_probs')
    with tf.name_scope('d_rewards'):
      variable_summaries(d_rewards, mask, 'd_rewards')
    with tf.name_scope('l_rewards'):
      variable_summaries(l_rewards_mat, mask, 'l_rewards')
    with tf.name_scope('o_rewards'):
      variable_summaries(o_rewards, mask, 'o_rewards')
    # Track how many words per sentence earned a non-trivial object reward.
    o_rewards = tf.where(mask, o_rewards, tf.zeros_like(o_rewards))
    minimum = tf.minimum(tf.reduce_min(o_rewards, axis=1, keepdims=True), 0.0)
    o_rewards = tf.reduce_sum(
        tf.to_float(tf.logical_and(o_rewards > minimum, mask)), axis=1)
    o_rewards = tf.reduce_mean(o_rewards)
    tf.summary.scalar('mean_found_obj', o_rewards)
  return gan_loss._replace(generator_loss=final_gen_objective,
                           discriminator_loss=gan_loss.discriminator_loss + loss_mse)
def sentence_ae(gan_model, features, labels, add_summaries=True):
  """Sentence auto-encoder.

  Encodes a sentence with the discriminator's LSTM (reusing its weights),
  then decodes it back with the generator's LSTM (reusing its embedding and
  output projection), returning the mean cross-entropy reconstruction loss.
  This ties the two networks together as an encoder/decoder pair.

  Args:
    gan_model: tfgan GANModel providing the two variable scopes to reuse.
    features: Dict with 'key' (encoder input sentence) and 'lk' (lengths).
    labels: Dict with 'sentence' (decoder target) and 'len' (lengths).
    add_summaries: Whether to emit the loss summary.

  Returns:
    Scalar reconstruction loss.
  """
  # Encode the input sentence into the image-feature space using the
  # discriminator's reconstruction head.
  with tf.variable_scope(gan_model.discriminator_scope, reuse=True):
    feat = discriminator(features['key'], [None, features['lk']])[2]
  with tf.variable_scope(gan_model.generator_scope, reuse=True):
    embedding = tf.get_variable(
        name='embedding',
        shape=[FLAGS.vocab_size, FLAGS.emb_dim],
        initializer=tf.random_uniform_initializer(-0.08, 0.08))
    softmax_w = tf.matrix_transpose(embedding)
    softmax_b = tf.get_variable('softmax_b', [FLAGS.vocab_size])
    # Standard teacher forcing: inputs are sentence[:-1], targets are
    # sentence[1:], and lengths shrink accordingly.
    sentence, ls = labels['sentence'], labels['len']
    targets = sentence[:, 1:]
    sentence = sentence[:, :-1]
    ls -= 1
    sentence = tf.nn.embedding_lookup(embedding, sentence)
    batch_size = tf.shape(feat)[0]
    cell = tf.nn.rnn_cell.BasicLSTMCell(FLAGS.mem_dim)
    cell = tf.nn.rnn_cell.DropoutWrapper(cell, FLAGS.keep_prob, FLAGS.keep_prob)
    zero_state = cell.zero_state(batch_size, tf.float32)
    # Seed the decoder with the encoded sentence feature.
    _, state = cell(feat, zero_state)
    tf.get_variable_scope().reuse_variables()
    out, state = tf.nn.dynamic_rnn(cell, sentence, ls, state)
    out = tf.reshape(out, [-1, FLAGS.mem_dim])
    logits = tf.nn.bias_add(tf.matmul(out, softmax_w), softmax_b)
    logits = tf.reshape(logits, [batch_size, -1, FLAGS.vocab_size])
    # Only score positions inside each sentence's true length.
    mask = tf.sequence_mask(ls, tf.shape(sentence)[1])
    targets = tf.boolean_mask(targets, mask)
    logits = tf.boolean_mask(logits, mask)
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=targets,
                                                          logits=logits)
    loss = tf.reduce_mean(loss)
  if add_summaries:
    tf.summary.scalar('losses/sentence_ae', loss)
  return loss
def model_fn(features, labels, mode, params):
  """The full unsupervised captioning model.

  Estimator model_fn wiring together: frozen InceptionV4 image features,
  the tfgan generator/discriminator pair, the RL loss, and the sentence
  auto-encoder, plus checkpoint restoration for all three sub-models.

  Args:
    features: Dict with 'im' (images), 'classes'/'scores'/'num' (detected
      objects), 'key'/'lk' (auto-encoder sentences and lengths).
    labels: Dict with 'sentence' and 'len' for the real-caption corpus.
    mode: tf.estimator.ModeKeys value.
    params: FLAGS-like object with gen_lr, dis_lr, multi_gpu.

  Returns:
    tf.estimator.EstimatorSpec for training.
  """
  # Under replicate_model_fn, only the first (non-reused) tower is chief
  # and is the one that writes summaries.
  is_chief = not tf.get_variable_scope().reuse
  # Frozen InceptionV4 feature extractor.
  with slim.arg_scope(inception_v4.inception_v4_arg_scope()):
    net, _ = inception_v4.inception_v4(features['im'], None, is_training=False)
  net = tf.squeeze(net, [1, 2])
  inc_saver = tf.train.Saver(tf.global_variables('InceptionV4'))
  gan_model = tfgan.gan_model(
      generator_fn=generator,
      discriminator_fn=discriminator,
      real_data=labels['sentence'][:, 1:],
      generator_inputs=(net, labels['len'] - 1),
      check_shapes=False)
  if is_chief:
    for variable in tf.trainable_variables():
      tf.summary.histogram(variable.op.name, variable)
    tf.summary.histogram('logits/gen_logits',
                         gan_model.discriminator_gen_outputs[0])
    tf.summary.histogram('logits/real_logits',
                         gan_model.discriminator_real_outputs[0])

  def gen_loss_fn(gan_model, add_summaries):
    # Placeholder: the real generator loss is substituted by rl_loss below.
    return 0

  def dis_loss_fn(gan_model, add_summaries):
    # Discriminator loss over valid (masked) time steps only.
    discriminator_real_outputs = gan_model.discriminator_real_outputs
    discriminator_gen_outputs = gan_model.discriminator_gen_outputs
    real_logits = tf.boolean_mask(discriminator_real_outputs[0],
                                  discriminator_real_outputs[1])
    gen_logits = tf.boolean_mask(discriminator_gen_outputs[0],
                                 discriminator_gen_outputs[1])
    return modified_discriminator_loss(real_logits, gen_logits,
                                       add_summaries=add_summaries)

  with tf.name_scope('losses'):
    pool_fn = functools.partial(tfgan.features.tensor_pool,
                                pool_size=FLAGS.pool_size)
    gan_loss = tfgan.gan_loss(
        gan_model,
        generator_loss_fn=gen_loss_fn,
        discriminator_loss_fn=dis_loss_fn,
        gradient_penalty_weight=10 if FLAGS.wass else 0,
        tensor_pool_fn=pool_fn if FLAGS.use_pool else None,
        add_summaries=is_chief)
    if is_chief:
      tfgan.eval.add_regularization_loss_summaries(gan_model)
    # Swap in the REINFORCE generator objective and add the sentence
    # auto-encoder reconstruction loss.
    gan_loss = rl_loss(gan_model, gan_loss, features['classes'],
                       features['scores'], features['num'],
                       add_summaries=is_chief)
    sen_ae_loss = sentence_ae(gan_model, features, labels, is_chief)
    loss = gan_loss.generator_loss + gan_loss.discriminator_loss + sen_ae_loss
    gan_loss = gan_loss._replace(
        generator_loss=gan_loss.generator_loss + sen_ae_loss)
  with tf.name_scope('train'):
    gen_opt = tf.train.AdamOptimizer(params.gen_lr, 0.5)
    dis_opt = tf.train.AdamOptimizer(params.dis_lr, 0.5)
    if params.multi_gpu:
      gen_opt = tf.contrib.estimator.TowerOptimizer(gen_opt)
      dis_opt = tf.contrib.estimator.TowerOptimizer(dis_opt)
    train_ops = tfgan.gan_train_ops(
        gan_model,
        gan_loss,
        generator_optimizer=gen_opt,
        discriminator_optimizer=dis_opt,
        transform_grads_fn=transform_grads_fn,
        summarize_gradients=is_chief,
        check_for_unused_update_ops=not FLAGS.use_pool,
        aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N)
    train_op = train_ops.global_step_inc_op
    train_hooks = get_sequential_train_hooks()(train_ops)
  # Summary the generated caption on the fly.
  if is_chief:
    with open('data/word_counts.txt', 'r') as f:
      dic = list(f)
    dic = [i.split()[0] for i in dic]
    dic.append('<unk>')
    dic = tf.convert_to_tensor(dic)
    sentence = crop_sentence(gan_model.generated_data[0][0], FLAGS.end_id)
    sentence = tf.gather(dic, sentence)
    real = crop_sentence(gan_model.real_data[0], FLAGS.end_id)
    real = tf.gather(dic, real)
    train_hooks.append(
        tf.train.LoggingTensorHook({'fake': sentence, 'real': real},
                                   every_n_iter=100))
    tf.summary.text('fake', sentence)
    tf.summary.image('im', features['im'][None, 0])
  # Savers used only for initialization from pre-trained checkpoints.
  gen_saver = tf.train.Saver(tf.trainable_variables('Generator'))
  dis_var = []
  dis_var.extend(tf.trainable_variables('Discriminator/rnn'))
  dis_var.extend(tf.trainable_variables('Discriminator/embedding'))
  dis_var.extend(tf.trainable_variables('Discriminator/fc'))
  dis_saver = tf.train.Saver(dis_var)

  def init_fn(scaffold, session):
    # Restore InceptionV4 always; generator/discriminator only when their
    # warm-start checkpoints are provided.
    inc_saver.restore(session, FLAGS.inc_ckpt)
    if FLAGS.imcap_ckpt:
      gen_saver.restore(session, FLAGS.imcap_ckpt)
    if FLAGS.sae_ckpt:
      dis_saver.restore(session, FLAGS.sae_ckpt)

  scaffold = tf.train.Scaffold(init_fn=init_fn)
  return tf.estimator.EstimatorSpec(
      mode=mode,
      loss=loss,
      train_op=train_op,
      scaffold=scaffold,
      training_hooks=train_hooks)
def main(_):
  """Build the Estimator from FLAGS and run training to max_steps."""
  os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'
  if FLAGS.multi_gpu:
    validate_batch_size_for_multi_gpu(FLAGS.batch_size)
    # Replicate the model across all visible GPUs, averaging the losses.
    model_function = tf.contrib.estimator.replicate_model_fn(
        model_fn,
        loss_reduction=tf.losses.Reduction.MEAN)
  else:
    model_function = model_fn
  sess_config = tf.ConfigProto(
      allow_soft_placement=True,
      intra_op_parallelism_threads=FLAGS.intra_op_parallelism_threads,
      inter_op_parallelism_threads=FLAGS.inter_op_parallelism_threads,
      gpu_options=tf.GPUOptions(allow_growth=True))
  run_config = tf.estimator.RunConfig(
      session_config=sess_config,
      save_checkpoints_steps=FLAGS.save_checkpoint_steps,
      save_summary_steps=FLAGS.save_summary_steps,
      keep_checkpoint_max=100)
  train_input_fn = functools.partial(input_fn, batch_size=FLAGS.batch_size)
  estimator = tf.estimator.Estimator(
      model_fn=model_function,
      model_dir=FLAGS.job_dir,
      config=run_config,
      params=FLAGS)
  estimator.train(train_input_fn, max_steps=FLAGS.max_steps)


if __name__ == '__main__':
  tf.app.run()
| 37.368644
| 95
| 0.719753
|
4a010f855699e817012c4878f2ddbab0d3ba09ae
| 1,063
|
py
|
Python
|
tkaligner/load_paras.py
|
ffreemt/tkaligner-bumblebee
|
ccc218c525508c13a16bd4e40fa039cf234bebf1
|
[
"MIT"
] | null | null | null |
tkaligner/load_paras.py
|
ffreemt/tkaligner-bumblebee
|
ccc218c525508c13a16bd4e40fa039cf234bebf1
|
[
"MIT"
] | null | null | null |
tkaligner/load_paras.py
|
ffreemt/tkaligner-bumblebee
|
ccc218c525508c13a16bd4e40fa039cf234bebf1
|
[
"MIT"
] | null | null | null |
"""
load_paras
"""
import os
from typing import List
from pathlib import Path
import chardet
import logzero
from logzero import logger
_ = os.environ.get("ALIGNER_DEBUG")
# Verbose logging when ALIGNER_DEBUG is "1" or "true" (case-insensitive):
# 10 == logging.DEBUG, 20 == logging.INFO.
if _ is not None and (_ == "1" or _.lower() == "true"):
    logzero.loglevel(10)
else:
    logzero.loglevel(20)
def load_paras(filepath: str) -> List[str]:
    """Read a text file and return its non-empty, stripped lines.

    Decoding is attempted as utf-8, then gbk, then whatever chardet
    guesses from the first 5000 bytes.  Any other read failure is logged
    and an empty list is returned.
    """
    path = Path(filepath)
    try:
        text = path.read_text("utf-8")
    except UnicodeDecodeError:
        try:
            text = path.read_text("gbk")
        except UnicodeDecodeError:
            guessed = chardet.detect(path.read_bytes()[:5000]).get("encoding")
            text = path.read_text(guessed)
            # rid of some strange chars
            text = text.replace("\u3000", "")
    except Exception as exc:
        logger.error("Path.readtext() exc: %s, return **[]** ", exc)
        return []
    stripped = (line.strip() for line in text.split("\n"))
    return [line for line in stripped if line]
| 24.72093
| 78
| 0.593603
|
4a010fb9cf01590333e8cfc98273b4543440e7c9
| 11,739
|
py
|
Python
|
lib/errors.py
|
apyrgio/snf-ganeti
|
c59bb92f5bf4a98d90b4f10fb509a5a2f11c65b7
|
[
"BSD-2-Clause"
] | null | null | null |
lib/errors.py
|
apyrgio/snf-ganeti
|
c59bb92f5bf4a98d90b4f10fb509a5a2f11c65b7
|
[
"BSD-2-Clause"
] | null | null | null |
lib/errors.py
|
apyrgio/snf-ganeti
|
c59bb92f5bf4a98d90b4f10fb509a5a2f11c65b7
|
[
"BSD-2-Clause"
] | null | null | null |
#
#
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Ganeti exception handling.
"""
from ganeti import constants

# Convenience aliases for the error-classification codes defined in
# ganeti.constants.  OpPrereqError (below) takes one of these as its
# second argument to classify the failure; see ganeti.constants for the
# meaning of each individual code.
ECODE_RESOLVER = constants.ERRORS_ECODE_RESOLVER
ECODE_NORES = constants.ERRORS_ECODE_NORES
ECODE_TEMP_NORES = constants.ERRORS_ECODE_TEMP_NORES
ECODE_INVAL = constants.ERRORS_ECODE_INVAL
ECODE_STATE = constants.ERRORS_ECODE_STATE
ECODE_NOENT = constants.ERRORS_ECODE_NOENT
ECODE_EXISTS = constants.ERRORS_ECODE_EXISTS
ECODE_NOTUNIQUE = constants.ERRORS_ECODE_NOTUNIQUE
ECODE_FAULT = constants.ERRORS_ECODE_FAULT
ECODE_ENVIRON = constants.ERRORS_ECODE_ENVIRON
ECODE_ALL = constants.ERRORS_ECODE_ALL
# --- Exception hierarchy -----------------------------------------------------
# The classes below are empty subclasses used purely for type-based error
# discrimination; each docstring describes when the exception is raised.
class GenericError(Exception):
  """Base exception for Ganeti.

  """
class LockError(GenericError):
  """Lock error exception.

  This signifies problems in the locking subsystem.

  """
class PidFileLockError(LockError):
  """PID file is already locked by another process.

  """
class HypervisorError(GenericError):
  """Hypervisor-related exception.

  This is raised in case we can't communicate with the hypervisor
  properly.

  """
class HotplugError(HypervisorError):
  """Hotplug-related exception.

  This is raised in case a hotplug action fails or is not supported.
  It is currently used only by KVM hypervisor.

  """
class ProgrammerError(GenericError):
  """Programming-related error.

  This is raised in cases we determine that the calling conventions
  have been violated, meaning we got some desynchronisation between
  parts of our code. It signifies a real programming bug.

  """
class BlockDeviceError(GenericError):
  """Block-device related exception.

  This is raised in case we can't setup the instance's block devices
  properly.

  """
class ConfigurationError(GenericError):
  """Configuration related exception.

  Things like having an instance with a primary node that doesn't
  exist in the config or such raise this exception.

  """
class ConfigVersionMismatch(ConfigurationError):
  """Version mismatch in the configuration file.

  The error has two arguments: the expected and the actual found
  version.

  """
class AddressPoolError(GenericError):
  """Errors related to IP address pools.

  """
class ReservationError(GenericError):
  """Errors reserving a resource.

  """
class RemoteError(GenericError):
  """Programming-related error on remote call.

  This is raised when an unhandled error occurs in a call to a
  remote node. It usually signifies a real programming bug.

  """
class SignatureError(GenericError):
  """Error authenticating a remote message.

  This is raised when the hmac signature on a message doesn't verify correctly
  to the message itself. It can happen because of network unreliability or
  because of spurious traffic.

  """
class ParameterError(GenericError):
  """A passed parameter to a command is invalid.

  This is raised when the parameter passed to a request function is
  invalid. Correct code should have verified this before passing the
  request structure.

  The argument to this exception should be the parameter name.

  """
class ResultValidationError(GenericError):
  """The iallocation results fails validation.

  """
class OpPrereqError(GenericError):
  """Prerequisites for the OpCode are not fulfilled.

  This exception has two arguments: an error message, and one of the
  ECODE_* codes.

  """
class OpExecError(GenericError):
  """Error during OpCode execution.

  """
class OpResultError(GenericError):
  """Issue with OpCode result.

  """
class DeviceCreationError(GenericError):
  """Error raised during the creation of a device.

  Instances carry the list of devices that had already been created
  before the failure, as (node, device) pairs, so that callers can
  clean up the partial work.

  """
  def __init__(self, message, created_devices):
    """Store the human-readable message and the partial creation list."""
    super(DeviceCreationError, self).__init__()
    self.message = message
    self.created_devices = created_devices

  def __str__(self):
    """Render only the message, not the device list."""
    return self.message
# --- Job-queue, parsing and daemon-related exceptions ------------------------
class OpCodeUnknown(GenericError):
  """Unknown opcode submitted.

  This signifies a mismatch between the definitions on the client and
  server side.

  """
class JobLost(GenericError):
  """Submitted job lost.

  The job was submitted but it cannot be found in the current job
  list.

  """
class JobFileCorrupted(GenericError):
  """Job file could not be properly decoded/restored.

  """
class ResolverError(GenericError):
  """Host name cannot be resolved.

  This is not a normal situation for Ganeti, as we rely on having a
  working resolver.

  The non-resolvable hostname is available as the first element of the
  args tuple; the other two elements of the tuple are the first two
  args of the socket.gaierror exception (error code and description).

  """
class HooksFailure(GenericError):
  """A generic hook failure.

  This signifies usually a setup misconfiguration.

  """
class HooksAbort(HooksFailure):
  """A required hook has failed.

  This caused an abort of the operation in the initial phase. This
  exception always has an attribute args which is a list of tuples of:
    - node: the source node on which this hooks has failed
    - script: the name of the script which aborted the run

  """
class UnitParseError(GenericError):
  """Unable to parse size unit.

  """
class ParseError(GenericError):
  """Generic parse error.

  Raised when unable to parse user input.

  """
class TypeEnforcementError(GenericError):
  """Unable to enforce data type.

  """
class X509CertError(GenericError):
  """Invalid X509 certificate.

  This error has two arguments: the certificate filename and the error cause.

  """
class TagError(GenericError):
  """Generic tag error.

  The argument to this exception will show the exact error.

  """
class CommandError(GenericError):
  """External command error.

  """
class StorageError(GenericError):
  """Storage-related exception.

  """
class InotifyError(GenericError):
  """Error raised when there is a failure setting up an inotify watcher.

  """
# NOTE: deliberately NOT a GenericError subclass -- see the docstring.
class QuitGanetiException(Exception):
  """Signal Ganeti that it must quit.

  This is not necessarily an error (and thus not a subclass of
  GenericError), but it's an exceptional circumstance and it is thus
  treated. This exception should be instantiated with two values. The
  first one will specify the return code to the caller, and the second
  one will be the returned result (either as an error or as a normal
  result). Usually only the leave cluster rpc call should return
  status True (as there it's expected we quit), every other call will
  return status False (as a critical error was encountered).

  Examples::

    # Return a result of "True" to the caller, but quit ganeti afterwards
    raise QuitGanetiException(True, None)
    # Send an error to the caller, and quit ganeti
    raise QuitGanetiException(False, "Fatal safety violation, shutting down")

  """
class JobQueueError(GenericError):
  """Job queue error.

  """
class JobQueueDrainError(JobQueueError):
  """Job queue is marked for drain error.

  This is raised when a job submission attempt is made but the queue
  is marked for drain.

  """
class JobQueueFull(JobQueueError):
  """Job queue full error.

  Raised when job queue size reached its hard limit.

  """
class ConfdMagicError(GenericError):
  """A magic fourcc error in Ganeti confd.

  Errors processing the fourcc in ganeti confd datagrams.

  """
class ConfdClientError(GenericError):
  """A magic fourcc error in Ganeti confd.

  Errors in the confd client library.

  """
class UdpDataSizeError(GenericError):
  """UDP payload too big.

  """
class NoCtypesError(GenericError):
  """python ctypes module is not found in the system.

  """
class IPAddressError(GenericError):
  """Generic IP address error.

  """
class LuxiError(GenericError):
  """LUXI error.

  """
class QueryFilterParseError(ParseError):
  """Error while parsing query filter.

  This exception must be instantiated with two values. The first one is a
  string with an error description, the second one is an instance of a subclass
  of C{pyparsing.ParseBaseException} (used to display the exact error
  location).

  """
  def GetDetails(self):
    """Returns a list of strings with details about the error.

    @rtype: list of str or None
    @return: three lines (the offending input line, a caret marker under
      the error column, and the error text), or None when the exception
      was not instantiated with exactly two arguments

    """
    try:
      (_, inner) = self.args
    except (IndexError, ValueError):
      # BUG FIX: tuple unpacking of a wrong-sized args tuple raises
      # ValueError, not IndexError, so the original "except IndexError"
      # could never fire and a mis-constructed exception made
      # GetDetails() itself blow up instead of returning None.
      return None
    return [str(inner.line),
            (" " * (inner.column - 1)) + "^",
            str(inner)]
class RapiTestResult(GenericError):
  """Exception containing results from RAPI test utilities.

  """
class FileStoragePathError(GenericError):
  """Error from file storage path validation.

  """
# errors should be added above
# (the helper functions below rely on module globals to resolve classes,
# so new error classes must be defined before this point)
def GetErrorClass(name):
  """Look up a Ganeti exception class by name.

  @type name: str
  @param name: the exception class name
  @rtype: class
  @return: the class object, or None when the name does not refer to a
    GenericError subclass defined in this module

  """
  candidate = globals().get(name, None)
  if candidate is None:
    return None
  if isinstance(candidate, type(Exception)) and issubclass(candidate, GenericError):
    return candidate
  return None
def EncodeException(err):
  """Serialise an exception to a format L{MaybeRaise} will recognise.

  The result is a (exception class name, args) pair that can later be
  turned back into an exception by L{GetEncodedError}/L{MaybeRaise}.

  @type err: GenericError child
  @param err: usually a child of GenericError (but any exception
      will be accepted)
  @rtype: tuple
  @return: tuple of (exception name, exception arguments)

  """
  return (type(err).__name__, err.args)
def GetEncodedError(result):
  """If this looks like an encoded Ganeti exception, return it.

  Checks whether the argument matches the (name, args) shape produced by
  L{EncodeException} and whether the name resolves to a known error
  class; returns the pair (class, args tuple) on success, None otherwise.

  """
  sequence_types = (tuple, list)
  if not isinstance(result, sequence_types):
    return None
  if len(result) != 2 or not isinstance(result[1], sequence_types):
    return None
  # custom ganeti errors
  errcls = GetErrorClass(result[0])
  if not errcls:
    return None
  return (errcls, tuple(result[1]))
def MaybeRaise(result):
  """If this looks like an encoded Ganeti exception, raise it.

  If the argument does not look like an encoding produced by
  L{EncodeException}, the function is a no-op.

  """
  decoded = GetEncodedError(result)
  if decoded is None:
    return
  (errcls, args) = decoded
  # pylint: disable=W0142
  raise errcls(*args)
| 22.972603
| 79
| 0.734304
|
4a0110adfe7a58493c093f09254a0da46d4710b4
| 14,203
|
py
|
Python
|
pymc3/tests/test_examples.py
|
AmitKus/pymc3
|
5e305543a13d248cb8cd67522527c586bde45165
|
[
"Apache-2.0"
] | null | null | null |
pymc3/tests/test_examples.py
|
AmitKus/pymc3
|
5e305543a13d248cb8cd67522527c586bde45165
|
[
"Apache-2.0"
] | null | null | null |
pymc3/tests/test_examples.py
|
AmitKus/pymc3
|
5e305543a13d248cb8cd67522527c586bde45165
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 The PyMC Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import matplotlib
import numpy as np
import pandas as pd
import pymc3 as pm
import theano.tensor as tt
import pytest
import theano
from pymc3.theanof import floatX
from packaging import version
from .helpers import SeededTest
# Use the non-interactive "Agg" backend so the tests can run headless.
# matplotlib >= 3.3 removed the ``warn`` keyword from matplotlib.use(),
# so the call signature is chosen based on the installed version.
if version.parse(matplotlib.__version__) < version.parse("3.3"):
    matplotlib.use("Agg", warn=False)
else:
    matplotlib.use("Agg")
def get_city_data():
    """Load and join the Minnesota radon survey data.

    Reads the srrs2 household radon measurements and the cty county-level
    table shipped with pymc3, restricts to Minnesota, builds a county FIPS
    code on both tables, log-transforms the radon activity (clamping zero
    readings to 0.1 first), and attaches a dense 0..n-1 ``group`` index
    per county.
    """
    data = pd.read_csv(pm.get_data("srrs2.dat"))
    cty_data = pd.read_csv(pm.get_data("cty.dat"))
    data = data[data.state == "MN"]
    data["fips"] = data.stfips * 1000 + data.cntyfips
    cty_data["fips"] = cty_data.stfips * 1000 + cty_data.ctfips
    # Zero activity readings would make log() diverge; clamp them to 0.1.
    data["lradon"] = np.log(np.where(data.activity == 0, 0.1, data.activity))
    data = data.merge(cty_data, "inner", on="fips")
    unique = data[["fips"]].drop_duplicates()
    unique["group"] = np.arange(len(unique))
    # NOTE: the original code called ``unique.set_index("fips")`` here and
    # discarded the result -- DataFrame.set_index is not in-place by
    # default, so the call had no effect and has been removed; the merge
    # below needs "fips" as a column anyway.
    return data.merge(unique, "inner", on="fips")
class TestARM5_4(SeededTest):
    """Logistic regression for the arsenic well-switching data (ARM ch. 5.4)."""

    def build_model(self):
        data = pd.read_csv(
            pm.get_data("wells.dat"),
            delimiter=" ",
            index_col="id",
            dtype={"switch": np.int8},
        )
        data.dist /= 100
        data.educ /= 4
        col = data.columns
        P = data[col[1:]]
        # Centre the predictors and add an explicit intercept column.
        P -= P.mean()
        P["1"] = 1
        with pm.Model() as model:
            effects = pm.Normal("effects", mu=0, sigma=100, shape=len(P.columns))
            logit_p = tt.dot(floatX(np.array(P)), effects)
            pm.Bernoulli("s", logit_p=logit_p, observed=floatX(data.switch.values))
        return model

    def test_run(self):
        model = self.build_model()
        with model:
            pm.sample(50, tune=50)

class TestARM12_6(SeededTest):
    """Multilevel (varying-intercept) radon model (ARM ch. 12.6)."""

    def build_model(self):
        data = get_city_data()
        self.obs_means = data.groupby("fips").lradon.mean().to_numpy()
        lradon = data.lradon.to_numpy()
        floor = data.floor.to_numpy()
        group = data.group.to_numpy()
        with pm.Model() as model:
            groupmean = pm.Normal("groupmean", 0, 10.0 ** -2.0)
            groupsd = pm.Uniform("groupsd", 0, 10.0)
            sd = pm.Uniform("sd", 0, 10.0)
            floor_m = pm.Normal("floor_m", 0, 5.0 ** -2.0)
            means = pm.Normal("means", groupmean, groupsd ** -2.0, shape=len(self.obs_means))
            pm.Normal("lr", floor * floor_m + means[group], sd ** -2.0, observed=lradon)
        return model

    # Not named test_*: pytest does not collect it, which keeps this slow
    # MAP + NUTS run out of the regular suite.
    def too_slow(self):
        model = self.build_model()
        start = {
            "groupmean": self.obs_means.mean(),
            "groupsd_interval__": 0,
            "sd_interval__": 0,
            "means": self.obs_means,
            "floor_m": 0.0,
        }
        with model:
            start = pm.find_MAP(
                start=start,
                vars=[model["groupmean"], model["sd_interval__"], model["floor_m"]],
            )
            step = pm.NUTS(model.vars, scaling=start)
            pm.sample(50, step=step, start=start)

class TestARM12_6Uranium(SeededTest):
    """As TestARM12_6, with a county-level uranium covariate added."""

    def build_model(self):
        data = get_city_data()
        self.obs_means = data.groupby("fips").lradon.mean()
        lradon = data.lradon.to_numpy()
        floor = data.floor.to_numpy()
        group = data.group.to_numpy()
        ufull = data.Uppm.to_numpy()
        with pm.Model() as model:
            groupmean = pm.Normal("groupmean", 0, 10.0 ** -2.0)
            groupsd = pm.Uniform("groupsd", 0, 10.0)
            sd = pm.Uniform("sd", 0, 10.0)
            floor_m = pm.Normal("floor_m", 0, 5.0 ** -2.0)
            u_m = pm.Normal("u_m", 0, 5.0 ** -2)
            means = pm.Normal("means", groupmean, groupsd ** -2.0, shape=len(self.obs_means))
            pm.Normal(
                "lr",
                floor * floor_m + means[group] + ufull * u_m,
                sd ** -2.0,
                observed=lradon,
            )
        return model

    # Not named test_*: pytest does not collect it (slow manual run only).
    def too_slow(self):
        model = self.build_model()
        with model:
            start = pm.Point(
                {
                    "groupmean": self.obs_means.mean(),
                    "groupsd_interval__": 0,
                    "sd_interval__": 0,
                    "means": np.array(self.obs_means),
                    "u_m": np.array([0.72]),
                    "floor_m": 0.0,
                }
            )
            start = pm.find_MAP(start, model.vars[:-1])
            H = model.fastd2logp()
            h = np.diag(H(start))
            step = pm.HamiltonianMC(model.vars, h)
            pm.sample(50, step=step, start=start)
def build_disaster_model(masked=False):
    """Build the classic coal-mining-disasters switchpoint model.

    :param masked: if True, two observations are replaced by -1 and wrapped
        in a masked array, so the model treats them as missing data.
    """
    # fmt: off
    disasters_data = np.array([4, 5, 4, 0, 1, 4, 3, 4, 0, 6, 3, 3, 4, 0, 2, 6,
                               3, 3, 5, 4, 5, 3, 1, 4, 4, 1, 5, 5, 3, 4, 2, 5,
                               2, 2, 3, 4, 2, 1, 3, 2, 2, 1, 1, 1, 1, 3, 0, 0,
                               1, 0, 1, 1, 0, 0, 3, 1, 0, 3, 2, 2, 0, 1, 1, 1,
                               0, 1, 0, 1, 0, 0, 0, 2, 1, 0, 0, 0, 1, 1, 0, 2,
                               3, 3, 1, 1, 2, 1, 1, 1, 1, 2, 4, 2, 0, 0, 1, 4,
                               0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1])
    # fmt: on
    if masked:
        disasters_data[[23, 68]] = -1
        disasters_data = np.ma.masked_values(disasters_data, value=-1)
    years = len(disasters_data)
    with pm.Model() as model:
        # Prior for distribution of switchpoint location
        switchpoint = pm.DiscreteUniform("switchpoint", lower=0, upper=years)
        # Priors for pre- and post-switch mean number of disasters
        early_mean = pm.Exponential("early_mean", lam=1.0)
        late_mean = pm.Exponential("late_mean", lam=1.0)
        # Allocate appropriate Poisson rates to years before and after current
        # switchpoint location
        idx = np.arange(years)
        rate = tt.switch(switchpoint >= idx, early_mean, late_mean)
        # Data likelihood
        pm.Poisson("disasters", rate, observed=disasters_data)
    return model
@pytest.mark.xfail(condition=(theano.config.floatX == "float32"), reason="Fails on float32")
class TestDisasterModel(SeededTest):
    """Sampling smoke tests for the coal-mining-disasters model, with and
    without missing observations."""

    # Time series of recorded coal mining disasters in the UK from 1851 to 1962
    def test_disaster_model(self):
        model = build_disaster_model(masked=False)
        with model:
            # Initial values for stochastic nodes
            start = {"early_mean": 2.0, "late_mean": 3.0}
            # Use slice sampler for means (other variables auto-selected)
            step = pm.Slice([model.early_mean_log__, model.late_mean_log__])
            tr = pm.sample(500, tune=50, start=start, step=step, chains=2)
            pm.summary(tr)

    def test_disaster_model_missing(self):
        model = build_disaster_model(masked=True)
        with model:
            # Initial values for stochastic nodes
            start = {"early_mean": 2.0, "late_mean": 3.0}
            # Use slice sampler for means (other variables auto-selected)
            step = pm.Slice([model.early_mean_log__, model.late_mean_log__])
            tr = pm.sample(500, tune=50, start=start, step=step, chains=2)
            pm.summary(tr)
class TestGLMLinear(SeededTest):
    """Smoke test for the formula-based GLM wrapper on simulated linear data."""

    def build_model(self):
        """Simulate y = 1 + 2*x + noise and wrap it in a GLM model."""
        n_points = 50
        intercept, slope = 1, 2
        self.x = np.linspace(0, 1, n_points)
        noise = np.random.normal(scale=0.5, size=n_points)
        self.y = intercept + slope * self.x + noise
        with pm.Model() as model:
            pm.GLM.from_formula("y ~ x", dict(x=self.x, y=self.y))
        return model

    def test_run(self):
        """MAP-initialise with Powell, then draw a few Slice samples."""
        with self.build_model():
            start = pm.find_MAP(method="Powell")
            pm.sample(50, pm.Slice(), start=start)
class TestLatentOccupancy(SeededTest):
    """
    From the PyMC example list
    latent_occupancy.py

    Simple model demonstrating the estimation of occupancy, using latent variables. Suppose
    a population of n sites, with some proportion pi being occupied. Each site is surveyed,
    yielding an array of counts, y:

    y = [3, 0, 0, 2, 1, 0, 1, 0, ..., ]

    This is a classic zero-inflated count problem, where more zeros appear in the data than would
    be predicted by a simple Poisson model. We have, in fact, a mixture of models; one, conditional
    on occupancy, with a poisson mean of theta, and another, conditional on absence, with mean zero.
    One way to tackle the problem is to model the latent state of 'occupancy' as a Bernoulli
    variable at each site, with some unknown probability:

    z_i ~ Bern(pi)

    These latent variables can then be used to generate an array of Poisson parameters:

    t_i = theta (if z_i=1) or 0 (if z_i=0)

    Hence, the likelihood is just:

    y_i = Poisson(t_i)

    (Note in this elementary model, we are ignoring the issue of imperfect detection.)

    Created by Chris Fonnesbeck on 2008-07-28.
    Copyright (c) 2008 University of Otago. All rights reserved.
    """

    def setup_method(self):
        super().setup_method()
        # Sample size
        n = 100
        # True mean count, given occupancy
        theta = 2.1
        # True occupancy
        pi = 0.4
        # Simulate some data: Bernoulli occupancy times Poisson counts.
        self.y = ((np.random.random(n) < pi) * np.random.poisson(lam=theta, size=n)).astype("int16")

    def build_model(self):
        with pm.Model() as model:
            # Estimated occupancy
            psi = pm.Beta("psi", 1, 1)
            # Latent variable for occupancy
            pm.Bernoulli("z", psi, shape=self.y.shape)
            # Estimated mean count
            theta = pm.Uniform("theta", 0, 100)
            # Poisson likelihood
            pm.ZeroInflatedPoisson("y", theta, psi, observed=self.y)
        return model

    def test_run(self):
        model = self.build_model()
        with model:
            start = {
                "psi": np.array(0.5, dtype="f"),
                "z": (self.y > 0).astype("int16"),
                "theta": np.array(5, dtype="f"),
            }
            # Continuous parameters get Metropolis, the binary latent state
            # gets a dedicated BinaryMetropolis sampler.
            step_one = pm.Metropolis([model.theta_interval__, model.psi_logodds__])
            step_two = pm.BinaryMetropolis([model.z])
            pm.sample(50, step=[step_one, step_two], start=start, chains=1)
@pytest.mark.xfail(
    condition=(theano.config.floatX == "float32"),
    reason="Fails on float32 due to starting inf at starting logP",
)
class TestRSV(SeededTest):
    """
    This model estimates the population prevalence of respiratory syncytial virus
    (RSV) among children in Amman, Jordan, based on 3 years of admissions diagnosed
    with RSV to Al Bashir hospital.

    To estimate this parameter from raw counts of diagnoses, we need to establish
    the population of 1-year-old children from which the diagnosed individuals
    were sampled. This involved correcting census data (national estimate of
    1-year-olds) for the proportion of the population in the city, as well as for
    the market share of the hospital. The latter is based on expert estimate, and
    hence encoded as a prior.
    """

    def build_model(self):
        # 1-year-old children in Jordan
        kids = np.array([180489, 191817, 190830])
        # Proportion of population in Amman
        amman_prop = 0.35
        # infant RSV cases in Al Bashir hostpital
        rsv_cases = np.array([40, 59, 65])
        with pm.Model() as model:
            # Al Bashir hospital market share
            market_share = pm.Uniform("market_share", 0.5, 0.6)
            # Number of 1 y.o. in Amman
            n_amman = pm.Binomial("n_amman", kids, amman_prop, shape=3)
            # Prior probability
            prev_rsv = pm.Beta("prev_rsv", 1, 5, shape=3)
            # RSV in Amman
            y_amman = pm.Binomial("y_amman", n_amman, prev_rsv, shape=3, testval=100)
            # Likelihood for number with RSV in hospital (assumes Pr(hosp | RSV) = 1)
            pm.Binomial("y_hosp", y_amman, market_share, observed=rsv_cases)
        return model

    def test_run(self):
        with self.build_model():
            pm.sample(50, step=[pm.NUTS(), pm.Metropolis()])
class TestMultilevelNormal(SeededTest):
    """
    Toy three-level normal model sampled using MLDA. The finest model is a
    Normal distribution with unknown mean and sigma=1.0 where we have only one
    observed datum (y = 11.0). The coarse models are the same but with the observed
    datum changed to y = 11.5 and y = 12.0. This is a very simple way to create
    a 3-level system of "approximate" coarse models.
    """

    def build_models(self):
        np.random.seed(1234)
        true_mean = 11.0
        y = np.array([true_mean])

        # Coarsest approximation: observed datum shifted by +1.0.
        with pm.Model() as model_coarse_0:
            sigma = 1.0
            x_coeff = pm.Normal("x", true_mean, sigma=10.0)
            pm.Normal("y", mu=x_coeff, sigma=sigma, observed=y + 1.0)

        # Intermediate approximation: observed datum shifted by +0.5.
        with pm.Model() as model_coarse_1:
            sigma = 1.0
            x_coeff = pm.Normal("x", true_mean, sigma=10.0)
            pm.Normal("y", mu=x_coeff, sigma=sigma, observed=y + 0.5)

        coarse_models = [model_coarse_0, model_coarse_1]

        # Finest model: the actual datum.
        with pm.Model() as model:
            sigma = 1.0
            x_coeff = pm.Normal("x", true_mean, sigma=10.0)
            pm.Normal("y", mu=x_coeff, sigma=sigma, observed=y)

        return model, coarse_models

    def test_run(self):
        model, coarse_models = self.build_models()
        with model:
            step = pm.MLDA(subsampling_rates=2, coarse_models=coarse_models)
            pm.sample(draws=50, chains=2, tune=50, step=step)
| 37.773936
| 100
| 0.588256
|
4a0110d516daaadeace250c442554779e91bce79
| 2,616
|
py
|
Python
|
src/poetry/console/commands/export.py
|
mmacchia/poetry
|
7c53db9680d021bac99cc366a3bbc88ebbffdf0f
|
[
"MIT"
] | null | null | null |
src/poetry/console/commands/export.py
|
mmacchia/poetry
|
7c53db9680d021bac99cc366a3bbc88ebbffdf0f
|
[
"MIT"
] | null | null | null |
src/poetry/console/commands/export.py
|
mmacchia/poetry
|
7c53db9680d021bac99cc366a3bbc88ebbffdf0f
|
[
"MIT"
] | null | null | null |
from cleo.helpers import option
from poetry.console.commands.command import Command
from poetry.utils.exporter import Exporter
class ExportCommand(Command):
    """CLI command that exports the resolved lock file to another format."""

    name = "export"
    description = "Exports the lock file to alternative formats."

    options = [
        option(
            "format",
            "f",
            "Format to export to. Currently, only requirements.txt is supported.",
            flag=False,
            default=Exporter.FORMAT_REQUIREMENTS_TXT,
        ),
        option("output", "o", "The name of the output file.", flag=False),
        option("without-hashes", None, "Exclude hashes from the exported file."),
        option(
            "without-urls",
            None,
            "Exclude source repository urls from the exported file.",
        ),
        option("dev", None, "Include development dependencies."),
        option(
            "extras",
            "E",
            "Extra sets of dependencies to include.",
            flag=False,
            multiple=True,
        ),
        option("with-credentials", None, "Include credentials for extra indices."),
    ]

    def handle(self) -> None:
        """Validate the requested format, make sure an up-to-date lock file
        exists (locking if necessary), and hand off to the Exporter."""
        fmt = self.option("format")
        if fmt not in Exporter.ACCEPTED_FORMATS:
            raise ValueError("Invalid export format: {}".format(fmt))

        output = self.option("output")

        locker = self.poetry.locker
        if not locker.is_locked():
            self.line_error("<comment>The lock file does not exist. Locking.</comment>")
            options = []
            # BUG FIX: the original appended ("-vvv", None) tuples here and
            # then called " ".join(options), which raises TypeError because
            # str.join only accepts strings -- so the "lock file missing"
            # path always crashed.  Append the plain flag strings instead.
            if self.io.is_debug():
                options.append("-vvv")
            elif self.io.is_very_verbose():
                options.append("-vv")
            elif self.io.is_verbose():
                options.append("-v")
            self.call("lock", " ".join(options))

        if not locker.is_fresh():
            self.line_error(
                "<warning>"
                "Warning: The lock file is not up to date with "
                "the latest changes in pyproject.toml. "
                "You may be getting outdated dependencies. "
                "Run update to update them."
                "</warning>"
            )

        exporter = Exporter(self.poetry)
        exporter.export(
            fmt,
            self.poetry.file.parent,
            output or self.io,
            with_hashes=not self.option("without-hashes"),
            dev=self.option("dev"),
            extras=self.option("extras"),
            with_credentials=self.option("with-credentials"),
            with_urls=not self.option("without-urls"),
        )
| 32.7
| 88
| 0.545107
|
4a011196b92592cacd22c30b35c863cae0dc7f74
| 2,912
|
py
|
Python
|
djangopypi2/apps/pypi_frontend/distutils_request.py
|
peopledoc/djangopypi2
|
1356963cd103947058b951476ba8c32d108f19f6
|
[
"BSD-3-Clause"
] | null | null | null |
djangopypi2/apps/pypi_frontend/distutils_request.py
|
peopledoc/djangopypi2
|
1356963cd103947058b951476ba8c32d108f19f6
|
[
"BSD-3-Clause"
] | null | null | null |
djangopypi2/apps/pypi_frontend/distutils_request.py
|
peopledoc/djangopypi2
|
1356963cd103947058b951476ba8c32d108f19f6
|
[
"BSD-3-Clause"
] | null | null | null |
from logging import getLogger
from django.http import QueryDict
from django.http import HttpResponseNotAllowed
from django.utils.datastructures import MultiValueDict
from django.core.files.uploadedfile import TemporaryUploadedFile
from .distutils_views import ACTION_VIEWS
log = getLogger(__name__)
def _get_distutils_action(request):
if request.method == 'POST':
parse_distutils_request(request)
action = request.POST.get(':action', None)
else:
action = request.GET.get(':action', None)
return action
def is_distutils_request(request):
    """Return True when the request carries a distutils ':action' field."""
    action = _get_distutils_action(request)
    return action is not None
def handle_distutils_request(request):
    """Dispatch a distutils request to the view registered for its action.

    Unknown (or missing) actions are logged and answered with HTTP 405,
    listing the supported action names.
    """
    action = _get_distutils_action(request)
    if action in ACTION_VIEWS:
        return ACTION_VIEWS[action](request)
    log.error('Invalid action encountered: %r', action)
    return HttpResponseNotAllowed(ACTION_VIEWS.keys())
def _parse_header(header):
headers = {}
for kvpair in filter(lambda p: p,
map(lambda p: p.strip(),
header.split(';'))):
try:
key, value = kvpair.split("=",1)
except ValueError:
continue
headers[key.strip()] = value.strip('"')
return headers
def parse_distutils_request(request):
    """ This is being used because the built in request parser that Django uses,
    django.http.multipartparser.MultiPartParser is interpreting the POST data
    incorrectly and/or the post data coming from distutils is invalid.
    One portion of this is the end marker: \r\n\r\n (what Django expects)
    versus \n\n (what distutils is sending).

    Rebuilds request.POST and request.FILES in place from the raw multipart
    body.  NOTE: this module uses Python 2 syntax ("except Exception, e")
    throughout.
    """
    try:
        # The second line of the body is the multipart boundary separator.
        sep = request.raw_post_data.splitlines()[1]
    except:
        # NOTE(review): bare except -- this also swallows KeyboardInterrupt
        # and attribute errors; narrowing it to IndexError would be safer.
        raise ValueError('Invalid post data')
    request.POST = QueryDict('',mutable=True)
    try:
        request._files = MultiValueDict()
    except Exception, e:
        # Best effort: older/newer Django versions may not allow setting
        # _files directly.
        pass
    # Split the body on the boundary and process each non-empty part.
    for part in filter(lambda e: e.strip(), request.raw_post_data.split(sep)):
        try:
            header, content = part.lstrip().split('\n',1)
        except Exception, e:
            continue
        # Trim the single leading/trailing newline around the part content.
        if content.startswith('\n'):
            content = content[1:]
        if content.endswith('\n'):
            content = content[:-1]
        headers = _parse_header(header)
        if "name" not in headers:
            continue
        if "filename" in headers:
            # Parts with a filename become uploaded files ...
            dist = TemporaryUploadedFile(name=headers["filename"],
                                         size=len(content),
                                         content_type="application/gzip",
                                         charset='utf-8')
            dist.write(content)
            dist.seek(0)
            request.FILES.appendlist(headers['name'], dist)
        else:
            # ... everything else becomes a plain POST field.
            request.POST.appendlist(headers["name"],content)
    return
| 32.355556
| 80
| 0.607143
|
4a0111b8569ae9ff67260686003fd5d1f5387fa9
| 16,753
|
py
|
Python
|
haystack/nodes/preprocessor/preprocessor.py
|
ArzelaAscoIi/haystack
|
be8f50c9e3de4e264b3f345f5f4b9c9ec518ed08
|
[
"Apache-2.0"
] | 1
|
2022-03-06T02:13:15.000Z
|
2022-03-06T02:13:15.000Z
|
haystack/nodes/preprocessor/preprocessor.py
|
ArzelaAscoIi/haystack
|
be8f50c9e3de4e264b3f345f5f4b9c9ec518ed08
|
[
"Apache-2.0"
] | null | null | null |
haystack/nodes/preprocessor/preprocessor.py
|
ArzelaAscoIi/haystack
|
be8f50c9e3de4e264b3f345f5f4b9c9ec518ed08
|
[
"Apache-2.0"
] | 1
|
2022-03-23T18:17:02.000Z
|
2022-03-23T18:17:02.000Z
|
import logging
import re
from copy import deepcopy
from functools import partial, reduce
from itertools import chain
from typing import List, Optional, Generator, Set, Union
import nltk
from more_itertools import windowed
from tqdm import tqdm
from haystack.nodes.preprocessor import BasePreProcessor
logger = logging.getLogger(__name__)

# Maps ISO 639-1 language codes to the language names expected by
# nltk.tokenize.sent_tokenize (see PreProcessor's ``language`` argument).
iso639_to_nltk = {
    "ru": "russian",
    "sl": "slovene",
    "es": "spanish",
    "sv": "swedish",
    "tr": "turkish",
    "cs": "czech",
    "da": "danish",
    "nl": "dutch",
    "en": "english",
    "et": "estonian",
    "fi": "finnish",
    "fr": "french",
    "de": "german",
    "el": "greek",
    "it": "italian",
    "no": "norwegian",
    "pl": "polish",
    "pt": "portuguese",
}
class PreProcessor(BasePreProcessor):
    def __init__(
        self,
        clean_whitespace: bool = True,
        clean_header_footer: bool = False,
        clean_empty_lines: bool = True,
        split_by: str = "word",
        split_length: int = 200,
        split_overlap: int = 0,
        split_respect_sentence_boundary: bool = True,
        language: str = "en",
    ):
        """
        :param clean_header_footer: Use heuristic to remove footers and headers across different pages by searching
                                     for the longest common string. This heuristic uses exact matches and therefore
                                     works well for footers like "Copyright 2019 by XXX", but won't detect "Page 3 of 4"
                                     or similar.
        :param clean_whitespace: Strip whitespaces before or after each line in the text.
        :param clean_empty_lines: Remove more than two empty lines in the text.
        :param split_by: Unit for splitting the document. Can be "word", "sentence", or "passage". Set to None to disable splitting.
        :param split_length: Max. number of the above split unit (e.g. words) that are allowed in one document. For instance, if n -> 10 & split_by ->
                           "sentence", then each output document will have 10 sentences.
        :param split_overlap: Word overlap between two adjacent documents after a split.
                              Setting this to a positive number essentially enables the sliding window approach.
                              For example, if split_by -> `word`,
                              split_length -> 5 & split_overlap -> 2, then the splits would be like:
                              [w1 w2 w3 w4 w5, w4 w5 w6 w7 w8, w7 w8 w10 w11 w12].
                              Set the value to 0 to ensure there is no overlap among the documents after splitting.
        :param split_respect_sentence_boundary: Whether to split in partial sentences if split_by -> `word`. If set
                                                to True, the individual split will always have complete sentences &
                                                the number of words will be <= split_length.
        :param language: The language used by "nltk.tokenize.sent_tokenize" in iso639 format. Available options: "en", "es", "de", "fr" & many more.
        """
        # save init parameters to enable export of component config as YAML
        self.set_config(
            clean_whitespace=clean_whitespace,
            clean_header_footer=clean_header_footer,
            clean_empty_lines=clean_empty_lines,
            split_by=split_by,
            split_length=split_length,
            split_overlap=split_overlap,
            split_respect_sentence_boundary=split_respect_sentence_boundary,
        )
        # Make sure the NLTK "punkt" sentence tokenizer model is available
        # locally; download it on first use.
        try:
            nltk.data.find("tokenizers/punkt")
        except LookupError:
            nltk.download("punkt")
        self.clean_whitespace = clean_whitespace
        self.clean_header_footer = clean_header_footer
        self.clean_empty_lines = clean_empty_lines
        self.split_by = split_by
        self.split_length = split_length
        self.split_overlap = split_overlap
        self.split_respect_sentence_boundary = split_respect_sentence_boundary
        # Fall back to the raw value when the iso639 code is unknown, so full
        # nltk language names can also be passed directly.
        self.language = iso639_to_nltk.get(language, language)
        # Tracks one-off log messages already emitted (avoids repeats).
        self.print_log: Set[str] = set()
def process(
self,
documents: Union[dict, List[dict]],
clean_whitespace: Optional[bool] = None,
clean_header_footer: Optional[bool] = None,
clean_empty_lines: Optional[bool] = None,
split_by: Optional[str] = None,
split_length: Optional[int] = None,
split_overlap: Optional[int] = None,
split_respect_sentence_boundary: Optional[bool] = None,
) -> List[dict]:
"""
Perform document cleaning and splitting. Can take a single document or a list of documents as input and returns a list of documents.
"""
kwargs = {
"clean_whitespace": clean_whitespace,
"clean_header_footer": clean_header_footer,
"clean_empty_lines": clean_empty_lines,
"split_by": split_by,
"split_length": split_length,
"split_overlap": split_overlap,
"split_respect_sentence_boundary": split_respect_sentence_boundary,
}
ret = []
if type(documents) == dict:
ret = self._process_single(document=documents, **kwargs) # type: ignore
elif type(documents) == list:
ret = self._process_batch(documents=list(documents), **kwargs)
else:
raise Exception("documents provided to PreProcessor.prepreprocess() is not of type list nor Document")
return ret
def _process_single(
self,
document,
clean_whitespace: Optional[bool] = None,
clean_header_footer: Optional[bool] = None,
clean_empty_lines: Optional[bool] = None,
split_by: Optional[str] = None,
split_length: Optional[int] = None,
split_overlap: Optional[int] = None,
split_respect_sentence_boundary: Optional[bool] = None,
) -> List[dict]:
if clean_whitespace is None:
clean_whitespace = self.clean_whitespace
if clean_header_footer is None:
clean_header_footer = self.clean_header_footer
if clean_empty_lines is None:
clean_empty_lines = self.clean_empty_lines
if split_by is None:
split_by = self.split_by
if split_length is None:
split_length = self.split_length
if split_overlap is None:
split_overlap = self.split_overlap
if split_respect_sentence_boundary is None:
split_respect_sentence_boundary = self.split_respect_sentence_boundary
cleaned_document = self.clean(
document=document,
clean_whitespace=clean_whitespace,
clean_header_footer=clean_header_footer,
clean_empty_lines=clean_empty_lines,
)
split_documents = self.split(
document=cleaned_document,
split_by=split_by,
split_length=split_length,
split_overlap=split_overlap,
split_respect_sentence_boundary=split_respect_sentence_boundary,
)
return split_documents
def _process_batch(self, documents: List[dict], **kwargs) -> List[dict]:
    """Run ``_process_single`` over every document (with a tqdm progress bar)
    and flatten the per-document results into a single list."""
    per_document = (self._process_single(d, **kwargs) for d in tqdm(documents, unit="docs"))
    return [split_doc for splits in per_document for split_doc in splits]
def clean(
    self,
    document: dict,
    clean_whitespace: bool,
    clean_header_footer: bool,
    clean_empty_lines: bool,
) -> dict:
    """
    Clean a single document in place and return it. Depending on the flags,
    removes repeated headers/footers, strips per-line whitespace, and collapses
    runs of blank lines. The defaults for the flags come from
    PreProcessor.__init__().
    """
    content = document["content"]

    if clean_header_footer:
        # Drop the longest string repeated at the top/bottom of most pages.
        content = self._find_and_remove_header_footer(
            content, n_chars=300, n_first_pages_to_ignore=1, n_last_pages_to_ignore=1
        )

    if clean_whitespace:
        # Strip leading/trailing whitespace from every individual line.
        content = "\n".join(line.strip() for line in content.splitlines())

    if clean_empty_lines:
        # Collapse two-or-more consecutive newlines down to exactly two.
        content = re.sub(r"\n\n+", "\n\n", content)

    document["content"] = content
    return document
def split(
    self,
    document: dict,
    split_by: str,
    split_length: int,
    split_overlap: int,
    split_respect_sentence_boundary: bool,
) -> List[dict]:
    """Perform document splitting on a single document. This method can split on different units, at different lengths,
    with different strides. It can also respect sentence boundaries. Its exact functionality is defined by
    the parameters passed into PreProcessor.__init__(). Takes a single document as input and returns a list of documents.

    Each returned document is a deep copy of the input with its "content"
    replaced by one split and a "_split_id" index stored in its meta dict.
    """
    if not split_by:
        # Splitting disabled: return the document unchanged (as a 1-element list).
        return [document]

    if not split_length:
        raise Exception("split_length needs be set when using split_by.")

    if split_respect_sentence_boundary and split_by != "word":
        raise NotImplementedError("'split_respect_sentence_boundary=True' is only compatible with split_by='word'.")

    text = document["content"]

    if split_respect_sentence_boundary and split_by == "word":
        # split by words ensuring no sub sentence splits
        sentences = nltk.tokenize.sent_tokenize(text, language=self.language)
        word_count = 0
        list_splits = []  # list of lists of sentences, one inner list per split
        current_slice: List[str] = []
        for sen in sentences:
            current_word_count = len(sen.split(" "))
            if current_word_count > split_length:
                # A single sentence exceeds split_length; warn only once per
                # message (self.print_log de-duplicates across calls).
                long_sentence_message = f"One or more sentence found with word count higher than the split length."
                if long_sentence_message not in self.print_log:
                    self.print_log.add(long_sentence_message)
                    logger.warning(long_sentence_message)
            if word_count + current_word_count > split_length:
                # Current slice is full: emit it and start the next one.
                list_splits.append(current_slice)
                # Enable split_stride with split_by='word' while respecting sentence boundaries.
                if split_overlap:
                    # Seed the next slice with trailing sentences of the previous
                    # one until at least split_overlap words are carried over.
                    overlap = []
                    w_count = 0
                    for s in current_slice[::-1]:
                        sen_len = len(s.split(" "))
                        if w_count < split_overlap:
                            overlap.append(s)
                            w_count += sen_len
                        else:
                            break
                    current_slice = list(reversed(overlap))
                    word_count = w_count
                else:
                    current_slice = []
                    word_count = 0
            current_slice.append(sen)
            word_count += len(sen.split(" "))
        if current_slice:
            # Flush the final (possibly short) slice.
            list_splits.append(current_slice)

        text_splits = []
        for sl in list_splits:
            txt = " ".join(sl)
            if len(txt) > 0:
                text_splits.append(txt)
    else:
        # create individual "elements" of passage, sentence, or word
        if split_by == "passage":
            elements = text.split("\n\n")
        elif split_by == "sentence":
            elements = nltk.tokenize.sent_tokenize(text, language=self.language)
        elif split_by == "word":
            elements = text.split(" ")
        else:
            raise NotImplementedError(
                "PreProcessor only supports 'passage', 'sentence' or 'word' split_by options."
            )

        # concatenate individual elements based on split_length & split_stride
        # (windowed pads the last window with None, filtered out below)
        if split_overlap:
            segments = windowed(elements, n=split_length, step=split_length - split_overlap)
        else:
            segments = windowed(elements, n=split_length, step=split_length)
        text_splits = []
        for seg in segments:
            txt = " ".join([t for t in seg if t is not None])
            if len(txt) > 0:
                text_splits.append(txt)

    # create new document dicts for each text split
    documents = []
    for i, txt in enumerate(text_splits):
        doc = deepcopy(document)
        doc["content"] = txt
        if "meta" not in doc.keys() or doc["meta"] is None:
            doc["meta"] = {}
        doc["meta"]["_split_id"] = i
        documents.append(doc)
    return documents
def _find_and_remove_header_footer(
    self, text: str, n_chars: int, n_first_pages_to_ignore: int, n_last_pages_to_ignore: int
) -> str:
    """
    Heuristic to find footers and headers across different pages by searching for the longest common string.
    For headers we only search in the first n_chars characters (for footer: last n_chars).
    Note: This heuristic uses exact matches and therefore works well for footers like "Copyright 2019 by XXX",
    but won't detect "Page 3 of 4" or similar.

    :param n_chars: number of first/last characters where the header/footer shall be searched in
    :param n_first_pages_to_ignore: number of first pages to ignore (e.g. TOCs often don't contain footer/header)
    :param n_last_pages_to_ignore: number of last pages to ignore
    :return: the text with any detected header/footer removed from every page
        (the original docstring claimed a tuple was returned; only the cleaned
        text is returned)
    """
    # Pages are delimited by form-feed characters ("\f").
    pages = text.split("\f")

    # header
    start_of_pages = [p[:n_chars] for p in pages[n_first_pages_to_ignore:-n_last_pages_to_ignore]]
    found_header = self._find_longest_common_ngram(start_of_pages)
    if found_header:
        # Remove the header everywhere it occurs, on every page.
        pages = [page.replace(found_header, "") for page in pages]

    # footer
    end_of_pages = [p[-n_chars:] for p in pages[n_first_pages_to_ignore:-n_last_pages_to_ignore]]
    found_footer = self._find_longest_common_ngram(end_of_pages)
    if found_footer:
        pages = [page.replace(found_footer, "") for page in pages]
    logger.debug(f"Removed header '{found_header}' and footer '{found_footer}' in document")
    text = "\f".join(pages)
    return text
def _ngram(self, seq: str, n: int) -> Generator[str, None, None]:
"""
Return ngram (of tokens - currently split by whitespace)
:param seq: str, string from which the ngram shall be created
:param n: int, n of ngram
:return: str, ngram as string
"""
# In order to maintain the original whitespace, but still consider \n and \t for n-gram tokenization,
# we add a space here and remove it after creation of the ngrams again (see below)
seq = seq.replace("\n", " \n")
seq = seq.replace("\t", " \t")
words = seq.split(" ")
ngrams = (
" ".join(words[i : i + n]).replace(" \n", "\n").replace(" \t", "\t") for i in range(0, len(words) - n + 1)
)
return ngrams
def _allngram(self, seq: str, min_ngram: int, max_ngram: int) -> Set[str]:
lengths = range(min_ngram, max_ngram) if max_ngram else range(min_ngram, len(seq))
ngrams = map(partial(self._ngram, seq), lengths)
res = set(chain.from_iterable(ngrams))
return res
def _find_longest_common_ngram(
self, sequences: List[str], max_ngram: int = 30, min_ngram: int = 3
) -> Optional[str]:
"""
Find the longest common ngram across different text sequences (e.g. start of pages).
Considering all ngrams between the specified range. Helpful for finding footers, headers etc.
:param sequences: list[str], list of strings that shall be searched for common n_grams
:param max_ngram: int, maximum length of ngram to consider
:param min_ngram: minimum length of ngram to consider
:return: str, common string of all sections
"""
sequences = [s for s in sequences if s] # filter empty sequences
if not sequences:
return None
seqs_ngrams = map(partial(self._allngram, min_ngram=min_ngram, max_ngram=max_ngram), sequences)
intersection = reduce(set.intersection, seqs_ngrams)
try:
longest = max(intersection, key=len)
except ValueError:
# no common sequence found
longest = ""
return longest if longest.strip() else None
| 42.092965
| 150
| 0.60222
|
4a0111ce8fff959e5cae7362962697af3915df96
| 22,269
|
py
|
Python
|
robotframework-ls/src/robotframework_ls/server_manager.py
|
DetachHead/robotframework-lsp
|
a82438f45b75f3afbe3f80a970b75ed9065f96a7
|
[
"ECL-2.0",
"Apache-2.0"
] | 92
|
2020-01-22T22:15:29.000Z
|
2022-03-31T05:19:16.000Z
|
robotframework-ls/src/robotframework_ls/server_manager.py
|
DetachHead/robotframework-lsp
|
a82438f45b75f3afbe3f80a970b75ed9065f96a7
|
[
"ECL-2.0",
"Apache-2.0"
] | 604
|
2020-01-25T17:13:27.000Z
|
2022-03-31T18:58:24.000Z
|
robotframework-ls/src/robotframework_ls/server_manager.py
|
DetachHead/robotframework-lsp
|
a82438f45b75f3afbe3f80a970b75ed9065f96a7
|
[
"ECL-2.0",
"Apache-2.0"
] | 39
|
2020-02-06T00:38:06.000Z
|
2022-03-15T06:14:19.000Z
|
from robocorp_ls_core.pluginmanager import PluginManager
import threading
from robocorp_ls_core.basic import (
log_and_silence_errors,
kill_process_and_subprocesses,
is_process_alive,
)
import sys
import weakref
import os
from robocorp_ls_core.robotframework_log import get_logger
from typing import Any, Dict, Optional, Tuple, List, Iterable
from robotframework_ls.ep_resolve_interpreter import (
EPResolveInterpreter,
IInterpreterInfo,
)
from robocorp_ls_core.protocols import (
IConfig,
IMessageMatcher,
IWorkspace,
IRobotFrameworkApiClient,
)
import itertools
from functools import partial
# Key under which the fallback API trio (used when no custom interpreter
# resolver matches a document) is stored in ServerManager._id_to_apis.
DEFAULT_API_ID = "default"

log = get_logger(__name__)

# Process-wide counter used to suffix log file names so several API
# subprocesses started in one session don't overwrite each other's logs.
_next_id = partial(next, itertools.count(0))
class _ServerApi(object):
    """
    Note: this is mainly a helper to manage the startup of an IRobotFrameworkApiClient
    and restart it when needed.

    This class is not thread-safe and should be accessed only from a single thread.
    The provided `IRobotFrameworkApiClient` may later be accessed from any thread.
    """

    def __init__(self, log_extension, language_server_ref):
        # Remember the owning thread: every public entry point asserts it is
        # called from this same thread (see _check_in_main_thread).
        self._main_thread = threading.current_thread()

        from robotframework_ls.robot_config import RobotConfig

        # Python executable / environment the current subprocess was started
        # with; used by _check_reinitialize to detect when a restart is needed.
        self._used_python_executable = None
        self._used_environ = None
        self._server_process = None
        self._robotframework_api_client: Optional[IRobotFrameworkApiClient] = None

        # We have a version of the config with the settings passed overridden
        # by the settings of a given (customized) interpreter.
        self._config: IConfig = RobotConfig()
        self.workspace = None
        self._initializing = False
        # Suffix appended to the log file name (e.g. ".api", ".lint.api").
        self._log_extension = log_extension
        # Callable (usually a weakref) returning the owning language server.
        self._language_server_ref = language_server_ref
        self._interpreter_info: Optional[IInterpreterInfo] = None

    def _check_in_main_thread(self):
        # Guard: this class is single-threaded by design.
        curr_thread = threading.current_thread()
        if self._main_thread is not curr_thread:
            raise AssertionError(
                f"This may only be called at the thread: {self._main_thread}. Current thread: {curr_thread}"
            )

    @property
    def robot_framework_language_server(self):
        # Dereference the (weak) reference to the owning language server.
        return self._language_server_ref()

    @property
    def workspace(self) -> IWorkspace:
        return self._workspace

    @workspace.setter
    def workspace(self, workspace: IWorkspace):
        self._check_in_main_thread()
        self._workspace = workspace

    @property
    def config(self) -> IConfig:
        return self._config

    @config.setter
    def config(self, config: IConfig):
        # Merge the new settings into our (interpreter-aware) config copy and
        # restart/re-forward settings to the subprocess if needed.
        self._check_in_main_thread()
        self._config.update(config.get_full_settings())
        self._check_reinitialize_and_forward_settings_if_needed()

    def set_interpreter_info(self, interpreter_info: IInterpreterInfo) -> None:
        """Apply a (customized) interpreter's settings on top of the config."""
        from robotframework_ls.config_extension import apply_interpreter_info_to_config

        self._check_in_main_thread()
        self._interpreter_info = interpreter_info
        apply_interpreter_info_to_config(self._config, interpreter_info)
        self._check_reinitialize_and_forward_settings_if_needed()

    def _check_reinitialize_and_forward_settings_if_needed(self) -> None:
        self._check_in_main_thread()
        was_disposed = self._check_reinitialize()
        if not was_disposed:
            # i.e.: when the interpreter info changes, even if it kept the same
            # interpreter, it's possible that the configuration changed.
            self.forward(
                "workspace/didChangeConfiguration",
                {"settings": self._config.get_full_settings()},
            )

    def _check_reinitialize(self) -> bool:
        """
        Returns True if the existing process was disposed (or if it wasn't even
        started) and False if the existing process was kept running.
        """
        self._check_in_main_thread()
        if self._server_process is None:
            return True

        # If the python executable changes, restart the server API.
        if self._used_python_executable is not None:
            python_executable = self._get_python_executable()
            if python_executable != self._used_python_executable:
                # It'll be reinitialized when needed.
                self._dispose_server_process()
                return True
        if self._used_environ is not None:
            environ = self._get_environ()
            if environ != self._used_environ:
                # It'll be reinitialized when needed.
                self._dispose_server_process()
                return True
        return False

    def _get_python_executable(self) -> str:
        """Return the configured python executable (default: sys.executable)."""
        self._check_in_main_thread()
        from robotframework_ls.impl.robot_lsp_constants import (
            OPTION_ROBOT_PYTHON_EXECUTABLE,
        )

        config = self._config
        python_exe = sys.executable
        if config is not None:
            python_exe = config.get_setting(
                OPTION_ROBOT_PYTHON_EXECUTABLE, str, default=python_exe
            )
        else:
            log.warning(f"self._config not set in {self.__class__}")
        return python_exe

    def _get_environ(self) -> Dict[str, str]:
        """Build the subprocess environment: current env minus python-specific
        variables, plus any settings-provided overrides."""
        self._check_in_main_thread()
        from robotframework_ls.impl.robot_lsp_constants import OPTION_ROBOT_PYTHON_ENV

        config = self._config
        env = os.environ.copy()

        # Strip inherited python context so the configured interpreter runs clean.
        env.pop("PYTHONPATH", "")
        env.pop("PYTHONHOME", "")
        env.pop("VIRTUAL_ENV", "")

        if config is not None:
            env_in_settings = config.get_setting(
                OPTION_ROBOT_PYTHON_ENV, dict, default={}
            )
            for key, val in env_in_settings.items():
                env[str(key)] = str(val)
        else:
            log.warning("self._config not set in %s" % (self.__class__,))
        return env

    def get_robotframework_api_client(self) -> Optional[IRobotFrameworkApiClient]:
        """Return the API client, lazily starting (or restarting) the backing
        subprocess and replaying workspace state to it on first use."""
        self._check_in_main_thread()
        workspace = self.workspace
        assert (
            workspace
        ), "The workspace must be already set when getting the server api."

        server_process = self._server_process

        if server_process is not None:
            # If someone killed it, dispose of internal references
            # and create a new process.
            if not is_process_alive(server_process.pid):
                server_process = None
                self._dispose_server_process()

        if server_process is None:
            try:
                from robotframework_ls.options import Setup
                from robotframework_ls.server_api.client import RobotFrameworkApiClient
                from robotframework_ls.server_api.server__main__ import (
                    start_server_process,
                )
                from robocorp_ls_core.jsonrpc.streams import (
                    JsonRpcStreamWriter,
                    JsonRpcStreamReader,
                )
                from robotframework_ls.robotframework_ls_impl import (
                    RobotFrameworkLanguageServer,
                )

                args = []
                if Setup.options.verbose:
                    args.append("-" + "v" * int(Setup.options.verbose))
                if Setup.options.log_file:
                    log_id = _next_id()
                    # i.e.: use a log id in case we create more than one in the
                    # same session.
                    if log_id == 0:
                        args.append(
                            "--log-file=" + Setup.options.log_file + self._log_extension
                        )
                    else:
                        args.append(
                            "--log-file="
                            + Setup.options.log_file
                            + (".%s" % (log_id,))
                            + self._log_extension
                        )

                python_exe = self._get_python_executable()
                environ = self._get_environ()

                # Remember what we launched with so _check_reinitialize can
                # detect configuration drift later.
                self._used_python_executable = python_exe
                self._used_environ = environ

                robot_framework_language_server: RobotFrameworkLanguageServer = (
                    self.robot_framework_language_server
                )
                remote_fs_observer_port = (
                    robot_framework_language_server.get_remote_fs_observer_port()
                )
                if not remote_fs_observer_port:
                    raise RuntimeError(
                        f"Expected the port to hear the Remote filesystem observer to be available. Found: {remote_fs_observer_port}"
                    )
                args.append(f"--remote-fs-observer-port={remote_fs_observer_port}")

                server_process = start_server_process(
                    args=args, python_exe=python_exe, env=environ
                )

                self._server_process = server_process

                # Talk JSON-RPC to the subprocess over its stdin/stdout pipes.
                write_to = server_process.stdin
                read_from = server_process.stdout
                w = JsonRpcStreamWriter(write_to, sort_keys=True)
                r = JsonRpcStreamReader(read_from)

                api = self._robotframework_api_client = RobotFrameworkApiClient(
                    w, r, server_process
                )

                log.debug(
                    "Initializing api... (this pid: %s, api pid: %s).",
                    os.getpid(),
                    server_process.pid,
                )
                api.initialize(
                    process_id=os.getpid(),
                    root_uri=workspace.root_uri,
                    workspace_folders=list(
                        {"uri": folder.uri, "name": folder.name}
                        for folder in workspace.iter_folders()
                    ),
                )

                config = self._config
                log.debug("Forwarding config to api...")
                if config is not None:
                    api.forward(
                        "workspace/didChangeConfiguration",
                        {"settings": config.get_full_settings()},
                    )

                # Open existing documents in the API.
                source: Optional[str]
                for document in workspace.iter_documents():
                    log.debug("Forwarding doc: %s to api...", document.uri)
                    try:
                        source = document.source
                    except Exception:
                        # Document may be unreadable; still notify with no text.
                        source = None

                    api.forward(
                        "textDocument/didOpen",
                        {
                            "textDocument": {
                                "uri": document.uri,
                                "version": document.version,
                                "text": source,
                            }
                        },
                    )

            except Exception as e:
                if server_process is None:
                    log.exception(
                        "Error starting robotframework server api (server_process=None)."
                    )
                else:
                    exitcode = server_process.poll()
                    if exitcode is not None:
                        # Note: only read() if the process exited.
                        log.exception(
                            "Error starting robotframework server api. Exit code: %s Base exception: %s. Stderr: %s",
                            exitcode,
                            e,
                            server_process.stderr.read(),
                        )
                    else:
                        log.exception(
                            "Error (%s) starting robotframework server api (still running). Base exception: %s.",
                            exitcode,
                            e,
                        )
                self._dispose_server_process()
            finally:
                if server_process is not None:
                    log.debug(
                        "Server api (%s) created pid: %s", self, server_process.pid
                    )
                else:
                    log.debug(
                        "server_process == None in get_robotframework_api_client()"
                    )

        return self._robotframework_api_client

    @log_and_silence_errors(log)
    def _dispose_server_process(self):
        """Kill the subprocess tree (if alive) and clear all cached state."""
        self._check_in_main_thread()
        try:
            log.debug("Dispose server process.")
            if self._server_process is not None:
                if is_process_alive(self._server_process.pid):
                    kill_process_and_subprocesses(self._server_process.pid)
        finally:
            # Always reset, even if killing failed, so a fresh start is possible.
            self._server_process = None
            self._robotframework_api_client = None
            self._used_environ = None
            self._used_python_executable = None

    def request_cancel(self, message_id) -> None:
        self._check_in_main_thread()
        api = self.get_robotframework_api_client()
        if api is not None:
            api.request_cancel(message_id)

    @log_and_silence_errors(log)
    def forward(self, method_name, params) -> None:
        # Synchronous forward of an LSP notification/request to the subprocess.
        self._check_in_main_thread()
        api = self.get_robotframework_api_client()
        if api is not None:
            return api.forward(method_name, params)
        return None

    @log_and_silence_errors(log)
    def forward_async(self, method_name, params) -> Optional[IMessageMatcher]:
        # Asynchronous forward; returns a matcher to wait on the response.
        self._check_in_main_thread()
        api = self.get_robotframework_api_client()
        if api is not None:
            return api.forward_async(method_name, params)
        return None

    @log_and_silence_errors(log)
    def open(self, uri, version, source):
        self._check_in_main_thread()
        api = self.get_robotframework_api_client()
        if api is not None:
            api.open(uri, version, source)

    @log_and_silence_errors(log)
    def exit(self):
        self._check_in_main_thread()
        if self._robotframework_api_client is not None:
            # i.e.: only exit if it was started in the first place.
            self._robotframework_api_client.exit()
        self._dispose_server_process()

    @log_and_silence_errors(log)
    def shutdown(self):
        self._check_in_main_thread()
        if self._robotframework_api_client is not None:
            # i.e.: only shutdown if it was started in the first place.
            self._robotframework_api_client.shutdown()
class _RegularLintAndOthersApi(object):
    """
    Groups the 3 per-interpreter server processes (each one an API):

    - ``api``: real-time requests (code-completion, find definition,
      signature help, hover).
    - ``lint_api``: linting (slowest).
    - ``others_api``: middle-ground requests (document formatting, code
      folding, semantic tokens, workspace symbols).
    """

    def __init__(self, api: _ServerApi, lint_api: _ServerApi, others_api: _ServerApi):
        self.api = api
        self.lint_api = lint_api
        self.others_api = others_api

    def __iter__(self):
        # Iterate over the trio in a fixed order: regular, lint, others.
        return iter((self.api, self.lint_api, self.others_api))

    def set_interpreter_info(self, interpreter_info: IInterpreterInfo):
        # Broadcast the interpreter info to every API in the trio.
        for server_api in (self.api, self.lint_api, self.others_api):
            server_api.set_interpreter_info(interpreter_info)
class ServerManager(object):
    """
    Note: accessing the ServerManager may only be done from a single thread.

    The idea is that clients do something as:

    rf_api_client = server_manager.get_lint_rf_api_client(doc_uri)
    if rf_api_client is not None:
        ... robotframework_api_client may then be accessed by any thread.
    """

    def __init__(
        self,
        pm: PluginManager,
        config: Optional[IConfig] = None,
        workspace: Optional[IWorkspace] = None,
        language_server: Optional[Any] = None,
    ):
        # Owning thread; all manager methods must run on it.
        self._main_thread = threading.current_thread()

        self._config: Optional[IConfig] = config
        self._workspace: Optional[IWorkspace] = workspace
        self._pm = pm
        # Maps an api id (DEFAULT_API_ID or an interpreter id) to its API trio.
        self._id_to_apis: Dict[str, _RegularLintAndOthersApi] = {}

        if language_server is None:
            self._language_server_ref = lambda: None
        else:
            # Weak ref so the manager doesn't keep the language server alive.
            self._language_server_ref = weakref.ref(language_server)

    def _check_in_main_thread(self):
        curr_thread = threading.current_thread()
        if self._main_thread is not curr_thread:
            raise AssertionError(
                f"This may only be called at the thread: {self._main_thread}. Current thread: {curr_thread}"
            )

    def _iter_all_apis(self) -> Iterable[_ServerApi]:
        # Flatten every trio into a stream of individual _ServerApi objects.
        self._check_in_main_thread()
        for apis in self._id_to_apis.values():
            for api in apis:
                yield api

    def set_config(self, config: IConfig) -> None:
        self._check_in_main_thread()
        self._config = config
        for api in self._iter_all_apis():
            api.config = config

    def set_workspace(self, workspace: IWorkspace) -> None:
        self._check_in_main_thread()
        self._workspace = workspace
        for api in self._iter_all_apis():
            api.workspace = workspace

    def _create_apis(self, api_id) -> _RegularLintAndOthersApi:
        """Create the regular/lint/others API trio for *api_id* and register it."""
        self._check_in_main_thread()
        assert api_id not in self._id_to_apis, f"{api_id} already created."
        api = _ServerApi(".api", self._language_server_ref)
        lint_api = _ServerApi(".lint.api", self._language_server_ref)
        others_api = _ServerApi(".others.api", self._language_server_ref)

        # Propagate the current config/workspace (if already known) to the trio.
        config = self._config
        if config is not None:
            api.config = config
            lint_api.config = config
            others_api.config = config

        workspace = self._workspace
        if workspace is not None:
            api.workspace = workspace
            lint_api.workspace = workspace
            others_api.workspace = workspace

        apis = _RegularLintAndOthersApi(api, lint_api, others_api)
        self._id_to_apis[api_id] = apis
        return apis

    def _get_default_apis(self) -> _RegularLintAndOthersApi:
        # Lazily create the fallback trio used when no interpreter matches.
        self._check_in_main_thread()
        apis = self._id_to_apis.get(DEFAULT_API_ID)
        if not apis:
            apis = self._create_apis(DEFAULT_API_ID)
        return apis

    def _get_apis_for_doc_uri(self, doc_uri: str) -> _RegularLintAndOthersApi:
        """Resolve the API trio for a document, consulting interpreter-resolver
        plugins first and falling back to the default trio."""
        self._check_in_main_thread()
        if doc_uri:
            for ep in self._pm.get_implementations(EPResolveInterpreter):
                interpreter_info = ep.get_interpreter_info_for_doc_uri(doc_uri)
                if interpreter_info is not None:
                    # Note: we currently only identify things through the interpreter
                    # id, but a potential optimization would be using the same python
                    # executable in different APIs if they match.
                    interpreter_id = interpreter_info.get_interpreter_id()
                    apis = self._id_to_apis.get(interpreter_id)
                    if apis is not None:
                        apis.set_interpreter_info(interpreter_info)
                    else:
                        apis = self._create_apis(interpreter_id)
                        apis.set_interpreter_info(interpreter_info)
                    return apis

        return self._get_default_apis()

    def forward(self, target: Tuple[str, ...], method_name: str, params: Any) -> None:
        """Forward an LSP message to the selected targets ("api", "lint",
        "others") of every registered trio."""
        self._check_in_main_thread()
        apis: _RegularLintAndOthersApi
        for apis in self._id_to_apis.values():
            # Note: always forward async to all APIs (all the messages are sent
            # from the current main thread, so, the messages ordering is still
            # guaranteed to be correct).
            if "api" in target:
                apis.api.forward_async(method_name, params)

            if "lint" in target:
                apis.lint_api.forward_async(method_name, params)

            if "others" in target:
                apis.others_api.forward_async(method_name, params)

    def shutdown(self) -> None:
        self._check_in_main_thread()
        for api in self._iter_all_apis():
            api.shutdown()

    def exit(self) -> None:
        self._check_in_main_thread()
        for api in self._iter_all_apis():
            api.exit()

    def collect_apis(self) -> List[_ServerApi]:
        return list(self._iter_all_apis())

    # Private APIs
    def _get_others_api(self, doc_uri: str) -> _ServerApi:
        self._check_in_main_thread()
        apis = self._get_apis_for_doc_uri(doc_uri)
        return apis.others_api

    def _get_lint_api(self, doc_uri: str) -> _ServerApi:
        self._check_in_main_thread()
        apis = self._get_apis_for_doc_uri(doc_uri)
        return apis.lint_api

    def _get_regular_api(self, doc_uri: str) -> _ServerApi:
        self._check_in_main_thread()
        apis = self._get_apis_for_doc_uri(doc_uri)
        return apis.api

    # Public APIs -- returns a client that can be accessed in any thread
    def get_lint_rf_api_client(
        self, doc_uri: str
    ) -> Optional[IRobotFrameworkApiClient]:
        api = self._get_lint_api(doc_uri)
        if api is not None:
            return api.get_robotframework_api_client()
        return None

    def get_regular_rf_api_client(
        self, doc_uri: str
    ) -> Optional[IRobotFrameworkApiClient]:
        api = self._get_regular_api(doc_uri)
        if api is not None:
            return api.get_robotframework_api_client()
        return None

    def get_others_api_client(self, doc_uri) -> Optional[IRobotFrameworkApiClient]:
        api = self._get_others_api(doc_uri)
        if api is not None:
            return api.get_robotframework_api_client()
        return None

    def get_workspace_symbols_api_client(self) -> Optional[IRobotFrameworkApiClient]:
        # Workspace symbols always use the default trio's "others" API.
        self._check_in_main_thread()
        apis = self._get_default_apis()
        return apis.others_api.get_robotframework_api_client()
| 37.115
| 133
| 0.598186
|
4a0111ecea21482cdf6d08e1dd80bf881fd61b2e
| 792
|
py
|
Python
|
MeClass8/my_devices.py
|
carrascodc/pyneta
|
857c0279d6c567d8855fedde2883603d7c70b50b
|
[
"Apache-2.0"
] | null | null | null |
MeClass8/my_devices.py
|
carrascodc/pyneta
|
857c0279d6c567d8855fedde2883603d7c70b50b
|
[
"Apache-2.0"
] | null | null | null |
MeClass8/my_devices.py
|
carrascodc/pyneta
|
857c0279d6c567d8855fedde2883603d7c70b50b
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""Inventory of lab devices; each entry is a dict of keyword arguments for a
NAPALM driver obtained via ``get_network_driver(device_type)``."""
# NOTE(review): getpass/pprint/get_network_driver appear unused in this module
# — presumably kept for interactive use or importing scripts; verify callers.
from getpass import getpass
from pprint import pprint
from napalm import get_network_driver

# Supress SSL Certificate Warnings
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)

# SECURITY: credentials are hardcoded in source; consider getpass()/env vars.
username = 'pyclass'
password = '88newclass'

# Device definitions
cisco3 = dict(
    hostname="cisco3.lasthop.io",
    device_type="ios",
    username=username,
    password=password,
    optional_args={},
)

nxos1 = dict(
    hostname="nxos1.lasthop.io",
    device_type="nxos",
    username=username,
    password=password,
    # NX-API on a non-default HTTPS port.
    optional_args={"port": 8443},
)

arista1 = dict(
    hostname="arista1.lasthop.io",
    device_type="eos",
    username=username,
    password=password,
)
|
4a01123db38b57de73bd800a3e451bf559a5da50
| 14,203
|
py
|
Python
|
autoreject/tests/test_autoreject.py
|
rob-luke/autoreject
|
b73847949db64740680e3d325b824ed9aa05a8cc
|
[
"BSD-3-Clause"
] | null | null | null |
autoreject/tests/test_autoreject.py
|
rob-luke/autoreject
|
b73847949db64740680e3d325b824ed9aa05a8cc
|
[
"BSD-3-Clause"
] | null | null | null |
autoreject/tests/test_autoreject.py
|
rob-luke/autoreject
|
b73847949db64740680e3d325b824ed9aa05a8cc
|
[
"BSD-3-Clause"
] | null | null | null |
"""Test autoreject."""
# Author: Mainak Jas <mainak.jas@telecom-paristech.fr>
# Denis A. Engemann <denis.engemann@gmail.com>
# Stefan Appelhoff <stefan.appelhoff@mailbox.org>
#
# License: BSD (3-clause)
import os.path as op
import pickle
import platform
import os
import numpy as np
from numpy.testing import assert_array_equal
import pytest
import mne
from mne.datasets import sample
from mne import io
from mne.utils import _TempDir
from autoreject import (_GlobalAutoReject, _AutoReject, AutoReject,
compute_thresholds, validation_curve,
get_rejection_threshold, read_auto_reject)
from autoreject.utils import _get_picks_by_type
from autoreject.autoreject import _get_interp_chs
import matplotlib

# Use a non-interactive backend so tests can run headless.
matplotlib.use('Agg')

# Shared fixture: first 60 s of the MNE "sample" dataset, loaded lazily
# (preload=False) and with projections stripped so tests control them.
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
raw = io.read_raw_fif(raw_fname, preload=False)
raw.crop(0, 60)
raw.info['projs'] = list()
def test_global_autoreject():
    """Test global autoreject (threshold reproducibility and ch_types handling)."""
    event_id = None
    tmin, tmax = -0.2, 0.5
    events = mne.find_events(raw)

    picks = mne.pick_types(raw.info, meg=True, eeg=True, stim=False,
                           eog=True, exclude=[])

    # raise error if preload is false
    epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
                        picks=picks, baseline=(None, 0),
                        reject=None, preload=False)

    # Test get_rejection_thresholds.
    reject1 = get_rejection_threshold(epochs, decim=1, random_state=42)
    reject2 = get_rejection_threshold(epochs, decim=1, random_state=42)
    reject3 = get_rejection_threshold(epochs, decim=2, random_state=42)
    tols = dict(eeg=5e-6, eog=5e-6, grad=10e-12, mag=5e-15)
    if platform.system().lower().startswith("win"):  # pragma: no cover
        # XXX: When testing on Windows, the precision seemed to be lower. Why?
        tols = dict(eeg=9e-5, eog=9e-5, grad=10e-12, mag=5e-15)
    # BUG FIX: the original `assert reject1, isinstance(reject1, dict)` only
    # checked the truthiness of reject1 — the isinstance call was the assert
    # *message*, never evaluated as a condition. Assert both explicitly.
    assert reject1
    assert isinstance(reject1, dict)
    # Same decim + random_state must reproduce exactly; different decim within tol.
    for key in reject1:
        assert reject1[key] == reject2[key]
        assert abs(reject1[key] - reject3[key]) < tols[key]

    reject = get_rejection_threshold(epochs, decim=4, ch_types='eeg')
    assert 'eog' not in reject
    assert 'eeg' in reject
    pytest.raises(ValueError, get_rejection_threshold, epochs,
                  decim=4, ch_types=5)
def test_autoreject():
    """Test basic _AutoReject functionality.

    Covers: fitting on unloaded/loaded epochs, argument validation for
    _GlobalAutoReject, validation_curve, per-channel-type parameter
    selection, reject-log consistency, interpolation bookkeeping, handling
    of bad channels, and threshold computation.
    """
    event_id = None
    tmin, tmax = -0.2, 0.5
    events = mne.find_events(raw)

    ##########################################################################
    # picking epochs
    include = [u'EEG %03d' % i for i in range(1, 45, 3)]
    picks = mne.pick_types(raw.info, meg=False, eeg=False, stim=False,
                           eog=True, include=include, exclude=[])
    epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
                        picks=picks, baseline=(None, 0), decim=10,
                        reject=None, preload=False)[:10]

    ar = _AutoReject()
    # fitting requires preloaded data
    pytest.raises(ValueError, ar.fit, epochs)
    epochs.load_data()

    ar.fit(epochs)
    # the EOG channel is excluded from the fitted picks
    assert len(ar.picks_) == len(picks) - 1

    # epochs with no picks.
    epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
                        baseline=(None, 0), decim=10,
                        reject=None, preload=True)[:20]
    # let's drop some channels to speed up. (Fixed: removed a dead
    # ``pre_picks = mne.pick_types(epochs.info, meg=True, eeg=True)``
    # assignment that was immediately overwritten below.)
    pre_picks = np.r_[
        mne.pick_types(epochs.info, meg='mag', eeg=False)[::15],
        mne.pick_types(epochs.info, meg='grad', eeg=False)[::60],
        mne.pick_types(epochs.info, meg=False, eeg=True)[::16],
        mne.pick_types(epochs.info, meg=False, eeg=False, eog=True)]
    pick_ch_names = [epochs.ch_names[pp] for pp in pre_picks]
    bad_ch_names = [epochs.ch_names[ix] for ix in range(len(epochs.ch_names))
                    if ix not in pre_picks]
    epochs_with_bads = epochs.copy()
    epochs_with_bads.info['bads'] = bad_ch_names
    epochs.pick_channels(pick_ch_names)

    epochs_fit = epochs[:12]  # make sure to use different size of epochs
    epochs_new = epochs[12:]
    epochs_with_bads_fit = epochs_with_bads[:12]

    X = epochs_fit.get_data()
    n_epochs, n_channels, n_times = X.shape
    X = X.reshape(n_epochs, -1)

    # _GlobalAutoReject needs both n_channels and n_times before it can fit
    ar = _GlobalAutoReject()
    pytest.raises(ValueError, ar.fit, X)
    ar = _GlobalAutoReject(n_channels=n_channels)
    pytest.raises(ValueError, ar.fit, X)
    ar = _GlobalAutoReject(n_times=n_times)
    pytest.raises(ValueError, ar.fit, X)
    ar_global = _GlobalAutoReject(
        n_channels=n_channels, n_times=n_times, thresh=40e-6)
    ar_global.fit(X)

    param_range = np.linspace(40e-6, 200e-6, 10)
    train_scores, test_scores = \
        validation_curve(epochs_fit, param_range=param_range)
    assert len(train_scores) == len(test_scores)
    train_scores, test_scores, param_range = \
        validation_curve(epochs_fit, return_param_range=True)
    assert len(train_scores) == len(test_scores) == len(param_range)

    # validation_curve wants Epochs, not a bare array
    pytest.raises(ValueError, validation_curve, X, param_range=param_range)

    ##########################################################################
    # picking AutoReject
    picks = mne.pick_types(
        epochs.info, meg='mag', eeg=True, stim=False, eog=False,
        include=[], exclude=[])
    non_picks = mne.pick_types(
        epochs.info, meg='grad', eeg=False, stim=False, eog=False,
        include=[], exclude=[])
    ch_types = ['mag', 'eeg']

    ar = _AutoReject(picks=picks)  # XXX : why do we need this??

    ar = AutoReject(cv=3, picks=picks, random_state=42,
                    n_interpolate=[1, 2], consensus=[0.5, 1])
    # fit expects Epochs; transform expects a fitted estimator
    pytest.raises(AttributeError, ar.fit, X)
    pytest.raises(ValueError, ar.transform, X)
    pytest.raises(ValueError, ar.transform, epochs)

    epochs_nochs = epochs_fit.copy()
    # just one channel loc is nan or all channel locs are 0.
    # Should raise error in both cases
    epochs_nochs.info['chs'][1]['loc'][:] = np.nan
    pytest.raises(RuntimeError, ar.fit, epochs_nochs)
    for ch in epochs_nochs.info['chs']:
        ch['loc'] = np.zeros_like(ch['loc'])
    pytest.raises(RuntimeError, ar.fit, epochs_nochs)

    ar2 = AutoReject(cv=3, picks=picks, random_state=42,
                     n_interpolate=[1, 2], consensus=[0.5, 1],
                     verbose='blah')
    pytest.raises(ValueError, ar2.fit, epochs_fit)

    ar.fit(epochs_fit)
    reject_log = ar.get_reject_log(epochs_fit)
    for ch_type in ch_types:
        # test that kappa & rho are selected
        assert ar.n_interpolate_[ch_type] in ar.n_interpolate
        assert ar.consensus_[ch_type] in ar.consensus

        assert (ar.n_interpolate_[ch_type] ==
                ar.local_reject_[ch_type].n_interpolate_[ch_type])
        assert (ar.consensus_[ch_type] ==
                ar.local_reject_[ch_type].consensus_[ch_type])

    # test complementarity of goods and bads
    assert_array_equal(len(reject_log.bad_epochs), len(epochs_fit))

    # test that transform does not change state of ar
    epochs_clean = ar.transform(epochs_fit)  # apply same data
    assert repr(ar)
    assert repr(ar.local_reject_)
    reject_log2 = ar.get_reject_log(epochs_fit)
    assert_array_equal(reject_log.labels, reject_log2.labels)
    assert_array_equal(reject_log.bad_epochs, reject_log2.bad_epochs)
    assert_array_equal(reject_log.ch_names, reject_log2.ch_names)

    epochs_new_clean = ar.transform(epochs_new)  # apply to new data

    reject_log_new = ar.get_reject_log(epochs_new)
    assert_array_equal(len(reject_log_new.bad_epochs), len(epochs_new))

    assert len(reject_log_new.bad_epochs) != len(reject_log.bad_epochs)

    picks_by_type = _get_picks_by_type(epochs.info, ar.picks)
    # test correct entries in fix log
    assert np.isnan(reject_log_new.labels[:, non_picks]).sum() > 0
    assert np.isnan(reject_log_new.labels[:, picks]).sum() == 0
    assert (reject_log_new.labels.shape ==
            (len(epochs_new), len(epochs_new.ch_names)))

    # test correct interpolations by type
    for ch_type, this_picks in picks_by_type:
        interp_counts = np.sum(
            reject_log_new.labels[:, this_picks] == 2, axis=1)
        labels = reject_log_new.labels.copy()
        not_this_picks = np.setdiff1d(np.arange(labels.shape[1]), this_picks)
        labels[:, not_this_picks] = np.nan
        interp_channels = _get_interp_chs(
            labels, reject_log.ch_names, this_picks)
        assert_array_equal(
            interp_counts, [len(cc) for cc in interp_channels])

    # Fixed: the original reduced with ``is_same = np.isscalar(is_same)``
    # (always False for an array), which made ``assert not is_same``
    # vacuous. Reduce the elementwise equality with np.all() so we actually
    # check that the cleaned data differs from the input.
    is_same = epochs_new_clean.get_data() == epochs_new.get_data()
    if not np.isscalar(is_same):
        is_same = np.all(is_same)
    assert not is_same

    # test that transform ignores bad channels
    epochs_with_bads_fit.pick_types(meg='mag', eeg=True, eog=True, exclude=[])
    ar_bads = AutoReject(cv=3, random_state=42,
                         n_interpolate=[1, 2], consensus=[0.5, 1])
    ar_bads.fit(epochs_with_bads_fit)
    epochs_with_bads_clean = ar_bads.transform(epochs_with_bads_fit)

    good_w_bads_ix = mne.pick_types(epochs_with_bads_clean.info,
                                    meg='mag', eeg=True, eog=True,
                                    exclude='bads')
    good_wo_bads_ix = mne.pick_types(epochs_clean.info,
                                     meg='mag', eeg=True, eog=True,
                                     exclude='bads')
    assert_array_equal(epochs_with_bads_clean.get_data()[:, good_w_bads_ix, :],
                       epochs_clean.get_data()[:, good_wo_bads_ix, :])

    # bad channels pass through transform untouched (modulo dropped epochs)
    bad_ix = [epochs_with_bads_clean.ch_names.index(ch)
              for ch in epochs_with_bads_clean.info['bads']]
    epo_ix = ~ar_bads.get_reject_log(epochs_with_bads_fit).bad_epochs
    assert_array_equal(
        epochs_with_bads_clean.get_data()[:, bad_ix, :],
        epochs_with_bads_fit.get_data()[epo_ix, :, :][:, bad_ix, :])

    assert epochs_clean.ch_names == epochs_fit.ch_names

    assert isinstance(ar.threshes_, dict)
    assert len(ar.picks) == len(picks)
    assert len(ar.threshes_.keys()) == len(ar.picks)
    pick_eog = mne.pick_types(epochs.info, meg=False, eeg=False, eog=True)[0]
    assert epochs.ch_names[pick_eog] not in ar.threshes_.keys()

    # transforming a channel subset the estimator wasn't fitted on fails
    pytest.raises(
        IndexError, ar.transform,
        epochs.copy().pick_channels(
            [epochs.ch_names[pp] for pp in picks[:3]]))

    epochs.load_data()
    pytest.raises(ValueError, compute_thresholds, epochs, 'dfdfdf')
    index, ch_names = zip(*[(ii, epochs_fit.ch_names[pp])
                            for ii, pp in enumerate(picks)])
    threshes_a = compute_thresholds(
        epochs_fit, picks=picks, method='random_search')
    assert set(threshes_a.keys()) == set(ch_names)
    threshes_b = compute_thresholds(
        epochs_fit, picks=picks, method='bayesian_optimization')
    assert set(threshes_b.keys()) == set(ch_names)
def test_io():
    """Test IO functionality.

    An AutoReject estimator saved to HDF5 or pickled -- before or after
    fitting -- must behave identically to the in-memory original.
    """
    event_id = None
    tmin, tmax = -0.2, 0.5
    events = mne.find_events(raw)

    savedir = _TempDir()
    fname = op.join(savedir, 'autoreject.hdf5')

    include = [u'EEG %03d' % i for i in range(1, 45, 3)]
    picks = mne.pick_types(raw.info, meg=False, eeg=False, stim=False,
                           eog=True, include=include, exclude=[])

    # preloaded epochs so that fitting works below
    epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
                        picks=picks, baseline=(None, 0), decim=4,
                        reject=None, preload=True)[:10]
    ar = AutoReject(cv=2, random_state=42, n_interpolate=[1],
                    consensus=[0.5], verbose=False)
    ar.save(fname)  # save without fitting
    pkl_ar = pickle.dumps(ar)  # also, pickle without fitting

    # check that fit after saving is the same as fit without saving/pickling
    ar2 = read_auto_reject(fname)
    ar3 = pickle.loads(pkl_ar)
    ar.fit(epochs)
    ar2.fit(epochs)
    ar3.fit(epochs)
    # all three estimators must learn identical per-channel thresholds
    assert np.sum([ar.threshes_[k] - ar2.threshes_[k]
                   for k in ar.threshes_.keys()]) == 0.
    assert np.sum([ar.threshes_[k] - ar3.threshes_[k]
                   for k in ar.threshes_.keys()]) == 0.

    # saving over an existing file requires an explicit overwrite=True
    pytest.raises(ValueError, ar.save, fname)
    ar.save(fname, overwrite=True)
    pkl_ar2 = pickle.dumps(ar)

    # round-tripping a *fitted* estimator must preserve transform output
    ar4 = read_auto_reject(fname)
    ar5 = pickle.loads(pkl_ar2)
    epochs_clean1, reject_log1 = ar.transform(epochs, return_log=True)
    epochs_clean2, reject_log2 = ar4.transform(epochs, return_log=True)
    epochs_clean3, reject_log3 = ar5.transform(epochs, return_log=True)
    assert_array_equal(epochs_clean1.get_data(), epochs_clean2.get_data())
    assert_array_equal(epochs_clean1.get_data(), epochs_clean3.get_data())
    assert_array_equal(reject_log1.labels, reject_log2.labels)
    assert_array_equal(reject_log1.labels, reject_log3.labels)
def test_fnirs():
    """Test that autoreject runs on fNIRS data."""
    # NOTE: this local ``raw`` deliberately shadows the module-level MEG raw.
    raw = mne.io.read_raw_nirx(os.path.join(
        mne.datasets.fnirs_motor.data_path(), 'Participant-1'))
    raw.crop(tmax=1200)
    # raw intensity -> optical density -> haemoglobin concentration
    raw = mne.preprocessing.nirs.optical_density(raw)
    raw = mne.preprocessing.nirs.beer_lambert_law(raw)
    events, _ = mne.events_from_annotations(raw, event_id={'1.0': 1,
                                                           '2.0': 2,
                                                           '3.0': 3})
    event_dict = {'Control': 1, 'Tapping/Left': 2, 'Tapping/Right': 3}
    epochs = mne.Epochs(raw, events, event_id=event_dict,
                        tmin=-5, tmax=15,
                        proj=True, baseline=(None, 0), preload=True,
                        detrend=None, verbose=True)

    # Test autoreject: fitting must drop at least one of the 37 epochs
    ar = AutoReject()
    assert len(epochs) == 37
    epochs_clean = ar.fit_transform(epochs)
    assert len(epochs_clean) < len(epochs)

    # Test threshold extraction: both haemoglobin channel types must get a
    # threshold in a physiologically plausible range
    reject = get_rejection_threshold(epochs)
    print(reject)
    assert "hbo" in reject.keys()
    assert "hbr" in reject.keys()
    assert reject["hbo"] < 0.001  # This is a very high value as sanity check
    assert reject["hbr"] < 0.001
    assert reject["hbr"] > 0.0
| 40.349432
| 79
| 0.643737
|
4a0113f5db097f501252b9c95d4ce5449ac20f00
| 250
|
py
|
Python
|
flask_firebase_admin/__init__.py
|
andrewrosss/flask-firebase-admin
|
c131f48fd8a2d20b9f26602d7d0a15dc7bc4f1ae
|
[
"MIT"
] | 21
|
2020-10-22T08:48:33.000Z
|
2022-03-23T01:33:24.000Z
|
flask_firebase_admin/__init__.py
|
andrewrosss/flask-firebase-admin
|
c131f48fd8a2d20b9f26602d7d0a15dc7bc4f1ae
|
[
"MIT"
] | 6
|
2021-03-11T18:10:28.000Z
|
2021-09-10T01:00:24.000Z
|
flask_firebase_admin/__init__.py
|
andrewrosss/flask-firebase-admin
|
c131f48fd8a2d20b9f26602d7d0a15dc7bc4f1ae
|
[
"MIT"
] | 3
|
2021-03-03T11:58:45.000Z
|
2021-05-16T19:00:44.000Z
|
"""Public entry points for the ``flask_firebase_admin`` package."""
from . import flask_firebase_admin
from . import status_codes
from .__version__ import __version__
from .flask_firebase_admin import FirebaseAdmin

# Names exported via ``from flask_firebase_admin import *``.
__all__ = (
    "flask_firebase_admin",
    "status_codes",
    "__version__",
    "FirebaseAdmin",
)
| 20.833333
| 47
| 0.76
|
4a0114228706b4a8cd29ed5bae820fac13ff542c
| 13,442
|
py
|
Python
|
tests/conftest.py
|
allisonmorgan/Dallinger
|
f171e28c352854a3c6ed6b21f25362cd933b17dc
|
[
"MIT"
] | 1
|
2019-08-01T16:15:44.000Z
|
2019-08-01T16:15:44.000Z
|
tests/conftest.py
|
allisonmorgan/Dallinger
|
f171e28c352854a3c6ed6b21f25362cd933b17dc
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
allisonmorgan/Dallinger
|
f171e28c352854a3c6ed6b21f25362cd933b17dc
|
[
"MIT"
] | null | null | null |
import mock
import os
import pytest
import shutil
import sys
import tempfile
from dallinger import information
from dallinger import models
from dallinger import networks
from dallinger import nodes
@pytest.fixture(scope="session", autouse=True)
def subprocess_coverage():
    """Enable coverage collection in subprocesses for the whole session."""
    # These env vars trigger coverage startup in spawned subprocesses and
    # point it at the repository root's config/data files.
    repo_root = os.path.dirname(os.path.dirname(__file__))
    os.environ.update({
        "COVERAGE_PROCESS_START": os.path.join(repo_root, ".coveragerc"),
        "COVERAGE_FILE": os.path.join(repo_root, ".coverage"),
    })
@pytest.fixture
def reset_sys_modules():
    """Drop any cached ``dallinger_experiment`` modules before the test."""
    for name in list(sys.modules):
        if name.startswith("dallinger_experiment"):
            del sys.modules[name]
@pytest.fixture()
def clear_workers():
    """Kill stray heroku processes before and after the test."""
    import subprocess

    def _zap():
        kills = [["pkill", "-f", "heroku"]]
        for kill in kills:
            try:
                subprocess.check_call(kill)
            # Fixed: the original caught bare ``Exception`` and then read
            # ``e.returncode`` unconditionally, which raises AttributeError
            # for exceptions without that attribute (e.g. FileNotFoundError
            # when pkill is absent). Only CalledProcessError carries it.
            except subprocess.CalledProcessError as e:
                # pkill exits 1 when no process matched; that's fine.
                if e.returncode != 1:
                    raise

    _zap()
    yield
    _zap()
@pytest.fixture(scope="session")
def root():
    """Absolute path of the repository root (parent of this tests dir)."""
    tests_dir = os.path.dirname(__file__)
    return os.path.abspath(os.path.join(tests_dir, ".."))
# This fixture is used automatically and ensures that
# the current working directory is reset if other test classes changed it.
@pytest.fixture(scope="class")
def cwd(root):
    """Ensure each test class starts in the repository root."""
    os.chdir(root)
@pytest.fixture(scope="class")
def experiment_dir(root):
    """Run the test class from tests/experiment, then restore the root CWD.

    NOTE: the chdir target is relative, so this relies on the ``cwd``
    fixture having put us at the repository root first.
    """
    os.chdir("tests/experiment")
    yield
    os.chdir(root)
@pytest.fixture(scope="class")
def bartlett_dir(root):
    """Run the test class from the bartlett1932 demo dir, then restore CWD."""
    os.chdir("demos/dlgr/demos/bartlett1932")
    yield
    os.chdir(root)
@pytest.fixture(scope="class", autouse=True)
def reset_config():
    """Purge cached experiment modules and extra config after each class."""
    yield

    # Make sure dallinger_experiment module isn't kept between tests
    import sys

    stale = [name for name in sys.modules
             if name.startswith("dallinger_experiment")]
    for name in stale:
        del sys.modules[name]

    # Make sure extra parameters aren't kept between tests
    import dallinger.config

    dallinger.config.config = None
@pytest.fixture(scope="session")
def env():
    """Session environment mapping with a guaranteed HOME directory.

    Heroku requires a home directory to start up. On CI sandboxes (or any
    environment without HOME) we create a fake one via tempfile and patch
    it into the environment for the whole session.
    """
    environ_orig = os.environ.copy()
    running_on_ci = environ_orig.get("CI", False)
    have_home_dir = environ_orig.get("HOME", False)
    if not running_on_ci and have_home_dir:
        yield environ_orig
    else:
        fake_home = tempfile.mkdtemp()
        environ_patched = environ_orig.copy()
        environ_patched.update({"HOME": fake_home})
        # NOTE(review): rebinding os.environ to a plain dict bypasses
        # putenv(), so child processes may not see the patched HOME --
        # confirm whether os.environ.update()/mock.patch.dict was intended.
        os.environ = environ_patched
        yield environ_patched
        os.environ = environ_orig
        shutil.rmtree(fake_home, ignore_errors=True)
@pytest.fixture()
def webapp(active_config):
    """Flask test client for the experiment server (templates from CWD)."""
    from dallinger.experiment_server import sockets

    app = sockets.app
    app.root_path = os.getcwd()  # look in the right place for test's templates
    app.config.update({"DEBUG": True, "TESTING": True})
    client = app.test_client()
    yield client
@pytest.fixture
def test_request(webapp):
    # Factory for Flask request contexts bound to the test application.
    return webapp.application.test_request_context
@pytest.fixture
def a(db_session):
    """ Provides a standard way of building model objects in tests.

    def test_using_all_defaults(self, a):
        assert a.info()

    def test_with_participant_node(self, a):
        participant = a.participant(worker_id=42)
        info = a.info(origin=a.node(participant=participant))
    """

    class ModelFactory(object):
        # Factory of Dallinger model objects. Default values may themselves
        # be callables (typically other factory methods bound on self);
        # _build() resolves those lazily, so e.g. a.node() builds its own
        # Star network by default unless a ``network=`` kwarg overrides it.

        def __init__(self, db):
            self.db = db

        def agent(self, **kw):
            defaults = {"network": self.network}  # bound method: built lazily
            defaults.update(kw)
            return self._build(nodes.Agent, defaults)

        def info(self, **kw):
            defaults = {"origin": self.star, "contents": None}
            defaults.update(kw)
            return self._build(models.Info, defaults)

        def gene(self, **kw):
            defaults = {}
            defaults.update(kw)
            return self._build(information.Gene, defaults)

        def meme(self, **kw):
            defaults = {}
            defaults.update(kw)
            return self._build(information.Meme, defaults)

        def participant(self, **kw):
            defaults = {
                "recruiter_id": "hotair",
                "worker_id": "1",
                "assignment_id": "1",
                "hit_id": "1",
                "mode": "test",
            }
            defaults.update(kw)
            return self._build(models.Participant, defaults)

        def network(self, **kw):
            defaults = {}
            defaults.update(kw)
            return self._build(models.Network, defaults)

        def burst(self, **kw):
            defaults = {}
            defaults.update(kw)
            return self._build(networks.Burst, defaults)

        def chain(self, **kw):
            defaults = {}
            defaults.update(kw)
            return self._build(networks.Chain, defaults)

        def delayed_chain(self, **kw):
            defaults = {}
            defaults.update(kw)
            return self._build(networks.DelayedChain, defaults)

        def empty(self, **kw):
            defaults = {}
            defaults.update(kw)
            return self._build(networks.Empty, defaults)

        def fully_connected(self, **kw):
            defaults = {}
            defaults.update(kw)
            return self._build(networks.FullyConnected, defaults)

        def replicator(self, **kw):
            defaults = {"network": self.network}
            defaults.update(kw)
            return self._build(nodes.ReplicatorAgent, defaults)

        def scale_free(self, **kw):
            defaults = {"m0": 1, "m": 1}
            defaults.update(kw)
            return self._build(networks.ScaleFree, defaults)

        def sequential_microsociety(self, **kw):
            defaults = {"n": 1}
            defaults.update(kw)
            return self._build(networks.SequentialMicrosociety, defaults)

        def split_sample(self, **kw):
            defaults = {}
            defaults.update(kw)
            return self._build(networks.SplitSampleNetwork, defaults)

        def star(self, **kw):
            defaults = {"max_size": 2}
            defaults.update(kw)
            return self._build(networks.Star, defaults)

        def node(self, **kw):
            defaults = {"network": self.star}
            defaults.update(kw)
            return self._build(models.Node, defaults)

        def source(self, **kw):
            defaults = {"network": self.star}
            defaults.update(kw)
            # nodes.Source is intended to be abstract
            return self._build(nodes.RandomBinaryStringSource, defaults)

        def _build(self, klass, attrs):
            # Some of our default values are factories:
            for k, v in attrs.items():
                if callable(v):
                    attrs[k] = v()
            obj = klass(**attrs)
            self._insert(obj)
            return obj

        def _insert(self, thing):
            db_session.add(thing)
            db_session.flush()  # This gets us an ID and sets relationships

    return ModelFactory(db_session)
@pytest.fixture
def stub_config():
    """Builds a standardized Configuration object and returns it, but does
    not load it as the active configuration returned by
    dallinger.config.get_config()
    """
    # Representative fake values for every configuration key the tests use.
    defaults = {
        u"ad_group": u"Test ad group",
        u"approve_requirement": 95,
        u"assign_qualifications": True,
        u"auto_recruit": True,
        u"aws_access_key_id": u"fake aws key",
        u"aws_secret_access_key": u"fake aws secret",
        u"aws_region": u"us-east-1",
        u"base_payment": 0.01,
        u"base_port": 5000,
        u"browser_exclude_rule": u"MSIE, mobile, tablet",
        u"clock_on": False,
        u"contact_email_on_error": u"error_contact@test.com",
        u"dallinger_email_address": u"test@example.com",
        u"database_size": u"standard-0",
        u"redis_size": u"premium-0",
        u"database_url": u"postgresql://postgres@localhost/dallinger",
        u"description": u"fake HIT description",
        u"duration": 1.0,
        u"dyno_type": u"free",
        u"heroku_auth_token": u"heroku secret",
        u"heroku_team": u"",
        u"host": u"0.0.0.0",
        u"id": u"some experiment uid",
        u"keywords": u"kw1, kw2, kw3",
        u"lifetime": 1,
        u"logfile": u"-",
        u"loglevel": 0,
        u"mode": u"debug",
        u"notification_url": u"https://url-of-notification-route",
        u"num_dynos_web": 1,
        u"num_dynos_worker": 1,
        u"organization_name": u"Monsters University",
        u"sentry": True,
        u"smtp_host": u"smtp.fakehost.com:587",
        u"smtp_username": u"fake email username",
        u"smtp_password": u"fake email password",
        u"threads": u"1",
        u"title": u"fake experiment title",
        u"us_only": True,
        u"webdriver_type": u"phantomjs",
        u"whimsical": True,
        u"replay": False,
        u"worker_multiplier": 1.5,
    }
    from dallinger.config import default_keys
    from dallinger.config import Configuration

    config = Configuration()
    for key in default_keys:
        config.register(*key)
    config.extend(defaults.copy())
    # Patch load() so we don't update any key/value pairs from actual files:
    config.load = mock.Mock(side_effect=lambda: setattr(config, "ready", True))
    config.ready = True
    return config
@pytest.fixture
def active_config(stub_config):
    """Loads the standard config as the active configuration returned by
    dallinger.config.get_config() and returns it.
    """
    from dallinger import config as dallinger_config

    dallinger_config.config = stub_config
    return stub_config
@pytest.fixture
def tempdir():
    """Create a temp dir, chdir into it, and clean everything up afterwards."""
    original_dir = os.getcwd()
    scratch = tempfile.mkdtemp()
    os.chdir(scratch)
    yield scratch
    os.chdir(original_dir)
    shutil.rmtree(scratch, ignore_errors=True)
@pytest.fixture
def in_tempdir(tempdir):
    """Run the test with ``tempdir`` as the current working directory."""
    original_dir = os.getcwd()
    os.chdir(tempdir)
    yield tempdir
    os.chdir(original_dir)
@pytest.fixture(scope="class")
def aws_creds():
    """AWS credentials pulled from the loaded Dallinger configuration."""
    from dallinger.config import get_config

    config = get_config()
    if not config.ready:
        config.load()
    keys = ("aws_access_key_id", "aws_secret_access_key")
    return {key: config.get(key) for key in keys}
@pytest.fixture
def db_session():
    """Fresh database session; the schema is dropped and recreated per test."""
    import dallinger.db

    # The drop_all call can hang without this; see:
    # https://stackoverflow.com/questions/13882407/sqlalchemy-blocked-on-dropping-tables
    dallinger.db.session.close()
    session = dallinger.db.init_db(drop_all=True)
    yield session
    session.rollback()
    session.close()
@pytest.fixture
def custom_app_output():
    """Patch heroku's check_output with canned responses for app commands."""
    with mock.patch("dallinger.heroku.tools.check_output") as check_output:

        def my_check_output(cmd):
            # Fake responses for the heroku CLI invocations used in tests.
            if "auth:whoami" in cmd:
                return b"test@example.com"
            elif "config:get" in cmd:
                if "CREATOR" in cmd and "dlgr-my-uid" in cmd:
                    return b"test@example.com"
                elif "DALLINGER_UID" in cmd:
                    # NOTE(review): this branch returns str while the others
                    # return bytes -- confirm callers decode consistently.
                    return cmd[-1].replace("dlgr-", "")
                return b""
            elif "apps" in cmd:
                # JSON listing of two fake deployed apps.
                return b"""[
    {"name": "dlgr-my-uid",
     "created_at": "2018-01-01T12:00Z",
     "web_url": "https://dlgr-my-uid.herokuapp.com"},
    {"name": "dlgr-another-uid",
     "created_at": "2018-01-02T00:00Z",
     "web_url": "https://dlgr-another-uid.herokuapp.com"}
]"""

        check_output.side_effect = my_check_output
        yield check_output
def pytest_addoption(parser):
    """Register the custom command line flags used by this test suite."""
    # (flag args, addoption kwargs) in registration order.
    option_specs = [
        (["--firefox"],
         {"action": "store_true", "help": "Run firefox bot tests"}),
        (["--chrome"],
         {"action": "store_true", "help": "Run chrome bot tests"}),
        (["--phantomjs"],
         {"action": "store_true", "help": "Run phantomjs bot tests"}),
        (["--runslow"],
         {"action": "store_true", "default": False,
          "help": "run slow tests"}),
        (["--webdriver"],
         {"nargs": "?", "action": "store",
          "help": "URL of selenium server including /wd/hub to run remote tests against",
          "metavar": "URL"}),
        (["--runbot"],
         {"action": "store_true",
          "help": "Run an experiment using a bot during tests"}),
        (["--manual"],
         {"action": "store_true",
          "help": "Run manual interactive tests during test run"}),
        (["--mturkfull"],
         {"action": "store_true",
          "help": "Run comprehensive MTurk integration tests during test run"}),
        (["--heroku"],
         {"action": "store_true",
          "help": "Run tests requiring heroku login"}),
        (["--griduniverse"],
         {"action": "store_true",
          "help": "Run griduinverse tests and fail if not all pass"}),
    ]
    for args, kwargs in option_specs:
        parser.addoption(*args, **kwargs)
def pytest_collection_modifyitems(config, items):
    """Mark tests tagged ``slow`` as skipped unless --runslow was given."""
    if config.getoption("--runslow"):
        # --runslow given in cli: do not skip slow tests
        return
    slow_marker = pytest.mark.skip(reason="need --runslow option to run")
    for test_item in items:
        if "slow" in test_item.keywords:
            test_item.add_marker(slow_marker)
| 29.47807
| 88
| 0.610549
|
4a0115d19fb03054d244e10d15438f513856654f
| 129
|
py
|
Python
|
contractpy/__init__.py
|
KumarManoj-S/pycontract
|
5682741af0f98ac40819bd7c6ff7b48d0e9abf6b
|
[
"MIT"
] | 8
|
2020-03-23T12:50:08.000Z
|
2021-04-05T02:27:34.000Z
|
contractpy/__init__.py
|
KumarManoj-S/pycontract
|
5682741af0f98ac40819bd7c6ff7b48d0e9abf6b
|
[
"MIT"
] | 1
|
2020-07-26T16:20:37.000Z
|
2020-07-27T06:19:33.000Z
|
contractpy/__init__.py
|
KumarManoj-S/pycontract
|
5682741af0f98ac40819bd7c6ff7b48d0e9abf6b
|
[
"MIT"
] | 2
|
2020-03-24T15:39:20.000Z
|
2020-07-26T16:49:34.000Z
|
"""Public entry points for the ``contractpy`` package."""
from contractpy.main.contract import Contract
from contractpy.main.types import Types

# Names exported via ``from contractpy import *``.
__all__ = [
    'Contract',
    'Types'
]
| 16.125
| 45
| 0.728682
|
4a01166282e1786694d14980e2c4ee5a623349e3
| 224
|
py
|
Python
|
Inicio/001.py
|
daisyaragao/Python
|
9f31a6e8b423747d1c7392e4e8f481866987dbca
|
[
"MIT"
] | null | null | null |
Inicio/001.py
|
daisyaragao/Python
|
9f31a6e8b423747d1c7392e4e8f481866987dbca
|
[
"MIT"
] | null | null | null |
Inicio/001.py
|
daisyaragao/Python
|
9f31a6e8b423747d1c7392e4e8f481866987dbca
|
[
"MIT"
] | null | null | null |
# Exercise: read an integer and print its predecessor and its successor.
# (User-facing prompt/output strings are intentionally kept in Portuguese.)
n = int(input('Digite um número: '))
print('O seu antecessor é: {} '.format(n-1))
print('O seu sucessor é: {} '.format(n+1))
| 32
| 96
| 0.683036
|
4a011718872fdc3c805b4edc118f9b4d0d5a816d
| 8,633
|
py
|
Python
|
jina/parsers/__init__.py
|
qwe123coder/jina
|
43215cd044c47d6b02111cd249499737d5d0a4e3
|
[
"Apache-2.0"
] | 1
|
2021-09-16T03:54:34.000Z
|
2021-09-16T03:54:34.000Z
|
jina/parsers/__init__.py
|
AnudeepGunukula/jina
|
a4183a3b57d194c6f4b92ec48b485ebedd37b88d
|
[
"Apache-2.0"
] | null | null | null |
jina/parsers/__init__.py
|
AnudeepGunukula/jina
|
a4183a3b57d194c6f4b92ec48b485ebedd37b88d
|
[
"Apache-2.0"
] | null | null | null |
import argparse
from jina.parsers.client import mixin_comm_protocol_parser
from .helper import _SHOW_ALL_ARGS
from .peapods.pod import mixin_k8s_pod_parser
def set_pea_parser(parser=None):
    """Set the parser for the Pea

    :param parser: an optional existing parser to build upon
    :return: the parser
    """
    if not parser:
        from .base import set_base_parser

        parser = set_base_parser()

    from .peapods.base import mixin_base_ppr_parser
    from .peapods.runtimes.zmq import mixin_zmq_runtime_parser
    from .peapods.runtimes.zed import mixin_zed_runtime_parser
    from .peapods.runtimes.container import mixin_container_runtime_parser
    from .peapods.runtimes.remote import mixin_remote_runtime_parser
    from .peapods.pea import mixin_pea_parser
    from .peapods.runtimes.distributed import mixin_distributed_feature_parser

    # Each mixin registers one group of CLI arguments; the call order below
    # determines the order of argument groups in --help output.
    mixin_base_ppr_parser(parser)
    mixin_zmq_runtime_parser(parser)
    mixin_zed_runtime_parser(parser)
    mixin_container_runtime_parser(parser)
    mixin_remote_runtime_parser(parser)
    mixin_distributed_feature_parser(parser)
    mixin_pea_parser(parser)

    return parser
def set_pod_parser(parser=None):
    """Set the parser for the Pod

    :param parser: an optional existing parser to build upon
    :return: the parser
    """
    if not parser:
        from .base import set_base_parser

        parser = set_base_parser()

    # A Pod accepts every Pea argument plus its own and k8s-specific ones.
    set_pea_parser(parser)

    from .peapods.pod import mixin_base_pod_parser

    mixin_base_pod_parser(parser)
    mixin_k8s_pod_parser(parser)

    return parser
def set_gateway_parser(parser=None):
    """Set the parser for the gateway arguments

    :param parser: an optional existing parser to build upon
    :return: the parser
    """
    if not parser:
        from .base import set_base_parser

        parser = set_base_parser()

    from .peapods.base import mixin_base_ppr_parser
    from .peapods.runtimes.zmq import mixin_zmq_runtime_parser
    from .peapods.runtimes.zed import mixin_zed_runtime_parser
    from .peapods.runtimes.remote import (
        mixin_gateway_parser,
        mixin_prefetch_parser,
        mixin_http_gateway_parser,
        mixin_compressor_parser,
    )
    from .peapods.pea import mixin_pea_parser

    # Mixin call order determines argument-group order in --help output.
    mixin_base_ppr_parser(parser)
    mixin_zmq_runtime_parser(parser)
    mixin_zed_runtime_parser(parser)
    mixin_prefetch_parser(parser)
    mixin_http_gateway_parser(parser)
    mixin_compressor_parser(parser)
    mixin_comm_protocol_parser(parser)
    mixin_gateway_parser(parser)
    mixin_pea_parser(parser)

    from ..enums import SocketType, PodRoleType

    # Gateway-specific defaults overriding the generic Pea/runtime values.
    parser.set_defaults(
        name='gateway',
        socket_in=SocketType.PULL_CONNECT,  # otherwise there can be only one client at a time
        socket_out=SocketType.PUSH_CONNECT,
        ctrl_with_ipc=True,  # otherwise ctrl port would be conflicted
        runtime_cls='GRPCRuntime',
        pod_role=PodRoleType.GATEWAY,
    )

    parser.add_argument(
        '--dynamic-routing',
        action='store_true',
        default=True,
        help='The Pod will setup the socket types of the HeadPea and TailPea depending on this argument.'
        if _SHOW_ALL_ARGS
        else argparse.SUPPRESS,
    )
    parser.add_argument(
        '--connect-to-predecessor',
        action='store_true',
        default=False,
        help='The head Pea of this Pod will connect to the TailPea of the predecessor Pod.',
    )
    return parser
def set_client_cli_parser(parser=None):
    """Set the parser for the cli client

    :param parser: an optional existing parser to build upon
    :return: the parser
    """
    if not parser:
        from .base import set_base_parser

        parser = set_base_parser()

    from .peapods.runtimes.remote import mixin_client_gateway_parser
    from .client import mixin_client_features_parser, mixin_comm_protocol_parser

    # Gateway address, client features, then protocol selection.
    mixin_client_gateway_parser(parser)
    mixin_client_features_parser(parser)
    mixin_comm_protocol_parser(parser)

    return parser
def set_help_parser(parser=None):
    """Set the parser for the jina help lookup

    :param parser: an optional existing parser to build upon
    :return: the parser
    """

    if not parser:
        from .base import set_base_parser

        parser = set_base_parser()

    parser.add_argument(
        'query',
        type=str,
        help='Lookup the usage & mention of the argument name in Jina API. The name can be fuzzy',
    )
    return parser
def get_main_parser():
    """The main parser for Jina

    Builds the top-level CLI: user-facing sub-commands first, then
    low-level/internal ones that are hidden unless JINA_FULL_CLI is set.

    :return: the parser
    """
    from .base import set_base_parser
    from .helloworld import set_hello_parser
    from .helper import _chf, _SHOW_ALL_ARGS
    from .export_api import set_export_api_parser
    from .flow import set_flow_parser
    from .ping import set_ping_parser
    from .hubble import set_hub_parser

    # from .optimizer import set_optimizer_parser

    # create the top-level parser
    parser = set_base_parser()

    sp = parser.add_subparsers(
        dest='cli',
        description='''
Use `%(prog)-8s [sub-command] --help` to get detailed information about each sub-command.
To show all commands, run `JINA_FULL_CLI=1 jina --help`.
''',
        required=True,
    )

    # --- user-facing sub-commands -------------------------------------
    set_hello_parser(
        sp.add_parser(
            'hello',
            help='👋 Hello Jina!',
            description='Start hello world demos.',
            formatter_class=_chf,
        )
    )

    set_pea_parser(
        sp.add_parser(
            'executor',
            help='Start an Executor',
            description='Start an Executor. Executor is how Jina processes Document.',
            formatter_class=_chf,
        )
    )

    set_flow_parser(
        sp.add_parser(
            'flow',
            description='Start a Flow. Flow is how Jina streamlines and distributes Executors.',
            help='Start a Flow',
            formatter_class=_chf,
        )
    )

    set_ping_parser(
        sp.add_parser(
            'ping',
            help='Ping an Executor',
            description='Ping a Pod and check its network connectivity.',
            formatter_class=_chf,
        )
    )

    set_gateway_parser(
        sp.add_parser(
            'gateway',
            description='Start a Gateway that receives client Requests via gRPC/REST interface',
            **(dict(help='Start a Gateway')) if _SHOW_ALL_ARGS else {},
            formatter_class=_chf,
        )
    )

    set_hub_parser(
        sp.add_parser(
            'hub',
            help='Push/pull an Executor to/from Jina Hub',
            description='Push/Pull an Executor to/from Jina Hub',
            formatter_class=_chf,
        )
    )

    set_help_parser(
        sp.add_parser(
            'help',
            help='Show help text of a CLI argument',
            description='Show help text of a CLI argument',
            formatter_class=_chf,
        )
    )
    # Below are low-level / internal / experimental CLIs, hidden from users by default

    set_pea_parser(
        sp.add_parser(
            'pea',
            description='Start a Pea. '
            'You should rarely use this directly unless you '
            'are doing low-level orchestration',
            formatter_class=_chf,
            **(dict(help='Start a Pea')) if _SHOW_ALL_ARGS else {},
        )
    )

    # Fixed: the 'pod' sub-command was registered twice (copy-paste
    # duplicate); the second registration merely overwrote the first.
    set_pod_parser(
        sp.add_parser(
            'pod',
            description='Start a Pod. '
            'You should rarely use this directly unless you '
            'are doing low-level orchestration',
            formatter_class=_chf,
            **(dict(help='Start a Pod')) if _SHOW_ALL_ARGS else {},
        )
    )

    set_client_cli_parser(
        sp.add_parser(
            'client',
            description='Start a Python client that connects to a remote Jina gateway',
            formatter_class=_chf,
            **(dict(help='Start a Client')) if _SHOW_ALL_ARGS else {},
        )
    )

    set_export_api_parser(
        sp.add_parser(
            'export-api',
            description='Export Jina API to JSON/YAML file for 3rd party applications',
            formatter_class=_chf,
            **(dict(help='Export Jina API to file')) if _SHOW_ALL_ARGS else {},
        )
    )

    return parser
| 28.029221
| 105
| 0.645778
|
4a0117c1de86897104eeb0f1bfebed27df899a5c
| 14,434
|
py
|
Python
|
src/werkzeug/middleware/lint.py
|
omerholz/werkzeug
|
5f3994f155055ed8b4231df8ac41dd38849d90f8
|
[
"BSD-3-Clause"
] | null | null | null |
src/werkzeug/middleware/lint.py
|
omerholz/werkzeug
|
5f3994f155055ed8b4231df8ac41dd38849d90f8
|
[
"BSD-3-Clause"
] | null | null | null |
src/werkzeug/middleware/lint.py
|
omerholz/werkzeug
|
5f3994f155055ed8b4231df8ac41dd38849d90f8
|
[
"BSD-3-Clause"
] | null | null | null |
"""
WSGI Protocol Linter
====================
This module provides a middleware that performs sanity checks on the
behavior of the WSGI server and application. It checks that the
:pep:`3333` WSGI spec is properly implemented. It also warns on some
common HTTP errors such as non-empty responses for 304 status codes.
.. autoclass:: LintMiddleware
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import typing as t
from types import TracebackType
from urllib.parse import urlparse
from warnings import warn
from ..datastructures import Headers
from ..http import is_entity_header
from ..wsgi import FileWrapper
if t.TYPE_CHECKING:
from _typeshed.wsgi import StartResponse, WSGIApplication, WSGIEnvironment
class WSGIWarning(Warning):
    """Warning class for WSGI warnings.

    Emitted via :func:`warnings.warn` whenever the server or application
    violates the WSGI (:pep:`3333`) protocol.
    """
class HTTPWarning(Warning):
    """Warning class for HTTP warnings.

    Emitted for HTTP protocol mistakes (e.g. a body on a 304 response)
    rather than WSGI calling-convention violations.
    """
def check_type(context: str, obj: object, need: t.Type = str) -> None:
    """Emit a :class:`WSGIWarning` unless *obj* is exactly of type *need*.

    An exact ``type`` comparison is used on purpose: WSGI mandates the
    concrete built-in types, so even subclasses are reported.
    """
    actual = type(obj)
    if actual is need:
        return
    warn(
        f"{context!r} requires {need.__name__!r}, got {actual.__name__!r}.",
        WSGIWarning,
        stacklevel=3,
    )
class InputStream:
    """Wraps ``wsgi.input`` and warns about unsafe usage patterns.

    WSGI input streams have no guaranteed EOF marker, so unbounded reads
    may never return on a conforming server; the wrapper flags those and
    other calls the spec does not allow.
    """

    def __init__(self, stream: t.IO[bytes]) -> None:
        self._stream = stream

    def read(self, *args: t.Any) -> bytes:
        """Proxy ``read``; warn on argument counts WSGI does not permit."""
        if len(args) == 0:
            warn(
                "WSGI does not guarantee an EOF marker on the input stream, thus making"
                " calls to 'wsgi.input.read()' unsafe. Conforming servers may never"
                " return from this call.",
                WSGIWarning,
                stacklevel=2,
            )
        elif len(args) != 1:
            warn(
                "Too many parameters passed to 'wsgi.input.read()'.",
                WSGIWarning,
                stacklevel=2,
            )
        # Forward to the real stream regardless so behavior is unchanged.
        return self._stream.read(*args)

    def readline(self, *args: t.Any) -> bytes:
        """Proxy ``readline``; warn because it is unsafe under WSGI."""
        if len(args) == 0:
            warn(
                "Calls to 'wsgi.input.readline()' without arguments are unsafe. Use"
                " 'wsgi.input.read()' instead.",
                WSGIWarning,
                stacklevel=2,
            )
        elif len(args) == 1:
            warn(
                "'wsgi.input.readline()' was called with a size hint. WSGI does not"
                " support this, although it's available on all major servers.",
                WSGIWarning,
                stacklevel=2,
            )
        else:
            raise TypeError(
                "Too many arguments passed to 'wsgi.input.readline()'.")
        return self._stream.readline(*args)

    def __iter__(self) -> t.Iterator[bytes]:
        try:
            return iter(self._stream)
        except TypeError:
            # The spec requires an iterable stream; degrade to empty rather
            # than crash, but tell the developer.
            warn("'wsgi.input' is not iterable.", WSGIWarning, stacklevel=2)
            return iter(())

    def close(self) -> None:
        # The server, not the application, owns the input stream; closing
        # it from application code is always a bug worth flagging.
        warn("The application closed the input stream!",
             WSGIWarning,
             stacklevel=2)
        self._stream.close()
class ErrorStream:
    """Wraps ``wsgi.errors`` and validates how the application uses it.

    Only ``str`` may be written, and the application must never close the
    server-owned error stream.
    """

    def __init__(self, stream: t.IO[str]) -> None:
        self._stream = stream

    def write(self, s: str) -> None:
        # Reject bytes (and anything that is not exactly str).
        check_type("wsgi.error.write()", s, str)
        self._stream.write(s)

    def flush(self) -> None:
        self._stream.flush()

    def writelines(self, seq: t.Iterable[str]) -> None:
        # Route every line through write() so each one is type-checked.
        for item in seq:
            self.write(item)

    def close(self) -> None:
        # The error stream belongs to the server; closing it is an app bug.
        warn("The application closed the error stream!",
             WSGIWarning,
             stacklevel=2)
        self._stream.close()
class GuardedWrite:
    """Wraps the ``write`` callable returned by ``start_response``.

    Verifies each chunk is ``bytes`` and records its length so the total
    body size can later be compared against ``Content-Length``.
    """

    def __init__(
        self, write: t.Callable[[bytes], None], chunks: t.List[int]
    ) -> None:
        self._write = write
        self._chunks = chunks

    def __call__(self, chunk: bytes) -> None:
        check_type("write()", chunk, bytes)
        self._write(chunk)
        self._chunks.append(len(chunk))
class GuardedIterator:
    """Wraps the application's response iterator for validation.

    Records the size of every chunk yielded so that :meth:`close` can
    compare the bytes actually produced with the declared
    ``Content-Length`` and with status-code-specific body rules.
    """

    def __init__(
        self,
        iterator: t.Iterable[bytes],
        headers_set: t.Tuple[int, Headers],
        chunks: t.List[int],
    ) -> None:
        self._iterator = iterator
        self._next = iter(iterator).__next__
        self.closed = False
        # Filled in by start_response: (status_code, Headers).
        # Empty/falsy until the application actually starts the response.
        self.headers_set = headers_set
        self.chunks = chunks

    def __iter__(self) -> "GuardedIterator":
        return self

    def __next__(self) -> bytes:
        if self.closed:
            warn("Iterated over closed 'app_iter'.", WSGIWarning, stacklevel=2)
        rv = self._next()
        if not self.headers_set:
            warn(
                "The application returned before it started the response.",
                WSGIWarning,
                stacklevel=2,
            )
        check_type("application iterator items", rv, bytes)
        self.chunks.append(len(rv))
        return rv

    def close(self) -> None:
        """Close the wrapped iterator and run end-of-response HTTP checks."""
        self.closed = True
        if hasattr(self._iterator, "close"):
            self._iterator.close()  # type: ignore
        if self.headers_set:
            status_code, headers = self.headers_set
            bytes_sent = sum(self.chunks)
            content_length = headers.get("content-length", type=int)
            if status_code == 304:
                # 304 must not carry entity headers (other than the two
                # allowed exceptions) or any body bytes.
                for key, _value in headers:
                    key = key.lower()
                    if key not in ("expires", "content-location"
                                   ) and is_entity_header(key):
                        warn(f"Entity header {key!r} found in 304 response.",
                             HTTPWarning)
                if bytes_sent:
                    warn("304 responses must not have a body.", HTTPWarning)
            elif 100 <= status_code < 200 or status_code == 204:
                # 1xx and 204 responses are defined to be body-less.
                if content_length != 0:
                    warn(
                        f"{status_code} responses must have an empty content length.",
                        HTTPWarning,
                    )
                if bytes_sent:
                    warn(f"{status_code} responses must not have a body.",
                         HTTPWarning)
            elif content_length is not None and content_length != bytes_sent:
                warn(
                    "Content-Length and the number of bytes sent to the"
                    " client do not match.",
                    WSGIWarning,
                )

    def __del__(self) -> None:
        if not self.closed:
            try:
                warn("Iterator was garbage collected before it was closed.",
                     WSGIWarning)
            except Exception:
                # warn() can fail during interpreter shutdown; never raise
                # from __del__.
                pass
class LintMiddleware:
    """Warns about common errors in the WSGI and HTTP behavior of the
    server and wrapped application. Some of the issues it checks are:
    - invalid status codes
    - non-bytes sent to the WSGI server
    - strings returned from the WSGI application
    - non-empty conditional responses
    - unquoted etags
    - relative URLs in the Location header
    - unsafe calls to wsgi.input
    - unclosed iterators
    Error information is emitted using the :mod:`warnings` module.
    :param app: The WSGI application to wrap.
    .. code-block:: python
        from werkzeug.middleware.lint import LintMiddleware
        app = LintMiddleware(app)
    """

    def __init__(self, app: "WSGIApplication") -> None:
        self.app = app

    def check_environ(self, environ: "WSGIEnvironment") -> None:
        """Warn about a malformed WSGI environ: wrong container type,
        missing required keys, or bad ``SCRIPT_NAME``/``PATH_INFO``."""
        if type(environ) is not dict:
            warn(
                "WSGI environment is not a standard Python dict.",
                WSGIWarning,
                stacklevel=4,
            )
        for key in (
                "REQUEST_METHOD",
                "SERVER_NAME",
                "SERVER_PORT",
                "wsgi.version",
                "wsgi.input",
                "wsgi.errors",
                "wsgi.multithread",
                "wsgi.multiprocess",
                "wsgi.run_once",
        ):
            if key not in environ:
                warn(
                    f"Required environment key {key!r} not found",
                    WSGIWarning,
                    stacklevel=3,
                )
        # NOTE(review): this raises KeyError (instead of only warning) when
        # "wsgi.version" is missing entirely — confirm that is intended.
        if environ["wsgi.version"] != (1, 0):
            warn("Environ is not a WSGI 1.0 environ.",
                 WSGIWarning,
                 stacklevel=3)
        script_name = environ.get("SCRIPT_NAME", "")
        path_info = environ.get("PATH_INFO", "")
        if script_name and script_name[0] != "/":
            warn(
                f"'SCRIPT_NAME' does not start with a slash: {script_name!r}",
                WSGIWarning,
                stacklevel=3,
            )
        if path_info and path_info[0] != "/":
            warn(
                f"'PATH_INFO' does not start with a slash: {path_info!r}",
                WSGIWarning,
                stacklevel=3,
            )

    def check_start_response(
        self,
        status: str,
        headers: t.List[t.Tuple[str, str]],
        exc_info: t.Optional[t.Tuple[t.Type[BaseException], BaseException,
                                     TracebackType]],
    ) -> t.Tuple[int, Headers]:
        """Validate the arguments the application passed to
        ``start_response`` and return ``(status_code, Headers)``."""
        check_type("status", status, str)
        status_code_str = status.split(None, 1)[0]
        if len(status_code_str) != 3 or not status_code_str.isdigit():
            warn("Status code must be three digits.",
                 WSGIWarning,
                 stacklevel=3)
        if len(status) < 4 or status[3] != " ":
            warn(
                f"Invalid value for status {status!r}. Valid status strings are three"
                " digits, a space and a status explanation.",
                WSGIWarning,
                stacklevel=3,
            )
        status_code = int(status_code_str)
        if status_code < 100:
            warn("Status code < 100 detected.", WSGIWarning, stacklevel=3)
        if type(headers) is not list:
            warn("Header list is not a list.", WSGIWarning, stacklevel=3)
        for item in headers:
            if type(item) is not tuple or len(item) != 2:
                warn("Header items must be 2-item tuples.",
                     WSGIWarning,
                     stacklevel=3)
            name, value = item
            if type(name) is not str or type(value) is not str:
                warn("Header keys and values must be strings.",
                     WSGIWarning,
                     stacklevel=3)
            if name.lower() == "status":
                # A "Status" header duplicates the status line and clashes
                # with the CGI convention.
                warn(
                    "The status header is not supported due to"
                    " conflicts with the CGI spec.",
                    WSGIWarning,
                    stacklevel=3,
                )
        if exc_info is not None and not isinstance(exc_info, tuple):
            warn("Invalid value for exc_info.", WSGIWarning, stacklevel=3)
        # Normalize to a Headers object for the per-header checks below.
        headers = Headers(headers)
        self.check_headers(headers)
        return status_code, headers

    def check_headers(self, headers: Headers) -> None:
        """Warn about malformed ``ETag`` and ``Location`` header values."""
        etag = headers.get("etag")
        if etag is not None:
            if etag.startswith(("W/", "w/")):
                if etag.startswith("w/"):
                    warn(
                        "Weak etag indicator should be upper case.",
                        HTTPWarning,
                        stacklevel=4,
                    )
                # Strip the weak indicator before the quoting check.
                etag = etag[2:]
            if not (etag[:1] == etag[-1:] == '"'):
                warn("Unquoted etag emitted.", HTTPWarning, stacklevel=4)
        location = headers.get("location")
        if location is not None:
            if not urlparse(location).netloc:
                warn(
                    "Absolute URLs required for location header.",
                    HTTPWarning,
                    stacklevel=4,
                )

    def check_iterator(self, app_iter: t.Iterable[bytes]) -> None:
        """Warn when the application returned a bare bytestring instead of
        an iterable of bytes chunks."""
        if isinstance(app_iter, bytes):
            warn(
                "The application returned a bytestring. The response will send one"
                " character at a time to the client, which will kill performance."
                " Return a list or iterable instead.",
                WSGIWarning,
                stacklevel=3,
            )

    def __call__(self, *args: t.Any, **kwargs: t.Any) -> t.Iterable[bytes]:
        """WSGI entry point: validate the call, wrap environ streams, and
        return a guarded iterator over the application's response."""
        if len(args) != 2:
            warn("A WSGI app takes two arguments.", WSGIWarning, stacklevel=2)
        if kwargs:
            warn("A WSGI app does not take keyword arguments.",
                 WSGIWarning,
                 stacklevel=2)
        environ: "WSGIEnvironment" = args[0]
        start_response: "StartResponse" = args[1]
        self.check_environ(environ)
        environ["wsgi.input"] = InputStream(environ["wsgi.input"])
        environ["wsgi.errors"] = ErrorStream(environ["wsgi.errors"])
        # Hook our own file wrapper in so that applications will always
        # iterate to the end and we can check the content length.
        environ["wsgi.file_wrapper"] = FileWrapper
        # Mutated in place by checking_start_response and shared with the
        # GuardedIterator so its close() can run end-of-response checks.
        headers_set: t.List[t.Any] = []
        chunks: t.List[int] = []

        def checking_start_response(
                *args: t.Any, **kwargs: t.Any) -> t.Callable[[bytes], None]:
            # Validate the application's start_response call before
            # forwarding it to the server's real start_response.
            if len(args) not in {2, 3}:
                warn(
                    f"Invalid number of arguments: {len(args)}, expected 2 or 3.",
                    WSGIWarning,
                    stacklevel=2,
                )
            if kwargs:
                warn("'start_response' does not take keyword arguments.",
                     WSGIWarning)
            status: str = args[0]
            headers: t.List[t.Tuple[str, str]] = args[1]
            exc_info: t.Optional[t.Tuple[t.Type[BaseException], BaseException,
                                         TracebackType]] = (args[2]
                                                            if len(args) == 3
                                                            else None)
            headers_set[:] = self.check_start_response(status, headers,
                                                       exc_info)
            return GuardedWrite(start_response(status, headers, exc_info),
                                chunks)

        app_iter = self.app(environ,
                            t.cast("StartResponse", checking_start_response))
        self.check_iterator(app_iter)
        return GuardedIterator(app_iter,
                               t.cast(t.Tuple[int, Headers], headers_set),
                               chunks)
| 33.412037
| 88
| 0.522793
|
4a011810b1aa15948bd07ab362906562c0540151
| 662
|
py
|
Python
|
appfl/protos/utils.py
|
markxiao/APPFL
|
2940f01695b84d8239368e5d1fc3133c7f7a05ae
|
[
"MIT"
] | null | null | null |
appfl/protos/utils.py
|
markxiao/APPFL
|
2940f01695b84d8239368e5d1fc3133c7f7a05ae
|
[
"MIT"
] | null | null | null |
appfl/protos/utils.py
|
markxiao/APPFL
|
2940f01695b84d8239368e5d1fc3133c7f7a05ae
|
[
"MIT"
] | null | null | null |
import numpy as np
from .federated_learning_pb2 import DataBuffer
from .federated_learning_pb2 import TensorRecord
def construct_tensor_record(name, nparray):
    """Serialize a numpy array into a ``TensorRecord`` protobuf message.

    The shape is carried separately so the receiver can reconstruct the
    array from the raw C-ordered bytes.
    """
    shape = list(nparray.shape)
    raw = nparray.tobytes(order='C')
    return TensorRecord(name=name, data_shape=shape, data_bytes=raw)
def proto_to_databuffer(proto, max_size=(2 * 1024 * 1024)):
    """Split a serialized protobuf message into ``DataBuffer`` chunks.

    :param proto: protobuf message to serialize and stream.
    :param max_size: maximum chunk size in bytes (default 2 MiB).
    :yields: ``DataBuffer`` messages whose ``size`` field matches the
        actual chunk length.

    Fixes two defects of the previous version: the final (possibly
    shorter) chunk reported the full ``message_size`` instead of its real
    length, and an empty message produced ``range(0, 0, 0)`` which raises
    ``ValueError``.
    """
    data_bytes = proto.SerializeToString()
    total = len(data_bytes)
    if total == 0:
        # Nothing to send; also avoids a zero step in range() below.
        return
    step = min(max_size, total)
    for start in range(0, total, step):
        chunk = data_bytes[start:start + step]
        # Report the real chunk length: the last chunk may be shorter.
        yield DataBuffer(size=len(chunk), data_bytes=chunk)
| 36.777778
| 105
| 0.767372
|
4a011869967d951f9e254f91f4091a654b76ad82
| 3,225
|
py
|
Python
|
proxy_osint_api/swagger_server/models/query_object.py
|
CoNik/EuROCE
|
12435d47fe2dfcf7834b8fdd5f4f37a4df16037d
|
[
"Apache-2.0"
] | null | null | null |
proxy_osint_api/swagger_server/models/query_object.py
|
CoNik/EuROCE
|
12435d47fe2dfcf7834b8fdd5f4f37a4df16037d
|
[
"Apache-2.0"
] | 1
|
2020-07-20T11:07:50.000Z
|
2020-07-20T11:07:50.000Z
|
proxy_osint_api/swagger_server/models/query_object.py
|
CoNik/EuROCE
|
12435d47fe2dfcf7834b8fdd5f4f37a4df16037d
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from swagger_server.models.base_model_ import Model
from swagger_server import util
class QueryObject(Model):
    """Swagger model describing a single OSINT query target.

    NOTE: originally generated by the swagger code generator program;
    prefer regenerating over hand-editing when the API definition changes.
    """

    def __init__(self, ipv4: str=None, ipv6: str=None, fqdn: str=None, url: str=None):  # noqa: E501
        """Build a QueryObject.

        :param ipv4: IPv4 address of the target.
        :param ipv6: IPv6 address of the target.
        :param fqdn: fully qualified domain name of the target.
        :param url: URL of the target.
        """
        # Attribute name -> Python type, used by the deserializer.
        self.swagger_types = {
            'ipv4': str,
            'ipv6': str,
            'fqdn': str,
            'url': str
        }
        # Attribute name -> JSON key; note 'url' is serialized as 'URL'.
        self.attribute_map = {
            'ipv4': 'ipv4',
            'ipv6': 'ipv6',
            'fqdn': 'fqdn',
            'url': 'URL'
        }
        self._ipv4 = ipv4
        self._ipv6 = ipv6
        self._fqdn = fqdn
        self._url = url

    @classmethod
    def from_dict(cls, dikt) -> 'QueryObject':
        """Deserialize *dikt* into a :class:`QueryObject` instance."""
        return util.deserialize_model(dikt, cls)

    @property
    def ipv4(self) -> str:
        """IPv4 address of this QueryObject."""
        return self._ipv4

    @ipv4.setter
    def ipv4(self, ipv4: str):
        self._ipv4 = ipv4

    @property
    def ipv6(self) -> str:
        """IPv6 address of this QueryObject."""
        return self._ipv6

    @ipv6.setter
    def ipv6(self, ipv6: str):
        self._ipv6 = ipv6

    @property
    def fqdn(self) -> str:
        """Fully qualified domain name of this QueryObject."""
        return self._fqdn

    @fqdn.setter
    def fqdn(self, fqdn: str):
        self._fqdn = fqdn

    @property
    def url(self) -> str:
        """URL of this QueryObject."""
        return self._url

    @url.setter
    def url(self, url: str):
        self._url = url
| 22.552448
| 100
| 0.549147
|
4a0119474d44ced4f83467f7f80b3ac44cc770be
| 624
|
py
|
Python
|
backend/api/migrations/0003_liftermodel_competition.py
|
ChristchurchCityWeightlifting/lifter-api
|
a82b79c75106e7f4f8ea4b4e3e12d727213445e3
|
[
"MIT"
] | null | null | null |
backend/api/migrations/0003_liftermodel_competition.py
|
ChristchurchCityWeightlifting/lifter-api
|
a82b79c75106e7f4f8ea4b4e3e12d727213445e3
|
[
"MIT"
] | 5
|
2022-03-07T08:30:47.000Z
|
2022-03-22T09:15:52.000Z
|
backend/api/migrations/0003_liftermodel_competition.py
|
ChristchurchCityWeightlifting/lifter-api
|
a82b79c75106e7f4f8ea4b4e3e12d727213445e3
|
[
"MIT"
] | null | null | null |
# Generated by Django 4.0.3 on 2022-03-05 05:47
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds a required FK from LifterModel to CompetitionModel.
    # The default=1 only back-fills existing rows during this migration;
    # preserve_default=False removes it from the model state afterwards,
    # so new rows must always supply a competition explicitly.

    dependencies = [
        ("api", "0002_rename_event_name_competitionmodel_competition_name"),
    ]
    operations = [
        migrations.AddField(
            model_name="liftermodel",
            name="competition",
            field=models.ForeignKey(
                default=1,
                on_delete=django.db.models.deletion.CASCADE,
                to="api.competitionmodel",
            ),
            preserve_default=False,
        ),
    ]
| 24.96
| 76
| 0.597756
|
4a0119fcd2d4d6ec3d49e57930f2c78ff5cfa341
| 5,417
|
py
|
Python
|
src/data.py
|
mneedham/article-1
|
5523647c74fc6bb27a28e359dcf5ff3d9252a72f
|
[
"MIT"
] | 1
|
2021-12-10T18:34:40.000Z
|
2021-12-10T18:34:40.000Z
|
src/data.py
|
mneedham/article-1
|
5523647c74fc6bb27a28e359dcf5ff3d9252a72f
|
[
"MIT"
] | null | null | null |
src/data.py
|
mneedham/article-1
|
5523647c74fc6bb27a28e359dcf5ff3d9252a72f
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
import numpy as np
import os.path
import random
import json
from neo4j.v1 import GraphDatabase, Driver
# After fighting with gcloud, I dumped this here. Ideally, package elsewhere
# Connection presets for the two supported Neo4j deployments; one of these
# is selected at runtime via args.database ("hosted" or "local").
settings = {
    "hosted": {
        "neo4j_url": "bolt://b2355e7e.databases.neo4j.io",
        "neo4j_user": "readonly",
        "neo4j_password": "OS5jLkVsOUZCVTdQOU5PazBo"
    },
    "local": {
        "neo4j_url": "bolt://localhost",
        "neo4j_user": "neo4j",
        "neo4j_password": "neo4j"
    }
}
# Reduce code duplication throughout these methods
# Node categories used as dict keys throughout GraphData; the list order
# also defines the person/product alternation in gen_walk.
nouns = ["person", "product"]
class GraphData(object):
    """Loads person→review→product data from Neo4j and exposes TensorFlow
    ``tf.data`` input functions over it.

    Each review row becomes a ``(features, label)`` pair where the label is
    the review score. Rows can be served either uniformly shuffled
    (``input_fn_rand``) or along random graph walks that alternate between
    person and product neighbourhoods (``input_fn_walk``).
    """

    def _uuid_to_index(self, uuid, db):
        # Map a node uuid to a dense integer index, assigning on first sight.
        if uuid not in db:
            db[uuid] = len(db)
        return db[uuid]

    def _get_index(self, row, noun):
        # Dense index for the person/product referenced by this review row.
        return self._uuid_to_index(row[noun + "_id"], self.ids[noun])

    def __init__(self, args, person_ids, product_ids, test=False):
        """Fetch all review rows for the dataset from Neo4j.

        :param args: parsed CLI args; must provide ``database``,
            ``batch_size`` and ``use_random_walk``.
        :param person_ids: uuid -> dense index map for persons (mutated).
        :param product_ids: uuid -> dense index map for products (mutated).
        :param test: whether to load the held-out test split.
        """
        self.args = args
        # Cypher query returning one row per review, with the style vectors
        # used as features and the score used as the label.
        self.query = """
            MATCH p=
            (person:PERSON)
            -[:WROTE]->
            (review:REVIEW {dataset_name:{dataset_name}, test:{test}})
            -[:OF]->
            (product:PRODUCT)
            RETURN
            person.id as person_id,
            product.id as product_id,
            person.style_preference as person_style,
            product.style as product_style,
            review.score as review_score
        """
        self.query_params = {
            "dataset_name": "article_1",
            "test": test
        }
        self.settings = settings[args.database]
        driver = GraphDatabase.driver(
            self.settings["neo4j_url"],
            auth=(self.settings["neo4j_user"], self.settings["neo4j_password"]))
        # uuid -> dense index maps, shared with (and mutated for) the caller.
        self.ids = {
            "person": person_ids,
            "product": product_ids
        }

        def data_to_vec(i):
            # Build the (features, label) pair; review_score appears in the
            # features as well, mirroring the original pipeline.
            return ({
                "person": {
                    "id": self._get_index(i, "person"),
                    "style": i["person_style"],
                },
                "product": {
                    "id": self._get_index(i, "product"),
                    "style": i["product_style"],
                },
                "review_score": i["review_score"],
            },
                i["review_score"]
            )

        with driver.session() as session:
            self.raw_data = session.run(self.query, **self.query_params).data()
        data = [data_to_vec(i) for i in self.raw_data]
        # Remove any ordering biases from the database; the fixed seed keeps
        # the shuffle deterministic across runs.
        random.seed(123)
        random.shuffle(data)
        # Index the rows by person_id and product_id so gen_walk can jump to
        # every review touching a given node.
        self.indexed_data = {}
        for noun in nouns:
            self.indexed_data[noun] = {
                self._uuid_to_index(k, self.ids[noun]): [
                    data_to_vec(i) for i in self.raw_data if i[noun + "_id"] == k
                ]
                for k in self.ids[noun]
            }
        self.data = data

    # --------------------------------------------------------------------------
    # Input functions
    # --------------------------------------------------------------------------

    def gen_walk(self, batch_size):
        """Generate random walks across our graph."""

        def next_noun(prev):
            # The noun after *prev* in `nouns`, wrapping around at the end.
            found_prev = False
            for noun in nouns:
                if noun == prev:
                    found_prev = True
                elif found_prev:
                    return noun
            return nouns[0]

        for noun in nouns:
            for obj_id in self.indexed_data[noun].keys():
                rows = self.indexed_data[noun][obj_id]
                if len(rows) > 0:
                    batch = [random.choice(rows)]
                    noun_to_join = next_noun(noun)
                    # Follow the other end of the sampled review to pick the
                    # next row, alternating person/product at each hop.
                    while len(batch) < batch_size:
                        next_id = batch[-1][0][noun_to_join]["id"]
                        next_rows = self.indexed_data[noun_to_join].get(next_id, [])
                        if len(next_rows) > 0:
                            batch.append(random.choice(next_rows))
                            noun_to_join = next_noun(noun_to_join)
                        else:
                            break
                    # If we somehow indexed into a dead end above (highly
                    # unlikely) then pad the rest of the batch with random rows.
                    while len(batch) < batch_size:
                        batch.append(random.choice(self.data))
                    for b in batch:
                        yield b

    def gen_dataset_walk(self, batch_size):
        """Batched dataset whose rows follow random graph walks."""
        return tf.data.Dataset.from_generator(
            lambda: self.gen_walk(batch_size),
            self.dataset_dtype,
            self.dataset_size
        ).batch(batch_size)

    def gen_dataset_rand(self, batch_size):
        """Batched dataset of uniformly shuffled rows.

        Bug fix: the ``.shuffle``/``.batch`` calls were previously written
        as free-standing statements *after* ``return``, which is a syntax
        error; they are now chained onto the dataset expression.
        """
        return tf.data.Dataset.from_generator(
            lambda: (i for i in self.data),
            self.dataset_dtype,
            self.dataset_size
        ).shuffle(len(self)).batch(batch_size)

    # This is a little syntactic sugar so the caller can pass input_fn
    # directly into Estimator.train()
    @property
    def input_fn(self):
        if self.args.use_random_walk:
            return self.input_fn_walk
        else:
            return self.input_fn_rand

    @property
    def input_fn_rand(self):
        return lambda: self.gen_dataset_rand(self.args.batch_size)

    @property
    def input_fn_walk(self):
        return lambda: self.gen_dataset_walk(self.args.batch_size)

    @property
    def dataset_dtype(self):
        # Structure mirrors the (features, label) pairs from data_to_vec.
        return (
            {
                "person": {
                    "id": tf.int32,
                    "style": tf.float32
                },
                "product": {
                    "id": tf.int32,
                    "style": tf.float32
                },
                "review_score": tf.float32
            },
            tf.float32
        )

    @property
    def dataset_size(self):
        # Style vectors are fixed-length 6-vectors; all else is scalar.
        return (
            {
                "person": {
                    "id": tf.TensorShape([]),
                    "style": tf.TensorShape([6]),
                },
                "product": {
                    "id": tf.TensorShape([]),
                    "style": tf.TensorShape([6]),
                },
                "review_score": tf.TensorShape([])
            },
            tf.TensorShape([])
        )

    # --------------------------------------------------------------------------
    # Utilities
    # --------------------------------------------------------------------------

    @property
    def n_person(self):
        return len(self.ids["person"])

    @property
    def n_product(self):
        return len(self.ids["product"])

    def __len__(self):
        return len(self.data)
| 21.842742
| 99
| 0.606609
|
4a011a3d080ab0aa177d2fcfa0c65dd3c8384d4a
| 346
|
py
|
Python
|
remo/api/__init__.py
|
glogiotatidis/remo
|
1c4f55c63c8d03cbee776b60af042b8068d9f297
|
[
"BSD-3-Clause"
] | null | null | null |
remo/api/__init__.py
|
glogiotatidis/remo
|
1c4f55c63c8d03cbee776b60af042b8068d9f297
|
[
"BSD-3-Clause"
] | null | null | null |
remo/api/__init__.py
|
glogiotatidis/remo
|
1c4f55c63c8d03cbee776b60af042b8068d9f297
|
[
"BSD-3-Clause"
] | null | null | null |
from tastypie.cache import NoCache
class HttpCache(NoCache):
    """Cache class that sets cache-control to response headers."""

    def __init__(self, control=None, *args, **kwargs):
        """Create the cache backend.

        :param control: dict of cache-control directives, e.g.
            ``{'no_cache': True}`` (the default). A fresh dict is built per
            instance, fixing the shared mutable-default-argument bug of the
            previous signature (every instance aliased one module-level
            dict).
        """
        super(HttpCache, self).__init__(*args, **kwargs)
        self.control = {'no_cache': True} if control is None else control

    def cache_control(self):
        """Return the cache-control directives for response headers."""
        return self.control
| 26.615385
| 68
| 0.67341
|
4a011a41e3a864f9ed590e7f04c3ecd0d603541e
| 2,074
|
py
|
Python
|
commands/config.py
|
Robertoskb/Betozinho
|
5b8e2413c66452bb50d358178f77b3bb17726079
|
[
"MIT"
] | null | null | null |
commands/config.py
|
Robertoskb/Betozinho
|
5b8e2413c66452bb50d358178f77b3bb17726079
|
[
"MIT"
] | null | null | null |
commands/config.py
|
Robertoskb/Betozinho
|
5b8e2413c66452bb50d358178f77b3bb17726079
|
[
"MIT"
] | null | null | null |
import discord
from discord.ext import commands
from commands.utils.serversettings import ServerSettings
class Config(commands.Cog):
    """Configurações"""

    def __init__(self, bot):
        self.bot = bot

    @commands.guild_only()
    @commands.command(name='talks', help='Ativar ou Desativar minhas respostas', description='on/off')
    async def talks(self, ctx, on_off: str = ''):
        """Toggle the bot's automatic replies for this guild."""
        choice = on_off.lower()
        if choice in ('on', 'off'):
            self.update(ctx, f'talks = {1 if choice == "on" else 0}')
            state = '**ativadas**' if choice == 'on' else '**desativadas**'
            response = f'As respostas foram {state}'
        else:
            response = 'Digite on ou off'
        await ctx.reply(response, mention_author=False)

    @commands.guild_only()
    @commands.command(name='biblev', help='Mudar linguagem da bíblia', description='sigla da linguagem')
    async def biblev(self, ctx, version: str = ''):
        """Switch the Bible translation used for this guild."""
        versions = ['NVI', 'RA', 'ACF', 'KJV', 'BBE', 'RVR', 'APEE']
        if version.upper() in versions:
            self.update(ctx, f'biblelang = "{version.lower()}"')
            response = f'Versão redefinida para {version.upper()}'
        else:
            response = f'Digite uma versão entre {", ".join(versions)}'
        await ctx.reply(response, mention_author=False)

    @commands.guild_only()
    @commands.command(name='reset', help='Resetar as minhas configurações', description='Sem argumentos')
    async def reset(self, ctx):
        """Restore this guild's settings to their defaults."""
        ServerSettings(ctx.guild.id).reset()
        await ctx.reply('Configurações redefinidas', mention_author=False)

    @staticmethod
    def update(ctx, values):
        """Persist *values* into the guild's settings row."""
        ServerSettings(ctx.guild.id).update(values)

    async def cog_command_error(self, ctx, error):
        """Report configuration errors, except guild-only violations."""
        if isinstance(error, commands.NoPrivateMessage):
            return
        await ctx.reply('Algum erro ao atualizar as configurações', mention_author=False)
        print(error)
def setup(bot):
    # Entry point used by discord.py's extension loader
    # (bot.load_extension) to register this cog.
    bot.add_cog(Config(bot))
| 34.566667
| 105
| 0.635487
|
4a011a4bff8f0b56193d9207112c797d1214987f
| 1,421
|
py
|
Python
|
examples/plot_iris_comparison.py
|
balins/fuzzy-tree
|
8dde93cdd32c2c3643ce459fdd5d29b8a0904714
|
[
"BSD-3-Clause"
] | 6
|
2021-06-06T13:32:48.000Z
|
2021-10-13T14:15:04.000Z
|
examples/plot_iris_comparison.py
|
balins/fuzzy-tree
|
8dde93cdd32c2c3643ce459fdd5d29b8a0904714
|
[
"BSD-3-Clause"
] | 1
|
2022-01-23T15:21:06.000Z
|
2022-01-23T15:21:06.000Z
|
examples/plot_iris_comparison.py
|
balins/fuzzytree
|
8dde93cdd32c2c3643ce459fdd5d29b8a0904714
|
[
"BSD-3-Clause"
] | null | null | null |
"""
=========================================================
Comparison of crisp and fuzzy classifiers on iris dataset
=========================================================
A comparison plot for :class:`FuzzyDecisionTreeClassifier`
and sklearn's :class:`DecisionTreeClassifier` on iris
dataset (only two features were selected)
"""
import matplotlib.pyplot as plt
from matplotlib import gridspec
from mlxtend.plotting import plot_decision_regions
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from fuzzytree import FuzzyDecisionTreeClassifier

# Use only petal length and petal width so the decision regions are 2-D.
iris = load_iris()
features = [2, 3]
X = iris.data[:, features]
y = iris.target

X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42
)

# Fit both classifiers on the same split.
classifiers = [
    FuzzyDecisionTreeClassifier().fit(X_train, y_train),
    DecisionTreeClassifier().fit(X_train, y_train),
]
labels = ["Fuzzy Decision Tree", "sklearn Decision Tree"]

gs = gridspec.GridSpec(2, 2)
fig = plt.figure(figsize=(10, 8))

# One column per classifier: train regions on top, test regions below.
for column, (model, title) in enumerate(zip(classifiers, labels)):
    plt.subplot(gs[0, column])
    plot_decision_regions(X=X_train, y=y_train, clf=model, legend=2)
    plt.title("%s (train)" % title)
    plt.subplot(gs[1, column])
    plot_decision_regions(X=X_test, y=y_test, clf=model, legend=2)
    plt.title("%s (test)" % title)
plt.show()
| 31.577778
| 89
| 0.682618
|
4a011b72423b26c942c86af1d3e2f6e2dc58d347
| 1,515
|
py
|
Python
|
layer/first_layer.py
|
socofels/neural_style_transfer
|
c4349f8266256cc9922df3d0837a1aa786f84712
|
[
"MIT"
] | null | null | null |
layer/first_layer.py
|
socofels/neural_style_transfer
|
c4349f8266256cc9922df3d0837a1aa786f84712
|
[
"MIT"
] | null | null | null |
layer/first_layer.py
|
socofels/neural_style_transfer
|
c4349f8266256cc9922df3d0837a1aa786f84712
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
from tensorflow.keras.layers import Layer
from configs.config import CONFIG
class MyLayer(Layer):
    """Trainable elementwise mask over a fixed-size image.

    Learns one weight per (height, width, channel) position of the image
    configured in CONFIG and multiplies the input by it elementwise, so the
    output shape equals the input shape.
    """

    def __init__(self, **kwargs):
        super(MyLayer, self).__init__(**kwargs)

    def build(self, input_shape):
        # One trainable weight per pixel/channel of the configured image.
        self.kernel = self.add_weight(name='kernel',
                                      shape=(CONFIG.IMAGE_HEIGHT, CONFIG.IMAGE_WIDTH, CONFIG.COLOR_CHANNELS),
                                      initializer='uniform',
                                      trainable=True)
        super(MyLayer, self).build(input_shape)  # must be called at the end

    def call(self, x):
        return tf.multiply(x, self.kernel)

    def compute_output_shape(self, input_shape):
        # Elementwise multiplication preserves the input shape. The previous
        # implementation was a bare ``pass`` (returned None), which breaks
        # shape inference for callers of this hook.
        return input_shape
class fitrst_layer(Layer):
    """Trainable elementwise mask whose weight matches the input shape.

    Unlike :class:`MyLayer`, the kernel shape is taken from ``input_shape``
    at build time instead of the global CONFIG image size. (Class name kept
    as-is — including the typo — because it is the public interface.)
    """

    def __init__(self, **kwargs):
        # Bug fix: the original called ``super(MyLayer, self)`` (copy-paste
        # from MyLayer), which raises TypeError because instances of this
        # class are not MyLayer instances.
        super(fitrst_layer, self).__init__(**kwargs)

    def build(self, input_shape):
        # Create one trainable weight per input element.
        self.kernel = self.add_weight(name='kernel',
                                      shape=(input_shape),
                                      initializer='uniform',
                                      trainable=True)
        super(fitrst_layer, self).build(input_shape)  # must be called at the end

    def call(self, x):
        return tf.multiply(x, self.kernel)

    def compute_output_shape(self, input_shape):
        # Elementwise multiplication preserves the input shape. The original
        # returned (input_shape[0], self.output_dim), but ``output_dim`` was
        # never assigned anywhere, so it always raised AttributeError.
        return input_shape
| 107
| 0.573597
|
4a011c4292b7abf596d1f060653be1252d5fc91e
| 1,793
|
py
|
Python
|
cheritest/trunk/tests/cp2/test_cp2_getincoffset.py
|
tupipa/beri
|
cef1b41d52592cfa7454ddf59f9f2994e447cd66
|
[
"Apache-2.0"
] | 36
|
2015-05-29T16:47:19.000Z
|
2022-02-08T21:16:26.000Z
|
cheritest/trunk/tests/cp2/test_cp2_getincoffset.py
|
tupipa/beri
|
cef1b41d52592cfa7454ddf59f9f2994e447cd66
|
[
"Apache-2.0"
] | 1
|
2015-10-14T13:05:21.000Z
|
2015-10-19T20:34:03.000Z
|
cheritest/trunk/tests/cp2/test_cp2_getincoffset.py
|
tupipa/beri
|
cef1b41d52592cfa7454ddf59f9f2994e447cd66
|
[
"Apache-2.0"
] | 15
|
2015-06-11T07:10:58.000Z
|
2021-06-18T05:14:54.000Z
|
#-
# Copyright (c) 2011 Robert N. M. Watson
# All rights reserved.
#
# This software was developed by SRI International and the University of
# Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
# ("CTSRD"), as part of the DARPA CRASH research programme.
#
# @BERI_LICENSE_HEADER_START@
#
# Licensed to BERI Open Systems C.I.C. (BERI) under one or more contributor
# license agreements. See the NOTICE file distributed with this work for
# additional information regarding copyright ownership. BERI licenses this
# file to you under the BERI Hardware-Software License, Version 1.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at:
#
# http://www.beri-open-systems.org/legal/license-1-0.txt
#
# Unless required by applicable law or agreed to in writing, Work distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# @BERI_LICENSE_HEADER_END@
#
from beritest_tools import BaseBERITestCase
from nose.plugins.attrib import attr
#
# Check basic behaviour of cgetoffset and cincoffset.
#
class test_cp2_getincoffset(BaseBERITestCase):
    # Checks the results of the accompanying cgetoffset/cincoffset assembly
    # test program via the register values it leaves in a0/a1.

    @attr('capabilities')
    def test_cp2_getoffset1(self):
        '''Test that cgetoffset returns correct initial value'''
        # a0 holds the offset read before any cincoffset was executed;
        # a fresh capability is expected to have offset 0.
        self.assertRegisterEqual(self.MIPS.a0, 0, "cgetoffset returns incorrect initial value")

    @attr('capabilities')
    def test_cp2_getoffset2(self):
        '''Test that cgetoffset returns correct value after cincoffset'''
        # a1 is expected to be 100 after the cincoffset performed by the
        # test program (increment amount defined in the .s file).
        self.assertRegisterEqual(self.MIPS.a1, 100, "cgetoffset returns incorrect value after cincoffset")
| 39.844444
| 106
| 0.761294
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.