Dataset schema (one record per source file; ⌀ denotes null values):

- blob_id: string (length 40)
- directory_id: string (length 40)
- path: string (length 4-721)
- content_id: string (length 40)
- detected_licenses: list (length 0-57)
- license_type: string (2 classes)
- repo_name: string (length 5-91)
- snapshot_id: string (length 40)
- revision_id: string (length 40)
- branch_name: string (321 classes)
- visit_date: timestamp[ns] (2016-08-12 09:31:09 to 2023-09-06 10:45:07)
- revision_date: timestamp[ns] (2010-09-28 14:01:40 to 2023-09-06 06:22:19)
- committer_date: timestamp[ns] (2010-09-28 14:01:40 to 2023-09-06 06:22:19)
- github_id: int64 (426 to 681M)
- star_events_count: int64 (101 to 243k)
- fork_events_count: int64 (0 to 110k)
- gha_license_id: string (23 classes)
- gha_event_created_at: timestamp[ns] (2012-06-28 18:51:49 to 2023-09-14 21:59:16; nullable)
- gha_created_at: timestamp[ns] (2008-02-11 22:55:26 to 2023-08-10 11:14:58; nullable)
- gha_language: string (147 classes)
- src_encoding: string (26 classes)
- language: string (2 classes)
- is_vendor: bool
- is_generated: bool
- length_bytes: int64 (6 to 10.2M)
- extension: string (115 classes)
- filename: string (length 3-113)
- content: string (length 6 to 10.2M)

Records follow, one field per line.

blob_id: 9c4968ad51b1860bb989841aef9f8e5818d0387f
directory_id: 707dabf6a2e809959d3ee83183f9613662224cd0
path: /live/classes/group.py
content_id: ea948a92384b9cd625bc60bec1f786185304862a
detected_licenses: []
license_type: no_license
repo_name: ideoforms/pylive
snapshot_id: 23c9d44689d9ed29e2c6fce4d543fe95b4389655
revision_id: a8ae0db971852596963c2cc95ee14b056af62bb1
branch_name: refs/heads/master
visit_date: 2023-08-10T10:19:17.723642
revision_date: 2023-07-25T17:33:26
committer_date: 2023-07-25T17:33:26
github_id: 11,980,609
star_events_count: 409
fork_events_count: 52
gha_license_id: null
gha_event_created_at: 2023-07-25T17:33:28
gha_created_at: 2013-08-08T16:11:26
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 1,926
extension: py
filename: group.py
content:
from __future__ import annotations
import logging
from .track import Track
class Group(Track):
"""
Represents a grouped set of Track objects.
"""
def __init__(self, set, track_index: int, group_index: int, name: str, group: Group):
Track.__init__(self, set, track_index, name, group)
self.track_index = track_index
self.group_index = group_index
# needed so that Clip objects can call the 'index' method on Group and Track accordingly
# TODO: rename 'index' to 'track_index' on Track objects too
self.index = track_index
self.is_group = True
self.group: Group = None
self.tracks: list[Track] = []
self.logger = logging.getLogger(__name__)
def __str__(self):
string = "Group (%d): %s" % (self.group_index, self.name)
if len(self.tracks):
string = string + " [tracks %d-%d]" % (self.tracks[0].index, self.tracks[len(self.tracks) - 1].index)
return string
def __iter__(self):
return iter(self.tracks)
def __getstate__(self):
return {
**super().__getstate__(),
"track_index": self.track_index,
"group_index": self.group_index,
"tracks": self.tracks,
}
def __setstate__(self, d: dict):
super().__setstate__(d)
self.track_index = d["track_index"]
self.group_index = d["group_index"]
self.tracks = d["tracks"]
def dump(self):
self.logger.info("%d tracks" % len(self.tracks))
for track in self.tracks:
track.dump()
@property
def active_clips(self):
""" Return a dictionary of all non-empty clipslots: { index : Clip, ... } """
active_clips = [n for n in self.clips if n is not None]
return active_clips
@property
def is_playing(self):
return any(track.is_playing for track in self.tracks)
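# --- Added illustration (not part of the original file) ----------------------
# Group merges its parent's pickle state with its own fields via
# __getstate__/__setstate__. A minimal, runnable sketch of that pattern,
# using hypothetical Base/Child classes:
if __name__ == "__main__":
    import pickle

    class Base:
        def __init__(self):
            self.name = "base"

        def __getstate__(self):
            return {"name": self.name}

        def __setstate__(self, d):
            self.name = d["name"]

    class Child(Base):
        def __init__(self):
            super().__init__()
            self.index = 3

        def __getstate__(self):
            # merge the parent's state with this class's fields, as Group does
            return {**super().__getstate__(), "index": self.index}

        def __setstate__(self, d):
            super().__setstate__(d)
            self.index = d["index"]

    restored = pickle.loads(pickle.dumps(Child()))
    assert (restored.name, restored.index) == ("base", 3)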

blob_id: 7cda2a4ec22cf3ba6be4e735a138e132f501b07c
directory_id: cb3eeb65764bdb0b437e3e0afe824d52bfef3344
path: /tools/fewshot_exp/crops/create_crops_voc_base.py
content_id: e37eccd417f2de195c6dbcec6c193a19a5069934
detected_licenses: ["MIT"]
license_type: permissive
repo_name: jiaxi-wu/MPSR
snapshot_id: dccc8203c6ba3b77ad92c93f0bf048c415040e71
revision_id: 3e4ecdbeb02faf1857ada74858b38187213d676e
branch_name: refs/heads/master
visit_date: 2022-11-29T05:58:19.780202
revision_date: 2020-08-20T14:31:14
committer_date: 2020-08-20T14:31:14
github_id: 274,648,664
star_events_count: 137
fork_events_count: 23
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 1,969
extension: py
filename: create_crops_voc_base.py
content:
from PIL import Image
from maskrcnn_benchmark.data.datasets.voc import PascalVOCDataset
import os, shutil
import sys
# crop each object from the original image and save it at its original scale
# save the crops under one folder per category
split = int(sys.argv[1])
# we do not crop at the tight box size, but take an (8 / 7)x larger close-up around the box center
def get_closeup(image, target):
closeup = []
closeup_target = target.get_field('labels').tolist()
for t in range(len(target)):
x1, y1, x2, y2 = target.bbox[t].tolist()
cutsize = max(x2 - x1, y2 - y1) * 8 / 7 / 2
midx = (x1 + x2) / 2
midy = (y1 + y2) / 2
crop_img = image.crop((int(midx - cutsize), int(midy - cutsize), int(midx + cutsize), int(midy + cutsize)))
closeup.append(crop_img)
return closeup, closeup_target
datadirs = ['datasets/voc/VOC2007', 'datasets/voc/VOC2012']
splits = ['trainval_split%d_base'%split, 'trainval_split%d_base'%split]
for s in range(2):
dataset = PascalVOCDataset(datadirs[s], splits[s])
if not os.path.exists(datadirs[s] + '/Crops'):
os.mkdir(datadirs[s] + '/Crops')
else:
shutil.rmtree(datadirs[s] + '/Crops')
os.mkdir(datadirs[s] + '/Crops')
for cls in PascalVOCDataset.CLASSES[1:]:
os.mkdir(datadirs[s] + '/Crops/' + cls)
cls_count = {cls: 0 for cls in PascalVOCDataset.CLASSES}
for index in range(len(dataset)):
img_id = dataset.ids[index]
img = Image.open(datadirs[s] + '/JPEGImages/%s.jpg'%img_id).convert("RGB")
annos = dataset.get_groundtruth(index)
crops, crop_labels = get_closeup(img, annos)
for crop, label in list(zip(crops, crop_labels)):
#label = PascalVOCDataset.CLASSES[label]
label = dataset.categories[label]
cls_count[label] += 1
crop.save(datadirs[s] + '/Crops/%s/%d.jpg'%(label, cls_count[label]))
print(cls_count)
print('crop amount:%d'%sum(list(cls_count.values())))
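# --- Added illustration (not part of the original file) ----------------------
# A quick, standalone check of the close-up geometry in get_closeup(): the
# crop is a square whose side is (8/7) x the longer box side, centered on the
# box center, so the object keeps a small margin around it.
if __name__ == "__main__":
    x1, y1, x2, y2 = 10.0, 20.0, 80.0, 60.0       # a 70x40 example box
    cutsize = max(x2 - x1, y2 - y1) * 8 / 7 / 2   # half the crop side
    midx, midy = (x1 + x2) / 2, (y1 + y2) / 2
    crop_box = (midx - cutsize, midy - cutsize, midx + cutsize, midy + cutsize)
    assert crop_box[2] - crop_box[0] == 70 * 8 / 7  # side = (8/7) x longer side
    print("crop box:", crop_box)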

blob_id: d65f01b2c1faf9e9cdeb426a67ba92b0e5524ef7
directory_id: 4f59f77ee3f0be11930fd530cad72246be4c9fd1
path: /test/test_hifigan.py
content_id: 933d3dea93849907aba0f15f1dad3a55e10e0bbe
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: TensorSpeech/TensorFlowTTS
snapshot_id: 3493ca788f67eee32e343cb492d95bdf38a4306a
revision_id: 136877136355c82d7ba474ceb7a8f133bd84767e
branch_name: refs/heads/master
visit_date: 2023-07-04T17:25:40.956787
revision_date: 2022-03-10T08:34:25
committer_date: 2022-03-10T08:34:25
github_id: 249,107,601
star_events_count: 2,889
fork_events_count: 686
gha_license_id: Apache-2.0
gha_event_created_at: 2022-11-25T22:56:20
gha_created_at: 2020-03-22T03:44:10
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 3,900
extension: py
filename: test_hifigan.py
content:
# -*- coding: utf-8 -*-
# Copyright 2020 TensorFlowTTS Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import pytest
import tensorflow as tf
from tensorflow_tts.configs import (
HifiGANDiscriminatorConfig,
HifiGANGeneratorConfig,
MelGANDiscriminatorConfig,
)
from tensorflow_tts.models import (
TFHifiGANGenerator,
TFHifiGANMultiPeriodDiscriminator,
TFMelGANMultiScaleDiscriminator,
)
from examples.hifigan.train_hifigan import TFHifiGANDiscriminator
os.environ["CUDA_VISIBLE_DEVICES"] = ""
logging.basicConfig(
level=logging.DEBUG,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
def make_hifigan_generator_args(**kwargs):
defaults = dict(
out_channels=1,
kernel_size=7,
filters=128,
use_bias=True,
upsample_scales=[8, 8, 2, 2],
stacks=3,
stack_kernel_size=[3, 7, 11],
stack_dilation_rate=[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
nonlinear_activation="LeakyReLU",
nonlinear_activation_params={"alpha": 0.2},
padding_type="REFLECT",
use_final_nolinear_activation=True,
is_weight_norm=True,
initializer_seed=42,
)
defaults.update(kwargs)
return defaults
def make_hifigan_discriminator_args(**kwargs):
defaults_multisperiod = dict(
out_channels=1,
period_scales=[2, 3, 5, 7, 11],
n_layers=5,
kernel_size=5,
strides=3,
filters=8,
filter_scales=4,
max_filters=1024,
nonlinear_activation="LeakyReLU",
nonlinear_activation_params={"alpha": 0.2},
is_weight_norm=True,
initializer_seed=42,
)
defaults_multisperiod.update(kwargs)
defaults_multiscale = dict(
out_channels=1,
scales=3,
downsample_pooling="AveragePooling1D",
downsample_pooling_params={"pool_size": 4, "strides": 2,},
kernel_sizes=[5, 3],
filters=16,
max_downsample_filters=1024,
use_bias=True,
downsample_scales=[4, 4, 4, 4],
nonlinear_activation="LeakyReLU",
nonlinear_activation_params={"alpha": 0.2},
padding_type="REFLECT",
)
defaults_multiscale.update(kwargs)
return [defaults_multisperiod, defaults_multiscale]
@pytest.mark.parametrize(
"dict_g, dict_d, dict_loss",
[
({}, {}, {}),
({"kernel_size": 3}, {}, {}),
({"filters": 1024}, {}, {}),
({"stack_kernel_size": [1, 2, 3]}, {}, {}),
({"stack_kernel_size": [3, 5, 7], "stacks": 3}, {}, {}),
({"upsample_scales": [4, 4, 4, 4]}, {}, {}),
({"upsample_scales": [8, 8, 2, 2]}, {}, {}),
({"filters": 1024, "upsample_scales": [8, 8, 2, 2]}, {}, {}),
],
)
def test_hifigan_trainable(dict_g, dict_d, dict_loss):
batch_size = 4
batch_length = 4096
args_g = make_hifigan_generator_args(**dict_g)
args_d_p, args_d_s = make_hifigan_discriminator_args(**dict_d)
args_g = HifiGANGeneratorConfig(**args_g)
args_d_p = HifiGANDiscriminatorConfig(**args_d_p)
args_d_s = MelGANDiscriminatorConfig(**args_d_s)
generator = TFHifiGANGenerator(args_g)
discriminator_p = TFHifiGANMultiPeriodDiscriminator(args_d_p)
discriminator_s = TFMelGANMultiScaleDiscriminator(args_d_s)
discriminator = TFHifiGANDiscriminator(discriminator_p, discriminator_s)
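# --- Added illustration (not part of the original file) ----------------------
# The make_*_args helpers follow a defaults-with-overrides pattern: dict(...)
# holds the baseline config and .update(kwargs) lets each parametrized case
# override only the fields under test. A quick, runnable check:
if __name__ == "__main__":
    demo_args = make_hifigan_generator_args(kernel_size=3, filters=64)
    assert demo_args["kernel_size"] == 3 and demo_args["filters"] == 64
    assert demo_args["upsample_scales"] == [8, 8, 2, 2]  # untouched default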

blob_id: d2a6f9bf55429670cc92cb7fb5a5bfd21f46aca0
directory_id: fbbe424559f64e9a94116a07eaaa555a01b0a7bb
path: /Spacy/source2.7/spacy/lang/sv/morph_rules.py
content_id: e28322e98d0efa95a94e4ba9306673966dfc2af0
detected_licenses: ["MIT"]
license_type: permissive
repo_name: ryfeus/lambda-packs
snapshot_id: 6544adb4dec19b8e71d75c24d8ed789b785b0369
revision_id: cabf6e4f1970dc14302f87414f170de19944bac2
branch_name: refs/heads/master
visit_date: 2022-12-07T16:18:52.475504
revision_date: 2022-11-29T13:35:35
committer_date: 2022-11-29T13:35:35
github_id: 71,386,735
star_events_count: 1,283
fork_events_count: 263
gha_license_id: MIT
gha_event_created_at: 2022-11-26T05:02:14
gha_created_at: 2016-10-19T18:22:39
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 5,912
extension: py
filename: morph_rules.py
content:
# coding: utf8
from __future__ import unicode_literals
from ...symbols import LEMMA, PRON_LEMMA
# Used the table of pronouns at https://sv.wiktionary.org/wiki/deras
MORPH_RULES = {
"PRP": {
"jag": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Sing", "Case": "Nom"},
"mig": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Sing", "Case": "Acc"},
"mej": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Sing", "Case": "Acc"},
"du": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Sing", "Case": "Nom"},
"han": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Sing", "Gender": "Masc", "Case": "Nom"},
"honom": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Sing", "Gender": "Masc", "Case": "Acc"},
"hon": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Sing", "Gender": "Fem", "Case": "Nom"},
"henne": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Sing", "Gender": "Fem", "Case": "Acc"},
"det": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Sing", "Gender": "Neut"},
"vi": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Plur", "Case": "Nom"},
"oss": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Plur", "Case": "Acc"},
"ni": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Plur", "Case": "Nom"},
"er": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Plur", "Case": "Acc"},
"de": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Plur", "Case": "Nom"},
"dom": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Plur", "Case": "Nom"},
"dem": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Plur", "Case": "Acc"},
"dom": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Plur", "Case": "Acc"},
"min": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Sing", "Poss": "Yes", "Reflex": "Yes"},
"mitt": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Sing", "Poss": "Yes", "Reflex": "Yes"},
"mina": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Plur", "Poss": "Yes", "Reflex": "Yes"},
"din": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Sing", "Poss": "Yes", "Reflex": "Yes"},
"ditt": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Sing", "Poss": "Yes", "Reflex": "Yes"},
"dina": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Plur", "Poss": "Yes", "Reflex": "Yes"},
"hans": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Sing", "Gender": "Masc", "Poss": "Yes", "Reflex": "Yes"},
"hans": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Plur", "Gender": "Masc", "Poss": "Yes", "Reflex": "Yes"},
"hennes": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Sing", "Gender": "Fem", "Poss": "Yes", "Reflex": "Yes"},
"hennes": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Plur", "Gender": "Fem", "Poss": "Yes", "Reflex": "Yes"},
"dess": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Sing", "Poss": "Yes", "Reflex": "Yes"},
"dess": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Plur", "Poss": "Yes", "Reflex": "Yes"},
"vår": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Plur", "Poss": "Yes", "Reflex": "Yes"},
"våran": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Plur", "Poss": "Yes", "Reflex": "Yes"},
"vårt": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Plur", "Poss": "Yes", "Reflex": "Yes"},
"vårat": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Plur", "Poss": "Yes", "Reflex": "Yes"},
"våra": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Plur", "Poss": "Yes", "Reflex": "Yes"},
"er": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Plur", "Poss": "Yes", "Reflex": "Yes"},
"eran": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Plur", "Poss": "Yes", "Reflex": "Yes"},
"ert": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Plur", "Poss": "Yes", "Reflex": "Yes"},
"erat": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Plur", "Poss": "Yes", "Reflex": "Yes"},
"era": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Plur", "Poss": "Yes", "Reflex": "Yes"},
"deras": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Plur", "Poss": "Yes", "Reflex": "Yes"}
},
"VBZ": {
"är": {"VerbForm": "Fin", "Person": "One", "Tense": "Pres", "Mood": "Ind"},
"är": {"VerbForm": "Fin", "Person": "Two", "Tense": "Pres", "Mood": "Ind"},
"är": {"VerbForm": "Fin", "Person": "Three", "Tense": "Pres", "Mood": "Ind"},
},
"VBP": {
"är": {"VerbForm": "Fin", "Tense": "Pres", "Mood": "Ind"}
},
"VBD": {
"var": {"VerbForm": "Fin", "Tense": "Past", "Number": "Sing"},
"vart": {"VerbForm": "Fin", "Tense": "Past", "Number": "Plur"}
}
}
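# --- Added illustration (not part of the original file) ----------------------
# Why the duplicated keys above had to be collapsed: a Python dict literal
# silently keeps only the last occurrence of a repeated key.
if __name__ == "__main__":
    table = {"dom": {"Case": "Nom"}, "dom": {"Case": "Acc"}}
    assert len(table) == 1 and table["dom"] == {"Case": "Acc"}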

blob_id: 9ba88e2b5642d826eb425ffd6afb9f3576e21d33
directory_id: 24f354c0a362c0a44fe0946f0a947930f0724f4d
path: /tests/unit/activation/test_python_activator.py
content_id: f773f74a65457189343278a64e76412c631119f7
detected_licenses: ["MIT"]
license_type: permissive
repo_name: pypa/virtualenv
snapshot_id: 783cf226c806bcb44ee63fd87c37d76e90c121ce
revision_id: 6d22da631fd289f89f921a4010047ad969b7bfa7
branch_name: refs/heads/main
visit_date: 2023-09-04T06:50:16.410634
revision_date: 2023-08-30T14:32:38
committer_date: 2023-08-30T14:32:38
github_id: 1,446,474
star_events_count: 4,313
fork_events_count: 1,073
gha_license_id: MIT
gha_event_created_at: 2023-09-12T14:54:09
gha_created_at: 2011-03-06T14:33:27
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 3,510
extension: py
filename: test_python_activator.py
content:
from __future__ import annotations
import os
import sys
from ast import literal_eval
from textwrap import dedent
from virtualenv.activation import PythonActivator
from virtualenv.info import IS_WIN
def test_python(raise_on_non_source_class, activation_tester):
class Python(raise_on_non_source_class):
def __init__(self, session) -> None:
super().__init__(
PythonActivator,
session,
sys.executable,
activate_script="activate_this.py",
extension="py",
non_source_fail_message="You must use exec(open(this_file).read(), {'__file__': this_file}))",
)
self.unix_line_ending = not IS_WIN
def env(self, tmp_path):
env = os.environ.copy()
env["PYTHONIOENCODING"] = "utf-8"
for key in ("VIRTUAL_ENV", "PYTHONPATH"):
env.pop(str(key), None)
env["PATH"] = os.pathsep.join([str(tmp_path), str(tmp_path / "other")])
return env
@staticmethod
def _get_test_lines(activate_script):
raw = f"""
import os
import sys
import platform
def print_r(value):
print(repr(value))
print_r(os.environ.get("VIRTUAL_ENV"))
print_r(os.environ.get("VIRTUAL_ENV_PROMPT"))
print_r(os.environ.get("PATH").split(os.pathsep))
print_r(sys.path)
file_at = {str(activate_script)!r}
# CPython 2 requires non-ascii path open to be unicode
with open(file_at, "r", encoding='utf-8') as file_handler:
content = file_handler.read()
exec(content, {{"__file__": file_at}})
print_r(os.environ.get("VIRTUAL_ENV"))
print_r(os.environ.get("VIRTUAL_ENV_PROMPT"))
print_r(os.environ.get("PATH").split(os.pathsep))
print_r(sys.path)
import pydoc_test
print_r(pydoc_test.__file__)
"""
return dedent(raw).splitlines()
def assert_output(self, out, raw, tmp_path): # noqa: ARG002
out = [literal_eval(i) for i in out]
assert out[0] is None # start with VIRTUAL_ENV None
assert out[1] is None # likewise for VIRTUAL_ENV_PROMPT
prev_path = out[2]
prev_sys_path = out[3]
assert out[4] == str(self._creator.dest) # VIRTUAL_ENV now points to the virtual env folder
assert out[5] == str(self._creator.env_name) # VIRTUAL_ENV_PROMPT now has the env name
new_path = out[6] # PATH now starts with bin path of current
assert ([str(self._creator.bin_dir), *prev_path]) == new_path
# sys path contains the site package at its start
new_sys_path = out[7]
new_lib_paths = {str(i) for i in self._creator.libs}
assert prev_sys_path == new_sys_path[len(new_lib_paths) :]
assert new_lib_paths == set(new_sys_path[: len(new_lib_paths)])
# manage to import from activate site package
dest = self.norm_path(self._creator.purelib / "pydoc_test.py")
found = self.norm_path(out[8])
assert found.startswith(dest)
def non_source_activate(self, activate_script):
act = str(activate_script)
return [*self._invoke_script, "-c", f"exec(open({act!r}).read())"]
activation_tester(Python)
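# --- Added illustration (not part of the original file) ----------------------
# The activation mechanism under test is plain exec() with an injected
# __file__, so the activate_this.py script can locate its own virtualenv. A
# standalone sketch of that mechanism using a throwaway stand-in script:
if __name__ == "__main__":
    import tempfile

    with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as fh:
        fh.write("import os\nos.environ['DEMO_ACTIVATED'] = __file__\n")
        script = fh.name
    with open(script, encoding="utf-8") as fh:
        exec(compile(fh.read(), script, "exec"), {"__file__": script})
    assert os.environ["DEMO_ACTIVATED"] == script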

blob_id: 324be878f03689ec5b097f2f1de8ffe0206959aa
directory_id: eb9f655206c43c12b497c667ba56a0d358b6bc3a
path: /python/testData/quickdoc/AttributeDescriptionEmptyGoogle.py
content_id: e82a7c161132fb8e40cc4624408590d949e318d4
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: JetBrains/intellij-community
snapshot_id: 2ed226e200ecc17c037dcddd4a006de56cd43941
revision_id: 05dbd4575d01a213f3f4d69aa4968473f2536142
branch_name: refs/heads/master
visit_date: 2023-09-03T17:06:37.560889
revision_date: 2023-09-03T11:51:00
committer_date: 2023-09-03T12:12:27
github_id: 2,489,216
star_events_count: 16,288
fork_events_count: 6,635
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-12T07:41:58
gha_created_at: 2011-09-30T13:33:05
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 90
extension: py
filename: AttributeDescriptionEmptyGoogle.py
content:
class My<the_ref>Class:
"""
Attributes:
attr (int) :
"""
attr = 1

blob_id: 9d247815dffb71174456e9deaf52e31392b25685
directory_id: 85d2c76b8a646c4183fb095c2b5a2e2a916d70a8
path: /solution/math/5618/main.py
content_id: d6ffb3900373c70a6eb1202fd428afeec9b0c1d7
detected_licenses: ["MIT"]
license_type: permissive
repo_name: tony9402/baekjoon
snapshot_id: 448213dcbdf74b58b6d87191ac54c286bad32f29
revision_id: 71f9bfc1f4ed66b97ffc0aff2d17389a095a7d11
branch_name: refs/heads/main
visit_date: 2023-08-16T17:54:08.533578
revision_date: 2023-08-16T16:36:36
committer_date: 2023-08-16T16:36:36
github_id: 200,492,112
star_events_count: 4,828
fork_events_count: 1,230
gha_license_id: MIT
gha_event_created_at: 2023-06-22T11:49:18
gha_created_at: 2019-08-04T12:48:59
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 575
extension: py
filename: main.py
content:
# Authored by : gusdn3477
# Co-authored by : tony9402
# Link : http://boj.kr/aef57ade1e5c4c6e90f08a159fe96ca2
import sys
def input():
return sys.stdin.readline().rstrip()
def GCD(x,y):
if y == 0:
return x
else:
return GCD(y, x%y)
n = int(input())
arr = list(map(int, input().split()))
outputs = list()
gcd = arr[0]
for i in range(1, n):
gcd = GCD(gcd, arr[i])
x = 1
while x * x <= gcd:
if gcd % x == 0:
outputs.append(x)
if x * x != gcd:
outputs.append(gcd // x)
x += 1
outputs.sort()
print(*outputs)
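# --- Added illustration (not part of the original file) ----------------------
# Worked example: for the input "2\n24 36", gcd = GCD(24, 36) = 12
#   (GCD(24, 36) -> GCD(36, 24) -> GCD(24, 12) -> GCD(12, 0) = 12),
# and the loop collects each divisor pair (x, gcd // x) for x <= sqrt(12):
#   x=1 -> 1, 12; x=2 -> 2, 6; x=3 -> 3, 4
# so the program prints "1 2 3 4 6 12": exactly the common divisors of 24, 36.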

blob_id: 2b34d1a5fc6bc93bfea791237a8a060c757bde20
directory_id: d58a58261efe1db9f2956be1e7081dbd4e7eeb0f
path: /datasets/combine_A_and_B.py
content_id: d23f405476689c78114e27845169bd21a72c9a51
detected_licenses: ["BSD-3-Clause", "BSD-2-Clause"]
license_type: permissive
repo_name: mit-han-lab/gan-compression
snapshot_id: 6245dc896ce79470e6a55a39e678ec32197c0fc0
revision_id: 3dd79dd4973e4bdad511169fde89b3e9e12adc5e
branch_name: refs/heads/master
visit_date: 2023-08-24T09:12:58.311288
revision_date: 2022-11-10T00:57:24
committer_date: 2022-11-10T00:57:24
github_id: 245,032,939
star_events_count: 1,135
fork_events_count: 166
gha_license_id: NOASSERTION
gha_event_created_at: 2023-02-16T00:44:54
gha_created_at: 2020-03-05T00:27:13
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 2,249
extension: py
filename: combine_A_and_B.py
content:
import argparse
import os
import cv2
import numpy as np
parser = argparse.ArgumentParser('create image pairs')
parser.add_argument('--fold_A', dest='fold_A', help='input directory for image A', type=str,
default='../dataset/50kshoes_edges')
parser.add_argument('--fold_B', dest='fold_B', help='input directory for image B', type=str,
default='../dataset/50kshoes_jpg')
parser.add_argument('--fold_AB', dest='fold_AB', help='output directory', type=str, default='../dataset/test_AB')
parser.add_argument('--num_imgs', dest='num_imgs', help='number of images', type=int, default=1000000)
parser.add_argument('--use_AB', dest='use_AB', help='if true: (0001_A, 0001_B) to (0001_AB)', action='store_true')
args = parser.parse_args()
for arg in vars(args):
print('[%s] = ' % arg, getattr(args, arg))
splits = os.listdir(args.fold_A)
for sp in splits:
img_fold_A = os.path.join(args.fold_A, sp)
img_fold_B = os.path.join(args.fold_B, sp)
img_list = os.listdir(img_fold_A)
if args.use_AB:
img_list = [img_path for img_path in img_list if '_A.' in img_path]
num_imgs = min(args.num_imgs, len(img_list))
print('split = %s, use %d/%d images' % (sp, num_imgs, len(img_list)))
img_fold_AB = os.path.join(args.fold_AB, sp)
if not os.path.isdir(img_fold_AB):
os.makedirs(img_fold_AB)
print('split = %s, number of images = %d' % (sp, num_imgs))
for n in range(num_imgs):
name_A = img_list[n]
path_A = os.path.join(img_fold_A, name_A)
if args.use_AB:
name_B = name_A.replace('_A.', '_B.')
else:
name_B = name_A
path_B = os.path.join(img_fold_B, name_B)
if os.path.isfile(path_A) and os.path.isfile(path_B):
name_AB = name_A
if args.use_AB:
name_AB = name_AB.replace('_A.', '.') # remove _A
path_AB = os.path.join(img_fold_AB, name_AB)
im_A = cv2.imread(path_A, 1) # python2: cv2.CV_LOAD_IMAGE_COLOR; python3: cv2.IMREAD_COLOR
im_B = cv2.imread(path_B, 1) # python2: cv2.CV_LOAD_IMAGE_COLOR; python3: cv2.IMREAD_COLOR
im_AB = np.concatenate([im_A, im_B], 1)
cv2.imwrite(path_AB, im_AB)
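# --- Added illustration (not part of the original file) ----------------------
# np.concatenate([im_A, im_B], 1) joins the pair along the width axis, so two
# H x W x 3 images become one H x 2W x 3 "AB" image. A quick, runnable check:
if __name__ == "__main__":
    demo_A = np.zeros((4, 6, 3), dtype=np.uint8)
    demo_B = np.ones((4, 6, 3), dtype=np.uint8)
    assert np.concatenate([demo_A, demo_B], 1).shape == (4, 12, 3)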

blob_id: 22abe48d6cd1c930ea43365a817c778333c9a405
directory_id: eb9f655206c43c12b497c667ba56a0d358b6bc3a
path: /python/testData/resolve/BuiltinVsClassMember.py
content_id: e5903764bcd03c587222d0ecce2d8d4aabcb2fc2
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: JetBrains/intellij-community
snapshot_id: 2ed226e200ecc17c037dcddd4a006de56cd43941
revision_id: 05dbd4575d01a213f3f4d69aa4968473f2536142
branch_name: refs/heads/master
visit_date: 2023-09-03T17:06:37.560889
revision_date: 2023-09-03T11:51:00
committer_date: 2023-09-03T12:12:27
github_id: 2,489,216
star_events_count: 16,288
fork_events_count: 6,635
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-12T07:41:58
gha_created_at: 2011-09-30T13:33:05
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 107
extension: py
filename: BuiltinVsClassMember.py
content:
class Foo:
def eval(self, value):
pass
def bar(self):
eval('foo')
# <ref>

blob_id: cabf26b34e44024e1558130b76fe5c7422ad8de5
directory_id: a3d6556180e74af7b555f8d47d3fea55b94bcbda
path: /third_party/polymer/v3_0/css_strip_prefixes.py
content_id: ee3ee6d5d929648af72581b087a8f147242a8bc7
detected_licenses: ["GPL-1.0-or-later", "MIT", "LGPL-2.0-or-later", "Apache-2.0", "BSD-3-Clause", "LicenseRef-scancode-unknown-license-reference"]
license_type: permissive
repo_name: chromium/chromium
snapshot_id: aaa9eda10115b50b0616d2f1aed5ef35d1d779d6
revision_id: a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c
branch_name: refs/heads/main
visit_date: 2023-08-24T00:35:12.585945
revision_date: 2023-08-23T22:01:11
committer_date: 2023-08-23T22:01:11
github_id: 120,360,765
star_events_count: 17,408
fork_events_count: 7,102
gha_license_id: BSD-3-Clause
gha_event_created_at: 2023-09-10T23:44:27
gha_created_at: 2018-02-05T20:55:32
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 2,613
extension: py
filename: css_strip_prefixes.py
content:
# Copyright 2017 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import fnmatch
import os
import re
import sys
# List of CSS properties to be removed.
CSS_PROPERTIES_TO_REMOVE = [
'-moz-appearance',
'-moz-box-sizing',
'-moz-flex-basis',
'-moz-user-select',
'-ms-align-content',
'-ms-align-self',
'-ms-flex',
'-ms-flex-align',
'-ms-flex-basis',
'-ms-flex-line-pack',
'-ms-flexbox',
'-ms-flex-direction',
'-ms-flex-pack',
'-ms-flex-wrap',
'-ms-inline-flexbox',
'-ms-user-select',
'-webkit-align-content',
'-webkit-align-items',
'-webkit-align-self',
'-webkit-animation',
'-webkit-animation-duration',
'-webkit-animation-iteration-count',
'-webkit-animation-name',
'-webkit-animation-timing-function',
'-webkit-flex',
'-webkit-flex-basis',
'-webkit-flex-direction',
'-webkit-flex-wrap',
'-webkit-inline-flex',
'-webkit-justify-content',
'-webkit-transform',
'-webkit-transform-origin',
'-webkit-transition',
'-webkit-transition-delay',
'-webkit-transition-property',
'-webkit-user-select',
]
# Regex to detect a complete one-line CSS declaration (helps avoid edge cases,
# like removing the 1st line of a multi-line CSS rule).
CSS_LINE_REGEX = r'^\s*[^;\s]+:\s*[^;]+;\s*(/\*.+\*/)*\s*$'
def ProcessFile(filename):
# Gather indices of lines to be removed.
  indices_to_remove = []
with open(filename) as f:
lines = f.readlines()
for i, line in enumerate(lines):
if ShouldRemoveLine(line):
indices_to_remove.append(i)
if len(indices_to_remove):
print('stripping CSS from: ' + filename)
    # Process line numbers in descending order, such that the array can be
# modified in-place.
indices_to_remove.reverse()
for i in indices_to_remove:
del lines[i]
# Reconstruct file.
with open(filename, 'w') as f:
for l in lines:
f.write(l)
return
def ShouldRemoveLine(line):
pred = lambda p: re.search(CSS_LINE_REGEX, line) and re.search(p, line)
return any(pred(p) for p in CSS_PROPERTIES_TO_REMOVE)
def main(argv):
  parser = argparse.ArgumentParser('Strips CSS rules not needed by Chrome')
  parser.add_argument(
      '--file_extension', choices=['js', 'html'], required=True)
  opts = parser.parse_args(argv)
files_to_process = [os.path.join(dirpath, f)
for dirpath, dirnames, files in os.walk('components-chromium')
for f in fnmatch.filter(files, '*.' + opts.file_extension)]
for f in files_to_process:
ProcessFile(f)
if __name__ == '__main__':
main(sys.argv[1:])
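# --- Added illustration (not part of the original file) ----------------------
# ShouldRemoveLine() only matches complete single-line declarations, so the
# opening line of a multi-line rule survives. Hypothetical inputs:
#   ShouldRemoveLine('  -webkit-transform: rotate(45deg);')  -> truthy (dropped)
#   ShouldRemoveLine('.cls { -webkit-transform:')            -> falsy (kept)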

blob_id: ed90584f79fed2a92a2ce5e2bcf50ba5f4eebb54
directory_id: dbb120cceaed09027f250bedbb6f5a8c5d4c71f5
path: /netket/utils/__init__.py
content_id: ed08483d51581159effe1de7d5bf3d1866a9b833
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: netket/netket
snapshot_id: b0ec4dc6e0ed5493299a38b8dbfd06e9f946e3b3
revision_id: f4f2844739302fd7e044b722eae8a93d0bfc59ec
branch_name: refs/heads/master
visit_date: 2023-08-29T12:03:29.446789
revision_date: 2023-08-20T10:21:41
committer_date: 2023-08-20T10:21:41
github_id: 130,741,783
star_events_count: 467
fork_events_count: 181
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-14T20:40:47
gha_created_at: 2018-04-23T18:48:08
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 1,626
extension: py
filename: __init__.py
content:
# Copyright 2021 The NetKet Authors - All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .config_flags import config
from .moduletools import _hide_submodules, rename_class, auto_export as _auto_export
from .version_check import module_version
# error if old dependencies are detected
from . import _dependencies_check
from . import dispatch
from . import struct
from . import numbers
from . import types
from . import float
from .array import HashableArray
from .partial import HashablePartial
from .jax import get_afun_if_module, wrap_afun, wrap_to_support_scalar
from .optional_deps import tensorboard_available
from .seed import random_seed
from .summation import KahanSum
from .holomorphic import is_probably_holomorphic
from .deprecation import (
warn_deprecation,
deprecated,
deprecated_new_name,
deprecate_dtype,
)
from .model_frameworks import maybe_wrap_module
from .history import History, accum_in_tree, accum_histories_in_tree
from . import mpi
_hide_submodules(
__name__,
remove_self=False,
ignore=["numbers", "types", "float", "dispatch", "errors"],
)

blob_id: a75292852cdfe0434399fbc9102368bf153711a5
directory_id: da1721d2783ea4d67ff4e73cee6eee71292f2ef7
path: /otp/level/EntityTypes.py
content_id: 0ee03a7a9724fa7d6b209d07bdd669b7bd2a6dbf
detected_licenses: ["BSD-3-Clause"]
license_type: permissive
repo_name: open-toontown/open-toontown
snapshot_id: bbdeb1b7bf0fb2861eba2df5483738c0112090ca
revision_id: 464c2d45f60551c31397bd03561582804e760b4a
branch_name: refs/heads/develop
visit_date: 2023-07-07T01:34:31.959657
revision_date: 2023-05-30T23:49:10
committer_date: 2023-05-30T23:49:10
github_id: 219,221,570
star_events_count: 143
fork_events_count: 104
gha_license_id: BSD-3-Clause
gha_event_created_at: 2023-09-11T09:52:34
gha_created_at: 2019-11-02T22:24:38
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 4,093
extension: py
filename: EntityTypes.py
content:
from .EntityTypeDesc import EntityTypeDesc
from toontown.coghq.SpecImports import *
class Entity(EntityTypeDesc):
abstract = 1
type = 'entity'
attribs = (('type', None, 'const'),
('name', '<unnamed>', 'string'),
('comment', '', 'string'),
('parentEntId', 0, 'entId'))
class LevelMgr(Entity):
type = 'levelMgr'
permanent = 1
attribs = (('name', 'LevelMgr', 'const'), ('parentEntId', 0, 'const'), ('modelFilename', '', 'const'))
class EditMgr(Entity):
type = 'editMgr'
permanent = 1
blockAttribs = ('comment',)
attribs = (('name', 'LevelMgr', 'const'),
('parentEntId', 0, 'const'),
('requestSave', None, 'const'),
('requestNewEntity', None, 'const'),
('insertEntity', None, 'const'),
('removeEntity', None, 'const'))
class AttribModifier(Entity):
type = 'attribModifier'
attribs = (('recursive', 0, 'bool'),
('typeName', '', 'string'),
('attribName', '', 'string'),
('value', '', 'string'))
class Locator(Entity):
type = 'locator'
attribs = (('searchPath', '', 'string'),)
class Nodepath(Entity):
type = 'nodepath'
attribs = (('parentEntId',
0,
'entId',
{'type': 'nodepath'}),
('pos', Point3(0, 0, 0), 'pos'),
('hpr', Vec3(0, 0, 0), 'hpr'),
('scale', 1, 'scale'))
class Zone(Nodepath):
type = 'zone'
permanent = 1
blockAttribs = ('pos', 'hpr')
attribs = (('parentEntId', 0, 'const'), ('description', '', 'string'), ('visibility', [], 'visZoneList'))
class EntrancePoint(Nodepath):
type = 'entrancePoint'
attribs = (('entranceId', -1, 'int'), ('radius',
15,
'float',
{'min': 0}), ('theta',
20,
'float',
{'min': 0}))
class LogicGate(Entity):
type = 'logicGate'
output = 'bool'
attribs = (('input1Event',
0,
'entId',
{'output': 'bool'}),
('input2Event',
0,
'entId',
{'output': 'bool'}),
('isInput1', 0, 'bool'),
('isInput2', 0, 'bool'),
('logicType',
'or',
'choice',
{'choiceSet': ['or',
'and',
'xor',
'nand',
'nor',
'xnor']}))
class CutScene(Entity):
type = 'cutScene'
output = 'bool'
attribs = (('pos', Point3(0, 0, 0), 'pos'),
('hpr', Vec3(0, 0, 0), 'hpr'),
('startStopEvent',
0,
'entId',
{'output': 'bool'}),
('effect',
'irisInOut',
'choice',
{'choiceSet': ['nothing', 'irisInOut', 'letterBox']}),
('motion',
'foo1',
'choice',
{'choiceSet': ['foo1']}),
('duration', 5.0, 'float'))
class CollisionSolid(Nodepath):
type = 'collisionSolid'
attribs = (('solidType',
'sphere',
'choice',
{'choiceSet': ['sphere', 'tube']}),
('radius', 1.0, 'float'),
('length', 0.0, 'float'),
('showSolid', 0, 'bool'))
class Model(Nodepath):
type = 'model'
attribs = (('loadType',
'loadModelCopy',
'choice',
{'choiceSet': ['loadModelCopy', 'loadModel', 'loadModelOnce']}),
('modelPath', None, 'bamfilename'),
('flattenType',
'light',
'choice',
{'choiceSet': ['none',
'light',
'medium',
'strong']}),
('collisionsOnly', 0, 'bool'),
('goonHatType',
'none',
'choice',
{'choiceSet': ['none', 'hardhat', 'security']}))
class Path(Nodepath):
type = 'path'
attribs = (('pathIndex', 0, 'int'), ('pathScale', 1.0, 'float'))
class VisibilityExtender(Entity):
type = 'visibilityExtender'
attribs = (('event',
None,
'entId',
{'output': 'bool'}), ('newZones', [], 'visZoneList'))
class AmbientSound(Nodepath):
type = 'ambientSound'
attribs = (('soundPath', '', 'bamfilename'), ('volume',
1,
'float',
{'min': 0,
'max': 1}), ('enabled', 1, 'bool'))
class PropSpinner(Entity):
type = 'propSpinner'
class EntityGroup(Entity):
type = 'entityGroup'
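# --- Added illustration (not part of the original file) ----------------------
# Every attribs entry follows the convention
#     (attribName, defaultValue, datatype[, params])
# so a level editor could, hypothetically, build one widget per attribute:
#   for entry in LogicGate.attribs:
#       name, default, datatype = entry[:3]
#       params = entry[3] if len(entry) > 3 else {}
#       print(name, default, datatype, params)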

blob_id: 1b55d04165bf1fc75de681e335da004080d25525
directory_id: a33e380ba70fa915bd3a6199cf88dcfa38ca5fab
path: /backpack/core/derivatives/basederivatives.py
content_id: 2a55ac88d6c5fbf207dfd2aa624f5e11dd7eab8e
detected_licenses: ["MIT"]
license_type: permissive
repo_name: f-dangel/backpack
snapshot_id: 1c90aaacad4569dd3153342ad68864466389767c
revision_id: 1ebfb4055be72ed9e0f9d101d78806bd4119645e
branch_name: refs/heads/master
visit_date: 2023-08-14T05:49:31.904383
revision_date: 2023-07-12T18:17:34
committer_date: 2023-07-12T18:17:34
github_id: 196,406,270
star_events_count: 505
fork_events_count: 61
gha_license_id: MIT
gha_event_created_at: 2023-09-05T14:06:32
gha_created_at: 2019-07-11T14:03:56
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 22,384
extension: py
filename: basederivatives.py
content:
"""Base classes for more flexible Jacobians and second-order information."""
import warnings
from abc import ABC
from typing import Callable, List, Tuple
from torch import Tensor
from torch.nn import Module
from backpack.core.derivatives import shape_check
class BaseDerivatives(ABC): # noqa: B024
"""First- and second-order partial derivatives of unparameterized module.
Note:
Throughout the code, use these conventions if possible:
- `N`: batch size
- Vectors
- Layer input shape `[N, D_in]`
- Layer output shape `[N, D_out]`
- Images
- Layer input shape `[N, C_in, H_in, W_in]`
- Layer output shape `[N, C_out, H_out, W_out]`
- `V`: vectorization axis
Definition:
For simplicity, consider the vector case, i.e. a function which maps an
`[N, D_in]` `input` into an `[N, D_out]` `output`.
        The input-output Jacobian `J` of the function is a tensor of shape
        `[N, D_out, N, D_in]`.
        Partial derivatives are ordered as
        `J[i, j, k, l] = 𝜕output[i, j] / 𝜕input[k, l]`.
The transposed input-output Jacobian `Jᵀ` has shape `[N, D_in, N, D_out]`.
Partial derivatives are ordered as
`Jᵀ[i, j, k, l] = 𝜕output[k, l] / 𝜕input[i, j]`.
In general, feature dimension indices `j, l` are product indices.
"""
@shape_check.jac_mat_prod_accept_vectors
@shape_check.jac_mat_prod_check_shapes
def jac_mat_prod(
self, module: Module, g_inp: Tuple[Tensor], g_out: Tuple[Tensor], mat: Tensor
) -> Tensor:
"""Apply Jacobian of the output w.r.t. input to a matrix.
It is assumed that the module input has shape `[N, *]`, while the output is
of shape `[N, •]`. Both `*`, `•` denote arbitrary shapes.
Apply Jacobian to all slices among the vectorization axis.
`result[v, n, •] = ∑ₖ ∑_* J[n, •, k, *] mat[v, n, *]`.
Args:
module: Extended module.
g_inp: Gradients of the module w.r.t. its inputs.
g_out: Gradients of the module w.r.t. its outputs.
mat: Matrix the Jacobian will be applied to. Must have
shape `[V, N, *]`.
Returns:
Jacobian-matrix product. Has shape [V, N, *].
Note:
- The Jacobian can be applied without knowledge about backpropagated
derivatives. Both `g_inp` and `g_out` are usually not required and
can be set to `None`.
"""
return self._jac_mat_prod(module, g_inp, g_out, mat)
def _jac_mat_prod(
self, module: Module, g_inp: Tuple[Tensor], g_out: Tuple[Tensor], mat: Tensor
) -> Tensor:
raise NotImplementedError
@shape_check.jac_t_mat_prod_accept_vectors
@shape_check.jac_t_mat_prod_check_shapes
def jac_t_mat_prod(
self,
module: Module,
g_inp: Tuple[Tensor],
g_out: Tuple[Tensor],
mat: Tensor,
subsampling: List[int] = None,
) -> Tensor:
"""Apply transposed input-ouput Jacobian of module output to a matrix.
Implicit application of Jᵀ:
result[v, ̃n, ̃c, ̃w, ...]
= ∑_{n, c, w} Jᵀ[̃n, ̃c, ̃w, ..., n, c, w, ...] mat[v, n, c, w, ...].
Args:
module: module which derivative is calculated
g_inp: input gradients
g_out: output gradients
mat: Matrix the transposed Jacobian will be applied to.
Must have shape ``[V, *module.output.shape]``; but if used with
sub-sampling, the batch dimension is replaced by ``len(subsampling)``.
subsampling: Indices of samples along the output's batch dimension that
should be considered. Defaults to ``None`` (use all samples).
Returns:
Transposed Jacobian-matrix product.
Has shape ``[V, *module.input0.shape]``; but if used with sub-sampling,
the batch dimension is replaced by ``len(subsampling)``.
"""
return self._jac_t_mat_prod(module, g_inp, g_out, mat, subsampling=subsampling)
def _jac_t_mat_prod(
self,
module: Module,
g_inp: Tuple[Tensor],
g_out: Tuple[Tensor],
mat: Tensor,
subsampling: List[int] = None,
) -> Tensor:
raise NotImplementedError
# TODO Add shape check
# TODO Use new convention
def ea_jac_t_mat_jac_prod(
self, module: Module, g_inp: Tuple[Tensor], g_out: Tuple[Tensor], mat: Tensor
) -> Tensor:
"""Expectation approximation of outer product with input-output Jacobian.
Used for backpropagation in KFRA.
        For `yₙ = f(xₙ), n = 1,...,N`, compute `E(Jₙᵀ mat Jₙ) = 1/N ∑ₙ Jₙᵀ mat Jₙ`.
        In index notation, let `output[n] = f(input[n]), n = 1,...,N`. Then,
        `result[i,j]
        = 1/N ∑ₙₖₗ (𝜕output[n,k] / 𝜕input[n,i]) mat[k,l] (𝜕output[n,j] / 𝜕input[n,l])`
Args:
module: Extended module.
g_inp: Gradients of the module w.r.t. its inputs.
g_out: Gradients of the module w.r.t. its outputs.
mat: Matrix of shape `[D_out, D_out]`.
# noqa: DAR202
Returns:
Matrix of shape `[D_in, D_in]`.
Note:
- This operation can be applied without knowledge about backpropagated
derivatives. Both `g_inp` and `g_out` are usually not required and
can be set to `None`.
Raises:
NotImplementedError: if not overwritten
"""
raise NotImplementedError
def hessian_is_zero(self, module: Module) -> bool:
"""Returns whether Hessian is zero.
I.e. whether ``∂²output[i] / ∂input[j] ∂input[k] = 0 ∀ i,j,k``.
Args:
module: current module to evaluate
# noqa: DAR202
Returns:
whether Hessian is zero
Raises:
NotImplementedError: if not overwritten
"""
raise NotImplementedError
def hessian_is_diagonal(self, module: Module) -> bool:
"""Is `∂²output[i] / ∂input[j] ∂input[k]` nonzero only if `i = j = k`.
The Hessian diagonal is only defined for layers that preserve the size
of their input.
Must be implemented by descendants that don't implement ``hessian_is_zero``.
Args:
module: current module to evaluate
# noqa: DAR202
Returns:
whether Hessian is diagonal
Raises:
NotImplementedError: if not overwritten
"""
raise NotImplementedError
# FIXME Currently returns `∂²output[i] / ∂input[i]² * g_out[0][i]`,
    # which is the residual matrix diagonal, rather than the Hessian diagonal
def hessian_diagonal(
self, module: Module, g_in: Tuple[Tensor], g_out: Tuple[Tensor]
) -> Tensor:
"""Return the Hessian diagonal `∂²output[i] / ∂input[i]²`.
Only required if `hessian_is_diagonal` returns `True`.
The Hessian diagonal is only defined for layers that preserve the size
of their input.
Args:
module: Module whose output-input Hessian diagonal is computed.
g_in: Gradients w.r.t. the module input.
g_out: Gradients w.r.t. the module output.
# noqa: DAR202
Returns:
Hessian diagonal. Has same shape as module input.
Raises:
NotImplementedError: if not overwritten
"""
raise NotImplementedError
def hessian_is_psd(self) -> bool:
"""Is `∂²output[i] / ∂input[j] ∂input[k]` positive semidefinite (PSD).
# noqa: DAR202
Returns:
            whether the Hessian is positive semi-definite
Raises:
NotImplementedError: if not overwritten
"""
raise NotImplementedError
@shape_check.residual_mat_prod_accept_vectors
@shape_check.residual_mat_prod_check_shapes
def residual_mat_prod(
self, module: Module, g_inp: Tuple[Tensor], g_out: Tuple[Tensor], mat: Tensor
) -> Tensor:
"""Multiply with the residual term.
Performs mat → [∑_{k} Hz_k(x) 𝛿z_k] mat.
Args:
module: module
g_inp: input gradients
g_out: output gradients
mat: matrix to multiply
Returns:
product
Note:
This function only has to be implemented if the residual is not
zero and not diagonal (for instance, `BatchNorm`).
"""
return self._residual_mat_prod(module, g_inp, g_out, mat)
def _residual_mat_prod(
self, module: Module, g_inp: Tuple[Tensor], g_out: Tuple[Tensor], mat: Tensor
) -> Tensor:
raise NotImplementedError
@staticmethod
def _reshape_like(mat: Tensor, shape: Tuple[int]) -> Tensor:
"""Reshape as like with trailing and additional 0th dimension.
If like is [N, C, H, ...], returns shape [-1, N, C, H, ...]
Args:
mat: Matrix to reshape.
shape: Trailing target shape.
Returns:
reshaped matrix
"""
return mat.reshape(-1, *shape)
@classmethod
def reshape_like_input(
cls, mat: Tensor, module: Module, subsampling: List[int] = None
) -> Tensor:
"""Reshapes matrix according to input.
Args:
mat: matrix to reshape
module: module which input shape is used
subsampling: Indices of active samples. ``None`` means use all samples.
Returns:
reshaped matrix
"""
shape = list(module.input0.shape)
if subsampling is not None:
shape[0] = len(subsampling)
return cls._reshape_like(mat, shape)
@classmethod
def reshape_like_output(cls, mat: Tensor, module: Module) -> Tensor:
"""Reshapes matrix like output.
Args:
mat: matrix to reshape
module: module which output is used
Returns:
reshaped matrix
"""
return cls._reshape_like(mat, module.output.shape)
class BaseParameterDerivatives(BaseDerivatives, ABC): # noqa: B024
"""First- and second order partial derivatives of a module with parameters.
Assumptions (true for `nn.Linear`, `nn.Conv(Transpose)Nd`, `nn.BatchNormNd`):
- Parameters are saved as `.weight` and `.bias` fields in a module
- The output is linear in the model parameters
Shape conventions:
------------------
Weight [C_w, H_w, W_w, ...] (usually 1d, 2d, 4d)
Bias [C_b, ...] (usually 1d)
For most layers, these shapes correspond to shapes of the module input or output.
"""
@shape_check.param_mjp_accept_vectors
def param_mjp(
self,
param_str: str,
module: Module,
g_inp: Tuple[Tensor],
g_out: Tuple[Tensor],
mat: Tensor,
sum_batch: bool = True,
subsampling: List[int] = None,
) -> Tensor:
"""Compute matrix-Jacobian products (MJPs) of the module w.r.t. a parameter.
Handles both vector and matrix inputs. Preserves input format in output.
Internally calls out to ``_{param_str}_jac_t_mat_prod`` function that must be
implemented by descendants. It follows the same signature, but does not have
the ``param_str`` argument.
Args:
param_str: Attribute name under which the parameter is stored in the module.
module: Module whose Jacobian will be applied. Must provide access to IO.
g_inp: Gradients w.r.t. module input.
g_out: Gradients w.r.t. module output.
mat: Matrix the Jacobian will be applied to. Has shape
``[V, *module.output.shape]`` (matrix case) or same shape as
``module.output`` (vector case). If used with subsampling, has dimension
len(subsampling) instead of batch size along the batch axis.
sum_batch: Sum out the MJP's batch axis. Default: ``True``.
subsampling: Indices of samples along the output's batch dimension that
should be considered. Defaults to ``None`` (use all samples).
Returns:
Matrix-Jacobian products. Has shape ``[V, *param_shape]`` when batch
summation is enabled (same shape as parameter in the vector case). Without
batch summation, the result has shape ``[V, N, *param_shape]`` (vector case
has shape ``[N, *param_shape]``). If used with subsampling, the batch size N
is replaced by len(subsampling).
Raises:
NotImplementedError: if required method is not implemented by derivatives class
"""
# input check
shape_check.shape_like_output(mat, module, subsampling=subsampling)
method_name = f"_{param_str}_jac_t_mat_prod"
mjp = getattr(self, method_name, None)
if mjp is None:
raise NotImplementedError(
f"Computation requires implementation of {method_name}, but {self} "
f"(defining derivatives of {module}) does not implement it."
)
mjp_out = mjp(
module, g_inp, g_out, mat, sum_batch=sum_batch, subsampling=subsampling
)
# output check
shape_check.check_like_with_sum_batch(
mjp_out, module, param_str, sum_batch=sum_batch
)
shape_check.check_same_V_dim(mjp_out, mat)
return mjp_out
@shape_check.bias_jac_mat_prod_accept_vectors
@shape_check.bias_jac_mat_prod_check_shapes
def bias_jac_mat_prod(
self, module: Module, g_inp: Tuple[Tensor], g_out: Tuple[Tensor], mat: Tensor
) -> Tensor:
"""Apply Jacobian of the output w.r.t. bias to a matrix.
Args:
module: module to perform derivatives on
g_inp: input gradients
g_out: output gradients
mat: Matrix the Jacobian will be applied to.
Must have shape [V, C_b, ...].
Returns:
Jacobian-matrix product. Has shape [V, N, C_out, H_out, ...].
"""
return self._bias_jac_mat_prod(module, g_inp, g_out, mat)
def _bias_jac_mat_prod(
self, module: Module, g_inp: Tuple[Tensor], g_out: Tuple[Tensor], mat: Tensor
) -> Tensor:
raise NotImplementedError
@shape_check.weight_jac_mat_prod_accept_vectors
@shape_check.weight_jac_mat_prod_check_shapes
def weight_jac_mat_prod(
self, module: Module, g_inp: Tuple[Tensor], g_out: Tuple[Tensor], mat: Tensor
) -> Tensor:
"""Apply Jacobian of the output w.r.t. weight to a matrix.
Args:
module: module to perform derivatives on
g_inp: input gradients
g_out: output gradients
mat: Matrix the Jacobian will be applied to.
Must have shape [V, C_w, H_w, ...].
Returns:
Jacobian-matrix product.
Has shape [V, N, C_out, H_out, ...].
"""
return self._weight_jac_mat_prod(module, g_inp, g_out, mat)
def _weight_jac_mat_prod(
self, module: Module, g_inp: Tuple[Tensor], g_out: Tuple[Tensor], mat: Tensor
) -> Tensor:
raise NotImplementedError
class BaseLossDerivatives(BaseDerivatives, ABC): # noqa: B024
"""Second- order partial derivatives of loss functions."""
# TODO Add shape check
def sqrt_hessian(
self,
module: Module,
g_inp: Tuple[Tensor],
g_out: Tuple[Tensor],
subsampling: List[int] = None,
) -> Tensor:
"""Symmetric factorization ('sqrt') of the loss Hessian.
The Hessian factorization is returned in format ``Hs = [D, N, D]``, where
``Hs[:, n, :]`` is the Hessian factorization for the ``n``th sample, i.e.
        ``Hs[:, n, :]ᵀ Hs[:, n, :]`` is the Hessian w.r.t. the ``n``th sample.
Args:
module: Loss layer whose factorized Hessian will be computed.
g_inp: Gradients w.r.t. module input.
g_out: Gradients w.r.t. module output.
subsampling: Indices of data samples to be considered. Default of ``None``
uses all data in the mini-batch.
Returns:
Symmetric factorization of the loss Hessian for each sample. If the input
to the loss has shape ``[N, D]``, this is a tensor of shape ``[D, N, D]``;
if used with sub-sampling, ``N`` is replaced by ``len(subsampling)``.
For fixed ``n``, squaring the matrix implied by the slice ``[:, n, :]``
            results in the loss Hessian w.r.t. sample ``n``.
"""
self._check_2nd_order_make_sense(module, g_out)
return self._sqrt_hessian(module, g_inp, g_out, subsampling=subsampling)
def _sqrt_hessian(
self,
module: Module,
g_inp: Tuple[Tensor],
g_out: Tuple[Tensor],
subsampling: List[int] = None,
) -> Tensor:
raise NotImplementedError
# TODO Add shape check
def sqrt_hessian_sampled(
self,
module: Module,
g_inp: Tuple[Tensor],
g_out: Tuple[Tensor],
mc_samples: int = 1,
subsampling: List[int] = None,
) -> Tensor:
"""A Monte-Carlo sampled symmetric factorization of the loss Hessian.
The Hessian factorization is returned in format ``Hs = [M, N, D]``, where
``Hs[:, n, :]`` approximates the Hessian factorization for the ``n``th sample,
        i.e. ``Hs[:, n, :]ᵀ Hs[:, n, :]`` approximates the Hessian w.r.t. sample
``n``.
Args:
module: Loss layer whose factorized Hessian will be computed.
g_inp: Gradients w.r.t. module input.
g_out: Gradients w.r.t. module output.
mc_samples: Number of samples used for MC approximation.
subsampling: Indices of data samples to be considered. Default of ``None``
uses all data in the mini-batch.
Returns:
Symmetric factorization of the loss Hessian for each sample. If the input
to the loss has shape ``[N, D]``, this is a tensor of shape ``[M, N, D]``
when using ``M`` MC samples; if used with sub-sampling, ``N`` is replaced
by ``len(subsampling)``. For fixed ``n``, squaring the matrix implied by the
            slice ``[:, n, :]`` approximates the loss Hessian w.r.t. sample ``n``.
"""
self._check_2nd_order_make_sense(module, g_out)
return self._sqrt_hessian_sampled(
module, g_inp, g_out, mc_samples=mc_samples, subsampling=subsampling
)
def _sqrt_hessian_sampled(
self,
module: Module,
g_inp: Tuple[Tensor],
g_out: Tuple[Tensor],
mc_samples: int = 1,
subsampling=None,
) -> Tensor:
raise NotImplementedError
@shape_check.make_hessian_mat_prod_accept_vectors
@shape_check.make_hessian_mat_prod_check_shapes
def make_hessian_mat_prod(
self, module: Module, g_inp: Tuple[Tensor], g_out: Tuple[Tensor]
) -> Callable[[Tensor], Tensor]:
"""Multiplication of the input Hessian with a matrix.
Return a function that maps mat to H * mat.
Args:
module: module to perform derivatives on
g_inp: input gradients
g_out: output gradients
Returns:
function that maps mat to H * mat
"""
self._check_2nd_order_make_sense(module, g_out)
return self._make_hessian_mat_prod(module, g_inp, g_out)
def _make_hessian_mat_prod(
self, module: Module, g_inp: Tuple[Tensor], g_out: Tuple[Tensor]
) -> Callable[[Tensor], Tensor]:
raise NotImplementedError
# TODO Add shape check
def sum_hessian(
self, module: Module, g_inp: Tuple[Tensor], g_out: Tuple[Tensor]
) -> Tensor:
"""Loss Hessians, summed over the batch dimension.
Args:
module: module to perform derivatives on
g_inp: input gradients
g_out: output gradients
Returns:
sum of hessians
"""
self._check_2nd_order_make_sense(module, g_out)
return self._sum_hessian(module, g_inp, g_out)
def _sum_hessian(
self, module: Module, g_inp: Tuple[Tensor], g_out: Tuple[Tensor]
) -> Tensor:
raise NotImplementedError
def _check_2nd_order_make_sense(self, module: Module, g_out: Tuple[Tensor]) -> None:
"""Verify conditions for 2nd-order extensions to be working.
2nd-order extensions are only guaranteed to work if the `loss`,
on which `backward()` is called, is a scalar that has not been
modified further after passing through the loss function module.
Args:
module: module to perform derivatives on
g_out: output gradients
"""
self._check_output_is_scalar(module)
self._check_loss_has_not_been_modified(module, g_out)
@classmethod
def _check_output_is_scalar(cls, module: Module) -> None:
"""Raise an exception is the module output is not a scalar.
Args:
module: module to perform derivatives on
Raises:
ValueError: if output is not scalar
"""
if module.output.numel() != 1:
raise ValueError(
"Output must be scalar. Got {}".format(module.output.shape)
)
@classmethod
def _check_loss_has_not_been_modified(
cls, module: Module, g_out: Tuple[Tensor]
) -> None:
"""Raise a warning if the module output seems to have been changed.
Args:
module: module to perform derivatives on
g_out: output gradients
"""
grad_out_is_identity = g_out is None or (g_out[0] == 1.0).all().item()
if not grad_out_is_identity:
warnings.warn(
"The output of {} seems to have been modified.".format(module)
+ " Backpack might give wrong second-order information."
+ " Make sure you call backward() on the output of a loss"
+ " function module from torch.nn",
UserWarning,
)
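# --- Added illustration (not part of the original file) ----------------------
# A minimal sketch of how a concrete derivatives class plugs into
# BaseDerivatives: for a hypothetical layer computing output = 2 * input, the
# Jacobian is twice the identity, so both products simply scale `mat`. The
# shape-check decorators on jac_mat_prod/jac_t_mat_prod still validate shapes.
class ScaleByTwoDerivatives(BaseDerivatives):
    """Derivatives of a hypothetical module computing ``output = 2 * input``."""

    def _jac_mat_prod(
        self, module: Module, g_inp: Tuple[Tensor], g_out: Tuple[Tensor], mat: Tensor
    ) -> Tensor:
        return 2 * mat

    def _jac_t_mat_prod(
        self,
        module: Module,
        g_inp: Tuple[Tensor],
        g_out: Tuple[Tensor],
        mat: Tensor,
        subsampling: List[int] = None,
    ) -> Tensor:
        # the Jacobian 2 * I is symmetric, so the transposed product is identical
        return 2 * mat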

blob_id: e20f53cffda0cca08d1f41e56718d199a0195d6f
directory_id: e4070e15e5227f02c92d04510941b82ee39732ad
path: /grobid_client/grobid_client.py
content_id: b63cf2098a1cf863029f8fc2b7c2a3cb10460a5d
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: kermitt2/grobid_client_python
snapshot_id: 73ff4434341a038806ddd9ccd441d2b01be8ef08
revision_id: b9e295754d18c3eb880f5e54be5ced0dcc834227
branch_name: refs/heads/master
visit_date: 2023-09-03T11:14:33.232076
revision_date: 2023-08-25T09:49:45
committer_date: 2023-08-25T09:49:45
github_id: 150,803,955
star_events_count: 114
fork_events_count: 44
gha_license_id: Apache-2.0
gha_event_created_at: 2023-07-12T23:19:03
gha_created_at: 2018-09-28T23:23:48
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 16,720
extension: py
filename: grobid_client.py
content:
"""
Grobid Python Client
This version uses the standard ThreadPoolExecutor for parallelizing the
concurrent calls to the GROBID services. Given the limits of
ThreadPoolExecutor (input stored in memory, blocking Executor.map until the
whole input is acquired), it works with batches of PDF of a size indicated
in the config.json file (default is 1000 entries). We only move from one
batch to the next once the previous batch is entirely processed - slightly
sub-optimal, but it should scale better. Working without batches would mean
acquiring a list of millions of files across directories, which would itself
require something scalable (e.g. done in a separate thread), and is not
implemented for the moment.
"""
import os
import io
import json
import argparse
import time
import concurrent.futures
import ntpath
import requests
import pathlib
from .client import ApiClient
class ServerUnavailableException(Exception):
pass
class GrobidClient(ApiClient):
def __init__(self, grobid_server='localhost',
batch_size=1000,
coordinates=["persName", "figure", "ref", "biblStruct", "formula", "s" ],
sleep_time=5,
timeout=60,
config_path=None,
check_server=True):
self.config = {
'grobid_server': grobid_server,
'batch_size': batch_size,
'coordinates': coordinates,
'sleep_time': sleep_time,
'timeout': timeout
}
if config_path:
self._load_config(config_path)
if check_server:
self._test_server_connection()
def _load_config(self, path="./config.json"):
"""
Load the json configuration
"""
        with open(path) as config_file:
            self.config = json.load(config_file)
def _test_server_connection(self):
"""Test if the server is up and running."""
the_url = self.get_server_url("isalive")
try:
r = requests.get(the_url)
        except requests.exceptions.RequestException:
print("GROBID server does not appear up and running, the connection to the server failed")
raise ServerUnavailableException
status = r.status_code
if status != 200:
print("GROBID server does not appear up and running " + str(status))
else:
print("GROBID server is up and running")
def _output_file_name(self, input_file, input_path, output):
# we use ntpath here to be sure it will work on Windows too
if output is not None:
input_file_name = str(os.path.relpath(os.path.abspath(input_file), input_path))
filename = os.path.join(
output, os.path.splitext(input_file_name)[0] + ".grobid.tei.xml"
)
else:
input_file_name = ntpath.basename(input_file)
filename = os.path.join(
ntpath.dirname(input_file),
os.path.splitext(input_file_name)[0] + ".grobid.tei.xml",
)
return filename
def process(
self,
service,
input_path,
output=None,
n=10,
generateIDs=False,
consolidate_header=True,
consolidate_citations=False,
include_raw_citations=False,
include_raw_affiliations=False,
tei_coordinates=False,
segment_sentences=False,
force=True,
verbose=False,
):
batch_size_pdf = self.config["batch_size"]
input_files = []
for (dirpath, dirnames, filenames) in os.walk(input_path):
for filename in filenames:
if filename.endswith(".pdf") or filename.endswith(".PDF") or \
(service == 'processCitationList' and (filename.endswith(".txt") or filename.endswith(".TXT"))):
if verbose:
try:
print(filename)
except Exception:
# may happen on linux see https://stackoverflow.com/questions/27366479/python-3-os-walk-file-paths-unicodeencodeerror-utf-8-codec-cant-encode-s
pass
input_files.append(os.sep.join([dirpath, filename]))
if len(input_files) == batch_size_pdf:
self.process_batch(
service,
input_files,
input_path,
output,
n,
generateIDs,
consolidate_header,
consolidate_citations,
include_raw_citations,
include_raw_affiliations,
tei_coordinates,
segment_sentences,
force,
verbose,
)
input_files = []
# last batch
if len(input_files) > 0:
self.process_batch(
service,
input_files,
input_path,
output,
n,
generateIDs,
consolidate_header,
consolidate_citations,
include_raw_citations,
include_raw_affiliations,
tei_coordinates,
segment_sentences,
force,
verbose,
)
def process_batch(
self,
service,
input_files,
input_path,
output,
n,
generateIDs,
consolidate_header,
consolidate_citations,
include_raw_citations,
include_raw_affiliations,
tei_coordinates,
segment_sentences,
force,
verbose=False,
):
if verbose:
print(len(input_files), "files to process in current batch")
# we use ThreadPoolExecutor and not ProcessPoolExecutor because it is an I/O intensive process
with concurrent.futures.ThreadPoolExecutor(max_workers=n) as executor:
#with concurrent.futures.ProcessPoolExecutor(max_workers=n) as executor:
results = []
for input_file in input_files:
# check if TEI file is already produced
filename = self._output_file_name(input_file, input_path, output)
if not force and os.path.isfile(filename):
print(filename, "already exist, skipping... (use --force to reprocess pdf input files)")
continue
selected_process = self.process_pdf
if service == 'processCitationList':
selected_process = self.process_txt
r = executor.submit(
selected_process,
service,
input_file,
generateIDs,
consolidate_header,
consolidate_citations,
include_raw_citations,
include_raw_affiliations,
tei_coordinates,
segment_sentences)
results.append(r)
for r in concurrent.futures.as_completed(results):
input_file, status, text = r.result()
filename = self._output_file_name(input_file, input_path, output)
if status != 200 or text is None:
print("Processing of", input_file, "failed with error", str(status), ",", text)
# writing error file with suffixed error code
try:
pathlib.Path(os.path.dirname(filename)).mkdir(parents=True, exist_ok=True)
with open(filename.replace(".grobid.tei.xml", "_"+str(status)+".txt"), 'w', encoding='utf8') as tei_file:
if text is not None:
tei_file.write(text)
else:
tei_file.write("")
except OSError:
print("Writing resulting TEI XML file", filename, "failed")
else:
# writing TEI file
try:
pathlib.Path(os.path.dirname(filename)).mkdir(parents=True, exist_ok=True)
with open(filename,'w',encoding='utf8') as tei_file:
tei_file.write(text)
except OSError:
print("Writing resulting TEI XML file", filename, "failed")
def process_pdf(
self,
service,
pdf_file,
generateIDs,
consolidate_header,
consolidate_citations,
include_raw_citations,
include_raw_affiliations,
tei_coordinates,
segment_sentences
):
pdf_handle = open(pdf_file, "rb")
files = {
"input": (
pdf_file,
pdf_handle,
"application/pdf",
{"Expires": "0"},
)
}
the_url = self.get_server_url(service)
# set the GROBID parameters
the_data = {}
if generateIDs:
the_data["generateIDs"] = "1"
if consolidate_header:
the_data["consolidateHeader"] = "1"
if consolidate_citations:
the_data["consolidateCitations"] = "1"
if include_raw_citations:
the_data["includeRawCitations"] = "1"
if include_raw_affiliations:
the_data["includeRawAffiliations"] = "1"
if tei_coordinates:
the_data["teiCoordinates"] = self.config["coordinates"]
if segment_sentences:
the_data["segmentSentences"] = "1"
try:
res, status = self.post(
url=the_url, files=files, data=the_data, headers={"Accept": "text/plain"}, timeout=self.config['timeout']
)
            if status == 503:
                # server is saturated: release our file handle before retrying
                pdf_handle.close()
                time.sleep(self.config["sleep_time"])
return self.process_pdf(
service,
pdf_file,
generateIDs,
consolidate_header,
consolidate_citations,
include_raw_citations,
include_raw_affiliations,
tei_coordinates,
segment_sentences
)
except requests.exceptions.ReadTimeout:
pdf_handle.close()
return (pdf_file, 408, None)
pdf_handle.close()
return (pdf_file, status, res.text)
def get_server_url(self, service):
return self.config['grobid_server'] + "/api/" + service
def process_txt(
self,
service,
txt_file,
generateIDs,
consolidate_header,
consolidate_citations,
include_raw_citations,
include_raw_affiliations,
tei_coordinates,
segment_sentences
):
# create request based on file content
references = None
with open(txt_file) as f:
references = [line.rstrip() for line in f]
the_url = self.get_server_url(service)
# set the GROBID parameters
the_data = {}
if consolidate_citations:
the_data["consolidateCitations"] = "1"
if include_raw_citations:
the_data["includeRawCitations"] = "1"
the_data["citations"] = references
res, status = self.post(
url=the_url, data=the_data, headers={"Accept": "application/xml"}
)
if status == 503:
time.sleep(self.config["sleep_time"])
return self.process_txt(
service,
txt_file,
generateIDs,
consolidate_header,
consolidate_citations,
include_raw_citations,
include_raw_affiliations,
tei_coordinates,
segment_sentences
)
return (txt_file, status, res.text)
def main():
valid_services = [
"processFulltextDocument",
"processHeaderDocument",
"processReferences",
"processCitationList"
]
parser = argparse.ArgumentParser(description="Client for GROBID services")
parser.add_argument(
"service",
help="one of " + str(valid_services),
)
parser.add_argument(
"--input", default=None, help="path to the directory containing PDF files or .txt (for processCitationList only, one reference per line) to process"
)
parser.add_argument(
"--output",
default=None,
help="path to the directory where to put the results (optional)",
)
parser.add_argument(
"--config",
default="./config.json",
help="path to the config file, default is ./config.json",
)
parser.add_argument("--n", default=10, help="concurrency for service usage")
parser.add_argument(
"--generateIDs",
action="store_true",
help="generate random xml:id to textual XML elements of the result files",
)
parser.add_argument(
"--consolidate_header",
action="store_true",
help="call GROBID with consolidation of the metadata extracted from the header",
)
parser.add_argument(
"--consolidate_citations",
action="store_true",
help="call GROBID with consolidation of the extracted bibliographical references",
)
parser.add_argument(
"--include_raw_citations",
action="store_true",
help="call GROBID requesting the extraction of raw citations",
)
parser.add_argument(
"--include_raw_affiliations",
action="store_true",
help="call GROBID requestiong the extraciton of raw affiliations",
)
parser.add_argument(
"--force",
action="store_true",
help="force re-processing pdf input files when tei output files already exist",
)
parser.add_argument(
"--teiCoordinates",
action="store_true",
help="add the original PDF coordinates (bounding boxes) to the extracted elements",
)
parser.add_argument(
"--segmentSentences",
action="store_true",
help="segment sentences in the text content of the document with additional <s> elements",
)
parser.add_argument(
"--verbose",
action="store_true",
help="print information about processed files in the console",
)
args = parser.parse_args()
input_path = args.input
config_path = args.config
output_path = args.output
    n = 10
    if args.n is not None:
        try:
            n = int(args.n)
        except ValueError:
            print("Invalid concurrency parameter n:", args.n, "- n = 10 will be used by default")
            n = 10
# if output path does not exist, we create it
if output_path is not None and not os.path.isdir(output_path):
try:
print("output directory does not exist but will be created:", output_path)
os.makedirs(output_path)
except OSError:
print("Creation of the directory", output_path, "failed")
else:
print("Successfully created the directory", output_path)
service = args.service
generateIDs = args.generateIDs
consolidate_header = args.consolidate_header
consolidate_citations = args.consolidate_citations
include_raw_citations = args.include_raw_citations
include_raw_affiliations = args.include_raw_affiliations
force = args.force
tei_coordinates = args.teiCoordinates
segment_sentences = args.segmentSentences
verbose = args.verbose
    if service is None or service not in valid_services:
print("Missing or invalid service, must be one of", valid_services)
exit(1)
try:
client = GrobidClient(config_path=config_path)
except ServerUnavailableException:
exit(1)
start_time = time.time()
client.process(
service,
input_path,
output=output_path,
n=n,
generateIDs=generateIDs,
consolidate_header=consolidate_header,
consolidate_citations=consolidate_citations,
include_raw_citations=include_raw_citations,
include_raw_affiliations=include_raw_affiliations,
tei_coordinates=tei_coordinates,
segment_sentences=segment_sentences,
force=force,
verbose=verbose,
)
runtime = round(time.time() - start_time, 3)
print("runtime: %s seconds " % (runtime))
if __name__ == "__main__":
main()
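# ---------------------------------------------------------------------------
# Illustrative programmatic usage -- a minimal sketch, not invoked by the CLI
# above. It relies only on config keys this module actually reads
# (grobid_server, batch_size, sleep_time, timeout, coordinates); the paths
# below are placeholders.
def _example_usage():
    client = GrobidClient(config_path="./config.json")
    client.process(
        "processFulltextDocument",
        "/path/to/pdf/input",          # directory walked recursively for PDFs
        output="/path/to/tei/output",
        n=10,                          # concurrent worker threads
        consolidate_header=True,
        force=False,                   # keep existing .grobid.tei.xml results
    )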
|
f433a06a864dba9b7df70cc8cb6dfb83cdfd524e
|
2c5414ed169892c538cd802c06d67001d9e4b960
|
/plugins/python/regress/plugin_conflict.py
|
363219334b32cbf497269c8c0b91274a11abbb66
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"Zlib",
"BSD-Source-Code",
"ISC",
"BSD-2-Clause"
] |
permissive
|
sudo-project/sudo
|
bdfeebb02cb39151fc4cfe69366b887f092e0c16
|
956de5cbbc650d6aec19804cd376a39164e76e5b
|
refs/heads/main
| 2023-09-04T11:12:09.797952
| 2023-09-02T21:25:58
| 2023-09-02T21:25:58
| 57,972,154
| 922
| 216
|
NOASSERTION
| 2023-08-18T00:43:55
| 2016-05-03T13:41:24
|
C
|
UTF-8
|
Python
| false
| false
| 326
|
py
|
plugin_conflict.py
|
import sudo
import sys
sys.path = []
class ConflictPlugin(sudo.Plugin):
def __init__(self, plugin_options, **kwargs):
sudo.log_info("PATH before: {} (should be empty)".format(sys.path))
sys.path = [sudo.options_as_dict(plugin_options).get("Path")]
sudo.log_info("PATH set: {}".format(sys.path))
|
2d3676014aeb8559bd0831e2da51d4526ece775c
|
79cd7118917561ab5b8d25f04143e0975578b74f
|
/pytorch_widedeep/bayesian_models/__init__.py
|
993996c88f74daf47b559d07e2e6986eae95fc96
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
jrzaurin/pytorch-widedeep
|
aac80263ba8e94d36b41fb1f47181a66471d7594
|
74f1ab6feb2e231fdb8c10478638d9e8d5cf3a47
|
refs/heads/master
| 2023-09-06T06:41:41.800801
| 2023-09-04T15:32:38
| 2023-09-04T15:32:38
| 107,763,164
| 1,036
| 124
|
Apache-2.0
| 2023-09-04T15:32:39
| 2017-10-21T08:11:44
|
Python
|
UTF-8
|
Python
| false
| false
| 95
|
py
|
__init__.py
|
from pytorch_widedeep.bayesian_models.tabular import (
BayesianWide,
BayesianTabMlp,
)
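# A usage sketch: the re-exports above let callers import the Bayesian models
# directly from the subpackage, e.g.
#
#     from pytorch_widedeep.bayesian_models import BayesianWide, BayesianTabMlp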
|
6eb3da3cf96e74244b17ccafbcd463e09fbf8fe5
|
e5e0d729f082999a9bec142611365b00f7bfc684
|
/tensorflow/lite/testing/generate_examples.py
|
98f32d854a9f31ac29a5d14fe9480adc6447efe3
|
[
"Apache-2.0"
] |
permissive
|
NVIDIA/tensorflow
|
ed6294098c7354dfc9f09631fc5ae22dbc278138
|
7cbba04a2ee16d21309eefad5be6585183a2d5a9
|
refs/heads/r1.15.5+nv23.03
| 2023-08-16T22:25:18.037979
| 2023-08-03T22:09:23
| 2023-08-03T22:09:23
| 263,748,045
| 763
| 117
|
Apache-2.0
| 2023-07-03T15:45:19
| 2020-05-13T21:34:32
|
C++
|
UTF-8
|
Python
| false
| false
| 3,647
|
py
|
generate_examples.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generate a series of TensorFlow graphs that become tflite test cases.
Usage:
generate_examples <output directory>
bazel run //tensorflow/lite/testing:generate_examples
To more easily debug failures use (or override) the --save_graphdefs flag to
place text proto graphdefs into the generated zip files.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import argparse
import os
import sys
from tensorflow.lite.testing import generate_examples_lib
from tensorflow.lite.testing import toco_convert
# TODO(aselle): Disable GPU for now
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
parser = argparse.ArgumentParser(description="Script to generate TFLite tests.")
parser.add_argument("output_path",
help="Directory where the outputs will be go.")
parser.add_argument(
"--zip_to_output",
type=str,
help="Particular zip to output.",
required=True)
parser.add_argument("--toco",
type=str,
help="Path to toco tool.",
required=True)
parser.add_argument(
"--known_bugs_are_errors",
action="store_true",
help=("If a particular model is affected by a known bug,"
" count it as a converter error."))
parser.add_argument(
    "--ignore_converter_errors",
    action="store_true",
    help="Ignore any converter errors encountered instead of raising an exception.")
parser.add_argument(
"--save_graphdefs",
action="store_true",
help="Include intermediate graphdefs in the output zip files.")
parser.add_argument(
"--run_with_flex",
action="store_true",
help="Whether the TFLite Flex converter is being used.")
parser.add_argument(
"--make_edgetpu_tests",
action="store_true",
help="Whether to generate test cases for edgetpu.")
parser.add_argument(
"--make_forward_compat_test",
action="store_true",
help="Make tests by setting TF forward compatibility horizon to the future")
# Toco binary path provided by the generate rule.
bin_path = None
def main(unused_args):
options = generate_examples_lib.Options()
options.output_path = FLAGS.output_path
options.zip_to_output = FLAGS.zip_to_output
options.toco = FLAGS.toco
options.known_bugs_are_errors = FLAGS.known_bugs_are_errors
options.ignore_converter_errors = FLAGS.ignore_converter_errors
options.save_graphdefs = FLAGS.save_graphdefs
options.run_with_flex = FLAGS.run_with_flex
options.make_edgetpu_tests = FLAGS.make_edgetpu_tests
options.make_forward_compat_test = FLAGS.make_forward_compat_test
options.tflite_convert_function = toco_convert.toco_convert
generate_examples_lib.generate_examples(options)
if __name__ == "__main__":
FLAGS, unparsed = parser.parse_known_args()
if unparsed:
print("Usage: %s <path out> <zip file to generate>")
exit(1)
else:
tf.compat.v1.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
f2094cdab0072aea060763462c78d78b7d064bc3
|
c6a101547c2b7f36fe83a725974a8a7f02cf176d
|
/data_structures/binary_trees/largest_subtree_sum.py
|
d79002f2afcf5a882de69f86c1fdc32b6e85a8be
|
[
"MIT"
] |
permissive
|
prabhupant/python-ds
|
737cc35574de5c2ece0f0813cf00775324a8dbe7
|
f7d6d78fedaf84b7527965bb1798b7a8da989474
|
refs/heads/master
| 2023-08-22T05:04:22.937675
| 2022-10-04T01:29:39
| 2022-10-04T01:29:39
| 199,366,418
| 2,325
| 704
|
MIT
| 2022-10-10T13:01:10
| 2019-07-29T02:48:57
|
Python
|
UTF-8
|
Python
| false
| false
| 500
|
py
|
largest_subtree_sum.py
|
# Find the largest subtree sum in a binary tree
# Do a postorder traversal
class Node:
def __init__(self, val):
self.val = val
self.left = None
self.right = None
def sum_util(root, ans):
if root is None:
return 0
s = root.val + sum_util(root.left, ans) + sum_util(root.right, ans)
ans[0] = max(ans[0], s)
return s
def find_sum(root):
if root is None:
return 0
    ans = [float("-inf")]  # sentinel that is safe even for all-negative trees
sum_util(root, ans)
return ans[0]
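# A minimal usage sketch (node values chosen arbitrarily for illustration):
# the left subtree (-2, 4, 5) sums to 7, the right subtree (3, -6, 2) to -1,
# and the whole tree to 7, so the answer is 7.
if __name__ == '__main__':
    root = Node(1)
    root.left = Node(-2)
    root.right = Node(3)
    root.left.left = Node(4)
    root.left.right = Node(5)
    root.right.left = Node(-6)
    root.right.right = Node(2)
    print(find_sum(root))  # 7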
|
9d750ee4f2ec167c18d66680d33624233dc00348
|
6923f79f1eaaba0ab28b25337ba6cb56be97d32d
|
/Non_Linear_Finite_Element_Analysis_of_Solids_and_Structures_Borst/pyfem-1.0/pyfem/elements/Spring.py
|
ad466fd86969780f5e230f33a0670c5beece23b1
|
[] |
no_license
|
burakbayramli/books
|
9fe7ba0cabf06e113eb125d62fe16d4946f4a4f0
|
5e9a0e03aa7ddf5e5ddf89943ccc68d94b539e95
|
refs/heads/master
| 2023-08-17T05:31:08.885134
| 2023-08-14T10:05:37
| 2023-08-14T10:05:37
| 72,460,321
| 223
| 174
| null | 2022-10-24T12:15:06
| 2016-10-31T17:24:00
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 3,976
|
py
|
Spring.py
|
############################################################################
# This Python file is part of PyFEM-1.0, released on Aug. 29, 2012. #
# The PyFEM code accompanies the book: #
# #
# 'Non-Linear Finite Element Analysis of Solids and Structures' #
# R. de Borst, M.A. Crisfield, J.J.C. Remmers and C.V. Verhoosel #
# John Wiley and Sons, 2012, ISBN 978-0470666449 #
# #
# The code is written by J.J.C. Remmers, C.V. Verhoosel and R. de Borst. #
# Comments and suggestions can be sent to: #
# PyFEM-support@tue.nl #
# #
# The latest version can be downloaded from the web-site: #
# http://www.wiley.com/go/deborst #
# #
# The code is open source and intended for educational and scientific #
# purposes only. If you use PyFEM in your research, the developers would #
# be grateful if you could cite the book. #
# #
# Disclaimer: #
# The authors reserve all rights but do not guarantee that the code is #
# free from errors. Furthermore, the authors shall not be liable in any #
# event caused by the use of the program. #
############################################################################
from .Element import Element
from pyfem.util.transformations import toElementCoordinates, toGlobalCoordinates
from numpy import zeros, eye, array
class Spring ( Element ):
#Number of dofs per element
dofTypes = ['u','v']
def __init__ ( self, elnodes , props ):
Element.__init__( self, elnodes , props )
  def __type__ ( self ):
    return self.__class__.__name__
def getTangentStiffness ( self, elemdat ):
#Compute the current state vector
a = toElementCoordinates( elemdat.state , elemdat.coords )
Da = toElementCoordinates( elemdat.Dstate , elemdat.coords )
#Compute the elongation of the spring
elong = a[2]-a[0]
#Compute the force in the spring
Fs = elong * elemdat.props.k
#Compute the element internal force vector in the element coordinate system
elFint = array([-Fs,0.,Fs,0])
#Determine the element tangent stiffness in the element coordinate system
elKbar = zeros( (4,4) )
elKbar[:2,:2] = elemdat.props.k*eye(2)
elKbar[:2,2:] = -elemdat.props.k*eye(2)
elKbar[2:,:2] = elKbar[:2,2:]
elKbar[2:,2:] = elKbar[:2,:2]
#Rotate element tangent stiffness to the global coordinate system
elemdat.stiff = toGlobalCoordinates( elKbar, elemdat.coords )
elemdat.fint = toGlobalCoordinates( elFint, elemdat.coords )
#------------------------------------------------------------------
def getInternalForce ( self, elemdat ):
#Compute the current state vector
a = toElementCoordinates( elemdat.state , elemdat.coords )
Da = toElementCoordinates( elemdat.Dstate , elemdat.coords )
#Compute the elongation of the spring
elong = a[2]-a[0]
#Compute the force in the spring
Fs = elong * elemdat.props.k
#Compute the element internal force vector in the element coordinate system
elFint = array([-Fs,0.,Fs,0])
#Rotate element fint to the global coordinate system
elemdat.fint = toGlobalCoordinates( elFint, elemdat.coords )
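#------------------------------------------------------------------
# Illustrative sketch (not part of PyFEM itself): the element stiffness
# assembled in getTangentStiffness has the block form
#
#   Kbar = [  k*I  -k*I ]
#          [ -k*I   k*I ]
#
# with I the 2x2 identity, so a standalone builder is simply:
def spring_stiffness_demo ( k ):
  Kbar = zeros( (4,4) )
  Kbar[:2,:2] = k*eye(2)
  Kbar[:2,2:] = -k*eye(2)
  Kbar[2:,:2] = Kbar[:2,2:]
  Kbar[2:,2:] = Kbar[:2,:2]
  return Kbar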
|
aa7de59d0c744f6e8b496d56cf86e62b407f914f
|
776bdcfeaf7418fc0504e8f5e1703e31f2cc23cb
|
/riko/modules/fetchsitefeed.py
|
544dfc3ed13a734671ec38f0fa72ceb66acc1eb4
|
[
"MIT"
] |
permissive
|
nerevu/riko
|
f0c7176de2c7e94331daef50c44962140671885c
|
4d27102b605b8b4050ba566d5e0895d8d5f8b09a
|
refs/heads/master
| 2023-03-08T02:51:44.474765
| 2021-12-28T23:01:31
| 2021-12-28T23:01:31
| 60,261,863
| 1,747
| 90
|
MIT
| 2020-07-29T20:20:36
| 2016-06-02T12:22:51
|
Python
|
UTF-8
|
Python
| false
| false
| 5,707
|
py
|
fetchsitefeed.py
|
# -*- coding: utf-8 -*-
# vim: sw=4:ts=4:expandtab
"""
riko.modules.fetchsitefeed
~~~~~~~~~~~~~~~~~~~~~~~~~~
Provides functions for fetching the first RSS or Atom feed discovered in a web
site.
Uses a web site's auto-discovery information to find an RSS or Atom feed. If
multiple feeds are discovered, only the first one is fetched. If a site changes
their feed URL in the future, this module can discover the new URL for you (as
long as the site updates their auto-discovery links). For sites with only one
stream, this module provides a good alternative to the Fetch Feed module.
Also note that not all sites provide auto-discovery links on their web site's
home page.
This module provides a simpler alternative to the Feed Auto-Discovery Module.
The latter returns a list of information about all the feeds discovered in a
site, but (unlike this module) doesn't fetch the feed data itself.
Examples:
basic usage::
>>> from riko import get_path
>>> from riko.modules.fetchsitefeed import pipe
>>>
>>> title = 'Using NFC tags in the car'
>>> next(pipe(conf={'url': get_path('bbc.html')}))['title'] == title
True
Attributes:
OPTS (dict): The default pipe options
DEFAULTS (dict): The default parser options
"""
import pygogo as gogo
from . import processor
from riko import autorss
from riko.utils import gen_entries, get_abspath
from riko.parsers import parse_rss
from riko.bado import coroutine, return_value, io
OPTS = {"ftype": "none"}
logger = gogo.Gogo(__name__, monolog=True).logger
@coroutine
def async_parser(_, objconf, skip=False, **kwargs):
"""Asynchronously parses the pipe content
Args:
_ (None): Ignored
objconf (obj): The pipe configuration (an Objectify instance)
skip (bool): Don't parse the content
kwargs (dict): Keyword arguments
Kwargs:
stream (dict): The original item
Returns:
Iter[dict]: The stream of items
Examples:
>>> from riko import get_path
>>> from riko.bado import react
>>> from riko.bado.mock import FakeReactor
>>> from meza.fntools import Objectify
>>>
>>> def run(reactor):
... callback = lambda x: print(next(x)['title'])
... objconf = Objectify({'url': get_path('bbc.html')})
... d = async_parser(None, objconf, stream={})
... return d.addCallbacks(callback, logger.error)
>>>
>>> try:
... react(run, _reactor=FakeReactor())
... except SystemExit:
... pass
...
Using NFC tags in the car
"""
if skip:
stream = kwargs["stream"]
else:
url = get_abspath(objconf.url)
rss = yield autorss.async_get_rss(url)
link = get_abspath(next(rss)["link"])
content = yield io.async_url_read(link)
parsed = parse_rss(content)
stream = gen_entries(parsed)
return_value(stream)
def parser(_, objconf, skip=False, **kwargs):
"""Parses the pipe content
Args:
_ (None): Ignored
objconf (obj): The pipe configuration (an Objectify instance)
skip (bool): Don't parse the content
kwargs (dict): Keyword arguments
Kwargs:
stream (dict): The original item
Returns:
Iter[dict]: The stream of items
Examples:
>>> from riko import get_path
>>> from meza.fntools import Objectify
>>>
>>> objconf = Objectify({'url': get_path('bbc.html')})
>>> result = parser(None, objconf, stream={})
>>> next(result)['title'] == 'Using NFC tags in the car'
True
"""
if skip:
stream = kwargs["stream"]
else:
url = get_abspath(objconf.url)
rss = autorss.get_rss(url)
objconf.url = get_abspath(next(rss)["link"])
parsed = parse_rss(**objconf)
stream = gen_entries(parsed)
return stream
@processor(isasync=True, **OPTS)
def async_pipe(*args, **kwargs):
"""A source that fetches and parses the first feed found on a site.
Args:
item (dict): The entry to process (not used)
kwargs (dict): The keyword arguments passed to the wrapper.
Kwargs:
conf (dict): The pipe configuration. Must contain the key 'url'.
url (str): The web site to fetch
Returns:
dict: twisted.internet.defer.Deferred an iterator of items
Examples:
>>> from riko import get_path
>>> from riko.bado import react
>>> from riko.bado.mock import FakeReactor
>>>
>>> def run(reactor):
... callback = lambda x: print(next(x)['title'])
... d = async_pipe(conf={'url': get_path('bbc.html')})
... return d.addCallbacks(callback, logger.error)
>>>
>>> try:
... react(run, _reactor=FakeReactor())
... pass
... except SystemExit:
... pass
...
Using NFC tags in the car
"""
return async_parser(*args, **kwargs)
@processor(**OPTS)
def pipe(*args, **kwargs):
"""A source that fetches and parses the first feed found on a site.
Args:
item (dict): The entry to process (not used)
kwargs (dict): The keyword arguments passed to the wrapper
Kwargs:
conf (dict): The pipe configuration. Must contain the key 'url'.
url (str): The web site to fetch
Yields:
dict: item
Examples:
>>> from riko import get_path
>>> title = 'Using NFC tags in the car'
>>> next(pipe(conf={'url': get_path('bbc.html')}))['title'] == title
True
"""
return parser(*args, **kwargs)
|
71b6400b0f94def17afd28ac367e4b1c4c803d4b
|
01184c7098e40569dd48219fbe3012321cf31244
|
/test/utils/namespace/_QT.py
|
ec6dd36d54cbf947e6af0443a0315a5e6401063e
|
[
"BSD-3-Clause"
] |
permissive
|
RDFLib/rdflib
|
1c81136f2656207042f81374540d8e1f02be28f5
|
077f4ac3abb3038b266f40dc95a8ccf9f4e9a84c
|
refs/heads/main
| 2023-08-30T11:22:00.041615
| 2023-08-29T21:31:43
| 2023-08-29T21:31:43
| 3,342,046
| 1,754
| 562
|
BSD-3-Clause
| 2023-09-12T14:58:35
| 2012-02-03T05:49:13
|
Python
|
UTF-8
|
Python
| false
| false
| 561
|
py
|
_QT.py
|
from rdflib.namespace import DefinedNamespace, Namespace
from rdflib.term import URIRef
class QT(DefinedNamespace):
_fail = True
_NS = Namespace("http://www.w3.org/2001/sw/DataAccess/tests/test-query#")
QueryForm: URIRef # Super class of all query forms
QueryTest: URIRef # The class of query tests
data: URIRef # Optional: data for the query test
graphData: URIRef # Optional: named-graph only data for the query test (ie. not loaded into the background graph)
query: URIRef # The query to ask
queryForm: URIRef # None
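# A minimal usage sketch: DefinedNamespace resolves attribute access against
# _NS, so each member above is a URIRef in the test-query namespace.
if __name__ == "__main__":
    assert QT.QueryTest == URIRef(
        "http://www.w3.org/2001/sw/DataAccess/tests/test-query#QueryTest"
    )
    print(QT.query)  # http://www.w3.org/2001/sw/DataAccess/tests/test-query#query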
|
51416a0d50eaca714eb8a17562dd7727cae004f1
|
3093dd966d4019f5e96f335677716afd6ffad7ac
|
/test/repos/fizzbuzz_service/fizzbuzz_service/checkers/fizzbuzz/fizzbuzz_checker.py
|
aa6f1613e1042b9cc56da3a5cce8188b41a5e66f
|
[
"MIT"
] |
permissive
|
sourcegraph/python-langserver
|
db62fda4e025d5a3241e33b63d5380f585cb444d
|
214b2717b44a5bd7aaf4ac077ac1b9054de064ec
|
refs/heads/master
| 2021-10-24T16:33:53.998475
| 2019-03-26T23:10:36
| 2019-03-26T23:10:36
| 66,597,766
| 118
| 11
|
MIT
| 2018-06-29T20:40:13
| 2016-08-25T22:32:13
|
Python
|
UTF-8
|
Python
| false
| false
| 243
|
py
|
fizzbuzz_checker.py
|
from ..fizz import fizz_checker
from ..buzz.buzz_checker import should_buzz
def should_fizzbuzz(number):
'''Whether or not "fizzbuzz" should be printed for this number'''
return fizz_checker.should_fizz(number) and should_buzz(number)
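# Usage sketch, assuming the sibling checkers implement the usual
# divisible-by-3 (fizz) and divisible-by-5 (buzz) rules:
#
#     should_fizzbuzz(15)  # True
#     should_fizzbuzz(9)   # False (fizz only)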
|
b13b2c3379ff847d0c8fe2e7ed2bf5af8e56668c
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-dris/huaweicloudsdkdris/v1/model/target_pos.py
|
6bcce0d1490ffa61e9fc89ffdf4b56f10db32883
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 5,985
|
py
|
target_pos.py
|
# coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class TargetPos:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'left_top_x': 'int',
'left_top_y': 'int',
'right_bottom_x': 'int',
'right_bottom_y': 'int'
}
attribute_map = {
'left_top_x': 'left_top_x',
'left_top_y': 'left_top_y',
'right_bottom_x': 'right_bottom_x',
'right_bottom_y': 'right_bottom_y'
}
def __init__(self, left_top_x=None, left_top_y=None, right_bottom_x=None, right_bottom_y=None):
"""TargetPos
The model defined in huaweicloud sdk
        :param left_top_x: **Parameter description**: X coordinate of the top-left corner of the target region box.
        :type left_top_x: int
        :param left_top_y: **Parameter description**: Y coordinate of the top-left corner of the target region box.
        :type left_top_y: int
        :param right_bottom_x: **Parameter description**: X coordinate of the bottom-right corner of the target region box.
        :type right_bottom_x: int
        :param right_bottom_y: **Parameter description**: Y coordinate of the bottom-right corner of the target region box.
        :type right_bottom_y: int
"""
self._left_top_x = None
self._left_top_y = None
self._right_bottom_x = None
self._right_bottom_y = None
self.discriminator = None
if left_top_x is not None:
self.left_top_x = left_top_x
if left_top_y is not None:
self.left_top_y = left_top_y
if right_bottom_x is not None:
self.right_bottom_x = right_bottom_x
if right_bottom_y is not None:
self.right_bottom_y = right_bottom_y
@property
def left_top_x(self):
"""Gets the left_top_x of this TargetPos.
        **Parameter description**: X coordinate of the top-left corner of the target region box.
:return: The left_top_x of this TargetPos.
:rtype: int
"""
return self._left_top_x
@left_top_x.setter
def left_top_x(self, left_top_x):
"""Sets the left_top_x of this TargetPos.
        **Parameter description**: X coordinate of the top-left corner of the target region box.
:param left_top_x: The left_top_x of this TargetPos.
:type left_top_x: int
"""
self._left_top_x = left_top_x
@property
def left_top_y(self):
"""Gets the left_top_y of this TargetPos.
        **Parameter description**: Y coordinate of the top-left corner of the target region box.
:return: The left_top_y of this TargetPos.
:rtype: int
"""
return self._left_top_y
@left_top_y.setter
def left_top_y(self, left_top_y):
"""Sets the left_top_y of this TargetPos.
        **Parameter description**: Y coordinate of the top-left corner of the target region box.
:param left_top_y: The left_top_y of this TargetPos.
:type left_top_y: int
"""
self._left_top_y = left_top_y
@property
def right_bottom_x(self):
"""Gets the right_bottom_x of this TargetPos.
        **Parameter description**: X coordinate of the bottom-right corner of the target region box.
:return: The right_bottom_x of this TargetPos.
:rtype: int
"""
return self._right_bottom_x
@right_bottom_x.setter
def right_bottom_x(self, right_bottom_x):
"""Sets the right_bottom_x of this TargetPos.
        **Parameter description**: X coordinate of the bottom-right corner of the target region box.
:param right_bottom_x: The right_bottom_x of this TargetPos.
:type right_bottom_x: int
"""
self._right_bottom_x = right_bottom_x
@property
def right_bottom_y(self):
"""Gets the right_bottom_y of this TargetPos.
        **Parameter description**: Y coordinate of the bottom-right corner of the target region box.
:return: The right_bottom_y of this TargetPos.
:rtype: int
"""
return self._right_bottom_y
@right_bottom_y.setter
def right_bottom_y(self, right_bottom_y):
"""Sets the right_bottom_y of this TargetPos.
        **Parameter description**: Y coordinate of the bottom-right corner of the target region box.
:param right_bottom_y: The right_bottom_y of this TargetPos.
:type right_bottom_y: int
"""
self._right_bottom_y = right_bottom_y
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, TargetPos):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
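# A minimal usage sketch (coordinate values are placeholders):
if __name__ == "__main__":
    pos = TargetPos(left_top_x=10, left_top_y=20,
                    right_bottom_x=110, right_bottom_y=220)
    print(pos.to_dict())
    # -> {'left_top_x': 10, 'left_top_y': 20,
    #     'right_bottom_x': 110, 'right_bottom_y': 220}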
|
8b1f8a38cc30b72aaf340e74e2925779f78fdc3e
|
83963c19fd120dcc7498b726cc56de7fbb900a47
|
/osxphotos/cli/verbose.py
|
64e12ff3f005aa22c26487904426ffb412a41e8e
|
[
"BSD-3-Clause",
"MIT",
"ISC",
"Apache-2.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
RhetTbull/osxphotos
|
55ad4f1257bcd26bb3fbadde6ce5dd59c0917354
|
2cb5a4d18a27be6ccf68f5f35abd39418d238016
|
refs/heads/main
| 2023-09-02T18:11:06.227191
| 2023-09-02T16:06:51
| 2023-09-02T16:06:51
| 192,160,985
| 1,287
| 93
|
MIT
| 2023-09-14T14:10:58
| 2019-06-16T07:07:49
|
Python
|
UTF-8
|
Python
| false
| false
| 8,732
|
py
|
verbose.py
|
"""helper functions for printing verbose output"""
from __future__ import annotations
import os
from datetime import datetime
from typing import IO, Any, Callable, Optional
import click
from rich.console import Console
from rich.theme import Theme
from .click_rich_echo import (
rich_click_echo,
set_rich_console,
set_rich_theme,
set_rich_timestamp,
)
from .color_themes import get_theme
from .common import CLI_COLOR_ERROR, CLI_COLOR_WARNING, time_stamp
# set to 1 if running tests
OSXPHOTOS_IS_TESTING = bool(os.getenv("OSXPHOTOS_IS_TESTING", default=False))
# include error/warning emoji's in verbose output
ERROR_EMOJI = True
# global to store verbose level
__verbose_level = 1
# global verbose function
__verbose_function: Callable[..., None] | None = None
__all__ = [
"get_verbose_console",
"get_verbose_level",
"set_verbose_level",
"verbose_print",
"verbose",
]
def _reset_verbose_globals():
"""Reset globals for testing"""
global __verbose_level
global __verbose_function
global _console
__verbose_level = 1
__verbose_function = None
_console = _Console()
def noop(*args, **kwargs):
"""no-op function"""
pass
def verbose(*args, level: int = 1):
"""Print verbose output
Args:
*args: arguments to pass to verbose function for printing
level: verbose level; if level > get_verbose_level(), output is suppressed
"""
# Notes:
# Normally you should use verbose_print() to get the verbose function instead of calling this directly
# This is here so that verbose can be directly imported and used in other modules without calling verbose_print()
# Use of verbose_print() will set the verbose function so that calling verbose() will work as expected
global __verbose_function
if __verbose_function is None:
return
__verbose_function(*args, level=level)
def set_verbose_level(level: int):
"""Set verbose level"""
global __verbose_level
global __verbose_function
__verbose_level = level
if level > 0 and __verbose_function is None:
# if verbose level set but verbose function not set, set it to default
# verbose_print sets the global __verbose_function
__verbose_function = _verbose_print_function(level)
elif level == 0 and __verbose_function is not None:
# if verbose level set to 0 but verbose function is set, set it to no-op
__verbose_function = noop
def get_verbose_level() -> int:
"""Get verbose level"""
global __verbose_level
return __verbose_level
class _Console:
"""Store console object for verbose output"""
def __init__(self):
self._console: Optional[Console] = None
@property
def console(self):
return self._console
@console.setter
def console(self, console: Console):
self._console = console
_console = _Console()
def get_verbose_console(theme: Optional[Theme] = None) -> Console:
"""Get console object or create one if not already created
Args:
theme: optional rich.theme.Theme object to use for formatting
Returns:
Console object
"""
global _console
if _console.console is None:
_console.console = Console(force_terminal=True, theme=theme)
return _console.console
def verbose_print(
verbose: int = 1,
timestamp: bool = False,
rich: bool = True,
theme: str | None = None,
highlight: bool = False,
file: Optional[IO] = None,
**kwargs: Any,
) -> Callable[..., None]:
"""Configure verbose printing and create verbose function to print output
Args:
verbose: if > 0, returns verbose print function otherwise returns no-op function; the value of verbose is the verbose level
timestamp: if True, includes timestamp in verbose output
rich: use rich.print instead of click.echo
highlight: if True, use automatic rich.print highlighting
theme: optional name of theme to use for formatting (will be loaded by get_theme())
file: optional file handle to write to instead of stdout
kwargs: any extra arguments to pass to click.echo or rich.print depending on whether rich==True
Returns:
function to print output
Note: sets the console for rich_echo to be the same as the console used for verbose output
"""
set_verbose_level(verbose)
color_theme = get_theme(theme)
verbose_function = _verbose_print_function(
verbose=verbose,
timestamp=timestamp,
rich=rich,
theme=color_theme,
highlight=highlight,
file=file,
**kwargs,
)
# set console for rich_echo to be same as for verbose
set_rich_console(get_verbose_console())
set_rich_theme(color_theme)
set_rich_timestamp(timestamp)
# set global verbose function to match
global __verbose_function
__verbose_function = verbose_function
return verbose_function
def _verbose_print_function(
    verbose: int = 1,
timestamp: bool = False,
rich: bool = False,
highlight: bool = False,
theme: Optional[Theme] = None,
file: Optional[IO] = None,
**kwargs: Any,
) -> Callable[..., None]:
"""Create verbose function to print output
Args:
verbose: if > 0, returns verbose print function otherwise returns no-op function; the value of verbose is the verbose level
timestamp: if True, includes timestamp in verbose output
rich: use rich.print instead of click.echo
highlight: if True, use automatic rich.print highlighting
theme: optional rich.theme.Theme object to use for formatting
file: optional file handle to write to instead of stdout
kwargs: any extra arguments to pass to click.echo or rich.print depending on whether rich==True
Returns:
function to print output
"""
# configure console even if verbose is False so that rich_echo will work correctly
global _console
if file:
_console.console = Console(theme=theme, file=file)
else:
_console.console = Console(theme=theme, width=10_000)
if not verbose:
return noop
# closure to capture timestamp
def verbose_(*args, level: int = 1):
"""print output if verbose flag set"""
if get_verbose_level() < level:
return
styled_args = []
timestamp_str = f"{str(datetime.now())} -- " if timestamp else ""
for arg in args:
if isinstance(arg, str):
arg = timestamp_str + arg
if "error" in arg.lower():
arg = click.style(arg, fg=CLI_COLOR_ERROR)
elif "warning" in arg.lower():
arg = click.style(arg, fg=CLI_COLOR_WARNING)
styled_args.append(arg)
click.echo(*styled_args, **kwargs, file=file or None)
def rich_verbose_(*args, level: int = 1):
"""rich.print output if verbose flag set"""
if get_verbose_level() < level:
return
global ERROR_EMOJI
timestamp_str = time_stamp() if timestamp else ""
new_args = []
for arg in args:
if isinstance(arg, str):
if "error" in arg.lower():
arg = f"[error]{arg}"
if ERROR_EMOJI:
arg = f":cross_mark-emoji: {arg}"
elif "warning" in arg.lower():
arg = f"[warning]{arg}"
if ERROR_EMOJI:
arg = f":warning-emoji: {arg}"
arg = timestamp_str + arg
new_args.append(arg)
_console.console.print(*new_args, highlight=highlight, **kwargs)
def rich_verbose_testing_(*args, level: int = 1):
"""print output if verbose flag set using rich.print"""
if get_verbose_level() < level:
return
global ERROR_EMOJI
timestamp_str = time_stamp() if timestamp else ""
new_args = []
for arg in args:
if isinstance(arg, str):
if "error" in arg.lower():
arg = f"[error]{arg}"
if ERROR_EMOJI:
arg = f":cross_mark-emoji: {arg}"
elif "warning" in arg.lower():
arg = f"[warning]{arg}"
if ERROR_EMOJI:
arg = f":warning-emoji: {arg}"
arg = timestamp_str + arg
new_args.append(arg)
rich_click_echo(*new_args, theme=theme, **kwargs)
if rich and not OSXPHOTOS_IS_TESTING:
return rich_verbose_
elif rich:
return rich_verbose_testing_
else:
return verbose_
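def _example_usage():  # pragma: no cover
    """A minimal wiring sketch (illustrative only; not called anywhere).

    verbose_print() configures the console/theme and returns the print
    function; after that, the module-level verbose() routes to the same
    function via the global it sets.
    """
    vprint = verbose_print(verbose=2, timestamp=True)
    vprint("always shown at verbose level >= 1")
    vprint("shown only at verbose level >= 2", level=2)
    verbose("module-level helper, same destination")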
|
f47292324269e352c52288ae770524fa16e7b536
|
a64eeba4575eee849b459dab9c7000350ee636f1
|
/mediapipe/tasks/python/components/containers/detections.py
|
833552c42df055d06e4a2ffbffd6197c8177de60
|
[
"Apache-2.0",
"dtoa"
] |
permissive
|
google/mediapipe
|
0b6b56aff8bacc7b680c205f0788f1b49dd33f5e
|
007824594bf1d07c7c1467df03a43886f8a4b3ad
|
refs/heads/master
| 2023-09-01T16:11:21.218234
| 2023-09-01T11:55:21
| 2023-09-01T11:57:34
| 191,820,100
| 23,940
| 5,164
|
Apache-2.0
| 2023-09-14T09:01:36
| 2019-06-13T19:16:41
|
C++
|
UTF-8
|
Python
| false
| false
| 5,843
|
py
|
detections.py
|
# Copyright 2022 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Detections data class."""
import dataclasses
from typing import Any, List, Optional
from mediapipe.framework.formats import detection_pb2
from mediapipe.framework.formats import location_data_pb2
from mediapipe.tasks.python.components.containers import bounding_box as bounding_box_module
from mediapipe.tasks.python.components.containers import category as category_module
from mediapipe.tasks.python.components.containers import keypoint as keypoint_module
from mediapipe.tasks.python.core.optional_dependencies import doc_controls
_DetectionListProto = detection_pb2.DetectionList
_DetectionProto = detection_pb2.Detection
_LocationDataProto = location_data_pb2.LocationData
@dataclasses.dataclass
class Detection:
"""Represents one detected object in the object detector's results.
Attributes:
bounding_box: A BoundingBox object.
categories: A list of Category objects.
keypoints: A list of NormalizedKeypoint objects.
"""
bounding_box: bounding_box_module.BoundingBox
categories: List[category_module.Category]
keypoints: Optional[List[keypoint_module.NormalizedKeypoint]] = None
@doc_controls.do_not_generate_docs
def to_pb2(self) -> _DetectionProto:
"""Generates a Detection protobuf object."""
labels = []
label_ids = []
scores = []
display_names = []
relative_keypoints = []
for category in self.categories:
scores.append(category.score)
      if category.index is not None:  # a label id of 0 is valid
label_ids.append(category.index)
if category.category_name:
labels.append(category.category_name)
if category.display_name:
display_names.append(category.display_name)
if self.keypoints:
for keypoint in self.keypoints:
relative_keypoint_proto = _LocationDataProto.RelativeKeypoint()
        if keypoint.x is not None:  # 0.0 is a valid normalized coordinate
          relative_keypoint_proto.x = keypoint.x
        if keypoint.y is not None:
          relative_keypoint_proto.y = keypoint.y
        if keypoint.label:
          relative_keypoint_proto.keypoint_label = keypoint.label
        if keypoint.score is not None:
          relative_keypoint_proto.score = keypoint.score
relative_keypoints.append(relative_keypoint_proto)
return _DetectionProto(
label=labels,
label_id=label_ids,
score=scores,
display_name=display_names,
location_data=_LocationDataProto(
format=_LocationDataProto.Format.BOUNDING_BOX,
bounding_box=self.bounding_box.to_pb2(),
relative_keypoints=relative_keypoints,
),
)
@classmethod
@doc_controls.do_not_generate_docs
def create_from_pb2(cls, pb2_obj: _DetectionProto) -> 'Detection':
"""Creates a `Detection` object from the given protobuf object."""
categories = []
keypoints = []
for idx, score in enumerate(pb2_obj.score):
categories.append(
category_module.Category(
score=score,
index=pb2_obj.label_id[idx]
if idx < len(pb2_obj.label_id)
else None,
category_name=pb2_obj.label[idx]
if idx < len(pb2_obj.label)
else None,
display_name=pb2_obj.display_name[idx]
if idx < len(pb2_obj.display_name)
else None,
)
)
if pb2_obj.location_data.relative_keypoints:
for idx, elem in enumerate(pb2_obj.location_data.relative_keypoints):
keypoints.append(
keypoint_module.NormalizedKeypoint(
x=elem.x,
y=elem.y,
label=elem.keypoint_label,
score=elem.score,
)
)
return Detection(
bounding_box=bounding_box_module.BoundingBox.create_from_pb2(
pb2_obj.location_data.bounding_box
),
categories=categories,
keypoints=keypoints,
)
def __eq__(self, other: Any) -> bool:
"""Checks if this object is equal to the given object.
Args:
other: The object to be compared with.
Returns:
True if the objects are equal.
"""
if not isinstance(other, Detection):
return False
return self.to_pb2().__eq__(other.to_pb2())
@dataclasses.dataclass
class DetectionResult:
"""Represents the list of detected objects.
Attributes:
detections: A list of `Detection` objects.
"""
detections: List[Detection]
@doc_controls.do_not_generate_docs
def to_pb2(self) -> _DetectionListProto:
"""Generates a DetectionList protobuf object."""
return _DetectionListProto(
detection=[detection.to_pb2() for detection in self.detections])
@classmethod
@doc_controls.do_not_generate_docs
def create_from_pb2(cls, pb2_obj: _DetectionListProto) -> 'DetectionResult':
"""Creates a `DetectionResult` object from the given protobuf object."""
return DetectionResult(detections=[
Detection.create_from_pb2(detection) for detection in pb2_obj.detection
])
def __eq__(self, other: Any) -> bool:
"""Checks if this object is equal to the given object.
Args:
other: The object to be compared with.
Returns:
True if the objects are equal.
"""
if not isinstance(other, DetectionResult):
return False
return self.to_pb2().__eq__(other.to_pb2())
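# A minimal construction sketch (illustrative only; it assumes the sibling
# BoundingBox and Category containers expose the field names shown, and all
# values are placeholders):
if __name__ == '__main__':
  detection = Detection(
      bounding_box=bounding_box_module.BoundingBox(
          origin_x=10, origin_y=20, width=100, height=50),
      categories=[category_module.Category(
          index=0, score=0.9, display_name='cat', category_name='cat')],
  )
  print(DetectionResult(detections=[detection]).to_pb2())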
|
106254c30f22d3eae63b944e26754ae9425776ba
|
39bbabca04b639979145ebd373e45f83cf3c6f10
|
/ambf_ros_modules/ambf_client/python/tests/rl_test.py
|
d531f2a6ae4a569fa58fb0c9fb1b326a5669a573
|
[] |
no_license
|
WPI-AIM/ambf
|
5a26fff8fa5f150dfdad597562aac42da126cb01
|
189cb5ec8b7ae03b12c0e215d5f67c26c31f6ffe
|
refs/heads/ambf-2.0
| 2023-08-05T07:26:05.634330
| 2023-07-26T19:57:11
| 2023-07-26T19:57:11
| 168,765,517
| 137
| 71
| null | 2023-05-24T18:07:23
| 2019-02-01T21:58:25
|
C++
|
UTF-8
|
Python
| false
| false
| 4,593
|
py
|
rl_test.py
|
#!/usr/bin/env python
# //==============================================================================
# /*
# Software License Agreement (BSD License)
# Copyright (c) 2019, AMBF
# (http://practicepoint.wpi.edu)
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of authors nor the names of its contributors may
# be used to endorse or promote products derived from this software
# without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# \author <http://practicepoint.wpi.edu>
# \author <amunawar@wpi.edu>
# \author Adnan Munawar
# \version 0.1
# */
# //==============================================================================
import numpy as np
from ambf_comm import AmbfEnv
from keras.models import Sequential, Model
from keras.layers import Dense, Activation, Flatten, Input, Concatenate
from keras.optimizers import Adam
from rl.agents import DDPGAgent
from rl.memory import SequentialMemory
from rl.random import OrnsteinUhlenbeckProcess
ENV_NAME = 'Torus'
# Get the environment and extract the number of actions.
env = AmbfEnv()
env.make(ENV_NAME)
env.reset()
assert len(env.action_space.shape) == 1
nb_actions = env.action_space.shape[0]
# Next, we build a very simple model.
actor = Sequential()
actor.add(Flatten(input_shape=(1,) + env.observation_space.shape))
actor.add(Dense(16))
actor.add(Activation('sigmoid'))
actor.add(Dense(16))
actor.add(Activation('sigmoid'))
actor.add(Dense(16))
actor.add(Activation('relu'))
actor.add(Dense(nb_actions))
actor.add(Activation('linear'))
actor.summary()  # summary() already prints; wrapping it in print() would emit "None"
action_input = Input(shape=(nb_actions,), name='action_input')
observation_input = Input(shape=(1,) + env.observation_space.shape, name='observation_input')
flattened_observation = Flatten()(observation_input)
x = Concatenate()([action_input, flattened_observation])
x = Dense(32)(x)
x = Activation('relu')(x)
x = Dense(32)(x)
x = Activation('relu')(x)
x = Dense(32)(x)
x = Activation('relu')(x)
x = Dense(1)(x)
x = Activation('linear')(x)
critic = Model(inputs=[action_input, observation_input], outputs=x)
critic.summary()
# Finally, we configure and compile our agent. You can use every built-in Keras optimizer and
# even the metrics!
memory = SequentialMemory(limit=100000, window_length=1)
random_process = OrnsteinUhlenbeckProcess(size=nb_actions, theta=.15, mu=0., sigma=.3)
agent = DDPGAgent(nb_actions=nb_actions, actor=actor, critic=critic, critic_action_input=action_input,
memory=memory, nb_steps_warmup_critic=100, nb_steps_warmup_actor=100,
random_process=random_process, gamma=.99, target_model_update=1e-3)
agent.compile(Adam(lr=.001, clipnorm=1.), metrics=['mae'])
# Okay, now it's time to learn something! Visualization is disabled here
# (visualize=False) because rendering slows down training quite a lot. You can
# always safely abort the training prematurely using Ctrl + C.
agent.fit(env, nb_steps=50000, visualize=False, verbose=1, nb_max_episode_steps=200)
# After training is done, we save the final weights.
agent.save_weights('ddpg_{}_weights.h5f'.format(ENV_NAME), overwrite=True)
# Finally, evaluate our algorithm for 5 episodes.
agent.test(env, nb_episodes=5, visualize=True, nb_max_episode_steps=200)
|
9a017ae41b9a85c298937326cd26b94bfb472253
|
c43b5835b4499f4e6d6fa4efda9546dc67ae0767
|
/sfepy/postprocess/plot_facets.py
|
20233e2ba940e05d29a10a814c27a16505f22665
|
[
"BSD-3-Clause"
] |
permissive
|
sfepy/sfepy
|
4b74e7839b5e7b5e8d90e19ab6e90a068fe33df4
|
0c2d1690e764b601b2687be1e4261b82207ca366
|
refs/heads/master
| 2023-09-04T22:07:28.041123
| 2023-08-28T14:47:50
| 2023-08-28T14:47:50
| 802,525
| 651
| 188
|
BSD-3-Clause
| 2023-09-12T07:28:19
| 2010-07-28T09:14:41
|
Python
|
UTF-8
|
Python
| false
| false
| 4,294
|
py
|
plot_facets.py
|
"""
Functions to visualize the geometry elements and numbering and orientation of
their facets (edges and faces).
The standard geometry elements can be plotted by running::
$ python sfepy/postprocess/plot_facets.py
"""
from __future__ import absolute_import
import numpy as nm
import matplotlib.pyplot as plt
from sfepy.linalg import (get_perpendiculars, normalize_vectors,
make_axis_rotation_matrix)
from sfepy.postprocess.plot_dofs import _get_axes, plot_mesh, plot_global_dofs
import six
from six.moves import range
def plot_geometry(ax, gel):
"""
Plot a geometry element as a wireframe.
"""
ax = plot_mesh(ax, gel.coors, [gel.conn], gel.edges)
ax = plot_global_dofs(ax, gel.coors, [gel.conn])
return ax
def plot_edges(ax, gel, length):
"""
Plot edges of a geometry element as numbered arrows.
"""
dim = gel.dim
ax = _get_axes(ax, dim)
if gel.edges is None: return ax
l2 = 0.5 * length
for ii, edge in enumerate(gel.edges):
cc = gel.coors[edge]
centre = 0.5 * cc.sum(axis=0)
vdir = (cc - centre)
normalize_vectors(vdir)
cc = l2 * vdir + centre
draw_arrow(ax, cc, length=0.3*length, linewidth=3, color='b')
ax.text(*centre, s=ii,
color='b', fontsize=10, weight='light')
return ax
def plot_faces(ax, gel, radius, n_point):
"""
Plot faces of a 3D geometry element as numbered oriented arcs. An arc
centre corresponds to the first node of a face. It points from the first
edge towards the last edge of the face.
"""
dim = gel.dim
ax = _get_axes(ax, dim)
if dim < 3: return ax
for ii, face in enumerate(gel.faces):
cc = gel.coors[face]
t1 = cc[1, :] - cc[0, :]
t2 = cc[-1, :] - cc[0, :]
n = nm.cross(t1, t2)
nt1 = nm.linalg.norm(t1)
nt2 = nm.linalg.norm(t2)
angle = nm.arccos(nm.dot(t1, t2) / (nt1 * nt2))
da = angle / (n_point - 1)
mtx = make_axis_rotation_matrix(n, da)
rt = cc[0] + radius * t1 / nt1
coors = [rt]
for ip in range(n_point - 1):
rt = nm.dot(mtx.T, (rt - cc[0])) + cc[0]
coors.append(rt)
coors = nm.array(coors, dtype=nm.float64)
centre = coors.sum(axis=0) / coors.shape[0]
draw_arrow(ax, coors, length=0.3*radius, linewidth=3, color='r')
ax.text(*centre, s=ii,
color='r', fontsize=10, weight='light')
return ax
def draw_arrow(ax, coors, angle=20.0, length=0.3, **kwargs):
"""
Draw a line ended with an arrow head, in 2D or 3D.
"""
color = kwargs.get('color', 'b')
c0 = coors[-2]
c1 = coors[-1]
vd = c1 - c0
nvd = nm.linalg.norm(vd)
vd /= nvd
c0 = c1 - length * vd
ps = get_perpendiculars(vd)
rangle = nm.deg2rad(min(angle, 60.0))
plength = length * nm.arctan(rangle)
if coors.shape[1] == 2:
from matplotlib.patches import Polygon
cx, cy = coors[:, 0], coors[:, 1]
ax.plot(cx, cy, **kwargs)
p0 = c0 + plength * ps
p1 = c0 - plength * ps
pol = Polygon([p0, p1, c1], color=color)
ax.add_artist(pol)
else:
import mpl_toolkits.mplot3d as plt3
cx, cy, cz = coors[:, 0], coors[:, 1], coors[:, 2]
ax.plot(cx, cy, cz, **kwargs)
p00 = c0 + plength * ps[0]
p01 = c0 - plength * ps[0]
p10 = c0 + plength * ps[1]
p11 = c0 - plength * ps[1]
arr = plt3.art3d.Poly3DCollection([[p00, p01, c1],
[p10, p11, c1]], color=color)
ax.add_collection3d(arr)
if __name__ == '__main__':
from sfepy.discrete.fem.geometry_element import (GeometryElement,
geometry_data)
for key, gd in six.iteritems(geometry_data):
if key == '1_2' : continue
gel = GeometryElement(key)
ax = plot_geometry(None, gel)
ax = plot_edges(ax, gel, length=0.2)
ax = plot_faces(ax, gel, radius=0.3, n_point=5)
dd = 0.05
ax.set_xlim([-dd, 1.0 + dd])
ax.set_ylim([-dd, 1.0 + dd])
if gel.dim == 3:
ax.set_zlim([-dd, 1.0 + dd])
plt.show()
|
4418168eccde5a568c8187276231a32d31b6f18e
|
88ae8695987ada722184307301e221e1ba3cc2fa
|
/third_party/angle/third_party/glmark2/src/waflib/ansiterm.py
|
1d8bc78f1605f6c661e558cd828cfc8aa2e2cf0a
|
[
"GPL-3.0-only",
"BSD-3-Clause",
"LicenseRef-scancode-x11-opengl",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"LGPL-2.0-or-later",
"MIT",
"GPL-1.0-or-later"
] |
permissive
|
iridium-browser/iridium-browser
|
71d9c5ff76e014e6900b825f67389ab0ccd01329
|
5ee297f53dc7f8e70183031cff62f37b0f19d25f
|
refs/heads/master
| 2023-08-03T16:44:16.844552
| 2023-07-20T15:17:00
| 2023-07-23T16:09:30
| 220,016,632
| 341
| 40
|
BSD-3-Clause
| 2021-08-13T13:54:45
| 2019-11-06T14:32:31
| null |
UTF-8
|
Python
| false
| false
| 9,153
|
py
|
ansiterm.py
|
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! https://waf.io/book/index.html#_obtaining_the_waf_file
import os,re,sys
from waflib import Utils
wlock=Utils.threading.Lock()
try:
from ctypes import Structure,windll,c_short,c_ushort,c_ulong,c_int,byref,c_wchar,POINTER,c_long
except ImportError:
class AnsiTerm(object):
def __init__(self,stream):
self.stream=stream
try:
self.errors=self.stream.errors
except AttributeError:
pass
self.encoding=self.stream.encoding
def write(self,txt):
try:
wlock.acquire()
self.stream.write(txt)
self.stream.flush()
finally:
wlock.release()
def fileno(self):
return self.stream.fileno()
def flush(self):
self.stream.flush()
def isatty(self):
return self.stream.isatty()
else:
class COORD(Structure):
_fields_=[("X",c_short),("Y",c_short)]
class SMALL_RECT(Structure):
_fields_=[("Left",c_short),("Top",c_short),("Right",c_short),("Bottom",c_short)]
class CONSOLE_SCREEN_BUFFER_INFO(Structure):
_fields_=[("Size",COORD),("CursorPosition",COORD),("Attributes",c_ushort),("Window",SMALL_RECT),("MaximumWindowSize",COORD)]
class CONSOLE_CURSOR_INFO(Structure):
_fields_=[('dwSize',c_ulong),('bVisible',c_int)]
try:
_type=unicode
except NameError:
_type=str
to_int=lambda number,default:number and int(number)or default
STD_OUTPUT_HANDLE=-11
STD_ERROR_HANDLE=-12
windll.kernel32.GetStdHandle.argtypes=[c_ulong]
windll.kernel32.GetStdHandle.restype=c_ulong
windll.kernel32.GetConsoleScreenBufferInfo.argtypes=[c_ulong,POINTER(CONSOLE_SCREEN_BUFFER_INFO)]
windll.kernel32.GetConsoleScreenBufferInfo.restype=c_long
windll.kernel32.SetConsoleTextAttribute.argtypes=[c_ulong,c_ushort]
windll.kernel32.SetConsoleTextAttribute.restype=c_long
windll.kernel32.FillConsoleOutputCharacterW.argtypes=[c_ulong,c_wchar,c_ulong,POINTER(COORD),POINTER(c_ulong)]
windll.kernel32.FillConsoleOutputCharacterW.restype=c_long
windll.kernel32.FillConsoleOutputAttribute.argtypes=[c_ulong,c_ushort,c_ulong,POINTER(COORD),POINTER(c_ulong)]
windll.kernel32.FillConsoleOutputAttribute.restype=c_long
windll.kernel32.SetConsoleCursorPosition.argtypes=[c_ulong,POINTER(COORD)]
windll.kernel32.SetConsoleCursorPosition.restype=c_long
windll.kernel32.SetConsoleCursorInfo.argtypes=[c_ulong,POINTER(CONSOLE_CURSOR_INFO)]
windll.kernel32.SetConsoleCursorInfo.restype=c_long
class AnsiTerm(object):
def __init__(self,s):
self.stream=s
try:
self.errors=s.errors
except AttributeError:
pass
self.encoding=s.encoding
self.cursor_history=[]
handle=(s.fileno()==2)and STD_ERROR_HANDLE or STD_OUTPUT_HANDLE
self.hconsole=windll.kernel32.GetStdHandle(handle)
self._sbinfo=CONSOLE_SCREEN_BUFFER_INFO()
self._csinfo=CONSOLE_CURSOR_INFO()
windll.kernel32.GetConsoleCursorInfo(self.hconsole,byref(self._csinfo))
self._orig_sbinfo=CONSOLE_SCREEN_BUFFER_INFO()
r=windll.kernel32.GetConsoleScreenBufferInfo(self.hconsole,byref(self._orig_sbinfo))
self._isatty=r==1
def screen_buffer_info(self):
windll.kernel32.GetConsoleScreenBufferInfo(self.hconsole,byref(self._sbinfo))
return self._sbinfo
def clear_line(self,param):
mode=param and int(param)or 0
sbinfo=self.screen_buffer_info()
if mode==1:
line_start=COORD(0,sbinfo.CursorPosition.Y)
line_length=sbinfo.Size.X
elif mode==2:
line_start=COORD(sbinfo.CursorPosition.X,sbinfo.CursorPosition.Y)
line_length=sbinfo.Size.X-sbinfo.CursorPosition.X
else:
line_start=sbinfo.CursorPosition
line_length=sbinfo.Size.X-sbinfo.CursorPosition.X
chars_written=c_ulong()
windll.kernel32.FillConsoleOutputCharacterW(self.hconsole,c_wchar(' '),line_length,line_start,byref(chars_written))
windll.kernel32.FillConsoleOutputAttribute(self.hconsole,sbinfo.Attributes,line_length,line_start,byref(chars_written))
def clear_screen(self,param):
mode=to_int(param,0)
sbinfo=self.screen_buffer_info()
if mode==1:
clear_start=COORD(0,0)
clear_length=sbinfo.CursorPosition.X*sbinfo.CursorPosition.Y
elif mode==2:
clear_start=COORD(0,0)
clear_length=sbinfo.Size.X*sbinfo.Size.Y
windll.kernel32.SetConsoleCursorPosition(self.hconsole,clear_start)
else:
clear_start=sbinfo.CursorPosition
clear_length=((sbinfo.Size.X-sbinfo.CursorPosition.X)+sbinfo.Size.X*(sbinfo.Size.Y-sbinfo.CursorPosition.Y))
chars_written=c_ulong()
windll.kernel32.FillConsoleOutputCharacterW(self.hconsole,c_wchar(' '),clear_length,clear_start,byref(chars_written))
windll.kernel32.FillConsoleOutputAttribute(self.hconsole,sbinfo.Attributes,clear_length,clear_start,byref(chars_written))
def push_cursor(self,param):
sbinfo=self.screen_buffer_info()
self.cursor_history.append(sbinfo.CursorPosition)
def pop_cursor(self,param):
if self.cursor_history:
old_pos=self.cursor_history.pop()
windll.kernel32.SetConsoleCursorPosition(self.hconsole,old_pos)
def set_cursor(self,param):
y,sep,x=param.partition(';')
x=to_int(x,1)-1
y=to_int(y,1)-1
sbinfo=self.screen_buffer_info()
new_pos=COORD(min(max(0,x),sbinfo.Size.X),min(max(0,y),sbinfo.Size.Y))
windll.kernel32.SetConsoleCursorPosition(self.hconsole,new_pos)
def set_column(self,param):
x=to_int(param,1)-1
sbinfo=self.screen_buffer_info()
new_pos=COORD(min(max(0,x),sbinfo.Size.X),sbinfo.CursorPosition.Y)
windll.kernel32.SetConsoleCursorPosition(self.hconsole,new_pos)
def move_cursor(self,x_offset=0,y_offset=0):
sbinfo=self.screen_buffer_info()
new_pos=COORD(min(max(0,sbinfo.CursorPosition.X+x_offset),sbinfo.Size.X),min(max(0,sbinfo.CursorPosition.Y+y_offset),sbinfo.Size.Y))
windll.kernel32.SetConsoleCursorPosition(self.hconsole,new_pos)
def move_up(self,param):
self.move_cursor(y_offset=-to_int(param,1))
def move_down(self,param):
self.move_cursor(y_offset=to_int(param,1))
def move_left(self,param):
self.move_cursor(x_offset=-to_int(param,1))
def move_right(self,param):
self.move_cursor(x_offset=to_int(param,1))
def next_line(self,param):
sbinfo=self.screen_buffer_info()
self.move_cursor(x_offset=-sbinfo.CursorPosition.X,y_offset=to_int(param,1))
def prev_line(self,param):
sbinfo=self.screen_buffer_info()
self.move_cursor(x_offset=-sbinfo.CursorPosition.X,y_offset=-to_int(param,1))
def rgb2bgr(self,c):
return((c&1)<<2)|(c&2)|((c&4)>>2)
def set_color(self,param):
cols=param.split(';')
sbinfo=self.screen_buffer_info()
attr=sbinfo.Attributes
for c in cols:
c=to_int(c,0)
if 29<c<38:
attr=(attr&0xfff0)|self.rgb2bgr(c-30)
elif 39<c<48:
attr=(attr&0xff0f)|(self.rgb2bgr(c-40)<<4)
elif c==0:
attr=self._orig_sbinfo.Attributes
elif c==1:
attr|=0x08
elif c==4:
attr|=0x80
elif c==7:
attr=(attr&0xff88)|((attr&0x70)>>4)|((attr&0x07)<<4)
windll.kernel32.SetConsoleTextAttribute(self.hconsole,attr)
def show_cursor(self,param):
self._csinfo.bVisible=1
windll.kernel32.SetConsoleCursorInfo(self.hconsole,byref(self._csinfo))
def hide_cursor(self,param):
self._csinfo.bVisible=0
windll.kernel32.SetConsoleCursorInfo(self.hconsole,byref(self._csinfo))
ansi_command_table={'A':move_up,'B':move_down,'C':move_right,'D':move_left,'E':next_line,'F':prev_line,'G':set_column,'H':set_cursor,'f':set_cursor,'J':clear_screen,'K':clear_line,'h':show_cursor,'l':hide_cursor,'m':set_color,'s':push_cursor,'u':pop_cursor,}
ansi_tokens=re.compile('(?:\x1b\[([0-9?;]*)([a-zA-Z])|([^\x1b]+))')
def write(self,text):
try:
wlock.acquire()
if self._isatty:
for param,cmd,txt in self.ansi_tokens.findall(text):
if cmd:
cmd_func=self.ansi_command_table.get(cmd)
if cmd_func:
cmd_func(self,param)
else:
self.writeconsole(txt)
else:
self.stream.write(text)
finally:
wlock.release()
def writeconsole(self,txt):
chars_written=c_ulong()
writeconsole=windll.kernel32.WriteConsoleA
if isinstance(txt,_type):
writeconsole=windll.kernel32.WriteConsoleW
done=0
todo=len(txt)
chunk=32<<10
while todo!=0:
doing=min(chunk,todo)
buf=txt[done:done+doing]
r=writeconsole(self.hconsole,buf,doing,byref(chars_written),None)
if r==0:
chunk>>=1
continue
done+=doing
todo-=doing
def fileno(self):
return self.stream.fileno()
def flush(self):
pass
def isatty(self):
return self._isatty
if sys.stdout.isatty()or sys.stderr.isatty():
handle=sys.stdout.isatty()and STD_OUTPUT_HANDLE or STD_ERROR_HANDLE
console=windll.kernel32.GetStdHandle(handle)
sbinfo=CONSOLE_SCREEN_BUFFER_INFO()
def get_term_cols():
windll.kernel32.GetConsoleScreenBufferInfo(console,byref(sbinfo))
return sbinfo.Size.X-1
try:
import struct,fcntl,termios
except ImportError:
pass
else:
if(sys.stdout.isatty()or sys.stderr.isatty())and os.environ.get('TERM','')not in('dumb','emacs'):
FD=sys.stdout.isatty()and sys.stdout.fileno()or sys.stderr.fileno()
def fun():
return struct.unpack("HHHH",fcntl.ioctl(FD,termios.TIOCGWINSZ,struct.pack("HHHH",0,0,0,0)))[1]
try:
fun()
except Exception as e:
pass
else:
get_term_cols=fun
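# --- illustrative usage sketch (not part of the upstream waf source) ---
# AnsiTerm wraps a Windows console stream so that ANSI escape sequences
# written to it are translated into kernel32 console calls. A minimal,
# hypothetical wiring would look like:
#
#     if sys.platform == 'win32' and sys.stdout.isatty():
#         sys.stdout = AnsiTerm(sys.stdout)
#         sys.stdout.write('\x1b[32mgreen text\x1b[0m\n')  # '32' -> set_color -> green foreground
#
# The real waf module performs this substitution itself; the snippet above
# only shows the intended call pattern.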
|
9dd2f689fc6b0813cbd9503dec82fd9f46f9135a
|
8a62bbff9378187a898f336532bb49de18cb88e4
|
/2020-librispeech-data-prepare/14_create_trans_raw.py
|
cb9f036cce716a35bccf739ac4b0209f61d1a221
|
[] |
no_license
|
rwth-i6/returnn-experiments
|
e2cdecb67febe646d702282ced8c290f1dd8edd0
|
a46021329c030af361e0becb25ea92afca9610ce
|
refs/heads/master
| 2023-06-08T08:56:11.891782
| 2023-05-30T12:46:45
| 2023-05-30T12:46:45
| 67,426,132
| 159
| 52
| null | 2023-05-30T12:46:46
| 2016-09-05T14:07:48
|
Python
|
UTF-8
|
Python
| false
| false
| 5,222
|
py
|
14_create_trans_raw.py
|
#!/usr/bin/env python3
import better_exchook
import os
import subprocess
import tempfile
from zipfile import ZipFile, ZipInfo
import contextlib
my_dir = os.path.dirname(os.path.abspath(__file__))
Parts = [
"dev-clean", "dev-other",
"test-clean", "test-other",
"train-clean-100", "train-clean-360", "train-other-500"]
def generic_open(filename, mode="r"):
"""
Wrapper around :func:`open`.
Automatically wraps :func:`gzip.open` if filename ends with ``".gz"``.
:param str filename:
:param str mode: text mode by default
:rtype: typing.TextIO|typing.BinaryIO
"""
if filename.endswith(".gz"):
import gzip
if "b" not in mode:
mode += "t"
return gzip.open(filename, mode)
return open(filename, mode)
def sh(*args):
print("$ %s" % " ".join(args))
subprocess.check_call(args)
@contextlib.contextmanager
def pushd(d):
"""
:param str d: directory
"""
assert os.path.isdir(d)
old_working_dir = os.getcwd()
os.chdir(d)
yield
os.chdir(old_working_dir)
def create_librispeech_txt(dataset_dir):
"""
Create separate txt files to be used with :class:`returnn.OggZipDataset`.
Example:
https://github.com/rwth-i6/returnn-experiments/blob/master/2019-asr-e2e-trafo-vs-lstm/tedlium2/full-setup/03_convert_to_ogg.py
:param str dataset_dir:
"""
output_dir = dataset_dir
with pushd(output_dir):
for part in Parts:
dest_meta_filename_gz = "%s.txt.gz" % part
if os.path.exists(dest_meta_filename_gz):
print("File exists:", dest_meta_filename_gz)
continue
dest_meta_filename = "%s.txt" % part
dest_meta_file = open(dest_meta_filename, "w")
dest_meta_file.write("[\n")
zip_filename = "%s/%s.zip" % (dataset_dir, part)
assert os.path.exists(zip_filename)
zip_file = ZipFile(zip_filename)
assert zip_file.filelist
count_lines = 0
for info in zip_file.filelist:
assert isinstance(info, ZipInfo)
path = info.filename.split("/")
if path[0].startswith(part):
subdir = path[0] # e.g. "train-clean-100"
assert subdir == part
if path[-1].endswith(".trans.txt"):
print("read", part, path[-1])
for line in zip_file.read(info).decode("utf8").splitlines():
seq_name, txt = line.split(" ", 1) # seq_name is e.g. "19-198-0000"
count_lines += 1
ogg_filename = "%s/%s.flac.ogg" % ("/".join(path[:-1]), seq_name)
ogg_bytes = zip_file.read(ogg_filename)
assert len(ogg_bytes) > 0
# ffprobe does not work correctly on piped input. That is why we have to store it in a temp file.
with tempfile.NamedTemporaryFile(suffix=".ogg") as temp_file:
temp_file.write(ogg_bytes)
temp_file.flush()
duration_str = subprocess.check_output(
["ffprobe", temp_file.name,
'-show_entries', 'format=duration', '-v', 'quiet', '-of', 'compact'],
stderr=subprocess.STDOUT).decode("utf8").strip()
duration_str = duration_str.split("=")[-1] # e.g. "format|duration=10.028000"
assert float(duration_str) > 0 # just a check
dest_meta_file.write(
"{'text': %r, 'file': %r, 'seq_name': '%s', 'duration': %s},\n" % (
txt, ogg_filename, "%s-%s" % (part, seq_name), duration_str))
assert count_lines > 0
dest_meta_file.write("]\n")
dest_meta_file.close()
sh("gzip", dest_meta_filename)
assert os.path.exists(dest_meta_filename_gz)
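# Example of a single entry written by create_librispeech_txt above
# (illustrative values only):
#   {'text': 'HELLO WORLD', 'file': 'train-clean-100/19/198/19-198-0000.flac.ogg',
#    'seq_name': 'train-clean-100-19-198-0000', 'duration': 10.028},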
def extract_raw_strings_py(part):
"""
:param str part:
:rtype: str
"""
dataset_dir = "%s/data/dataset-ogg" % my_dir
dataset_path_prefix = "%s/%s" % (dataset_dir, part)
py_txt_output_path = "%s/data/dataset/%s.py.txt.gz" % (my_dir, part)
if os.path.exists(py_txt_output_path):
print("File exists, skipping:", py_txt_output_path)
return py_txt_output_path
args = [
"%s/returnn/tools/dump-dataset-raw-strings.py" % my_dir,
"--dataset", repr({
"class": "OggZipDataset",
"path": ["%s.zip" % dataset_path_prefix, "%s.txt.gz" % dataset_path_prefix],
'use_cache_manager': True,
"audio": None,
"targets": None, # we will just use the raw strings
}),
"--out", py_txt_output_path]
sh(*args)
assert os.path.exists(py_txt_output_path)
return py_txt_output_path
def main():
os.makedirs("%s/data/dataset" % my_dir, exist_ok=True)
create_librispeech_txt(dataset_dir="%s/data/dataset-ogg" % my_dir)
trans_file = open("%s/data/dataset/train-trans-all.txt" % my_dir, "w")
for part in Parts:
py_txt_output_path = extract_raw_strings_py(part)
if part.startswith("train"):
py_txt = eval(generic_open(py_txt_output_path).read())
assert isinstance(py_txt, dict) and len(py_txt) > 0
example_key, example_value = next(iter(py_txt.items()))
assert isinstance(example_key, str) and isinstance(example_value, str)
for seq_tag, raw_txt in sorted(py_txt.items()):
trans_file.write("%s\n" % raw_txt)
trans_file.close()
if __name__ == '__main__':
better_exchook.install()
main()
|
0fa2e1692fb51ed2ccc300af3bb1d9e3dfbcb8db
|
ddc7c80a52df8fb251c1c8c7fec8ed4a5301ac43
|
/panda_sim_custom_action_server/src/panda_sim_custom_action_server/bezier.py
|
77f6353df8a4492acb43e7f772dbcf1d15a60249
|
[
"Apache-2.0"
] |
permissive
|
justagist/panda_simulator
|
efd6059884b5a68bf6a400c19100ee5f594def28
|
4c469b01bf0763dc232fb77d029d25f258ebbf43
|
refs/heads/melodic-devel
| 2022-01-22T06:16:03.939551
| 2021-12-22T13:41:32
| 2021-12-22T13:41:32
| 220,040,644
| 174
| 67
|
Apache-2.0
| 2021-08-02T08:01:29
| 2019-11-06T16:24:21
|
C++
|
UTF-8
|
Python
| false
| false
| 12,075
|
py
|
bezier.py
|
# Copyright (c) 2011, Ian McMahon
# Modifications Copyright (c) 2014-2018, Rethink Robotics Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The Bezier library was implemented as a class project in CIS515,
Fundamentals of Linear Algebra, taught by Professor Jean Gallier
in the summer of 2011 at the University of Pennsylvania. For an
excellent explanation of Cubic Bezier Curves, and the math
represented in this library, see
http://www.cis.upenn.edu/~cis515/proj1-12.pdf
~~~~~~~~~~~~~~~~~~~~~~~~ Bezier ~~~~~~~~~~~~~~~~~~~~~~~~
A library for computing Bezier Cubic Splines for an arbitrary
set of control points in R2, R3, up to RN space.
Cubic Segment:
    C(t) = (1 - t)^3*b0 + 3(1 - t)^2*t*b1 + 3(1 - t)*t^2*b2 + t^3*b3
Bezier Spline of Cubic Segments:
B(t) = C_(i)(t-i+1), i-1 <= t <= i
where C0 continuity exists: C_(i)(1) = C_(i+1)(0)
where C1 continuity exists: C'_(i)(1) = C'_(i+1)(0)
and where C2 continuity exists: C"_(i)(1) = C"_(i+1)(0)
ex. usage:
import numpy
import bezier
points_array = numpy.array([[1, 2, 3], [4, 4, 4],
[6, 4, 6], [2, 5, 6],
[5, 6, 7]])
d_pts = bezier.de_boor_control_pts(points_array)
b_coeffs = bezier.bezier_coefficients(points_array, d_pts)
b_curve = bezier.bezier_curve(b_coeffs, 50)
# plotting example
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
    ax = fig.add_subplot(projection='3d')
#plot bezier curve
ax.plot(b_curve[:,0], b_curve[:,1], b_curve[:,2])
#plot specified points
ax.plot(points_array[:,0], points_array[:,1], points_array[:,2], 'g*')
ax.set_title("Cubic Bezier Spline")
ax.set_xlabel("X")
ax.set_ylabel("Y")
ax.set_zlabel("Z")
ax.legend(["Bezier Curve", "Control Points"], loc=2)
plt.show()
"""
import numpy as np
def de_boor_control_pts(points_array, d0=None,
dN=None, natural=True):
"""
Compute the de Boor control points for a given
set for control points
params:
points_array: array of user-supplied control points
numpy.array of size N by k
N is the number of input control points
k is the number of dimensions for each point
d0: the first control point - None if "natural"
numpy.array of size 1 by k
dN: the last control point - None if "natural"
numpy.array of size 1 by k
natural: flag to signify natural start/end conditions
bool
returns:
d_pts: array of de Boor control points
numpy.array of size N+3 by k
"""
# N+3 auxiliary points required to compute d_pts
# dpts_(-1) = x_(0)
# dpts_(N+1) = x_(N)
    # so it is only necessary to find N+1 pts, dpts_(0) to dpts_(N)
(rows, k) = np.shape(points_array)
N = rows - 1 # minus 1 because list includes x_(0)
# Compute A matrix
if natural:
if N > 2:
A = np.zeros((N-1, N-1))
A[np.ix_([0], [0, 1])] = [4, 1]
A[np.ix_([N-2], [N-3, N-2])] = [1, 4]
else:
A = 4.0
else:
if N > 2:
A = np.zeros((N-1, N-1))
A[np.ix_([0], [0, 1])] = [3.5, 1]
A[np.ix_([N-2], [N-3, N-2])] = [1, 3.5]
else:
A = 3.5
for i in range(1, N-2):
A[np.ix_([i], [i-1, i, i+1])] = [1, 4, 1]
# Construct de Boor Control Points from A matrix
d_pts = np.zeros((N+3, k))
for col in range(0, k):
x = np.zeros((max(N-1, 1), 1))
if N > 2:
# Compute start / end conditions
if natural:
x[N-2, 0] = 6*points_array[-2, col] - points_array[-1, col]
x[0, 0] = 6*points_array[1, col] - points_array[0, col]
else:
x[N-2, 0] = 6*points_array[-2, col] - 1.5*dN[0, col]
x[0, 0] = 6*points_array[1, col] - 1.5*d0[0, col]
x[range(1, N-3+1), 0] = 6*points_array[range(2, N-2+1), col]
# Solve bezier interpolation
d_pts[2:N+1, col] = np.linalg.solve(A, x).T
else:
# Compute start / end conditions
if natural:
x[0, 0] = 6*points_array[1, col] - points_array[0, col]
else:
x[0, 0] = 6*points_array[1, col] - 1.5*d0[col]
# Solve bezier interpolation
d_pts[2, col] = x / A
# Store off start and end positions
d_pts[0, :] = points_array[0, :]
d_pts[-1, :] = points_array[-1, :]
# Compute the second to last de Boor point based on end conditions
if natural:
one_third = (1.0/3.0)
two_thirds = (2.0/3.0)
d_pts[1, :] = (two_thirds)*points_array[0, :] + (one_third)*d_pts[2, :]
d_pts[N+1, :] = ((one_third)*d_pts[-3, :] +
(two_thirds)*points_array[-1, :])
else:
d_pts[1, :] = d0
d_pts[N+1, :] = dN
return d_pts
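# Note (illustrative): for N > 2 the matrix A assembled above is the classic
# tridiagonal [1, 4, 1] system of cubic-spline interpolation, so
# np.linalg.solve(A, x) recovers all interior de Boor points with one linear
# solve per coordinate dimension.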
def bezier_coefficients(points_array, d_pts):
"""
Compute the Bezier coefficients for a given
set for user-supplied control pts and
de Boor control pts.
These B coeffs are used to compute the cubic
splines for each cubic spline segment as
follows (where t is a percentage of time between
b_coeff segments):
    C(t) = (1 - t)^3*b0 + 3(1 - t)^2*t*b1
           + 3(1 - t)*t^2*b2 + t^3*b3
params:
points_array: array of user-supplied control points
numpy.array of size N by k
N is the number of control points
k is the number of dimensions for each point
d_pts: array of de Boor control points
numpy.array of size N+3 by k
returns:
b_coeffs: k-dimensional array of 4 Bezier coefficients
for every control point
numpy.array of size N by 4 by k
"""
(rows, k) = np.shape(points_array)
N = rows - 1 # N minus 1 because points array includes x_0
b_coeffs = np.zeros(shape=(k, N, 4))
for i in range(0, N):
points_array_i = i+1
d_pts_i = i + 2
if i == 0:
for axis_pos in range(0, k):
b_coeffs[axis_pos, i, 0] = points_array[points_array_i - 1,
axis_pos]
b_coeffs[axis_pos, i, 1] = d_pts[d_pts_i - 1, axis_pos]
b_coeffs[axis_pos, i, 2] = (0.5 * d_pts[d_pts_i - 1, axis_pos]
+ 0.5 * d_pts[d_pts_i, axis_pos])
b_coeffs[axis_pos, i, 3] = points_array[points_array_i,
axis_pos]
elif i == N-1:
for axis_pos in range(0, k):
b_coeffs[axis_pos, i, 0] = points_array[points_array_i - 1,
axis_pos]
b_coeffs[axis_pos, i, 1] = (0.5 * d_pts[d_pts_i - 1, axis_pos]
+ 0.5 * d_pts[d_pts_i, axis_pos])
b_coeffs[axis_pos, i, 2] = d_pts[d_pts_i, axis_pos]
b_coeffs[axis_pos, i, 3] = points_array[points_array_i,
axis_pos]
else:
for axis_pos in range(0, k):
b_coeffs[axis_pos, i, 0] = points_array[points_array_i - 1,
axis_pos]
b_coeffs[axis_pos, i, 1] = (2.0/3.0 * d_pts[d_pts_i - 1,
axis_pos]
+ 1.0/3.0 * d_pts[d_pts_i,
axis_pos])
b_coeffs[axis_pos, i, 2] = (1.0/3.0 * d_pts[d_pts_i - 1,
axis_pos]
+ 2.0/3.0 * d_pts[d_pts_i,
axis_pos])
b_coeffs[axis_pos, i, 3] = points_array[points_array_i,
axis_pos]
return b_coeffs
def _cubic_spline_point(b_coeff, t):
"""
Internal convenience function for calculating
a k-dimensional point defined by the supplied
Bezier coefficients. Finds the point that
describes the current position along the bezier
segment for k dimensions.
params:
b_coeff => b0...b3: Four k-dimensional Bezier
coefficients each one is a numpy.array
of size k by 1, so
b_coeff is a numpy array of size k by 4
k is the number of dimensions for each
coefficient
t: percentage of time elapsed for this segment
0 <= int <= 1.0
returns:
current position in k dimensions
numpy.array of size 1 by k
"""
return (pow((1-t), 3)*b_coeff[:, 0] +
3*pow((1-t), 2)*t*b_coeff[:, 1] +
3*(1-t)*pow(t, 2)*b_coeff[:, 2] +
pow(t, 3)*b_coeff[:, 3]
)
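# Sanity check (illustrative): substituting t = 0 into the expression above
# collapses it to b_coeff[:, 0], and t = 1 collapses it to b_coeff[:, 3], so
# each cubic segment starts at its first and ends at its last Bezier
# coefficient -- exactly the C0 continuity condition quoted in the module
# docstring.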
def bezier_point(b_coeffs, b_index, t):
"""
Finds the k values that describe the current
position along the bezier curve for k dimensions.
params:
b_coeffs: k-dimensional array
for every control point with 4 Bezier coefficients
numpy.array of size k by N by 4
N is the number of control points
k is the number of dimensions for each point
b_index: index position out between two of
the N b_coeffs for this point in time
int
t: percentage of time that has passed between
the two control points
0 <= int <= 1.0
returns:
b_point: current position in k dimensions
numpy.array of size 1 by k
"""
if b_index <= 0:
b_point = b_coeffs[:, 0, 0]
elif b_index > b_coeffs.shape[1]:
b_point = b_coeffs[:, -1, -1]
else:
t = 0.0 if t < 0.0 else t
t = 1.0 if t > 1.0 else t
b_coeff_set = b_coeffs[:, b_index-1, range(4)]
b_point = _cubic_spline_point(b_coeff_set, t)
return b_point
def bezier_curve(b_coeffs, num_intervals):
"""
    Interpolation of the entire Bezier curve at once,
using a specified number of intervals between
control points (encapsulated by b_coeffs).
params:
b_coeffs: k-dimensional array of 4 Bezier coefficients
for every control point
numpy.array of size N by 4 by k
N is the number of control points
k is the number of dimensions for each point
num_intervals: the number of intervals between
control points
int > 0
returns:
b_curve: positions along the bezier curve in k-dimensions
numpy.array of size N*num_interval+1 by k
(the +1 is to include the start position on the curve)
"""
assert num_intervals > 0,\
"Invalid number of intervals chosen (must be greater than 0)"
interval = 1.0 / num_intervals
(num_axes, num_bpts, _) = np.shape(b_coeffs)
b_curve = np.zeros((num_bpts*num_intervals+1, num_axes))
# Copy out initial point
b_curve[0, :] = b_coeffs[:, 0, 0]
for current_bpt in range(num_bpts):
b_coeff_set = b_coeffs[:, current_bpt, range(4)]
for iteration, t in enumerate(np.linspace(interval, 1,
num_intervals)):
b_curve[(current_bpt *
num_intervals +
iteration+1), :] = _cubic_spline_point(b_coeff_set, t)
return b_curve
|
f1e3c3e23508e5e2016c753533adb9245fb3f9b1
|
a173777f4ba02c1e683d75810fa6932487ba42cc
|
/2020/0CTF-quals/flash/Replace.py
|
bae2d8b89753c201452cfb60b3c05e1fa3264150
|
[] |
no_license
|
perfectblue/ctf-writeups
|
ba9454ef06e1004253f004154fba6ae00d88ca09
|
3f2a8a2c2598d700f33cb3f39ceb515e2ba46312
|
refs/heads/master
| 2023-06-25T19:28:05.222110
| 2022-12-11T04:55:13
| 2022-12-11T04:55:13
| 133,306,580
| 606
| 75
| null | 2023-01-20T22:38:17
| 2018-05-14T04:53:27
|
Python
|
UTF-8
|
Python
| false
| false
| 709
|
py
|
Replace.py
|
import jarray
from ghidra.util.task.TaskMonitorAdapter import DUMMY_MONITOR
def replace(prog, haystack, result, start, end, align=4):
addrs = prog.getAddressFactory()
mem = prog.getMemory()
ram = addrs.getAddressSpace('ram').getBaseSpaceID()
buff = jarray.zeros(len(haystack), 'b')
cdm = prog.getCodeManager()
assert len(haystack) == len(result)
for addr in range(start, end, align):
addrend = addrs.getAddress(ram, addr + len(buff))
addr = addrs.getAddress(ram, addr)
mem.getBytes(addr, buff)
if haystack == buff:
cdm.clearCodeUnits(addr, addrend, False, DUMMY_MONITOR)
mem.setBytes(addr, result)
print(addr)
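# Illustrative invocation (hypothetical bytes and addresses; run from
# Ghidra's Jython console, where `currentProgram` is predefined):
#
#     needle = jarray.array([0x41] * 4, 'b')   # byte pattern to search for
#     patch = jarray.array([0x00] * 4, 'b')    # same-length replacement
#     replace(currentProgram, needle, patch, 0x00400000, 0x00410000)
#
# Each aligned match in [start, end) has its code units cleared before the
# bytes are overwritten, so the region can be re-disassembled afterwards.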
|
59333f28ccce0ab3d0f641838206e3ece7d6b911
|
091e40b7cc9ac83d30e860603b4c8cdc785b77ee
|
/tests/forecast/test_deep_model.py
|
9118dda2153d583f716462b4c409447cf4ea3a40
|
[
"BSD-3-Clause"
] |
permissive
|
salesforce/Merlion
|
b7a75326cd05883285f25ff856c89dc80570e602
|
01c3fc3406ebf19798cedcddbe829ae5339e1424
|
refs/heads/main
| 2023-04-10T02:25:43.959522
| 2023-03-22T18:39:54
| 2023-03-22T18:39:54
| 390,401,992
| 2,905
| 268
|
BSD-3-Clause
| 2023-03-22T18:39:56
| 2021-07-28T15:30:56
|
Python
|
UTF-8
|
Python
| false
| false
| 9,174
|
py
|
test_deep_model.py
|
#
# Copyright (c) 2023 salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
#
import logging
import os
import sys
import shutil
import unittest
import gdown
import pandas as pd
from os.path import abspath, dirname, join, exists
from merlion.evaluate.forecast import ForecastMetric
from merlion.models.forecast.autoformer import AutoformerConfig, AutoformerForecaster
from merlion.models.forecast.transformer import TransformerConfig, TransformerForecaster
from merlion.models.forecast.informer import InformerConfig, InformerForecaster
from merlion.models.forecast.etsformer import ETSformerConfig, ETSformerForecaster
from merlion.models.forecast.deep_ar import DeepARConfig, DeepARForecaster
from merlion.models.utils.rolling_window_dataset import RollingWindowDataset
from merlion.transform.bound import LowerUpperClip
from merlion.transform.normalize import MinMaxNormalize
from merlion.transform.resample import TemporalResample
from merlion.transform.sequence import TransformSequence
from merlion.utils.time_series import TimeSeries, to_pd_datetime
from ts_datasets.forecast import SeattleTrail
from ts_datasets.forecast.custom import CustomDataset
logger = logging.getLogger(__name__)
rootdir = dirname(dirname(dirname(abspath(__file__))))
class TestDeepModels(unittest.TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.n_past = 16
self.max_forecast_steps = 8
self.early_stop_patience = 4
self.num_epochs = 2
self.use_gpu = True
self.batch_size = 32
df = self._obtain_df("weather")
bound = 16 * 20
train_df = df[0:bound]
test_df = df[bound : 2 * bound]
self.train_df = train_df
self.test_df = test_df
self.train_data = TimeSeries.from_pd(self.train_df)
self.test_data = TimeSeries.from_pd(self.test_df)
def test_deep_ar_predict_univariate(self):
print("-" * 80)
logger.info("test_deep_ar_predict_univariate\n" + "-" * 80)
self._test_deep_ar(20)
def test_deep_ar_predict_multivariate(self):
print("-" * 80)
logger.info("test_deep_ar_predict_multivariate\n" + "-" * 80)
self._test_deep_ar(None)
def test_autoformer_predict_univariate(self):
print("-" * 80)
logger.info("test_autoformer_predict_univariate\n" + "-" * 80)
self._test_autoformer(9)
def test_autoformer_predict_multivariate(self):
print("-" * 80)
logger.info("test_autoformer_predict_multivariate\n" + "-" * 80)
self._test_autoformer(None)
def test_informer_predict_univariate(self):
print("-" * 80)
logger.info("test_informer_predict_univariate\n" + "-" * 80)
self._test_informer(3)
def test_informer_predict_multivariate(self):
print("-" * 80)
logger.info("test_informer_predict_multivariate\n" + "-" * 80)
self._test_informer(None)
def test_etsformer_predict_univariate(self):
print("-" * 80)
logger.info("test_etsformer_predict_univariate\n" + "-" * 80)
self._test_etsformer(15)
def test_etsformer_predict_multivariate(self):
print("-" * 80)
logger.info("test_etsformer_predict_multivariate\n" + "-" * 80)
self._test_etsformer(None)
def test_transformer_predict_univariate(self):
print("-" * 80)
logger.info("test_transformer_predict_univariate\n" + "-" * 80)
self._test_transformer(0)
def test_transformer_predict_multivariate(self):
print("-" * 80)
logger.info("test_transformer_predict_multivariate\n" + "-" * 80)
self._test_transformer(None)
def _test_deep_ar(self, target_seq_index):
logger.info("Testing Deep AR forecasting")
config = DeepARConfig(
n_past=self.n_past,
max_forecast_steps=self.max_forecast_steps,
early_stop_patience=self.early_stop_patience,
num_epochs=self.num_epochs,
use_gpu=self.use_gpu,
batch_size=self.batch_size,
target_seq_index=target_seq_index,
)
forecaster = DeepARForecaster(config)
self._test_model(forecaster, self.train_data, self.test_data)
def _test_autoformer(self, target_seq_index):
logger.info("Testing Autoformer forecasting")
start_token_len = 3
config = AutoformerConfig(
n_past=self.n_past,
max_forecast_steps=self.max_forecast_steps,
start_token_len=start_token_len,
early_stop_patience=self.early_stop_patience,
num_epochs=self.num_epochs,
use_gpu=self.use_gpu,
batch_size=self.batch_size,
target_seq_index=target_seq_index,
)
forecaster = AutoformerForecaster(config)
self._test_model(forecaster, self.train_data, self.test_data)
def _test_transformer(self, target_seq_index):
logger.info("Testing Transformer forecasting")
start_token_len = 3
config = TransformerConfig(
n_past=self.n_past,
max_forecast_steps=self.max_forecast_steps,
start_token_len=start_token_len,
early_stop_patience=self.early_stop_patience,
num_epochs=self.num_epochs,
use_gpu=self.use_gpu,
batch_size=self.batch_size,
target_seq_index=target_seq_index,
)
forecaster = TransformerForecaster(config)
self._test_model(forecaster, self.train_data, self.test_data)
def _test_informer(self, target_seq_index):
logger.info("Testing Informer forecasting")
start_token_len = 3
config = InformerConfig(
n_past=self.n_past,
max_forecast_steps=self.max_forecast_steps,
start_token_len=start_token_len,
early_stop_patience=self.early_stop_patience,
num_epochs=self.num_epochs,
use_gpu=self.use_gpu,
batch_size=self.batch_size,
target_seq_index=target_seq_index,
)
forecaster = InformerForecaster(config)
self._test_model(forecaster, self.train_data, self.test_data)
def _test_etsformer(self, target_seq_index):
logger.info("Testing ETSformer forecasting")
config = ETSformerConfig(
n_past=self.n_past,
max_forecast_steps=self.max_forecast_steps,
top_K=3, # top fourier basis
early_stop_patience=self.early_stop_patience,
num_epochs=self.num_epochs,
use_gpu=self.use_gpu,
batch_size=self.batch_size,
target_seq_index=target_seq_index,
)
forecaster = ETSformerForecaster(config)
self._test_model(forecaster, self.train_data, self.test_data)
def _obtain_df(self, dataset_name="weather"):
data_dir = join(rootdir, "data")
if dataset_name == "weather":
data_url = "https://drive.google.com/drive/folders/1Xz84ci5YKWL6O2I-58ZsVe42lYIfqui1"
data_folder = join(data_dir, "weather")
data_file_path = join(data_folder, "weather.csv")
else:
raise NotImplementedError
if not exists(data_file_path):
while True:
try:
gdown.download_folder(data_url, quiet=False, use_cookies=False)
except TimeoutError:
logger.error("Timeout Error, try downloading again...")
else:
logger.info("Successfully downloaded %s!" % (dataset_name))
break
shutil.move("./%s" % (dataset_name), data_folder)
weather_ds = CustomDataset(data_folder)
df, metadata = weather_ds[0]
return df
def _test_model(self, forecaster, train_data, test_data):
config = forecaster.config
model_name = forecaster.deep_model_class.__name__
model_save_path = join("./models", model_name.lower())
logger.info(model_name)
# training & saving
forecaster.train(train_data)
forecaster.save(model_save_path)
# Single data forecasting testing
dataset = RollingWindowDataset(
test_data,
target_seq_index=config.target_seq_index,
n_past=config.n_past,
n_future=config.max_forecast_steps,
ts_index=True,
)
test_prev, test = dataset[0]
forecaster.load(model_save_path)
pred, _ = forecaster.forecast(test.time_stamps, time_series_prev=test_prev)
        # Parenthesised so the conditional selects the expected dim; without the
        # parentheses the ternary would wrap the whole comparison and the assert
        # could silently pass in the multivariate case.
        assert pred.dim == (1 if forecaster.target_seq_index is not None else train_data.dim)
try:
shutil.rmtree(model_save_path)
except OSError as e:
logger.error(f"Error: {e.filename} - {e.strerror}.")
if __name__ == "__main__":
logging.basicConfig(
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s", stream=sys.stdout, level=logging.INFO
)
unittest.main()
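# Illustrative stand-alone use of one of the forecasters exercised above
# (hypothetical `train_data` / `test_data` TimeSeries objects):
#
#     config = InformerConfig(n_past=16, max_forecast_steps=8,
#                             start_token_len=3, num_epochs=2)
#     model = InformerForecaster(config)
#     model.train(train_data)
#     pred, stderr = model.forecast(test_data.time_stamps[:8])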
|
5fdbb4975390331c3a92b7491f85a1e83e478dba
|
a205f76141a6f21f7c200f2159bedcfab582dc00
|
/utils/tracking_utils_bk.py
|
dfd33c0495cd46f405ea8d7eea6aea45e0d20559
|
[] |
no_license
|
iiau-tracker/SPLT
|
c054a356e5a9181238b116df6da9e6fedb5d4808
|
a196e603798e9be969d9d985c087c11cad1cda43
|
refs/heads/py36
| 2022-12-04T21:11:59.610453
| 2020-04-27T13:52:34
| 2020-04-27T13:52:34
| 198,601,914
| 138
| 31
| null | 2022-11-22T03:56:30
| 2019-07-24T09:23:34
|
Python
|
UTF-8
|
Python
| false
| false
| 26,372
|
py
|
tracking_utils_bk.py
|
# coding=utf-8
import cv2
import numpy as np
from PIL import Image
import tensorflow as tf
from google.protobuf import text_format
from object_detection.core import box_list
from object_detection.core import box_list_ops
from object_detection.protos import pipeline_pb2
from utils.region_to_bbox import region_to_bbox
"""============================================================================"""
def crop_template_Hao(img, box, times=1.3):
im_h, im_w, _ = img.shape
cw = int(box[0] + box[2] / 2)
ch = int(box[1] + box[3] / 2)
half_w = int(box[2] / 2 * times)
half_h = int(box[3] / 2 * times)
top, bottom, left, right = (0, 0, 0, 0)
if cw < half_w: left = half_w - cw
if ch < half_h: top = half_h - ch
if (cw + half_w) > im_w: right = half_w + cw - im_w
if (ch + half_h) > im_h: bottom = half_h + ch - im_h
cw += left
ch += top
new_im = cv2.copyMakeBorder( # BGR [123.68, 116.779, 103.939]
img, top, bottom, left, right,
cv2.BORDER_CONSTANT, value=[123, 117, 104])
new_im = new_im[
ch - half_h:ch + half_h,
cw - half_w:cw + half_w, :]
return cv2.resize(new_im, (140, 140))
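# crop_template_Hao pads any out-of-frame region with (rounded) ImageNet
# channel means and always returns a fixed 140x140 template patch.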
def gen_search_patch_Hao(img, last_reliable_w, last_reliable_h):
    # window-size presets: context scale 2.8 with 300-px windows,
    # or 2.4 with 256-px windows (the pair used below)
crop_sz = int((last_reliable_w + last_reliable_h) / 2 * 2.4)
H = int(img.shape[0] / crop_sz) * 256
W = int(img.shape[1] / crop_sz) * 256
crop_win = np.array([0, 0, 256, 256], dtype=int)
if H == 0:
H = 256
if W == 0:
W = 256
Y, X = np.mgrid[0:H - 128:128, 0:W - 128:128]
Y = Y.reshape(-1)
X = X.reshape(-1)
if len(X) > 490:
        step = len(X) // 490  # integer division: a float step would break the slice below on Python 3
sel_idx = list(range(len(X)))[::step][:490]
sel_idx.append(len(X)-1)
X = X[sel_idx]
Y = Y[sel_idx]
else:
pass
search = cv2.resize(img, (W, H))
search = search.astype(np.float32)[None, :, :, :]
im = np.ones([len(X), 256, 256, 3])
pos_i = np.zeros([len(X), 4])
for i in range(len(X)):
im[i] = search[0,
crop_win[1] + Y[i]:crop_win[3] + Y[i],
crop_win[0] + X[i]:crop_win[2] + X[i], :]
# cv2.imwrite('/home/space/Documents/vot-toolkit/VOT_workspace/MBMD_Hao1/recrop{:0>d}.jpg'.format(i), im[i])
pos_i[i] = np.array([
(crop_win[1] + Y[i] + crop_win[3] + Y[i]) / 2.0 / H * img.shape[0],
(crop_win[0] + X[i] + crop_win[2] + X[i]) / 2.0 / W * img.shape[1],
last_reliable_h,
last_reliable_w
])
pos_i[:, 0] = pos_i[:, 0] - last_reliable_h / 2.0
pos_i[:, 1] = pos_i[:, 1] - last_reliable_w / 2.0
pos_i[:, 2] = pos_i[:, 0] + last_reliable_h
pos_i[:, 3] = pos_i[:, 1] + last_reliable_w
return im, pos_i.astype(int)
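# gen_search_patch_Hao tiles a rescaled copy of the frame with 256x256
# windows on a 128-pixel stride (50% overlap, capped at ~490 windows) and,
# for each window, records the box a target of the last reliable size would
# occupy at the window centre, mapped back to original image coordinates.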
# def gen_search_patch_test_Hao(img, gt_box, last_reliable_w, last_reliable_h):
# # 2.8 300
# # 2.4 256
# crop_sz = int((last_reliable_w + last_reliable_h) / 2 * 2.4)
# cx = gt_box[0] + gt_box[2]*1.0/2
# cy = gt_box[1] + gt_box[3]*1.0/2
# H = int(img.shape[0] / crop_sz) * 256
# W = int(img.shape[1] / crop_sz) * 256
# crop_win = np.array([0, 0, 256, 256], dtype=int)
# if H == 0:
# H = 256
# if W == 0:
# W = 256
# Y, X = np.mgrid[0:H - 128:128, 0:W - 128:128]
# Y = Y.reshape(-1)
# X = X.reshape(-1)
# if len(X) > 500:
# step = len(X) / 500
# sel_idx = list(range(len(X)))[::step][:500]
# sel_idx.append(len(X)-1)
# X = X[sel_idx]
# Y = Y[sel_idx]
# else:
# pass
# search = cv2.resize(img, (W, H))
# search = search.astype(np.float32)[None, :, :, :]
# im = np.ones([len(X), 256, 256, 3])
# target = np.zeros([len(X)])
# pos_i = np.zeros([len(X), 4])
# for i in range(len(X)):
# im[i] = search[0,
# crop_win[1] + Y[i]:crop_win[3] + Y[i],
# crop_win[0] + X[i]:crop_win[2] + X[i], :]
# if crop_win[1] + Y[i] <= cy \
# and cy < crop_win[3] + Y[i] \
# and crop_win[0] + X[i] <= cx \
# and cx < crop_win[2] + X[i]:
# target[i] = 1
# pos_i[i] = np.array([
# (crop_win[1] + Y[i] + crop_win[3] + Y[i]) / 2.0 / H * img.shape[0],
# (crop_win[0] + X[i] + crop_win[2] + X[i]) / 2.0 / W * img.shape[1],
# last_reliable_h,
# last_reliable_w
# ])
# pos_i[:, 0] = pos_i[:, 0] - last_reliable_h / 2.0
# pos_i[:, 1] = pos_i[:, 1] - last_reliable_w / 2.0
# pos_i[:, 2] = pos_i[:, 0] + last_reliable_h
# pos_i[:, 3] = pos_i[:, 1] + last_reliable_w
# return im, pos_i.astype(int), target.astype(int)
def get_configs_from_pipeline_file(config_file):
"""Reads training configuration from a pipeline_pb2.TrainEvalPipelineConfig.
Reads training config from file specified by pipeline_config_path flag.
Returns:
model_config: model_pb2.DetectionModel
train_config: train_pb2.TrainConfig
input_config: input_reader_pb2.InputReader
"""
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
with tf.gfile.GFile(config_file, 'r') as f:
text_format.Merge(f.read(), pipeline_config)
model_config = pipeline_config.model.ssd
train_config = pipeline_config.train_config
input_config = pipeline_config.train_input_reader
eval_config = pipeline_config.eval_config
return model_config, train_config, input_config, eval_config
def restore_model(
sess,
model_scope,
checkpoint_path,
variables_to_restore,
NET):
"""恢复Regression Network的权重
"""
'''必须把V的权重都给排除在外,不然的话会报"在checkpoint文件中找不到某些变量的错误"'''
if NET == 'resnet101':
name_to_var_dict = dict(
            [(var.op.name.lstrip(model_scope + '/'), var)  # NB: lstrip removes a leading character *set*, not a prefix
for var in variables_to_restore
if not var.op.name.startswith('fc')
and not var.op.name.startswith('resnet_v1_101')
and not var.op.name.startswith('Fix_Resnet_V1')
and not var.op.name.startswith('Variable')])
if NET == 'resnet50':
name_to_var_dict = dict(
            [(var.op.name.lstrip(model_scope + '/'), var)  # NB: lstrip removes a leading character *set*, not a prefix
for var in variables_to_restore
if not var.op.name.startswith('fc')
and not var.op.name.startswith('resnet_v1_50')
and not var.op.name.startswith('Fix_Resnet_V1')
and not var.op.name.startswith('Variable')])
if NET == 'M':
name_to_var_dict = dict(
            [(var.op.name.lstrip(model_scope + '/'), var)  # NB: lstrip removes a leading character *set*, not a prefix
for var in variables_to_restore
if not var.op.name.startswith('fc')
and not var.op.name.startswith('MobilenetV1')
# and not var.op.name.startswith('Fix_Resnet_V1')
and not var.op.name.startswith('Variable')])
if NET == 'vgg16':
name_to_var_dict = dict(
            [(var.op.name.lstrip(model_scope + '/'), var)  # NB: lstrip removes a leading character *set*, not a prefix
for var in variables_to_restore
if not var.op.name.startswith('fc')
and not var.op.name.startswith('vgg_16')
# and not var.op.name.startswith('Fix_Resnet_V1')
and not var.op.name.startswith('Variable')])
    # To inspect the restored variable names, uncomment the lines below.
    # for i in range(len(name_to_var_dict.keys())):
    #     print(list(name_to_var_dict.keys())[i])
    # print(list(name_to_var_dict.keys()))
    if NET is None:
name_to_var_dict = dict(
            [(var.op.name.lstrip(model_scope + '/'), var)  # NB: lstrip removes a leading character *set*, not a prefix
for var in variables_to_restore])
saver = tf.train.Saver(name_to_var_dict)
latest_checkpoint = tf.train.latest_checkpoint(checkpoint_path)
saver.restore(sess, latest_checkpoint)
def crop_search_region(
img, gt,
win_size=300, scale=4,
mean_rgb=128, offset=None):
# gt: [ymin, xmin, ymax, xmax]
bnd_ymin, bnd_xmin, bnd_ymax, bnd_xmax = gt
bnd_w = bnd_xmax - bnd_xmin
bnd_h = bnd_ymax - bnd_ymin
cy = (bnd_ymin + bnd_ymax)/2
cx = (bnd_xmin + bnd_xmax)/2
origin_win_size_h = bnd_h * scale
origin_win_size_w = bnd_w * scale
im_size = img.shape[:2]
min_x = np.round(cx - origin_win_size_w / 2).astype(np.int32)
max_x = np.round(cx + origin_win_size_w / 2).astype(np.int32)
min_y = np.round(cy - origin_win_size_h / 2).astype(np.int32)
max_y = np.round(cy + origin_win_size_h / 2).astype(np.int32)
if offset is not None:
min_offset_y = bnd_ymax - max_y
max_offset_y = bnd_ymin - min_y
min_offset_x = bnd_xmax - max_x
max_offset_x = bnd_xmin - min_x
offset[0] = np.clip(
offset[0] * origin_win_size_h,
min_offset_y, max_offset_y)
offset[1] = np.clip(
offset[1] * origin_win_size_w,
min_offset_x, max_offset_x)
offset = np.int32(offset)
min_y += offset[0]
max_y += offset[0]
min_x += offset[1]
max_x += offset[1]
win_loc = np.array([min_y, min_x])
unscaled_w = max_x - min_x + 1
unscaled_h = max_y - min_y + 1
min_x_win = 0
min_y_win = 0
max_x_win = unscaled_w
max_y_win = unscaled_h
min_x_im = min_x
min_y_im = min_y
max_x_im = max_x + 1
max_y_im = max_y + 1
"""PIL crop auto-pad"""
img = Image.fromarray(img)
img = img.crop([min_x_im, min_y_im, max_x_im, max_y_im])
img = np.array(img)
if min_x < 0:
min_x_im = 0
min_x_win = 0 - min_x
if min_y < 0:
min_y_im = 0
min_y_win = 0 - min_y
if max_x+1 > im_size[1]:
max_x_im = im_size[1]
max_x_win = unscaled_w - (max_x + 1 - im_size[1])
if max_y+1 > im_size[0]:
max_y_im = im_size[0]
        max_y_win = unscaled_h - (max_y + 1 - im_size[0])
unscaled_win = np.ones([unscaled_h, unscaled_w, 3], dtype=np.uint8) * mean_rgb
unscaled_win[min_y_win:max_y_win, min_x_win:max_x_win] \
= img[min_y_win:max_y_win, min_x_win:max_x_win]
height_scale = np.float32(unscaled_h)/win_size
width_scale = np.float32(unscaled_w)/win_size
win = cv2.resize(unscaled_win, (win_size, win_size))
return win, win_loc, [height_scale, width_scale]
"""============================================================================"""
def compile_results(gt, bboxes, dist_threshold):
l = np.size(bboxes, 0)
gt4 = np.zeros((l, 4))
new_distances = np.zeros(l)
new_ious = np.zeros(l)
n_thresholds = 50
precisions_ths = np.zeros(n_thresholds)
for i in range(l):
gt4[i, :] = region_to_bbox(gt[i, :], center=False)
new_distances[i] = _compute_distance(bboxes[i, :], gt4[i, :])
new_ious[i] = _compute_iou(bboxes[i, :], gt4[i, :])
# what's the percentage of frame in which center displacement is inferior to given threshold? (OTB metric)
precision = sum(new_distances < dist_threshold)/float(np.size(new_distances)) * 100.0
# find above result for many thresholds, then report the AUC
thresholds = np.linspace(0, 25, n_thresholds+1)
thresholds = thresholds[-n_thresholds:]
# reverse it so that higher values of precision goes at the beginning
thresholds = thresholds[::-1]
for i in range(n_thresholds):
precisions_ths[i] = sum(new_distances < thresholds[i])/float(np.size(new_distances))
# integrate over the thresholds
precision_auc = np.trapz(precisions_ths)
# per frame averaged intersection over union (OTB metric)
iou = np.mean(new_ious) * 100
return l, precision, precision_auc, iou
def _compute_distance(boxA, boxB):
a = np.array((boxA[0]+boxA[2]/2, boxA[1]+boxA[3]/2))
b = np.array((boxB[0]+boxB[2]/2, boxB[1]+boxB[3]/2))
dist = np.linalg.norm(a - b)
assert dist >= 0
assert dist != float('Inf')
return dist
def _compute_iou(boxA, boxB):
# determine the (x, y)-coordinates of the intersection rectangle
xA = max(boxA[0], boxB[0])
yA = max(boxA[1], boxB[1])
xB = min(boxA[0] + boxA[2], boxB[0] + boxB[2])
yB = min(boxA[1] + boxA[3], boxB[1] + boxB[3])
if xA < xB and yA < yB:
# compute the area of intersection rectangle
interArea = (xB - xA) * (yB - yA)
# compute the area of both the prediction and ground-truth
# rectangles
boxAArea = boxA[2] * boxA[3]
boxBArea = boxB[2] * boxB[3]
# compute the intersection over union by taking the intersection
# area and dividing it by the sum of prediction + ground-truth
# areas - the intersection area
iou = interArea / float(boxAArea + boxBArea - interArea)
else:
iou = 0
assert iou >= 0
assert iou <= 1.01
return iou
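# Worked example (illustrative), with boxes in [x, y, w, h] form:
#   boxA = [0, 0, 10, 10], boxB = [5, 5, 10, 10]
#   intersection = (10 - 5) * (10 - 5) = 25
#   union        = 100 + 100 - 25     = 175
#   IoU          = 25 / 175 ≈ 0.143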
def show_res(im, box, win_name,score=None,save_path=None,frame_id=None,all_frame=None,score_max=None):
cv2.namedWindow(win_name,cv2.WINDOW_NORMAL)
cv2.rectangle(im, (box[1], box[0]),
(box[3], box[2]), [0, 255, 0], 2)
if score is not None:
cv2.putText(im,str(score),(20,40), cv2.FONT_HERSHEY_SIMPLEX,1,(0,0,255),1)
if score_max is not None:
cv2.putText(im,str(score_max),(20,60), cv2.FONT_HERSHEY_SIMPLEX,1,(0,0,255),1)
if frame_id is not None:
cv2.putText(im,str(frame_id),(20,20), cv2.FONT_HERSHEY_SIMPLEX,1,(0,0,255),1)
#cv2.imwrite("/home/xiaobai/lijun/base_vid_maml_box_baseline/fig/%05d.jpg"%frame_id, im[:, :, -1::-1])
cv2.imshow(win_name, im)
cv2.waitKey(1)
def show_save(im, box, win_name, frame_id=None, save=True):
cv2.namedWindow(win_name,cv2.WINDOW_NORMAL)
cv2.rectangle(im, (box[1], box[0]),
(box[3], box[2]), [0, 255, 0], 2)
im = cv2.resize(im, (640,360))
if frame_id is not None:
cv2.putText(im,'#'+str(frame_id),(5,25), cv2.FONT_HERSHEY_PLAIN,2,(0,255,255),2)
if save:
cv2.imwrite("/home/space/Documents/Experiment/ICCV19/video/01_%05d.jpg"%frame_id, im)
cv2.imshow(win_name, im)
cv2.waitKey(1)
def generate_init_training_samples(img, box, win_size, src_scales=None, tar_scales=None, batch_size=20, mean_rgb=128):
if src_scales is None:
src_scales = [1.2, 3]
if tar_scales is None:
tar_scales = [3.7, 4.5]
out_images = np.zeros([batch_size, 1, win_size, win_size, 3], dtype=np.uint8)
out_gt_box = np.zeros([batch_size, 1, 4], dtype=np.float32)
init_img = img.crop(np.int32([box[1], box[0], box[3], box[2]]))
init_img = init_img.resize([128,128], resample=Image.BILINEAR)
init_img = np.array(init_img)
init_img = np.expand_dims(np.expand_dims(init_img,axis=0),axis=0)
init_img = np.tile(init_img,(batch_size,1,1,1,1))
for ind in range(batch_size):
src_scale = np.random.rand(1)[0]*(src_scales[1]-src_scales[0]) + src_scales[0]
tar_scale = np.random.rand(1)[0]*(tar_scales[1]-tar_scales[0]) + tar_scales[0]
src_offset = np.random.laplace(0, 0.2, [2])
tar_offset = np.random.laplace(0, 0.2, [2])
# src_win, src_gt, _, _ = crop_search_region(img, box, win_size, src_scale, offset=src_offset)
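        # NOTE: crop_search_region() as defined above returns only 3 values
        # (win, win_loc, scales), so the 4-way unpack below would raise a
        # ValueError if executed; kept as found in this backup ("_bk") file.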
tar_win, tar_gt, _, _ = crop_search_region(img, box, win_size, tar_scale, offset=tar_offset)
#out_images[ind, 0] = init_img
out_images[ind, 0] = tar_win
out_gt_box[ind, 0] = tar_gt
return out_images, init_img,out_gt_box
def build_init_graph(model, model_scope, reuse=None):
input_init_image = tf.placeholder(dtype=tf.uint8, shape=[128,128,3])
float_init_image = tf.to_float(input_init_image)
float_init_image = tf.expand_dims(tf.expand_dims(float_init_image, axis=0), axis=0)
preprocessed_init_image = model.preprocess(float_init_image, [128,128])
with tf.variable_scope(model_scope, reuse=reuse):
init_feature_maps = model.extract_init_feature(preprocessed_init_image)
return init_feature_maps,input_init_image
def build_box_predictor(model, model_scope,init_feature_maps,reuse=None):
input_cur_image = tf.placeholder(dtype=tf.uint8, shape=[300, 300, 3])
images = tf.expand_dims(input_cur_image, axis=0)
float_images = tf.to_float(images)
preprocessed_images = model.preprocess(float_images)
preprocessed_images = tf.expand_dims(preprocessed_images, axis=0)
input_init_gt_box = tf.constant(np.zeros((1, 4)), dtype=tf.float32)
init_gt_box = tf.reshape(input_init_gt_box, shape=[1,1,4])
groundtruth_classes = tf.ones(dtype=tf.float32, shape=[1, 1, 1])
model.provide_groundtruth(init_gt_box,
groundtruth_classes,
None)
with tf.variable_scope(model_scope, reuse=reuse):
prediction_dict = model.predict_box_with_init(init_feature_maps, preprocessed_images, istraining=False)
detections = model.postprocess(prediction_dict)
original_image_shape = tf.shape(preprocessed_images)
absolute_detection_boxlist = box_list_ops.to_absolute_coordinates(
box_list.BoxList(tf.squeeze(detections['detection_boxes'], axis=0)),
original_image_shape[2], original_image_shape[3])
return absolute_detection_boxlist.get(), detections['detection_scores'], input_cur_image
def build_test_graph(model, model_scope, reuse=None,weights_dict=None):
input_init_gt_box = tf.constant(np.zeros((1,4)), dtype=tf.float32)
# input_init_image = tf.constant(init_img_array, dtype=tf.uint8)
input_init_image = tf.placeholder(dtype=tf.uint8, shape=[128,128,3])
input_cur_image = tf.placeholder(dtype=tf.uint8, shape=[300,300,3])
init_gt_box = tf.reshape(input_init_gt_box, shape=[1,1,4])
groundtruth_classes = tf.ones(dtype=tf.float32, shape=[1,1,1])
float_init_image = tf.to_float(input_init_image)
float_init_image = tf.expand_dims(tf.expand_dims(float_init_image, axis=0), axis=0)
preprocessed_init_image = model.preprocess(float_init_image, [128,128])
images = tf.expand_dims(input_cur_image, axis=0)
float_images = tf.to_float(images)
preprocessed_images = model.preprocess(float_images)
preprocessed_images = tf.expand_dims(preprocessed_images, axis=0)
model.provide_groundtruth(init_gt_box,
groundtruth_classes,
None)
with tf.variable_scope(model_scope, reuse=reuse):
prediction_dict = model.predict(preprocessed_init_image, preprocessed_images,istraining=False,reuse=reuse)
detections = model.postprocess(prediction_dict)
original_image_shape = tf.shape(preprocessed_images)
absolute_detection_boxlist = box_list_ops.to_absolute_coordinates(
box_list.BoxList(tf.squeeze(detections['detection_boxes'], axis=0)),
original_image_shape[2], original_image_shape[3])
return absolute_detection_boxlist.get(), detections['detection_scores'], input_cur_image, input_init_image
def build_extract_feature_graph(model, model_scope,reuse=None):
batch_size = 20
seq_len = 1
image = tf.placeholder(dtype=tf.uint8, shape=[batch_size, seq_len, 300,300,3])
float_image = tf.to_float(image)
float_image = tf.reshape(float_image,[-1,300,300,3])
preprocessed_images = model.preprocess(float_image)
preprocessed_images = tf.reshape(preprocessed_images,[batch_size,seq_len,300,300,3])
random_noise = tf.random_normal([batch_size, seq_len, 300, 300, 3], mean=0, stddev=0.1)
preprocessed_images = preprocessed_images + random_noise
with tf.variable_scope(model_scope, reuse=reuse):
output_dict = model.extract_feature(preprocessed_images)
init_image = tf.placeholder(dtype=tf.uint8, shape=[1,seq_len, 128,128,3])
float_init_image = tf.to_float(init_image)
float_init_image = tf.reshape(float_init_image,[-1,128,128,3])
preprocessed_init_images = model.preprocess(float_init_image,[128,128])
preprocessed_init_images = tf.reshape(preprocessed_init_images,[1,seq_len,128,128,3])
with tf.variable_scope(model_scope, reuse=reuse):
init_feature_maps = model.extract_init_feature(preprocessed_init_images)
return image, init_image, output_dict, init_feature_maps
def build_extract_feature_graph1(model, model_scope,reuse=None):
batch_size = 5
seq_len = 1
image = tf.placeholder(dtype=tf.uint8, shape=[batch_size, seq_len, 300,300,3])
float_image = tf.to_float(image)
float_image = tf.reshape(float_image,[-1,300,300,3])
preprocessed_images = model.preprocess(float_image)
preprocessed_images = tf.reshape(preprocessed_images,[batch_size,seq_len,300,300,3])
random_noise = tf.random_normal([batch_size, seq_len, 300, 300, 3], mean=0, stddev=0.1)
preprocessed_images = preprocessed_images + random_noise
with tf.variable_scope(model_scope, reuse=reuse):
output_dict = model.extract_feature(preprocessed_images)
init_image = tf.placeholder(dtype=tf.uint8, shape=[1,seq_len, 128,128,3])
float_init_image = tf.to_float(init_image)
float_init_image = tf.reshape(float_init_image,[-1,128,128,3])
preprocessed_init_images = model.preprocess(float_init_image,[128,128])
preprocessed_init_images = tf.reshape(preprocessed_init_images,[1,seq_len,128,128,3])
with tf.variable_scope(model_scope, reuse=reuse):
init_feature_maps = model.extract_init_feature(preprocessed_init_images)
return image, init_image, output_dict, init_feature_maps
# def build_train_boxpredictor_graph(model, model_scope,reuse=None):
# batch_size = 20
# seq_len = 1
# init_features = tf.placeholder(dtype=tf.float32, shape=[batch_size,seq_len,1,1,])
def build_train_graph(model,model_scope, lr=1e-5, reuse=None):
batch_size = 20
seq_len = 1
featureOp0 = tf.placeholder(dtype=tf.float32, shape=[batch_size,19,19,512])
featureOp1 = tf.placeholder(dtype=tf.float32, shape=[batch_size,10,10,512])
# featureOp2 = tf.placeholder(dtype=tf.float32, shape=[batch_size,5,5,256])
# featureOp3 = tf.placeholder(dtype=tf.float32, shape=[batch_size,3,3,256])
# featureOp4 = tf.placeholder(dtype=tf.float32, shape=[batch_size,2,2,256])
# featureOp5 = tf.placeholder(dtype=tf.float32, shape=[batch_size,1,1,256])
initFeatureOp = tf.placeholder(dtype=tf.float32, shape=[batch_size,1,1,512])
feature_maps = [featureOp0,featureOp1]
train_gt_box = tf.placeholder(dtype=tf.float32, shape=[batch_size,seq_len,4])
train_gt_class = tf.ones(dtype=tf.uint8, shape=[batch_size,seq_len,1])
model.provide_groundtruth(train_gt_box,train_gt_class,None)
with tf.variable_scope(model_scope,reuse=reuse):
train_prediction_dict = model.predict_box(initFeatureOp,feature_maps,istraining=True)
losses_dict = model.loss(train_prediction_dict)
total_loss = 0
# total_loss = losses_dict['classification_loss']
for loss in losses_dict.values():
total_loss += loss
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
optimizer = tf.train.MomentumOptimizer(learning_rate=lr, momentum=0.9)
# optimizer = tf.train.AdamOptimizer()
variables_to_restore = tf.global_variables()
all_trainable_variables = tf.trainable_variables()
trainable_variables = [var for var in all_trainable_variables if (var.op.name.startswith(model_scope + '/BoxPredictor') )]
grad_vars = optimizer.compute_gradients(total_loss, trainable_variables)
for grad, var in grad_vars:
if grad is not None:
if var.name.endswith("Conv3x3_OutPut_40/weights:0") or var.name.endswith("Conv3x3_OutPut_40/biases:0") or var.name.endswith("Conv3x3_OutPut_20/weights:0") \
or var.name.endswith("Conv3x3_OutPut_20/biases:0") or var.name.endswith("Conv1x1_OutPut_20/weights:0") or var.name.endswith("Conv1x1_OutPut_20/biases:0") \
or var.name.endswith("Conv1x1_OutPut_10/weights:0") or var.name.endswith(
"Conv1x1_OutPut_10/biases:0"):
grad *= 10.0
grad_updates = optimizer.apply_gradients(grad_vars)
update_ops.append(grad_updates)
update_op = tf.group(*update_ops)
with tf.control_dependencies([update_op]):
train_tensor = tf.identity(total_loss, name='train_op')
return train_tensor, variables_to_restore,featureOp0, featureOp1, initFeatureOp, train_gt_box
def crop_init_array(init_img,gt_boxes):
img1_xiaobai = np.array(init_img)
pad_x = 36.0 / 264.0 * (gt_boxes[0, 3] - gt_boxes[0, 1]) * init_img.width
pad_y = 36.0 / 264.0 * (gt_boxes[0, 2] - gt_boxes[0, 0]) * init_img.height
cx = (gt_boxes[0, 3] + gt_boxes[0, 1]) / 2.0 * init_img.width
cy = (gt_boxes[0, 2] + gt_boxes[0, 0]) / 2.0 * init_img.height
startx = gt_boxes[0, 1] * init_img.width - pad_x
starty = gt_boxes[0, 0] * init_img.height - pad_y
endx = gt_boxes[0, 3] * init_img.width + pad_x
endy = gt_boxes[0, 2] * init_img.height + pad_y
left_pad = max(0, int(-startx))
top_pad = max(0, int(-starty))
right_pad = max(0, int(endx - init_img.width + 1))
bottom_pad = max(0, int(endy - init_img.height + 1))
startx = int(startx + left_pad)
starty = int(starty + top_pad)
endx = int(endx + left_pad)
endy = int(endy + top_pad)
if top_pad or left_pad or bottom_pad or right_pad:
r = np.pad(img1_xiaobai[:, :, 0], ((top_pad, bottom_pad), (left_pad, right_pad)), mode='constant',
constant_values=128)
g = np.pad(img1_xiaobai[:, :, 1], ((top_pad, bottom_pad), (left_pad, right_pad)), mode='constant',
constant_values=128)
b = np.pad(img1_xiaobai[:, :, 2], ((top_pad, bottom_pad), (left_pad, right_pad)), mode='constant',
constant_values=128)
r = np.expand_dims(r, 2)
g = np.expand_dims(g, 2)
b = np.expand_dims(b, 2)
img1_xiaobai = np.concatenate((r, g, b), axis=2)
img1_xiaobai = Image.fromarray(img1_xiaobai)
# gt_boxes resize
init_img_crop = img1_xiaobai.crop(np.int32([startx, starty, endx, endy]))
init_img_crop = init_img_crop.resize([128, 128], resample=Image.BILINEAR)
init_img_array = np.array(init_img_crop)
return init_img_array
|
d01724e157042d5ac17215d1827ca2d05c18e1cc
|
05fe579c12f0013ce83a106083ddb66ace5e8f47
|
/mindinsight/lineagemgr/summary/lineage_summary_analyzer.py
|
de95e051088adb353e480d5c5a7310ab4bbb5b59
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"BSD-3-Clause"
] |
permissive
|
mindspore-ai/mindinsight
|
59d3f47144ada9a12d2c82d9826ad5f5288aed78
|
a774d893fb2f21dbc3edb5cd89f9e6eec274ebf1
|
refs/heads/master
| 2023-07-22T22:46:43.075617
| 2023-07-17T11:26:58
| 2023-07-17T11:26:58
| 250,692,948
| 224
| 24
|
Apache-2.0
| 2020-12-29T12:22:51
| 2020-03-28T01:58:56
|
Python
|
UTF-8
|
Python
| false
| false
| 7,632
|
py
|
lineage_summary_analyzer.py
|
# Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""This module provides python APIs to get lineage summary from summary log."""
import struct
from collections import namedtuple
from enum import Enum
from google.protobuf.json_format import MessageToDict
from google.protobuf.message import DecodeError
from mindinsight.datavisual.proto_files.mindinsight_lineage_pb2 import LineageEvent
from mindinsight.lineagemgr.common.exceptions.exceptions import MindInsightException, \
LineageSummaryAnalyzeException, LineageFileHandlerReadError
from mindinsight.lineagemgr.common.log import logger as log
from mindinsight.lineagemgr.common.validator.validate_path import safe_normalize_path
from mindinsight.lineagemgr.summary.file_handler import FileHandler
LineageInfo = namedtuple('LineageInfo', ['train_lineage', 'eval_lineage', 'dataset_graph'])
class SummaryTag(Enum):
"""The tag value of lineage fields."""
    # the values are the proto field names of LineageEvent, used with event.HasField()
WALL_TIME = 'wall_time'
STEP = 'step'
VERSION = 'version'
GRAPH = 'graph'
SUMMARY = 'summary'
TRAIN_LINEAGE = 'train_lineage'
EVAL_LINEAGE = 'evaluation_lineage'
DATASET_GRAPH = 'dataset_graph'
class SummaryAnalyzer:
"""
Summary log Analyzer.
Args:
file_path (str): The path of summary log.
Raises:
LineageSummaryAnalyzeException: Raise when read files failed.
"""
HEADER_SIZE = 8
HEADER_CRC_SIZE = 4
BODY_CRC_SIZE = 4
def __init__(self, file_path):
self.file_handler = FileHandler(file_path)
def load_events(self):
"""
Load events in summary log.
Returns:
generator, the event generator.
"""
while self._has_next():
yield self._read_event()
def _has_next(self):
"""
Check if the file has reached the end.
Returns:
bool, whether the file has reached the end.
"""
current_offset = self.file_handler.tell()
if current_offset < self.file_handler.size:
return True
return False
def _read_event(self):
"""
Read event.
Returns:
LineageEvent, the event body.
"""
body_size = self._read_header()
body_str = self._read_body(body_size)
event = LineageEvent().FromString(body_str)
return event
def _read_header(self):
"""
Read header information.
Returns:
int, the length of event body.
"""
header_str = self.file_handler.read(self.HEADER_SIZE)
self.file_handler.read(self.HEADER_CRC_SIZE)
body_len = struct.unpack("<Q", header_str)[0]
return body_len
def _read_body(self, body_size):
"""
Read event body information.
Args:
body_size (int): The size of event body.
Returns:
bytes, the event body in bytes.
"""
body_str = self.file_handler.read(body_size)
self.file_handler.read(self.BODY_CRC_SIZE)
return body_str
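# Record layout implied by _read_header/_read_body above (TFRecord-style):
#   [8-byte little-endian body length][4-byte header CRC][event body][4-byte body CRC]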
class LineageSummaryAnalyzer(SummaryAnalyzer):
"""
Summary log analyzer for lineage information.
Args:
file_path (str): The path of summary log.
Raises:
LineageSummaryAnalyzeException: If failed to get lineage information.
"""
def __init__(self, file_path):
file_path = safe_normalize_path(file_path, 'lineage_summary_path', None)
super(LineageSummaryAnalyzer, self).__init__(file_path)
def get_latest_info(self):
"""
Get latest lineage info in summary log file.
Returns:
LineageInfo, the lineage summary information.
"""
lineage_events = {
SummaryTag.TRAIN_LINEAGE: None,
SummaryTag.EVAL_LINEAGE: None,
SummaryTag.DATASET_GRAPH: None
}
for event in self.load_events():
for tag, _ in lineage_events.items():
if event.HasField(tag.value):
lineage_events[tag] = event
break
lineage_info = LineageInfo(
train_lineage=lineage_events.get(SummaryTag.TRAIN_LINEAGE),
eval_lineage=lineage_events.get(SummaryTag.EVAL_LINEAGE),
dataset_graph=lineage_events.get(SummaryTag.DATASET_GRAPH)
)
return lineage_info
@classmethod
def get_summary_infos(cls, file_path):
"""
Get lineage summary information from summary log file.
Args:
file_path (str): The file path of summary log.
Returns:
LineageInfo, the lineage summary information.
Raises:
LineageSummaryAnalyzeException: If failed to get lineage information.
"""
analyzer = cls(file_path)
err_msg = "Can not analyze lineage info, file path is %s. Detail: %s"
try:
lineage_info = analyzer.get_latest_info()
except (MindInsightException, IOError, DecodeError, LineageFileHandlerReadError) as err:
log.debug(err_msg, file_path, str(err))
raise LineageSummaryAnalyzeException(str(err))
except Exception as err:
log.debug(err_msg, file_path, str(err))
raise LineageSummaryAnalyzeException(str(err))
return lineage_info
@staticmethod
def get_user_defined_info(file_path):
"""
Get user defined info.
Args:
file_path (str): The file path of summary log.
Returns:
list, the list of dict format user defined information
which converted from proto message.
"""
all_user_message = []
summary_analyzer = SummaryAnalyzer(file_path)
for event in summary_analyzer.load_events():
if event.HasField("user_defined_info"):
user_defined_info = MessageToDict(
event,
preserving_proto_field_name=True
).get("user_defined_info")
user_dict = LineageSummaryAnalyzer._get_dict_from_proto(user_defined_info)
all_user_message.append(user_dict)
return all_user_message
@staticmethod
def _get_dict_from_proto(user_defined_info):
"""
Convert the proto message UserDefinedInfo to its dict format.
Args:
user_defined_info (UserDefinedInfo): The proto message of user defined info.
Returns:
dict, the converted dict.
"""
user_dict = dict()
proto_dict = user_defined_info.get("user_info")
for proto_item in proto_dict:
if proto_item and isinstance(proto_item, dict):
key, value = list(list(proto_item.values())[0].items())[0]
if isinstance(value, dict):
user_dict[key] = LineageSummaryAnalyzer._get_dict_from_proto(value)
else:
user_dict[key] = value
return user_dict
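# --- Hedged usage sketch (not part of the original module) ---
# Shows how the two analyzers above are typically driven: SummaryAnalyzer
# streams raw events, LineageSummaryAnalyzer pulls the latest lineage
# records. The path below is a hypothetical placeholder; a real run needs
# an actual MindSpore lineage summary log.
if __name__ == '__main__':
    demo_path = '/path/to/events.summary.lineage'  # hypothetical path
    for event in SummaryAnalyzer(demo_path).load_events():
        print(event)  # each event is a parsed LineageEvent proto
    print(LineageSummaryAnalyzer.get_summary_infos(demo_path))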
|
317306b3e96db4c2e1e3fab64c1b8a73ef16fe1f
|
c1b32c2e36f64c6d7c352242e9e1f6b16ea02da5
|
/tests/core/test_activations.py
|
49f66a774f8e7b2fce49afbb99963b39017bc8cc
|
[
"MIT"
] |
permissive
|
sicara/tf-explain
|
00246fde5305ad96611fdba23563c97fbc4cdc38
|
9d7d1e900ec3e3e4b5338fbc43dfb93539acecc2
|
refs/heads/master
| 2023-08-21T22:58:55.150396
| 2022-06-30T08:14:18
| 2022-06-30T08:14:18
| 196,956,879
| 1,033
| 122
|
MIT
| 2022-06-30T08:14:19
| 2019-07-15T08:26:24
|
Python
|
UTF-8
|
Python
| false
| false
| 1,050
|
py
|
test_activations.py
|
import numpy as np
from tf_explain.core.activations import ExtractActivations
def test_should_generate_subgraph(convolutional_model):
activations_model = ExtractActivations.generate_activations_graph(
convolutional_model, ["activation_1"]
)
assert activations_model.layers[-1].name == "activation_1"
def test_should_extract_activations(random_data, convolutional_model, mocker):
non_normalized_grid = np.array([[1, 2], [1, 2]])
mocker.patch(
"tf_explain.core.activations.filter_display", return_value=non_normalized_grid
)
explainer = ExtractActivations()
grid = explainer.explain(random_data, convolutional_model, ["activation_1"])
expected_output = np.array([[0, 255], [0, 255]]).astype("uint8")
np.testing.assert_array_equal(grid, expected_output)
def test_should_save_output_grid(output_dir):
grid = np.random.random((208, 208))
explainer = ExtractActivations()
explainer.save(grid, output_dir, "output.png")
assert len(list(output_dir.glob("output.png"))) == 1
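# --- Hedged sketch (not part of the original tests) ---
# The expected_output in test_should_extract_activations implies that
# explain() min-max rescales the grid into the uint8 range; this
# standalone reproduction of that arithmetic is an assumption drawn from
# the test values, not from tf-explain internals.
def _minmax_to_uint8(grid):
    grid = grid.astype("float32")
    span = grid.max() - grid.min()
    if span == 0:
        return np.zeros(grid.shape, dtype="uint8")
    return (255 * (grid - grid.min()) / span).astype("uint8")

# _minmax_to_uint8(np.array([[1, 2], [1, 2]])) -> [[0, 255], [0, 255]]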
|
655fe6fafd7e6bb6735db09c481187da4f1a524e
|
4f14b1901d909b0b917d35815e7b19233692f25b
|
/number-theoretic-transform-integer-dft/numbertheoretictransform-test.py
|
c5849f9c7e9525a07187cd13e1e625feb5a7fb1c
|
[] |
no_license
|
nayuki/Nayuki-web-published-code
|
e61a761e5c188aeacd35e5c8ddd005460545c94e
|
49414617b088ec4c4e339a6c1caa7ec0f40eb58f
|
refs/heads/master
| 2023-08-24T10:54:42.862243
| 2023-03-14T05:29:56
| 2023-03-14T05:29:56
| 25,706,873
| 133
| 53
| null | 2017-02-20T08:39:16
| 2014-10-24T20:33:24
|
Java
|
UTF-8
|
Python
| false
| false
| 9,567
|
py
|
numbertheoretictransform-test.py
|
#
# Number-theoretic transform test (Python)
#
# Copyright (c) 2022 Project Nayuki
# All rights reserved. Contact Nayuki for licensing.
# https://www.nayuki.io/page/number-theoretic-transform-integer-dft
#
import random, unittest
from typing import List, Set, Tuple
import numbertheoretictransform as ntt
class NumberTheoreticTransformTest(unittest.TestCase):
def test_forward_transform(self) -> None:
actual: List[int] = ntt.transform([6, 0, 10, 7, 2], 3, 11)
expect: List[int] = [3, 7, 0, 5, 4]
self.assertEqual(expect, actual)
def test_inverse_transform(self) -> None:
actual: List[int] = ntt.inverse_transform([3, 7, 0, 5, 4], 3, 11)
expect: List[int] = [6, 0, 10, 7, 2]
self.assertEqual(expect, actual)
def test_simple_convolution(self) -> None:
mod: int = 673
root: int = 326
vec0: List[int] = ntt.transform([4, 1, 4, 2, 1, 3, 5, 6], root, mod)
vec1: List[int] = ntt.transform([6, 1, 8, 0, 3, 3, 9, 8], root, mod)
vec2: List[int] = [(x * y % mod) for (x, y) in zip(vec0, vec1)]
actual: List[int] = ntt.inverse_transform(vec2, root, mod)
expect: List[int] = [123, 120, 106, 92, 139, 144, 140, 124]
self.assertEqual(expect, actual)
def test_automatic_convolution(self) -> None:
actual: List[int] = ntt.circular_convolve(
[4, 1, 4, 2, 1, 3, 5, 6],
[6, 1, 8, 0, 3, 3, 9, 8])
expect: List[int] = [123, 120, 106, 92, 139, 144, 140, 124]
self.assertEqual(expect, actual)
def test_transform_roundtrip_randomly(self) -> None:
TRIALS: int = 300
for _ in range(TRIALS):
veclen: int = random.randint(1, 100)
maxval: int = random.randint(1, 100)
vec: List[int] = [random.randrange(maxval + 1) for _ in range(veclen)]
temp, root, mod = ntt.find_params_and_transform(vec, maxval + 1)
inv: List[int] = ntt.inverse_transform(temp, root, mod)
self.assertEqual(vec, inv)
def test_transform_linearity_randomly(self) -> None:
TRIALS: int = 100
for _ in range(TRIALS):
veclen: int = random.randint(1, 100)
maxval: int = random.randint(1, 100)
vec0: List[int] = [random.randrange(maxval + 1) for _ in range(veclen)]
vec1: List[int] = [random.randrange(maxval + 1) for _ in range(veclen)]
out0, root, mod = ntt.find_params_and_transform(vec0, maxval + 1)
out1: List[int] = ntt.transform(vec1, root, mod)
out01: List[int] = [(x + y) % mod for (x, y) in zip(out0, out1)]
vec2: List[int] = [(x + y) % mod for (x, y) in zip(vec0, vec1)]
out2: List[int] = ntt.transform(vec2, root, mod)
self.assertEqual(out2, out01)
def test_convolution_randomly(self) -> None:
TRIALS: int = 100
for _ in range(TRIALS):
veclen: int = random.randint(1, 100)
maxval: int = random.randint(1, 100)
vec0: List[int] = [random.randrange(maxval + 1) for _ in range(veclen)]
vec1: List[int] = [random.randrange(maxval + 1) for _ in range(veclen)]
actual: List[int] = ntt.circular_convolve(vec0, vec1)
expect: List[int] = NumberTheoreticTransformTest._circular_convolve(vec0, vec1)
self.assertEqual(expect, actual)
@staticmethod # Naive algorithm
def _circular_convolve(vec0: List[int], vec1: List[int]) -> List[int]:
assert len(vec0) == len(vec1)
result: List[int] = [0] * len(vec0)
for (i, val0) in enumerate(vec0):
for (j, val1) in enumerate(vec1):
result[(i + j) % len(vec0)] += val0 * val1
return result
def test_transform_radix2_vs_naive(self) -> None:
TRIALS: int = 300
for _ in range(TRIALS):
veclen: int = 2**random.randrange(8)
maxval: int = random.randint(1, 100)
vec: List[int] = [random.randrange(maxval + 1) for _ in range(veclen)]
temp, root, mod = ntt.find_params_and_transform(vec, maxval + 1)
ntt.transform_radix_2(vec, root, mod)
self.assertEqual(temp, vec)
def test_transform_radix2_roundtrip_randomly(self) -> None:
TRIALS: int = 10
for _ in range(TRIALS):
veclen = 2**random.randint(0, 16)
vallimit = 2**random.randint(1, 16)
invec: List[int] = [random.randrange(vallimit) for _ in range(veclen)]
mod: int = ntt.find_modulus(len(invec), vallimit)
root: int = ntt.find_primitive_root(len(invec), mod - 1, mod)
vec: List[int] = list(invec)
ntt.transform_radix_2(vec, root, mod)
ntt.transform_radix_2(vec, ntt.reciprocal(root, mod), mod)
scaler: int = ntt.reciprocal(veclen, mod)
vec = [(x * scaler % mod) for x in vec]
self.assertEqual(invec, vec)
def test_find_generator(self) -> None:
CASES: List[Tuple[int,int,Set[int]]] = [
( 2, 1, {1}),
( 3, 2, {2}),
( 4, 2, {3}),
( 5, 4, {2, 3}),
( 6, 2, {5}),
( 7, 6, {3, 5}),
( 8, 4, set()),
( 9, 6, {2, 5}),
(10, 4, {3, 7}),
(11, 10, {2, 6, 7, 8}),
(12, 4, set()),
(13, 12, {2, 6, 7, 11}),
(14, 6, {3, 5}),
(15, 8, set()),
(16, 8, set()),
(17, 16, {3, 5, 6, 7, 10, 11, 12, 14}),
(18, 6, {5, 11}),
(19, 18, {2, 3, 10, 13, 14, 15}),
(20, 8, set()),
(21, 12, set()),
(22, 10, {7, 13, 17, 19}),
(23, 22, {5, 7, 10, 11, 14, 15, 17, 19, 20, 21}),
]
for (mod, totient, gens) in CASES:
if len(gens) > 0:
gen: int = ntt.find_generator(totient, mod)
self.assertTrue(gen in gens)
else:
self.assertRaises(ValueError, ntt.find_generator, totient, mod)
def test_is_primitive_root(self) -> None:
CASES: List[Tuple[int,int,Set[int]]] = [
( 2, 1, {1}),
( 3, 2, {2}),
( 4, 2, {3}),
( 5, 2, {4}),
( 5, 4, {2, 3}),
( 6, 2, {5}),
( 7, 2, {6}),
( 7, 3, {2, 4}),
( 7, 6, {3, 5}),
( 8, 2, {3, 5, 7}),
( 8, 4, set()),
( 9, 2, {8}),
( 9, 3, {4, 7}),
( 9, 6, {2, 5}),
(10, 2, {9}),
(10, 4, {3, 7}),
(11, 2, {10}),
(11, 5, {3, 4, 5, 9}),
(11, 10, {2, 6, 7, 8}),
(12, 2, {5, 7, 11}),
(12, 4, set()),
(13, 2, {12}),
(13, 3, {3, 9}),
(13, 4, {5, 8}),
(13, 6, {4, 10}),
(13, 12, {2, 6, 7, 11}),
(14, 2, {13}),
(14, 3, {9, 11}),
(14, 6, {3, 5}),
(15, 2, {4, 11, 14}),
(15, 4, {2, 7, 8, 13}),
(15, 8, set()),
(16, 8, set()),
(17, 16, {3, 5, 6, 7, 10, 11, 12, 14}),
(18, 6, {5, 11}),
(19, 18, {2, 3, 10, 13, 14, 15}),
(20, 8, set()),
(21, 12, set()),
(22, 10, {7, 13, 17, 19}),
(23, 22, {5, 7, 10, 11, 14, 15, 17, 19, 20, 21}),
]
for (mod, degree, primroots) in CASES:
for i in range(mod):
self.assertEqual(i in primroots, ntt.is_primitive_root(i, degree, mod))
def test_is_primitive_root_prime_generator(self) -> None:
TRIALS: int = 1_000
for _ in range(TRIALS):
p: int = random.randrange(2, 10_000)
if not ntt.is_prime(p):
continue
totient: int = p - 1
val: int = random.randrange(p)
expect: bool = True
temp: int = 1
for _ in range(totient - 1):
temp = temp * val % p
expect = expect and (temp != 1)
temp = temp * val % p
expect = expect and (temp == 1)
actual: bool = ntt.is_primitive_root(val, totient, p)
self.assertEqual(expect, actual)
def test_reciprocal(self) -> None:
CASES: List[Tuple[int,int,int]] = [
( 2, 1, 1),
( 3, 1, 1),
( 3, 2, 2),
( 4, 1, 1),
( 4, 3, 3),
( 5, 1, 1),
( 5, 2, 3),
( 5, 3, 2),
( 5, 4, 4),
( 6, 1, 1),
( 6, 5, 5),
( 7, 1, 1),
( 7, 2, 4),
( 7, 3, 5),
( 7, 4, 2),
( 7, 5, 3),
( 7, 6, 6),
( 8, 1, 1),
( 8, 3, 3),
( 8, 5, 5),
( 8, 7, 7),
( 9, 1, 1),
( 9, 2, 5),
( 9, 4, 7),
( 9, 5, 2),
( 9, 7, 4),
( 9, 8, 8),
(10, 1, 1),
(10, 3, 7),
(10, 7, 3),
(10, 9, 9),
(11, 1, 1),
(11, 2, 6),
(11, 3, 4),
(11, 4, 3),
(11, 5, 9),
(11, 6, 2),
(11, 7, 8),
(11, 8, 7),
(11, 9, 5),
(11, 10, 10),
]
for (mod, x, y) in CASES:
self.assertEqual(y, ntt.reciprocal(x, mod))
TRIALS: int = 1_000
for _ in range(TRIALS):
p: int = random.randrange(2, 10_000)
if not ntt.is_prime(p):
continue
for _ in range(10):
x = random.randrange(1, p)
y = ntt.reciprocal(x, p)
self.assertTrue(0 <= y < p)
self.assertEqual(1, x * y % p)
def test_unique_prime_factors(self) -> None:
CASES: List[Tuple[int,List[int]]] = [
( 1, []),
( 2, [2]),
( 3, [3]),
( 4, [2]),
( 5, [5]),
( 6, [2, 3]),
( 7, [7]),
( 8, [2]),
( 9, [3]),
(10, [2, 5]),
(11, [11]),
(12, [2, 3]),
(13, [13]),
(14, [2, 7]),
(15, [3, 5]),
(16, [2]),
]
for (n, expect) in CASES:
actual: List[int] = ntt.unique_prime_factors(n)
self.assertEqual(expect, actual)
TRIALS: int = 1_000
for _ in range(TRIALS):
n = random.randrange(2, 10_000)
facts: List[int] = ntt.unique_prime_factors(n)
self.assertEqual(ntt.is_prime(n), (len(facts) == 1) and (facts[0] == n))
def test_is_prime(self) -> None:
CASES: List[Tuple[int,bool]] = [
( 2, True ),
( 3, True ),
( 4, False),
( 5, True ),
( 6, False),
( 7, True ),
( 8, False),
( 9, False),
(10, False),
(11, True ),
(12, False),
(13, True ),
(14, False),
(15, False),
(16, False),
]
for (n, expect) in CASES:
actual: bool = ntt.is_prime(n)
self.assertEqual(expect, actual)
def test_sqrt(self) -> None:
CASES: List[Tuple[int,int]] = [
(0, 0),
(1, 1),
(2, 1),
(3, 1),
(4, 2),
(5, 2),
(6, 2),
(7, 2),
(8, 2),
(9, 3),
]
for (x, y) in CASES:
self.assertEqual(y, ntt.sqrt(x))
TRIALS: int = 1_000
for _ in range(TRIALS):
x = random.randrange(10_000)
y = ntt.sqrt(x)
self.assertTrue(y**2 <= x < (y+1)**2)
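# --- Hedged reference sketch (not part of the original test file) ---
# The tests above exercise ntt.transform without showing its contract.
# Under the usual definition, the forward number-theoretic transform is a
# DFT over the integers mod `mod`, with `root` a primitive n-th root of
# unity: out[k] = sum_j vec[j] * root**(j*k) (mod mod). This O(n^2)
# sketch states that assumption explicitly so the literal expected values
# above (e.g. [6, 0, 10, 7, 2] -> [3, 7, 0, 5, 4] with root 3 mod 11)
# can be checked by hand; `naive_transform` is not part of the module.
def naive_transform(vec: List[int], root: int, mod: int) -> List[int]:
	return [sum(v * pow(root, i * j, mod) for j, v in enumerate(vec)) % mod
		for i in range(len(vec))]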
if __name__ == "__main__":
unittest.main()
|
580e9479bdfd7b6412e8d3733893fabe46fab2b9
|
83e41659ff9a3d8b48c23ce1092117356154c54b
|
/tests/test_tlv.py
|
8e9f300c1b461ee5d0c89b0465dc5d160f39d9c6
|
[
"Apache-2.0"
] |
permissive
|
ikalchev/HAP-python
|
95a14068d93c663326788b7ccdc343ef787d6ab8
|
5f45a5e208ef33e37228e9fa9e5c08732d9816b2
|
refs/heads/dev
| 2023-08-13T00:29:41.795121
| 2023-07-31T19:11:39
| 2023-07-31T19:11:39
| 106,929,485
| 581
| 149
|
NOASSERTION
| 2023-09-03T09:55:04
| 2017-10-14T13:21:38
|
Python
|
UTF-8
|
Python
| false
| false
| 545
|
py
|
test_tlv.py
|
"""Tests for pyhap.tlv."""
import pytest
from pyhap import tlv
def test_tlv_round_trip():
"""Test tlv can round trip TLV8 data."""
message = tlv.encode(
b"\x01",
b"A",
b"\x01",
b"B",
b"\x02",
b"C",
)
decoded = tlv.decode(message)
assert decoded == {
b"\x01": b"AB",
b"\x02": b"C",
}
def test_tlv_invalid_pairs():
"""Test we encode fails with an odd amount of args."""
with pytest.raises(ValueError):
tlv.encode(b"\x01", b"A", b"\02")
|
ada58bc71b5af02adeb542b8cd1bea9bae044d90
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/L1Trigger/GlobalTriggerAnalyzer/test/L1GtDataEmulAnalyzer_cfg.py
|
2229a8359f25dd15c2ca8e47e961afdb25e50883
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 14,238
|
py
|
L1GtDataEmulAnalyzer_cfg.py
|
#
# cfg file for:
#
# Run the L1 GT emulator on the unpacked GCT and GMT data.
# Compare the GT data records with the GT emulated records
import FWCore.ParameterSet.Config as cms
# process
process = cms.Process("RunL1GtDataEmulAnalyzer")
# number of events to be processed and source file
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(50)
)
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring('file:/afs/cern.ch/user/g/ghete/scratch0/CmsswTestFiles/testGt_DataEmulAnalyzer_source.root')
)
process.PoolSource.fileNames = ['/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/04AA7F35-C426-DD11-B047-001D09F2516D.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/04F935C6-C426-DD11-AE42-001D09F24637.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/0663785C-C626-DD11-8118-000423D94534.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/068D1384-C526-DD11-A027-000423D94A04.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/069F542D-C426-DD11-B4FD-001D09F290CE.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/088C9FC0-C426-DD11-80A5-000423D99CEE.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/0A89B10E-C526-DD11-886F-001D09F2532F.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/0E2F9E85-C626-DD11-B460-001D09F231B0.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/1012C6C0-C426-DD11-8E75-000423D6B444.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/1212CF82-C526-DD11-8840-001617E30D0A.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/14287A88-C526-DD11-978F-000423D99896.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/16AE8487-C526-DD11-88F5-001D09F24498.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/1A728D7F-C526-DD11-9363-001617C3B6E2.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/1C63F034-C426-DD11-98A0-001D09F24F65.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/1E4EC288-C526-DD11-94B0-001D09F2525D.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/20C9B509-C526-DD11-BD8C-001D09F244DE.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/2642ADCA-C426-DD11-A971-001D09F2906A.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/2A9FB786-C526-DD11-BB6D-001617C3B6CC.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/2C1C5209-C526-DD11-A4B5-001D09F24D67.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/2E4FB3C8-C426-DD11-91E0-0019B9F705A3.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/2E53B076-C526-DD11-AF83-001D09F2A465.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/325C9484-C626-DD11-9123-001D09F24F65.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/34D18586-C526-DD11-AE6E-001617C3B69C.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/3A4BB185-C526-DD11-A030-0019B9F705A3.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/3AFE8409-C526-DD11-9F2C-001617DBD332.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/3CDBC0B0-C626-DD11-9A8F-001D09F2525D.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/3E44DA7D-C526-DD11-8555-000423D951D4.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/4058E48A-C526-DD11-97FF-001617E30F56.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/44F9F281-C526-DD11-B6CE-000423D99AA2.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/48D9347F-C526-DD11-B90E-001617E30D52.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/4EF5B58B-C526-DD11-B608-001D09F2A49C.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/50E8D437-C426-DD11-A66B-001D09F251FE.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/5473F70C-C526-DD11-84D8-001D09F24DDA.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/5690A985-C626-DD11-8F78-001D09F2932B.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/58B44484-C526-DD11-BE92-000423D9939C.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/5C95A980-C526-DD11-AA27-001617DBD332.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/5CAA59AB-C626-DD11-9451-001D09F25456.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/5EA14E0A-C526-DD11-AF5C-001D09F28E80.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/5EADB005-C526-DD11-B7A1-001D09F24399.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/668DEA01-C526-DD11-B7DB-001D09F2546F.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/66F05B87-C526-DD11-ACAA-000423D94E70.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/6ABEE90D-C626-DD11-B9E9-0030487A18F2.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/6C142B89-C526-DD11-BC79-001617C3B706.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/6C36B389-C526-DD11-A4FF-001D09F28F25.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/6C6E8381-C526-DD11-949A-000423D98B28.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/6E15E00D-C526-DD11-8DBB-001D09F24EC0.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/6E28C8BD-C426-DD11-8219-000423D944F8.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/70E8F2C1-C426-DD11-9B3E-001D09F2503C.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/72CEF283-C526-DD11-B393-000423D98DC4.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/76D1AB05-C526-DD11-A4DB-001D09F2841C.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/7AAA5B7D-C526-DD11-9E9C-000423D944FC.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/7C1F29C1-C426-DD11-81E4-000423D944FC.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/7CDE918E-C526-DD11-8B51-0019B9F707D8.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/8037767E-C626-DD11-839B-001D09F2960F.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/8060D709-C526-DD11-B39E-001D09F251E0.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/82065F1A-C626-DD11-A0AC-001D09F231C9.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/84D88E85-C526-DD11-A46F-001617C3B6CE.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/8620AD09-C526-DD11-B0DC-001D09F28F11.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/863AE5CB-C426-DD11-8154-001D09F28E80.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/884FA985-C626-DD11-82A9-001D09F291D7.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/9692F0C6-C426-DD11-AD86-0019B9F72BFF.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/98FBFF87-C526-DD11-A0AE-001D09F24EC0.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/A8AB9B85-C626-DD11-BCDE-001D09F2305C.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/AA0E6689-C526-DD11-9FD8-001D09F24DA8.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/AE1B864B-C326-DD11-AF5A-000423D986C4.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/AE437109-C526-DD11-BC07-001D09F2438A.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/AEF6E086-C526-DD11-8326-0019B9F704D1.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/B4710DA7-C526-DD11-B659-001D09F23A6B.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/B4EF9581-C626-DD11-8E4A-001D09F28E80.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/BAA4138B-C526-DD11-870F-000423D944F0.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/BAB50884-C626-DD11-8418-001D09F2525D.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/C4147609-C526-DD11-A649-001D09F292D1.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/C6B5B17F-C526-DD11-8145-000423D99660.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/CA88975E-C526-DD11-8ED6-001617C3B76E.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/CEB69E7F-C526-DD11-9E94-000423D98BC4.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/CECA5B7C-C526-DD11-A158-001617DBD5AC.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/D0352C1C-C526-DD11-9B7B-001D09F29538.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/D2D36EC1-C426-DD11-B413-001D09F250AF.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/D42DF0D3-C526-DD11-9A01-001D09F2A49C.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/D6CAAA9D-C526-DD11-9F52-000423D98750.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/DC14738A-C526-DD11-8814-000423D99264.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/DCE0C480-C526-DD11-993D-000423D9997E.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/DEE7C481-C526-DD11-8E06-001617E30D12.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/E435D97C-C626-DD11-969E-001D09F251E0.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/E43DDA88-C526-DD11-B226-001D09F2AD4D.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/E839788B-C526-DD11-9487-000423D99BF2.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/EAA00680-C526-DD11-AEA9-000423D98800.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/F23E5C02-C526-DD11-81FC-001D09F23D1D.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/F2BD4C0B-C526-DD11-A42F-001D09F23A3E.root',
'/store/data/2008/5/20/T0ReReco-GlobalCruzet1-A-v1/0004/FA19908B-C526-DD11-A839-0019B9F730D2.root']
# load and configure modules
process.load("Configuration.StandardSequences.FakeConditions_cff")
process.load("Configuration.StandardSequences.GeometryDB_cff")
process.load("L1Trigger.Configuration.L1Config_cff")
# L1 menu
process.load("L1TriggerConfig.L1GtConfigProducers.Luminosity.lumi1x1032.L1Menu_CRUZET200805_gr7_muon_cff")
# Global Trigger emulator
import L1Trigger.GlobalTrigger.gtDigis_cfi
process.l1GtEmulDigis = L1Trigger.GlobalTrigger.gtDigis_cfi.gtDigis.clone()
# block GCT input and the technical triggers (only FDL and GMT active)
process.l1GtParameters.DaqActiveBoards = 0x010d
# block GMT input (0xdd12)
#process.l1GtParameters.DaqActiveBoards = 0x00FF
# block both GCT and GMT (FDL and techTrig active)
#process.l1GtParameters.DaqActiveBoards = 0x0003
# input tag for GMT readout collection:
process.l1GtEmulDigis.GmtInputTag = 'gtDigis'
# input tag for GCT readout collections:
#process.l1GtEmulDigis.GctInputTag = 'gctDigis'
# logical flag to produce the L1 GT DAQ readout record
# if true, produce the record (default)
#process.l1GtEmulDigis.ProduceL1GtDaqRecord = False
# logical flag to produce the L1 GT EVM readout record
# if true, produce the record (default)
#process.l1GtEmulDigis.ProduceL1GtEvmRecord = False
# logical flag to produce the L1 GT object map record
# if true, produce the record (default)
#process.l1GtEmulDigis.ProduceL1GtObjectMapRecord = False
# logical flag to write the PSB content in the L1 GT DAQ record
# if true, write the PSB content in the record (default)
#process.l1GtEmulDigis.WritePsbL1GtDaqRecord = False
# logical flag to read the technical trigger records
# if true, it will read via getMany the available records (default)
#process.l1GtEmulDigis.ReadTechnicalTriggerRecords = False
# number of "bunch crossing in the event" (BxInEvent) to be emulated
# symmetric around L1Accept (BxInEvent = 0):
# 1 (BxInEvent = 0); 3 (F 0 1) (standard record); 5 (E F 0 1 2) (debug record)
# even numbers (except 0) "rounded" to the nearest lower odd number
# negative value: emulate TotalBxInEvent as given in EventSetup
#process.l1GtEmulDigis.EmulateBxInEvent = 3
# Global Trigger report
import L1Trigger.GlobalTriggerAnalyzer.l1GtTrigReport_cfi
process.l1GtTrigReportData = L1Trigger.GlobalTriggerAnalyzer.l1GtTrigReport_cfi.l1GtTrigReport.clone()
process.l1GtTrigReportData.L1GtRecordInputTag = 'gtDigis'
#
import L1Trigger.GlobalTriggerAnalyzer.l1GtTrigReport_cfi
process.l1GtTrigReportEmul = L1Trigger.GlobalTriggerAnalyzer.l1GtTrigReport_cfi.l1GtTrigReport.clone()
process.l1GtTrigReportEmul.L1GtRecordInputTag = 'l1GtEmulDigis'
#
# compare the L1 GT data and emulator digis
process.load("L1Trigger.GlobalTriggerAnalyzer.l1GtDataEmulAnalyzer_cfi")
process.l1GtDataEmulAnalyzer.L1GtEmulInputTag = 'l1GtEmulDigis'
# paths to be run
process.p = cms.Path(process.l1GtEmulDigis*process.l1GtDataEmulAnalyzer*process.l1GtTrigReportData*process.l1GtTrigReportEmul)
# services
# Message Logger
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.MessageLogger.cerr.enable = False
process.MessageLogger.cout = cms.untracked.PSet(
enable = cms.untracked.bool(True),
threshold = cms.untracked.string('INFO'),
INFO = cms.untracked.PSet(
#limit = cms.untracked.int32(-1)
limit = cms.untracked.int32(1000)
)#,
#threshold = cms.untracked.string('DEBUG'), ## DEBUG
#DEBUG = cms.untracked.PSet( ## DEBUG, all messages
#
# limit = cms.untracked.int32(-1)
#)
)
process.MessageLogger.debugModules = ['l1GtEmulDigis']
# histogram service
process.TFileService = cms.Service("TFileService",
fileName = cms.string('L1GtDataEmulAnalyzer.root')
)
# summary
process.options = cms.untracked.PSet(
wantSummary = cms.untracked.bool(True)
)
# output
process.outputL1GtDataEmul = cms.OutputModule("PoolOutputModule",
fileName = cms.untracked.string('testGt_DataEmulAnalyzer_output.root'),
outputCommands = cms.untracked.vstring('drop *',
'keep *_l1GtDataDigis_*_*',
'keep *_l1GtEmulDigis_*_*',
'keep *_l1GctDataDigis_*_*')
)
process.outpath = cms.EndPath(process.outputL1GtDataEmul)
|
f857c21c816ef4e14195d504a07f7a26a9f52a48
|
35cd401ef876bec39366e5724bae71581c0cb658
|
/Python/klampt/control/system_id.py
|
2a53848b0bdb1e34cb521d23e5536931fc5bb574
|
[] |
permissive
|
krishauser/Klampt
|
bd450b8c67189b31abe4eb056707d50da3aa651b
|
dbaf38ca290a36fba9a8f4f9b6a49fda689f6585
|
refs/heads/master
| 2023-08-29T21:54:40.758699
| 2023-07-21T12:34:38
| 2023-07-21T12:34:38
| 13,944,923
| 327
| 98
|
BSD-3-Clause
| 2023-08-26T04:20:26
| 2013-10-29T02:46:08
|
C++
|
UTF-8
|
Python
| false
| false
| 16,416
|
py
|
system_id.py
|
import numpy as np
import math
def check_symmetry(A,tol=1e-7):
err = np.amax(np.abs(A - A.T))
if err >= tol:
print("Error in matrix symmetry? error %f"%(err,))
return False
return True
def sherman_woodbury_inplace(Ainv,v,w,c=1.0):
"""Computes (A+vcw^T)^-1 given A^-1, w, and w.
Does it inplace without creating a temp matrix"""
if v is w:
temp = np.dot(Ainv,v)
den = 1.0+np.dot(v,temp)*c
scale = c/den
for i in range(Ainv.shape[0]):
Ainv[i,:] -= (scale*temp[i])*temp
else:
AinvV = np.dot(Ainv,v)
AinvW = np.dot(Ainv,w)
scale = c/(1.0+np.dot(v,AinvW)*c)
for i in range(Ainv.shape[0]):
Ainv[i,:] -= (scale*AinvV[i])*AinvW
class OnlineLeastSquares:
"""Maintains solutions x to the L2-regularized least squares problem
min ||Ax-b||^2 + lambda ||x||^2 with dynamic updates to the rows
of the A matrix and b vector.
If the problem is degenerate, the degenerate flag is set to True and
the minimum norm ||x||^2 solution that satisfies Ax=b is returned.
For numerical stability, this class internally monitors the norm of
self.AtA and rescales it periodically. It maintains a parameter
and all the parameters are scaled by self.scale
- self.AtA = At*A / self.scale
- self.Atb = At*b / self.scale
- self.btb = bt*b / self.scale
- self.AtAinv = (At*A)^-1 * self.scale
    By default, when ||self.AtA|| exceeds self.rescaleThreshold (1e1), rescaling is performed.
"""
def __init__(self,n,regularizationLambda=0):
self.n = n
self.count = 0
self.sumWeight = 0
self.scale = 1
self.rescaleThreshold = 1e1
self.AtA = np.zeros((n,n))
self.Atb = np.zeros(n)
self.btb = 0
self.AtAinv = np.zeros((n,n))
self.degenerate = True
self.x = np.zeros(n)
self.regularizationLambda = regularizationLambda
if regularizationLambda > 0:
self.AtA = np.eye(n)*regularizationLambda
self.AtAinv = np.eye(n)*(1.0/regularizationLambda)
self.degenerate = False
self.sumWeight = 0
def setPrior(self,xPrior,priorWeight):
"""Can set a prior belief on x. Works similarly to regularization
but MUST be done before adding points. """
assert self.regularizationLambda == 0
if not isinstance(xPrior,np.ndarray):
xPrior = np.array(xPrior)
self.count = 1
self.sumWeight = priorWeight
self.scale = 1
self.AtA = np.eye(self.n)*priorWeight
self.AtAinv = np.eye(self.n)/priorWeight
self.Atb = xPrior*priorWeight
self.btb = np.dot(xPrior,xPrior)*priorWeight
self.degenerate = False
self.x = xPrior
def solution(self):
return self.x
def residualNorm2(self):
"""Returns the squared norm of the residual ||Ax-b||^2. Uses the
formula ||Ax-b||^2 = x^T At A x - 2 x^T At b + b^T b"""
r2 = (np.dot(self.x,np.dot(self.AtA,self.x)-2.0*self.Atb) + self.btb)*self.scale
if self.regularizationLambda > 0:
r2 -= self.regularizationLambda*np.dot(self.x,self.x)
return r2
def residualNorm(self):
"""Returns the norm of the residual ||Ax-b||. """
return math.sqrt(self.residualNorm2())
def standardError2(self):
"""Returns the square of the OLS estimate of the standard error."""
if self.count<=self.n:
return float('inf')
return self.residualNorm2()/self.sumWeight*(self.count / (self.count-self.n))
def standardError(self):
"""Returns the OLS estimate of the standard error."""
return math.sqrt(self.standardError2())
def solutionCovariance(self):
"""Returns the covariance matrix of the solution x"""
return self.standardError2()*self.AtAinv
def solutionStandardErrors(self):
"""Returns the standard errors of the solution x"""
s2 = self.standardError2()
res = [0]*self.n
for i in range(self.n):
try:
res[i] = math.sqrt(s2*self.AtAinv[i,i])
except ValueError:
res[i] = float('nan')
return res
def predictionAndVariance(self,a):
"""Returns the mean and variance of the prediction at the input
point a."""
b = np.dot(a,self.x)
vc = self.standardError2()
#x is distributed according to a gaussian with mean self.x and
#variance solutionCovariance. Dot product has variance
#Var(a^T x) = a^T Var(x) a
        #add on the observation-noise variance (the 1.0 term) plus the
        #parameter-uncertainty contribution a^T (AtA)^-1 a
return (b, vc * (1.0 + np.dot(a,np.dot(self.AtAinv,a))))
def setLambda(self,regularizationLambda):
"""Changes the regularization term (and maintains the solution).
Cost is O(n^3) """
self.AtA *= self.scale
self.AtA += (regularizationLambda-self.regularizationLambda)*np.eye(self.n)
self.AtA /= self.scale
try:
self.AtAinv = np.linalg.inv(self.AtA)
self.degenerate = False
except np.linalg.LinAlgError:
self.AtAinv = np.linalg.pinv(self.AtA)
self.degenerate = True
self.x = np.dot(self.AtAinv,self.Atb)
self.regularizationLambda = regularizationLambda
def add(self,a,b,weight=1.0):
"""Adds a new datapoint a^T x ~= b, weighted by the given weight,
and updates the solution x accordingly. The update runs in O(n^2)
time."""
assert len(a)==self.n
assert self.AtA.shape==(self.n,self.n)
assert self.AtAinv.shape==(self.n,self.n)
assert self.Atb.shape==(self.n,)
assert weight >= 0
if not isinstance(a,np.ndarray):
a = np.array(a)
self.count += 1
w = weight/self.scale
self.sumWeight += weight
self.Atb += (w*b)*a
self.btb += w*b*b
for i in range(self.n):
self.AtA[i,:] += (w*a[i])*a
#assert check_symmetry(self.AtA)
if not self.degenerate:
#sherman woodbury update
sherman_woodbury_inplace(self.AtAinv,a,a,w)
assert check_symmetry(self.AtAinv)
else:
self.calc_AtAinv()
self.x = np.dot(self.AtAinv,self.Atb)
self.checkRescale()
def checkRescale(self):
n = np.linalg.norm(self.AtA)
if n > self.rescaleThreshold:
self.scale *= n
self.AtA *= 1.0/n
self.Atb *= 1.0/n
self.btb *= 1.0/n
self.calc_AtAinv()
def discount(self,discountFactor,type='geometric'):
"""Discounts the existing data by the given discount factor (should
be < 1). Runs in time O(n^2) if the regularization weight is zero,
otherwise it is O(n^3). Future implementations might get this down
to O(n^2) in the latter case.
hyperbolic discounting takes the factor as a sort of drifting
contribution to the average"""
assert(discountFactor > 0)
if type == 'hyperbolic':
#avg' = avg*(1-alpha(N)) + alpha(N)*item
#if alpha(N) ~= 1/(N+1) then this converges to the true average
#sum' = (N+1)/N sum*(1-alpha(N)) + alpha(N)(N+1)*item
#sum' = sum*(1-alpha(N))/(N alpha(N)) + item
#if alpha is a constant, we get
#sum' = sum*(1/alpha-1)/N + item
self.discount((1.0/discountFactor - 1.0)/self.sumWeight)
return
if self.regularizationLambda == 0:
self.scale *= discountFactor
self.sumWeight *= discountFactor
else:
AtAreg = np.eye(self.n)*self.regularizationLambda/self.scale
self.AtA = self.AtA*discountFactor + AtAreg
self.Atb *= discountFactor
self.btb *= discountFactor
self.sumWeight *= discountFactor
self.calc_AtAinv()
self.x = np.dot(self.AtAinv,self.Atb)
return
def calc_AtAinv(self):
assert check_symmetry(self.AtA)
if self.count < self.n:
self.AtAinv = np.linalg.pinv(self.AtA)
self.degenerate = True
else:
try:
self.AtAinv = np.linalg.inv(self.AtA)
if check_symmetry(self.AtAinv):
#good solution
self.degenerate = False
else:
self.AtAinv = np.linalg.pinv(self.AtA)
self.degenerate = True
print("OnlineLS: degenerate matrix, inv failed to produce symmetric matrix")
except np.linalg.LinAlgError:
self.AtAinv = np.linalg.pinv(self.AtA)
self.degenerate = True
print("OnlineLS: degenerate matrix")
return
class LinearSystemID:
"""System identification for a system y = Ax + Bu + C with m state
variables and n inputs.
Usage first sets up the matrix structure (optional), then repeatedly calls
add(x,u,y). To retrieve the matrices, call :meth:`getModel`. To retrieve
a prediction, call :meth:`getOutput`
Supports freezing certain entries of the the matrix structure. This is done
using 3 pattern matrices, where None indicates a free value and a numeric
constant indicates a fixed value. The patterns must be frozen before
datapoints are consumed.
"""
def __init__(self,m,n):
self.m,self.n = m,n
self.coeffPattern = [None,None,None]
self.estimators = [OnlineLeastSquares(self.m+self.n+1) for i in range(m)]
def setPattern(self,Apattern,Bpattern,Cpattern):
"""The patterns are list-of-lists of size mxm, mxn, and a list of
size m, which indicate whether the (i,j)'th entry of the A,B, and C
matrices are fixed (respectively).
If Xpattern[i][j] = None, this indicates a free coefficient, but if
Xpattern[i][j] is numeric, it indicates that the coefficient is fixed
at that value.
"""
self.coeffPattern = [Apattern,Bpattern,Cpattern]
for i in range(self.m):
self._updateEstimatorSize(i)
def fixA(self,i,j,value):
"""Sets the i,j'th entry of the A matrix to a fixed value"""
if self.coeffPattern[0] == None:
m,n=self.m,self.n
self.coeffPattern[0] = [[None]*m for i in range(m)]
self.coeffPattern[0][i][j]=value
self._updateEstimatorSize(i)
def fixB(self,i,j,value):
"""Sets the i,j'th entry of the B matrix to a fixed value"""
if self.coeffPattern[1] == None:
m,n=self.m,self.n
            self.coeffPattern[1] = [[None]*n for i in range(m)]
self.coeffPattern[1][i][j]=value
self._updateEstimatorSize(i)
def fixC(self,i,value):
"""Sets the i'th entry of the C vector to a fixed value"""
if self.coeffPattern[2] == None:
m,n=self.m,self.n
self.coeffPattern[2] = [None]*m
self.coeffPattern[2][i]=value
self._updateEstimatorSize(i)
def add(self,x,u,y,weight=1.0):
"""Adds a new datapoint to the estimator"""
assert(len(y)==self.m)
assert(len(x)==self.m)
assert(len(u)==self.n)
if isinstance(x,np.ndarray): x = x.tolist()
if isinstance(u,np.ndarray): u = u.tolist()
xu1 = x + u + [1.0]
if self.coeffPattern == [None,None,None]:
for yi,e in zip(y,self.estimators):
e.add(xu1,yi,weight)
else:
#each row might have some fixed values
for i,(yi,e) in enumerate(zip(y,self.estimators)):
if e == None: continue
(xuc,constOffset) = self._toEstimator(i,x,u)
rhs = yi - constOffset
e.add(xuc,rhs,weight)
return
def discount(self,discountFactor,type='geometric'):
"""Reduces the effects of prior readings."""
for e in self.estimators:
e.discount(discountFactor,type)
return
def setModelPrior(self,A,B,C,priorWeight):
"""Adds in a prior belief for the model.
Must be called AFTER fixing coefficients and BEFORE adding any
datapoints."""
Cpattern = self.coeffPattern[2]
for i in range(self.m):
ai = A[i,:].tolist()
bi = B[i,:].tolist()
(xuc,constant) = self._toEstimator(i,ai,bi)
if Cpattern == None or Cpattern[i] == None:
xuc[-1] = C[i]
self.estimators[i].setPrior(np.array(xuc),priorWeight)
return
def getModel(self):
"""Returns the estimated triple (A,B,C) as numpy arrays"""
m,n = self.m,self.n
A = np.zeros((m,m))
B = np.zeros((m,n))
C = np.zeros(m)
Apattern,Bpattern,Cpattern = self.coeffPattern
for i,e in enumerate(self.estimators):
aofs = 0
bofs = m
cofs = m+n
if Apattern==None:
ai = e.x[aofs:m+aofs]
else:
bofs=aofs
ai = []
for j,pj in enumerate(Apattern[i]):
if pj == None:
ai.append(e.x[bofs])
bofs += 1
else:
ai.append(pj)
            if Bpattern==None:
                bi = e.x[bofs:n+bofs]
                cofs = bofs+n
else:
cofs=bofs
bi = []
for j,pj in enumerate(Bpattern[i]):
if pj == None:
bi.append(e.x[cofs])
cofs += 1
else:
bi.append(pj)
if Cpattern==None:
ci = e.x[cofs]
cofs+=1
else:
if Cpattern[i] == None:
ci = e.x[cofs]
cofs+=1
else:
ci = Cpattern[i]
assert(cofs == e.n)
assert len(ai)==m
assert len(bi)==n
A[i,:] = ai
B[i,:] = bi
C[i] = ci
return (A,B,C)
def getOutput(self,x,u):
"""Returns the estimate A*x+B*u+c"""
assert(len(x)==self.m)
assert(len(u)==self.n)
if isinstance(x,np.ndarray): x = x.tolist()
if isinstance(u,np.ndarray): u = u.tolist()
dx = []
if self.coeffPattern == [None,None,None]:
xuc = np.array(x + u + [1.0])
for e in self.estimators:
dx.append(np.dot(e.x,xuc))
else:
for i,e in enumerate(self.estimators):
(xuc,constOffset) = self._toEstimator(i,x,u)
dx.append(np.dot(e.x,xuc)+constOffset)
return dx
def _updateEstimatorSize(self,index):
"""Helper."""
Apattern,Bpattern,Cpattern = self.coeffPattern
m,n = self.m,self.n
numFixed = 0
        if Apattern!=None:
            numFixed += sum(1 for v in Apattern[index] if v != None)
        if Bpattern!=None:
            numFixed += sum(1 for v in Bpattern[index] if v != None)
if Cpattern!=None:
if Cpattern[index]!=None:
numFixed += 1
if numFixed==m+n+1:
self.estimators[index]=None
else:
self.estimators[index]=OnlineLeastSquares(m+n+1-numFixed)
return
def _toEstimator(self,index,x,u):
"""Helper: Projects x,u to the pattern xe taken by the index'th
estimator. Returns the pair (xe,constant offset) where the index'th
row of Ax+Bu+C is equal to dot(xe,estimator.coeffs) + constOffset."""
Apattern,Bpattern,Cpattern = self.coeffPattern
xuc = []
constOffset = 0
if Apattern == None:
xuc += x
else:
xuc += [xj for (xj,pj) in zip(x,Apattern[index]) if pj == None]
constOffset += sum([xj*pj for (xj,pj) in zip(x,Apattern[index]) if pj != None])
if Bpattern == None:
xuc += u
else:
xuc += [uj for (uj,pj) in zip(u,Bpattern[index]) if pj == None]
constOffset += sum([uj*pj for (uj,pj) in zip(u,Bpattern[index]) if pj != None])
if Cpattern == None:
xuc += [1.0]
else:
constOffset = Cpattern[index]
return (xuc,constOffset)
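# --- Hedged usage sketch (not part of the original module) ---
# Cross-checks the incremental estimator against a batch solve: feeding
# rows of (A, b) one at a time through OnlineLeastSquares.add should
# reproduce the unregularized np.linalg.lstsq solution. This is a sanity
# demo written under that assumption, not part of Klampt's API.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    A = rng.randn(20, 3)
    b = A.dot(np.array([1.0, -2.0, 0.5])) + 0.01 * rng.randn(20)
    ols = OnlineLeastSquares(3)
    for row, rhs in zip(A, b):
        ols.add(row, rhs)
    print('online solution:', ols.solution())
    print('batch solution :', np.linalg.lstsq(A, b, rcond=None)[0])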
|
b8dc5d9a1107916a5a638fdf9746aa6523a3628f
|
2ae0b8d95d439ccfd55ea7933ad4a2994ad0f6c5
|
/tools/mo/openvino/tools/mo/front/mxnet/arange_replacer.py
|
badcc43bcc3d0c31b914d3aa7db0310fa0328152
|
[
"Apache-2.0"
] |
permissive
|
openvinotoolkit/openvino
|
38ea745a247887a4e14580dbc9fc68005e2149f9
|
e4bed7a31c9f00d8afbfcabee3f64f55496ae56a
|
refs/heads/master
| 2023-08-18T03:47:44.572979
| 2023-08-17T21:24:59
| 2023-08-17T21:24:59
| 153,097,643
| 3,953
| 1,492
|
Apache-2.0
| 2023-09-14T21:42:24
| 2018-10-15T10:54:40
|
C++
|
UTF-8
|
Python
| false
| false
| 1,848
|
py
|
arange_replacer.py
|
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from openvino.tools.mo.front.mxnet.MXRepeatReplacer import MXRepeatReplacer
from openvino.tools.mo.ops.mxrepeat import MXRepeat
from openvino.tools.mo.front.common.replacement import FrontReplacementOp
from openvino.tools.mo.graph.graph import Graph
from openvino.tools.mo.ops.const import Const
class ArangeReplacer(FrontReplacementOp):
op = 'Range'
enabled = True
def run_before(self):
# replacement inserts MXRepeat operation, so we should execute its decomposition later
return [MXRepeatReplacer]
def replace_sub_graph(self, graph: Graph, match: dict):
node = match['op']
if not node.has_valid('start') or not node.has_valid('stop') or not node.has_valid('step'):
return
start_value = Const(graph, dict(value=node.start,
symbol_dict={'name': node.id + '/const_start'})).create_node()
limit_value = Const(graph, dict(value=node.stop,
symbol_dict={'name': node.id + '/const_limit'})).create_node()
delta_value = Const(graph, dict(value=node.step,
symbol_dict={'name': node.id + '/const_delta'})).create_node()
node.in_port(0).get_connection().set_source(start_value.out_port(0))
node.in_port(1).get_connection().set_source(limit_value.out_port(0))
node.in_port(2).get_connection().set_source(delta_value.out_port(0))
if node.has_valid('repeat') and node.repeat > 1:
rep = MXRepeat(graph, dict(name=node.id + '/mxrepeat', axis=0, repeats=node.repeat)).create_node()
node.out_port(0).get_destination().get_connection().set_source(rep.out_port(0))
rep.in_port(0).connect(node.out_port(0))
|
20d1c7b4a8198f2e295c8d53d29f8ed902cda0d8
|
29c0db83d834d8233f5ef87071f89245854e4c97
|
/lookyloo/modules/riskiq.py
|
885b1abe603c5e38619201a6e195263602f0adee
|
[
"BSD-3-Clause"
] |
permissive
|
Lookyloo/lookyloo
|
c3441df609a65cfe7e288924655bda87efd7bede
|
ff15f3a60aec4fc810d595b1b93704765cc897c0
|
refs/heads/main
| 2023-08-24T13:05:17.007043
| 2023-08-24T12:55:32
| 2023-08-24T12:55:32
| 98,115,418
| 252
| 43
|
NOASSERTION
| 2023-09-13T10:36:30
| 2017-07-23T17:53:24
|
Python
|
UTF-8
|
Python
| false
| false
| 4,956
|
py
|
riskiq.py
|
#!/usr/bin/env python3
import json
import logging
from datetime import date, datetime, timedelta
from typing import Any, Dict, Optional, Union, TYPE_CHECKING
from urllib.parse import urlparse
from passivetotal import AccountClient, DnsRequest, WhoisRequest # type: ignore
from requests import Response
from ..default import ConfigError, get_homedir, get_config
from ..exceptions import ModuleError
from ..helpers import get_cache_directory
if TYPE_CHECKING:
from ..capturecache import CaptureCache
class RiskIQError(ModuleError):
def __init__(self, response: Response):
self.response = response
class RiskIQ():
def __init__(self, config: Dict[str, Any]):
if not (config.get('user') and config.get('apikey')):
self.available = False
return
self.logger = logging.getLogger(f'{self.__class__.__name__}')
self.logger.setLevel(get_config('generic', 'loglevel'))
self.available = True
self.allow_auto_trigger = False
try:
# Check if account is working
test_client = AccountClient(username=config.get('user'), api_key=config.get('apikey'), exception_class=RiskIQError)
details = test_client.get_account_details()
except RiskIQError as e:
self.available = False
            if hasattr(e, 'response'):
                details = e.response.json()
                if 'message' in details:
                    self.logger.warning(f'RiskIQ not available: {details["message"]}')
                    return
            self.logger.warning(f'RiskIQ not available: {e}')
return
except Exception as e:
self.available = False
self.logger.warning(f'RiskIQ not available: {e}')
return
self.client_dns = DnsRequest(username=config.get('user'), api_key=config.get('apikey'), exception_class=RiskIQError)
self.client_whois = WhoisRequest(username=config.get('user'), api_key=config.get('apikey'), exception_class=RiskIQError)
if config.get('allow_auto_trigger'):
self.allow_auto_trigger = True
self.default_first_seen = config.get('default_first_seen_in_days', 5)
self.storage_dir_riskiq = get_homedir() / 'riskiq'
self.storage_dir_riskiq.mkdir(parents=True, exist_ok=True)
def get_passivedns(self, query: str) -> Optional[Dict[str, Any]]:
# The query can be IP or Hostname. For now, we only do it on domains.
url_storage_dir = get_cache_directory(self.storage_dir_riskiq, query, 'pdns')
if not url_storage_dir.exists():
return None
cached_entries = sorted(url_storage_dir.glob('*'), reverse=True)
if not cached_entries:
return None
with cached_entries[0].open() as f:
return json.load(f)
def capture_default_trigger(self, cache: 'CaptureCache', /, *, force: bool=False, auto_trigger: bool=False) -> Dict:
'''Run the module on all the nodes up to the final redirect'''
if not self.available:
return {'error': 'Module not available'}
if auto_trigger and not self.allow_auto_trigger:
return {'error': 'Auto trigger not allowed on module'}
if cache.url.startswith('file'):
return {'error': 'RiskIQ does not support files.'}
if cache.redirects:
hostname = urlparse(cache.redirects[-1]).hostname
else:
hostname = urlparse(cache.url).hostname
if not hostname:
return {'error': 'No hostname found.'}
self.pdns_lookup(hostname, force)
return {'success': 'Module triggered'}
def pdns_lookup(self, hostname: str, force: bool=False, first_seen: Optional[Union[date, datetime]]=None) -> None:
        '''Lookup a hostname on RiskIQ Passive DNS.
        Note: force means re-fetch the entry from RiskIQ even if we already did it today.
        '''
if not self.available:
raise ConfigError('RiskIQ not available, probably no API key')
if first_seen is None:
first_seen = date.today() - timedelta(days=self.default_first_seen)
if isinstance(first_seen, datetime):
first_seen = first_seen.date()
url_storage_dir = get_cache_directory(self.storage_dir_riskiq, hostname, 'pdns')
url_storage_dir.mkdir(parents=True, exist_ok=True)
riskiq_file = url_storage_dir / date.today().isoformat()
if not force and riskiq_file.exists():
return
pdns_info = self.client_dns.get_passive_dns(query=hostname, start=first_seen.isoformat())
if not pdns_info:
try:
url_storage_dir.rmdir()
except OSError:
# Not empty.
pass
return
pdns_info['results'] = sorted(pdns_info['results'], key=lambda k: k['lastSeen'], reverse=True)
with riskiq_file.open('w') as _f:
json.dump(pdns_info, _f)
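# --- Hedged sketch (not part of the original module) ---
# pdns_lookup above implements a "one fetch per key per day" cache: the
# payload is stored at <storage>/<key>/<ISO date> and the reader just
# takes the newest file. This stripped-down reproduction of that pattern
# is illustrative only; the function name and arguments are hypothetical.
def _daily_cached_fetch(storage_dir, key, fetch, force=False):
    key_dir = storage_dir / key
    key_dir.mkdir(parents=True, exist_ok=True)
    today_file = key_dir / date.today().isoformat()
    if not force and today_file.exists():
        with today_file.open() as f:
            return json.load(f)
    result = fetch(key)
    with today_file.open('w') as f:
        json.dump(result, f)
    return result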
|
243e7fb30137ca0d20ca7b77fe5321d4375dc23d
|
27b86f422246a78704e0e84983b2630533a47db6
|
/examples/addons/text_string_to_path.py
|
7fee75cb8f58b006dbdd03d0fc965405f5ca5106
|
[
"MIT"
] |
permissive
|
mozman/ezdxf
|
7512decd600896960660f0f580cab815bf0d7a51
|
ba6ab0264dcb6833173042a37b1b5ae878d75113
|
refs/heads/master
| 2023-09-01T11:55:13.462105
| 2023-08-15T11:50:05
| 2023-08-15T12:00:04
| 79,697,117
| 750
| 194
|
MIT
| 2023-09-14T09:40:41
| 2017-01-22T05:55:55
|
Python
|
UTF-8
|
Python
| false
| false
| 1,323
|
py
|
text_string_to_path.py
|
# Copyright (c) 2021-2023, Manfred Moitzi
# License: MIT License
import pathlib
import ezdxf
from ezdxf import path, zoom
from ezdxf.fonts import fonts
from ezdxf.addons import text2path
from ezdxf.enums import TextEntityAlignment
CWD = pathlib.Path("~/Desktop/Outbox").expanduser()
if not CWD.exists():
CWD = pathlib.Path(".")
# ------------------------------------------------------------------------------
# This example shows how to convert a text-string to outline paths.
#
# docs: https://ezdxf.mozman.at/docs/addons/text2path.html
# ------------------------------------------------------------------------------
def main():
doc = ezdxf.new()
doc.layers.new("OUTLINE")
doc.layers.new("FILLING")
msp = doc.modelspace()
attr = {"layer": "OUTLINE", "color": 1}
ff = fonts.FontFace(family="Noto Sans SC")
s = "Noto Sans SC 0123456789 %@ 中国文字"
align = TextEntityAlignment.LEFT
path.render_splines_and_polylines(
msp, text2path.make_paths_from_str(s, ff, align=align), dxfattribs=attr
)
attr["layer"] = "FILLING"
attr["color"] = 2
for hatch in text2path.make_hatches_from_str(s, ff, align=align, dxfattribs=attr):
msp.add_entity(hatch)
zoom.extents(msp)
doc.saveas(CWD / "text2path.dxf")
if __name__ == "__main__":
main()
|
93bb64cc939e266e6539139df41665c439bd6536
|
f8c5b73c9706470c4dd60d523096e18bc448a960
|
/certbot-dns-luadns/certbot_dns_luadns/_internal/tests/__init__.py
|
2688bd482276eae71927b3fe9d29931771741bfc
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
certbot/certbot
|
14ab43d76fcf0242d875d551f0d98334c43e7957
|
b1978ff18837e40d16eedf2090330af53d8ceaa5
|
refs/heads/master
| 2023-09-04T00:37:03.739195
| 2023-08-26T23:19:38
| 2023-08-26T23:19:38
| 26,516,210
| 18,581
| 3,265
|
NOASSERTION
| 2023-09-12T15:18:59
| 2014-11-12T02:52:20
|
Python
|
UTF-8
|
Python
| false
| false
| 31
|
py
|
__init__.py
|
"""certbot-dns-luadns tests"""
|
f31edebffcdea237c010211d0cd6177bfe016ef1
|
af101b467134e10270bb72d02f41f07daa7f57d8
|
/tests/test_models/test_editors/test_biggan/test_biggan_deep_discriminator.py
|
4cb169eb2301c3c82308932cd57812fd8f576c80
|
[
"Apache-2.0"
] |
permissive
|
open-mmlab/mmagic
|
4d864853417db300de4dfe7e83ce380fd1557a23
|
a382f143c0fd20d227e1e5524831ba26a568190d
|
refs/heads/main
| 2023-08-31T14:40:24.936423
| 2023-08-30T05:05:56
| 2023-08-30T05:05:56
| 203,999,962
| 1,370
| 192
|
Apache-2.0
| 2023-09-14T11:39:18
| 2019-08-23T13:04:29
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 3,429
|
py
|
test_biggan_deep_discriminator.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from copy import deepcopy
import pytest
import torch
from mmagic.models.editors.biggan import BigGANDeepDiscriminator
from mmagic.registry import MODELS
class TestBigGANDeepDiscriminator(object):
@classmethod
def setup_class(cls):
num_classes = 1000
cls.default_config = dict(
type='BigGANDeepDiscriminator',
input_scale=128,
num_classes=num_classes,
base_channels=8)
cls.x = torch.randn((2, 3, 128, 128))
cls.label = torch.randint(0, num_classes, (2, ))
def test_biggan_deep_discriminator(self):
# test default settings
d = MODELS.build(self.default_config)
assert isinstance(d, BigGANDeepDiscriminator)
y = d(self.x, self.label)
assert y.shape == (2, 1)
# test different init types
cfg = deepcopy(self.default_config)
cfg.update(dict(init_type='N02'))
d = MODELS.build(cfg)
y = d(self.x, self.label)
assert y.shape == (2, 1)
cfg = deepcopy(self.default_config)
cfg.update(dict(init_type='xavier'))
d = MODELS.build(cfg)
y = d(self.x, self.label)
assert y.shape == (2, 1)
# test different num_classes
cfg = deepcopy(self.default_config)
cfg.update(dict(num_classes=0))
d = MODELS.build(cfg)
y = d(self.x, None)
assert y.shape == (2, 1)
# test with `with_spectral_norm=False`
cfg = deepcopy(self.default_config)
cfg.update(dict(with_spectral_norm=False))
d = MODELS.build(cfg)
y = d(self.x, self.label)
assert y.shape == (2, 1)
# test torch-sn
cfg = deepcopy(self.default_config)
cfg.update(dict(sn_style='torch'))
d = MODELS.build(cfg)
y = d(self.x, self.label)
assert y.shape == (2, 1)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_biggan_deep_discriminator_cuda(self):
# test default settings
d = MODELS.build(self.default_config).cuda()
assert isinstance(d, BigGANDeepDiscriminator)
y = d(self.x.cuda(), self.label.cuda())
assert y.shape == (2, 1)
# test different init types
cfg = deepcopy(self.default_config)
cfg.update(dict(init_type='N02'))
d = MODELS.build(cfg).cuda()
y = d(self.x.cuda(), self.label.cuda())
assert y.shape == (2, 1)
cfg = deepcopy(self.default_config)
cfg.update(dict(init_type='xavier'))
d = MODELS.build(cfg).cuda()
y = d(self.x.cuda(), self.label.cuda())
assert y.shape == (2, 1)
# test different num_classes
cfg = deepcopy(self.default_config)
cfg.update(dict(num_classes=0))
d = MODELS.build(cfg).cuda()
y = d(self.x.cuda(), None)
assert y.shape == (2, 1)
# test with `with_spectral_norm=False`
cfg = deepcopy(self.default_config)
cfg.update(dict(with_spectral_norm=False))
d = MODELS.build(cfg).cuda()
y = d(self.x.cuda(), self.label.cuda())
assert y.shape == (2, 1)
# test torch-sn
cfg = deepcopy(self.default_config)
cfg.update(dict(sn_style='torch'))
d = MODELS.build(cfg).cuda()
y = d(self.x.cuda(), self.label.cuda())
assert y.shape == (2, 1)
|
9fc38788cd1366d800f6d7b5f550a112f5d09ec6
|
54485e624e28529ff066d6d1ecab620c2ec14f36
|
/tests/test_jwt.py
|
f6e81275ad50d0d0cf03457a26f0309aad9b0e18
|
[
"MIT"
] |
permissive
|
ucfopen/canvasapi
|
c63be9655672a1cd1f51028c2619523224af80ec
|
02d42cba3b0fd22e780ac0a5e904ea84fbc0b58d
|
refs/heads/develop
| 2023-08-19T03:08:34.936379
| 2023-05-25T14:46:58
| 2023-05-25T14:46:58
| 73,851,042
| 503
| 183
|
MIT
| 2023-06-15T06:25:10
| 2016-11-15T20:09:41
|
Python
|
UTF-8
|
Python
| false
| false
| 546
|
py
|
test_jwt.py
|
import unittest
import requests_mock
from canvasapi import Canvas
from tests import settings
from tests.util import register_uris
@requests_mock.Mocker()
class TestJWT(unittest.TestCase):
def setUp(self):
self.canvas = Canvas(settings.BASE_URL, settings.API_KEY)
with requests_mock.Mocker() as m:
register_uris({"jwt": ["create_jwt"]}, m)
self.jwt = self.canvas.create_jwt()
# __str__()
def test__str__(self, m):
string = str(self.jwt)
self.assertIsInstance(string, str)
|
8b517fd811834a809f912c47478d73a2a4c29c2b
|
7378aaee27ef676db95dce7702c48f8643c63313
|
/grow/extensions/hooks/pre_deploy_hook.py
|
b2325f12e53c740ec362c07837d0150107d8fd42
|
[
"MIT"
] |
permissive
|
grow/grow
|
323fa25c7690643bf170cc4558fffdfbd406ac76
|
17471c436621ebfd978b51225fa4de05367a53e1
|
refs/heads/main
| 2023-06-15T09:51:08.288251
| 2022-07-21T16:19:33
| 2022-07-21T16:19:33
| 12,899,663
| 352
| 56
|
MIT
| 2023-02-08T02:35:36
| 2013-09-17T15:51:40
|
Python
|
UTF-8
|
Python
| false
| false
| 519
|
py
|
pre_deploy_hook.py
|
"""Base class for the pre deploy hook."""
from grow.extensions.hooks import base_hook
class PreDeployHook(base_hook.BaseHook):
"""Hook for pre deploy."""
KEY = 'pre_deploy'
NAME = 'Pre Deploy'
# pylint: disable=arguments-differ,unused-argument
def trigger(self, previous_result, rendered_doc, command, *_args, **_kwargs):
"""Trigger the pre deploy hook."""
if previous_result:
return previous_result
# Return None if nothing has changed.
return None
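# --- Hedged sketch (not part of the original module) ---
# A hook is used by subclassing PreDeployHook and overriding trigger();
# the base class's short-circuit on previous_result suggests the hook
# chain forwards the first non-None result. The subclass below is a
# hypothetical illustration, not part of Grow.
class LoggingPreDeployHook(PreDeployHook):
    """Example hook that reports each rendered doc before deploy."""

    # pylint: disable=arguments-differ,unused-argument
    def trigger(self, previous_result, rendered_doc, command, *_args, **_kwargs):
        """Log the doc and leave the result chain untouched."""
        if previous_result:
            return previous_result
        print('about to deploy: {}'.format(rendered_doc))
        return None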
|
c5a49325f98a03cd1246168e638536861897662d
|
187414dcb264fb49d82507a099fd5fdca6e55e38
|
/python/pyspark/pandas/tests/indexes/test_category.py
|
d2405f6adb301f66e945992ddc7df54fafc636bf
|
[
"BSD-3-Clause",
"CC0-1.0",
"CDDL-1.1",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference",
"EPL-2.0",
"CDDL-1.0",
"MIT",
"LGPL-2.0-or-later",
"Python-2.0",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-free-unknown",
"EPL-1.0",
"Classpath-exception-2.0",
"GCC-exception-3.1",
"CC-BY-SA-3.0",
"LGPL-2.1-only",
"LicenseRef-scancode-unicode",
"CPL-1.0",
"LicenseRef-scancode-other-permissive",
"GPL-2.0-only",
"CC-PDDC",
"NAIST-2003",
"LicenseRef-scancode-other-copyleft"
] |
permissive
|
apache/spark
|
8aeba2d80465a262acc95781ede105a5b5886f6d
|
60d8fc49bec5dae1b8cf39a0670cb640b430f520
|
refs/heads/master
| 2023-09-04T04:33:36.058199
| 2023-09-04T03:48:52
| 2023-09-04T03:48:52
| 17,165,658
| 39,983
| 32,449
|
Apache-2.0
| 2023-09-14T19:46:24
| 2014-02-25T08:00:08
|
Scala
|
UTF-8
|
Python
| false
| false
| 18,257
|
py
|
test_category.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from distutils.version import LooseVersion
import pandas as pd
from pandas.api.types import CategoricalDtype
import pyspark.pandas as ps
from pyspark.testing.pandasutils import PandasOnSparkTestCase, TestUtils
class CategoricalIndexTestsMixin:
def test_categorical_index(self):
pidx = pd.CategoricalIndex([1, 2, 3])
psidx = ps.CategoricalIndex([1, 2, 3])
self.assert_eq(psidx, pidx)
self.assert_eq(psidx.categories, pidx.categories)
self.assert_eq(psidx.codes, pd.Index(pidx.codes))
self.assert_eq(psidx.ordered, pidx.ordered)
pidx = pd.Index([1, 2, 3], dtype="category")
psidx = ps.Index([1, 2, 3], dtype="category")
self.assert_eq(psidx, pidx)
self.assert_eq(psidx.categories, pidx.categories)
self.assert_eq(psidx.codes, pd.Index(pidx.codes))
self.assert_eq(psidx.ordered, pidx.ordered)
pdf = pd.DataFrame(
{
"a": pd.Categorical([1, 2, 3, 1, 2, 3]),
"b": pd.Categorical(["a", "b", "c", "a", "b", "c"], categories=["c", "b", "a"]),
},
index=pd.Categorical([10, 20, 30, 20, 30, 10], categories=[30, 10, 20], ordered=True),
)
psdf = ps.from_pandas(pdf)
pidx = pdf.set_index("b").index
psidx = psdf.set_index("b").index
self.assert_eq(psidx, pidx)
self.assert_eq(psidx.categories, pidx.categories)
self.assert_eq(psidx.codes, pd.Index(pidx.codes))
self.assert_eq(psidx.ordered, pidx.ordered)
pidx = pdf.set_index(["a", "b"]).index.get_level_values(0)
psidx = psdf.set_index(["a", "b"]).index.get_level_values(0)
self.assert_eq(psidx, pidx)
self.assert_eq(psidx.categories, pidx.categories)
self.assert_eq(psidx.codes, pd.Index(pidx.codes))
self.assert_eq(psidx.ordered, pidx.ordered)
with self.assertRaisesRegexp(TypeError, "Index.name must be a hashable type"):
ps.CategoricalIndex([1, 2, 3], name=[(1, 2, 3)])
with self.assertRaisesRegexp(
TypeError, "Cannot perform 'all' with this index type: CategoricalIndex"
):
ps.CategoricalIndex([1, 2, 3]).all()
def test_categories_setter(self):
pdf = pd.DataFrame(
{
"a": pd.Categorical([1, 2, 3, 1, 2, 3]),
"b": pd.Categorical(["a", "b", "c", "a", "b", "c"], categories=["c", "b", "a"]),
},
index=pd.Categorical([10, 20, 30, 20, 30, 10], categories=[30, 10, 20], ordered=True),
)
psdf = ps.from_pandas(pdf)
pidx = pdf.index
psidx = psdf.index
pidx = pidx.rename_categories(["z", "y", "x"])
psidx = psidx.rename_categories(["z", "y", "x"])
self.assert_eq(pidx, psidx)
self.assert_eq(pdf, psdf)
with self.assertRaises(ValueError):
psidx.categories = [1, 2, 3, 4]
def test_add_categories(self):
pidx = pd.CategoricalIndex([1, 2, 3], categories=[3, 2, 1])
psidx = ps.from_pandas(pidx)
self.assert_eq(pidx.add_categories(4), psidx.add_categories(4))
self.assert_eq(pidx.add_categories([4, 5]), psidx.add_categories([4, 5]))
self.assert_eq(pidx.add_categories([]), psidx.add_categories([]))
self.assertRaises(ValueError, lambda: psidx.add_categories(4, inplace=True))
self.assertRaises(ValueError, lambda: psidx.add_categories(3))
self.assertRaises(ValueError, lambda: psidx.add_categories([4, 4]))
def test_remove_categories(self):
pidx = pd.CategoricalIndex([1, 2, 3], categories=[3, 2, 1])
psidx = ps.from_pandas(pidx)
self.assert_eq(pidx.remove_categories(2), psidx.remove_categories(2))
self.assert_eq(pidx.remove_categories([1, 3]), psidx.remove_categories([1, 3]))
self.assert_eq(pidx.remove_categories([]), psidx.remove_categories([]))
self.assert_eq(pidx.remove_categories([2, 2]), psidx.remove_categories([2, 2]))
self.assert_eq(pidx.remove_categories([1, 2, 3]), psidx.remove_categories([1, 2, 3]))
self.assert_eq(pidx.remove_categories(None), psidx.remove_categories(None))
self.assert_eq(pidx.remove_categories([None]), psidx.remove_categories([None]))
self.assertRaises(ValueError, lambda: psidx.remove_categories(4, inplace=True))
self.assertRaises(ValueError, lambda: psidx.remove_categories(4))
self.assertRaises(ValueError, lambda: psidx.remove_categories([4, None]))
def test_remove_unused_categories(self):
pidx = pd.CategoricalIndex([1, 4, 5, 3], categories=[4, 3, 2, 1])
psidx = ps.from_pandas(pidx)
self.assert_eq(pidx.remove_unused_categories(), psidx.remove_unused_categories())
self.assertRaises(ValueError, lambda: psidx.remove_unused_categories(inplace=True))
def test_reorder_categories(self):
pidx = pd.CategoricalIndex([1, 2, 3])
psidx = ps.from_pandas(pidx)
self.assert_eq(pidx.reorder_categories([1, 2, 3]), psidx.reorder_categories([1, 2, 3]))
self.assert_eq(
pidx.reorder_categories([1, 2, 3], ordered=True),
psidx.reorder_categories([1, 2, 3], ordered=True),
)
self.assert_eq(pidx.reorder_categories([3, 2, 1]), psidx.reorder_categories([3, 2, 1]))
self.assert_eq(
pidx.reorder_categories([3, 2, 1], ordered=True),
psidx.reorder_categories([3, 2, 1], ordered=True),
)
self.assertRaises(ValueError, lambda: psidx.reorder_categories([1, 2, 3], inplace=True))
self.assertRaises(ValueError, lambda: psidx.reorder_categories([1, 2]))
self.assertRaises(ValueError, lambda: psidx.reorder_categories([1, 2, 4]))
self.assertRaises(ValueError, lambda: psidx.reorder_categories([1, 2, 2]))
self.assertRaises(TypeError, lambda: psidx.reorder_categories(1))
def test_as_ordered_unordered(self):
pidx = pd.CategoricalIndex(["x", "y", "z"], categories=["z", "y", "x"])
psidx = ps.from_pandas(pidx)
self.assert_eq(pidx.as_ordered(), psidx.as_ordered())
self.assert_eq(pidx.as_unordered(), psidx.as_unordered())
self.assertRaises(ValueError, lambda: psidx.as_ordered(inplace=True))
self.assertRaises(ValueError, lambda: psidx.as_unordered(inplace=True))
def test_astype(self):
pidx = pd.Index(["a", "b", "c"])
psidx = ps.from_pandas(pidx)
self.assert_eq(psidx.astype("category"), pidx.astype("category"))
self.assert_eq(
psidx.astype(CategoricalDtype(["c", "a", "b"])),
pidx.astype(CategoricalDtype(["c", "a", "b"])),
)
pcidx = pidx.astype(CategoricalDtype(["c", "a", "b"]))
pscidx = psidx.astype(CategoricalDtype(["c", "a", "b"]))
self.assert_eq(pscidx.astype("category"), pcidx.astype("category"))
# CategoricalDtype is not updated if the dtype is same from pandas 1.3.
if LooseVersion(pd.__version__) >= LooseVersion("1.3"):
self.assert_eq(
pscidx.astype(CategoricalDtype(["b", "c", "a"])),
pcidx.astype(CategoricalDtype(["b", "c", "a"])),
)
else:
self.assert_eq(
pscidx.astype(CategoricalDtype(["b", "c", "a"])),
pcidx,
)
self.assert_eq(pscidx.astype(str), pcidx.astype(str))
def test_factorize(self):
pidx = pd.CategoricalIndex([1, 2, 3, None])
psidx = ps.from_pandas(pidx)
pcodes, puniques = pidx.factorize()
kcodes, kuniques = psidx.factorize()
self.assert_eq(kcodes.tolist(), pcodes.tolist())
self.assert_eq(kuniques, puniques)
pcodes, puniques = pidx.factorize(use_na_sentinel=-2)
kcodes, kuniques = psidx.factorize(use_na_sentinel=-2)
self.assert_eq(kcodes.tolist(), pcodes.tolist())
self.assert_eq(kuniques, puniques)
def test_append(self):
pidx1 = pd.CategoricalIndex(["x", "y", "z"], categories=["z", "y", "x", "w"])
pidx2 = pd.CategoricalIndex(["y", "x", "w"], categories=["z", "y", "x", "w"])
pidx3 = pd.Index(["y", "x", "w", "z"])
psidx1 = ps.from_pandas(pidx1)
psidx2 = ps.from_pandas(pidx2)
psidx3 = ps.from_pandas(pidx3)
self.assert_eq(psidx1.append(psidx2), pidx1.append(pidx2))
if LooseVersion(pd.__version__) >= LooseVersion("1.5.0"):
self.assert_eq(
psidx1.append(psidx3.astype("category")), pidx1.append(pidx3.astype("category"))
)
else:
expected_result = ps.CategoricalIndex(
["x", "y", "z", "y", "x", "w", "z"],
categories=["z", "y", "x", "w"],
ordered=False,
dtype="category",
)
self.assert_eq(psidx1.append(psidx3.astype("category")), expected_result)
# TODO: append non-categorical or categorical with a different category
self.assertRaises(NotImplementedError, lambda: psidx1.append(psidx3))
pidx4 = pd.CategoricalIndex(["y", "x", "w"], categories=["z", "y", "x"])
psidx4 = ps.from_pandas(pidx4)
self.assertRaises(NotImplementedError, lambda: psidx1.append(psidx4))
def test_union(self):
pidx1 = pd.CategoricalIndex(["x", "y", "z"], categories=["z", "y", "x", "w"])
pidx2 = pd.CategoricalIndex(["y", "x", "w"], categories=["z", "y", "x", "w"])
pidx3 = pd.Index(["y", "x", "w", "z"])
psidx1 = ps.from_pandas(pidx1)
psidx2 = ps.from_pandas(pidx2)
psidx3 = ps.from_pandas(pidx3)
self.assert_eq(psidx1.union(psidx2), pidx1.union(pidx2))
self.assert_eq(
psidx1.union(psidx3.astype("category")), pidx1.union(pidx3.astype("category"))
)
# TODO: union non-categorical or categorical with a different category
self.assertRaises(NotImplementedError, lambda: psidx1.union(psidx3))
pidx4 = pd.CategoricalIndex(["y", "x", "w"], categories=["z", "y", "x"])
psidx4 = ps.from_pandas(pidx4)
self.assertRaises(NotImplementedError, lambda: psidx1.union(psidx4))
def test_intersection(self):
pidx1 = pd.CategoricalIndex(["x", "y", "z"], categories=["z", "y", "x", "w"])
pidx2 = pd.CategoricalIndex(["y", "x", "w"], categories=["z", "y", "x", "w"])
pidx3 = pd.Index(["y", "x", "w", "z"])
psidx1 = ps.from_pandas(pidx1)
psidx2 = ps.from_pandas(pidx2)
psidx3 = ps.from_pandas(pidx3)
if LooseVersion(pd.__version__) >= LooseVersion("1.2"):
self.assert_eq(
psidx1.intersection(psidx2).sort_values(), pidx1.intersection(pidx2).sort_values()
)
self.assert_eq(
psidx1.intersection(psidx3.astype("category")).sort_values(),
pidx1.intersection(pidx3.astype("category")).sort_values(),
)
else:
self.assert_eq(
psidx1.intersection(psidx2).sort_values(),
pidx1.intersection(pidx2).set_categories(pidx1.categories).sort_values(),
)
self.assert_eq(
psidx1.intersection(psidx3.astype("category")).sort_values(),
pidx1.intersection(pidx3.astype("category"))
.set_categories(pidx1.categories)
.sort_values(),
)
# TODO: intersection non-categorical or categorical with a different category
self.assertRaises(NotImplementedError, lambda: psidx1.intersection(psidx3))
pidx4 = pd.CategoricalIndex(["y", "x", "w"], categories=["z", "y", "x"])
psidx4 = ps.from_pandas(pidx4)
self.assertRaises(NotImplementedError, lambda: psidx1.intersection(psidx4))
def test_insert(self):
pidx = pd.CategoricalIndex(["x", "y", "z"], categories=["z", "y", "x", "w"])
psidx = ps.from_pandas(pidx)
self.assert_eq(psidx.insert(1, "w"), pidx.insert(1, "w"))
def test_rename_categories(self):
pidx = pd.CategoricalIndex(["a", "b", "c", "d"])
psidx = ps.from_pandas(pidx)
self.assert_eq(pidx.rename_categories([0, 1, 3, 2]), psidx.rename_categories([0, 1, 3, 2]))
self.assert_eq(
pidx.rename_categories({"a": "A", "c": "C"}),
psidx.rename_categories({"a": "A", "c": "C"}),
)
self.assert_eq(
pidx.rename_categories(lambda x: x.upper()),
psidx.rename_categories(lambda x: x.upper()),
)
self.assertRaises(
TypeError,
lambda: psidx.rename_categories(None),
)
self.assertRaises(
TypeError,
lambda: psidx.rename_categories(1),
)
self.assertRaises(
TypeError,
lambda: psidx.rename_categories("x"),
)
self.assertRaises(
ValueError,
lambda: psidx.rename_categories({"b": "B", "c": "C"}, inplace=True),
)
def test_set_categories(self):
pidx = pd.CategoricalIndex(["a", "b", "c", "d"])
psidx = ps.from_pandas(pidx)
self.assert_eq(
pidx.set_categories(["a", "c", "b", "o"]),
psidx.set_categories(["a", "c", "b", "o"]),
)
self.assert_eq(
pidx.set_categories(["a", "c", "b"]),
psidx.set_categories(["a", "c", "b"]),
)
self.assert_eq(
pidx.set_categories(["a", "c", "b", "d", "e"]),
psidx.set_categories(["a", "c", "b", "d", "e"]),
)
self.assert_eq(
pidx.set_categories([0, 1, 3, 2], rename=True),
psidx.set_categories([0, 1, 3, 2], rename=True),
)
self.assert_eq(
pidx.set_categories([0, 1, 3], rename=True),
psidx.set_categories([0, 1, 3], rename=True),
)
self.assert_eq(
pidx.set_categories([0, 1, 3, 2, 4], rename=True),
psidx.set_categories([0, 1, 3, 2, 4], rename=True),
)
self.assert_eq(
pidx.set_categories(["a", "c", "b", "o"], ordered=True),
psidx.set_categories(["a", "c", "b", "o"], ordered=True),
)
self.assert_eq(
pidx.set_categories(["a", "c", "b"], ordered=True),
psidx.set_categories(["a", "c", "b"], ordered=True),
)
self.assert_eq(
pidx.set_categories(["a", "c", "b", "d", "e"], ordered=True),
psidx.set_categories(["a", "c", "b", "d", "e"], ordered=True),
)
self.assertRaisesRegex(
ValueError,
"cannot use inplace with CategoricalIndex",
lambda: psidx.set_categories(["a", "c", "b", "o"], inplace=True),
)
def test_map(self):
pidxs = [pd.CategoricalIndex([1, 2, 3]), pd.CategoricalIndex([1, 2, 3], ordered=True)]
psidxs = [ps.from_pandas(pidx) for pidx in pidxs]
for pidx, psidx in zip(pidxs, psidxs):
# Apply dict
self.assert_eq(
pidx.map({1: "one", 2: "two", 3: "three"}),
psidx.map({1: "one", 2: "two", 3: "three"}),
)
self.assert_eq(
pidx.map({1: "one", 2: "two", 3: "one"}),
psidx.map({1: "one", 2: "two", 3: "one"}),
)
self.assert_eq(
pidx.map({1: "one", 2: "two"}),
psidx.map({1: "one", 2: "two"}),
)
self.assert_eq(
pidx.map({1: "one", 2: "two"}),
psidx.map({1: "one", 2: "two"}),
)
self.assert_eq(
pidx.map({1: 10, 2: 20}),
psidx.map({1: 10, 2: 20}),
)
# Apply lambda
self.assert_eq(
pidx.map(lambda id: id + 1),
psidx.map(lambda id: id + 1),
)
self.assert_eq(
pidx.map(lambda id: id + 1.1),
psidx.map(lambda id: id + 1.1),
)
self.assert_eq(
pidx.map(lambda id: "{id} + 1".format(id=id)),
psidx.map(lambda id: "{id} + 1".format(id=id)),
)
# Apply series
pser = pd.Series(["one", "two", "three"], index=[1, 2, 3])
self.assert_eq(
pidx.map(pser),
psidx.map(pser),
)
pser = pd.Series(["one", "two", "three"])
self.assert_eq(
pidx.map(pser),
psidx.map(pser),
)
self.assert_eq(
pidx.map(pser),
psidx.map(pser),
)
pser = pd.Series([1, 2, 3])
self.assert_eq(
pidx.map(pser),
psidx.map(pser),
)
self.assertRaises(
TypeError,
lambda: psidx.map({1: 1, 2: 2.0, 3: "three"}),
)
class CategoricalIndexTests(CategoricalIndexTestsMixin, PandasOnSparkTestCase, TestUtils):
pass
if __name__ == "__main__":
import unittest
from pyspark.pandas.tests.indexes.test_category import * # noqa: F401
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
bed4c963679b3193b391c6b4efb1b6871554191e
|
21439e67c9fe3d43ce31c2593fab4b905a3debb3
|
/cookiecutter-module/hooks/post_gen_project.py
|
e028d662d1fa61ec8c916752102ead1bb7798e36
|
[] |
no_license
|
tryton/tryton
|
625b7a7658a342611e00f331cf1f774d77facd5b
|
804476f31802a1be096a4783e4f1accfd0c5c3f1
|
refs/heads/main
| 2023-08-28T07:52:32.917099
| 2023-08-16T16:24:32
| 2023-08-16T16:24:32
| 10,222,356
| 185
| 80
| null | 2016-06-03T01:51:02
| 2013-05-22T14:51:18
|
Python
|
UTF-8
|
Python
| false
| false
| 161
|
py
|
post_gen_project.py
|
import os
import shutil
# Prefer a symlink so README.rst tracks doc/index.rst; fall back to a plain
# copy where os.symlink is unavailable (AttributeError) or not permitted (OSError).
try:
os.symlink('doc/index.rst', 'README.rst')
except (AttributeError, OSError):
shutil.copyfile('doc/index.rst', 'README.rst')
|
60e8f60e88d5b96ed66ef9f3ec4c9f267aef37e4
|
568a2667a1b6ec33a0dec9ac01844ef74e11ab2b
|
/tests/components/threshold_eroder/test_threshold_eroder.py
|
2beedb76eb322a2822e22cd9ede73838889b7a36
|
[
"MIT"
] |
permissive
|
landlab/landlab
|
0bcc9b7b1d8c4d7f79bad687e1526b80ebc83728
|
1cd72e5832ece1aa922cd1b239e2e94ed0f11f8b
|
refs/heads/master
| 2023-08-31T07:24:21.545523
| 2023-08-29T18:51:06
| 2023-08-29T18:51:06
| 19,599,383
| 326
| 313
|
MIT
| 2023-09-14T19:12:23
| 2014-05-09T04:52:50
|
Python
|
UTF-8
|
Python
| false
| false
| 4,174
|
py
|
test_threshold_eroder.py
|
#! /usr/bin/env python
"""Unit tests for landlab.components.threshold_eroder.py"""
import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal
from landlab import HexModelGrid, RasterModelGrid
from landlab.components import (
FlowAccumulator,
FlowDirectorSteepest,
PriorityFloodFlowRouter,
ThresholdEroder,
)
try:
PriorityFloodFlowRouter.load_richdem()
except ModuleNotFoundError:
with_richdem = False
else:
with_richdem = True
@pytest.mark.skipif(not with_richdem, reason="richdem is not installed")
def test_topography_rasterGrid():
# %%
mg = RasterModelGrid((5, 5))
mg.set_closed_boundaries_at_grid_edges(False, False, False, False)
z = np.array(
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 10.0, 1.0, 0.0]
+ [0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
)
_ = mg.add_field("topographic__elevation", z, at="node")
# Instantiate Flow director (steepest slope type) and Threshold Eroder
fdir = PriorityFloodFlowRouter(mg)
th_ero = ThresholdEroder(mg, slope_crit=0.6)
    # Run the components for two short timesteps
for _t in range(2):
fdir.run_one_step()
th_ero.run_one_step()
# Check final topography
assert_array_almost_equal(
mg.at_node["topographic__elevation"],
np.array(
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.6, 0.6, 0.6, 0.0, 0.0, 0.6, 1.2, 0.6]
+ [0.0, 0.0, 0.6, 0.6, 0.6, 0.0, 0.0, 0.0, 0.0, 0.0, 0]
),
verbose=True,
)
@pytest.mark.skipif(not with_richdem, reason="richdem is not installed")
def test_topo_soil_rasterGrid():
# %%
mg = RasterModelGrid((5, 5))
mg.set_closed_boundaries_at_grid_edges(False, False, False, False)
z = np.array(
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0]
+ [0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
)
topo = mg.add_zeros("topographic__elevation", at="node")
bed = mg.add_field("bedrock__elevation", z, at="node")
soil = mg.add_ones("soil__depth", at="node")
soil[mg.boundary_nodes] = 0
topo[:] = soil + bed
# Instantiate Flow director (steepest slope type) and Threshold Eroder
fdir = PriorityFloodFlowRouter(mg)
th_ero = ThresholdEroder(mg, slope_crit=0.6)
    # Run the components for two short timesteps
for _t in range(2):
fdir.run_one_step()
th_ero.run_one_step()
# Check final topography
assert_array_almost_equal(
mg.at_node["bedrock__elevation"],
np.array(
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.6, 0.6, 0.6, 0.0, 0.0, 0.6, 1, 0.6]
+ [0.0, 0.0, 0.6, 0.6, 0.6, 0.0, 0.0, 0.0, 0.0, 0.0, 0]
),
verbose=True,
)
assert_array_almost_equal(
mg.at_node["soil__depth"],
np.array(
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.2, 0.0]
+ [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
),
verbose=True,
)
# %%
def test_topography_hexGrid():
# %%
hmg = HexModelGrid((8, 8))
topo = hmg.add_zeros("topographic__elevation", at="node")
topo[hmg.core_nodes] += 100
# Instantiate Flow director (steepest slope type) and Threshold Eroder
fdir = FlowDirectorSteepest(hmg)
fa = FlowAccumulator(hmg)
th_ero = ThresholdEroder(hmg, slope_crit=0.6)
    # Run the components for five short timesteps
for _t in range(5):
fdir.run_one_step()
fa.run_one_step()
th_ero.run_one_step()
assert_array_almost_equal(
hmg.at_node["topographic__elevation"],
np.array(
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.6, 0.6, 0.6, 0.6, 0.6]
+ [0.6, 0.6, 0.0, 0.0, 0.6, 1.2, 1.2, 1.2, 1.2, 1.2, 1.2, 0.6, 0.0, 0.0]
+ [0.6, 1.2, 1.8, 1.8, 1.8, 1.8, 1.8, 1.2, 0.6, 0.0, 0.0, 0.6, 1.2, 1.8]
+ [1.8, 1.8, 1.8, 1.8, 1.8, 1.2, 0.6, 0.0, 0.0, 0.6, 1.2, 1.2, 1.2, 1.2]
+ [1.2, 1.2, 1.2, 0.6, 0.0, 0.0, 0.6, 0.6, 0.6, 0.6, 0.6, 0.6, 0.6, 0.6]
+ [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
),
)
|
33b7274dd3933acd29495f6124c3fc94fea9580d
|
5b9fe71c2efe0139205020b038f7d31b6a5ede87
|
/lux/action/univariate.py
|
1e6d357e07aae555fcd05fd92ba75ba52e19858c
|
[
"Apache-2.0"
] |
permissive
|
lux-org/lux
|
7a7c8534eec5d2b2114b1a95e64497cf9b52871a
|
972e5ec24991483370dda67de6bb1e354bcf8ca6
|
refs/heads/master
| 2023-08-21T04:05:51.279103
| 2023-07-04T23:34:35
| 2023-07-04T23:34:35
| 232,480,726
| 4,811
| 377
|
Apache-2.0
| 2023-07-12T17:45:37
| 2020-01-08T04:53:29
|
Python
|
UTF-8
|
Python
| false
| false
| 4,302
|
py
|
univariate.py
|
# Copyright 2019-2020 The Lux Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from lux.interestingness.interestingness import interestingness
from lux.vis.VisList import VisList
import lux
from lux.utils import utils
def univariate(ldf, *args):
"""
Generates bar chart distributions of different attributes in the dataframe.
Parameters
----------
ldf : lux.core.frame
LuxDataFrame with underspecified intent.
data_type_constraint: str
Controls the type of distribution chart that will be rendered.
Returns
-------
recommendations : Dict[str,obj]
object with a collection of visualizations that result from the Distribution action.
"""
if len(args) == 0:
data_type_constraint = "quantitative"
else:
data_type_constraint = args[0][0]
filter_specs = utils.get_filter_specs(ldf._intent)
ignore_rec_flag = False
if data_type_constraint == "quantitative":
possible_attributes = [
c for c in ldf.columns if ldf.data_type[c] == "quantitative" and c != "Number of Records"
]
intent = [lux.Clause(possible_attributes)]
intent.extend(filter_specs)
examples = ""
if len(possible_attributes) >= 1:
examples = f" (e.g., {possible_attributes[0]})"
recommendation = {
"action": "Distribution",
"description": "Show univariate histograms of <p class='highlight-descriptor'>quantitative</p> attributes.",
"long_description": f"Distribution displays univariate histogram distributions of all quantitative attributes{examples}. Visualizations are ranked from most to least skewed.",
}
# Doesn't make sense to generate a histogram if there is less than 5 datapoints (pre-aggregated)
if len(ldf) < 5:
ignore_rec_flag = True
elif data_type_constraint == "nominal":
possible_attributes = [
c for c in ldf.columns if ldf.data_type[c] == "nominal" and c != "Number of Records"
]
examples = ""
if len(possible_attributes) >= 1:
examples = f" (e.g., {possible_attributes[0]})"
intent = [lux.Clause(possible_attributes)]
intent.extend(filter_specs)
recommendation = {
"action": "Occurrence",
"description": "Show frequency of occurrence for <p class='highlight-descriptor'>categorical</p> attributes.",
"long_description": f"Occurence displays bar charts of counts for all categorical attributes{examples}. Visualizations are ranked from most to least uneven across the bars. ",
}
elif data_type_constraint == "geographical":
possible_attributes = [
c for c in ldf.columns if ldf.data_type[c] == "geographical" and c != "Number of Records"
]
examples = ""
if len(possible_attributes) >= 1:
examples = f" (e.g., {possible_attributes[0]})"
intent = [lux.Clause("?", data_type="geographical"), lux.Clause("?", data_model="measure")]
intent.extend(filter_specs)
recommendation = {
"action": "Geographical",
"description": "Show choropleth maps of <p class='highlight-descriptor'>geographic</p> attributes",
"long_description": f"Occurence displays choropleths of averages for some geographic attribute{examples}. Visualizations are ranked by diversity of the geographic attribute.",
}
if ignore_rec_flag:
recommendation["collection"] = []
return recommendation
vlist = VisList(intent, ldf)
for vis in vlist:
vis.score = interestingness(vis, ldf)
vlist.sort()
recommendation["collection"] = vlist
return recommendation
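# A minimal usage sketch, not part of the original module: the dataframe and its
# columns are hypothetical, and it assumes importing lux has Lux-enabled pandas
# and that maintain_metadata() populates ldf.data_type before a direct call.
if __name__ == "__main__":
    import pandas as pd
    df = pd.DataFrame({"age": [23, 45, 31, 52], "city": ["SF", "NY", "SF", "LA"]})
    df.maintain_metadata()  # assumed lux-internal helper that computes data types
    quant_rec = univariate(df, ["quantitative"])  # ranked histograms
    nominal_rec = univariate(df, ["nominal"])  # ranked occurrence bar charts
    print(quant_rec["action"], nominal_rec["action"])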
|
9c16fdece1176d9e326daaa984734da45b828a24
|
b9566949b9499e48e85504c96910797ea4b397f3
|
/adversarialbox/attacks/base.py
|
bb2371f01d9678d47b5e01c170ccddcbe8ada822
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
advboxes/AdvBox
|
7b1f4c3199c81a20d435a9a631d850c00d17df24
|
9e47e2f150bd4b33681ba2d66fcfb5f512a3bafc
|
refs/heads/master
| 2023-07-31T10:54:52.864390
| 2022-08-08T02:56:23
| 2022-08-08T02:56:23
| 143,987,419
| 965
| 180
|
Apache-2.0
| 2023-02-15T19:57:27
| 2018-08-08T08:55:41
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,300
|
py
|
base.py
|
"""
The base model of the model.
"""
from builtins import object
import logging
from abc import ABCMeta
from abc import abstractmethod
import numpy as np
from future.utils import with_metaclass
class Attack(with_metaclass(ABCMeta, object)):
"""
Abstract base class for adversarial attacks. `Attack` represent an
adversarial attack which search an adversarial example. subclass should
implement the _apply() method.
Args:
model(Model): an instance of the class adversarialbox.base.Model.
"""
def __init__(self, model):
self.model = model
def __call__(self, adversary, **kwargs):
"""
Generate the adversarial sample.
Args:
adversary(object): The adversary object.
**kwargs: Other named arguments.
"""
self._preprocess(adversary)
return self._apply(adversary, **kwargs)
@abstractmethod
def _apply(self, adversary, **kwargs):
"""
Search an adversarial example.
Args:
adversary(object): The adversary object.
**kwargs: Other named arguments.
"""
raise NotImplementedError
def _preprocess(self, adversary):
"""
Preprocess the adversary object.
:param adversary: adversary
:return: None
"""
#assert self.model.channel_axis() == adversary.original.ndim
if adversary.original_label is None:
adversary.original_label = np.argmax(
self.model.predict(adversary.original))
if adversary.is_targeted_attack and adversary.target_label is None:
if adversary.target is None:
raise ValueError(
'When adversary.is_targeted_attack is true, '
'adversary.target_label or adversary.target must be set.')
else:
adversary.target_label = np.argmax(
self.model.predict(adversary.target))
logging.info('adversary:'
'\n original_label: {}'
'\n target_label: {}'
'\n is_targeted_attack: {}'
''.format(adversary.original_label, adversary.target_label,
adversary.is_targeted_attack))
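# A minimal sketch of a concrete attack, not part of AdvBox itself; the class
# name and (absent) perturbation logic are hypothetical and only illustrate the
# _apply() contract that real attacks (FGSM, PGD, ...) implement.
class IdentityAttack(Attack):
    """Trivial attack that returns the adversary unchanged."""
    def _apply(self, adversary, **kwargs):
        # A real attack would perturb adversary.original here and query
        # self.model.predict(...) until it finds a misclassified sample.
        return adversary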
|
dcb55c37d7e6188ff9034a88d9092e3deb228860
|
a41e1498e3c080f47abd8e8e57157548df3ebbf1
|
/pandas/tests/indexes/base_class/test_pickle.py
|
c670921decb78808fa54a35c45e3d2d15ab57a67
|
[
"BSD-3-Clause"
] |
permissive
|
pandas-dev/pandas
|
e7e639454a298bebc272622e66faa9829ea393bb
|
c7325d7e7e77ecb4a4e57b48bc25265277c75712
|
refs/heads/main
| 2023-09-01T12:42:07.927176
| 2023-09-01T11:14:10
| 2023-09-01T11:14:10
| 858,127
| 36,166
| 18,728
|
BSD-3-Clause
| 2023-09-14T21:18:41
| 2010-08-24T01:37:33
|
Python
|
UTF-8
|
Python
| false
| false
| 309
|
py
|
test_pickle.py
|
from pandas import Index
import pandas._testing as tm
def test_pickle_preserves_object_dtype():
# GH#43188, GH#43155 don't infer numeric dtype
index = Index([1, 2, 3], dtype=object)
result = tm.round_trip_pickle(index)
assert result.dtype == object
tm.assert_index_equal(index, result)
|
4390cdfe74e8fe5fe4cb96e8479aa0c6a1c48630
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/domain/AlipayMerchantQipanCrowdQueryModel.py
|
833d4e28be6b7201d8178bd4efcd0bfc92a6cafd
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 1,559
|
py
|
AlipayMerchantQipanCrowdQueryModel.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayMerchantQipanCrowdQueryModel(object):
def __init__(self):
self._crowd_code = None
self._external_crowd_code = None
@property
def crowd_code(self):
return self._crowd_code
@crowd_code.setter
def crowd_code(self, value):
self._crowd_code = value
@property
def external_crowd_code(self):
return self._external_crowd_code
@external_crowd_code.setter
def external_crowd_code(self, value):
self._external_crowd_code = value
def to_alipay_dict(self):
params = dict()
if self.crowd_code:
if hasattr(self.crowd_code, 'to_alipay_dict'):
params['crowd_code'] = self.crowd_code.to_alipay_dict()
else:
params['crowd_code'] = self.crowd_code
if self.external_crowd_code:
if hasattr(self.external_crowd_code, 'to_alipay_dict'):
params['external_crowd_code'] = self.external_crowd_code.to_alipay_dict()
else:
params['external_crowd_code'] = self.external_crowd_code
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayMerchantQipanCrowdQueryModel()
if 'crowd_code' in d:
o.crowd_code = d['crowd_code']
if 'external_crowd_code' in d:
o.external_crowd_code = d['external_crowd_code']
return o
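# A minimal round-trip sketch (values are hypothetical):
if __name__ == '__main__':
    model = AlipayMerchantQipanCrowdQueryModel()
    model.crowd_code = 'CROWD_001'
    model.external_crowd_code = 'EXT_001'
    params = model.to_alipay_dict()
    # params == {'crowd_code': 'CROWD_001', 'external_crowd_code': 'EXT_001'}
    restored = AlipayMerchantQipanCrowdQueryModel.from_alipay_dict(params)
    assert restored.crowd_code == model.crowd_code
    print(json.dumps(params))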
|
9facdb2a66bc9b51d7d45efaba6197fef8db47d4
|
302ce5ab1045ee93845608c96580c63d54d730af
|
/src/spikeinterface/core/tests/test_basesorting.py
|
0bdd9aecddb368eb00940397244b080fff3964c8
|
[
"MIT"
] |
permissive
|
SpikeInterface/spikeinterface
|
f900b62720860b2881d2e6b5fa4441e0e560f625
|
ee2237b3f5ce2347b2ec9df90e97b0ee6c738dcf
|
refs/heads/main
| 2023-09-02T11:27:54.687021
| 2023-09-01T13:48:29
| 2023-09-01T13:48:29
| 196,581,117
| 295
| 133
|
MIT
| 2023-09-14T19:12:16
| 2019-07-12T13:07:46
|
Python
|
UTF-8
|
Python
| false
| false
| 6,768
|
py
|
test_basesorting.py
|
"""
test for BaseSorting are done with NpzSortingExtractor.
but check only for BaseRecording general methods.
"""
import shutil
from pathlib import Path
import numpy as np
import pytest
from numpy.testing import assert_raises
from spikeinterface.core import (
NpzSortingExtractor,
NumpyRecording,
NumpySorting,
SharedMemorySorting,
NpzFolderSorting,
NumpyFolderSorting,
create_sorting_npz,
generate_sorting,
load_extractor,
)
from spikeinterface.core.base import BaseExtractor
from spikeinterface.core.testing import check_sorted_arrays_equal, check_sortings_equal
if hasattr(pytest, "global_test_folder"):
cache_folder = pytest.global_test_folder / "core"
else:
cache_folder = Path("cache_folder") / "core"
def test_BaseSorting():
num_seg = 2
file_path = cache_folder / "test_BaseSorting.npz"
file_path.parent.mkdir(exist_ok=True)
create_sorting_npz(num_seg, file_path)
sorting = NpzSortingExtractor(file_path)
# print(sorting)
assert sorting.get_num_segments() == 2
assert sorting.get_num_units() == 3
# annotations / properties
sorting.annotate(yep="yop")
assert sorting.get_annotation("yep") == "yop"
sorting.set_property("amplitude", [-20, -40.0, -55.5])
values = sorting.get_property("amplitude")
assert np.all(values == [-20, -40.0, -55.5])
# dump/load dict
d = sorting.to_dict(include_annotations=True, include_properties=True)
sorting2 = BaseExtractor.from_dict(d)
sorting3 = load_extractor(d)
check_sortings_equal(sorting, sorting2, check_annotations=True, check_properties=True)
check_sortings_equal(sorting, sorting3, check_annotations=True, check_properties=True)
# dump/load json
sorting.dump_to_json(cache_folder / "test_BaseSorting.json")
sorting2 = BaseExtractor.load(cache_folder / "test_BaseSorting.json")
sorting3 = load_extractor(cache_folder / "test_BaseSorting.json")
check_sortings_equal(sorting, sorting2, check_annotations=True, check_properties=False)
check_sortings_equal(sorting, sorting3, check_annotations=True, check_properties=False)
# dump/load pickle
sorting.dump_to_pickle(cache_folder / "test_BaseSorting.pkl")
sorting2 = BaseExtractor.load(cache_folder / "test_BaseSorting.pkl")
sorting3 = load_extractor(cache_folder / "test_BaseSorting.pkl")
check_sortings_equal(sorting, sorting2, check_annotations=True, check_properties=True)
check_sortings_equal(sorting, sorting3, check_annotations=True, check_properties=True)
# cache old format : npz_folder
folder = cache_folder / "simple_sorting_npz_folder"
sorting.set_property("test", np.ones(len(sorting.unit_ids)))
sorting.save(folder=folder, format="npz_folder")
sorting2 = BaseExtractor.load_from_folder(folder)
assert isinstance(sorting2, NpzFolderSorting)
# cache new format : numpy_folder
folder = cache_folder / "simple_sorting_numpy_folder"
sorting.set_property("test", np.ones(len(sorting.unit_ids)))
sorting.save(folder=folder, format="numpy_folder")
sorting2 = BaseExtractor.load_from_folder(folder)
assert isinstance(sorting2, NumpyFolderSorting)
# but also possible
sorting3 = BaseExtractor.load(folder)
check_sortings_equal(sorting, sorting2, check_annotations=True, check_properties=True)
check_sortings_equal(sorting, sorting3, check_annotations=True, check_properties=True)
# save to memory
sorting4 = sorting.save(format="memory")
check_sortings_equal(sorting, sorting4, check_annotations=True, check_properties=True)
with pytest.warns(DeprecationWarning):
num_spikes = sorting.get_all_spike_trains()
# print(spikes)
spikes = sorting.to_spike_vector()
# print(spikes)
assert sorting._cached_spike_vector is not None
spikes = sorting.to_spike_vector(extremum_channel_inds={0: 15, 1: 5, 2: 18})
# print(spikes)
num_spikes_per_unit = sorting.count_num_spikes_per_unit()
total_spikes = sorting.count_total_num_spikes()
# select units
keep_units = [0, 1]
sorting_select = sorting.select_units(unit_ids=keep_units)
for unit in sorting_select.get_unit_ids():
assert unit in keep_units
# remove empty units
empty_units = [1, 3]
sorting_empty = generate_sorting(empty_units=empty_units)
sorting_clean = sorting_empty.remove_empty_units()
for unit in sorting_clean.get_unit_ids():
assert unit not in empty_units
sorting4 = sorting.to_numpy_sorting()
sorting5 = sorting.to_multiprocessing(n_jobs=2)
# create a clone with the same share mem buffer
sorting6 = load_extractor(sorting5.to_dict())
assert isinstance(sorting6, SharedMemorySorting)
del sorting6
del sorting5
def test_npy_sorting():
sfreq = 10
spike_times_0 = {
"0": np.array([0, 1, 9]), # Max sample idx is 9 for a rec of length 10
"1": np.array([2, 5]),
}
spike_times_1 = {
"0": np.array([0, 1]),
"1": np.array([], dtype="int64"),
}
sorting = NumpySorting.from_unit_dict(
[spike_times_0, spike_times_1],
sfreq,
)
assert sorting.get_num_segments() == 2
assert set(sorting.get_unit_ids()) == set(["0", "1"])
check_sorted_arrays_equal(sorting.get_unit_spike_train(segment_index=0, unit_id="1"), [2, 5])
# Check registering a recording
seg_nframes = [10, 5]
rec = NumpyRecording([np.zeros((nframes, 10)) for nframes in seg_nframes], sampling_frequency=sfreq)
sorting.register_recording(rec)
assert sorting.get_num_samples(segment_index=0) == 10
assert sorting.get_num_samples(segment_index=1) == 5
assert sorting.get_total_samples() == 15
# Registering too short a recording raises a warning
seg_nframes = [9, 5]
rec = NumpyRecording([np.zeros((nframes, 10)) for nframes in seg_nframes], sampling_frequency=sfreq)
# assert_raises(Exception, sorting.register_recording, rec)
with pytest.warns(UserWarning):
sorting.register_recording(rec)
# Registering a rec with too many segments
seg_nframes = [9, 5, 10]
rec = NumpyRecording([np.zeros((nframes, 10)) for nframes in seg_nframes], sampling_frequency=sfreq)
assert_raises(Exception, sorting.register_recording, rec)
def test_empty_sorting():
sorting = NumpySorting.from_unit_dict({}, 30000)
assert len(sorting.unit_ids) == 0
with pytest.warns(DeprecationWarning):
spikes = sorting.get_all_spike_trains()
assert len(spikes) == 1
assert len(spikes[0][0]) == 0
assert len(spikes[0][1]) == 0
spikes = sorting.to_spike_vector()
assert spikes.shape == (0,)
if __name__ == "__main__":
test_BaseSorting()
test_npy_sorting()
test_empty_sorting()
|
681459bd852417f31107b9d2f98ba23dc1a834bc
|
88ae8695987ada722184307301e221e1ba3cc2fa
|
/native_client/tests/glibc_socket_wrappers/nacl.scons
|
7b6cf18f91a141ed0b35f92db234a7f3a8ec3632
|
[
"BSD-3-Clause",
"Zlib",
"Classpath-exception-2.0",
"BSD-Source-Code",
"LZMA-exception",
"LicenseRef-scancode-unicode",
"LGPL-3.0-only",
"LGPL-2.0-or-later",
"LicenseRef-scancode-philippe-de-muyter",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-intel-osl-1993",
"HPND-sell-variant",
"ICU",
"LicenseRef-scancode-protobuf",
"bzip2-1.0.6",
"Spencer-94",
"NCSA",
"LicenseRef-scancode-nilsson-historical",
"CC0-1.0",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.1-only",
"LicenseRef-scancode-other-copyleft",
"GPL-2.0-or-later",
"NTP",
"GPL-2.0-only",
"LicenseRef-scancode-other-permissive",
"GPL-3.0-only",
"GFDL-1.1-only",
"W3C",
"LicenseRef-scancode-python-cwi",
"GCC-exception-3.1",
"BSL-1.0",
"Python-2.0",
"GPL-1.0-or-later",
"LGPL-2.1-or-later",
"LicenseRef-scancode-unknown-license-reference",
"CPL-1.0",
"GFDL-1.1-or-later",
"W3C-19980720",
"LGPL-2.0-only",
"LicenseRef-scancode-amd-historical",
"LicenseRef-scancode-ietf",
"SAX-PD",
"LicenseRef-scancode-x11-hanson",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"dtoa",
"MIT",
"LicenseRef-scancode-public-domain-disclaimer",
"PSF-2.0",
"LicenseRef-scancode-newlib-historical",
"LicenseRef-scancode-generic-exception",
"SMLNJ",
"HP-1986",
"LicenseRef-scancode-free-unknown",
"SunPro",
"MPL-1.1"
] |
permissive
|
iridium-browser/iridium-browser
|
71d9c5ff76e014e6900b825f67389ab0ccd01329
|
5ee297f53dc7f8e70183031cff62f37b0f19d25f
|
refs/heads/master
| 2023-08-03T16:44:16.844552
| 2023-07-20T15:17:00
| 2023-07-23T16:09:30
| 220,016,632
| 341
| 40
|
BSD-3-Clause
| 2021-08-13T13:54:45
| 2019-11-06T14:32:31
| null |
UTF-8
|
Python
| false
| false
| 866
|
scons
|
nacl.scons
|
# -*- python -*-
# Copyright 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
Import('env')
# http://code.google.com/p/nativeclient/issues/detail?id=2755
if env.Bit('bitcode'):
Return()
# There is no sock64 in newlib
if not env.Bit('nacl_glibc'):
Return()
# This tests the obsolete nacl-glibc ad hoc mechanism for overriding
# its socket-related functions. The new glibc (currently ARM only)
# will only support the nacl_interface_ext_supply mechanism.
if not env.Bit('build_x86'):
Return()
test_sock_nexe = env.ComponentProgram('test_sock', 'test_sock.c')
test_sock = env.CommandSelLdrTestNacl(
'test_sock.out',
test_sock_nexe,
args=[env.File('test_sock_data')])
env.AddNodeToTestSuite(test_sock, ['small_tests'], 'run_sock_test')
|
e2e5c5e16c5cf863ad86a6bfbf12514dc7ee6719
|
507254bb9775659f7c44f2943edb489f555340e2
|
/coherence/backends/dvbd_storage.py
|
f9e1228e4cfc65ff973a8aa33bc4ef91d9ee6d4d
|
[
"MIT"
] |
permissive
|
coherence-project/Coherence
|
eb8cf5d131577bd0581257ed48ccaf212433eff8
|
88016204c7778bf0d3ad1ae331b4d8fd725dd2af
|
refs/heads/develop
| 2021-01-15T15:31:39.768169
| 2018-06-29T07:16:09
| 2018-06-29T07:16:09
| 3,315,826
| 115
| 41
|
MIT
| 2018-06-29T07:23:47
| 2012-01-31T15:31:31
|
Python
|
UTF-8
|
Python
| false
| false
| 28,365
|
py
|
dvbd_storage.py
|
# Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# Copyright 2008, Frank Scholz <coherence@beebits.net>
from datetime import datetime
import urllib
from twisted.internet import reactor, defer
from twisted.python import failure, util
from twisted.python.filepath import FilePath
from coherence.upnp.core import DIDLLite
import dbus
import dbus.service
import coherence.extern.louie as louie
from coherence.backend import BackendItem, BackendStore
# errorCode (used by hidden_upnp_DestroyObject below) comes from Coherence's
# SOAP service module.
from coherence.upnp.core.soap_service import errorCode
DEFAULT_NAME = 'TV'
ROOT_CONTAINER_ID = 0
RECORDINGS_CONTAINER_ID = 100
CHANNELS_CONTAINER_ID = 200
CHANNEL_GROUPS_CONTAINER_ID = 300
BASE_CHANNEL_GROUP_ID = 1000
BUS_NAME = 'org.gnome.DVB'
RECORDINGSSTORE_OBJECT_PATH = '/org/gnome/DVB/RecordingsStore'
MANAGER_OBJECT_PATH = '/org/gnome/DVB/Manager'
class Container(BackendItem):
logCategory = 'dvbd_store'
def __init__(self, id, parent_id, name, store=None, children_callback=None, container_class=DIDLLite.Container):
BackendItem.__init__(self)
self.id = id
self.parent_id = parent_id
self.name = name
self.mimetype = 'directory'
self.item = container_class(id, parent_id, self.name)
self.item.childCount = 0
self.update_id = 0
if children_callback != None:
self.children = children_callback
else:
self.children = util.OrderedDict()
if store != None:
self.get_url = lambda: store.urlbase + str(self.id)
def add_child(self, child):
id = child.id
if isinstance(child.id, basestring):
_, id = child.id.split('.')
self.children[id] = child
if self.item.childCount != None:
self.item.childCount += 1
def get_children(self, start=0, end=0):
self.info("container.get_children %r %r", start, end)
if callable(self.children):
return self.children(start, end - start)
else:
children = self.children.values()
if end == 0:
return children[start:]
else:
return children[start:end]
def remove_children(self):
if not callable(self.children):
self.children = util.OrderedDict()
self.item.childCount = 0
def get_child_count(self):
if self.item.childCount != None:
return self.item.childCount
if callable(self.children):
return len(self.children())
else:
return len(self.children)
def get_item(self):
return self.item
def get_name(self):
return self.name
def get_id(self):
return self.id
class Channel(BackendItem):
logCategory = 'dvbd_store'
def __init__(self, store,
id, parent_id,
name, url, network,
mimetype):
BackendItem.__init__(self)
self.store = store
self.id = 'channel.%s' % id
self.parent_id = parent_id
self.real_id = id
self.name = unicode(name)
self.network = unicode(network)
self.stream_url = url
self.mimetype = str(mimetype)
def get_children(self, start=0, end=0):
return []
def get_child_count(self):
return 0
def get_item(self, parent_id=None):
self.debug("Channel get_item %r @ %r", self.id, self.parent_id)
item = DIDLLite.VideoBroadcast(self.id, self.parent_id)
item.title = self.name
res = DIDLLite.Resource(self.stream_url, 'rtsp-rtp-udp:*:%s:*' % self.mimetype)
item.res.append(res)
return item
def get_id(self):
return self.id
def get_name(self):
return self.name
class Recording(BackendItem):
logCategory = 'dvbd_store'
def __init__(self, store,
id, parent_id,
file, title,
date, duration,
mimetype):
BackendItem.__init__(self)
self.store = store
self.id = 'recording.%s' % id
self.parent_id = parent_id
self.real_id = id
path = unicode(file)
# make sure path is an absolute local path (and not an URL)
if path.startswith("file://"):
path = path[7:]
self.location = FilePath(path)
self.title = unicode(title)
self.mimetype = str(mimetype)
self.date = datetime.fromtimestamp(int(date))
self.duration = int(duration)
try:
self.size = self.location.getsize()
except Exception, msg:
self.size = 0
self.bitrate = 0
self.url = self.store.urlbase + str(self.id)
def get_children(self, start=0, end=0):
return []
def get_child_count(self):
return 0
def get_item(self, parent_id=None):
self.debug("Recording get_item %r @ %r", self.id, self.parent_id)
# create item
item = DIDLLite.VideoBroadcast(self.id, self.parent_id)
item.date = self.date
item.title = self.title
# add http resource
res = DIDLLite.Resource(self.url, 'http-get:*:%s:*' % self.mimetype)
if self.size > 0:
res.size = self.size
if self.duration > 0:
res.duration = str(self.duration)
if self.bitrate > 0:
            res.bitrate = str(self.bitrate)
item.res.append(res)
# add internal resource
res = DIDLLite.Resource('file://' + urllib.quote(self.get_path()), 'internal:%s:%s:*' % (self.store.server.coherence.hostname, self.mimetype))
if self.size > 0:
res.size = self.size
if self.duration > 0:
res.duration = str(self.duration)
if self.bitrate > 0:
            res.bitrate = str(self.bitrate)
item.res.append(res)
return item
def get_id(self):
return self.id
def get_name(self):
return self.title
def get_url(self):
return self.url
def get_path(self):
return self.location.path
class DVBDStore(BackendStore):
""" this is a backend to the DVB Daemon
http://www.k-d-w.org/node/42
"""
implements = ['MediaServer']
logCategory = 'dvbd_store'
def __init__(self, server, **kwargs):
if server.coherence.config.get('use_dbus', 'no') != 'yes':
raise Exception('this backend needs use_dbus enabled in the configuration')
BackendStore.__init__(self, server, **kwargs)
self.config = kwargs
self.name = kwargs.get('name', DEFAULT_NAME)
self.update_id = 0
self.channel_groups = []
if kwargs.get('enable_destroy', 'no') == 'yes':
self.upnp_DestroyObject = self.hidden_upnp_DestroyObject
self.bus = dbus.SessionBus()
dvb_daemon_recordingsStore = self.bus.get_object(BUS_NAME, RECORDINGSSTORE_OBJECT_PATH)
dvb_daemon_manager = self.bus.get_object(BUS_NAME, MANAGER_OBJECT_PATH)
self.store_interface = dbus.Interface(dvb_daemon_recordingsStore, 'org.gnome.DVB.RecordingsStore')
self.manager_interface = dbus.Interface(dvb_daemon_manager, 'org.gnome.DVB.Manager')
dvb_daemon_recordingsStore.connect_to_signal('Changed', self.recording_changed,
dbus_interface='org.gnome.DVB.RecordingsStore')
self.containers = {}
self.containers[ROOT_CONTAINER_ID] = \
Container(ROOT_CONTAINER_ID, -1, self.name, store=self)
self.containers[RECORDINGS_CONTAINER_ID] = \
Container(RECORDINGS_CONTAINER_ID, ROOT_CONTAINER_ID, 'Recordings', store=self)
self.containers[CHANNELS_CONTAINER_ID] = \
Container(CHANNELS_CONTAINER_ID, ROOT_CONTAINER_ID, 'Channels', store=self)
self.containers[CHANNEL_GROUPS_CONTAINER_ID] = \
Container(CHANNEL_GROUPS_CONTAINER_ID, ROOT_CONTAINER_ID, 'Channel Groups',
store=self)
self.containers[ROOT_CONTAINER_ID].add_child(self.containers[RECORDINGS_CONTAINER_ID])
self.containers[ROOT_CONTAINER_ID].add_child(self.containers[CHANNELS_CONTAINER_ID])
self.containers[ROOT_CONTAINER_ID].add_child(self.containers[CHANNEL_GROUPS_CONTAINER_ID])
def query_finished(r):
louie.send('Coherence.UPnP.Backend.init_completed', None, backend=self)
def query_failed(error):
self.error("ERROR: %s", error)
louie.send('Coherence.UPnP.Backend.init_failed', None, backend=self, msg=error)
# get_device_groups is called after get_channel_groups,
# because we need channel groups first
channel_d = self.get_channel_groups()
channel_d.addCallback(self.get_device_groups)
channel_d.addErrback(query_failed)
d = defer.DeferredList((channel_d, self.get_recordings()))
d.addCallback(query_finished)
d.addErrback(query_failed)
def __repr__(self):
return "DVBDStore"
def get_by_id(self, id):
self.info("looking for id %r", id)
if isinstance(id, basestring):
id = id.split('@', 1)
id = id[0]
item = None
try:
id = int(id)
item = self.containers[id]
except (ValueError, KeyError):
try:
type, id = id.split('.')
if type == 'recording':
return self.containers[RECORDINGS_CONTAINER_ID].children[id]
except (ValueError, KeyError):
return None
return item
def recording_changed(self, id, mode):
self.containers[RECORDINGS_CONTAINER_ID].remove_children()
def handle_result(r):
self.debug("recording changed, handle_result: %s",
self.containers[RECORDINGS_CONTAINER_ID].update_id)
self.containers[RECORDINGS_CONTAINER_ID].update_id += 1
if(self.server and
hasattr(self.server, 'content_directory_server')):
if hasattr(self, 'update_id'):
self.update_id += 1
self.server.content_directory_server.set_variable(0, 'SystemUpdateID', self.update_id)
value = (RECORDINGS_CONTAINER_ID, self.containers[RECORDINGS_CONTAINER_ID].update_id)
self.debug("ContainerUpdateIDs new value: %s", value)
self.server.content_directory_server.set_variable(0, 'ContainerUpdateIDs', value)
def handle_error(error):
self.error("ERROR: %s", error)
return error
d = self.get_recordings()
d.addCallback(handle_result)
d.addErrback(handle_error)
def get_recording_details(self, id):
self.debug("GET RECORDING DETAILS")
def process_details(data):
self.debug("GOT RECORDING DETAILS %s", data)
rid, name, desc, length, start, channel, location = data
if len(name) == 0:
name = 'Recording ' + str(rid)
return {'id': rid, 'name': name, 'path': location, 'date': start, 'duration': length}
def handle_error(error):
self.error("ERROR: %s", error)
return error
d = defer.Deferred()
d.addCallback(process_details)
d.addErrback(handle_error)
self.store_interface.GetAllInformations(id,
reply_handler=lambda x, success: d.callback(x),
error_handler=lambda x, success: d.errback(x))
return d
def get_recordings(self):
self.debug("GET RECORDINGS")
def handle_error(error):
self.error("ERROR: %s", error)
return error
def process_query_result(ids):
self.debug("GOT RECORDINGS: %s", ids)
if len(ids) == 0:
return []
l = []
for id in ids:
l.append(self.get_recording_details(id))
dl = defer.DeferredList(l)
return dl
def process_details(results):
#print 'process_details', results
for result, recording in results:
#print result, recording['name']
if result == True:
#print "add", recording['id'], recording['name'], recording['path'], recording['date'], recording['duration']
video_item = Recording(self,
recording['id'],
RECORDINGS_CONTAINER_ID,
recording['path'],
recording['name'],
recording['date'],
recording['duration'],
'video/mpegts')
self.containers[RECORDINGS_CONTAINER_ID].add_child(video_item)
d = defer.Deferred()
d.addCallback(process_query_result)
d.addCallback(process_details)
d.addErrback(handle_error)
d.addErrback(handle_error)
self.store_interface.GetRecordings(reply_handler=lambda x: d.callback(x),
error_handler=lambda x: d.errback(x))
return d
def get_channel_details(self, channelList_interface, id):
self.debug("GET CHANNEL DETAILS %s", id)
def get_name(id):
d = defer.Deferred()
channelList_interface.GetChannelName(id,
reply_handler=lambda x, success: d.callback(x),
error_handler=lambda x, success: d.errback(x))
return d
def get_network(id):
d = defer.Deferred()
channelList_interface.GetChannelNetwork(id,
reply_handler=lambda x, success: d.callback(x),
error_handler=lambda x, success: d.errback(x))
return d
def get_url(id):
d = defer.Deferred()
channelList_interface.GetChannelURL(id,
reply_handler=lambda x, success: d.callback(x),
error_handler=lambda x, success: d.errback(x))
return d
def process_details(r, id):
self.debug("GOT DETAILS %d: %s", id, r)
name = r[0][1]
network = r[1][1]
url = r[2][1]
return {'id': id, 'name': name.encode('latin-1'), 'network': network, 'url': url}
def handle_error(error):
return error
dl = defer.DeferredList((get_name(id), get_network(id), get_url(id)))
dl.addCallback(process_details, id)
dl.addErrback(handle_error)
return dl
def get_channelgroup_members(self, channel_items, channelList_interface):
self.debug("GET ALL CHANNEL GROUP MEMBERS")
def handle_error(error):
self.error("ERROR: %s", error)
return error
def process_getChannelsOfGroup(results, group_id):
for channel_id in results:
channel_id = int(channel_id)
if channel_id in channel_items:
item = channel_items[channel_id]
container_id = BASE_CHANNEL_GROUP_ID + group_id
self.containers[container_id].add_child(item)
def get_members(channelList_interface, group_id):
self.debug("GET CHANNEL GROUP MEMBERS %d", group_id)
d = defer.Deferred()
d.addCallback(process_getChannelsOfGroup, group_id)
d.addErrback(handle_error)
channelList_interface.GetChannelsOfGroup(group_id,
reply_handler=lambda x, success: d.callback(x),
                                                     error_handler=lambda x, success: d.errback(x))
return d
l = []
for group_id, group_name in self.channel_groups:
l.append(get_members(channelList_interface, group_id))
dl = defer.DeferredList(l)
return dl
def get_tv_channels(self, channelList_interface):
self.debug("GET TV CHANNELS")
def handle_error(error):
self.error("ERROR: %s", error)
return error
def process_getChannels_result(channels, channelList_interface):
self.debug("GetChannels: %s", channels)
if len(channels) == 0:
return []
l = []
for channel_id in channels:
l.append(self.get_channel_details(channelList_interface, channel_id))
dl = defer.DeferredList(l)
return dl
def process_details(results):
self.debug('GOT DEVICE GROUP DETAILS %s', results)
channels = {}
for result, channel in results:
#print channel
if result == True:
name = unicode(channel['name'], errors='ignore')
#print "add", name, channel['url']
video_item = Channel(self,
channel['id'],
CHANNELS_CONTAINER_ID,
name,
channel['url'],
channel['network'],
'video/mpegts')
self.containers[CHANNELS_CONTAINER_ID].add_child(video_item)
channels[int(channel['id'])] = video_item
return channels
d = defer.Deferred()
d.addCallback(process_getChannels_result, channelList_interface)
d.addCallback(process_details)
d.addCallback(self.get_channelgroup_members, channelList_interface)
d.addErrback(handle_error)
d.addErrback(handle_error)
d.addErrback(handle_error)
channelList_interface.GetTVChannels(reply_handler=lambda x: d.callback(x),
error_handler=lambda x: d.errback(x))
return d
def get_deviceGroup_details(self, devicegroup_interface):
self.debug("GET DEVICE GROUP DETAILS")
def handle_error(error):
self.error("ERROR: %s", error)
return error
def process_getChannelList_result(result):
self.debug("GetChannelList: %s", result)
dvbd_channelList = self.bus.get_object(BUS_NAME, result)
channelList_interface = dbus.Interface(dvbd_channelList, 'org.gnome.DVB.ChannelList')
return self.get_tv_channels(channelList_interface)
d = defer.Deferred()
d.addCallback(process_getChannelList_result)
d.addErrback(handle_error)
devicegroup_interface.GetChannelList(reply_handler=lambda x: d.callback(x),
error_handler=lambda x: d.errback(x))
return d
def get_device_groups(self, results):
self.debug("GET DEVICE GROUPS")
def handle_error(error):
self.error("ERROR: %s", error)
return error
def process_query_result(ids):
self.debug("GetRegisteredDeviceGroups: %s", ids)
if len(ids) == 0:
return
l = []
for group_object_path in ids:
dvbd_devicegroup = self.bus.get_object(BUS_NAME, group_object_path)
devicegroup_interface = dbus.Interface(dvbd_devicegroup, 'org.gnome.DVB.DeviceGroup')
l.append(self.get_deviceGroup_details(devicegroup_interface))
dl = defer.DeferredList(l)
return dl
d = defer.Deferred()
d.addCallback(process_query_result)
d.addErrback(handle_error)
self.manager_interface.GetRegisteredDeviceGroups(reply_handler=lambda x: d.callback(x),
error_handler=lambda x: d.errback(x))
return d
def get_channel_groups(self):
self.debug("GET CHANNEL GROUPS")
def handle_error(error):
self.error("ERROR: %s", error)
return error
def process_GetChannelGroups_result(data):
self.debug("GOT CHANNEL GROUPS %s", data)
for group in data:
self.channel_groups.append(group) # id, name
container_id = BASE_CHANNEL_GROUP_ID + group[0]
group_item = Container(container_id, CHANNEL_GROUPS_CONTAINER_ID,
group[1], store=self)
self.containers[container_id] = group_item
self.containers[CHANNEL_GROUPS_CONTAINER_ID].add_child(group_item)
d = defer.Deferred()
d.addCallback(process_GetChannelGroups_result)
d.addErrback(handle_error)
self.manager_interface.GetChannelGroups(reply_handler=lambda x: d.callback(x),
error_handler=lambda x: d.errback(x))
return d
def upnp_init(self):
if self.server:
self.server.connection_manager_server.set_variable(0, 'SourceProtocolInfo',
                ['http-get:*:video/mpegts:*',
                 'internal:%s:video/mpegts:*' % self.server.coherence.hostname,
                 'rtsp-rtp-udp:*:video/mpegts:*'])
def hidden_upnp_DestroyObject(self, *args, **kwargs):
ObjectID = kwargs['ObjectID']
item = self.get_by_id(ObjectID)
if item == None:
return failure.Failure(errorCode(701))
def handle_success(deleted):
print 'deleted', deleted, kwargs['ObjectID']
if deleted == False:
return failure.Failure(errorCode(715))
return {}
def handle_error(error):
return failure.Failure(errorCode(701))
d = defer.Deferred()
self.store_interface.Delete(int(item.real_id),
reply_handler=lambda x: d.callback(x),
error_handler=lambda x: d.errback(x))
d.addCallback(handle_success)
d.addErrback(handle_error)
return d
class DVBDScheduledRecording(BackendStore):
logCategory = 'dvbd_store'
def __init__(self, server, **kwargs):
if server.coherence.config.get('use_dbus', 'no') != 'yes':
raise Exception('this backend needs use_dbus enabled in the configuration')
BackendStore.__init__(self, server, **kwargs)
self.state_update_id = 0
self.bus = dbus.SessionBus()
# We have one ScheduleRecording service for each device group
# TODO use one ScheduledRecording for each device group
self.device_group_interface = None
dvbd_recorder = self.device_group_interface.GetRecorder()
        self.recorder_interface = self.bus.get_object(BUS_NAME, dvbd_recorder)
def __repr__(self):
return "DVBDScheduledRecording"
def get_timer_details(self, tid):
self.debug("GET TIMER DETAILS %d", tid)
def handle_error(error):
self.error("ERROR: %s", error)
return error
def get_infos(tid):
            d = defer.Deferred()
self.recorder_interface.GetAllInformations(tid,
reply_handler=lambda x, success: d.callback(x),
error_handler=lambda x, success: d.errback(x))
return d
def get_start_time(tid):
            d = defer.Deferred()
self.recorder_interface.GetStartTime(tid,
reply_handler=lambda x, success: d.callback(x),
error_handler=lambda x, success: d.errback(x))
return d
def process_details(results):
tid, duration, active, channel_name, title = results[0][1]
start = results[1][1]
start_datetime = datetime(*start)
# TODO return what we actually need
# FIXME we properly want the channel id here rather than the name
            return {'id': tid, 'duration': duration, 'channel': channel_name,
                    'start': start_datetime}
        d = defer.DeferredList((get_infos(tid), get_start_time(tid)))
d.addCallback(process_details)
d.addErrback(handle_error)
return d
def get_timers(self):
self.debug("GET TIMERS")
def handle_error(error):
self.error("ERROR: %s", error)
return error
def process_GetTimers_results(timer_ids):
l = []
for tid in timer_ids:
l.append(self.get_timer_details(tid))
dl = defer.DeferredList(l)
return dl
d = defer.Deferred()
        d.addCallback(process_GetTimers_results)
d.addErrback(handle_error)
self.recorder_interface.GetTimers(reply_handler=lambda x: d.callback(x),
error_handler=lambda x: d.errback(x))
return d
def add_timer(self, channel_id, start_datetime, duration):
self.debug("ADD TIMER")
def handle_error(error):
self.error("ERROR: %s", error)
return error
def process_AddTimer_result(timer_id):
self.state_update_id += 1
return timer_id
d = defer.Deferred()
d.addCallback(process_AddTimer_result)
d.addErrback(handle_error)
self.recorder_interface.AddTimer(channel_id, start_datetime.year,
start_datetime.month, start_datetime.day, start_datetime.hour,
start_datetime.minute, duration,
reply_handler=lambda x, success: d.callback(x),
error_handler=lambda x, success: d.errback(x))
return d
def delete_timer(self, tid):
self.debug("DELETE TIMER %d", tid)
def handle_error(error):
self.error("ERROR: %s", error)
return error
def process_DeleteTimer_result(success):
if not success:
# TODO: return 704
return
self.state_update_id += 1
d = defer.Deferred()
d.addCallback(process_DeleteTimer_result)
d.addErrback(handle_error)
self.recorder_interface.DeleteTimer(tid,
reply_handler=lambda x, success: d.callback(x),
error_handler=lambda x, success: d.errback(x))
return d
def upnp_GetPropertyList(self, *args, **kwargs):
pass
def upnp_GetAllowedValues(self, *args, **kwargs):
pass
def upnp_GetStateUpdateID(self, *args, **kwargs):
return self.state_update_id
    def upnp_BrowseRecordSchedules(self, *args, **kwargs):
        # TODO: wrap each timer in a RecordSchedule object
        # (ChannelID, StartDateTime, Duration) before returning it
        return self.get_timers()
    def upnp_BrowseRecordTasks(self, *args, **kwargs):
        rec_sched_id = int(kwargs['RecordScheduleID'])
        # TODO: wrap the timer details in a RecordTask object
        # (ScheduleID, ChannelID, StartDateTime, Duration) before returning them
        return self.get_timer_details(rec_sched_id)
def upnp_CreateRecordSchedule(self, *args, **kwargs):
schedule = kwargs['RecordScheduleParts']
channel_id = schedule.getChannelID()
# returns a python datetime object
start = schedule.getStartDateTime()
# duration in minutes
duration = schedule.getDuration()
return self.add_timer(channel_id, start, duration)
def upnp_DeleteRecordSchedule(self, *args, **kwargs):
rec_sched_id = int(kwargs['RecordScheduleID'])
def handle_error(error):
self.error("ERROR: %s", error)
return error
def process_IsTimerActive_result(is_active, rec_sched_id):
if is_active:
# TODO: Return 705
return
else:
return self.delete_timer(rec_sched_id)
d = defer.Deferred()
d.addCallback(process_IsTimerActive_result, rec_sched_id)
d.addErrback(handle_error)
self.recorder_interface.IsTimerActive(rec_sched_id,
reply_handler=lambda x: d.callback(x),
error_handler=lambda x: d.errback(x))
return d
def upnp_GetRecordSchedule(self, *args, **kwargs):
rec_sched_id = int(kwargs['RecordScheduleID'])
return self.get_timer_details(rec_sched_id)
def upnp_GetRecordTask(self, *args, **kwargs):
rec_task_id = int(kwargs['RecordTaskID'])
return self.get_timer_details(rec_task_id)
|
84f90289f109e1d037317bf72d8190021701622e
|
eb7afa613940f5a3f202352a94dd996edcb6bed5
|
/boto3_type_annotations_with_docs/boto3_type_annotations/mturk/client.py
|
3db4c726caafb3c562de804e3a03decbd9c7aa74
|
[
"MIT"
] |
permissive
|
alliefitter/boto3_type_annotations
|
e4da614e27a1d2ad3c9c653c50b8e30108180da5
|
2a88aa562b1aee6e8a6cc30402980884b3707fbb
|
refs/heads/master
| 2020-04-05T22:05:12.689913
| 2019-11-28T03:32:13
| 2019-11-28T03:32:13
| 157,244,330
| 131
| 11
|
MIT
| 2023-04-21T17:17:03
| 2018-11-12T16:38:57
|
Python
|
UTF-8
|
Python
| false
| false
| 233,975
|
py
|
client.py
|
from typing import Optional
from botocore.client import BaseClient
from typing import Dict
from botocore.paginate import Paginator
from datetime import datetime
from botocore.waiter import Waiter
from typing import Union
from typing import List
class Client(BaseClient):
def accept_qualification_request(self, QualificationRequestId: str, IntegerValue: int = None) -> Dict:
"""
The ``AcceptQualificationRequest`` operation approves a Worker's request for a Qualification.
Only the owner of the Qualification type can grant a Qualification request for that type.
A successful request for the ``AcceptQualificationRequest`` operation returns with no errors and an empty body.
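        An illustrative call (the request ID is a placeholder)::
            client.accept_qualification_request(
                QualificationRequestId='REQUEST_ID',
                IntegerValue=1
            )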
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/mturk-requester-2017-01-17/AcceptQualificationRequest>`_
**Request Syntax**
::
response = client.accept_qualification_request(
QualificationRequestId='string',
IntegerValue=123
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --*
:type QualificationRequestId: string
:param QualificationRequestId: **[REQUIRED]**
The ID of the Qualification request, as returned by the ``GetQualificationRequests`` operation.
:type IntegerValue: integer
:param IntegerValue:
The value of the Qualification. You can omit this value if you are using the presence or absence of the Qualification as the basis for a HIT requirement.
:rtype: dict
:returns:
"""
pass
def approve_assignment(self, AssignmentId: str, RequesterFeedback: str = None, OverrideRejection: bool = None) -> Dict:
"""
The ``ApproveAssignment`` operation approves the results of a completed assignment.
        Approving an assignment initiates two payments from the Requester's Amazon.com account:
* The Worker who submitted the results is paid the reward specified in the HIT.
* Amazon Mechanical Turk fees are debited.
If the Requester's account does not have adequate funds for these payments, the call to ApproveAssignment returns an exception, and the approval is not processed. You can include an optional feedback message with the approval, which the Worker can see in the Status section of the web site.
        You can also call this operation for assignments that were previously rejected and approve them by explicitly overriding the previous rejection. This only works on rejected assignments that were submitted within the previous 30 days and only if the assignment's related HIT has not been deleted.
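        An illustrative call overriding an earlier rejection (the assignment ID is a placeholder)::
            client.approve_assignment(
                AssignmentId='ASSIGNMENT_ID',
                RequesterFeedback='Re-reviewed and approved, thanks for your work.',
                OverrideRejection=True
            )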
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/mturk-requester-2017-01-17/ApproveAssignment>`_
**Request Syntax**
::
response = client.approve_assignment(
AssignmentId='string',
RequesterFeedback='string',
OverrideRejection=True|False
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --*
:type AssignmentId: string
:param AssignmentId: **[REQUIRED]**
The ID of the assignment. The assignment must correspond to a HIT created by the Requester.
:type RequesterFeedback: string
:param RequesterFeedback:
A message for the Worker, which the Worker can see in the Status section of the web site.
:type OverrideRejection: boolean
:param OverrideRejection:
A flag indicating that an assignment should be approved even if it was previously rejected. Defaults to ``False`` .
:rtype: dict
:returns:
"""
pass
def associate_qualification_with_worker(self, QualificationTypeId: str, WorkerId: str, IntegerValue: int = None, SendNotification: bool = None) -> Dict:
"""
The ``AssociateQualificationWithWorker`` operation gives a Worker a Qualification. ``AssociateQualificationWithWorker`` does not require that the Worker submit a Qualification request. It gives the Qualification directly to the Worker.
You can only assign a Qualification of a Qualification type that you created (using the ``CreateQualificationType`` operation).
.. note::
Note: ``AssociateQualificationWithWorker`` does not affect any pending Qualification requests for the Qualification by the Worker. If you assign a Qualification to a Worker, then later grant a Qualification request made by the Worker, the granting of the request may modify the Qualification score. To resolve a pending Qualification request without affecting the Qualification the Worker already has, reject the request with the ``RejectQualificationRequest`` operation.
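        An illustrative call (the IDs are placeholders)::
            client.associate_qualification_with_worker(
                QualificationTypeId='QUALIFICATION_TYPE_ID',
                WorkerId='WORKER_ID',
                IntegerValue=100,
                SendNotification=False
            )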
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/mturk-requester-2017-01-17/AssociateQualificationWithWorker>`_
**Request Syntax**
::
response = client.associate_qualification_with_worker(
QualificationTypeId='string',
WorkerId='string',
IntegerValue=123,
SendNotification=True|False
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --*
:type QualificationTypeId: string
:param QualificationTypeId: **[REQUIRED]**
The ID of the Qualification type to use for the assigned Qualification.
:type WorkerId: string
:param WorkerId: **[REQUIRED]**
The ID of the Worker to whom the Qualification is being assigned. Worker IDs are included with submitted HIT assignments and Qualification requests.
:type IntegerValue: integer
:param IntegerValue:
The value of the Qualification to assign.
:type SendNotification: boolean
:param SendNotification:
Specifies whether to send a notification email message to the Worker saying that the qualification was assigned to the Worker. Note: this is true by default.
:rtype: dict
:returns:
"""
pass
def can_paginate(self, operation_name: str = None):
"""
Check if an operation can be paginated.
:type operation_name: string
:param operation_name: The operation name. This is the same name
as the method name on the client. For example, if the
method name is ``create_foo``, and you\'d normally invoke the
operation as ``client.create_foo(**kwargs)``, if the
``create_foo`` operation can be paginated, you can use the
call ``client.get_paginator(\"create_foo\")``.
:return: ``True`` if the operation can be paginated,
``False`` otherwise.
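        For example, with an operation such as ``list_hits``::
            if client.can_paginate('list_hits'):
                paginator = client.get_paginator('list_hits')
                for page in paginator.paginate():
                    print(page['HITs'])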
"""
pass
def create_additional_assignments_for_hit(self, HITId: str, NumberOfAdditionalAssignments: int, UniqueRequestToken: str = None) -> Dict:
"""
The ``CreateAdditionalAssignmentsForHIT`` operation increases the maximum number of assignments of an existing HIT.
To extend the maximum number of assignments, specify the number of additional assignments.
.. note::
* HITs created with fewer than 10 assignments cannot be extended to have 10 or more assignments. Attempting to add assignments in a way that brings the total number of assignments for a HIT from fewer than 10 assignments to 10 or more assignments will result in an ``AWS.MechanicalTurk.InvalidMaximumAssignmentsIncrease`` exception.
* HITs that were created before July 22, 2015 cannot be extended. Attempting to extend HITs that were created before July 22, 2015 will result in an ``AWS.MechanicalTurk.HITTooOldForExtension`` exception.
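        An illustrative, idempotent call (the HIT ID and token are placeholders)::
            client.create_additional_assignments_for_hit(
                HITId='HIT_ID',
                NumberOfAdditionalAssignments=5,
                UniqueRequestToken='extend-HIT_ID-batch-001'
            )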
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/mturk-requester-2017-01-17/CreateAdditionalAssignmentsForHIT>`_
**Request Syntax**
::
response = client.create_additional_assignments_for_hit(
HITId='string',
NumberOfAdditionalAssignments=123,
UniqueRequestToken='string'
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --*
:type HITId: string
:param HITId: **[REQUIRED]**
The ID of the HIT to extend.
:type NumberOfAdditionalAssignments: integer
:param NumberOfAdditionalAssignments: **[REQUIRED]**
The number of additional assignments to request for this HIT.
:type UniqueRequestToken: string
:param UniqueRequestToken:
          A unique identifier for this request, which allows you to retry the call on error without extending the HIT multiple times. This is useful in cases such as network timeouts where it is unclear whether or not the call succeeded on the server. If the extend request already exists in the system from a previous call using the same ``UniqueRequestToken`` , subsequent calls will return an error with a message containing the request ID.
:rtype: dict
:returns:
"""
pass
def create_hit(self, LifetimeInSeconds: int, AssignmentDurationInSeconds: int, Reward: str, Title: str, Description: str, MaxAssignments: int = None, AutoApprovalDelayInSeconds: int = None, Keywords: str = None, Question: str = None, RequesterAnnotation: str = None, QualificationRequirements: List = None, UniqueRequestToken: str = None, AssignmentReviewPolicy: Dict = None, HITReviewPolicy: Dict = None, HITLayoutId: str = None, HITLayoutParameters: List = None) -> Dict:
"""
The ``CreateHIT`` operation creates a new Human Intelligence Task (HIT). The new HIT is made available for Workers to find and accept on the Amazon Mechanical Turk website.
This operation allows you to specify a new HIT by passing in values for the properties of the HIT, such as its title, reward amount and number of assignments. When you pass these values to ``CreateHIT`` , a new HIT is created for you, with a new ``HITTypeID`` . The HITTypeID can be used to create additional HITs in the future without needing to specify common parameters such as the title, description and reward amount each time.
An alternative way to create HITs is to first generate a HITTypeID using the ``CreateHITType`` operation and then call the ``CreateHITWithHITType`` operation. This is the recommended best practice for Requesters who are creating large numbers of HITs.
        CreateHIT also supports two ways to provide question data: by providing a value for the ``Question`` parameter that fully specifies the contents of the HIT, or by providing a ``HITLayoutId`` and associated ``HITLayoutParameters`` .
.. note::
If a HIT is created with 10 or more maximum assignments, there is an additional fee. For more information, see `Amazon Mechanical Turk Pricing <https://requester.mturk.com/pricing>`__ .
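        A minimal illustrative call (all values are placeholders, and ``question_xml`` stands for a QuestionForm, HTMLQuestion or ExternalQuestion XML string)::
            response = client.create_hit(
                Title='Categorize an image',
                Description='Choose the category that best matches the image shown.',
                Reward='0.05',
                LifetimeInSeconds=86400,
                AssignmentDurationInSeconds=600,
                Question=question_xml
            )
            hit_id = response['HIT']['HITId']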
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/mturk-requester-2017-01-17/CreateHIT>`_
**Request Syntax**
::
response = client.create_hit(
MaxAssignments=123,
AutoApprovalDelayInSeconds=123,
LifetimeInSeconds=123,
AssignmentDurationInSeconds=123,
Reward='string',
Title='string',
Keywords='string',
Description='string',
Question='string',
RequesterAnnotation='string',
QualificationRequirements=[
{
'QualificationTypeId': 'string',
'Comparator': 'LessThan'|'LessThanOrEqualTo'|'GreaterThan'|'GreaterThanOrEqualTo'|'EqualTo'|'NotEqualTo'|'Exists'|'DoesNotExist'|'In'|'NotIn',
'IntegerValues': [
123,
],
'LocaleValues': [
{
'Country': 'string',
'Subdivision': 'string'
},
],
'RequiredToPreview': True|False,
'ActionsGuarded': 'Accept'|'PreviewAndAccept'|'DiscoverPreviewAndAccept'
},
],
UniqueRequestToken='string',
AssignmentReviewPolicy={
'PolicyName': 'string',
'Parameters': [
{
'Key': 'string',
'Values': [
'string',
],
'MapEntries': [
{
'Key': 'string',
'Values': [
'string',
]
},
]
},
]
},
HITReviewPolicy={
'PolicyName': 'string',
'Parameters': [
{
'Key': 'string',
'Values': [
'string',
],
'MapEntries': [
{
'Key': 'string',
'Values': [
'string',
]
},
]
},
]
},
HITLayoutId='string',
HITLayoutParameters=[
{
'Name': 'string',
'Value': 'string'
},
]
)
**Response Syntax**
::
{
'HIT': {
'HITId': 'string',
'HITTypeId': 'string',
'HITGroupId': 'string',
'HITLayoutId': 'string',
'CreationTime': datetime(2015, 1, 1),
'Title': 'string',
'Description': 'string',
'Question': 'string',
'Keywords': 'string',
'HITStatus': 'Assignable'|'Unassignable'|'Reviewable'|'Reviewing'|'Disposed',
'MaxAssignments': 123,
'Reward': 'string',
'AutoApprovalDelayInSeconds': 123,
'Expiration': datetime(2015, 1, 1),
'AssignmentDurationInSeconds': 123,
'RequesterAnnotation': 'string',
'QualificationRequirements': [
{
'QualificationTypeId': 'string',
'Comparator': 'LessThan'|'LessThanOrEqualTo'|'GreaterThan'|'GreaterThanOrEqualTo'|'EqualTo'|'NotEqualTo'|'Exists'|'DoesNotExist'|'In'|'NotIn',
'IntegerValues': [
123,
],
'LocaleValues': [
{
'Country': 'string',
'Subdivision': 'string'
},
],
'RequiredToPreview': True|False,
'ActionsGuarded': 'Accept'|'PreviewAndAccept'|'DiscoverPreviewAndAccept'
},
],
'HITReviewStatus': 'NotReviewed'|'MarkedForReview'|'ReviewedAppropriate'|'ReviewedInappropriate',
'NumberOfAssignmentsPending': 123,
'NumberOfAssignmentsAvailable': 123,
'NumberOfAssignmentsCompleted': 123
}
}
**Response Structure**
- *(dict) --*
- **HIT** *(dict) --*
Contains the newly created HIT data. For a description of the HIT data structure as it appears in responses, see the HIT Data Structure documentation.
- **HITId** *(string) --*
A unique identifier for the HIT.
- **HITTypeId** *(string) --*
                The ID of the HIT type of this HIT.
- **HITGroupId** *(string) --*
The ID of the HIT Group of this HIT.
- **HITLayoutId** *(string) --*
The ID of the HIT Layout of this HIT.
- **CreationTime** *(datetime) --*
The date and time the HIT was created.
- **Title** *(string) --*
The title of the HIT.
- **Description** *(string) --*
A general description of the HIT.
- **Question** *(string) --*
                The data the Worker completing the HIT uses to produce the results. This is either a QuestionForm, an HTMLQuestion, or an ExternalQuestion data structure.
- **Keywords** *(string) --*
One or more words or phrases that describe the HIT, separated by commas. Search terms similar to the keywords of a HIT are more likely to have the HIT in the search results.
- **HITStatus** *(string) --*
The status of the HIT and its assignments. Valid Values are Assignable | Unassignable | Reviewable | Reviewing | Disposed.
- **MaxAssignments** *(integer) --*
The number of times the HIT can be accepted and completed before the HIT becomes unavailable.
- **Reward** *(string) --*
A string representing a currency amount.
- **AutoApprovalDelayInSeconds** *(integer) --*
The amount of time, in seconds, after the Worker submits an assignment for the HIT that the results are automatically approved by Amazon Mechanical Turk. This is the amount of time the Requester has to reject an assignment submitted by a Worker before the assignment is auto-approved and the Worker is paid.
- **Expiration** *(datetime) --*
The date and time the HIT expires.
- **AssignmentDurationInSeconds** *(integer) --*
The length of time, in seconds, that a Worker has to complete the HIT after accepting it.
- **RequesterAnnotation** *(string) --*
An arbitrary data field the Requester who created the HIT can use. This field is visible only to the creator of the HIT.
- **QualificationRequirements** *(list) --*
Conditions that a Worker's Qualifications must meet in order to accept the HIT. A HIT can have between zero and ten Qualification requirements. All requirements must be met in order for a Worker to accept the HIT. Additionally, other actions can be restricted using the ``ActionsGuarded`` field on each ``QualificationRequirement`` structure.
- *(dict) --*
The QualificationRequirement data structure describes a Qualification that a Worker must have before the Worker is allowed to accept a HIT. A requirement may optionally state that a Worker must have the Qualification in order to preview the HIT, or see the HIT in search results.
- **QualificationTypeId** *(string) --*
The ID of the Qualification type for the requirement.
- **Comparator** *(string) --*
The kind of comparison to make against a Qualification's value. You can compare a Qualification's value to an IntegerValue to see if it is LessThan, LessThanOrEqualTo, GreaterThan, GreaterThanOrEqualTo, EqualTo, or NotEqualTo the IntegerValue. You can compare it to a LocaleValue to see if it is EqualTo, or NotEqualTo the LocaleValue. You can check to see if the value is In or NotIn a set of IntegerValue or LocaleValue values. Lastly, a Qualification requirement can also test if a Qualification Exists or DoesNotExist in the user's profile, regardless of its value.
- **IntegerValues** *(list) --*
The integer value to compare against the Qualification's value. IntegerValue must not be present if Comparator is Exists or DoesNotExist. IntegerValue can only be used if the Qualification type has an integer value; it cannot be used with the Worker_Locale QualificationType ID. When performing a set comparison by using the In or the NotIn comparator, you can use up to 15 IntegerValue elements in a QualificationRequirement data structure.
- *(integer) --*
- **LocaleValues** *(list) --*
                    The locale value to compare against the Qualification's value. The locale value must be a valid ISO 3166 country code, optionally combined with a valid ISO 3166-2 subdivision code. LocaleValue can only be used with a Worker_Locale QualificationType ID. LocaleValue can only be used with the EqualTo, NotEqualTo, In, and NotIn comparators. You must only use a single LocaleValue element when using the EqualTo or NotEqualTo comparators. When performing a set comparison by using the In or the NotIn comparator, you can use up to 30 LocaleValue elements in a QualificationRequirement data structure.
- *(dict) --*
The Locale data structure represents a geographical region or location.
- **Country** *(string) --*
The country of the locale. Must be a valid ISO 3166 country code. For example, the code US refers to the United States of America.
- **Subdivision** *(string) --*
The state or subdivision of the locale. A valid ISO 3166-2 subdivision code. For example, the code WA refers to the state of Washington.
- **RequiredToPreview** *(boolean) --*
DEPRECATED: Use the ``ActionsGuarded`` field instead. If RequiredToPreview is true, the question data for the HIT will not be shown when a Worker whose Qualifications do not meet this requirement tries to preview the HIT. That is, a Worker's Qualifications must meet all of the requirements for which RequiredToPreview is true in order to preview the HIT. If a Worker meets all of the requirements where RequiredToPreview is true (or if there are no such requirements), but does not meet all of the requirements for the HIT, the Worker will be allowed to preview the HIT's question data, but will not be allowed to accept and complete the HIT. The default is false. This should not be used in combination with the ``ActionsGuarded`` field.
- **ActionsGuarded** *(string) --*
                  Setting this attribute prevents Workers whose Qualifications do not meet this QualificationRequirement from taking the specified action. Valid arguments include "Accept" (Worker cannot accept the HIT, but can preview the HIT and see it in their search results), "PreviewAndAccept" (Worker cannot accept or preview the HIT, but can see the HIT in their search results), and "DiscoverPreviewAndAccept" (Worker cannot accept, preview, or see the HIT in their search results). It's possible for you to create a HIT with multiple QualificationRequirements (which can have different values for the ``ActionsGuarded`` attribute). In this case, the Worker is only permitted to perform an action when they have met all QualificationRequirements guarding the action. The actions in the order of least restrictive to most restrictive are Discover, Preview and Accept. For example, if a Worker meets all QualificationRequirements that are set to DiscoverPreviewAndAccept, but does not meet all requirements that are set with PreviewAndAccept, then the Worker will be able to Discover, i.e. see the HIT in their search results, but will not be able to Preview or Accept the HIT. ActionsGuarded should not be used in combination with the ``RequiredToPreview`` field.
- **HITReviewStatus** *(string) --*
Indicates the review status of the HIT. Valid Values are NotReviewed | MarkedForReview | ReviewedAppropriate | ReviewedInappropriate.
- **NumberOfAssignmentsPending** *(integer) --*
The number of assignments for this HIT that are being previewed or have been accepted by Workers, but have not yet been submitted, returned, or abandoned.
- **NumberOfAssignmentsAvailable** *(integer) --*
The number of assignments for this HIT that are available for Workers to accept.
- **NumberOfAssignmentsCompleted** *(integer) --*
The number of assignments for this HIT that have been approved or rejected.
:type MaxAssignments: integer
:param MaxAssignments:
The number of times the HIT can be accepted and completed before the HIT becomes unavailable.
:type AutoApprovalDelayInSeconds: integer
:param AutoApprovalDelayInSeconds:
The number of seconds after an assignment for the HIT has been submitted, after which the assignment is considered Approved automatically unless the Requester explicitly rejects it.
:type LifetimeInSeconds: integer
:param LifetimeInSeconds: **[REQUIRED]**
An amount of time, in seconds, after which the HIT is no longer available for users to accept. After the lifetime of the HIT elapses, the HIT no longer appears in HIT searches, even if not all of the assignments for the HIT have been accepted.
:type AssignmentDurationInSeconds: integer
:param AssignmentDurationInSeconds: **[REQUIRED]**
The amount of time, in seconds, that a Worker has to complete the HIT after accepting it. If a Worker does not complete the assignment within the specified duration, the assignment is considered abandoned. If the HIT is still active (that is, its lifetime has not elapsed), the assignment becomes available for other users to find and accept.
:type Reward: string
:param Reward: **[REQUIRED]**
The amount of money the Requester will pay a Worker for successfully completing the HIT.
:type Title: string
:param Title: **[REQUIRED]**
The title of the HIT. A title should be short and descriptive about the kind of task the HIT contains. On the Amazon Mechanical Turk web site, the HIT title appears in search results, and everywhere the HIT is mentioned.
:type Keywords: string
:param Keywords:
One or more words or phrases that describe the HIT, separated by commas. These words are used in searches to find HITs.
:type Description: string
:param Description: **[REQUIRED]**
A general description of the HIT. A description includes detailed information about the kind of task the HIT contains. On the Amazon Mechanical Turk web site, the HIT description appears in the expanded view of search results, and in the HIT and assignment screens. A good description gives the user enough information to evaluate the HIT before accepting it.
:type Question: string
:param Question:
The data the person completing the HIT uses to produce the results.
Constraints: Must be a QuestionForm data structure, an ExternalQuestion data structure, or an HTMLQuestion data structure. The XML question data must not be larger than 64 kilobytes (65,535 bytes) in size, including whitespace.
Either a Question parameter or a HITLayoutId parameter must be provided.
:type RequesterAnnotation: string
:param RequesterAnnotation:
An arbitrary data field. The RequesterAnnotation parameter lets your application attach arbitrary data to the HIT for tracking purposes. For example, this parameter could be an identifier internal to the Requester\'s application that corresponds with the HIT.
The RequesterAnnotation parameter for a HIT is only visible to the Requester who created the HIT. It is not shown to the Worker, or any other Requester.
The RequesterAnnotation parameter may be different for each HIT you submit. It does not affect how your HITs are grouped.
:type QualificationRequirements: list
:param QualificationRequirements:
Conditions that a Worker\'s Qualifications must meet in order to accept the HIT. A HIT can have between zero and ten Qualification requirements. All requirements must be met in order for a Worker to accept the HIT. Additionally, other actions can be restricted using the ``ActionsGuarded`` field on each ``QualificationRequirement`` structure.
- *(dict) --*
The QualificationRequirement data structure describes a Qualification that a Worker must have before the Worker is allowed to accept a HIT. A requirement may optionally state that a Worker must have the Qualification in order to preview the HIT, or see the HIT in search results.
- **QualificationTypeId** *(string) --* **[REQUIRED]**
The ID of the Qualification type for the requirement.
- **Comparator** *(string) --* **[REQUIRED]**
The kind of comparison to make against a Qualification\'s value. You can compare a Qualification\'s value to an IntegerValue to see if it is LessThan, LessThanOrEqualTo, GreaterThan, GreaterThanOrEqualTo, EqualTo, or NotEqualTo the IntegerValue. You can compare it to a LocaleValue to see if it is EqualTo, or NotEqualTo the LocaleValue. You can check to see if the value is In or NotIn a set of IntegerValue or LocaleValue values. Lastly, a Qualification requirement can also test if a Qualification Exists or DoesNotExist in the user\'s profile, regardless of its value.
- **IntegerValues** *(list) --*
The integer value to compare against the Qualification\'s value. IntegerValue must not be present if Comparator is Exists or DoesNotExist. IntegerValue can only be used if the Qualification type has an integer value; it cannot be used with the Worker_Locale QualificationType ID. When performing a set comparison by using the In or the NotIn comparator, you can use up to 15 IntegerValue elements in a QualificationRequirement data structure.
- *(integer) --*
- **LocaleValues** *(list) --*
            The locale value to compare against the Qualification\'s value. The locale value must be a valid ISO 3166 country code, optionally combined with a valid ISO 3166-2 subdivision code. LocaleValue can only be used with a Worker_Locale QualificationType ID. LocaleValue can only be used with the EqualTo, NotEqualTo, In, and NotIn comparators. You must only use a single LocaleValue element when using the EqualTo or NotEqualTo comparators. When performing a set comparison by using the In or the NotIn comparator, you can use up to 30 LocaleValue elements in a QualificationRequirement data structure.
- *(dict) --*
The Locale data structure represents a geographical region or location.
- **Country** *(string) --* **[REQUIRED]**
The country of the locale. Must be a valid ISO 3166 country code. For example, the code US refers to the United States of America.
- **Subdivision** *(string) --*
The state or subdivision of the locale. A valid ISO 3166-2 subdivision code. For example, the code WA refers to the state of Washington.
- **RequiredToPreview** *(boolean) --*
DEPRECATED: Use the ``ActionsGuarded`` field instead. If RequiredToPreview is true, the question data for the HIT will not be shown when a Worker whose Qualifications do not meet this requirement tries to preview the HIT. That is, a Worker\'s Qualifications must meet all of the requirements for which RequiredToPreview is true in order to preview the HIT. If a Worker meets all of the requirements where RequiredToPreview is true (or if there are no such requirements), but does not meet all of the requirements for the HIT, the Worker will be allowed to preview the HIT\'s question data, but will not be allowed to accept and complete the HIT. The default is false. This should not be used in combination with the ``ActionsGuarded`` field.
- **ActionsGuarded** *(string) --*
            Setting this attribute prevents Workers whose Qualifications do not meet this QualificationRequirement from taking the specified action. Valid arguments include \"Accept\" (Worker cannot accept the HIT, but can preview the HIT and see it in their search results), \"PreviewAndAccept\" (Worker cannot accept or preview the HIT, but can see the HIT in their search results), and \"DiscoverPreviewAndAccept\" (Worker cannot accept, preview, or see the HIT in their search results). It\'s possible for you to create a HIT with multiple QualificationRequirements (which can have different values for the ``ActionsGuarded`` attribute). In this case, the Worker is only permitted to perform an action when they have met all QualificationRequirements guarding the action. The actions in the order of least restrictive to most restrictive are Discover, Preview and Accept. For example, if a Worker meets all QualificationRequirements that are set to DiscoverPreviewAndAccept, but does not meet all requirements that are set with PreviewAndAccept, then the Worker will be able to Discover, i.e. see the HIT in their search results, but will not be able to Preview or Accept the HIT. ActionsGuarded should not be used in combination with the ``RequiredToPreview`` field.
:type UniqueRequestToken: string
:param UniqueRequestToken:
          A unique identifier for this request, which allows you to retry the call on error without creating duplicate HITs. This is useful in cases such as network timeouts where it is unclear whether or not the call succeeded on the server. If the HIT already exists in the system from a previous call using the same UniqueRequestToken, subsequent calls will return an ``AWS.MechanicalTurk.HitAlreadyExists`` error with a message containing the HITId.
.. note::
Note: It is your responsibility to ensure uniqueness of the token. The unique token expires after 24 hours. Subsequent calls using the same UniqueRequestToken made after the 24 hour limit could create duplicate HITs.
:type AssignmentReviewPolicy: dict
:param AssignmentReviewPolicy:
The Assignment-level Review Policy applies to the assignments under the HIT. You can specify for Mechanical Turk to take various actions based on the policy.
- **PolicyName** *(string) --* **[REQUIRED]**
Name of a Review Policy: SimplePlurality/2011-09-01 or ScoreMyKnownAnswers/2011-09-01
- **Parameters** *(list) --*
Name of the parameter from the Review policy.
- *(dict) --*
Name of the parameter from the Review policy.
- **Key** *(string) --*
              Name of the parameter from the list of Review Policies.
- **Values** *(list) --*
The list of values of the Parameter
- *(string) --*
- **MapEntries** *(list) --*
List of ParameterMapEntry objects.
- *(dict) --*
This data structure is the data type for the AnswerKey parameter of the ScoreMyKnownAnswers/2011-09-01 Review Policy.
- **Key** *(string) --*
The QuestionID from the HIT that is used to identify which question requires Mechanical Turk to score as part of the ScoreMyKnownAnswers/2011-09-01 Review Policy.
- **Values** *(list) --*
The list of answers to the question specified in the MapEntry Key element. The Worker must match all values in order for the answer to be scored correctly.
- *(string) --*
:type HITReviewPolicy: dict
:param HITReviewPolicy:
The HIT-level Review Policy applies to the HIT. You can specify for Mechanical Turk to take various actions based on the policy.
- **PolicyName** *(string) --* **[REQUIRED]**
Name of a Review Policy: SimplePlurality/2011-09-01 or ScoreMyKnownAnswers/2011-09-01
- **Parameters** *(list) --*
Name of the parameter from the Review policy.
- *(dict) --*
Name of the parameter from the Review policy.
- **Key** *(string) --*
              Name of the parameter from the list of Review Policies.
- **Values** *(list) --*
The list of values of the Parameter
- *(string) --*
- **MapEntries** *(list) --*
List of ParameterMapEntry objects.
- *(dict) --*
This data structure is the data type for the AnswerKey parameter of the ScoreMyKnownAnswers/2011-09-01 Review Policy.
- **Key** *(string) --*
The QuestionID from the HIT that is used to identify which question requires Mechanical Turk to score as part of the ScoreMyKnownAnswers/2011-09-01 Review Policy.
- **Values** *(list) --*
The list of answers to the question specified in the MapEntry Key element. The Worker must match all values in order for the answer to be scored correctly.
- *(string) --*
:type HITLayoutId: string
:param HITLayoutId:
The HITLayoutId allows you to use a pre-existing HIT design with placeholder values and create an additional HIT by providing those values as HITLayoutParameters.
Constraints: Either a Question parameter or a HITLayoutId parameter must be provided.
:type HITLayoutParameters: list
:param HITLayoutParameters:
If the HITLayoutId is provided, any placeholder values must be filled in with values using the HITLayoutParameter structure. For more information, see HITLayout.
- *(dict) --*
The HITLayoutParameter data structure defines parameter values used with a HITLayout. A HITLayout is a reusable Amazon Mechanical Turk project template used to provide Human Intelligence Task (HIT) question data for CreateHIT.
- **Name** *(string) --* **[REQUIRED]**
The name of the parameter in the HITLayout.
- **Value** *(string) --* **[REQUIRED]**
The value substituted for the parameter referenced in the HITLayout.
:rtype: dict
:returns:
"""
pass
def create_hit_type(self, AssignmentDurationInSeconds: int, Reward: str, Title: str, Description: str, AutoApprovalDelayInSeconds: int = None, Keywords: str = None, QualificationRequirements: List = None) -> Dict:
"""
The ``CreateHITType`` operation creates a new HIT type. This operation allows you to define a standard set of HIT properties to use when creating HITs. If you register a HIT type with values that match an existing HIT type, the HIT type ID of the existing type will be returned.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/mturk-requester-2017-01-17/CreateHITType>`_
**Request Syntax**
::
response = client.create_hit_type(
AutoApprovalDelayInSeconds=123,
AssignmentDurationInSeconds=123,
Reward='string',
Title='string',
Keywords='string',
Description='string',
QualificationRequirements=[
{
'QualificationTypeId': 'string',
'Comparator': 'LessThan'|'LessThanOrEqualTo'|'GreaterThan'|'GreaterThanOrEqualTo'|'EqualTo'|'NotEqualTo'|'Exists'|'DoesNotExist'|'In'|'NotIn',
'IntegerValues': [
123,
],
'LocaleValues': [
{
'Country': 'string',
'Subdivision': 'string'
},
],
'RequiredToPreview': True|False,
'ActionsGuarded': 'Accept'|'PreviewAndAccept'|'DiscoverPreviewAndAccept'
},
]
)
**Response Syntax**
::
{
'HITTypeId': 'string'
}
**Response Structure**
- *(dict) --*
- **HITTypeId** *(string) --*
The ID of the newly registered HIT type.
:type AutoApprovalDelayInSeconds: integer
:param AutoApprovalDelayInSeconds:
The number of seconds after an assignment for the HIT has been submitted, after which the assignment is considered Approved automatically unless the Requester explicitly rejects it.
:type AssignmentDurationInSeconds: integer
:param AssignmentDurationInSeconds: **[REQUIRED]**
The amount of time, in seconds, that a Worker has to complete the HIT after accepting it. If a Worker does not complete the assignment within the specified duration, the assignment is considered abandoned. If the HIT is still active (that is, its lifetime has not elapsed), the assignment becomes available for other users to find and accept.
:type Reward: string
:param Reward: **[REQUIRED]**
The amount of money the Requester will pay a Worker for successfully completing the HIT.
:type Title: string
:param Title: **[REQUIRED]**
The title of the HIT. A title should be short and descriptive about the kind of task the HIT contains. On the Amazon Mechanical Turk web site, the HIT title appears in search results, and everywhere the HIT is mentioned.
:type Keywords: string
:param Keywords:
One or more words or phrases that describe the HIT, separated by commas. These words are used in searches to find HITs.
:type Description: string
:param Description: **[REQUIRED]**
A general description of the HIT. A description includes detailed information about the kind of task the HIT contains. On the Amazon Mechanical Turk web site, the HIT description appears in the expanded view of search results, and in the HIT and assignment screens. A good description gives the user enough information to evaluate the HIT before accepting it.
:type QualificationRequirements: list
:param QualificationRequirements:
Conditions that a Worker\'s Qualifications must meet in order to accept the HIT. A HIT can have between zero and ten Qualification requirements. All requirements must be met in order for a Worker to accept the HIT. Additionally, other actions can be restricted using the ``ActionsGuarded`` field on each ``QualificationRequirement`` structure.
- *(dict) --*
The QualificationRequirement data structure describes a Qualification that a Worker must have before the Worker is allowed to accept a HIT. A requirement may optionally state that a Worker must have the Qualification in order to preview the HIT, or see the HIT in search results.
- **QualificationTypeId** *(string) --* **[REQUIRED]**
The ID of the Qualification type for the requirement.
- **Comparator** *(string) --* **[REQUIRED]**
The kind of comparison to make against a Qualification\'s value. You can compare a Qualification\'s value to an IntegerValue to see if it is LessThan, LessThanOrEqualTo, GreaterThan, GreaterThanOrEqualTo, EqualTo, or NotEqualTo the IntegerValue. You can compare it to a LocaleValue to see if it is EqualTo, or NotEqualTo the LocaleValue. You can check to see if the value is In or NotIn a set of IntegerValue or LocaleValue values. Lastly, a Qualification requirement can also test if a Qualification Exists or DoesNotExist in the user\'s profile, regardless of its value.
- **IntegerValues** *(list) --*
The integer value to compare against the Qualification\'s value. IntegerValue must not be present if Comparator is Exists or DoesNotExist. IntegerValue can only be used if the Qualification type has an integer value; it cannot be used with the Worker_Locale QualificationType ID. When performing a set comparison by using the In or the NotIn comparator, you can use up to 15 IntegerValue elements in a QualificationRequirement data structure.
- *(integer) --*
- **LocaleValues** *(list) --*
            The locale value to compare against the Qualification\'s value. The locale value must be a valid ISO 3166 country code, optionally combined with a valid ISO 3166-2 subdivision code. LocaleValue can only be used with a Worker_Locale QualificationType ID. LocaleValue can only be used with the EqualTo, NotEqualTo, In, and NotIn comparators. You must only use a single LocaleValue element when using the EqualTo or NotEqualTo comparators. When performing a set comparison by using the In or the NotIn comparator, you can use up to 30 LocaleValue elements in a QualificationRequirement data structure.
- *(dict) --*
The Locale data structure represents a geographical region or location.
- **Country** *(string) --* **[REQUIRED]**
The country of the locale. Must be a valid ISO 3166 country code. For example, the code US refers to the United States of America.
- **Subdivision** *(string) --*
The state or subdivision of the locale. A valid ISO 3166-2 subdivision code. For example, the code WA refers to the state of Washington.
- **RequiredToPreview** *(boolean) --*
DEPRECATED: Use the ``ActionsGuarded`` field instead. If RequiredToPreview is true, the question data for the HIT will not be shown when a Worker whose Qualifications do not meet this requirement tries to preview the HIT. That is, a Worker\'s Qualifications must meet all of the requirements for which RequiredToPreview is true in order to preview the HIT. If a Worker meets all of the requirements where RequiredToPreview is true (or if there are no such requirements), but does not meet all of the requirements for the HIT, the Worker will be allowed to preview the HIT\'s question data, but will not be allowed to accept and complete the HIT. The default is false. This should not be used in combination with the ``ActionsGuarded`` field.
- **ActionsGuarded** *(string) --*
            Setting this attribute prevents Workers whose Qualifications do not meet this QualificationRequirement from taking the specified action. Valid arguments include \"Accept\" (Worker cannot accept the HIT, but can preview the HIT and see it in their search results), \"PreviewAndAccept\" (Worker cannot accept or preview the HIT, but can see the HIT in their search results), and \"DiscoverPreviewAndAccept\" (Worker cannot accept, preview, or see the HIT in their search results). It\'s possible for you to create a HIT with multiple QualificationRequirements (which can have different values for the ``ActionsGuarded`` attribute). In this case, the Worker is only permitted to perform an action when they have met all QualificationRequirements guarding the action. The actions in the order of least restrictive to most restrictive are Discover, Preview and Accept. For example, if a Worker meets all QualificationRequirements that are set to DiscoverPreviewAndAccept, but does not meet all requirements that are set with PreviewAndAccept, then the Worker will be able to Discover, i.e. see the HIT in their search results, but will not be able to Preview or Accept the HIT. ActionsGuarded should not be used in combination with the ``RequiredToPreview`` field.
:rtype: dict
:returns:
"""
pass
def create_hit_with_hit_type(self, HITTypeId: str, LifetimeInSeconds: int, MaxAssignments: int = None, Question: str = None, RequesterAnnotation: str = None, UniqueRequestToken: str = None, AssignmentReviewPolicy: Dict = None, HITReviewPolicy: Dict = None, HITLayoutId: str = None, HITLayoutParameters: List = None) -> Dict:
"""
The ``CreateHITWithHITType`` operation creates a new Human Intelligence Task (HIT) using an existing HITTypeID generated by the ``CreateHITType`` operation.
        This is an alternative to creating HITs with the ``CreateHIT`` operation, and is the recommended best practice for Requesters who are creating large numbers of HITs.
        CreateHITWithHITType also supports two ways to provide question data: by providing a value for the ``Question`` parameter that fully specifies the contents of the HIT, or by providing a ``HITLayoutId`` and associated ``HITLayoutParameters`` .
.. note::
If a HIT is created with 10 or more maximum assignments, there is an additional fee. For more information, see `Amazon Mechanical Turk Pricing <https://requester.mturk.com/pricing>`__ .
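        An illustrative two-step flow (values are placeholders; ``question_xml`` stands for the HIT's question XML)::
            hit_type = client.create_hit_type(
                Title='Categorize an image',
                Description='Choose the category that best matches the image shown.',
                Reward='0.05',
                AssignmentDurationInSeconds=600
            )
            response = client.create_hit_with_hit_type(
                HITTypeId=hit_type['HITTypeId'],
                LifetimeInSeconds=86400,
                Question=question_xml
            )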
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/mturk-requester-2017-01-17/CreateHITWithHITType>`_
**Request Syntax**
::
response = client.create_hit_with_hit_type(
HITTypeId='string',
MaxAssignments=123,
LifetimeInSeconds=123,
Question='string',
RequesterAnnotation='string',
UniqueRequestToken='string',
AssignmentReviewPolicy={
'PolicyName': 'string',
'Parameters': [
{
'Key': 'string',
'Values': [
'string',
],
'MapEntries': [
{
'Key': 'string',
'Values': [
'string',
]
},
]
},
]
},
HITReviewPolicy={
'PolicyName': 'string',
'Parameters': [
{
'Key': 'string',
'Values': [
'string',
],
'MapEntries': [
{
'Key': 'string',
'Values': [
'string',
]
},
]
},
]
},
HITLayoutId='string',
HITLayoutParameters=[
{
'Name': 'string',
'Value': 'string'
},
]
)
**Response Syntax**
::
{
'HIT': {
'HITId': 'string',
'HITTypeId': 'string',
'HITGroupId': 'string',
'HITLayoutId': 'string',
'CreationTime': datetime(2015, 1, 1),
'Title': 'string',
'Description': 'string',
'Question': 'string',
'Keywords': 'string',
'HITStatus': 'Assignable'|'Unassignable'|'Reviewable'|'Reviewing'|'Disposed',
'MaxAssignments': 123,
'Reward': 'string',
'AutoApprovalDelayInSeconds': 123,
'Expiration': datetime(2015, 1, 1),
'AssignmentDurationInSeconds': 123,
'RequesterAnnotation': 'string',
'QualificationRequirements': [
{
'QualificationTypeId': 'string',
'Comparator': 'LessThan'|'LessThanOrEqualTo'|'GreaterThan'|'GreaterThanOrEqualTo'|'EqualTo'|'NotEqualTo'|'Exists'|'DoesNotExist'|'In'|'NotIn',
'IntegerValues': [
123,
],
'LocaleValues': [
{
'Country': 'string',
'Subdivision': 'string'
},
],
'RequiredToPreview': True|False,
'ActionsGuarded': 'Accept'|'PreviewAndAccept'|'DiscoverPreviewAndAccept'
},
],
'HITReviewStatus': 'NotReviewed'|'MarkedForReview'|'ReviewedAppropriate'|'ReviewedInappropriate',
'NumberOfAssignmentsPending': 123,
'NumberOfAssignmentsAvailable': 123,
'NumberOfAssignmentsCompleted': 123
}
}
**Response Structure**
- *(dict) --*
- **HIT** *(dict) --*
Contains the newly created HIT data. For a description of the HIT data structure as it appears in responses, see the HIT Data Structure documentation.
- **HITId** *(string) --*
A unique identifier for the HIT.
- **HITTypeId** *(string) --*
                The ID of the HIT type of this HIT.
- **HITGroupId** *(string) --*
The ID of the HIT Group of this HIT.
- **HITLayoutId** *(string) --*
The ID of the HIT Layout of this HIT.
- **CreationTime** *(datetime) --*
The date and time the HIT was created.
- **Title** *(string) --*
The title of the HIT.
- **Description** *(string) --*
A general description of the HIT.
- **Question** *(string) --*
                The data the Worker completing the HIT uses to produce the results. This is either a QuestionForm, an HTMLQuestion, or an ExternalQuestion data structure.
- **Keywords** *(string) --*
One or more words or phrases that describe the HIT, separated by commas. Search terms similar to the keywords of a HIT are more likely to have the HIT in the search results.
- **HITStatus** *(string) --*
The status of the HIT and its assignments. Valid Values are Assignable | Unassignable | Reviewable | Reviewing | Disposed.
- **MaxAssignments** *(integer) --*
The number of times the HIT can be accepted and completed before the HIT becomes unavailable.
- **Reward** *(string) --*
A string representing a currency amount.
- **AutoApprovalDelayInSeconds** *(integer) --*
The amount of time, in seconds, after the Worker submits an assignment for the HIT that the results are automatically approved by Amazon Mechanical Turk. This is the amount of time the Requester has to reject an assignment submitted by a Worker before the assignment is auto-approved and the Worker is paid.
- **Expiration** *(datetime) --*
The date and time the HIT expires.
- **AssignmentDurationInSeconds** *(integer) --*
The length of time, in seconds, that a Worker has to complete the HIT after accepting it.
- **RequesterAnnotation** *(string) --*
An arbitrary data field the Requester who created the HIT can use. This field is visible only to the creator of the HIT.
- **QualificationRequirements** *(list) --*
Conditions that a Worker's Qualifications must meet in order to accept the HIT. A HIT can have between zero and ten Qualification requirements. All requirements must be met in order for a Worker to accept the HIT. Additionally, other actions can be restricted using the ``ActionsGuarded`` field on each ``QualificationRequirement`` structure.
- *(dict) --*
The QualificationRequirement data structure describes a Qualification that a Worker must have before the Worker is allowed to accept a HIT. A requirement may optionally state that a Worker must have the Qualification in order to preview the HIT, or see the HIT in search results.
- **QualificationTypeId** *(string) --*
The ID of the Qualification type for the requirement.
- **Comparator** *(string) --*
The kind of comparison to make against a Qualification's value. You can compare a Qualification's value to an IntegerValue to see if it is LessThan, LessThanOrEqualTo, GreaterThan, GreaterThanOrEqualTo, EqualTo, or NotEqualTo the IntegerValue. You can compare it to a LocaleValue to see if it is EqualTo, or NotEqualTo the LocaleValue. You can check to see if the value is In or NotIn a set of IntegerValue or LocaleValue values. Lastly, a Qualification requirement can also test if a Qualification Exists or DoesNotExist in the user's profile, regardless of its value.
- **IntegerValues** *(list) --*
The integer value to compare against the Qualification's value. IntegerValue must not be present if Comparator is Exists or DoesNotExist. IntegerValue can only be used if the Qualification type has an integer value; it cannot be used with the Worker_Locale QualificationType ID. When performing a set comparison by using the In or the NotIn comparator, you can use up to 15 IntegerValue elements in a QualificationRequirement data structure.
- *(integer) --*
- **LocaleValues** *(list) --*
                    The locale value to compare against the Qualification's value. The locale value must be a valid ISO 3166 country code, optionally combined with a valid ISO 3166-2 subdivision code. LocaleValue can only be used with a Worker_Locale QualificationType ID. LocaleValue can only be used with the EqualTo, NotEqualTo, In, and NotIn comparators. You must only use a single LocaleValue element when using the EqualTo or NotEqualTo comparators. When performing a set comparison by using the In or the NotIn comparator, you can use up to 30 LocaleValue elements in a QualificationRequirement data structure.
- *(dict) --*
The Locale data structure represents a geographical region or location.
- **Country** *(string) --*
The country of the locale. Must be a valid ISO 3166 country code. For example, the code US refers to the United States of America.
- **Subdivision** *(string) --*
The state or subdivision of the locale. A valid ISO 3166-2 subdivision code. For example, the code WA refers to the state of Washington.
- **RequiredToPreview** *(boolean) --*
DEPRECATED: Use the ``ActionsGuarded`` field instead. If RequiredToPreview is true, the question data for the HIT will not be shown when a Worker whose Qualifications do not meet this requirement tries to preview the HIT. That is, a Worker's Qualifications must meet all of the requirements for which RequiredToPreview is true in order to preview the HIT. If a Worker meets all of the requirements where RequiredToPreview is true (or if there are no such requirements), but does not meet all of the requirements for the HIT, the Worker will be allowed to preview the HIT's question data, but will not be allowed to accept and complete the HIT. The default is false. This should not be used in combination with the ``ActionsGuarded`` field.
- **ActionsGuarded** *(string) --*
                  Setting this attribute prevents Workers whose Qualifications do not meet this QualificationRequirement from taking the specified action. Valid arguments include "Accept" (Worker cannot accept the HIT, but can preview the HIT and see it in their search results), "PreviewAndAccept" (Worker cannot accept or preview the HIT, but can see the HIT in their search results), and "DiscoverPreviewAndAccept" (Worker cannot accept, preview, or see the HIT in their search results). It's possible for you to create a HIT with multiple QualificationRequirements (which can have different values for the ``ActionsGuarded`` attribute). In this case, the Worker is only permitted to perform an action when they have met all QualificationRequirements guarding the action. The actions in the order of least restrictive to most restrictive are Discover, Preview and Accept. For example, if a Worker meets all QualificationRequirements that are set to DiscoverPreviewAndAccept, but does not meet all requirements that are set with PreviewAndAccept, then the Worker will be able to Discover, i.e. see the HIT in their search results, but will not be able to Preview or Accept the HIT. ActionsGuarded should not be used in combination with the ``RequiredToPreview`` field.
- **HITReviewStatus** *(string) --*
Indicates the review status of the HIT. Valid Values are NotReviewed | MarkedForReview | ReviewedAppropriate | ReviewedInappropriate.
- **NumberOfAssignmentsPending** *(integer) --*
The number of assignments for this HIT that are being previewed or have been accepted by Workers, but have not yet been submitted, returned, or abandoned.
- **NumberOfAssignmentsAvailable** *(integer) --*
The number of assignments for this HIT that are available for Workers to accept.
- **NumberOfAssignmentsCompleted** *(integer) --*
The number of assignments for this HIT that have been approved or rejected.
:type HITTypeId: string
:param HITTypeId: **[REQUIRED]**
The HIT type ID you want to create this HIT with.
:type MaxAssignments: integer
:param MaxAssignments:
The number of times the HIT can be accepted and completed before the HIT becomes unavailable.
:type LifetimeInSeconds: integer
:param LifetimeInSeconds: **[REQUIRED]**
An amount of time, in seconds, after which the HIT is no longer available for users to accept. After the lifetime of the HIT elapses, the HIT no longer appears in HIT searches, even if not all of the assignments for the HIT have been accepted.
:type Question: string
:param Question:
The data the person completing the HIT uses to produce the results.
Constraints: Must be a QuestionForm data structure, an ExternalQuestion data structure, or an HTMLQuestion data structure. The XML question data must not be larger than 64 kilobytes (65,535 bytes) in size, including whitespace.
Either a Question parameter or a HITLayoutId parameter must be provided.
:type RequesterAnnotation: string
:param RequesterAnnotation:
An arbitrary data field. The RequesterAnnotation parameter lets your application attach arbitrary data to the HIT for tracking purposes. For example, this parameter could be an identifier internal to the Requester\'s application that corresponds with the HIT.
The RequesterAnnotation parameter for a HIT is only visible to the Requester who created the HIT. It is not shown to the Worker, or any other Requester.
The RequesterAnnotation parameter may be different for each HIT you submit. It does not affect how your HITs are grouped.
:type UniqueRequestToken: string
:param UniqueRequestToken:
          A unique identifier for this request, which allows you to retry the call on error without creating duplicate HITs. This is useful in cases such as network timeouts where it is unclear whether or not the call succeeded on the server. If the HIT already exists in the system from a previous call using the same UniqueRequestToken, subsequent calls will return an ``AWS.MechanicalTurk.HitAlreadyExists`` error with a message containing the HITId.
.. note::
Note: It is your responsibility to ensure uniqueness of the token. The unique token expires after 24 hours. Subsequent calls using the same UniqueRequestToken made after the 24 hour limit could create duplicate HITs.
:type AssignmentReviewPolicy: dict
:param AssignmentReviewPolicy:
The Assignment-level Review Policy applies to the assignments under the HIT. You can specify for Mechanical Turk to take various actions based on the policy.
- **PolicyName** *(string) --* **[REQUIRED]**
Name of a Review Policy: SimplePlurality/2011-09-01 or ScoreMyKnownAnswers/2011-09-01
- **Parameters** *(list) --*
Name of the parameter from the Review policy.
- *(dict) --*
Name of the parameter from the Review policy.
- **Key** *(string) --*
              Name of the parameter from the list of Review Policies.
- **Values** *(list) --*
The list of values of the Parameter
- *(string) --*
- **MapEntries** *(list) --*
List of ParameterMapEntry objects.
- *(dict) --*
This data structure is the data type for the AnswerKey parameter of the ScoreMyKnownAnswers/2011-09-01 Review Policy.
- **Key** *(string) --*
The QuestionID from the HIT that is used to identify which question requires Mechanical Turk to score as part of the ScoreMyKnownAnswers/2011-09-01 Review Policy.
- **Values** *(list) --*
The list of answers to the question specified in the MapEntry Key element. The Worker must match all values in order for the answer to be scored correctly.
- *(string) --*
:type HITReviewPolicy: dict
:param HITReviewPolicy:
The HIT-level Review Policy applies to the HIT. You can specify for Mechanical Turk to take various actions based on the policy.
- **PolicyName** *(string) --* **[REQUIRED]**
Name of a Review Policy: SimplePlurality/2011-09-01 or ScoreMyKnownAnswers/2011-09-01
- **Parameters** *(list) --*
Name of the parameter from the Review policy.
- *(dict) --*
Name of the parameter from the Review policy.
- **Key** *(string) --*
Name of the parameter from the list of Review Policies.
- **Values** *(list) --*
The list of values of the Parameter
- *(string) --*
- **MapEntries** *(list) --*
List of ParameterMapEntry objects.
- *(dict) --*
This data structure is the data type for the AnswerKey parameter of the ScoreMyKnownAnswers/2011-09-01 Review Policy.
- **Key** *(string) --*
The QuestionID from the HIT that is used to identify which question requires Mechanical Turk to score as part of the ScoreMyKnownAnswers/2011-09-01 Review Policy.
- **Values** *(list) --*
The list of answers to the question specified in the MapEntry Key element. The Worker must match all values in order for the answer to be scored correctly.
- *(string) --*
:type HITLayoutId: string
:param HITLayoutId:
The HITLayoutId allows you to use a pre-existing HIT design with placeholder values and create an additional HIT by providing those values as HITLayoutParameters.
Constraints: Either a Question parameter or a HITLayoutId parameter must be provided.
:type HITLayoutParameters: list
:param HITLayoutParameters:
If the HITLayoutId is provided, any placeholder values must be filled in with values using the HITLayoutParameter structure. For more information, see HITLayout.
- *(dict) --*
The HITLayoutParameter data structure defines parameter values used with a HITLayout. A HITLayout is a reusable Amazon Mechanical Turk project template used to provide Human Intelligence Task (HIT) question data for CreateHIT.
- **Name** *(string) --* **[REQUIRED]**
The name of the parameter in the HITLayout.
- **Value** *(string) --* **[REQUIRED]**
The value substituted for the parameter referenced in the HITLayout.
:rtype: dict
:returns:
"""
pass
def create_qualification_type(self, Name: str, Description: str, QualificationTypeStatus: str, Keywords: str = None, RetryDelayInSeconds: int = None, Test: str = None, AnswerKey: str = None, TestDurationInSeconds: int = None, AutoGranted: bool = None, AutoGrantedValue: int = None) -> Dict:
"""
The ``CreateQualificationType`` operation creates a new Qualification type, which is represented by a ``QualificationType`` data structure.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/mturk-requester-2017-01-17/CreateQualificationType>`_
**Request Syntax**
::
response = client.create_qualification_type(
Name='string',
Keywords='string',
Description='string',
QualificationTypeStatus='Active'|'Inactive',
RetryDelayInSeconds=123,
Test='string',
AnswerKey='string',
TestDurationInSeconds=123,
AutoGranted=True|False,
AutoGrantedValue=123
)
**Response Syntax**
::
{
'QualificationType': {
'QualificationTypeId': 'string',
'CreationTime': datetime(2015, 1, 1),
'Name': 'string',
'Description': 'string',
'Keywords': 'string',
'QualificationTypeStatus': 'Active'|'Inactive',
'Test': 'string',
'TestDurationInSeconds': 123,
'AnswerKey': 'string',
'RetryDelayInSeconds': 123,
'IsRequestable': True|False,
'AutoGranted': True|False,
'AutoGrantedValue': 123
}
}
**Response Structure**
- *(dict) --*
- **QualificationType** *(dict) --*
The created Qualification type, returned as a QualificationType data structure.
- **QualificationTypeId** *(string) --*
A unique identifier for the Qualification type. A Qualification type is given a Qualification type ID when you call the CreateQualificationType operation.
- **CreationTime** *(datetime) --*
The date and time the Qualification type was created.
- **Name** *(string) --*
The name of the Qualification type. The type name is used to identify the type, and to find the type using a Qualification type search.
- **Description** *(string) --*
A long description for the Qualification type.
- **Keywords** *(string) --*
One or more words or phrases that describe the Qualification type, separated by commas. The Keywords make the type easier to find using a search.
- **QualificationTypeStatus** *(string) --*
The status of the Qualification type. A Qualification type's status determines if users can apply to receive a Qualification of this type, and if HITs can be created with requirements based on this type. Valid values are Active | Inactive.
- **Test** *(string) --*
The questions for a Qualification test associated with this Qualification type that a user can take to obtain a Qualification of this type. This parameter must be specified if AnswerKey is present. A Qualification type cannot have both a specified Test parameter and an AutoGranted value of true.
- **TestDurationInSeconds** *(integer) --*
The amount of time, in seconds, given to a Worker to complete the Qualification test, beginning from the time the Worker requests the Qualification.
- **AnswerKey** *(string) --*
The answers to the Qualification test specified in the Test parameter.
- **RetryDelayInSeconds** *(integer) --*
The amount of time, in seconds, Workers must wait after taking the Qualification test before they can take it again. Workers can take a Qualification test multiple times if they were not granted the Qualification from a previous attempt, or if the test offers a gradient score and they want a better score. If not specified, retries are disabled and Workers can request a Qualification only once.
- **IsRequestable** *(boolean) --*
Specifies whether the Qualification type is one that a user can request through the Amazon Mechanical Turk web site, such as by taking a Qualification test. This value is False for Qualifications assigned automatically by the system. Valid values are True | False.
- **AutoGranted** *(boolean) --*
Specifies that requests for the Qualification type are granted immediately, without prompting the Worker with a Qualification test. Valid values are True | False.
- **AutoGrantedValue** *(integer) --*
The Qualification integer value to use for automatically granted Qualifications, if AutoGranted is true. This is 1 by default.
:type Name: string
:param Name: **[REQUIRED]**
The name you give to the Qualification type. The type name is used to represent the Qualification to Workers, and to find the type using a Qualification type search. It must be unique across all of your Qualification types.
:type Keywords: string
:param Keywords:
One or more words or phrases that describe the Qualification type, separated by commas. The keywords of a type make the type easier to find during a search.
:type Description: string
:param Description: **[REQUIRED]**
A long description for the Qualification type. On the Amazon Mechanical Turk website, the long description is displayed when a Worker examines a Qualification type.
:type QualificationTypeStatus: string
:param QualificationTypeStatus: **[REQUIRED]**
The initial status of the Qualification type.
Constraints: Valid values are: Active | Inactive
:type RetryDelayInSeconds: integer
:param RetryDelayInSeconds:
The number of seconds that a Worker must wait after requesting a Qualification of the Qualification type before the worker can retry the Qualification request.
Constraints: None. If not specified, retries are disabled and Workers can request a Qualification of this type only once, even if the Worker has not been granted the Qualification. It is not possible to disable retries for a Qualification type after it has been created with retries enabled. If you want to disable retries, you must delete the existing retry-enabled Qualification type and then create a new Qualification type with retries disabled.
:type Test: string
:param Test:
The questions for the Qualification test a Worker must answer correctly to obtain a Qualification of this type. If this parameter is specified, ``TestDurationInSeconds`` must also be specified.
Constraints: Must not be longer than 65535 bytes. Must be a QuestionForm data structure. This parameter cannot be specified if AutoGranted is true.
Constraints: None. If not specified, the Worker may request the Qualification without answering any questions.
:type AnswerKey: string
:param AnswerKey:
The answers to the Qualification test specified in the Test parameter, in the form of an AnswerKey data structure.
Constraints: Must not be longer than 65535 bytes.
Constraints: None. If not specified, you must process Qualification requests manually.
:type TestDurationInSeconds: integer
:param TestDurationInSeconds:
The number of seconds the Worker has to complete the Qualification test, starting from the time the Worker requests the Qualification.
:type AutoGranted: boolean
:param AutoGranted:
Specifies whether requests for the Qualification type are granted immediately, without prompting the Worker with a Qualification test.
Constraints: If the Test parameter is specified, this parameter cannot be true.
:type AutoGrantedValue: integer
:param AutoGrantedValue:
The Qualification value to use for automatically granted Qualifications. This parameter is used only if the AutoGranted parameter is true.
:rtype: dict
:returns:
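**Example**
A minimal, illustrative sketch of creating an auto-granted Qualification type; the name and description shown are placeholders, not real values::
import boto3
client = boto3.client('mturk')
response = client.create_qualification_type(
    Name='Sample annotator pool',
    Description='Workers admitted to our annotation project.',
    QualificationTypeStatus='Active',
    AutoGranted=True,
    AutoGrantedValue=1
)
# The new type ID is needed for later Qualification operations.
print(response['QualificationType']['QualificationTypeId'])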
"""
pass
def create_worker_block(self, WorkerId: str, Reason: str) -> Dict:
"""
The ``CreateWorkerBlock`` operation allows you to prevent a Worker from working on your HITs. For example, you can block a Worker who is producing poor quality work. You can block up to 100,000 Workers.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/mturk-requester-2017-01-17/CreateWorkerBlock>`_
**Request Syntax**
::
response = client.create_worker_block(
WorkerId='string',
Reason='string'
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --*
:type WorkerId: string
:param WorkerId: **[REQUIRED]**
The ID of the Worker to block.
:type Reason: string
:param Reason: **[REQUIRED]**
A message explaining the reason for blocking the Worker. This parameter enables you to keep track of your Workers. The Worker does not see this message.
:rtype: dict
:returns:
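**Example**
An illustrative sketch; the Worker ID shown is a made-up placeholder::
import boto3
client = boto3.client('mturk')
# The Reason is recorded for your own tracking; the Worker never sees it.
client.create_worker_block(
    WorkerId='A1EXAMPLEWORKERID',
    Reason='Consistently submitted incomplete answers.'
)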
"""
pass
def delete_hit(self, HITId: str) -> Dict:
"""
The ``DeleteHIT`` operation is used to delete HIT that is no longer needed. Only the Requester who created the HIT can delete it.
You can only dispose of HITs that are in the ``Reviewable`` state, with all of their submitted assignments already either approved or rejected. If you call the DeleteHIT operation on a HIT that is not in the ``Reviewable`` state (for example, that has not expired, or still has active assignments), or on a HIT that is Reviewable but without all of its submitted assignments already approved or rejected, the service will return an error.
.. note::
* HITs are automatically disposed of after 120 days.
* After you dispose of a HIT, you can no longer approve the HIT's rejected assignments.
* Disposed HITs are not returned in results for the ListHITs operation.
* Disposing HITs can improve the performance of operations such as ListReviewableHITs and ListHITs.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/mturk-requester-2017-01-17/DeleteHIT>`_
**Request Syntax**
::
response = client.delete_hit(
HITId='string'
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --*
:type HITId: string
:param HITId: **[REQUIRED]**
The ID of the HIT to be deleted.
:rtype: dict
:returns:
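**Example**
A hedged sketch that attempts to delete a HIT and handles the error raised when it is not yet deletable; the HIT ID is a placeholder::
import boto3
from botocore.exceptions import ClientError
client = boto3.client('mturk')
try:
    client.delete_hit(HITId='3EXAMPLEHITID')
except ClientError as err:
    # Typically raised when the HIT is not Reviewable or still has
    # submitted assignments that are neither approved nor rejected.
    print('Could not delete HIT:', err)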
"""
pass
def delete_qualification_type(self, QualificationTypeId: str) -> Dict:
"""
The ``DeleteQualificationType`` deletes a Qualification type and deletes any HIT types that are associated with the Qualification type.
This operation does not revoke Qualifications already assigned to Workers because the Qualifications might be needed for active HITs. If there are any pending requests for the Qualification type, Amazon Mechanical Turk rejects those requests. After you delete a Qualification type, you can no longer use it to create HITs or HIT types.
.. note::
DeleteQualificationType must wait for all the HITs that use the deleted Qualification type to be deleted before completing. It may take up to 48 hours before DeleteQualificationType completes and the unique name of the Qualification type is available for reuse with CreateQualificationType.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/mturk-requester-2017-01-17/DeleteQualificationType>`_
**Request Syntax**
::
response = client.delete_qualification_type(
QualificationTypeId='string'
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --*
:type QualificationTypeId: string
:param QualificationTypeId: **[REQUIRED]**
The ID of the QualificationType to dispose.
:rtype: dict
:returns:
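**Example**
An illustrative sketch; the type ID is a placeholder, and deletion may take up to 48 hours to fully complete::
import boto3
client = boto3.client('mturk')
client.delete_qualification_type(
    QualificationTypeId='3EXAMPLEQUALTYPEID'
)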
"""
pass
def delete_worker_block(self, WorkerId: str, Reason: str = None) -> Dict:
"""
The ``DeleteWorkerBlock`` operation allows you to reinstate a blocked Worker to work on your HITs. This operation reverses the effects of the CreateWorkerBlock operation. You need the Worker ID to use this operation. If the Worker ID is missing or invalid, this operation fails and returns the message “WorkerId is invalid.” If the specified Worker is not blocked, this operation returns successfully.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/mturk-requester-2017-01-17/DeleteWorkerBlock>`_
**Request Syntax**
::
response = client.delete_worker_block(
WorkerId='string',
Reason='string'
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --*
:type WorkerId: string
:param WorkerId: **[REQUIRED]**
The ID of the Worker to unblock.
:type Reason: string
:param Reason:
A message that explains the reason for unblocking the Worker. The Worker does not see this message.
:rtype: dict
:returns:
"""
pass
def disassociate_qualification_from_worker(self, WorkerId: str, QualificationTypeId: str, Reason: str = None) -> Dict:
"""
The ``DisassociateQualificationFromWorker`` revokes a previously granted Qualification from a user.
You can provide a text message explaining why the Qualification was revoked. The user who had the Qualification can see this message.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/mturk-requester-2017-01-17/DisassociateQualificationFromWorker>`_
**Request Syntax**
::
response = client.disassociate_qualification_from_worker(
WorkerId='string',
QualificationTypeId='string',
Reason='string'
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --*
:type WorkerId: string
:param WorkerId: **[REQUIRED]**
The ID of the Worker who possesses the Qualification to be revoked.
:type QualificationTypeId: string
:param QualificationTypeId: **[REQUIRED]**
The ID of the Qualification type of the Qualification to be revoked.
:type Reason: string
:param Reason:
A text message that explains why the Qualification was revoked. The user who had the Qualification sees this message.
:rtype: dict
:returns:
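**Example**
A sketch revoking a Qualification with a message the affected user can see; both IDs are placeholders::
import boto3
client = boto3.client('mturk')
client.disassociate_qualification_from_worker(
    WorkerId='A1EXAMPLEWORKERID',
    QualificationTypeId='3EXAMPLEQUALTYPEID',
    Reason='Qualification test results have expired.'
)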
"""
pass
def generate_presigned_url(self, ClientMethod: str = None, Params: Dict = None, ExpiresIn: int = None, HttpMethod: str = None):
"""
Generate a presigned url given a client, its method, and arguments
:type ClientMethod: string
:param ClientMethod: The client method to presign for
:type Params: dict
:param Params: The parameters normally passed to
``ClientMethod``.
:type ExpiresIn: int
:param ExpiresIn: The number of seconds the presigned url is valid
for. By default it expires in an hour (3600 seconds)
:type HttpMethod: string
:param HttpMethod: The http method to use on the generated url. By
default, the http method is whatever is used in the method\'s model.
:returns: The presigned url
"""
pass
def get_account_balance(self) -> Dict:
"""
The ``GetAccountBalance`` operation retrieves the amount of money in your Amazon Mechanical Turk account.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/mturk-requester-2017-01-17/GetAccountBalance>`_
**Request Syntax**
::
response = client.get_account_balance()
**Response Syntax**
::
{
'AvailableBalance': 'string',
'OnHoldBalance': 'string'
}
**Response Structure**
- *(dict) --*
- **AvailableBalance** *(string) --*
A string representing a currency amount.
- **OnHoldBalance** *(string) --*
A string representing a currency amount.
:rtype: dict
:returns:
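**Example**
A small sketch; note that both balances come back as currency strings, not numbers::
import boto3
client = boto3.client('mturk')
balance = client.get_account_balance()
print('Available:', balance['AvailableBalance'])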
"""
pass
def get_assignment(self, AssignmentId: str) -> Dict:
"""
The ``GetAssignment`` operation retrieves the details of the specified Assignment.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/mturk-requester-2017-01-17/GetAssignment>`_
**Request Syntax**
::
response = client.get_assignment(
AssignmentId='string'
)
**Response Syntax**
::
{
'Assignment': {
'AssignmentId': 'string',
'WorkerId': 'string',
'HITId': 'string',
'AssignmentStatus': 'Submitted'|'Approved'|'Rejected',
'AutoApprovalTime': datetime(2015, 1, 1),
'AcceptTime': datetime(2015, 1, 1),
'SubmitTime': datetime(2015, 1, 1),
'ApprovalTime': datetime(2015, 1, 1),
'RejectionTime': datetime(2015, 1, 1),
'Deadline': datetime(2015, 1, 1),
'Answer': 'string',
'RequesterFeedback': 'string'
},
'HIT': {
'HITId': 'string',
'HITTypeId': 'string',
'HITGroupId': 'string',
'HITLayoutId': 'string',
'CreationTime': datetime(2015, 1, 1),
'Title': 'string',
'Description': 'string',
'Question': 'string',
'Keywords': 'string',
'HITStatus': 'Assignable'|'Unassignable'|'Reviewable'|'Reviewing'|'Disposed',
'MaxAssignments': 123,
'Reward': 'string',
'AutoApprovalDelayInSeconds': 123,
'Expiration': datetime(2015, 1, 1),
'AssignmentDurationInSeconds': 123,
'RequesterAnnotation': 'string',
'QualificationRequirements': [
{
'QualificationTypeId': 'string',
'Comparator': 'LessThan'|'LessThanOrEqualTo'|'GreaterThan'|'GreaterThanOrEqualTo'|'EqualTo'|'NotEqualTo'|'Exists'|'DoesNotExist'|'In'|'NotIn',
'IntegerValues': [
123,
],
'LocaleValues': [
{
'Country': 'string',
'Subdivision': 'string'
},
],
'RequiredToPreview': True|False,
'ActionsGuarded': 'Accept'|'PreviewAndAccept'|'DiscoverPreviewAndAccept'
},
],
'HITReviewStatus': 'NotReviewed'|'MarkedForReview'|'ReviewedAppropriate'|'ReviewedInappropriate',
'NumberOfAssignmentsPending': 123,
'NumberOfAssignmentsAvailable': 123,
'NumberOfAssignmentsCompleted': 123
}
}
**Response Structure**
- *(dict) --*
- **Assignment** *(dict) --*
The assignment. The response includes one Assignment element.
- **AssignmentId** *(string) --*
A unique identifier for the assignment.
- **WorkerId** *(string) --*
The ID of the Worker who accepted the HIT.
- **HITId** *(string) --*
The ID of the HIT.
- **AssignmentStatus** *(string) --*
The status of the assignment.
- **AutoApprovalTime** *(datetime) --*
If results have been submitted, AutoApprovalTime is the date and time the results of the assignment are considered Approved automatically if they have not already been explicitly approved or rejected by the Requester. This value is derived from the auto-approval delay specified by the Requester in the HIT. This value is omitted from the assignment if the Worker has not yet submitted results.
- **AcceptTime** *(datetime) --*
The date and time the Worker accepted the assignment.
- **SubmitTime** *(datetime) --*
If the Worker has submitted results, SubmitTime is the date and time the assignment was submitted. This value is omitted from the assignment if the Worker has not yet submitted results.
- **ApprovalTime** *(datetime) --*
If the Worker has submitted results and the Requester has approved the results, ApprovalTime is the date and time the Requester approved the results. This value is omitted from the assignment if the Requester has not yet approved the results.
- **RejectionTime** *(datetime) --*
If the Worker has submitted results and the Requester has rejected the results, RejectionTime is the date and time the Requester rejected the results.
- **Deadline** *(datetime) --*
The date and time of the deadline for the assignment. This value is derived from the deadline specification for the HIT and the date and time the Worker accepted the HIT.
- **Answer** *(string) --*
The Worker's answers submitted for the HIT contained in a QuestionFormAnswers document, if the Worker provides an answer. If the Worker does not provide any answers, Answer may contain a QuestionFormAnswers document, or Answer may be empty.
- **RequesterFeedback** *(string) --*
The feedback string included with the call to the ApproveAssignment operation or the RejectAssignment operation, if the Requester approved or rejected the assignment and specified feedback.
- **HIT** *(dict) --*
The HIT associated with this assignment. The response includes one HIT element.
- **HITId** *(string) --*
A unique identifier for the HIT.
- **HITTypeId** *(string) --*
The ID of the HIT type of this HIT
- **HITGroupId** *(string) --*
The ID of the HIT Group of this HIT.
- **HITLayoutId** *(string) --*
The ID of the HIT Layout of this HIT.
- **CreationTime** *(datetime) --*
The date and time the HIT was created.
- **Title** *(string) --*
The title of the HIT.
- **Description** *(string) --*
A general description of the HIT.
- **Question** *(string) --*
The data the Worker completing the HIT uses to produce the results. This is either a QuestionForm, an HTMLQuestion, or an ExternalQuestion data structure.
- **Keywords** *(string) --*
One or more words or phrases that describe the HIT, separated by commas. Search terms similar to the keywords of a HIT are more likely to have the HIT in the search results.
- **HITStatus** *(string) --*
The status of the HIT and its assignments. Valid Values are Assignable | Unassignable | Reviewable | Reviewing | Disposed.
- **MaxAssignments** *(integer) --*
The number of times the HIT can be accepted and completed before the HIT becomes unavailable.
- **Reward** *(string) --*
A string representing a currency amount.
- **AutoApprovalDelayInSeconds** *(integer) --*
The amount of time, in seconds, after the Worker submits an assignment for the HIT that the results are automatically approved by Amazon Mechanical Turk. This is the amount of time the Requester has to reject an assignment submitted by a Worker before the assignment is auto-approved and the Worker is paid.
- **Expiration** *(datetime) --*
The date and time the HIT expires.
- **AssignmentDurationInSeconds** *(integer) --*
The length of time, in seconds, that a Worker has to complete the HIT after accepting it.
- **RequesterAnnotation** *(string) --*
An arbitrary data field the Requester who created the HIT can use. This field is visible only to the creator of the HIT.
- **QualificationRequirements** *(list) --*
Conditions that a Worker's Qualifications must meet in order to accept the HIT. A HIT can have between zero and ten Qualification requirements. All requirements must be met in order for a Worker to accept the HIT. Additionally, other actions can be restricted using the ``ActionsGuarded`` field on each ``QualificationRequirement`` structure.
- *(dict) --*
The QualificationRequirement data structure describes a Qualification that a Worker must have before the Worker is allowed to accept a HIT. A requirement may optionally state that a Worker must have the Qualification in order to preview the HIT, or see the HIT in search results.
- **QualificationTypeId** *(string) --*
The ID of the Qualification type for the requirement.
- **Comparator** *(string) --*
The kind of comparison to make against a Qualification's value. You can compare a Qualification's value to an IntegerValue to see if it is LessThan, LessThanOrEqualTo, GreaterThan, GreaterThanOrEqualTo, EqualTo, or NotEqualTo the IntegerValue. You can compare it to a LocaleValue to see if it is EqualTo, or NotEqualTo the LocaleValue. You can check to see if the value is In or NotIn a set of IntegerValue or LocaleValue values. Lastly, a Qualification requirement can also test if a Qualification Exists or DoesNotExist in the user's profile, regardless of its value.
- **IntegerValues** *(list) --*
The integer value to compare against the Qualification's value. IntegerValue must not be present if Comparator is Exists or DoesNotExist. IntegerValue can only be used if the Qualification type has an integer value; it cannot be used with the Worker_Locale QualificationType ID. When performing a set comparison by using the In or the NotIn comparator, you can use up to 15 IntegerValue elements in a QualificationRequirement data structure.
- *(integer) --*
- **LocaleValues** *(list) --*
The locale value to compare against the Qualification's value. The locale value must be a valid ISO 3166 country code, and may include an ISO 3166-2 subdivision. LocaleValue can only be used with a Worker_Locale QualificationType ID. LocaleValue can only be used with the EqualTo, NotEqualTo, In, and NotIn comparators. You must use only a single LocaleValue element when using the EqualTo or NotEqualTo comparators. When performing a set comparison by using the In or the NotIn comparator, you can use up to 30 LocaleValue elements in a QualificationRequirement data structure.
- *(dict) --*
The Locale data structure represents a geographical region or location.
- **Country** *(string) --*
The country of the locale. Must be a valid ISO 3166 country code. For example, the code US refers to the United States of America.
- **Subdivision** *(string) --*
The state or subdivision of the locale. A valid ISO 3166-2 subdivision code. For example, the code WA refers to the state of Washington.
- **RequiredToPreview** *(boolean) --*
DEPRECATED: Use the ``ActionsGuarded`` field instead. If RequiredToPreview is true, the question data for the HIT will not be shown when a Worker whose Qualifications do not meet this requirement tries to preview the HIT. That is, a Worker's Qualifications must meet all of the requirements for which RequiredToPreview is true in order to preview the HIT. If a Worker meets all of the requirements where RequiredToPreview is true (or if there are no such requirements), but does not meet all of the requirements for the HIT, the Worker will be allowed to preview the HIT's question data, but will not be allowed to accept and complete the HIT. The default is false. This should not be used in combination with the ``ActionsGuarded`` field.
- **ActionsGuarded** *(string) --*
Setting this attribute prevents Workers whose Qualifications do not meet this QualificationRequirement from taking the specified action. Valid arguments include "Accept" (Worker cannot accept the HIT, but can preview the HIT and see it in their search results), "PreviewAndAccept" (Worker cannot accept or preview the HIT, but can see the HIT in their search results), and "DiscoverPreviewAndAccept" (Worker cannot accept, preview, or see the HIT in their search results). It's possible for you to create a HIT with multiple QualificationRequirements (which can have different values for the ActionsGuarded attribute). In this case, the Worker is only permitted to perform an action when they have met all QualificationRequirements guarding the action. The actions in the order of least restrictive to most restrictive are Discover, Preview and Accept. For example, if a Worker meets all QualificationRequirements that are set to DiscoverPreviewAndAccept, but does not meet all requirements that are set with PreviewAndAccept, then the Worker will be able to Discover, i.e. see the HIT in their search results, but will not be able to Preview or Accept the HIT. ActionsGuarded should not be used in combination with the ``RequiredToPreview`` field.
- **HITReviewStatus** *(string) --*
Indicates the review status of the HIT. Valid Values are NotReviewed | MarkedForReview | ReviewedAppropriate | ReviewedInappropriate.
- **NumberOfAssignmentsPending** *(integer) --*
The number of assignments for this HIT that are being previewed or have been accepted by Workers, but have not yet been submitted, returned, or abandoned.
- **NumberOfAssignmentsAvailable** *(integer) --*
The number of assignments for this HIT that are available for Workers to accept.
- **NumberOfAssignmentsCompleted** *(integer) --*
The number of assignments for this HIT that have been approved or rejected.
:type AssignmentId: string
:param AssignmentId: **[REQUIRED]**
The ID of the Assignment to be retrieved.
:rtype: dict
:returns:
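**Example**
A sketch fetching one assignment and inspecting its status; the assignment ID is a placeholder, and the Answer field (when present) is a QuestionFormAnswers XML document that you would parse separately::
import boto3
client = boto3.client('mturk')
response = client.get_assignment(AssignmentId='3EXAMPLEASSIGNMENTID')
assignment = response['Assignment']
# SubmitTime is absent until the Worker has submitted results.
print(assignment['AssignmentStatus'], assignment.get('SubmitTime'))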
"""
pass
def get_file_upload_url(self, AssignmentId: str, QuestionIdentifier: str) -> Dict:
"""
The ``GetFileUploadURL`` operation generates and returns a temporary URL. You use the temporary URL to retrieve a file uploaded by a Worker as an answer to a FileUploadAnswer question for a HIT. The temporary URL is generated the instant the GetFileUploadURL operation is called, and is valid for 60 seconds. You can get a temporary file upload URL any time until the HIT is disposed. After the HIT is disposed, any uploaded files are deleted, and cannot be retrieved. Pending Deprecation on December 12, 2017. The Answer Specification structure will no longer support the ``FileUploadAnswer`` element to be used for the QuestionForm data structure. Instead, we recommend that Requesters who want to create HITs asking Workers to upload files use Amazon S3.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/mturk-requester-2017-01-17/GetFileUploadURL>`_
**Request Syntax**
::
response = client.get_file_upload_url(
AssignmentId='string',
QuestionIdentifier='string'
)
**Response Syntax**
::
{
'FileUploadURL': 'string'
}
**Response Structure**
- *(dict) --*
- **FileUploadURL** *(string) --*
A temporary URL for the file that the Worker uploaded for the answer.
:type AssignmentId: string
:param AssignmentId: **[REQUIRED]**
The ID of the assignment that contains the question with a FileUploadAnswer.
:type QuestionIdentifier: string
:param QuestionIdentifier: **[REQUIRED]**
The identifier of the question with a FileUploadAnswer, as specified in the QuestionForm of the HIT.
:rtype: dict
:returns:
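**Example**
A hedged sketch; both identifiers are placeholders, and the returned URL must be fetched within 60 seconds of this call::
import boto3
import urllib.request
client = boto3.client('mturk')
response = client.get_file_upload_url(
    AssignmentId='3EXAMPLEASSIGNMENTID',
    QuestionIdentifier='attachment'
)
# Download immediately; the temporary URL expires after 60 seconds.
with urllib.request.urlopen(response['FileUploadURL']) as f:
    data = f.read()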
"""
pass
def get_hit(self, HITId: str) -> Dict:
"""
The ``GetHIT`` operation retrieves the details of the specified HIT.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/mturk-requester-2017-01-17/GetHIT>`_
**Request Syntax**
::
response = client.get_hit(
HITId='string'
)
**Response Syntax**
::
{
'HIT': {
'HITId': 'string',
'HITTypeId': 'string',
'HITGroupId': 'string',
'HITLayoutId': 'string',
'CreationTime': datetime(2015, 1, 1),
'Title': 'string',
'Description': 'string',
'Question': 'string',
'Keywords': 'string',
'HITStatus': 'Assignable'|'Unassignable'|'Reviewable'|'Reviewing'|'Disposed',
'MaxAssignments': 123,
'Reward': 'string',
'AutoApprovalDelayInSeconds': 123,
'Expiration': datetime(2015, 1, 1),
'AssignmentDurationInSeconds': 123,
'RequesterAnnotation': 'string',
'QualificationRequirements': [
{
'QualificationTypeId': 'string',
'Comparator': 'LessThan'|'LessThanOrEqualTo'|'GreaterThan'|'GreaterThanOrEqualTo'|'EqualTo'|'NotEqualTo'|'Exists'|'DoesNotExist'|'In'|'NotIn',
'IntegerValues': [
123,
],
'LocaleValues': [
{
'Country': 'string',
'Subdivision': 'string'
},
],
'RequiredToPreview': True|False,
'ActionsGuarded': 'Accept'|'PreviewAndAccept'|'DiscoverPreviewAndAccept'
},
],
'HITReviewStatus': 'NotReviewed'|'MarkedForReview'|'ReviewedAppropriate'|'ReviewedInappropriate',
'NumberOfAssignmentsPending': 123,
'NumberOfAssignmentsAvailable': 123,
'NumberOfAssignmentsCompleted': 123
}
}
**Response Structure**
- *(dict) --*
- **HIT** *(dict) --*
Contains the requested HIT data.
- **HITId** *(string) --*
A unique identifier for the HIT.
- **HITTypeId** *(string) --*
The ID of the HIT type of this HIT
- **HITGroupId** *(string) --*
The ID of the HIT Group of this HIT.
- **HITLayoutId** *(string) --*
The ID of the HIT Layout of this HIT.
- **CreationTime** *(datetime) --*
The date and time the HIT was created.
- **Title** *(string) --*
The title of the HIT.
- **Description** *(string) --*
A general description of the HIT.
- **Question** *(string) --*
The data the Worker completing the HIT uses to produce the results. This is either a QuestionForm, an HTMLQuestion, or an ExternalQuestion data structure.
- **Keywords** *(string) --*
One or more words or phrases that describe the HIT, separated by commas. Search terms similar to the keywords of a HIT are more likely to have the HIT in the search results.
- **HITStatus** *(string) --*
The status of the HIT and its assignments. Valid Values are Assignable | Unassignable | Reviewable | Reviewing | Disposed.
- **MaxAssignments** *(integer) --*
The number of times the HIT can be accepted and completed before the HIT becomes unavailable.
- **Reward** *(string) --*
A string representing a currency amount.
- **AutoApprovalDelayInSeconds** *(integer) --*
The amount of time, in seconds, after the Worker submits an assignment for the HIT that the results are automatically approved by Amazon Mechanical Turk. This is the amount of time the Requester has to reject an assignment submitted by a Worker before the assignment is auto-approved and the Worker is paid.
- **Expiration** *(datetime) --*
The date and time the HIT expires.
- **AssignmentDurationInSeconds** *(integer) --*
The length of time, in seconds, that a Worker has to complete the HIT after accepting it.
- **RequesterAnnotation** *(string) --*
An arbitrary data field the Requester who created the HIT can use. This field is visible only to the creator of the HIT.
- **QualificationRequirements** *(list) --*
Conditions that a Worker's Qualifications must meet in order to accept the HIT. A HIT can have between zero and ten Qualification requirements. All requirements must be met in order for a Worker to accept the HIT. Additionally, other actions can be restricted using the ``ActionsGuarded`` field on each ``QualificationRequirement`` structure.
- *(dict) --*
The QualificationRequirement data structure describes a Qualification that a Worker must have before the Worker is allowed to accept a HIT. A requirement may optionally state that a Worker must have the Qualification in order to preview the HIT, or see the HIT in search results.
- **QualificationTypeId** *(string) --*
The ID of the Qualification type for the requirement.
- **Comparator** *(string) --*
The kind of comparison to make against a Qualification's value. You can compare a Qualification's value to an IntegerValue to see if it is LessThan, LessThanOrEqualTo, GreaterThan, GreaterThanOrEqualTo, EqualTo, or NotEqualTo the IntegerValue. You can compare it to a LocaleValue to see if it is EqualTo, or NotEqualTo the LocaleValue. You can check to see if the value is In or NotIn a set of IntegerValue or LocaleValue values. Lastly, a Qualification requirement can also test if a Qualification Exists or DoesNotExist in the user's profile, regardless of its value.
- **IntegerValues** *(list) --*
The integer value to compare against the Qualification's value. IntegerValue must not be present if Comparator is Exists or DoesNotExist. IntegerValue can only be used if the Qualification type has an integer value; it cannot be used with the Worker_Locale QualificationType ID. When performing a set comparison by using the In or the NotIn comparator, you can use up to 15 IntegerValue elements in a QualificationRequirement data structure.
- *(integer) --*
- **LocaleValues** *(list) --*
The locale value to compare against the Qualification's value. The locale value must be a valid ISO 3166 country code, and may include an ISO 3166-2 subdivision. LocaleValue can only be used with a Worker_Locale QualificationType ID. LocaleValue can only be used with the EqualTo, NotEqualTo, In, and NotIn comparators. You must use only a single LocaleValue element when using the EqualTo or NotEqualTo comparators. When performing a set comparison by using the In or the NotIn comparator, you can use up to 30 LocaleValue elements in a QualificationRequirement data structure.
- *(dict) --*
The Locale data structure represents a geographical region or location.
- **Country** *(string) --*
The country of the locale. Must be a valid ISO 3166 country code. For example, the code US refers to the United States of America.
- **Subdivision** *(string) --*
The state or subdivision of the locale. A valid ISO 3166-2 subdivision code. For example, the code WA refers to the state of Washington.
- **RequiredToPreview** *(boolean) --*
DEPRECATED: Use the ``ActionsGuarded`` field instead. If RequiredToPreview is true, the question data for the HIT will not be shown when a Worker whose Qualifications do not meet this requirement tries to preview the HIT. That is, a Worker's Qualifications must meet all of the requirements for which RequiredToPreview is true in order to preview the HIT. If a Worker meets all of the requirements where RequiredToPreview is true (or if there are no such requirements), but does not meet all of the requirements for the HIT, the Worker will be allowed to preview the HIT's question data, but will not be allowed to accept and complete the HIT. The default is false. This should not be used in combination with the ``ActionsGuarded`` field.
- **ActionsGuarded** *(string) --*
Setting this attribute prevents Workers whose Qualifications do not meet this QualificationRequirement from taking the specified action. Valid arguments include "Accept" (Worker cannot accept the HIT, but can preview the HIT and see it in their search results), "PreviewAndAccept" (Worker cannot accept or preview the HIT, but can see the HIT in their search results), and "DiscoverPreviewAndAccept" (Worker cannot accept, preview, or see the HIT in their search results). It's possible for you to create a HIT with multiple QualificationRequirements (which can have different values for the ActionsGuarded attribute). In this case, the Worker is only permitted to perform an action when they have met all QualificationRequirements guarding the action. The actions in the order of least restrictive to most restrictive are Discover, Preview and Accept. For example, if a Worker meets all QualificationRequirements that are set to DiscoverPreviewAndAccept, but does not meet all requirements that are set with PreviewAndAccept, then the Worker will be able to Discover, i.e. see the HIT in their search results, but will not be able to Preview or Accept the HIT. ActionsGuarded should not be used in combination with the ``RequiredToPreview`` field.
- **HITReviewStatus** *(string) --*
Indicates the review status of the HIT. Valid Values are NotReviewed | MarkedForReview | ReviewedAppropriate | ReviewedInappropriate.
- **NumberOfAssignmentsPending** *(integer) --*
The number of assignments for this HIT that are being previewed or have been accepted by Workers, but have not yet been submitted, returned, or abandoned.
- **NumberOfAssignmentsAvailable** *(integer) --*
The number of assignments for this HIT that are available for Workers to accept.
- **NumberOfAssignmentsCompleted** *(integer) --*
The number of assignments for this HIT that have been approved or rejected.
:type HITId: string
:param HITId: **[REQUIRED]**
The ID of the HIT to be retrieved.
:rtype: dict
:returns:
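**Example**
A sketch checking whether a HIT still has work outstanding; the HIT ID is a placeholder::
import boto3
client = boto3.client('mturk')
hit = client.get_hit(HITId='3EXAMPLEHITID')['HIT']
print(hit['HITStatus'], hit['NumberOfAssignmentsPending'])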
"""
pass
def get_paginator(self, operation_name: str = None) -> Paginator:
"""
Create a paginator for an operation.
:type operation_name: string
:param operation_name: The operation name. This is the same name
as the method name on the client. For example, if the
method name is ``create_foo``, and you\'d normally invoke the
operation as ``client.create_foo(**kwargs)``, if the
``create_foo`` operation can be paginated, you can use the
call ``client.get_paginator(\"create_foo\")``.
:raise OperationNotPageableError: Raised if the operation is not
pageable. You can use the ``client.can_paginate`` method to
check if an operation is pageable.
:rtype: L{botocore.paginate.Paginator}
:return: A paginator object.
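**Example**
For instance, assuming ``list_hits`` is pageable in your botocore version, a sketch like this iterates over every HIT without handling NextToken manually::
import boto3
client = boto3.client('mturk')
paginator = client.get_paginator('list_hits')
for page in paginator.paginate():
    for hit in page['HITs']:
        print(hit['HITId'], hit['HITStatus'])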
"""
pass
def get_qualification_score(self, QualificationTypeId: str, WorkerId: str) -> Dict:
"""
The ``GetQualificationScore`` operation returns the value of a Worker's Qualification for a given Qualification type.
To get a Worker's Qualification, you must know the Worker's ID. The Worker's ID is included in the assignment data returned by the ``ListAssignmentsForHIT`` operation.
Only the owner of a Qualification type can query the value of a Worker's Qualification of that type.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/mturk-requester-2017-01-17/GetQualificationScore>`_
**Request Syntax**
::
response = client.get_qualification_score(
QualificationTypeId='string',
WorkerId='string'
)
**Response Syntax**
::
{
'Qualification': {
'QualificationTypeId': 'string',
'WorkerId': 'string',
'GrantTime': datetime(2015, 1, 1),
'IntegerValue': 123,
'LocaleValue': {
'Country': 'string',
'Subdivision': 'string'
},
'Status': 'Granted'|'Revoked'
}
}
**Response Structure**
- *(dict) --*
- **Qualification** *(dict) --*
The Qualification data structure of the Qualification assigned to a user, including the Qualification type and the value (score).
- **QualificationTypeId** *(string) --*
The ID of the Qualification type for the Qualification.
- **WorkerId** *(string) --*
The ID of the Worker who possesses the Qualification.
- **GrantTime** *(datetime) --*
The date and time the Qualification was granted to the Worker. If the Worker's Qualification was revoked, and then re-granted based on a new Qualification request, GrantTime is the date and time of the last call to the AcceptQualificationRequest operation.
- **IntegerValue** *(integer) --*
The value (score) of the Qualification, if the Qualification has an integer value.
- **LocaleValue** *(dict) --*
The Locale data structure represents a geographical region or location.
- **Country** *(string) --*
The country of the locale. Must be a valid ISO 3166 country code. For example, the code US refers to the United States of America.
- **Subdivision** *(string) --*
The state or subdivision of the locale. A valid ISO 3166-2 subdivision code. For example, the code WA refers to the state of Washington.
- **Status** *(string) --*
The status of the Qualification. Valid values are Granted | Revoked.
:type QualificationTypeId: string
:param QualificationTypeId: **[REQUIRED]**
The ID of the QualificationType.
:type WorkerId: string
:param WorkerId: **[REQUIRED]**
The ID of the Worker whose Qualification is being retrieved.
:rtype: dict
:returns:
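**Example**
A sketch reading a Worker's score for a Qualification type you own; both IDs are placeholders, and IntegerValue is absent for locale-valued Qualifications::
import boto3
client = boto3.client('mturk')
response = client.get_qualification_score(
    QualificationTypeId='3EXAMPLEQUALTYPEID',
    WorkerId='A1EXAMPLEWORKERID'
)
print(response['Qualification'].get('IntegerValue'))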
"""
pass
def get_qualification_type(self, QualificationTypeId: str) -> Dict:
"""
The ``GetQualificationType`` operation retrieves information about a Qualification type using its ID.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/mturk-requester-2017-01-17/GetQualificationType>`_
**Request Syntax**
::
response = client.get_qualification_type(
QualificationTypeId='string'
)
**Response Syntax**
::
{
'QualificationType': {
'QualificationTypeId': 'string',
'CreationTime': datetime(2015, 1, 1),
'Name': 'string',
'Description': 'string',
'Keywords': 'string',
'QualificationTypeStatus': 'Active'|'Inactive',
'Test': 'string',
'TestDurationInSeconds': 123,
'AnswerKey': 'string',
'RetryDelayInSeconds': 123,
'IsRequestable': True|False,
'AutoGranted': True|False,
'AutoGrantedValue': 123
}
}
**Response Structure**
- *(dict) --*
- **QualificationType** *(dict) --*
The returned Qualification Type
- **QualificationTypeId** *(string) --*
A unique identifier for the Qualification type. A Qualification type is given a Qualification type ID when you call the CreateQualificationType operation.
- **CreationTime** *(datetime) --*
The date and time the Qualification type was created.
- **Name** *(string) --*
The name of the Qualification type. The type name is used to identify the type, and to find the type using a Qualification type search.
- **Description** *(string) --*
A long description for the Qualification type.
- **Keywords** *(string) --*
One or more words or phrases that describe the Qualification type, separated by commas. The Keywords make the type easier to find using a search.
- **QualificationTypeStatus** *(string) --*
The status of the Qualification type. A Qualification type's status determines if users can apply to receive a Qualification of this type, and if HITs can be created with requirements based on this type. Valid values are Active | Inactive.
- **Test** *(string) --*
The questions for a Qualification test associated with this Qualification type that a user can take to obtain a Qualification of this type. This parameter must be specified if AnswerKey is present. A Qualification type cannot have both a specified Test parameter and an AutoGranted value of true.
- **TestDurationInSeconds** *(integer) --*
The amount of time, in seconds, given to a Worker to complete the Qualification test, beginning from the time the Worker requests the Qualification.
- **AnswerKey** *(string) --*
The answers to the Qualification test specified in the Test parameter.
- **RetryDelayInSeconds** *(integer) --*
The amount of time, in seconds, Workers must wait after taking the Qualification test before they can take it again. Workers can take a Qualification test multiple times if they were not granted the Qualification from a previous attempt, or if the test offers a gradient score and they want a better score. If not specified, retries are disabled and Workers can request a Qualification only once.
- **IsRequestable** *(boolean) --*
Specifies whether the Qualification type is one that a user can request through the Amazon Mechanical Turk web site, such as by taking a Qualification test. This value is False for Qualifications assigned automatically by the system. Valid values are True | False.
- **AutoGranted** *(boolean) --*
Specifies that requests for the Qualification type are granted immediately, without prompting the Worker with a Qualification test. Valid values are True | False.
- **AutoGrantedValue** *(integer) --*
The Qualification integer value to use for automatically granted Qualifications, if AutoGranted is true. This is 1 by default.
:type QualificationTypeId: string
:param QualificationTypeId: **[REQUIRED]**
The ID of the QualificationType.
:rtype: dict
:returns:
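**Example**
A small sketch; the type ID is a placeholder::
import boto3
client = boto3.client('mturk')
qtype = client.get_qualification_type(
    QualificationTypeId='3EXAMPLEQUALTYPEID'
)['QualificationType']
print(qtype['Name'], qtype['QualificationTypeStatus'])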
"""
pass
def get_waiter(self, waiter_name: str = None) -> Waiter:
"""
Returns an object that can wait for some condition.
:type waiter_name: str
:param waiter_name: The name of the waiter to get. See the waiters
section of the service docs for a list of available waiters.
:returns: The specified waiter object.
:rtype: botocore.waiter.Waiter
"""
pass
def list_assignments_for_hit(self, HITId: str, NextToken: str = None, MaxResults: int = None, AssignmentStatuses: List = None) -> Dict:
"""
The ``ListAssignmentsForHIT`` operation retrieves completed assignments for a HIT. You can use this operation to retrieve the results for a HIT.
You can get assignments for a HIT at any time, even if the HIT is not yet Reviewable. If a HIT requested multiple assignments, and has received some results but has not yet become Reviewable, you can still retrieve the partial results with this operation.
Use the AssignmentStatuses parameter to control which set of assignments for a HIT are returned. The ListAssignmentsForHIT operation can return submitted assignments awaiting approval, or it can return assignments that have already been approved or rejected. You can set AssignmentStatuses to ``['Approved', 'Rejected']`` to get assignments that have already been approved and rejected together in one result set.
Only the Requester who created the HIT can retrieve the assignments for that HIT.
Results are sorted and divided into numbered pages and the operation returns a single page of results. You can use the parameters of the operation to control sorting and pagination.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/mturk-requester-2017-01-17/ListAssignmentsForHIT>`_
**Request Syntax**
::
response = client.list_assignments_for_hit(
HITId='string',
NextToken='string',
MaxResults=123,
AssignmentStatuses=[
'Submitted'|'Approved'|'Rejected',
]
)
**Response Syntax**
::
{
'NextToken': 'string',
'NumResults': 123,
'Assignments': [
{
'AssignmentId': 'string',
'WorkerId': 'string',
'HITId': 'string',
'AssignmentStatus': 'Submitted'|'Approved'|'Rejected',
'AutoApprovalTime': datetime(2015, 1, 1),
'AcceptTime': datetime(2015, 1, 1),
'SubmitTime': datetime(2015, 1, 1),
'ApprovalTime': datetime(2015, 1, 1),
'RejectionTime': datetime(2015, 1, 1),
'Deadline': datetime(2015, 1, 1),
'Answer': 'string',
'RequesterFeedback': 'string'
},
]
}
**Response Structure**
- *(dict) --*
- **NextToken** *(string) --*
If the previous response was incomplete (because there is more data to retrieve), Amazon Mechanical Turk returns a pagination token in the response. You can use this pagination token to retrieve the next set of results.
- **NumResults** *(integer) --*
The number of assignments on the page in the filtered results list, equivalent to the number of assignments returned by this call.
- **Assignments** *(list) --*
The collection of Assignment data structures returned by this call.
- *(dict) --*
The Assignment data structure represents a single assignment of a HIT to a Worker. The assignment tracks the Worker's efforts to complete the HIT, and contains the results for later retrieval.
- **AssignmentId** *(string) --*
A unique identifier for the assignment.
- **WorkerId** *(string) --*
The ID of the Worker who accepted the HIT.
- **HITId** *(string) --*
The ID of the HIT.
- **AssignmentStatus** *(string) --*
The status of the assignment.
- **AutoApprovalTime** *(datetime) --*
If results have been submitted, AutoApprovalTime is the date and time the results of the assignment are considered Approved automatically if they have not already been explicitly approved or rejected by the Requester. This value is derived from the auto-approval delay specified by the Requester in the HIT. This value is omitted from the assignment if the Worker has not yet submitted results.
- **AcceptTime** *(datetime) --*
The date and time the Worker accepted the assignment.
- **SubmitTime** *(datetime) --*
If the Worker has submitted results, SubmitTime is the date and time the assignment was submitted. This value is omitted from the assignment if the Worker has not yet submitted results.
- **ApprovalTime** *(datetime) --*
If the Worker has submitted results and the Requester has approved the results, ApprovalTime is the date and time the Requester approved the results. This value is omitted from the assignment if the Requester has not yet approved the results.
- **RejectionTime** *(datetime) --*
If the Worker has submitted results and the Requester has rejected the results, RejectionTime is the date and time the Requester rejected the results.
- **Deadline** *(datetime) --*
The date and time of the deadline for the assignment. This value is derived from the deadline specification for the HIT and the date and time the Worker accepted the HIT.
- **Answer** *(string) --*
The Worker's answers submitted for the HIT contained in a QuestionFormAnswers document, if the Worker provides an answer. If the Worker does not provide any answers, Answer may contain a QuestionFormAnswers document, or Answer may be empty.
- **RequesterFeedback** *(string) --*
The feedback string included with the call to the ApproveAssignment operation or the RejectAssignment operation, if the Requester approved or rejected the assignment and specified feedback.
:type HITId: string
:param HITId: **[REQUIRED]**
The ID of the HIT.
:type NextToken: string
:param NextToken:
Pagination token
:type MaxResults: integer
:param MaxResults:
:type AssignmentStatuses: list
:param AssignmentStatuses:
The status of the assignments to return: Submitted | Approved | Rejected
- *(string) --*
:rtype: dict
:returns:
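**Example**
A hedged sketch collecting every submitted assignment for one HIT by following NextToken; the HIT ID is a placeholder::
import boto3
client = boto3.client('mturk')
submitted = []
kwargs = {'HITId': '3EXAMPLEHITID', 'AssignmentStatuses': ['Submitted']}
while True:
    page = client.list_assignments_for_hit(**kwargs)
    submitted.extend(page['Assignments'])
    if not page.get('NextToken'):
        break
    kwargs['NextToken'] = page['NextToken']
print(len(submitted), 'assignments awaiting review')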
"""
pass
def list_bonus_payments(self, HITId: str = None, AssignmentId: str = None, NextToken: str = None, MaxResults: int = None) -> Dict:
"""
The ``ListBonusPayments`` operation retrieves the amounts of bonuses you have paid to Workers for a given HIT or assignment.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/mturk-requester-2017-01-17/ListBonusPayments>`_
**Request Syntax**
::
response = client.list_bonus_payments(
HITId='string',
AssignmentId='string',
NextToken='string',
MaxResults=123
)
**Response Syntax**
::
{
'NumResults': 123,
'NextToken': 'string',
'BonusPayments': [
{
'WorkerId': 'string',
'BonusAmount': 'string',
'AssignmentId': 'string',
'Reason': 'string',
'GrantTime': datetime(2015, 1, 1)
},
]
}
**Response Structure**
- *(dict) --*
- **NumResults** *(integer) --*
The number of bonus payments on this page in the filtered results list, equivalent to the number of bonus payments being returned by this call.
- **NextToken** *(string) --*
If the previous response was incomplete (because there is more data to retrieve), Amazon Mechanical Turk returns a pagination token in the response. You can use this pagination token to retrieve the next set of results.
- **BonusPayments** *(list) --*
A successful request to the ListBonusPayments operation returns a list of BonusPayment objects.
- *(dict) --*
An object representing a Bonus payment paid to a Worker.
- **WorkerId** *(string) --*
The ID of the Worker to whom the bonus was paid.
- **BonusAmount** *(string) --*
A string representing a currency amount.
- **AssignmentId** *(string) --*
The ID of the assignment associated with this bonus payment.
- **Reason** *(string) --*
The Reason text given when the bonus was granted, if any.
- **GrantTime** *(datetime) --*
The date and time of when the bonus was granted.
:type HITId: string
:param HITId:
The ID of the HIT associated with the bonus payments to retrieve. If specified, all bonus payments for all assignments of the given HIT are returned. Either the HITId parameter or the AssignmentId parameter must be specified.
:type AssignmentId: string
:param AssignmentId:
The ID of the assignment associated with the bonus payments to retrieve. If specified, only bonus payments for the given assignment are returned. Either the HITId parameter or the AssignmentId parameter must be specified.
:type NextToken: string
:param NextToken:
Pagination token
:type MaxResults: integer
:param MaxResults:
:rtype: dict
:returns:
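**Example**
A sketch totalling bonuses paid for one HIT; the HIT ID is a placeholder, and BonusAmount is a currency string that must be converted before summing::
import boto3
client = boto3.client('mturk')
response = client.list_bonus_payments(HITId='3EXAMPLEHITID')
total = sum(float(b['BonusAmount']) for b in response['BonusPayments'])
print('Bonuses on this page:', total)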
"""
pass
def list_hits(self, NextToken: str = None, MaxResults: int = None) -> Dict:
"""
The ``ListHITs`` operation returns all of a Requester's HITs. The operation returns HITs of any status, except for HITs that have been deleted with the DeleteHIT operation or that have been auto-deleted.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/mturk-requester-2017-01-17/ListHITs>`_
**Request Syntax**
::
response = client.list_hits(
NextToken='string',
MaxResults=123
)
**Response Syntax**
::
{
'NextToken': 'string',
'NumResults': 123,
'HITs': [
{
'HITId': 'string',
'HITTypeId': 'string',
'HITGroupId': 'string',
'HITLayoutId': 'string',
'CreationTime': datetime(2015, 1, 1),
'Title': 'string',
'Description': 'string',
'Question': 'string',
'Keywords': 'string',
'HITStatus': 'Assignable'|'Unassignable'|'Reviewable'|'Reviewing'|'Disposed',
'MaxAssignments': 123,
'Reward': 'string',
'AutoApprovalDelayInSeconds': 123,
'Expiration': datetime(2015, 1, 1),
'AssignmentDurationInSeconds': 123,
'RequesterAnnotation': 'string',
'QualificationRequirements': [
{
'QualificationTypeId': 'string',
'Comparator': 'LessThan'|'LessThanOrEqualTo'|'GreaterThan'|'GreaterThanOrEqualTo'|'EqualTo'|'NotEqualTo'|'Exists'|'DoesNotExist'|'In'|'NotIn',
'IntegerValues': [
123,
],
'LocaleValues': [
{
'Country': 'string',
'Subdivision': 'string'
},
],
'RequiredToPreview': True|False,
'ActionsGuarded': 'Accept'|'PreviewAndAccept'|'DiscoverPreviewAndAccept'
},
],
'HITReviewStatus': 'NotReviewed'|'MarkedForReview'|'ReviewedAppropriate'|'ReviewedInappropriate',
'NumberOfAssignmentsPending': 123,
'NumberOfAssignmentsAvailable': 123,
'NumberOfAssignmentsCompleted': 123
},
]
}
**Response Structure**
- *(dict) --*
- **NextToken** *(string) --*
If the previous response was incomplete (because there is more data to retrieve), Amazon Mechanical Turk returns a pagination token in the response. You can use this pagination token to retrieve the next set of results.
- **NumResults** *(integer) --*
The number of HITs on this page in the filtered results list, equivalent to the number of HITs being returned by this call.
- **HITs** *(list) --*
The list of HIT elements returned by the query.
- *(dict) --*
The HIT data structure represents a single HIT, including all the information necessary for a Worker to accept and complete the HIT.
- **HITId** *(string) --*
A unique identifier for the HIT.
- **HITTypeId** *(string) --*
The ID of the HIT type of this HIT
- **HITGroupId** *(string) --*
The ID of the HIT Group of this HIT.
- **HITLayoutId** *(string) --*
The ID of the HIT Layout of this HIT.
- **CreationTime** *(datetime) --*
The date and time the HIT was created.
- **Title** *(string) --*
The title of the HIT.
- **Description** *(string) --*
A general description of the HIT.
- **Question** *(string) --*
The data the Worker completing the HIT uses to produce the results. This is either a QuestionForm, an HTMLQuestion, or an ExternalQuestion data structure.
- **Keywords** *(string) --*
One or more words or phrases that describe the HIT, separated by commas. Search terms similar to the keywords of a HIT are more likely to have the HIT in the search results.
- **HITStatus** *(string) --*
The status of the HIT and its assignments. Valid Values are Assignable | Unassignable | Reviewable | Reviewing | Disposed.
- **MaxAssignments** *(integer) --*
The number of times the HIT can be accepted and completed before the HIT becomes unavailable.
- **Reward** *(string) --*
A string representing a currency amount.
- **AutoApprovalDelayInSeconds** *(integer) --*
The amount of time, in seconds, after the Worker submits an assignment for the HIT that the results are automatically approved by Amazon Mechanical Turk. This is the amount of time the Requester has to reject an assignment submitted by a Worker before the assignment is auto-approved and the Worker is paid.
- **Expiration** *(datetime) --*
The date and time the HIT expires.
- **AssignmentDurationInSeconds** *(integer) --*
The length of time, in seconds, that a Worker has to complete the HIT after accepting it.
- **RequesterAnnotation** *(string) --*
An arbitrary data field the Requester who created the HIT can use. This field is visible only to the creator of the HIT.
- **QualificationRequirements** *(list) --*
Conditions that a Worker's Qualifications must meet in order to accept the HIT. A HIT can have between zero and ten Qualification requirements. All requirements must be met in order for a Worker to accept the HIT. Additionally, other actions can be restricted using the ``ActionsGuarded`` field on each ``QualificationRequirement`` structure.
- *(dict) --*
The QualificationRequirement data structure describes a Qualification that a Worker must have before the Worker is allowed to accept a HIT. A requirement may optionally state that a Worker must have the Qualification in order to preview the HIT, or see the HIT in search results.
- **QualificationTypeId** *(string) --*
The ID of the Qualification type for the requirement.
- **Comparator** *(string) --*
The kind of comparison to make against a Qualification's value. You can compare a Qualification's value to an IntegerValue to see if it is LessThan, LessThanOrEqualTo, GreaterThan, GreaterThanOrEqualTo, EqualTo, or NotEqualTo the IntegerValue. You can compare it to a LocaleValue to see if it is EqualTo, or NotEqualTo the LocaleValue. You can check to see if the value is In or NotIn a set of IntegerValue or LocaleValue values. Lastly, a Qualification requirement can also test if a Qualification Exists or DoesNotExist in the user's profile, regardless of its value.
- **IntegerValues** *(list) --*
The integer value to compare against the Qualification's value. IntegerValue must not be present if Comparator is Exists or DoesNotExist. IntegerValue can only be used if the Qualification type has an integer value; it cannot be used with the Worker_Locale QualificationType ID. When performing a set comparison by using the In or the NotIn comparator, you can use up to 15 IntegerValue elements in a QualificationRequirement data structure.
- *(integer) --*
- **LocaleValues** *(list) --*
The locale value to compare against the Qualification's value. The locale value must be a valid ISO 3166 country code; ISO 3166-2 subdivisions are also supported. LocaleValue can only be used with a Worker_Locale QualificationType ID. LocaleValue can only be used with the EqualTo, NotEqualTo, In, and NotIn comparators. You must only use a single LocaleValue element when using the EqualTo or NotEqualTo comparators. When performing a set comparison by using the In or the NotIn comparator, you can use up to 30 LocaleValue elements in a QualificationRequirement data structure.
- *(dict) --*
The Locale data structure represents a geographical region or location.
- **Country** *(string) --*
The country of the locale. Must be a valid ISO 3166 country code. For example, the code US refers to the United States of America.
- **Subdivision** *(string) --*
The state or subdivision of the locale. A valid ISO 3166-2 subdivision code. For example, the code WA refers to the state of Washington.
- **RequiredToPreview** *(boolean) --*
DEPRECATED: Use the ``ActionsGuarded`` field instead. If RequiredToPreview is true, the question data for the HIT will not be shown when a Worker whose Qualifications do not meet this requirement tries to preview the HIT. That is, a Worker's Qualifications must meet all of the requirements for which RequiredToPreview is true in order to preview the HIT. If a Worker meets all of the requirements where RequiredToPreview is true (or if there are no such requirements), but does not meet all of the requirements for the HIT, the Worker will be allowed to preview the HIT's question data, but will not be allowed to accept and complete the HIT. The default is false. This should not be used in combination with the ``ActionsGuarded`` field.
- **ActionsGuarded** *(string) --*
Setting this attribute prevents Workers whose Qualifications do not meet this QualificationRequirement from taking the specified action. Valid arguments include "Accept" (Worker cannot accept the HIT, but can preview the HIT and see it in their search results), "PreviewAndAccept" (Worker cannot accept or preview the HIT, but can see the HIT in their search results), and "DiscoverPreviewAndAccept" (Worker cannot accept, preview, or see the HIT in their search results). It's possible for you to create a HIT with multiple QualificationRequirements (which can have different values for the ActionsGuarded attribute). In this case, the Worker is only permitted to perform an action when they have met all QualificationRequirements guarding the action. The actions in the order of least restrictive to most restrictive are Discover, Preview and Accept. For example, if a Worker meets all QualificationRequirements that are set to DiscoverPreviewAndAccept, but does not meet all requirements that are set with PreviewAndAccept, then the Worker will be able to Discover, i.e. see the HIT in their search results, but will not be able to Preview or Accept the HIT. ActionsGuarded should not be used in combination with the ``RequiredToPreview`` field.
- **HITReviewStatus** *(string) --*
Indicates the review status of the HIT. Valid Values are NotReviewed | MarkedForReview | ReviewedAppropriate | ReviewedInappropriate.
- **NumberOfAssignmentsPending** *(integer) --*
The number of assignments for this HIT that are being previewed or have been accepted by Workers, but have not yet been submitted, returned, or abandoned.
- **NumberOfAssignmentsAvailable** *(integer) --*
The number of assignments for this HIT that are available for Workers to accept.
- **NumberOfAssignmentsCompleted** *(integer) --*
The number of assignments for this HIT that have been approved or rejected.
:type NextToken: string
:param NextToken:
Pagination token
:type MaxResults: integer
:param MaxResults:
:rtype: dict
:returns:
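**Example Usage (illustrative)**
A minimal pagination sketch, not part of the AWS reference; it assumes configured AWS credentials and follows ``NextToken`` until the listing is exhausted.
::
    import boto3

    client = boto3.client('mturk', region_name='us-east-1')
    kwargs = {'MaxResults': 100}
    while True:
        response = client.list_hits(**kwargs)
        for hit in response['HITs']:
            print(hit['HITId'], hit['HITStatus'])
        # NextToken is only present when more results remain.
        if 'NextToken' not in response:
            break
        kwargs['NextToken'] = response['NextToken']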
"""
pass
def list_hits_for_qualification_type(self, QualificationTypeId: str, NextToken: str = None, MaxResults: int = None) -> Dict:
"""
The ``ListHITsForQualificationType`` operation returns the HITs that use the given Qualification type for a Qualification requirement. The operation returns HITs of any status, except for HITs that have been deleted with the ``DeleteHIT`` operation or that have been auto-deleted.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/mturk-requester-2017-01-17/ListHITsForQualificationType>`_
**Request Syntax**
::
response = client.list_hits_for_qualification_type(
QualificationTypeId='string',
NextToken='string',
MaxResults=123
)
**Response Syntax**
::
{
'NextToken': 'string',
'NumResults': 123,
'HITs': [
{
'HITId': 'string',
'HITTypeId': 'string',
'HITGroupId': 'string',
'HITLayoutId': 'string',
'CreationTime': datetime(2015, 1, 1),
'Title': 'string',
'Description': 'string',
'Question': 'string',
'Keywords': 'string',
'HITStatus': 'Assignable'|'Unassignable'|'Reviewable'|'Reviewing'|'Disposed',
'MaxAssignments': 123,
'Reward': 'string',
'AutoApprovalDelayInSeconds': 123,
'Expiration': datetime(2015, 1, 1),
'AssignmentDurationInSeconds': 123,
'RequesterAnnotation': 'string',
'QualificationRequirements': [
{
'QualificationTypeId': 'string',
'Comparator': 'LessThan'|'LessThanOrEqualTo'|'GreaterThan'|'GreaterThanOrEqualTo'|'EqualTo'|'NotEqualTo'|'Exists'|'DoesNotExist'|'In'|'NotIn',
'IntegerValues': [
123,
],
'LocaleValues': [
{
'Country': 'string',
'Subdivision': 'string'
},
],
'RequiredToPreview': True|False,
'ActionsGuarded': 'Accept'|'PreviewAndAccept'|'DiscoverPreviewAndAccept'
},
],
'HITReviewStatus': 'NotReviewed'|'MarkedForReview'|'ReviewedAppropriate'|'ReviewedInappropriate',
'NumberOfAssignmentsPending': 123,
'NumberOfAssignmentsAvailable': 123,
'NumberOfAssignmentsCompleted': 123
},
]
}
**Response Structure**
- *(dict) --*
- **NextToken** *(string) --*
If the previous response was incomplete (because there is more data to retrieve), Amazon Mechanical Turk returns a pagination token in the response. You can use this pagination token to retrieve the next set of results.
- **NumResults** *(integer) --*
The number of HITs on this page in the filtered results list, equivalent to the number of HITs being returned by this call.
- **HITs** *(list) --*
The list of HIT elements returned by the query.
- *(dict) --*
The HIT data structure represents a single HIT, including all the information necessary for a Worker to accept and complete the HIT.
- **HITId** *(string) --*
A unique identifier for the HIT.
- **HITTypeId** *(string) --*
The ID of the HIT type of this HIT.
- **HITGroupId** *(string) --*
The ID of the HIT Group of this HIT.
- **HITLayoutId** *(string) --*
The ID of the HIT Layout of this HIT.
- **CreationTime** *(datetime) --*
The date and time the HIT was created.
- **Title** *(string) --*
The title of the HIT.
- **Description** *(string) --*
A general description of the HIT.
- **Question** *(string) --*
The data the Worker completing the HIT uses to produce the results. This is either a QuestionForm, an HTMLQuestion, or an ExternalQuestion data structure.
- **Keywords** *(string) --*
One or more words or phrases that describe the HIT, separated by commas. Search terms similar to the keywords of a HIT are more likely to have the HIT in the search results.
- **HITStatus** *(string) --*
The status of the HIT and its assignments. Valid Values are Assignable | Unassignable | Reviewable | Reviewing | Disposed.
- **MaxAssignments** *(integer) --*
The number of times the HIT can be accepted and completed before the HIT becomes unavailable.
- **Reward** *(string) --*
A string representing a currency amount.
- **AutoApprovalDelayInSeconds** *(integer) --*
The amount of time, in seconds, after the Worker submits an assignment for the HIT that the results are automatically approved by Amazon Mechanical Turk. This is the amount of time the Requester has to reject an assignment submitted by a Worker before the assignment is auto-approved and the Worker is paid.
- **Expiration** *(datetime) --*
The date and time the HIT expires.
- **AssignmentDurationInSeconds** *(integer) --*
The length of time, in seconds, that a Worker has to complete the HIT after accepting it.
- **RequesterAnnotation** *(string) --*
An arbitrary data field the Requester who created the HIT can use. This field is visible only to the creator of the HIT.
- **QualificationRequirements** *(list) --*
Conditions that a Worker's Qualifications must meet in order to accept the HIT. A HIT can have between zero and ten Qualification requirements. All requirements must be met in order for a Worker to accept the HIT. Additionally, other actions can be restricted using the ``ActionsGuarded`` field on each ``QualificationRequirement`` structure.
- *(dict) --*
The QualificationRequirement data structure describes a Qualification that a Worker must have before the Worker is allowed to accept a HIT. A requirement may optionally state that a Worker must have the Qualification in order to preview the HIT, or see the HIT in search results.
- **QualificationTypeId** *(string) --*
The ID of the Qualification type for the requirement.
- **Comparator** *(string) --*
The kind of comparison to make against a Qualification's value. You can compare a Qualification's value to an IntegerValue to see if it is LessThan, LessThanOrEqualTo, GreaterThan, GreaterThanOrEqualTo, EqualTo, or NotEqualTo the IntegerValue. You can compare it to a LocaleValue to see if it is EqualTo, or NotEqualTo the LocaleValue. You can check to see if the value is In or NotIn a set of IntegerValue or LocaleValue values. Lastly, a Qualification requirement can also test if a Qualification Exists or DoesNotExist in the user's profile, regardless of its value.
- **IntegerValues** *(list) --*
The integer value to compare against the Qualification's value. IntegerValue must not be present if Comparator is Exists or DoesNotExist. IntegerValue can only be used if the Qualification type has an integer value; it cannot be used with the Worker_Locale QualificationType ID. When performing a set comparison by using the In or the NotIn comparator, you can use up to 15 IntegerValue elements in a QualificationRequirement data structure.
- *(integer) --*
- **LocaleValues** *(list) --*
The locale value to compare against the Qualification's value. The locale value must be a valid ISO 3166 country code; ISO 3166-2 subdivisions are also supported. LocaleValue can only be used with a Worker_Locale QualificationType ID. LocaleValue can only be used with the EqualTo, NotEqualTo, In, and NotIn comparators. You must only use a single LocaleValue element when using the EqualTo or NotEqualTo comparators. When performing a set comparison by using the In or the NotIn comparator, you can use up to 30 LocaleValue elements in a QualificationRequirement data structure.
- *(dict) --*
The Locale data structure represents a geographical region or location.
- **Country** *(string) --*
The country of the locale. Must be a valid ISO 3166 country code. For example, the code US refers to the United States of America.
- **Subdivision** *(string) --*
The state or subdivision of the locale. A valid ISO 3166-2 subdivision code. For example, the code WA refers to the state of Washington.
- **RequiredToPreview** *(boolean) --*
DEPRECATED: Use the ``ActionsGuarded`` field instead. If RequiredToPreview is true, the question data for the HIT will not be shown when a Worker whose Qualifications do not meet this requirement tries to preview the HIT. That is, a Worker's Qualifications must meet all of the requirements for which RequiredToPreview is true in order to preview the HIT. If a Worker meets all of the requirements where RequiredToPreview is true (or if there are no such requirements), but does not meet all of the requirements for the HIT, the Worker will be allowed to preview the HIT's question data, but will not be allowed to accept and complete the HIT. The default is false. This should not be used in combination with the ``ActionsGuarded`` field.
- **ActionsGuarded** *(string) --*
Setting this attribute prevents Workers whose Qualifications do not meet this QualificationRequirement from taking the specified action. Valid arguments include "Accept" (Worker cannot accept the HIT, but can preview the HIT and see it in their search results), "PreviewAndAccept" (Worker cannot accept or preview the HIT, but can see the HIT in their search results), and "DiscoverPreviewAndAccept" (Worker cannot accept, preview, or see the HIT in their search results). It's possible for you to create a HIT with multiple QualificationRequirements (which can have different values for the ActionsGuarded attribute). In this case, the Worker is only permitted to perform an action when they have met all QualificationRequirements guarding the action. The actions in the order of least restrictive to most restrictive are Discover, Preview and Accept. For example, if a Worker meets all QualificationRequirements that are set to DiscoverPreviewAndAccept, but does not meet all requirements that are set with PreviewAndAccept, then the Worker will be able to Discover, i.e. see the HIT in their search results, but will not be able to Preview or Accept the HIT. ActionsGuarded should not be used in combination with the ``RequiredToPreview`` field.
- **HITReviewStatus** *(string) --*
Indicates the review status of the HIT. Valid Values are NotReviewed | MarkedForReview | ReviewedAppropriate | ReviewedInappropriate.
- **NumberOfAssignmentsPending** *(integer) --*
The number of assignments for this HIT that are being previewed or have been accepted by Workers, but have not yet been submitted, returned, or abandoned.
- **NumberOfAssignmentsAvailable** *(integer) --*
The number of assignments for this HIT that are available for Workers to accept.
- **NumberOfAssignmentsCompleted** *(integer) --*
The number of assignments for this HIT that have been approved or rejected.
:type QualificationTypeId: string
:param QualificationTypeId: **[REQUIRED]**
The ID of the Qualification type to use when querying HITs.
:type NextToken: string
:param NextToken:
Pagination Token
:type MaxResults: integer
:param MaxResults:
Limit the number of results returned.
:rtype: dict
:returns:
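**Example Usage (illustrative)**
A minimal sketch, not part of the AWS reference; it assumes configured AWS credentials, and the Qualification type ID is a placeholder.
::
    import boto3

    client = boto3.client('mturk', region_name='us-east-1')
    response = client.list_hits_for_qualification_type(
        QualificationTypeId='QUALIFICATION_TYPE_ID_PLACEHOLDER',
        MaxResults=50,
    )
    for hit in response['HITs']:
        print(hit['HITId'], hit['Title'])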
"""
pass
def list_qualification_requests(self, QualificationTypeId: str = None, NextToken: str = None, MaxResults: int = None) -> Dict:
"""
The ``ListQualificationRequests`` operation retrieves requests for Qualifications of a particular Qualification type. The owner of the Qualification type calls this operation to poll for pending requests, and accepts them using the AcceptQualification operation.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/mturk-requester-2017-01-17/ListQualificationRequests>`_
**Request Syntax**
::
response = client.list_qualification_requests(
QualificationTypeId='string',
NextToken='string',
MaxResults=123
)
**Response Syntax**
::
{
'NumResults': 123,
'NextToken': 'string',
'QualificationRequests': [
{
'QualificationRequestId': 'string',
'QualificationTypeId': 'string',
'WorkerId': 'string',
'Test': 'string',
'Answer': 'string',
'SubmitTime': datetime(2015, 1, 1)
},
]
}
**Response Structure**
- *(dict) --*
- **NumResults** *(integer) --*
The number of Qualification requests on this page in the filtered results list, equivalent to the number of Qualification requests being returned by this call.
- **NextToken** *(string) --*
If the previous response was incomplete (because there is more data to retrieve), Amazon Mechanical Turk returns a pagination token in the response. You can use this pagination token to retrieve the next set of results.
- **QualificationRequests** *(list) --*
The Qualification request. The response includes one QualificationRequest element for each Qualification request returned by the query.
- *(dict) --*
The QualificationRequest data structure represents a request a Worker has made for a Qualification.
- **QualificationRequestId** *(string) --*
The ID of the Qualification request, a unique identifier generated when the request was submitted.
- **QualificationTypeId** *(string) --*
The ID of the Qualification type the Worker is requesting, as returned by the CreateQualificationType operation.
- **WorkerId** *(string) --*
The ID of the Worker requesting the Qualification.
- **Test** *(string) --*
The contents of the Qualification test that was presented to the Worker, if the type has a test and the Worker has submitted answers. This value is identical to the QuestionForm associated with the Qualification type at the time the Worker requests the Qualification.
- **Answer** *(string) --*
The Worker's answers for the Qualification type's test contained in a QuestionFormAnswers document, if the type has a test and the Worker has submitted answers. If the Worker does not provide any answers, Answer may be empty.
- **SubmitTime** *(datetime) --*
The date and time the Qualification request had a status of Submitted. This is either the time the Worker submitted answers for a Qualification test, or the time the Worker requested the Qualification if the Qualification type does not have a test.
:type QualificationTypeId: string
:param QualificationTypeId:
The ID of the QualificationType.
:type NextToken: string
:param NextToken:
If the previous response was incomplete (because there is more data to retrieve), Amazon Mechanical Turk returns a pagination token in the response. You can use this pagination token to retrieve the next set of results.
:type MaxResults: integer
:param MaxResults:
The maximum number of results to return in a single call.
:rtype: dict
:returns:
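**Example Usage (illustrative)**
A minimal polling sketch, not part of the AWS reference; it assumes configured AWS credentials, and the Qualification type ID is a placeholder. Requests found this way would typically be passed on to the AcceptQualificationRequest operation.
::
    import boto3

    client = boto3.client('mturk', region_name='us-east-1')
    response = client.list_qualification_requests(
        QualificationTypeId='QUALIFICATION_TYPE_ID_PLACEHOLDER',
        MaxResults=10,
    )
    for request in response['QualificationRequests']:
        print(request['QualificationRequestId'], request['WorkerId'])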
"""
pass
def list_qualification_types(self, MustBeRequestable: bool, Query: str = None, MustBeOwnedByCaller: bool = None, NextToken: str = None, MaxResults: int = None) -> Dict:
"""
The ``ListQualificationTypes`` operation returns a list of Qualification types, filtered by an optional search term.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/mturk-requester-2017-01-17/ListQualificationTypes>`_
**Request Syntax**
::
response = client.list_qualification_types(
Query='string',
MustBeRequestable=True|False,
MustBeOwnedByCaller=True|False,
NextToken='string',
MaxResults=123
)
**Response Syntax**
::
{
'NumResults': 123,
'NextToken': 'string',
'QualificationTypes': [
{
'QualificationTypeId': 'string',
'CreationTime': datetime(2015, 1, 1),
'Name': 'string',
'Description': 'string',
'Keywords': 'string',
'QualificationTypeStatus': 'Active'|'Inactive',
'Test': 'string',
'TestDurationInSeconds': 123,
'AnswerKey': 'string',
'RetryDelayInSeconds': 123,
'IsRequestable': True|False,
'AutoGranted': True|False,
'AutoGrantedValue': 123
},
]
}
**Response Structure**
- *(dict) --*
- **NumResults** *(integer) --*
The number of Qualification types on this page in the filtered results list, equivalent to the number of types this operation returns.
- **NextToken** *(string) --*
If the previous response was incomplete (because there is more data to retrieve), Amazon Mechanical Turk returns a pagination token in the response. You can use this pagination token to retrieve the next set of results.
- **QualificationTypes** *(list) --*
The list of QualificationType elements returned by the query.
- *(dict) --*
The QualificationType data structure represents a Qualification type, a description of a property of a Worker that must match the requirements of a HIT for the Worker to be able to accept the HIT. The type also describes how a Worker can obtain a Qualification of that type, such as through a Qualification test.
- **QualificationTypeId** *(string) --*
A unique identifier for the Qualification type. A Qualification type is given a Qualification type ID when you call the CreateQualificationType operation.
- **CreationTime** *(datetime) --*
The date and time the Qualification type was created.
- **Name** *(string) --*
The name of the Qualification type. The type name is used to identify the type, and to find the type using a Qualification type search.
- **Description** *(string) --*
A long description for the Qualification type.
- **Keywords** *(string) --*
One or more words or phrases that describe the Qualification type, separated by commas. The Keywords make the type easier to find using a search.
- **QualificationTypeStatus** *(string) --*
The status of the Qualification type. A Qualification type's status determines if users can apply to receive a Qualification of this type, and if HITs can be created with requirements based on this type. Valid values are Active | Inactive.
- **Test** *(string) --*
The questions for a Qualification test associated with this Qualification type that a user can take to obtain a Qualification of this type. This parameter must be specified if AnswerKey is present. A Qualification type cannot have both a specified Test parameter and an AutoGranted value of true.
- **TestDurationInSeconds** *(integer) --*
The amount of time, in seconds, given to a Worker to complete the Qualification test, beginning from the time the Worker requests the Qualification.
- **AnswerKey** *(string) --*
The answers to the Qualification test specified in the Test parameter.
- **RetryDelayInSeconds** *(integer) --*
The amount of time, in seconds, Workers must wait after taking the Qualification test before they can take it again. Workers can take a Qualification test multiple times if they were not granted the Qualification from a previous attempt, or if the test offers a gradient score and they want a better score. If not specified, retries are disabled and Workers can request a Qualification only once.
- **IsRequestable** *(boolean) --*
Specifies whether the Qualification type is one that a user can request through the Amazon Mechanical Turk web site, such as by taking a Qualification test. This value is False for Qualifications assigned automatically by the system. Valid values are True | False.
- **AutoGranted** *(boolean) --*
Specifies that requests for the Qualification type are granted immediately, without prompting the Worker with a Qualification test. Valid values are True | False.
- **AutoGrantedValue** *(integer) --*
The Qualification integer value to use for automatically granted Qualifications, if AutoGranted is true. This is 1 by default.
:type Query: string
:param Query:
A text query against all of the searchable attributes of Qualification types.
:type MustBeRequestable: boolean
:param MustBeRequestable: **[REQUIRED]**
Specifies that only Qualification types that a user can request through the Amazon Mechanical Turk web site, such as by taking a Qualification test, are returned as results of the search. Some Qualification types, such as those assigned automatically by the system, cannot be requested directly by users. If false, all Qualification types, including those managed by the system, are considered. Valid values are True | False.
:type MustBeOwnedByCaller: boolean
:param MustBeOwnedByCaller:
Specifies that only Qualification types that the Requester created are returned. If false, the operation returns all Qualification types.
:type NextToken: string
:param NextToken:
If the previous response was incomplete (because there is more data to retrieve), Amazon Mechanical Turk returns a pagination token in the response. You can use this pagination token to retrieve the next set of results.
:type MaxResults: integer
:param MaxResults:
The maximum number of results to return in a single call.
:rtype: dict
:returns:
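**Example Usage (illustrative)**
A minimal sketch, not part of the AWS reference; it assumes configured AWS credentials, and the query string is arbitrary.
::
    import boto3

    client = boto3.client('mturk', region_name='us-east-1')
    # MustBeRequestable is the only required parameter.
    response = client.list_qualification_types(
        Query='image tagging',
        MustBeRequestable=True,
        MustBeOwnedByCaller=True,
    )
    for qtype in response['QualificationTypes']:
        print(qtype['QualificationTypeId'], qtype['Name'])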
"""
pass
def list_review_policy_results_for_hit(self, HITId: str, PolicyLevels: List = None, RetrieveActions: bool = None, RetrieveResults: bool = None, NextToken: str = None, MaxResults: int = None) -> Dict:
"""
The ``ListReviewPolicyResultsForHIT`` operation retrieves the computed results and the actions taken in the course of executing your Review Policies for a given HIT. For information about how to specify Review Policies when you call CreateHIT, see Review Policies. The ListReviewPolicyResultsForHIT operation can return results for both Assignment-level and HIT-level review results.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/mturk-requester-2017-01-17/ListReviewPolicyResultsForHIT>`_
**Request Syntax**
::
response = client.list_review_policy_results_for_hit(
HITId='string',
PolicyLevels=[
'Assignment'|'HIT',
],
RetrieveActions=True|False,
RetrieveResults=True|False,
NextToken='string',
MaxResults=123
)
**Response Syntax**
::
{
'HITId': 'string',
'AssignmentReviewPolicy': {
'PolicyName': 'string',
'Parameters': [
{
'Key': 'string',
'Values': [
'string',
],
'MapEntries': [
{
'Key': 'string',
'Values': [
'string',
]
},
]
},
]
},
'HITReviewPolicy': {
'PolicyName': 'string',
'Parameters': [
{
'Key': 'string',
'Values': [
'string',
],
'MapEntries': [
{
'Key': 'string',
'Values': [
'string',
]
},
]
},
]
},
'AssignmentReviewReport': {
'ReviewResults': [
{
'ActionId': 'string',
'SubjectId': 'string',
'SubjectType': 'string',
'QuestionId': 'string',
'Key': 'string',
'Value': 'string'
},
],
'ReviewActions': [
{
'ActionId': 'string',
'ActionName': 'string',
'TargetId': 'string',
'TargetType': 'string',
'Status': 'Intended'|'Succeeded'|'Failed'|'Cancelled',
'CompleteTime': datetime(2015, 1, 1),
'Result': 'string',
'ErrorCode': 'string'
},
]
},
'HITReviewReport': {
'ReviewResults': [
{
'ActionId': 'string',
'SubjectId': 'string',
'SubjectType': 'string',
'QuestionId': 'string',
'Key': 'string',
'Value': 'string'
},
],
'ReviewActions': [
{
'ActionId': 'string',
'ActionName': 'string',
'TargetId': 'string',
'TargetType': 'string',
'Status': 'Intended'|'Succeeded'|'Failed'|'Cancelled',
'CompleteTime': datetime(2015, 1, 1),
'Result': 'string',
'ErrorCode': 'string'
},
]
},
'NextToken': 'string'
}
**Response Structure**
- *(dict) --*
- **HITId** *(string) --*
The HITId of the HIT for which results have been returned.
- **AssignmentReviewPolicy** *(dict) --*
The name of the Assignment-level Review Policy. This contains only the PolicyName element.
- **PolicyName** *(string) --*
Name of a Review Policy: SimplePlurality/2011-09-01 or ScoreMyKnownAnswers/2011-09-01
- **Parameters** *(list) --*
Name of the parameter from the Review policy.
- *(dict) --*
Name of the parameter from the Review policy.
- **Key** *(string) --*
Name of the parameter from the list of Review Policies.
- **Values** *(list) --*
The list of values of the Parameter
- *(string) --*
- **MapEntries** *(list) --*
List of ParameterMapEntry objects.
- *(dict) --*
This data structure is the data type for the AnswerKey parameter of the ScoreMyKnownAnswers/2011-09-01 Review Policy.
- **Key** *(string) --*
The QuestionID from the HIT that is used to identify which question requires Mechanical Turk to score as part of the ScoreMyKnownAnswers/2011-09-01 Review Policy.
- **Values** *(list) --*
The list of answers to the question specified in the MapEntry Key element. The Worker must match all values in order for the answer to be scored correctly.
- *(string) --*
- **HITReviewPolicy** *(dict) --*
The name of the HIT-level Review Policy. This contains only the PolicyName element.
- **PolicyName** *(string) --*
Name of a Review Policy: SimplePlurality/2011-09-01 or ScoreMyKnownAnswers/2011-09-01
- **Parameters** *(list) --*
Name of the parameter from the Review policy.
- *(dict) --*
Name of the parameter from the Review policy.
- **Key** *(string) --*
Name of the parameter from the list of Review Policies.
- **Values** *(list) --*
The list of values of the Parameter
- *(string) --*
- **MapEntries** *(list) --*
List of ParameterMapEntry objects.
- *(dict) --*
This data structure is the data type for the AnswerKey parameter of the ScoreMyKnownAnswers/2011-09-01 Review Policy.
- **Key** *(string) --*
The QuestionID from the HIT that is used to identify which question requires Mechanical Turk to score as part of the ScoreMyKnownAnswers/2011-09-01 Review Policy.
- **Values** *(list) --*
The list of answers to the question specified in the MapEntry Key element. The Worker must match all values in order for the answer to be scored correctly.
- *(string) --*
- **AssignmentReviewReport** *(dict) --*
Contains both ReviewResult and ReviewAction elements for an Assignment.
- **ReviewResults** *(list) --*
A list of ReviewResults objects for each action specified in the Review Policy.
- *(dict) --*
This data structure is returned multiple times for each result specified in the Review Policy.
- **ActionId** *(string) --*
A unique identifier of the Review action result.
- **SubjectId** *(string) --*
The HITId or AssignmentId about which this result was taken. Note that HIT-level Review Policies will often emit results about both the HIT itself and its Assignments, while Assignment-level review policies generally only emit results about the Assignment itself.
- **SubjectType** *(string) --*
The type of the object from the SubjectId field.
- **QuestionId** *(string) --*
Specifies the QuestionId the result is describing. Depending on whether the TargetType is a HIT or an Assignment, this result could specify multiple values. If TargetType is HIT and QuestionId is absent, then the result describes results of the HIT, including the HIT agreement score. If ObjectType is Assignment and QuestionId is absent, then the result describes the Worker's performance on the HIT.
- **Key** *(string) --*
Key identifies the particular piece of reviewed information.
- **Value** *(string) --*
The values of Key provided by the review policies you have selected.
- **ReviewActions** *(list) --*
A list of ReviewAction objects for each action specified in the Review Policy.
- *(dict) --*
Both the AssignmentReviewReport and the HITReviewReport elements contain the ReviewActionDetail data structure. This structure is returned multiple times for each action specified in the Review Policy.
- **ActionId** *(string) --*
The unique identifier for the action.
- **ActionName** *(string) --*
The nature of the action itself. The Review Policy is responsible for examining the HIT and Assignments, emitting results, and deciding which other actions will be necessary.
- **TargetId** *(string) --*
The specific HITId or AssignmentId targeted by the action.
- **TargetType** *(string) --*
The type of object in TargetId.
- **Status** *(string) --*
The current disposition of the action: INTENDED, SUCCEEDED, FAILED, or CANCELLED.
- **CompleteTime** *(datetime) --*
The date when the action was completed.
- **Result** *(string) --*
A description of the outcome of the review.
- **ErrorCode** *(string) --*
Present only when the Results have a FAILED Status.
- **HITReviewReport** *(dict) --*
Contains both ReviewResult and ReviewAction elements for a particular HIT.
- **ReviewResults** *(list) --*
A list of ReviewResults objects for each action specified in the Review Policy.
- *(dict) --*
This data structure is returned multiple times for each result specified in the Review Policy.
- **ActionId** *(string) --*
A unique identifier of the Review action result.
- **SubjectId** *(string) --*
The HITId or AssignmentId about which this result was taken. Note that HIT-level Review Policies will often emit results about both the HIT itself and its Assignments, while Assignment-level review policies generally only emit results about the Assignment itself.
- **SubjectType** *(string) --*
The type of the object from the SubjectId field.
- **QuestionId** *(string) --*
Specifies the QuestionId the result is describing. Depending on whether the TargetType is a HIT or an Assignment, this result could specify multiple values. If TargetType is HIT and QuestionId is absent, then the result describes results of the HIT, including the HIT agreement score. If ObjectType is Assignment and QuestionId is absent, then the result describes the Worker's performance on the HIT.
- **Key** *(string) --*
Key identifies the particular piece of reviewed information.
- **Value** *(string) --*
The values of Key provided by the review policies you have selected.
- **ReviewActions** *(list) --*
A list of ReviewAction objects for each action specified in the Review Policy.
- *(dict) --*
Both the AssignmentReviewReport and the HITReviewReport elements contain the ReviewActionDetail data structure. This structure is returned multiple times for each action specified in the Review Policy.
- **ActionId** *(string) --*
The unique identifier for the action.
- **ActionName** *(string) --*
The nature of the action itself. The Review Policy is responsible for examining the HIT and Assignments, emitting results, and deciding which other actions will be necessary.
- **TargetId** *(string) --*
The specific HITId or AssignmentId targeted by the action.
- **TargetType** *(string) --*
The type of object in TargetId.
- **Status** *(string) --*
The current disposition of the action: INTENDED, SUCCEEDED, FAILED, or CANCELLED.
- **CompleteTime** *(datetime) --*
The date when the action was completed.
- **Result** *(string) --*
A description of the outcome of the review.
- **ErrorCode** *(string) --*
Present only when the Results have a FAILED Status.
- **NextToken** *(string) --*
If the previous response was incomplete (because there is more data to retrieve), Amazon Mechanical Turk returns a pagination token in the response. You can use this pagination token to retrieve the next set of results.
:type HITId: string
:param HITId: **[REQUIRED]**
The unique identifier of the HIT to retrieve review results for.
:type PolicyLevels: list
:param PolicyLevels:
The Policy Level(s) to retrieve review results for - HIT or Assignment. If omitted, the default behavior is to retrieve all data for both policy levels. For a list of all the described policies, see Review Policies.
- *(string) --*
:type RetrieveActions: boolean
:param RetrieveActions:
Specify if the operation should retrieve a list of the actions taken while executing the Review Policies, along with their outcomes.
:type RetrieveResults: boolean
:param RetrieveResults:
Specify if the operation should retrieve a list of the results computed by the Review Policies.
:type NextToken: string
:param NextToken:
Pagination token
:type MaxResults: integer
:param MaxResults:
Limit the number of results returned.
:rtype: dict
:returns:
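**Example Usage (illustrative)**
A minimal sketch, not part of the AWS reference; it assumes configured AWS credentials, and the HIT ID is a placeholder. ``.get()`` is used because the report elements are only present when a matching Review Policy was attached to the HIT.
::
    import boto3

    client = boto3.client('mturk', region_name='us-east-1')
    response = client.list_review_policy_results_for_hit(
        HITId='HIT_ID_PLACEHOLDER',
        PolicyLevels=['HIT'],
        RetrieveResults=True,
    )
    report = response.get('HITReviewReport') or {}
    for result in report.get('ReviewResults', []):
        print(result['Key'], result['Value'])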
"""
pass
def list_reviewable_hits(self, HITTypeId: str = None, Status: str = None, NextToken: str = None, MaxResults: int = None) -> Dict:
"""
The ``ListReviewableHITs`` operation retrieves the HITs with Status equal to Reviewable or Status equal to Reviewing that belong to the Requester calling the operation.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/mturk-requester-2017-01-17/ListReviewableHITs>`_
**Request Syntax**
::
response = client.list_reviewable_hits(
HITTypeId='string',
Status='Reviewable'|'Reviewing',
NextToken='string',
MaxResults=123
)
**Response Syntax**
::
{
'NextToken': 'string',
'NumResults': 123,
'HITs': [
{
'HITId': 'string',
'HITTypeId': 'string',
'HITGroupId': 'string',
'HITLayoutId': 'string',
'CreationTime': datetime(2015, 1, 1),
'Title': 'string',
'Description': 'string',
'Question': 'string',
'Keywords': 'string',
'HITStatus': 'Assignable'|'Unassignable'|'Reviewable'|'Reviewing'|'Disposed',
'MaxAssignments': 123,
'Reward': 'string',
'AutoApprovalDelayInSeconds': 123,
'Expiration': datetime(2015, 1, 1),
'AssignmentDurationInSeconds': 123,
'RequesterAnnotation': 'string',
'QualificationRequirements': [
{
'QualificationTypeId': 'string',
'Comparator': 'LessThan'|'LessThanOrEqualTo'|'GreaterThan'|'GreaterThanOrEqualTo'|'EqualTo'|'NotEqualTo'|'Exists'|'DoesNotExist'|'In'|'NotIn',
'IntegerValues': [
123,
],
'LocaleValues': [
{
'Country': 'string',
'Subdivision': 'string'
},
],
'RequiredToPreview': True|False,
'ActionsGuarded': 'Accept'|'PreviewAndAccept'|'DiscoverPreviewAndAccept'
},
],
'HITReviewStatus': 'NotReviewed'|'MarkedForReview'|'ReviewedAppropriate'|'ReviewedInappropriate',
'NumberOfAssignmentsPending': 123,
'NumberOfAssignmentsAvailable': 123,
'NumberOfAssignmentsCompleted': 123
},
]
}
**Response Structure**
- *(dict) --*
- **NextToken** *(string) --*
If the previous response was incomplete (because there is more data to retrieve), Amazon Mechanical Turk returns a pagination token in the response. You can use this pagination token to retrieve the next set of results.
- **NumResults** *(integer) --*
The number of HITs on this page in the filtered results list, equivalent to the number of HITs being returned by this call.
- **HITs** *(list) --*
The list of HIT elements returned by the query.
- *(dict) --*
The HIT data structure represents a single HIT, including all the information necessary for a Worker to accept and complete the HIT.
- **HITId** *(string) --*
A unique identifier for the HIT.
- **HITTypeId** *(string) --*
The ID of the HIT type of this HIT.
- **HITGroupId** *(string) --*
The ID of the HIT Group of this HIT.
- **HITLayoutId** *(string) --*
The ID of the HIT Layout of this HIT.
- **CreationTime** *(datetime) --*
The date and time the HIT was created.
- **Title** *(string) --*
The title of the HIT.
- **Description** *(string) --*
A general description of the HIT.
- **Question** *(string) --*
The data the Worker completing the HIT uses to produce the results. This is either a QuestionForm, an HTMLQuestion, or an ExternalQuestion data structure.
- **Keywords** *(string) --*
One or more words or phrases that describe the HIT, separated by commas. Search terms similar to the keywords of a HIT are more likely to have the HIT in the search results.
- **HITStatus** *(string) --*
The status of the HIT and its assignments. Valid Values are Assignable | Unassignable | Reviewable | Reviewing | Disposed.
- **MaxAssignments** *(integer) --*
The number of times the HIT can be accepted and completed before the HIT becomes unavailable.
- **Reward** *(string) --*
A string representing a currency amount.
- **AutoApprovalDelayInSeconds** *(integer) --*
The amount of time, in seconds, after the Worker submits an assignment for the HIT that the results are automatically approved by Amazon Mechanical Turk. This is the amount of time the Requester has to reject an assignment submitted by a Worker before the assignment is auto-approved and the Worker is paid.
- **Expiration** *(datetime) --*
The date and time the HIT expires.
- **AssignmentDurationInSeconds** *(integer) --*
The length of time, in seconds, that a Worker has to complete the HIT after accepting it.
- **RequesterAnnotation** *(string) --*
An arbitrary data field the Requester who created the HIT can use. This field is visible only to the creator of the HIT.
- **QualificationRequirements** *(list) --*
Conditions that a Worker's Qualifications must meet in order to accept the HIT. A HIT can have between zero and ten Qualification requirements. All requirements must be met in order for a Worker to accept the HIT. Additionally, other actions can be restricted using the ``ActionsGuarded`` field on each ``QualificationRequirement`` structure.
- *(dict) --*
The QualificationRequirement data structure describes a Qualification that a Worker must have before the Worker is allowed to accept a HIT. A requirement may optionally state that a Worker must have the Qualification in order to preview the HIT, or see the HIT in search results.
- **QualificationTypeId** *(string) --*
The ID of the Qualification type for the requirement.
- **Comparator** *(string) --*
The kind of comparison to make against a Qualification's value. You can compare a Qualification's value to an IntegerValue to see if it is LessThan, LessThanOrEqualTo, GreaterThan, GreaterThanOrEqualTo, EqualTo, or NotEqualTo the IntegerValue. You can compare it to a LocaleValue to see if it is EqualTo, or NotEqualTo the LocaleValue. You can check to see if the value is In or NotIn a set of IntegerValue or LocaleValue values. Lastly, a Qualification requirement can also test if a Qualification Exists or DoesNotExist in the user's profile, regardless of its value.
- **IntegerValues** *(list) --*
The integer value to compare against the Qualification's value. IntegerValue must not be present if Comparator is Exists or DoesNotExist. IntegerValue can only be used if the Qualification type has an integer value; it cannot be used with the Worker_Locale QualificationType ID. When performing a set comparison by using the In or the NotIn comparator, you can use up to 15 IntegerValue elements in a QualificationRequirement data structure.
- *(integer) --*
- **LocaleValues** *(list) --*
The locale value to compare against the Qualification's value. The locale value must be a valid ISO 3166 country code; ISO 3166-2 subdivisions are also supported. LocaleValue can only be used with a Worker_Locale QualificationType ID. LocaleValue can only be used with the EqualTo, NotEqualTo, In, and NotIn comparators. You must only use a single LocaleValue element when using the EqualTo or NotEqualTo comparators. When performing a set comparison by using the In or the NotIn comparator, you can use up to 30 LocaleValue elements in a QualificationRequirement data structure.
- *(dict) --*
The Locale data structure represents a geographical region or location.
- **Country** *(string) --*
The country of the locale. Must be a valid ISO 3166 country code. For example, the code US refers to the United States of America.
- **Subdivision** *(string) --*
The state or subdivision of the locale. A valid ISO 3166-2 subdivision code. For example, the code WA refers to the state of Washington.
- **RequiredToPreview** *(boolean) --*
DEPRECATED: Use the ``ActionsGuarded`` field instead. If RequiredToPreview is true, the question data for the HIT will not be shown when a Worker whose Qualifications do not meet this requirement tries to preview the HIT. That is, a Worker's Qualifications must meet all of the requirements for which RequiredToPreview is true in order to preview the HIT. If a Worker meets all of the requirements where RequiredToPreview is true (or if there are no such requirements), but does not meet all of the requirements for the HIT, the Worker will be allowed to preview the HIT's question data, but will not be allowed to accept and complete the HIT. The default is false. This should not be used in combination with the ``ActionsGuarded`` field.
- **ActionsGuarded** *(string) --*
Setting this attribute prevents Workers whose Qualifications do not meet this QualificationRequirement from taking the specified action. Valid arguments include "Accept" (Worker cannot accept the HIT, but can preview the HIT and see it in their search results), "PreviewAndAccept" (Worker cannot accept or preview the HIT, but can see the HIT in their search results), and "DiscoverPreviewAndAccept" (Worker cannot accept, preview, or see the HIT in their search results). It's possible for you to create a HIT with multiple QualificationRequirements (which can have different values for the ActionsGuarded attribute). In this case, the Worker is only permitted to perform an action when they have met all QualificationRequirements guarding the action. The actions in the order of least restrictive to most restrictive are Discover, Preview and Accept. For example, if a Worker meets all QualificationRequirements that are set to DiscoverPreviewAndAccept, but does not meet all requirements that are set with PreviewAndAccept, then the Worker will be able to Discover, i.e. see the HIT in their search results, but will not be able to Preview or Accept the HIT. ActionsGuarded should not be used in combination with the ``RequiredToPreview`` field.
- **HITReviewStatus** *(string) --*
Indicates the review status of the HIT. Valid Values are NotReviewed | MarkedForReview | ReviewedAppropriate | ReviewedInappropriate.
- **NumberOfAssignmentsPending** *(integer) --*
The number of assignments for this HIT that are being previewed or have been accepted by Workers, but have not yet been submitted, returned, or abandoned.
- **NumberOfAssignmentsAvailable** *(integer) --*
The number of assignments for this HIT that are available for Workers to accept.
- **NumberOfAssignmentsCompleted** *(integer) --*
The number of assignments for this HIT that have been approved or rejected.
:type HITTypeId: string
:param HITTypeId:
The ID of the HIT type of the HITs to consider for the query. If not specified, all HITs for the Reviewer are considered.
:type Status: string
:param Status:
Can be either ``Reviewable`` or ``Reviewing`` . Reviewable is the default value.
:type NextToken: string
:param NextToken:
Pagination Token
:type MaxResults: integer
:param MaxResults:
Limit the number of results returned.
:rtype: dict
:returns:
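**Example Usage (illustrative)**
A minimal sketch, not part of the AWS reference; it assumes configured AWS credentials.
::
    import boto3

    client = boto3.client('mturk', region_name='us-east-1')
    # Status defaults to 'Reviewable'; it is passed explicitly for clarity.
    response = client.list_reviewable_hits(Status='Reviewable', MaxResults=100)
    for hit in response['HITs']:
        print(hit['HITId'], hit['NumberOfAssignmentsCompleted'])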
"""
pass
def list_worker_blocks(self, NextToken: str = None, MaxResults: int = None) -> Dict:
"""
The ``ListWorkerBlocks`` operation retrieves a list of Workers who are blocked from working on your HITs.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/mturk-requester-2017-01-17/ListWorkerBlocks>`_
**Request Syntax**
::
response = client.list_worker_blocks(
NextToken='string',
MaxResults=123
)
**Response Syntax**
::
{
'NextToken': 'string',
'NumResults': 123,
'WorkerBlocks': [
{
'WorkerId': 'string',
'Reason': 'string'
},
]
}
**Response Structure**
- *(dict) --*
- **NextToken** *(string) --*
If the previous response was incomplete (because there is more data to retrieve), Amazon Mechanical Turk returns a pagination token in the response. You can use this pagination token to retrieve the next set of results.
- **NumResults** *(integer) --*
The number of Worker blocks on this page in the filtered results list, equivalent to the number of Worker blocks returned by this call.
- **WorkerBlocks** *(list) --*
The list of WorkerBlocks, containing the collection of Worker IDs and reasons for blocking.
- *(dict) --*
The WorkerBlock data structure represents a Worker who has been blocked. It has two elements: the WorkerId and the Reason for the block.
- **WorkerId** *(string) --*
The ID of the Worker who is blocked from working on your HITs.
- **Reason** *(string) --*
A message explaining the reason the Worker was blocked.
:type NextToken: string
:param NextToken:
Pagination token
:type MaxResults: integer
:param MaxResults:
:rtype: dict
:returns:
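**Example Usage (illustrative)**
A minimal sketch, not part of the AWS reference; it assumes configured AWS credentials.
::
    import boto3

    client = boto3.client('mturk', region_name='us-east-1')
    response = client.list_worker_blocks(MaxResults=100)
    for block in response['WorkerBlocks']:
        print(block['WorkerId'], block['Reason'])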
"""
pass
def list_workers_with_qualification_type(self, QualificationTypeId: str, Status: str = None, NextToken: str = None, MaxResults: int = None) -> Dict:
"""
The ``ListWorkersWithQualificationType`` operation returns all of the Workers that have been associated with a given Qualification type.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/mturk-requester-2017-01-17/ListWorkersWithQualificationType>`_
**Request Syntax**
::
response = client.list_workers_with_qualification_type(
QualificationTypeId='string',
Status='Granted'|'Revoked',
NextToken='string',
MaxResults=123
)
**Response Syntax**
::
{
'NextToken': 'string',
'NumResults': 123,
'Qualifications': [
{
'QualificationTypeId': 'string',
'WorkerId': 'string',
'GrantTime': datetime(2015, 1, 1),
'IntegerValue': 123,
'LocaleValue': {
'Country': 'string',
'Subdivision': 'string'
},
'Status': 'Granted'|'Revoked'
},
]
}
**Response Structure**
- *(dict) --*
- **NextToken** *(string) --*
If the previous response was incomplete (because there is more data to retrieve), Amazon Mechanical Turk returns a pagination token in the response. You can use this pagination token to retrieve the next set of results.
- **NumResults** *(integer) --*
The number of Qualifications on this page in the filtered results list, equivalent to the number of Qualifications being returned by this call.
- **Qualifications** *(list) --*
The list of Qualification elements returned by this call.
- *(dict) --*
The Qualification data structure represents a Qualification assigned to a user, including the Qualification type and the value (score).
- **QualificationTypeId** *(string) --*
The ID of the Qualification type for the Qualification.
- **WorkerId** *(string) --*
The ID of the Worker who possesses the Qualification.
- **GrantTime** *(datetime) --*
The date and time the Qualification was granted to the Worker. If the Worker's Qualification was revoked, and then re-granted based on a new Qualification request, GrantTime is the date and time of the last call to the AcceptQualificationRequest operation.
- **IntegerValue** *(integer) --*
The value (score) of the Qualification, if the Qualification has an integer value.
- **LocaleValue** *(dict) --*
The Locale data structure represents a geographical region or location.
- **Country** *(string) --*
The country of the locale. Must be a valid ISO 3166 country code. For example, the code US refers to the United States of America.
- **Subdivision** *(string) --*
The state or subdivision of the locale. A valid ISO 3166-2 subdivision code. For example, the code WA refers to the state of Washington.
- **Status** *(string) --*
The status of the Qualification. Valid values are Granted | Revoked.
:type QualificationTypeId: string
:param QualificationTypeId: **[REQUIRED]**
The ID of the Qualification type of the Qualifications to return.
:type Status: string
:param Status:
The status of the Qualifications to return. Can be ``Granted | Revoked`` .
:type NextToken: string
:param NextToken:
Pagination Token
:type MaxResults: integer
:param MaxResults:
Limit the number of results returned.
:rtype: dict
:returns:
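**Example Usage (illustrative)**
A minimal sketch, not part of the AWS reference; it assumes configured AWS credentials, and the Qualification type ID is a placeholder.
::
    import boto3

    client = boto3.client('mturk', region_name='us-east-1')
    response = client.list_workers_with_qualification_type(
        QualificationTypeId='QUALIFICATION_TYPE_ID_PLACEHOLDER',
        Status='Granted',
    )
    for qualification in response['Qualifications']:
        # IntegerValue is only present for integer-valued Qualifications.
        print(qualification['WorkerId'], qualification.get('IntegerValue'))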
"""
pass
def notify_workers(self, Subject: str, MessageText: str, WorkerIds: List) -> Dict:
"""
The ``NotifyWorkers`` operation sends an email to one or more Workers that you specify with the Worker ID. You can specify up to 100 Worker IDs to send the same message with a single call to the NotifyWorkers operation. The NotifyWorkers operation will send a notification email to a Worker only if you have previously approved or rejected work from the Worker.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/mturk-requester-2017-01-17/NotifyWorkers>`_
**Request Syntax**
::
response = client.notify_workers(
Subject='string',
MessageText='string',
WorkerIds=[
'string',
]
)
**Response Syntax**
::
{
'NotifyWorkersFailureStatuses': [
{
'NotifyWorkersFailureCode': 'SoftFailure'|'HardFailure',
'NotifyWorkersFailureMessage': 'string',
'WorkerId': 'string'
},
]
}
**Response Structure**
- *(dict) --*
- **NotifyWorkersFailureStatuses** *(list) --*
When MTurk sends notifications to the list of Workers, it returns back any failures it encounters in this list of NotifyWorkersFailureStatus objects.
- *(dict) --*
When MTurk encounters an issue with notifying the Workers you specified, it returns back this object with failure details.
- **NotifyWorkersFailureCode** *(string) --*
Encoded value for the failure type.
- **NotifyWorkersFailureMessage** *(string) --*
A message detailing the reason the Worker could not be notified.
- **WorkerId** *(string) --*
The ID of the Worker.
:type Subject: string
:param Subject: **[REQUIRED]**
The subject line of the email message to send. Can include up to 200 characters.
:type MessageText: string
:param MessageText: **[REQUIRED]**
The text of the email message to send. Can include up to 4,096 characters.
:type WorkerIds: list
:param WorkerIds: **[REQUIRED]**
A list of Worker IDs you wish to notify. You can notify up to 100 Workers at a time.
- *(string) --*
:rtype: dict
:returns:
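**Example Usage (illustrative)**
A minimal sketch, not part of the AWS reference; it assumes configured AWS credentials, and the Worker ID is a placeholder. The failure list in the response should be checked, since individual notifications can fail.
::
    import boto3

    client = boto3.client('mturk', region_name='us-east-1')
    response = client.notify_workers(
        Subject='New HITs available',
        MessageText='A new batch of HITs is ready for you.',
        WorkerIds=['WORKER_ID_PLACEHOLDER'],
    )
    for failure in response['NotifyWorkersFailureStatuses']:
        print(failure['WorkerId'], failure['NotifyWorkersFailureMessage'])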
"""
pass
def reject_assignment(self, AssignmentId: str, RequesterFeedback: str) -> Dict:
"""
The ``RejectAssignment`` operation rejects the results of a completed assignment.
You can include an optional feedback message with the rejection, which the Worker can see in the Status section of the web site. When you include a feedback message with the rejection, it helps the Worker understand why the assignment was rejected, and can improve the quality of the results the Worker submits in the future.
Only the Requester who created the HIT can reject an assignment for the HIT.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/mturk-requester-2017-01-17/RejectAssignment>`_
**Request Syntax**
::
response = client.reject_assignment(
AssignmentId='string',
RequesterFeedback='string'
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --*
:type AssignmentId: string
:param AssignmentId: **[REQUIRED]**
The ID of the assignment. The assignment must correspond to a HIT created by the Requester.
:type RequesterFeedback: string
:param RequesterFeedback: **[REQUIRED]**
A message for the Worker, which the Worker can see in the Status section of the web site.
:rtype: dict
:returns:
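**Example Usage (illustrative)**
A minimal sketch, not part of the AWS reference; it assumes configured AWS credentials, and the assignment ID is a placeholder. RequesterFeedback is required and is shown to the Worker.
::
    import boto3

    client = boto3.client('mturk', region_name='us-east-1')
    client.reject_assignment(
        AssignmentId='ASSIGNMENT_ID_PLACEHOLDER',
        RequesterFeedback='The answers did not follow the HIT instructions.',
    )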
"""
pass
def reject_qualification_request(self, QualificationRequestId: str, Reason: str = None) -> Dict:
"""
The ``RejectQualificationRequest`` operation rejects a user's request for a Qualification.
You can provide a text message explaining why the request was rejected. The Worker who made the request can see this message.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/mturk-requester-2017-01-17/RejectQualificationRequest>`_
**Request Syntax**
::
response = client.reject_qualification_request(
QualificationRequestId='string',
Reason='string'
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --*
:type QualificationRequestId: string
:param QualificationRequestId: **[REQUIRED]**
The ID of the Qualification request, as returned by the ``ListQualificationRequests`` operation.
:type Reason: string
:param Reason:
A text message explaining why the request was rejected, to be shown to the Worker who made the request.
:rtype: dict
:returns:
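**Example Usage (illustrative)**
A minimal sketch, not part of the AWS reference; it assumes configured AWS credentials, and the request ID is a placeholder.
::
    import boto3

    client = boto3.client('mturk', region_name='us-east-1')
    client.reject_qualification_request(
        QualificationRequestId='QUALIFICATION_REQUEST_ID_PLACEHOLDER',
        Reason='The test score did not meet the minimum requirement.',
    )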
"""
pass
def send_bonus(self, WorkerId: str, BonusAmount: str, AssignmentId: str, Reason: str, UniqueRequestToken: str = None) -> Dict:
"""
The ``SendBonus`` operation issues a payment of money from your account to a Worker. This payment happens separately from the reward you pay to the Worker when you approve the Worker's assignment. The SendBonus operation requires the Worker's ID and the assignment ID as parameters to initiate payment of the bonus. You must include a message that explains the reason for the bonus payment, as the Worker may not be expecting the payment. Amazon Mechanical Turk collects a fee for bonus payments, similar to the HIT listing fee. This operation fails if your account does not have enough funds to pay for both the bonus and the fees.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/mturk-requester-2017-01-17/SendBonus>`_
**Request Syntax**
::
response = client.send_bonus(
WorkerId='string',
BonusAmount='string',
AssignmentId='string',
Reason='string',
UniqueRequestToken='string'
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --*
:type WorkerId: string
:param WorkerId: **[REQUIRED]**
The ID of the Worker being paid the bonus.
:type BonusAmount: string
:param BonusAmount: **[REQUIRED]**
The Bonus amount is a US Dollar amount specified using a string (for example, \"5\" represents $5.00 USD and \"101.42\" represents $101.42 USD). Do not include currency symbols or currency codes.
:type AssignmentId: string
:param AssignmentId: **[REQUIRED]**
The ID of the assignment for which this bonus is paid.
:type Reason: string
:param Reason: **[REQUIRED]**
A message that explains the reason for the bonus payment. The Worker receiving the bonus can see this message.
:type UniqueRequestToken: string
:param UniqueRequestToken:
A unique identifier for this request, which allows you to retry the call on error without granting multiple bonuses. This is useful in cases such as network timeouts where it is unclear whether or not the call succeeded on the server. If the bonus already exists in the system from a previous call using the same UniqueRequestToken, subsequent calls will return an error with a message containing the request ID.
:rtype: dict
:returns:
"""
pass
def send_test_event_notification(self, Notification: Dict, TestEventType: str) -> Dict:
"""
The ``SendTestEventNotification`` operation causes Amazon Mechanical Turk to send a notification message as if a HIT event occurred, according to the provided notification specification. This allows you to test notifications without setting up notifications for a real HIT type and trying to trigger them using the website. When you call this operation, the service attempts to send the test notification immediately.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/mturk-requester-2017-01-17/SendTestEventNotification>`_
**Request Syntax**
::
response = client.send_test_event_notification(
Notification={
'Destination': 'string',
'Transport': 'Email'|'SQS'|'SNS',
'Version': 'string',
'EventTypes': [
'AssignmentAccepted'|'AssignmentAbandoned'|'AssignmentReturned'|'AssignmentSubmitted'|'AssignmentRejected'|'AssignmentApproved'|'HITCreated'|'HITExpired'|'HITReviewable'|'HITExtended'|'HITDisposed'|'Ping',
]
},
TestEventType='AssignmentAccepted'|'AssignmentAbandoned'|'AssignmentReturned'|'AssignmentSubmitted'|'AssignmentRejected'|'AssignmentApproved'|'HITCreated'|'HITExpired'|'HITReviewable'|'HITExtended'|'HITDisposed'|'Ping'
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --*
:type Notification: dict
:param Notification: **[REQUIRED]**
The notification specification to test. This value is identical to the value you would provide to the UpdateNotificationSettings operation when you establish the notification specification for a HIT type.
- **Destination** *(string) --* **[REQUIRED]**
The target for notification messages. The Destination’s format is determined by the specified Transport:
* When Transport is Email, the Destination is your email address.
* When Transport is SQS, the Destination is your queue URL.
* When Transport is SNS, the Destination is the ARN of your topic.
- **Transport** *(string) --* **[REQUIRED]**
The method Amazon Mechanical Turk uses to send the notification. Valid Values: Email | SQS | SNS.
- **Version** *(string) --* **[REQUIRED]**
The version of the Notification API to use. Valid value is 2006-05-05.
- **EventTypes** *(list) --* **[REQUIRED]**
The list of events that should cause notifications to be sent. Valid Values: AssignmentAccepted | AssignmentAbandoned | AssignmentReturned | AssignmentSubmitted | AssignmentRejected | AssignmentApproved | HITCreated | HITExtended | HITDisposed | HITReviewable | HITExpired | Ping. The Ping event is only valid for the SendTestEventNotification operation.
- *(string) --*
:type TestEventType: string
:param TestEventType: **[REQUIRED]**
The event to simulate to test the notification specification. This event is included in the test message even if the notification specification does not include the event type. The notification specification does not filter out the test event.
:rtype: dict
:returns:
"""
pass
def update_expiration_for_hit(self, HITId: str, ExpireAt: datetime) -> Dict:
"""
The ``UpdateExpirationForHIT`` operation allows you to update the expiration time of a HIT. If you update it to a time in the past, the HIT will be immediately expired.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/mturk-requester-2017-01-17/UpdateExpirationForHIT>`_
**Request Syntax**
::
response = client.update_expiration_for_hit(
HITId='string',
ExpireAt=datetime(2015, 1, 1)
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --*
:type HITId: string
:param HITId: **[REQUIRED]**
The HIT to update.
:type ExpireAt: datetime
:param ExpireAt: **[REQUIRED]**
The date and time at which you want the HIT to expire.
:rtype: dict
:returns:
"""
pass
def update_hit_review_status(self, HITId: str, Revert: bool = None) -> Dict:
"""
The ``UpdateHITReviewStatus`` operation updates the status of a HIT. If the status is Reviewable, this operation can update the status to Reviewing, or it can revert a Reviewing HIT back to the Reviewable status.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/mturk-requester-2017-01-17/UpdateHITReviewStatus>`_
**Request Syntax**
::
response = client.update_hit_review_status(
HITId='string',
Revert=True|False
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --*
:type HITId: string
:param HITId: **[REQUIRED]**
The ID of the HIT to update.
:type Revert: boolean
:param Revert:
Specifies how to update the HIT status. Default is ``False`` .
* Setting this to false will only transition a HIT from ``Reviewable`` to ``Reviewing``
* Setting this to true will only transition a HIT from ``Reviewing`` to ``Reviewable``
:rtype: dict
:returns:
"""
pass
def update_hit_type_of_hit(self, HITId: str, HITTypeId: str) -> Dict:
"""
The ``UpdateHITTypeOfHIT`` operation allows you to change the HITType properties of a HIT. This operation disassociates the HIT from its old HITType properties and associates it with the new HITType properties. The HIT takes on the properties of the new HITType in place of the old ones.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/mturk-requester-2017-01-17/UpdateHITTypeOfHIT>`_
**Request Syntax**
::
response = client.update_hit_type_of_hit(
HITId='string',
HITTypeId='string'
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --*
:type HITId: string
:param HITId: **[REQUIRED]**
The HIT to update.
:type HITTypeId: string
:param HITTypeId: **[REQUIRED]**
The ID of the new HIT type.
:rtype: dict
:returns:
"""
pass
def update_notification_settings(self, HITTypeId: str, Notification: Dict = None, Active: bool = None) -> Dict:
"""
The ``UpdateNotificationSettings`` operation creates, updates, disables, or re-enables notifications for a HIT type. If you call the UpdateNotificationSettings operation for a HIT type that already has a notification specification, the operation replaces the old specification with a new one. You can also enable or disable notifications for a HIT type without modifying the notification specification itself, by supplying only an updated Active status. To change the Active status of a HIT type's notifications, the HIT type must already have a notification specification, or one must be provided in the same call to ``UpdateNotificationSettings``.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/mturk-requester-2017-01-17/UpdateNotificationSettings>`_
**Request Syntax**
::
response = client.update_notification_settings(
HITTypeId='string',
Notification={
'Destination': 'string',
'Transport': 'Email'|'SQS'|'SNS',
'Version': 'string',
'EventTypes': [
'AssignmentAccepted'|'AssignmentAbandoned'|'AssignmentReturned'|'AssignmentSubmitted'|'AssignmentRejected'|'AssignmentApproved'|'HITCreated'|'HITExpired'|'HITReviewable'|'HITExtended'|'HITDisposed'|'Ping',
]
},
Active=True|False
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --*
:type HITTypeId: string
:param HITTypeId: **[REQUIRED]**
The ID of the HIT type whose notification specification is being updated.
:type Notification: dict
:param Notification:
The notification specification for the HIT type.
- **Destination** *(string) --* **[REQUIRED]**
The target for notification messages. The Destination’s format is determined by the specified Transport:
* When Transport is Email, the Destination is your email address.
* When Transport is SQS, the Destination is your queue URL.
* When Transport is SNS, the Destination is the ARN of your topic.
- **Transport** *(string) --* **[REQUIRED]**
The method Amazon Mechanical Turk uses to send the notification. Valid Values: Email | SQS | SNS.
- **Version** *(string) --* **[REQUIRED]**
The version of the Notification API to use. Valid value is 2006-05-05.
- **EventTypes** *(list) --* **[REQUIRED]**
The list of events that should cause notifications to be sent. Valid Values: AssignmentAccepted | AssignmentAbandoned | AssignmentReturned | AssignmentSubmitted | AssignmentRejected | AssignmentApproved | HITCreated | HITExtended | HITDisposed | HITReviewable | HITExpired | Ping. The Ping event is only valid for the SendTestEventNotification operation.
- *(string) --*
:type Active: boolean
:param Active:
Specifies whether notifications are sent for HITs of this HIT type, according to the notification specification. You must specify either the Notification parameter or the Active parameter for the call to UpdateNotificationSettings to succeed.
:rtype: dict
:returns:
"""
pass
def update_qualification_type(self, QualificationTypeId: str, Description: str = None, QualificationTypeStatus: str = None, Test: str = None, AnswerKey: str = None, TestDurationInSeconds: int = None, RetryDelayInSeconds: int = None, AutoGranted: bool = None, AutoGrantedValue: int = None) -> Dict:
"""
The ``UpdateQualificationType`` operation modifies the attributes of an existing Qualification type, which is represented by a QualificationType data structure. Only the owner of a Qualification type can modify its attributes.
Most attributes of a Qualification type can be changed after the type has been created. However, the Name and Keywords fields cannot be modified. The RetryDelayInSeconds parameter can be modified or added to change the delay or to enable retries, but RetryDelayInSeconds cannot be used to disable retries.
You can use this operation to update the test for a Qualification type. The test is updated based on the values specified for the Test, TestDurationInSeconds and AnswerKey parameters. All three parameters specify the updated test. If you are updating the test for a type, you must specify the Test and TestDurationInSeconds parameters. The AnswerKey parameter is optional; omitting it specifies that the updated test does not have an answer key.
If you omit the Test parameter, the test for the Qualification type is unchanged. There is no way to remove a test from a Qualification type that has one. If the type already has a test, you cannot update it to be AutoGranted. If the Qualification type does not have a test and one is provided by an update, the type will henceforth have a test.
If you want to update the test duration or answer key for an existing test without changing the questions, you must specify a Test parameter with the original questions, along with the updated values.
If you provide an updated Test but no AnswerKey, the new test will not have an answer key. Requests for such Qualifications must be granted manually.
You can also update the AutoGranted and AutoGrantedValue attributes of the Qualification type.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/mturk-requester-2017-01-17/UpdateQualificationType>`_
**Request Syntax**
::
response = client.update_qualification_type(
QualificationTypeId='string',
Description='string',
QualificationTypeStatus='Active'|'Inactive',
Test='string',
AnswerKey='string',
TestDurationInSeconds=123,
RetryDelayInSeconds=123,
AutoGranted=True|False,
AutoGrantedValue=123
)
**Response Syntax**
::
{
'QualificationType': {
'QualificationTypeId': 'string',
'CreationTime': datetime(2015, 1, 1),
'Name': 'string',
'Description': 'string',
'Keywords': 'string',
'QualificationTypeStatus': 'Active'|'Inactive',
'Test': 'string',
'TestDurationInSeconds': 123,
'AnswerKey': 'string',
'RetryDelayInSeconds': 123,
'IsRequestable': True|False,
'AutoGranted': True|False,
'AutoGrantedValue': 123
}
}
**Response Structure**
- *(dict) --*
- **QualificationType** *(dict) --*
Contains a QualificationType data structure.
- **QualificationTypeId** *(string) --*
A unique identifier for the Qualification type. A Qualification type is given a Qualification type ID when you call the CreateQualificationType operation.
- **CreationTime** *(datetime) --*
The date and time the Qualification type was created.
- **Name** *(string) --*
The name of the Qualification type. The type name is used to identify the type, and to find the type using a Qualification type search.
- **Description** *(string) --*
A long description for the Qualification type.
- **Keywords** *(string) --*
One or more words or phrases that describe the Qualification type, separated by commas. The Keywords make the type easier to find using a search.
- **QualificationTypeStatus** *(string) --*
The status of the Qualification type. A Qualification type's status determines if users can apply to receive a Qualification of this type, and if HITs can be created with requirements based on this type. Valid values are Active | Inactive.
- **Test** *(string) --*
The questions for a Qualification test associated with this Qualification type that a user can take to obtain a Qualification of this type. This parameter must be specified if AnswerKey is present. A Qualification type cannot have both a specified Test parameter and an AutoGranted value of true.
- **TestDurationInSeconds** *(integer) --*
The amount of time, in seconds, given to a Worker to complete the Qualification test, beginning from the time the Worker requests the Qualification.
- **AnswerKey** *(string) --*
The answers to the Qualification test specified in the Test parameter.
- **RetryDelayInSeconds** *(integer) --*
The amount of time, in seconds, Workers must wait after taking the Qualification test before they can take it again. Workers can take a Qualification test multiple times if they were not granted the Qualification from a previous attempt, or if the test offers a gradient score and they want a better score. If not specified, retries are disabled and Workers can request a Qualification only once.
- **IsRequestable** *(boolean) --*
Specifies whether the Qualification type is one that a user can request through the Amazon Mechanical Turk web site, such as by taking a Qualification test. This value is False for Qualifications assigned automatically by the system. Valid values are True | False.
- **AutoGranted** *(boolean) --*
Specifies that requests for the Qualification type are granted immediately, without prompting the Worker with a Qualification test. Valid values are True | False.
- **AutoGrantedValue** *(integer) --*
The Qualification integer value to use for automatically granted Qualifications, if AutoGranted is true. This is 1 by default.
:type QualificationTypeId: string
:param QualificationTypeId: **[REQUIRED]**
The ID of the Qualification type to update.
:type Description: string
:param Description:
The new description of the Qualification type.
:type QualificationTypeStatus: string
:param QualificationTypeStatus:
The new status of the Qualification type - Active | Inactive
:type Test: string
:param Test:
The questions for the Qualification test a Worker must answer correctly to obtain a Qualification of this type. If this parameter is specified, ``TestDurationInSeconds`` must also be specified.
Constraints: Must not be longer than 65535 bytes. Must be a QuestionForm data structure. This parameter cannot be specified if AutoGranted is true.
If not specified, the Worker may request the Qualification without answering any questions.
:type AnswerKey: string
:param AnswerKey:
The answers to the Qualification test specified in the Test parameter, in the form of an AnswerKey data structure.
:type TestDurationInSeconds: integer
:param TestDurationInSeconds:
The number of seconds the Worker has to complete the Qualification test, starting from the time the Worker requests the Qualification.
:type RetryDelayInSeconds: integer
:param RetryDelayInSeconds:
The amount of time, in seconds, that Workers must wait after requesting a Qualification of the specified Qualification type before they can retry the Qualification request. It is not possible to disable retries for a Qualification type after it has been created with retries enabled. If you want to disable retries, you must dispose of the existing retry-enabled Qualification type using DisposeQualificationType and then create a new Qualification type with retries disabled using CreateQualificationType.
:type AutoGranted: boolean
:param AutoGranted:
Specifies whether requests for the Qualification type are granted immediately, without prompting the Worker with a Qualification test.
Constraints: If the Test parameter is specified, this parameter cannot be true.
:type AutoGrantedValue: integer
:param AutoGrantedValue:
The Qualification value to use for automatically granted Qualifications. This parameter is used only if the AutoGranted parameter is true.
:rtype: dict
:returns:
"""
pass
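# --- Usage sketch (editor's addition, not part of the generated stub). ---
# A minimal, hedged example of driving these operations through boto3; the
# endpoint below is the documented MTurk sandbox, and all IDs are placeholders.
#
#     import boto3
#     client = boto3.client(
#         'mturk',
#         region_name='us-east-1',
#         endpoint_url='https://mturk-requester-sandbox.us-east-1.amazonaws.com',
#     )
#     # UniqueRequestToken makes the bonus payment idempotent across retries:
#     # repeating the call with the same token cannot grant a second bonus.
#     client.send_bonus(
#         WorkerId='A1EXAMPLEWORKERID',
#         BonusAmount='1.00',
#         AssignmentId='3EXAMPLEASSIGNMENTID',
#         Reason='Accurate work on the bonus task.',
#         UniqueRequestToken='bonus-3EXAMPLEASSIGNMENTID-1',
#     )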
|
273872033b9d2dbc7a8f4287b9e6735b35ed5160
|
0f89cda6f3ae5092e0b44b9441fd93b5a2d4a703
|
/code/default/x_tunnel/local/heroku_front/test.py
|
4713616f531443e4877f30ff5d2409785577779b
|
[
"BSD-2-Clause"
] |
permissive
|
XX-net/XX-Net
|
ef59074e7dd67d1334c59bb076519bb796db4f4e
|
541f58da464296001109f9cfbb879256957b3819
|
refs/heads/master
| 2023-08-28T04:56:16.921687
| 2023-08-27T06:17:05
| 2023-08-27T06:17:32
| 29,290,473
| 40,250
| 11,454
| null | 2023-04-28T15:12:20
| 2015-01-15T09:35:51
|
Python
|
UTF-8
|
Python
| false
| false
| 1,676
|
py
|
test.py
|
#!/usr/bin/env python2
# coding:utf-8
import os
import sys
import time
import threading
current_path = os.path.dirname(os.path.abspath(__file__))
root_path = os.path.abspath( os.path.join(current_path, os.pardir, os.pardir, os.pardir))
python_path = root_path
sys.path.append(root_path)
noarch_lib = os.path.abspath( os.path.join(python_path, 'lib', 'noarch'))
sys.path.append(noarch_lib)
if sys.platform == "win32":
win32_lib = os.path.abspath( os.path.join(python_path, 'lib', 'win32'))
sys.path.append(win32_lib)
elif sys.platform.startswith("linux"):
linux_lib = os.path.abspath( os.path.join(python_path, 'lib', 'linux'))
sys.path.append(linux_lib)
elif sys.platform == "darwin":
darwin_lib = os.path.abspath( os.path.join(python_path, 'lib', 'darwin'))
sys.path.append(darwin_lib)
extra_lib = "/System/Library/Frameworks/Python.framework/Versions/2.7/Extras/lib/python"
sys.path.append(extra_lib)
import env_info
from .front import front
from xlog import getLogger
xlog = getLogger("heroku_front")
xlog.set_buffer(2000)
data_path = env_info.data_path
module_data_path = os.path.join(data_path, 'x_tunnel')
def get():
start_time = time.time()
content, status, response = front.request("GET", "dns.xx-net.org", path="/query?domain=www.google.com")
time_cost = time.time() - start_time
xlog.info("GET cost:%f", time_cost)
xlog.info("status:%d content:%s", status, content.tobytes())
front.stop()
if __name__ == '__main__':
import traceback
try:
get()
except Exception:
traceback.print_exc(file=sys.stdout)
except KeyboardInterrupt:
front.stop()
sys.exit()
|
7f2b93408858d8b1150e432b587991a1300b6b12
|
6212685234047cbca58be36b97e0414b5d9caa2b
|
/scanpy/experimental/pp/__init__.py
|
a5eaf9d9c28364d15a778739ee635ef150967f0d
|
[
"BSD-3-Clause"
] |
permissive
|
scverse/scanpy
|
139fb1e34bb21c67786020e12129fc9d44634671
|
0bf043c84b4bf59ad684b69178dddc2a5732c972
|
refs/heads/master
| 2023-08-30T06:59:01.154055
| 2023-08-29T07:26:11
| 2023-08-29T07:26:11
| 80,342,493
| 437
| 146
|
BSD-3-Clause
| 2023-09-14T08:48:01
| 2017-01-29T11:31:11
|
Python
|
UTF-8
|
Python
| false
| false
| 275
|
py
|
__init__.py
|
from scanpy.experimental.pp._normalization import (
normalize_pearson_residuals,
normalize_pearson_residuals_pca,
)
from scanpy.experimental.pp._highly_variable_genes import highly_variable_genes
from scanpy.experimental.pp._recipes import recipe_pearson_residuals
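# Usage sketch (editor's addition; the dataset and n_top_genes value are
# illustrative assumptions): the experimental Pearson-residuals workflow
# operates on an AnnData object of raw counts, e.g.
#
#     import scanpy as sc
#     adata = sc.datasets.pbmc3k()
#     sc.experimental.pp.highly_variable_genes(
#         adata, flavor="pearson_residuals", n_top_genes=2000
#     )
#     sc.experimental.pp.normalize_pearson_residuals(adata)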
|
34a7425e15abc1204eabd2a00135aecb9a1f25c2
|
846c87361e5d6e04ccca1083254bdc35b488407f
|
/winrm/tests/test_protocol.py
|
ee5a2aa70640eebf070d94dd9c961d41c8753ef9
|
[
"MIT"
] |
permissive
|
diyan/pywinrm
|
e6bd5a194cd20319e84f95f7150a75f89c8622d0
|
f796b5aa15f0ce6c3e16aa3fd33a13efedff4937
|
refs/heads/master
| 2023-08-26T16:44:11.542214
| 2023-05-18T23:55:27
| 2023-05-18T23:55:27
| 4,926,896
| 995
| 278
|
MIT
| 2023-07-13T14:32:25
| 2012-07-06T15:25:54
|
Python
|
UTF-8
|
Python
| false
| false
| 3,141
|
py
|
test_protocol.py
|
import pytest
from winrm.protocol import Protocol
def test_open_shell_and_close_shell(protocol_fake):
shell_id = protocol_fake.open_shell()
assert shell_id == '11111111-1111-1111-1111-111111111113'
protocol_fake.close_shell(shell_id, close_session=True)
def test_run_command_with_arguments_and_cleanup_command(protocol_fake):
shell_id = protocol_fake.open_shell()
command_id = protocol_fake.run_command(shell_id, 'ipconfig', ['/all'])
assert command_id == '11111111-1111-1111-1111-111111111114'
protocol_fake.cleanup_command(shell_id, command_id)
protocol_fake.close_shell(shell_id)
def test_run_command_without_arguments_and_cleanup_command(protocol_fake):
shell_id = protocol_fake.open_shell()
command_id = protocol_fake.run_command(shell_id, 'hostname')
assert command_id == '11111111-1111-1111-1111-111111111114'
protocol_fake.cleanup_command(shell_id, command_id)
protocol_fake.close_shell(shell_id)
def test_get_command_output(protocol_fake):
shell_id = protocol_fake.open_shell()
command_id = protocol_fake.run_command(shell_id, 'ipconfig', ['/all'])
std_out, std_err, status_code = protocol_fake.get_command_output(
shell_id, command_id)
assert status_code == 0
assert b'Windows IP Configuration' in std_out
assert len(std_err) == 0
protocol_fake.cleanup_command(shell_id, command_id)
protocol_fake.close_shell(shell_id)
def test_send_command_input(protocol_fake):
shell_id = protocol_fake.open_shell()
command_id = protocol_fake.run_command(shell_id, u'cmd')
protocol_fake.send_command_input(shell_id, command_id, u'echo "hello world" && exit\r\n')
std_out, std_err, status_code = protocol_fake.get_command_output(
shell_id, command_id)
assert status_code == 0
assert b'hello world' in std_out
assert len(std_err) == 0
protocol_fake.cleanup_command(shell_id, command_id)
protocol_fake.close_shell(shell_id)
def test_set_timeout_as_sec():
protocol = Protocol('endpoint',
username='username',
password='password',
read_timeout_sec='30',
operation_timeout_sec='29')
assert protocol.read_timeout_sec == 30
assert protocol.operation_timeout_sec == 29
def test_fail_set_read_timeout_as_sec():
with pytest.raises(ValueError) as exc:
Protocol('endpoint',
username='username',
password='password',
read_timeout_sec='30a',
operation_timeout_sec='29')
assert str(exc.value) == "failed to parse read_timeout_sec as int: " \
"invalid literal for int() with base 10: '30a'"
def test_fail_set_operation_timeout_as_sec():
with pytest.raises(ValueError) as exc:
Protocol('endpoint',
username='username',
password='password',
read_timeout_sec=30,
operation_timeout_sec='29a')
assert str(exc.value) == "failed to parse operation_timeout_sec as int: " \
"invalid literal for int() with base 10: '29a'"
|
c0deb5824e287f643d81dc97eec8edee45c4511b
|
8868204daf8de5a2910cedefe29e6c56340b06e6
|
/Projects/Raspberry-Pi/motion_detection_email_notifications.py
|
3fa7c8ce5045b2a7975f190e7672cb634cf09ea7
|
[] |
no_license
|
RuiSantosdotme/Random-Nerd-Tutorials
|
0f79fb56f9b9732bfb272e8f4abedb803a7b4757
|
dd6133e1205a2cb5fd408cc198eba2aa14f9b163
|
refs/heads/master
| 2023-08-30T15:17:08.020453
| 2023-08-22T18:12:09
| 2023-08-22T18:12:09
| 23,052,165
| 379
| 261
| null | 2023-03-31T01:57:45
| 2014-08-17T22:43:29
|
C++
|
UTF-8
|
Python
| false
| false
| 2,344
|
py
|
motion_detection_email_notifications.py
|
# Complete Project Details: https://RandomNerdTutorials.com/raspberry-pi-motion-email-python/
#import necessary libraries
from gpiozero import LED, Button, MotionSensor
import smtplib
from email.message import EmailMessage
from signal import pause
from time import sleep
#create objects to refer to the LED, the button, and the PIR sensor
led_status = LED(14)
button = Button(4)
pir = MotionSensor(18)
#replace the next three lines with your credentials
from_email_addr = "REPLACE_WITH_SENDER_EMAIL_ADDRESS"
from_email_password = "REPLACE_WITH_SENDER_APP_PASSWORD"
to_email_addr = "REPLACE_WITH_RECIPIENT_EMAIL_ADDRESS"
email_subject = "[WARNING!] Intruder Alert!"
email_body = "Motion was detected in your room!"
#control variables
motion_sensor_status = False
email_sent = False
#arm or disarm the PIR sensor
def arm_motion_sensor():
global email_sent
global motion_sensor_status
if motion_sensor_status == True:
motion_sensor_status = False
led_status.off()
print("Motion Sensor OFF")
else:
motion_sensor_status = True
email_sent = False
led_status.on()
print("Motion Sensor ON")
#send email when motion is detected and the PIR sensor is armed
def send_email():
global email_sent
global motion_sensor_status
if(motion_sensor_status == True and email_sent == False):
print("Motion Detected")
#create a message object
msg = EmailMessage()
#set the email body
msg.set_content(email_body)
#set sender and recipient
msg['From'] = from_email_addr
msg['To'] = to_email_addr
#set your email subject
msg['Subject'] = email_subject
#connect to server and send email
#edit this line with your provider's SMTP server details
server = smtplib.SMTP('smtp.gmail.com', 587)
#comment out this line if your provider doesn't use TLS
server.starttls()
server.login(from_email_addr, from_email_password)
server.send_message(msg)
server.quit()
email_sent = True
print('Email sent')
sleep(5)
email_sent = False
#assign a function that runs when the button is pressed
button.when_pressed = arm_motion_sensor
#assign a function that runs when motion is detected
pir.when_motion = send_email
pause()
|
d3c3abc621bdd93782daf7d659d62aa9692e8dbd
|
7bea5adf7d6284fbad0131d665e957d58adfe7c7
|
/allauth/socialaccount/providers/line/views.py
|
53d6da0902336c3f54638d8fa985d11d64d2eda9
|
[
"MIT"
] |
permissive
|
pennersr/django-allauth
|
50c9e71c3666785368e92ed9e19ea0f6a5438cd2
|
6b8911a5ebbabda0d446f2743bd4d00d250ed500
|
refs/heads/main
| 2023-09-03T16:48:10.988418
| 2023-09-02T08:00:53
| 2023-09-02T08:00:53
| 976,994
| 7,719
| 3,481
|
MIT
| 2023-09-14T15:06:57
| 2010-10-10T20:10:52
|
Python
|
UTF-8
|
Python
| false
| false
| 2,149
|
py
|
views.py
|
import requests
from datetime import timedelta
from django.utils import timezone
from allauth.socialaccount import app_settings
from allauth.socialaccount.models import SocialToken
from allauth.socialaccount.providers.oauth2.views import (
OAuth2Adapter,
OAuth2CallbackView,
OAuth2LoginView,
)
from .provider import LineProvider
class LineOAuth2Adapter(OAuth2Adapter):
provider_id = LineProvider.id
access_token_url = "https://api.line.me/oauth2/v2.1/token"
authorize_url = "https://access.line.me/oauth2/v2.1/authorize"
profile_url = "https://api.line.me/v2/profile" # https://developers.line.biz/en/reference/line-login/#get-user-profile
id_token_url = "https://api.line.me/oauth2/v2.1/verify" # https://developers.line.biz/en/reference/line-login/#verify-id-token
def parse_token(self, data):
"""
data: access_token data from line
"""
settings = app_settings.PROVIDERS.get(self.provider_id, {})
if "email" in settings.get("SCOPE", ""):
token = SocialToken(token=data["id_token"])
else:
token = SocialToken(token=data["access_token"])
token.token_secret = data.get("refresh_token", "")
expires_in = data.get(self.expires_in_key, None)
if expires_in:
token.expires_at = timezone.now() + timedelta(seconds=int(expires_in))
return token
def complete_login(self, request, app, token, **kwargs):
settings = app_settings.PROVIDERS.get(self.provider_id, {})
if "email" in settings.get("SCOPE", ""):
payload = {"client_id": app.client_id, "id_token": token.token}
resp = requests.post(self.id_token_url, payload)
else:
headers = {"Authorization": "Bearer {0}".format(token.token)}
resp = requests.get(self.profile_url, headers=headers)
resp.raise_for_status()
extra_data = resp.json()
return self.get_provider().sociallogin_from_response(request, extra_data)
oauth2_login = OAuth2LoginView.adapter_view(LineOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(LineOAuth2Adapter)
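# Illustrative sketch (editor's addition; token values are hypothetical): with
# "email" in the provider's SCOPE setting, parse_token keeps the OpenID Connect
# id_token rather than the access token, e.g.
#
#     adapter = LineOAuth2Adapter(request)
#     token = adapter.parse_token({
#         "access_token": "...", "id_token": "...",
#         "refresh_token": "...", "expires_in": 3600,
#     })
#     # token.token holds the id_token; token.expires_at is ~1 hour from now.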
|
d426d51d1ed2c52a2657dcf6a99abbfcb9b8e334
|
842e05376fceb0df0c8a28e82a0627f13d48cd46
|
/cashews/wrapper/time_condition.py
|
759c1123cf7efd2dd4c5b34cd0aeb048df0eae9f
|
[
"MIT"
] |
permissive
|
Krukov/cashews
|
79becdb0434c245b28309e2e4330e6f72cc8967e
|
45f3a6e1bd7976800e42dd793b553c23282b1442
|
refs/heads/master
| 2023-08-09T21:31:03.283814
| 2023-07-21T20:37:10
| 2023-07-21T20:37:10
| 227,915,294
| 251
| 21
|
MIT
| 2023-09-12T03:12:34
| 2019-12-13T20:17:25
|
Python
|
UTF-8
|
Python
| false
| false
| 820
|
py
|
time_condition.py
|
import time
from contextvars import ContextVar
from functools import wraps
from typing import Any, Dict, Tuple
from cashews._typing import AsyncCallable_T, CallableCacheCondition, Decorator
_spent = ContextVar("spent", default=0.0)
def create_time_condition(limit: float) -> Tuple[CallableCacheCondition, Decorator]:
def decorator(func: AsyncCallable_T) -> AsyncCallable_T:
@wraps(func)
async def _wrapper(*args, **kwargs):
start = time.perf_counter()
try:
return await func(*args, **kwargs)
finally:
_spent.set(time.perf_counter() - start)
return _wrapper
def condition(result: Any, args: Tuple, kwargs: Dict[str, Any], key: str = "") -> bool:
return _spent.get() > limit
return condition, decorator
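# --- Usage sketch (editor's addition): wiring the returned pair together. ---
# The decorator records how long the wrapped coroutine takes; the condition
# then reports whether the most recent call exceeded the limit, which lets a
# cache policy store only results that were expensive to compute.
if __name__ == "__main__":  # pragma: no cover
    import asyncio

    async def _demo():
        condition, decorator = create_time_condition(0.05)

        @decorator
        async def slow_call():
            await asyncio.sleep(0.1)
            return "payload"

        result = await slow_call()
        # Prints True: the call took ~0.1 s, longer than the 0.05 s limit.
        print(condition(result, (), {}))

    asyncio.run(_demo())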
|
a251ba61f5b8111c78da2e61e50a45d3731e1014
|
c80df697c0b66cd58a039c928574926bd6161a36
|
/tools/test.py
|
3e676fcda5edec49b6ba45dd4a96ef096fe8c63c
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-unknown"
] |
permissive
|
dart-lang/sdk
|
d4e50700dfc54b33c0a7a09fab1aa9623ebc84e5
|
b25873f11c68772408f6a4aea5f5c961f31ac9f7
|
refs/heads/master
| 2023-08-31T11:13:09.400940
| 2023-08-31T09:10:57
| 2023-08-31T09:10:57
| 35,726,310
| 10,701
| 2,079
|
BSD-3-Clause
| 2023-09-14T10:34:15
| 2015-05-16T14:14:58
|
Dart
|
UTF-8
|
Python
| false
| false
| 1,903
|
py
|
test.py
|
#!/usr/bin/env python3
# Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
from contextlib import ExitStack
import os
import string
import subprocess
import sys
import utils
def Main():
args = sys.argv[1:]
cleanup_dart = False
if '--cleanup-dart-processes' in args:
args.remove('--cleanup-dart-processes')
cleanup_dart = True
tools_dir = os.path.dirname(os.path.realpath(__file__))
repo_dir = os.path.dirname(tools_dir)
dart_test_script = os.path.join(repo_dir, 'pkg', 'test_runner', 'bin',
'test_runner.dart')
command = [utils.CheckedInSdkExecutable(), dart_test_script] + args
# The testing script potentially needs the android platform tools in PATH so
# we do that in ./tools/test.py (a similar logic exists in ./tools/build.py).
android_platform_tools = os.path.normpath(
os.path.join(tools_dir,
'../third_party/android_tools/sdk/platform-tools'))
if os.path.isdir(android_platform_tools):
os.environ['PATH'] = '%s%s%s' % (os.environ['PATH'], os.pathsep,
android_platform_tools)
with utils.FileDescriptorLimitIncreaser():
with ExitStack() as stack:
for ctx in utils.CoreDumpArchiver(args):
stack.enter_context(ctx)
exit_code = subprocess.call(command)
if cleanup_dart:
cleanup_command = [
sys.executable,
os.path.join(tools_dir, 'task_kill.py'), '--kill_dart=True',
'--kill_vc=False'
]
subprocess.call(cleanup_command)
utils.DiagnoseExitCode(exit_code, command)
return exit_code
if __name__ == '__main__':
sys.exit(Main())
|
d296423e84034199d211065878e5b17206988f2e
|
3395a234e7c80d011607e79c49cd48bf516f256b
|
/dependencies/jedi/third_party/django-stubs/django-stubs/contrib/staticfiles/handlers.pyi
|
11c6b9cc00311876dd4c195d1407cd2f45972112
|
[
"MIT"
] |
permissive
|
srusskih/SublimeJEDI
|
67329b72e184bc9584843968dcc534a002c797a1
|
95c185d778425c04536d53517b0e3fe6dedf8e59
|
refs/heads/master
| 2023-08-24T11:30:37.801834
| 2022-08-30T09:04:17
| 2022-08-30T09:04:17
| 6,241,108
| 669
| 125
|
MIT
| 2022-08-30T09:04:18
| 2012-10-16T08:23:57
|
Python
|
UTF-8
|
Python
| false
| false
| 417
|
pyi
|
handlers.pyi
|
from typing import Any
from django.core.handlers.wsgi import WSGIHandler, WSGIRequest
class StaticFilesHandler(WSGIHandler):
handles_files: bool = ...
application: WSGIHandler = ...
base_url: Any = ...
def __init__(self, application: WSGIHandler) -> None: ...
def get_base_url(self) -> str: ...
def file_path(self, url: str) -> str: ...
def serve(self, request: WSGIRequest) -> Any: ...
|
03de02f2be0464891123b4452e3d2343c85fd427
|
fbbe424559f64e9a94116a07eaaa555a01b0a7bb
|
/pytorch/source/caffe2/python/operator_test/rebatching_queue_test.py
|
af207bedc69a5d4442b686792cef127d550253a9
|
[
"MIT"
] |
permissive
|
ryfeus/lambda-packs
|
6544adb4dec19b8e71d75c24d8ed789b785b0369
|
cabf6e4f1970dc14302f87414f170de19944bac2
|
refs/heads/master
| 2022-12-07T16:18:52.475504
| 2022-11-29T13:35:35
| 2022-11-29T13:35:35
| 71,386,735
| 1,283
| 263
|
MIT
| 2022-11-26T05:02:14
| 2016-10-19T18:22:39
|
Python
|
UTF-8
|
Python
| false
| false
| 9,152
|
py
|
rebatching_queue_test.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, workspace
from caffe2.python.test_util import TestCase
import numpy as np
import numpy.testing as npt
from hypothesis import given
import hypothesis.strategies as st
import functools
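# Helper returning the prime factorization of n as a list, smallest factors
# first; e.g. primefac(12) == [2, 2, 3]. Used below to split a consumer's
# element count into iteration and read-size factors.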
def primefac(n):
ret = []
divisor = 2
while divisor * divisor <= n:
while (n % divisor) == 0:
ret.append(divisor)
n = n // divisor
divisor = divisor + 1
if n > 1:
ret.append(n)
return ret
class TestReBatchingQueue(TestCase):
def test_rebatching_queue_single_enqueue_dequeue(self):
net = core.Net('net')
tensors = [
net.ConstantFill([], 1, value=1.0, run_once=False)
for times in range(3)
]
queue = net.CreateRebatchingQueue([], 1, capacity=10, num_blobs=1)
net.EnqueueRebatchingQueue([queue, tensors[0]], [])
net.EnqueueRebatchingQueue([queue, tensors[1]], [])
net.EnqueueRebatchingQueue([queue, tensors[2]], [])
results = [
net.DequeueRebatchingQueue([queue], 1),
net.DequeueRebatchingQueue([queue], 1),
net.DequeueRebatchingQueue([queue], 1),
]
workspace.RunNetOnce(net)
for idx in range(3):
self.assertEquals(workspace.FetchBlob(results[idx]), [1.0])
def test_rebatching_queue_multi_enqueue_dequeue(self):
net = core.Net('net')
workspace.FeedBlob(
"tensors", np.array([x for x in range(10)], np.int32)
)
queue = net.CreateRebatchingQueue([], 1, capacity=10, num_blobs=1)
net.EnqueueRebatchingQueue([queue, "tensors"], [], enqueue_batch=True)
results = [
net.DequeueRebatchingQueue([queue], 1, num_elements=5),
net.DequeueRebatchingQueue([queue], 1, num_elements=5),
]
workspace.RunNetOnce(net)
npt.assert_array_equal(
workspace.FetchBlob(results[0]), workspace.FetchBlob("tensors")[:5]
)
npt.assert_array_equal(
workspace.FetchBlob(results[1]), workspace.FetchBlob("tensors")[5:]
)
def test_rebatching_queue_closes_properly(self):
net = core.Net('net')
workspace.FeedBlob(
"tensors", np.array([x for x in range(10)], np.int32)
)
queue = net.CreateRebatchingQueue([], 1, capacity=10, num_blobs=1)
net.EnqueueRebatchingQueue([queue, "tensors"], 0, enqueue_batch=True)
net.CloseRebatchingQueue([queue], 0)
results = [
net.DequeueRebatchingQueue([queue], 1, num_elements=5),
net.DequeueRebatchingQueue([queue], 1, num_elements=5),
]
workspace.RunNetOnce(net)
npt.assert_array_equal(
workspace.FetchBlob(results[0]), workspace.FetchBlob("tensors")[:5]
)
npt.assert_array_equal(
workspace.FetchBlob(results[1]), workspace.FetchBlob("tensors")[5:]
)
# Enqueuing more should fail now since the queue is closed
net.EnqueueRebatchingQueue([queue, "tensors"], [], enqueue_batch=True)
with self.assertRaises(RuntimeError):
workspace.RunNetOnce(net)
# Dequeuing more should fail now since the queue is closed
results = [
net.DequeueRebatchingQueue([queue], 1, num_elements=5),
]
with self.assertRaises(RuntimeError):
workspace.RunNetOnce(net)
def test_rebatching_queue_multiple_components(self):
NUM_BLOBS = 4
NUM_ELEMENTS = 10
net = core.Net('net')
workspace.blobs['complex_tensor'] = np.array(
[[x, x + 1] for x in range(NUM_ELEMENTS)], dtype=np.int32
)
tensors = [
net.GivenTensorIntFill(
[],
1,
shape=[NUM_ELEMENTS],
values=[x for x in range(NUM_ELEMENTS)]
),
net.GivenTensorFill(
[],
1,
shape=[NUM_ELEMENTS],
values=[x * 1.0 for x in range(NUM_ELEMENTS)]
),
net.GivenTensorBoolFill(
[],
1,
shape=[NUM_ELEMENTS],
values=[(x % 2 == 0) for x in range(NUM_ELEMENTS)]
),
'complex_tensor',
]
queue = net.CreateRebatchingQueue(
[], 1, capacity=10, num_blobs=NUM_BLOBS
)
net.EnqueueRebatchingQueue([queue] + tensors, [], enqueue_batch=True)
results = net.DequeueRebatchingQueue([queue], NUM_BLOBS, num_elements=5)
workspace.RunNetOnce(net)
for idx in range(NUM_BLOBS):
npt.assert_array_equal(
workspace.FetchBlob(results[idx]),
workspace.FetchBlob(tensors[idx])[:5]
)
@given(
num_producers=st.integers(1, 5),
num_consumers=st.integers(1, 5),
producer_input_size=st.integers(1, 10),
producer_num_iterations=st.integers(1, 10),
capacity=st.integers(1, 10)
)
def test_rebatching_parallel_producer_consumer(
self, num_producers, num_consumers, producer_input_size,
producer_num_iterations, capacity
):
### Init ###
total_inputs = producer_num_iterations * producer_input_size * num_producers
inputs = []
init_net = core.Net('init_net')
queue = init_net.CreateRebatchingQueue(
[], 1, capacity=capacity, num_blobs=1
)
### Producers ###
producer_steps = []
for i in range(num_producers):
name = 'producer_%d' % i
net = core.Net(name)
values = [
producer_input_size * i + x for x in range(producer_input_size)
]
for _ in range(producer_num_iterations):
inputs.extend(values)
tensors = net.GivenTensorIntFill(
[], 1, shape=[producer_input_size], values=values
)
net.EnqueueRebatchingQueue([queue, tensors], [], enqueue_batch=True)
step = core.execution_step(
name, net, num_iter=producer_num_iterations
)
producer_steps.append(step)
producer_step = core.execution_step(
'producer', [
core.execution_step(
'producers', producer_steps, concurrent_substeps=True
)
]
)
### Consumers ###
outputs = []
def append(ins, outs):
# Extend is atomic
outputs.extend(ins[0].data.tolist())
consumer_steps = []
for i in range(num_consumers):
            # This is just a way of deterministically reading all the elements.
            # We make `num_consumers` almost equal splits
            # (the remainder goes to the last consumer).
num_elements_to_read = total_inputs // num_consumers
if i == num_consumers - 1:
num_elements_to_read = num_elements_to_read \
+ total_inputs % num_consumers
# If we have nothing to read this consumer will be idle
if (num_elements_to_read == 0):
continue
# Now we have to make a split on number of iterations and the read
# size for each iteration. This is again just one of many
# deterministic ways of doing it. We factorize the total number of
# elements we have to read and assign half of the factors to the
# iterations half to the read size.
factors = list(primefac(num_elements_to_read))
num_elements_per_iteration = functools.reduce(
lambda x, y: x * y, factors[len(factors) // 2:], 1
)
num_iterations = functools.reduce(
lambda x, y: x * y, factors[:len(factors) // 2], 1
)
name = 'consumer_%d' % i
net = core.Net(name)
blobs = net.DequeueRebatchingQueue(
[queue], 1, num_elements=num_elements_per_iteration
)
net.Python(append)([blobs], 0)
consumer_steps.append(
core.execution_step(name, net, num_iter=num_iterations)
)
consumer_step = core.execution_step(
'consumer', consumer_steps, concurrent_substeps=True
)
init_step = core.execution_step('init', init_net)
worker_step = core.execution_step(
'worker', [consumer_step, producer_step], concurrent_substeps=True
)
### Execute Plan ###
plan = core.Plan('test')
plan.AddStep(init_step)
plan.AddStep(worker_step)
self.ws.run(plan)
### Check Results ###
# We check that the outputs are a permutation of inputs
inputs.sort()
outputs.sort()
self.assertEquals(inputs, outputs)
if __name__ == "__main__":
import unittest
unittest.main()
|
5b7ae4526c4bda158e8eeb3c97613ef87831c143
|
4a9b074e9cafebcffa4c8212bb966658c476a33b
|
/neo/SmartContract/tests/sc_debug_events.py
|
290c4e3b8441de2a35b8d0cb05f66ad61da71696
|
[
"LicenseRef-scancode-free-unknown",
"MIT"
] |
permissive
|
CityOfZion/neo-python
|
66a46c2509044d9b52ccce4643b4df74a1f14da2
|
99783bc8310982a5380081ec41a6ee07ba843f3f
|
refs/heads/master
| 2023-07-19T00:04:36.415986
| 2021-11-15T12:15:55
| 2021-11-15T12:15:55
| 97,416,422
| 396
| 288
|
MIT
| 2021-06-01T21:42:39
| 2017-07-16T23:05:32
|
Python
|
UTF-8
|
Python
| false
| false
| 209
|
py
|
sc_debug_events.py
|
"""
Sample smart contract for use in `test_sc_debug_events.py`
"""
from boa.interop.Neo.Runtime import Notify, Log
def Main(args):
Notify("Start main")
x = args[0]
Log(x)
Notify("End main")
|
5e1d58351f72895c41775736fb85bce7b80792ea
|
117aaf186609e48230bff9f4f4e96546d3484963
|
/questions/53496605/main.py
|
04944ac98b99e948a9153d2b948a312c2e6255bb
|
[
"MIT"
] |
permissive
|
eyllanesc/stackoverflow
|
8d1c4b075e578496ea8deecbb78ef0e08bcc092e
|
db738fbe10e8573b324d1f86e9add314f02c884d
|
refs/heads/master
| 2022-08-19T22:23:34.697232
| 2022-08-10T20:59:17
| 2022-08-10T20:59:17
| 76,124,222
| 355
| 433
|
MIT
| 2022-08-10T20:59:18
| 2016-12-10T16:29:34
|
C++
|
UTF-8
|
Python
| false
| false
| 721
|
py
|
main.py
|
import sys
from PyQt5 import QtCore, QtGui, QtWidgets, uic
from doublespinbox import DoubleSpinBox
qtCreatorFile = "rvmod_gui.ui" # Enter file here.
Ui_MainWindow, QtBaseClass = uic.loadUiType(qtCreatorFile)
class MyApp(QtWidgets.QMainWindow, Ui_MainWindow):
def __init__(self, parent=None):
super(MyApp, self).__init__(parent)
self.setupUi(self)
self.K1.minimize_signal.connect(self.on_minimize_k1)
@QtCore.pyqtSlot()
def on_minimize_k1(self):
spinbox = self.sender()
print(spinbox, "minimize k1")
def main():
app = QtWidgets.QApplication(sys.argv)
window = MyApp()
window.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
|
07b9a3afd96199c4b6b18b48206ff4a3979930f8
|
3ef70fe63acaa665e2b163f30f1abd0a592231c1
|
/stackoverflow/venv/lib/python3.6/site-packages/pip-19.0.3-py3.6.egg/pip/_internal/commands/help.py
|
49a81cbb074299f520e52643dc63d2fd33e8ad2a
|
[
"MIT"
] |
permissive
|
wistbean/learn_python3_spider
|
14914b63691ac032955ba1adc29ad64976d80e15
|
40861791ec4ed3bbd14b07875af25cc740f76920
|
refs/heads/master
| 2023-08-16T05:42:27.208302
| 2023-03-30T17:03:58
| 2023-03-30T17:03:58
| 179,152,420
| 14,403
| 3,556
|
MIT
| 2022-05-20T14:08:34
| 2019-04-02T20:19:54
|
Python
|
UTF-8
|
Python
| false
| false
| 1,090
|
py
|
help.py
|
from __future__ import absolute_import
from pip._internal.cli.base_command import Command
from pip._internal.cli.status_codes import SUCCESS
from pip._internal.exceptions import CommandError
class HelpCommand(Command):
"""Show help for commands"""
name = 'help'
usage = """
%prog <command>"""
summary = 'Show help for commands.'
ignore_require_venv = True
def run(self, options, args):
from pip._internal.commands import commands_dict, get_similar_commands
try:
# 'pip help' with no args is handled by pip.__init__.parseopt()
cmd_name = args[0] # the command we need help for
except IndexError:
return SUCCESS
if cmd_name not in commands_dict:
guess = get_similar_commands(cmd_name)
msg = ['unknown command "%s"' % cmd_name]
if guess:
msg.append('maybe you meant "%s"' % guess)
raise CommandError(' - '.join(msg))
command = commands_dict[cmd_name]()
command.parser.print_help()
return SUCCESS
|
fcfebfe741d7aa98df13596cc059584f429d960d
|
2ad93a1cf25a580fe980482d2d17a657de3b2523
|
/django-stubs/contrib/flatpages/views.pyi
|
bd63e39b4cb826be258e12f742e5203f5f0d44ed
|
[
"MIT"
] |
permissive
|
typeddjango/django-stubs
|
f35dfcb001e54694a0a1e8c0afcc6e6a3d130c32
|
0117348c3c7713f25f96b46e53ebdeed7bdba544
|
refs/heads/master
| 2023-08-25T19:42:52.707151
| 2023-08-23T15:13:25
| 2023-08-23T15:13:25
| 142,779,680
| 1,133
| 376
|
MIT
| 2023-09-13T19:05:06
| 2018-07-29T17:08:50
|
Python
|
UTF-8
|
Python
| false
| false
| 309
|
pyi
|
views.pyi
|
from django.contrib.flatpages.models import FlatPage
from django.http.request import HttpRequest
from django.http.response import HttpResponse
DEFAULT_TEMPLATE: str
def flatpage(request: HttpRequest, url: str) -> HttpResponse: ...
def render_flatpage(request: HttpRequest, f: FlatPage) -> HttpResponse: ...
|
2e4e7ebbab6aa4a6b695229c06581da8540a3759
|
fa1ad2e2ac7e376fc7cb3b3a6e1bb88eed3e80be
|
/dts/airbyte/airbyte-integrations/connectors/source-faker/source_faker/utils.py
|
1a10b9cbb497a6ccfc71ea28548eaaecc20eada1
|
[
"MIT",
"Elastic-2.0",
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
alldatacenter/alldata
|
7bc7713c9f1d56ad6b8e59ea03206d1073b7e047
|
8d5f9a2d49ab8f9e85ccf058cb02c2fda287afc6
|
refs/heads/master
| 2023-08-05T07:32:25.442740
| 2023-08-03T13:17:24
| 2023-08-03T13:17:24
| 213,321,771
| 774
| 250
|
Apache-2.0
| 2023-09-06T17:35:32
| 2019-10-07T07:36:18
| null |
UTF-8
|
Python
| false
| false
| 937
|
py
|
utils.py
|
#
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
import datetime
import json
from airbyte_cdk.models import AirbyteEstimateTraceMessage, AirbyteTraceMessage, EstimateType, TraceType
def read_json(filepath):
with open(filepath, "r") as f:
return json.loads(f.read())
def format_airbyte_time(d: datetime.datetime):
s = f"{d}"
s = s.split(".")[0]
s = s.replace(" ", "T")
s += "+00:00"
return s
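# e.g. format_airbyte_time(datetime.datetime(2023, 1, 2, 3, 4, 5))
# returns "2023-01-02T03:04:05+00:00"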
def now_millis():
return int(datetime.datetime.now().timestamp() * 1000)
def generate_estimate(stream_name: str, total: int, bytes_per_row: int):
emitted_at = int(datetime.datetime.now().timestamp() * 1000)
estimate_message = AirbyteEstimateTraceMessage(
type=EstimateType.STREAM, name=stream_name, row_estimate=round(total), byte_estimate=round(total * bytes_per_row)
)
return AirbyteTraceMessage(type=TraceType.ESTIMATE, emitted_at=emitted_at, estimate=estimate_message)
|
005945865ab078b51a0796efabef21c5c4af480e
|
09a6d8dbad5b92f93791948b5bf9b75f5cb2e5ce
|
/pennylane/gradients/pulse_gradient.py
|
f73045bae4d26b1eae40661e68d9a81c961f839a
|
[
"Apache-2.0"
] |
permissive
|
PennyLaneAI/pennylane
|
458efd5d9457e90ada31ca2ef0fb6bb96a24e9a7
|
0843183ff15a013c2622af5e61fea431d18076d3
|
refs/heads/master
| 2023-09-03T17:00:43.105784
| 2023-09-01T16:15:07
| 2023-09-01T16:15:07
| 129,936,360
| 1,431
| 410
|
Apache-2.0
| 2023-09-14T21:30:56
| 2018-04-17T16:45:42
|
Python
|
UTF-8
|
Python
| false
| false
| 40,012
|
py
|
pulse_gradient.py
|
# Copyright 2023 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains functions for computing the stochastic parameter-shift gradient
of pulse sequences in a qubit-based quantum tape.
"""
import warnings
import numpy as np
import pennylane as qml
from pennylane.pulse import ParametrizedEvolution, HardwareHamiltonian
from .parameter_shift import _make_zero_rep
from .general_shift_rules import eigvals_to_frequencies, generate_shift_rule
from .gradient_transform import (
_all_zero_grad,
assert_no_state_returns,
assert_no_tape_batching,
assert_no_variance,
choose_grad_methods,
gradient_analysis_and_validation,
gradient_transform,
_no_trainable_grad,
reorder_grads,
)
has_jax = True
try:
import jax
import jax.numpy as jnp
except ImportError:
has_jax = False
def _assert_has_jax(transform_name):
"""Check that JAX is installed and imported correctly, otherwise raise an error.
Args:
transform_name (str): Name of the gradient transform that queries the return system
"""
if not has_jax: # pragma: no cover
raise ImportError(
f"Module jax is required for the {transform_name} gradient transform. "
"You can install jax via: pip install jax jaxlib"
)
def raise_pulse_diff_on_qnode(transform_name):
"""Raises an error as the gradient transform with the provided name does
not support direct application to QNodes.
"""
    msg = (
        f"Applying the {transform_name} gradient transform to a QNode directly is currently "
        "not supported. Please use differentiation via a JAX entry point "
        "(jax.grad, jax.jacobian, ...) instead."
    )
    raise NotImplementedError(msg)
def _split_evol_ops(op, ob, tau):
r"""Randomly split a ``ParametrizedEvolution`` with respect to time into two operations and
insert a Pauli rotation using a given Pauli word and rotation angles :math:`\pm\pi/2`.
This yields two groups of three operations each.
Args:
op (ParametrizedEvolution): operation to split up.
ob (`~.Operator`): generating Hamiltonian term to insert the parameter-shift rule for.
tau (float or tensor_like): split-up time(s). If multiple times are passed, the split-up
            operations are set up to return intermediate time evolution results, effectively
            leading to broadcasting.
Returns:
tuple[list[`~.Operation`]]: The split-time evolution, expressed as three operations in the
inner lists. The number of tuples is given by the number of shifted terms in the
parameter-shift rule of the generating Hamiltonian term ``ob``.
tensor_like: Coefficients of the parameter-shift rule of the provided generating Hamiltonian
term ``ob``.
"""
t0, *_, t1 = op.t
# If there are multiple values for tau, use broadcasting
if bcast := qml.math.ndim(tau) > 0:
# With broadcasting, create a sorted array of [t_0, *sorted(taus), t_1]
        # Use this array for both the pulse before and the pulse after the inserted operation.
        # The way we slice the resulting tape results later on accommodates the additional
        # time points t_0 and t_1 in the array.
tau = jnp.sort(tau)
before_t = jnp.concatenate([jnp.array([t0]), tau, jnp.array([t1])])
after_t = before_t.copy()
else:
# Create a time interval from start to split and one from split to end
before_t = jax.numpy.array([t0, tau])
after_t = jax.numpy.array([tau, t1])
if qml.pauli.is_pauli_word(ob):
prefactor = qml.pauli.pauli_word_prefactor(ob)
word = qml.pauli.pauli_word_to_string(ob)
insert_ops = [qml.PauliRot(shift, word, ob.wires) for shift in [np.pi / 2, -np.pi / 2]]
coeffs = [prefactor, -prefactor]
else:
with warnings.catch_warnings():
if len(ob.wires) <= 4:
warnings.filterwarnings(
"ignore", ".*the eigenvalues will be computed numerically.*"
)
eigvals = qml.eigvals(ob)
coeffs, shifts = zip(*generate_shift_rule(eigvals_to_frequencies(tuple(eigvals))))
insert_ops = [qml.exp(qml.dot([-1j * shift], [ob])) for shift in shifts]
# Create Pauli rotations to be inserted at tau
ode_kwargs = op.odeint_kwargs
# If we are broadcasting, make use of the `return_intermediate` and `complementary` features
ops = tuple(
[
op(op.data, before_t, return_intermediate=bcast, **ode_kwargs),
insert_op,
op(op.data, after_t, return_intermediate=bcast, complementary=bcast, **ode_kwargs),
]
for insert_op in insert_ops
)
return ops, jnp.array(coeffs)
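# Hedged illustration (editor's addition; assumes JAX is installed and uses a
# minimal single-qubit pulse): for
#     op = qml.evolve(qml.pulse.constant * qml.PauliX(0))([0.5], t=1.0)
# the call _split_evol_ops(op, qml.PauliX(0), tau=0.3) returns two groups,
# each [evolution over (0, 0.3), PauliRot(+-pi/2, "X"), evolution over
# (0.3, 1.0)], together with the shift-rule coefficients jnp.array([1., -1.]).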
def _split_evol_tape(tape, split_evolve_ops, op_idx):
"""Replace a marked ``ParametrizedEvolution`` in a given tape by provided operations, creating
one tape per group of operations.
Args:
tape (QuantumTape): original tape
split_evolve_ops (tuple[list[qml.Operation]]): The time-split evolution operations as
created by ``_split_evol_ops``. For each group of operations, a new tape
is created.
op_idx (int): index of the operation to replace within the tape
Returns:
list[QuantumTape]: new tapes with replaced operation, one tape per group of operations in
``split_evolve_ops``.
"""
ops_pre = tape.operations[:op_idx]
ops_post = tape.operations[op_idx + 1 :]
return [
qml.tape.QuantumScript(ops_pre + split + ops_post, tape.measurements, shots=tape.shots)
for split in split_evolve_ops
]
# pylint: disable=too-many-arguments
def _parshift_and_integrate(
results,
cjacs,
int_prefactor,
psr_coeffs,
single_measure,
has_partitioned_shots,
use_broadcasting,
):
"""Apply the parameter-shift rule post-processing to tape results and contract
with classical Jacobians, effectively evaluating the numerical integral of the stochastic
parameter-shift rule.
Args:
results (list): Tape evaluation results, corresponding to the modified quantum
circuit result when using the applicable parameter shifts and the sample splitting
times. Results should be ordered such that the different shifted circuits for a given
splitting time are grouped together
cjacs (tensor_like): classical Jacobian evaluated at the splitting times
int_prefactor (float): prefactor of the numerical integration, corresponding to the size
of the time range divided by the number of splitting time samples
psr_coeffs (tensor_like or tuple[tensor_like]): Coefficients of the parameter-shift
rule to contract the results with before integrating numerically.
single_measure (bool): Whether the results contain a single measurement per shot setting
has_partitioned_shots (bool): Whether the results have a shot vector axis
use_broadcasting (bool): Whether broadcasting was used in the tapes that returned the
``results``.
Returns:
tensor_like or tuple[tensor_like] or tuple[tuple[tensor_like]]: Gradient entry
"""
def _contract(coeffs, res, cjac):
"""Contract three tensors, the first two like a standard matrix multiplication
and the result with the third tensor along the first axes."""
return jnp.tensordot(jnp.tensordot(coeffs, res, axes=1), cjac, axes=[[0], [0]])
if isinstance(psr_coeffs, tuple):
num_shifts = [len(c) for c in psr_coeffs]
def _psr_and_contract(res_list, cjacs, int_prefactor):
"""Execute the parameter-shift rule and contract with classical Jacobians.
This function assumes multiple generating terms for the pulse parameter
of interest"""
res = jnp.stack(res_list)
idx = 0
# Preprocess the results: Reshape, create slices for different generating terms
if use_broadcasting:
# Slice the results according to the different generating terms. Slice away the
# first and last value for each term, which correspond to the initial condition
# and the final value of the time evolution, but not to splitting times
res = tuple(res[idx : (idx := idx + n), 1:-1] for n in num_shifts)
else:
shape = jnp.shape(res)
num_taus = shape[0] // sum(num_shifts)
# Reshape the slices of the results corresponding to different generating terms.
# Afterwards the first axis corresponds to the splitting times and the second axis
# corresponds to the different shifts of the respective term.
# Finally move the shifts-axis to the first position of each term.
res = tuple(
jnp.moveaxis(
jnp.reshape(
res[idx : (idx := idx + n * num_taus)], (num_taus, n) + shape[1:]
),
1,
0,
)
for n in num_shifts
)
# Contract the results, parameter-shift rule coefficients and (classical) Jacobians,
# and include the rescaling factor from the Monte Carlo integral and from global
# prefactors of Pauli word generators.
diff_per_term = jnp.array(
[_contract(c, r, cjac) for c, r, cjac in zip(psr_coeffs, res, cjacs)]
)
return qml.math.sum(diff_per_term, axis=0) * int_prefactor
else:
num_shifts = len(psr_coeffs)
def _psr_and_contract(res_list, cjacs, int_prefactor):
"""Execute the parameter-shift rule and contract with classical Jacobians.
This function assumes a single generating term for the pulse parameter
of interest"""
res = jnp.stack(res_list)
# Preprocess the results: Reshape, create slices for different generating terms
if use_broadcasting:
# Slice away the first and last values, corresponding to the initial condition
# and the final value of the time evolution, but not to splitting times
res = res[:, 1:-1]
else:
# Reshape the results such that the first axis corresponds to the splitting times
# and the second axis corresponds to different shifts. All other axes are untouched.
# Afterwards move the shifts-axis to the first position.
shape = jnp.shape(res)
new_shape = (shape[0] // num_shifts, num_shifts) + shape[1:]
res = jnp.moveaxis(jnp.reshape(res, new_shape), 1, 0)
# Contract the results, parameter-shift rule coefficients and (classical) Jacobians,
# and include the rescaling factor from the Monte Carlo integral and from global
# prefactors of Pauli word generators.
return _contract(psr_coeffs, res, cjacs) * int_prefactor
nesting_layers = (not single_measure) + has_partitioned_shots
if nesting_layers == 1:
return tuple(_psr_and_contract(r, cjacs, int_prefactor) for r in zip(*results))
if nesting_layers == 0:
# Single measurement without shot vector
return _psr_and_contract(results, cjacs, int_prefactor)
# Multiple measurements with shot vector. Not supported with broadcasting yet.
if use_broadcasting:
# TODO: Remove once #2690 is resolved
raise NotImplementedError(
"Broadcasting, multiple measurements and shot vectors are currently not "
"supported all simultaneously by stoch_pulse_grad."
)
return tuple(
tuple(_psr_and_contract(_r, cjacs, int_prefactor) for _r in zip(*r)) for r in zip(*results)
)
# pylint: disable=too-many-arguments
def _stoch_pulse_grad(
tape, argnum=None, num_split_times=1, sampler_seed=None, use_broadcasting=False
):
r"""Compute the gradient of a quantum circuit composed of pulse sequences by applying the
stochastic parameter shift rule.
For a pulse-based cost function :math:`C(\boldsymbol{v}, T)`
with variational parameters :math:`\boldsymbol{v}` and evolution time :math:`T`, it is given by
    (cf. Eqn. (6) in `Leng et al. (2022) <https://arxiv.org/abs/2210.15812>`__ with altered
notation):
.. math::
\frac{\partial C}{\partial v_k}
= \int_{0}^{T} \mathrm{d}\tau \sum_{j=1}^m
\frac{\partial f_j}{\partial v_k}(\boldsymbol{v}, \tau)
\left[C_j^{(+)}(\boldsymbol{v}, \tau) - C_j^{(-)}(\boldsymbol{v}, \tau)\right]
Here, :math:`f_j` are the pulse envelopes that capture the time dependence of the pulse
Hamiltonian:
.. math::
H(\boldsymbol{v}, t) = H_\text{drift} + \sum_j f_j(\boldsymbol{v}, t) H_j,
and :math:`C_j^{(\pm)}` are modified cost functions:
.. math::
C_j^{(\pm)}(\boldsymbol{v}, \tau)&=
\bra{\psi^{(\pm)}_{j}(\boldsymbol{v}, \tau)} B
\ket{\psi^{(\pm)}_{j}(\boldsymbol{v}, \tau)} \\
\ket{\psi^{(\pm)}_{j}(\boldsymbol{v}, \tau)}
&= U_{\boldsymbol{v}}(T, \tau) e^{-i (\pm \frac{\pi}{4}) H_j}
U_{\boldsymbol{v}}(\tau, 0)\ket{\psi_0}.
    That is, the :math:`j`\ th modified time evolution in these circuits interrupts the
evolution generated by the pulse Hamiltonian by inserting a rotation gate generated by
the corresponding Hamiltonian term :math:`H_j` with a rotation angle of
:math:`\pm\frac{\pi}{4}`.
See below for a more detailed description. The integral in the first equation above
is estimated numerically in the stochastic parameter-shift rule. For this, it samples
split times :math:`\tau` and averages the modified cost functions and the Jacobians
of the envelopes :math:`\partial f_j / \partial v_k` at the sampled times suitably.
Args:
tape (pennylane.QNode or .QuantumTape): quantum tape or QNode to differentiate
argnum (int or list[int] or None): Trainable tape parameter indices to differentiate
with respect to. If not provided, the derivatives with respect to all
trainable parameters are returned.
num_split_times (int): number of time samples to use in the stochastic parameter-shift
rule underlying the differentiation; also see details
        sampler_seed (int): randomness seed to be used for the time samples in the stochastic
parameter-shift rule
use_broadcasting (bool): Whether to use broadcasting across the different sampled
splitting times. If ``False`` (the default), one set of modified tapes per
            splitting time is created; if ``True``, only a single set of broadcasted, modified
tapes is created, increasing performance on simulators.
Returns:
function or tuple[list[QuantumTape], function]:
- If the input is a QNode, an object representing the Jacobian (function) of the QNode
that can be executed to obtain the Jacobian.
The type of the Jacobian returned is either a tensor, a tuple or a
nested tuple depending on the nesting structure of the original QNode output.
- If the input is a tape, a tuple containing a
list of generated tapes, together with a post-processing
function to be applied to the results of the evaluated tapes
in order to obtain the Jacobian.
This transform realizes the stochastic parameter-shift rule for pulse sequences, as introduced
in `Banchi and Crooks (2018) <https://quantum-journal.org/papers/q-2021-01-25-386/>`_ and
`Leng et al. (2022) <https://arxiv.org/abs/2210.15812>`_.
.. note::
This function requires the JAX interface and does not work with other autodiff interfaces
commonly encountered with PennyLane.
Finally, this transform is not JIT-compatible yet.
.. note::
This function uses a basic sampling approach with a uniform distribution to estimate the
integral appearing in the stochastic parameter-shift rule. In many cases, there are
probability distributions that lead to smaller variances of the estimator.
In addition, the sampling approach will not reduce trivially to simpler parameter-shift
rules when used with simple pulses (see details and examples below), potentially leading
to imprecise results and/or unnecessarily large computational efforts.
.. warning::
This transform may not be applied directly to QNodes. Use JAX entrypoints
(``jax.grad``, ``jax.jacobian``, ...) instead or apply the transform on the tape level.
Also see the examples below.
**Examples**
Consider a pulse program with a single two-qubit pulse, generated by a Hamiltonian
with three terms: the non-trainable term :math:`\frac{1}{2}X_0`, the trainable
    constant (over time) term :math:`v_1 Z_0 Z_1` and the trainable sinusoidal term
    :math:`\sin(v_2 t) (\frac{1}{5} Y_0 + \frac{3}{5} X_1)`.
.. code-block:: python
jax.config.update("jax_enable_x64", True)
dev = qml.device("default.qubit.jax", wires=2)
def sin(p, t):
return jax.numpy.sin(p * t)
ZZ = qml.PauliZ(0) @ qml.PauliZ(1)
Y_plus_X = qml.dot([1/5, 3/5], [qml.PauliY(0), qml.PauliX(1)])
H = 0.5 * qml.PauliX(0) + qml.pulse.constant * ZZ + sin * Y_plus_X
def ansatz(params):
qml.evolve(H)(params, (0.2, 0.4))
return qml.expval(qml.PauliY(1))
qnode = qml.QNode(ansatz, dev, interface="jax", diff_method=qml.gradients.stoch_pulse_grad)
The program takes the two parameters :math:`v_1, v_2` for the two trainable terms:
>>> params = [jax.numpy.array(0.4), jax.numpy.array(1.3)]
>>> qnode(params)
Array(-0.0905377, dtype=float64)
And as we registered the differentiation method :func:`~.stoch_pulse_grad`,
we can compute its gradient in a hardware compatible manner:
>>> jax.grad(qnode)(params)
[Array(0.00109782, dtype=float64, weak_type=True),
Array(-0.05833371, dtype=float64, weak_type=True)] # results may differ
Note that the derivative is computed using a stochastic parameter-shift rule,
which is based on a sampled approximation of an integral expression (see theoretical
background below). This makes the computed derivative an approximate quantity subject
to statistical fluctuations with notable variance. The number of samples used to
approximate the integral can be chosen with ``num_split_times``, the seed for the
sampling can be fixed with ``sampler_seed``:
.. code-block:: python
qnode = qml.QNode(
ansatz,
dev,
interface="jax",
diff_method=qml.gradients.stoch_pulse_grad,
num_split_times=5, # Use 5 samples for the approximation
sampler_seed=18, # Fix randomness seed
)
>>> jax.grad(qnode)(params)
[Array(0.00207256, dtype=float64, weak_type=True),
Array(-0.05989856, dtype=float64, weak_type=True)]
We may activate the option ``use_broadcasting`` to improve the performance when running
on classical simulators. Internally, it reuses intermediate results of the time evolution.
We can compare the performance with a simple test:
.. code-block:: python
from time import process_time
faster_grad_qnode = qml.QNode(
ansatz,
dev,
interface="jax",
diff_method=qml.gradients.stoch_pulse_grad,
num_split_times=5, # Use 5 samples for the approximation
sampler_seed=18, # Fix randomness seed
use_broadcasting=True, # Activate broadcasting
)
times = []
for node in [qnode, faster_grad_qnode]:
start = process_time()
jax.grad(node)(params)
times.append(process_time() - start)
>>> print(times) # Show the gradient computation times in seconds.
[55.75785480000002, 12.297400500000009]
.. warning::
As the option ``use_broadcasting=True`` adds a broadcasting dimension to the modified
circuits, it is not compatible with circuits that already are broadcasted.
.. details::
:title: Theoretical background
:href: theory
Consider a pulse generated by a time-dependent Hamiltonian
.. math::
H(\boldsymbol{v}, t) = H_\text{drift} + \sum_j f_j(v_j, t) H_j,
where :math:`\boldsymbol{v}=\{v_j\}` are variational parameters and :math:`t` is the time.
In addition, consider a cost function that is based on using this pulse for
a duration :math:`T`
in a pulse sequence and measuring the expectation value of an observable.
For simplicity we absorb the parts of the sequence
before and after the considered pulse into the initial state and the observable,
respectively:
.. math::
            C(\boldsymbol{v}, T) =
\bra{\psi_0} U_{\boldsymbol{v}}(T, 0)^\dagger B U_{\boldsymbol{v}}(T, 0)\ket{\psi_0}.
Here, we denoted the unitary evolution under :math:`H(\boldsymbol{v}, t)` from time
        :math:`t_1` to :math:`t_2` as :math:`U_{\boldsymbol{v}}(t_2, t_1)`.
Then the derivative of :math:`C` with respect to a specific parameter :math:`v_k`
is given by (see Eqn. (6) of `Leng et al. (2022) <https://arxiv.org/abs/2210.15812>`_)
.. math::
\frac{\partial C}{\partial v_k}
= \int_{0}^{T} \mathrm{d}\tau \sum_{j=1}^m
\frac{\partial f_j}{\partial v_k}(\boldsymbol{v}, \tau)
\widetilde{C_j}(\boldsymbol{v}, \tau).
Here, the integral ranges over the duration of the pulse, the partial derivatives of
the coefficient functions, :math:`\partial f_j / \partial v_k`, are computed classically,
and :math:`\widetilde{C_j}` is a linear combination of the results from modified pulse
sequence executions based on generalized parameter-shift rules
(see e.g. `Kyriienko and Elfving (2022) <https://arxiv.org/abs/2108.01218>`_ or
`Wierichs et al. (2022) <https://doi.org/10.22331/q-2022-03-30-677>`_ for more details
and :func:`~.param_shift` for an implementation of the non-stochastic generalized shift
rules)
Given the parameter shift rule with coefficients :math:`\{y_\ell\}` and shifts
:math:`\{x_\ell\}` for the single-parameter pulse :math:`\exp(-i \theta H_j)`,
the linear combination is given by
.. math::
\widetilde{C_j}(\boldsymbol{v}, \tau)&=\sum_{\ell=1} y_\ell
\bra{\psi_{j}(\boldsymbol{v}, x_\ell, \tau)} B
\ket{\psi_{j}(\boldsymbol{v}, x_\ell, \tau)} \\
\ket{\psi_{j}(\boldsymbol{v}, x_\ell, \tau)}
&= U_{\boldsymbol{v}}(T, \tau) e^{-i x_\ell H_j}
U_{\boldsymbol{v}}(\tau, 0)\ket{\psi_0}.
In practice, the time integral over :math:`\tau` is computed by sampling values for
the time, evaluating the integrand, and averaging appropriately. The probability
distribution used for the sampling may have a significant impact on the quality of the
obtained estimates, in particular with regards to their variance.
        In this function, a uniform distribution over the interval :math:`[0, T]` is used,
which often can be improved upon.
**Examples**
Consider the pulse generated by
.. math::
H(\boldsymbol{v}, t) = \frac{1}{2} X_0 + v_1 Z_0 Z_1 + \sin(v_2 t) X_1
and the observable :math:`B=Y_1`. There are two variational parameters, :math:`v_1`
and :math:`v_2`, for which we may compute the derivative of the cost function:
.. math::
\frac{\partial C}{\partial v_1}
&= \int_{0}^{T} \mathrm{d}\tau \ \widetilde{C_1}((v_1, v_2), \tau)\\
\frac{\partial C}{\partial v_2}
&= \int_{0}^{T} \mathrm{d}\tau \cos(v_2 \tau) \tau \ \widetilde{C_2}((v_1, v_2), \tau)\\
\widetilde{C_j}((v_1, v_2), \tau)&=
\bra{\psi_{j}((v_1, v_2), \pi/4, \tau)} B
\ket{\psi_{j}((v_1, v_2), \pi/4, \tau)}\\
&-\bra{\psi_{j}((v_1, v_2), -\pi/4, \tau)} B
\ket{\psi_{j}((v_1, v_2), -\pi/4, \tau)} \\
\ket{\psi_{j}((v_1, v_2), x, \tau)}
&= U_{(v_1, v_2)}(T, \tau) e^{-i x H_j}U_{(v_1, v_2)}(\tau, 0)\ket{0}.
Here we used the partial derivatives
.. math::
\frac{\partial f_1}{\partial v_1}&= 1\\
\frac{\partial f_2}{\partial v_2}&= \cos(v_2 t) t \\
\frac{\partial f_1}{\partial v_2}=
\frac{\partial f_2}{\partial v_1}&= 0
and the fact that both :math:`H_1=Z_0 Z_1` and :math:`H_2=X_1`
have two unique eigenvalues and therefore admit a two-term parameter-shift rule
(see e.g. `Schuld et al. (2018) <https://arxiv.org/abs/1811.11184>`_).
As a second scenario, consider the single-qubit pulse generated by
.. math::
H((v_1, v_2), t) = v_1 \sin(v_2 t) X
together with the observable :math:`B=Z`.
You may already notice that this pulse can be rewritten as a :class:`~.RX` rotation,
because we have a single Hamiltonian term and the spectrum of :math:`H` consequently
will be constant up to rescaling.
In particular, the unitary time evolution under the Schrödinger equation is given by
.. math::
U_{(v_1, v_2)}(t_2, t_1) &=
\exp\left(-i\int_{t_1}^{t_2} \mathrm{d}\tau v_1 \sin(v_2 \tau) X\right)\\
&=\exp(-i\theta(v_1, v_2) X)\\
\theta(v_1, v_2) &= \int_{t_1}^{t_2} \mathrm{d}\tau v_1 \sin(v_2 \tau)\\
&=-\frac{v_1}{v_2}(\cos(v_2 t_2) - \cos(v_2 t_1)).
As the ``RX`` rotation satisfies a (non-stochastic) two-term parameter-shift rule,
we could compute the derivatives with respect to :math:`v_1` and :math:`v_2` by
implementing :math:`\exp(-i\theta(v_1, v_2) X)`, applying the two-term shift rule
and evaluating the classical Jacobian of the mapping :math:`\theta(v_1, v_2)`.
Using the stochastic parameter-shift rule instead will lead to approximation errors.
This is because the approximated integral not only includes the shifted circuit
evaluations, which do not depend on :math:`\tau` in this example, but also on the
classical Jacobian, which is *not* constant over :math:`\tau`.
Therefore, it is important to implement pulses in the simplest way possible.
"""
# pylint:disable=unused-argument
transform_name = "stochastic pulse parameter-shift"
_assert_has_jax(transform_name)
assert_no_state_returns(tape.measurements, transform_name)
assert_no_variance(tape.measurements, transform_name)
assert_no_tape_batching(tape, transform_name)
if num_split_times < 1:
raise ValueError(
"Expected a positive number of samples for the stochastic pulse "
f"parameter-shift gradient, got {num_split_times}."
)
if argnum is None and not tape.trainable_params:
return _no_trainable_grad(tape)
if use_broadcasting and tape.batch_size is not None:
raise ValueError("Broadcasting is not supported for tapes that already are broadcasted.")
diff_methods = gradient_analysis_and_validation(tape, "analytic", grad_fn=stoch_pulse_grad)
if all(g == "0" for g in diff_methods):
return _all_zero_grad(tape)
method_map = choose_grad_methods(diff_methods, argnum)
argnum = [i for i, dm in method_map.items() if dm == "A"]
sampler_seed = sampler_seed or np.random.randint(18421)
key = jax.random.PRNGKey(sampler_seed)
return _expval_stoch_pulse_grad(tape, argnum, num_split_times, key, use_broadcasting)
def _generate_tapes_and_cjacs(
tape, operation, key, num_split_times, use_broadcasting, par_idx=None
):
"""Generate the tapes and compute the classical Jacobians for one given
generating Hamiltonian term of one pulse.
Args:
tape (QuantumScript): Tape for which to compute the stochastic pulse parameter-shift
gradient tapes.
operation (tuple[Operation, int, int]): Information about the pulse operation to be
shifted. The first entry is the operation itself, the second entry is its position
in the ``tape``, and the third entry is the index of the differentiated parameter
(and generating term) within the ``HardwareHamiltonian`` of the operation.
        key (tuple[int]): Randomness key to create splitting times.
num_split_times (int): Number of splitting times at which to create shifted tapes for
the stochastic shift rule.
use_broadcasting (bool): Whether to use broadcasting in the shift rule or not.
Returns:
list[QuantumScript]: Gradient tapes for the indicated operation and Hamiltonian term.
list[tensor_like]: Classical Jacobian at the splitting times for the given parameter.
float: Prefactor for the Monte Carlo estimate of the integral in the stochastic shift rule.
tensor_like: Parameter-shift coefficients for the shift rule of the indicated term.
"""
op, op_idx, term_idx = operation
coeff, ob = op.H.coeffs_parametrized[term_idx], op.H.ops_parametrized[term_idx]
if par_idx is None:
cjac_fn = jax.jacobian(coeff, argnums=0)
else:
# For `par_idx is not None`, we need to extract the entry of the coefficient
# Jacobian that belongs to the parameter of interest. This only happens when
# more than one parameter effectively feeds into one coefficient (HardwareHamiltonian)
def cjac_fn(params, t):
return jax.jacobian(coeff, argnums=0)(params, t)[par_idx]
t0, *_, t1 = op.t
taus = jnp.sort(jax.random.uniform(key, shape=(num_split_times,)) * (t1 - t0) + t0)
if isinstance(op.H, HardwareHamiltonian):
op_data = op.H.reorder_fn(op.data, op.H.coeffs_parametrized)
else:
op_data = op.data
cjacs = [cjac_fn(op_data[term_idx], tau) for tau in taus]
if use_broadcasting:
split_evolve_ops, psr_coeffs = _split_evol_ops(op, ob, taus)
tapes = _split_evol_tape(tape, split_evolve_ops, op_idx)
else:
tapes = []
for tau in taus:
split_evolve_ops, psr_coeffs = _split_evol_ops(op, ob, tau)
tapes.extend(_split_evol_tape(tape, split_evolve_ops, op_idx))
int_prefactor = (t1 - t0) / num_split_times
return tapes, cjacs, int_prefactor, psr_coeffs
def _tapes_data_hardware(tape, operation, key, num_split_times, use_broadcasting):
"""Create tapes and gradient data for a trainable parameter of a HardwareHamiltonian,
taking into account its reordering function.
Args:
tape (QuantumScript): Tape for which to compute the stochastic pulse parameter-shift
gradient tapes.
operation (tuple[Operation, int, int]): Information about the pulse operation to be
shifted. The first entry is the operation itself, the second entry is its position
in the ``tape``, and the third entry is the index of the differentiated parameter
within the ``HardwareHamiltonian`` of the operation.
        key (tuple[int]): Randomness key to create splitting times in ``_generate_tapes_and_cjacs``.
num_split_times (int): Number of splitting times at which to create shifted tapes for
the stochastic shift rule.
use_broadcasting (bool): Whether to use broadcasting in the shift rule or not.
Returns:
list[QuantumScript]: Gradient tapes for the indicated operation and Hamiltonian term.
tuple: Gradient postprocessing data.
See comment below.
This function analyses the ``reorder_fn`` of the ``HardwareHamiltonian`` of the pulse
that is being differentiated. Given a ``term_idx``, the index of the parameter
in the Hamiltonian, stochastic parameter shift tapes are created for all terms in the
Hamiltonian into which the parameter feeds. While this is a one-to-one relation for
standard ``ParametrizedHamiltonian`` objects, the reordering function of
    the ``HardwareHamiltonian`` requires creating tapes for multiple Hamiltonian terms,
and for each term ``_generate_tapes_and_cjacs`` is called.
The returned gradient data has four entries:
1. ``int``: Total number of tapes created for all the terms that depend on the indicated
parameter.
    2. ``tuple[tensor_like]``: Classical Jacobians for all terms and splitting times.
3. ``float``: Prefactor for the Monte Carlo estimate of the integral in the stochastic
shift rule.
4. ``tuple[tensor_like]``: Parameter-shift coefficients for all terms.
The tuple axes in the second and fourth entry correspond to the different terms in the
Hamiltonian.
"""
op, op_idx, term_idx = operation
# Map a simple enumeration of numbers from HardwareHamiltonian input parameters to
# ParametrizedHamiltonian parameters. This is typically a fan-out function.
fake_params, allowed_outputs = np.arange(op.num_params), set(range(op.num_params))
reordered = op.H.reorder_fn(fake_params, op.H.coeffs_parametrized)
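    # Illustrative assumption: with three input parameters, a fan-in reordering
    # might map ``[0, 1, 2]`` to ``[[0, 1], 2]``, meaning that parameters 0 and
    # 1 both feed the first parametrized coefficient while parameter 2 feeds
    # the second one.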
def _raise():
raise ValueError(
"Only permutations, fan-out or fan-in functions are allowed as reordering functions "
"in HardwareHamiltonians treated by stoch_pulse_grad. The reordering function of "
f"{op.H} mapped {fake_params} to {reordered}."
)
cjacs, tapes, psr_coeffs = [], [], []
for coeff_idx, x in enumerate(reordered):
# Find out whether the value term_idx, corresponding to the current parameter of interest,
# has been mapped to x (for scalar x) or into x (for 1d x). If so, generate tapes and data
# Also check that only allowed outputs have been produced by the reordering function.
if not hasattr(x, "__len__"):
if x not in allowed_outputs:
_raise()
if x != term_idx:
continue
cjac_idx = None
else:
if not all(_x in list(range(op.num_params)) for _x in x):
_raise()
if term_idx not in x:
continue
cjac_idx = np.argwhere([_x == term_idx for _x in x])[0][0]
_operation = (op, op_idx, coeff_idx)
        # Overwriting int_prefactor does not matter; it is equal for all parameters in this op,
# because it only consists of the duration `op.t[-1]-op.t[0]` and `num_split_times`
_tapes, _cjacs, int_prefactor, _psr_coeffs = _generate_tapes_and_cjacs(
tape, _operation, key, num_split_times, use_broadcasting, cjac_idx
)
cjacs.append(qml.math.stack(_cjacs))
tapes.extend(_tapes)
psr_coeffs.append(_psr_coeffs)
# The fact that psr_coeffs are a tuple only for hardware Hamiltonian generators will be
# used in `_parshift_and_integrate`.
data = (len(tapes), tuple(cjacs), int_prefactor, tuple(psr_coeffs))
return tapes, data
# pylint: disable=too-many-arguments
def _expval_stoch_pulse_grad(tape, argnum, num_split_times, key, use_broadcasting):
r"""Compute the gradient of a quantum circuit composed of pulse sequences that measures
an expectation value or probabilities, by applying the stochastic parameter shift rule.
See the main function for the signature.
"""
tapes = []
gradient_data = []
for idx, trainable_idx in enumerate(tape.trainable_params):
if trainable_idx not in argnum:
# Only the number of tapes is needed to indicate a zero gradient entry
gradient_data.append((0, None, None, None))
continue
key, _key = jax.random.split(key)
operation = tape.get_operation(idx)
op, *_ = operation
if not isinstance(op, ParametrizedEvolution):
raise ValueError(
"stoch_pulse_grad does not support differentiating parameters of "
"other operations than pulses."
)
if isinstance(op.H, HardwareHamiltonian):
# Treat HardwareHamiltonians separately because they have a reordering function
_tapes, data = _tapes_data_hardware(
tape, operation, key, num_split_times, use_broadcasting
)
else:
_tapes, cjacs, int_prefactor, psr_coeffs = _generate_tapes_and_cjacs(
tape, operation, _key, num_split_times, use_broadcasting
)
data = (len(_tapes), qml.math.stack(cjacs), int_prefactor, psr_coeffs)
tapes.extend(_tapes)
gradient_data.append(data)
num_measurements = len(tape.measurements)
single_measure = num_measurements == 1
num_params = len(tape.trainable_params)
has_partitioned_shots = tape.shots.has_partitioned_shots
tape_specs = (single_measure, num_params, num_measurements, tape.shots)
def processing_fn(results):
start = 0
grads = []
for num_tapes, cjacs, int_prefactor, psr_coeffs in gradient_data:
if num_tapes == 0:
grads.append(None)
continue
res = results[start : start + num_tapes]
start += num_tapes
# Apply the postprocessing of the parameter-shift rule and contract
# with classical Jacobian, effectively computing the integral approximation
g = _parshift_and_integrate(
res,
cjacs,
int_prefactor,
psr_coeffs,
single_measure,
has_partitioned_shots,
use_broadcasting,
)
grads.append(g)
# g will have been defined at least once (because otherwise all gradients would have
# been zero), providing a representative for a zero gradient to emulate its type/shape.
zero_rep = _make_zero_rep(g, single_measure, has_partitioned_shots)
# Fill in zero-valued gradients
grads = [zero_rep if g is None else g for g in grads]
return reorder_grads(grads, tape_specs)
return tapes, processing_fn
def expand_invalid_trainable_stoch_pulse_grad(x, *args, **kwargs):
r"""Do not expand any operation. We expect the ``stoch_pulse_grad`` to be used
on pulse programs and we do not expect decomposition pipelines between pulses
and gate-based circuits yet.
"""
# pylint:disable=unused-argument
return x
stoch_pulse_grad = gradient_transform(
_stoch_pulse_grad, expand_fn=expand_invalid_trainable_stoch_pulse_grad
)
@stoch_pulse_grad.custom_qnode_wrapper
def stoch_pulse_grad_qnode_wrapper(self, qnode, targs, tkwargs):
"""A custom QNode wrapper for the gradient transform :func:`~.stoch_pulse_grad`.
    It raises an error, so that applying ``stoch_pulse_grad`` to a ``QNode`` directly
is not supported.
"""
# pylint:disable=unused-argument
transform_name = "stochastic pulse parameter-shift"
raise_pulse_diff_on_qnode(transform_name)
|
02f92cd1433be22dfcc103ab74fc32554bc9bd81
|
b2fef77e77f77b6cfd83da4ec2f89cbe73330844
|
/tests/test_copy_itemsd.py
|
ff4799a094ae361ba56ca8a793908619418ec439
|
[
"Apache-2.0"
] |
permissive
|
Project-MONAI/MONAI
|
8ef2593cc5fd1cd16e13464f927fe563fe3f5bac
|
e48c3e2c741fa3fc705c4425d17ac4a5afac6c47
|
refs/heads/dev
| 2023-09-02T00:21:04.532596
| 2023-09-01T06:46:45
| 2023-09-01T06:46:45
| 214,485,001
| 4,805
| 996
|
Apache-2.0
| 2023-09-14T15:19:30
| 2019-10-11T16:41:38
|
Python
|
UTF-8
|
Python
| false
| false
| 3,810
|
py
|
test_copy_itemsd.py
|
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import unittest
import numpy as np
import torch
from parameterized import parameterized
from monai.networks import eval_mode
from monai.transforms import CopyItemsd
from monai.utils import ensure_tuple
from tests.utils import assert_allclose
TEST_CASE_1 = ["img", 1, "img_1"]
TEST_CASE_2 = [["img", "seg"], 1, ["img_1", "seg_1"]]
TEST_CASE_3 = ["img", 2, ["img_1", "img_2"]]
TEST_CASE_4 = [["img", "seg"], 2, ["img_1", "seg_1", "img_2", "seg_2"]]
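# Each case lists the key(s) to copy, the number of copies per key, and the
# expected name(s) of the copied entries.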
class TestCopyItemsd(unittest.TestCase):
@parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4])
def test_numpy_values(self, keys, times, names):
input_data = {"img": np.array([[0, 1], [1, 2]]), "seg": np.array([[3, 4], [4, 5]])}
result = CopyItemsd(keys=keys, times=times, names=names)(input_data)
for name in ensure_tuple(names):
self.assertTrue(name in result)
result["img_1"] += 1
np.testing.assert_allclose(result["img_1"], np.array([[1, 2], [2, 3]]))
np.testing.assert_allclose(result["img"], np.array([[0, 1], [1, 2]]))
def test_default_names(self):
input_data = {"img": np.array([[0, 1], [1, 2]]), "seg": np.array([[3, 4], [4, 5]])}
result = CopyItemsd(keys=["img", "seg"], times=2, names=None)(input_data)
for name in ["img_0", "seg_0", "img_1", "seg_1"]:
self.assertTrue(name in result)
def test_tensor_values(self):
device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu:0")
input_data = {
"img": torch.tensor([[0, 1], [1, 2]], device=device),
"seg": torch.tensor([[0, 1], [1, 2]], device=device),
}
# test default `times=1`
result = CopyItemsd(keys="img", names="img_1")(input_data)
self.assertTrue("img_1" in result)
result["img_1"] += 1
assert_allclose(result["img"], torch.tensor([[0, 1], [1, 2]], device=device))
assert_allclose(result["img_1"], torch.tensor([[1, 2], [2, 3]], device=device))
def test_array_values(self):
input_data = {"img": [[0, 1], [1, 2]], "seg": [[0, 1], [1, 2]]}
result = CopyItemsd(keys="img", times=1, names="img_1")(input_data)
self.assertTrue("img_1" in result)
result["img_1"][0][0] += 1
np.testing.assert_allclose(result["img"], [[0, 1], [1, 2]])
np.testing.assert_allclose(result["img_1"], [[1, 1], [1, 2]])
def test_graph_tensor_values(self):
device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu:0")
net = torch.nn.PReLU().to(device)
with eval_mode(net):
pred = net(torch.tensor([[0.0, 1.0], [1.0, 2.0]], device=device))
input_data = {"pred": pred, "seg": torch.tensor([[0.0, 1.0], [1.0, 2.0]], device=device)}
result = CopyItemsd(keys="pred", times=1, names="pred_1")(input_data)
self.assertTrue("pred_1" in result)
result["pred_1"] += 1.0
assert_allclose(result["pred"], torch.tensor([[0.0, 1.0], [1.0, 2.0]], device=device))
assert_allclose(result["pred_1"], torch.tensor([[1.0, 2.0], [2.0, 3.0]], device=device))
if __name__ == "__main__":
unittest.main()
|
b309af7c218262b68155297d5926d3efc0555cfc
|
4dc59af25e81526cf3724699b03e262d201015ec
|
/usdmanager/preferences_dialog.py
|
eb7cb7ebb8584bb62fb576b94265d1c05f37f097
|
[
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-dco-1.1",
"Apache-2.0"
] |
permissive
|
dreamworksanimation/usdmanager
|
93935c73e7f0143aa2ec2c8027c6ff3d13603a08
|
6c276c4e4cb08994047e2432e86e332d2726c3b2
|
refs/heads/master
| 2022-10-11T06:56:01.746501
| 2022-08-02T20:53:35
| 2022-09-28T20:41:37
| 157,460,021
| 288
| 48
|
Apache-2.0
| 2022-06-17T00:24:59
| 2018-11-13T23:16:14
|
Python
|
UTF-8
|
Python
| false
| false
| 15,554
|
py
|
preferences_dialog.py
|
#
# Copyright 2018 DreamWorks Animation L.L.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Create a Preferences dialog.
"""
from Qt.QtCore import Slot, QRegExp
from Qt.QtGui import QRegExpValidator
from Qt.QtWidgets import QAbstractButton, QDialog, QDialogButtonBox, QFontDialog, QLineEdit, QMessageBox, QVBoxLayout
from .constants import LINE_LIMIT
from .utils import icon, loadUiWidget
class PreferencesDialog(QDialog):
"""
Preferences dialog
"""
def __init__(self, parent, **kwargs):
""" Initialize the dialog.
:Parameters:
parent : `UsdMngrWindow`
Main window
"""
super(PreferencesDialog, self).__init__(parent, **kwargs)
self.docFont = parent.tabWidget.font()
self.fileAssociations = {}
self.lineEditProgs = []
self.lineEditExts = []
self.setupUi()
self.connectSignals()
def setupUi(self):
""" Creates and lays out the widgets defined in the ui file.
"""
self.baseInstance = loadUiWidget("preferences_dialog.ui", self)
self.setWindowIcon(icon("preferences-system"))
self.buttonFont.setIcon(icon("preferences-desktop-font"))
self.buttonNewProg.setIcon(icon("list-add"))
# ----- General tab -----
# Set initial preferences.
parent = self.parent()
self.checkBox_parseLinks.setChecked(parent.preferences['parseLinks'])
self.checkBox_newTab.setChecked(parent.preferences['newTab'])
self.checkBox_syntaxHighlighting.setChecked(parent.preferences['syntaxHighlighting'])
self.checkBox_teletypeConversion.setChecked(parent.preferences['teletype'])
self.checkBox_lineNumbers.setChecked(parent.preferences['lineNumbers'])
self.checkBox_showAllMessages.setChecked(parent.preferences['showAllMessages'])
self.checkBox_showHiddenFiles.setChecked(parent.preferences['showHiddenFiles'])
self.checkBox_autoCompleteAddressBar.setChecked(parent.preferences['autoCompleteAddressBar'])
self.useSpacesCheckBox.setChecked(parent.preferences['useSpaces'])
self.useSpacesSpinBox.setValue(parent.preferences['tabSpaces'])
self.lineEditTextEditor.setText(parent.preferences['textEditor'])
self.lineEditDiffTool.setText(parent.preferences['diffTool'])
self.themeWidget.setChecked(parent.preferences['theme'] == "dark")
self.lineLimitSpinBox.setValue(parent.preferences['lineLimit'])
self.checkBox_autoIndent.setChecked(parent.preferences['autoIndent'])
self.updateFontLabel()
# ----- Programs tab -----
self.progLayout = QVBoxLayout()
self.extLayout = QVBoxLayout()
# Extensions can only be: <optional .><alphanumeric><optional comma><optional space>
#self.progValidator = QRegExpValidator(QRegExp("[\w,. ]+"), self)
self.extValidator = QRegExpValidator(QRegExp(r"(?:\.?\w*,?\s*)+"), self)
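        # For example, the validator accepts "usd, .usda, txt" but rejects
        # input containing other characters, such as "*.usd" or "a/b".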
self.lineEdit.setValidator(self.extValidator)
# Create the fields for programs and extensions.
self.populateProgsAndExts(parent.programs)
def connectSignals(self):
""" Connect signals to slots.
"""
self.buttonBox.clicked.connect(self.restoreDefaults)
self.buttonNewProg.clicked.connect(self.newProgField)
self.buttonBox.accepted.connect(self.validate)
self.buttonFont.clicked.connect(self.selectFont)
def deleteItems(self, layout):
""" Delete all items in given layout.
:Parameters:
layout : `QLayout`
Layout to delete items from
"""
if layout is not None:
while layout.count():
item = layout.takeAt(0)
widget = item.widget()
if widget is not None:
widget.deleteLater()
else:
self.deleteItems(item.layout())
def getPrefFont(self):
""" Get the user preference for font.
:Returns:
Font selected for documents.
:Rtype:
`QFont`
"""
return self.docFont
def getPrefLineNumbers(self):
""" Get the user preference for displaying line numbers.
:Returns:
State of "Show line numbers" check box.
:Rtype:
`bool`
"""
return self.checkBox_lineNumbers.isChecked()
def getPrefNewTab(self):
""" Get the user preference for opening links in a new tab or not.
:Returns:
State of "Open links in new tabs" check box.
:Rtype:
`bool`
"""
return self.checkBox_newTab.isChecked()
def getPrefParseLinks(self):
""" Get the user preference to enable link parsing.
:Returns:
Search for links in the opened file.
Disable this for huge files that freeze the app.
:Rtype:
`bool`
"""
return self.checkBox_parseLinks.isChecked()
def getPrefPrograms(self):
""" Get the user preference for file extensions and apps to open them with.
:Returns:
Dictionary of extension: program pairs of strings.
:Rtype:
`dict`
"""
return self.fileAssociations
def getPrefShowAllMessages(self):
""" Get the user preference to display all messages or just errors.
:Returns:
State of "Show success messages" check box.
:Rtype:
`bool`
"""
return self.checkBox_showAllMessages.isChecked()
def getPrefShowHiddenFiles(self):
""" Get the user preference for showing hidden files by default.
:Returns:
State of "Show hidden files" check box.
:Rtype:
`bool`
"""
return self.checkBox_showHiddenFiles.isChecked()
def getPrefAutoCompleteAddressBar(self):
""" Get the user preference for enabling address bar auto-completion.
:Returns:
State of "Auto complete paths in address bar" check box.
:Rtype:
`bool`
"""
return self.checkBox_autoCompleteAddressBar.isChecked()
def getPrefLineLimit(self):
""" Get the user preference for line limit before truncating files.
:Returns:
Number of lines to display before truncating a file.
:Rtype:
`int`
"""
return self.lineLimitSpinBox.value()
def getPrefSyntaxHighlighting(self):
""" Get the user preference to enable syntax highlighting.
:Returns:
State of "Enable syntax highlighting" check box.
:Rtype:
`bool`
"""
return self.checkBox_syntaxHighlighting.isChecked()
def getPrefTeletypeConversion(self):
""" Get the user preference to enable teletype character conversion.
:Returns:
State of "Display teletype colors" check box.
:Rtype:
`bool`
"""
return self.checkBox_teletypeConversion.isChecked()
def getPrefTextEditor(self):
""" Get the user-preferred text editor.
:Returns:
            Text in the Text editor QLineEdit.
:Rtype:
`str`
"""
return self.lineEditTextEditor.text()
def getPrefTheme(self):
""" Get the selected theme.
We may eventually make this a combo box supporting multiple themes,
so use the string name instead of just a boolean.
:Returns:
Selected theme name, or None if the default
:Rtype:
`str` | None
"""
return "dark" if self.themeWidget.isChecked() else None
def getPrefUseSpaces(self):
""" Get the user preference for spaces vs. tabs.
:Returns:
State of "Use spaces instead of tabs" check box.
:Rtype:
`bool`
"""
return self.useSpacesCheckBox.isChecked()
def getPrefTabSpaces(self):
""" Get the user preference for number of spaces equaling a tab.
:Returns:
Number of spaces to use instead of a tab.
            Only use this number if use spaces is also True.
:Rtype:
`int`
"""
return self.useSpacesSpinBox.value()
def getPrefAutoIndent(self):
""" Get the user preference for auto-indentation.
:Returns:
State of "Use auto indentation" check box.
:Rtype:
`bool`
"""
return self.checkBox_autoIndent.isChecked()
def getPrefDiffTool(self):
""" Get the user preference for diff tool.
:Returns:
            Text in the Diff tool QLineEdit.
:Rtype:
`str`
"""
return self.lineEditDiffTool.text()
@Slot(bool)
def newProgField(self, *args):
""" Add a new line to the programs list.
"""
self.lineEditProgs.append(QLineEdit(self))
self.progLayout.addWidget(self.lineEditProgs[len(self.lineEditProgs)-1])
self.lineEditExts.append(QLineEdit(self))
self.extLayout.addWidget(self.lineEditExts[len(self.lineEditExts)-1])
def populateProgsAndExts(self, programs):
""" Fill out the UI with the user preference for programs and extensions.
:Parameters:
programs : `dict`
Dictionary of extension: program pairs of strings.
"""
self.lineEditProgs = []
self.lineEditExts = []
# Get unique programs.
tmpSet = set()
progs = [x for x in programs.values() if x not in tmpSet and not tmpSet.add(x)]
del tmpSet
progs.sort()
# Get extensions per program.
exts = []
for prog in progs:
# Find each extension matching this program.
progExts = ["."+x for x in programs if programs[x] == prog]
progExts.sort()
# Format in comma-separated list for display.
exts.append(", ".join(progExts))
# Put the files that should open with this app in their own place.
# Then remove them from these lists.
index = progs.index("")
progs.pop(index)
self.lineEdit.setText(exts[index])
exts.pop(index)
del index
for i, prog in enumerate(progs):
# Create and populate two QLineEdit objects per extension: program pair.
self.lineEditProgs.append(QLineEdit(prog, self))
#self.lineEditProgs[i].setValidator(self.progValidator)
self.progLayout.addWidget(self.lineEditProgs[i])
self.lineEditExts.append(QLineEdit(exts[i], self))
self.lineEditExts[i].setValidator(self.extValidator)
self.extLayout.addWidget(self.lineEditExts[i])
self.progWidget.setLayout(self.progLayout)
self.extWidget.setLayout(self.extLayout)
@Slot(QAbstractButton)
def restoreDefaults(self, btn):
""" Restore the GUI to the program's default settings.
Don't update the actual preferences (that happens if OK is pressed).
"""
if btn == self.buttonBox.button(QDialogButtonBox.RestoreDefaults):
# Delete old QLineEdit objects.
self.deleteItems(self.progLayout)
self.deleteItems(self.extLayout)
# Set other preferences in the GUI.
default = self.parent().window().app.DEFAULTS
self.checkBox_parseLinks.setChecked(default['parseLinks'])
self.checkBox_newTab.setChecked(default['newTab'])
self.checkBox_syntaxHighlighting.setChecked(default['syntaxHighlighting'])
self.checkBox_teletypeConversion.setChecked(default['teletype'])
self.checkBox_lineNumbers.setChecked(default['lineNumbers'])
self.checkBox_showAllMessages.setChecked(default['showAllMessages'])
self.checkBox_showHiddenFiles.setChecked(default['showHiddenFiles'])
self.checkBox_autoCompleteAddressBar.setChecked(default['autoCompleteAddressBar'])
self.lineEditTextEditor.setText(default['textEditor'])
self.lineEditDiffTool.setText(default['diffTool'])
self.useSpacesCheckBox.setChecked(default['useSpaces'])
self.useSpacesSpinBox.setValue(default['tabSpaces'])
self.themeWidget.setChecked(False)
self.docFont = default['font']
self.updateFontLabel()
self.lineLimitSpinBox.setValue(default['lineLimit'])
self.checkBox_autoIndent.setChecked(default['autoIndent'])
# Re-create file association fields with the default programs.
self.populateProgsAndExts(self.parent().defaultPrograms)
@Slot(bool)
def selectFont(self, *args):
""" Update the user's font preference.
"""
font, ok = QFontDialog.getFont(self.docFont, self, "Select Font")
if ok:
self.docFont = font
self.updateFontLabel()
def updateFontLabel(self):
""" Update the UI font label to show the user's selected font.
"""
bold = "Bold " if self.docFont.bold() else ""
italic = "Italic " if self.docFont.italic() else ""
self.labelFont.setText("Document font: {}pt {}{}{}".format(self.docFont.pointSize(), bold, italic,
self.docFont.family()))
@Slot()
def validate(self):
""" Make sure everything has valid input.
Make sure there are no duplicate extensions.
        Accepts the dialog (emitting accepted()) only if all input is valid.
"""
for lineEdit in self.lineEditExts:
if lineEdit.hasAcceptableInput():
lineEdit.setStyleSheet("background-color:none")
else:
lineEdit.setStyleSheet("background-color:salmon")
                QMessageBox.warning(self, "Warning", "One or more extensions are invalid.")
return
# Get file extensions for this app to handle.
extText = self.lineEdit.text()
# Strip out periods and spaces.
extText = extText.replace(' ', '').replace('.', '')
progList = [[x, ""] for x in extText.split(',') if x]
for i in range(len(self.lineEditProgs)):
extText = self.lineEditExts[i].text()
progText = self.lineEditProgs[i].text()
extText = extText.replace(' ', '').replace('.', '')
for ext in extText.split(','):
if ext:
progList.append([ext, progText])
# Make sure there aren't any duplicate extensions.
tmpSet = set()
uniqueExt = [ext for ext, prog in progList if ext not in tmpSet and not tmpSet.add(ext)]
if len(uniqueExt) == len(progList):
self.fileAssociations = dict(progList)
else:
QMessageBox.warning(self, "Warning", "You have entered the same extension for two or more programs.")
return
# Accept if we made it this far.
self.accept()
|
104cb6b9cd50be410c503cc3054fb7df79722180
|
2909c14ac6232b4867a3aca166945634da5f9b7a
|
/tests/functional_tests/config_public.py
|
90fc75966ce5ae769e6834376a985736fe476dfa
|
[
"MIT"
] |
permissive
|
authomatic/authomatic
|
4c66cd6941e4309c8226ee935261ce2bd3bebde8
|
290e62e572f038fbd01d686a4556629f72037c15
|
refs/heads/master
| 2023-09-03T04:03:55.755212
| 2023-06-09T10:55:35
| 2023-06-09T10:55:35
| 8,073,983
| 289
| 67
|
MIT
| 2023-06-09T10:55:36
| 2013-02-07T14:13:35
|
Python
|
UTF-8
|
Python
| false
| false
| 6,154
|
py
|
config_public.py
|
# -*- coding: utf-8 -*-
import datetime
import os
from pyvirtualdisplay import Display
from selenium import webdriver
import constants
# Choose and configure the browser of your choice
def get_browser():
# # These work on Mac
# return webdriver.Chrome()
# return webdriver.Firefox()
# On Linux you need to initialize a display
global display
display = Display(visible=0, size=(1024, 768))
display.start()
options = webdriver.ChromeOptions()
options.add_argument("--no-sandbox")
options.add_argument("--disable-dev-shm-usage")
options.add_argument("--ignore-certificate-errors")
    options.add_experimental_option("useAutomationExtension", False)
return webdriver.Chrome(options=options)
# If present and callable, it will be called at the end of the whole test suite
def teardown():
global display
try:
display.stop()
except NameError:
pass
# A failed login by a provider will be retried so many times as set here
MAX_LOGIN_ATTEMPTS = 3
# Multiplies the wait times set in expected values
WAIT_MULTIPLIER = 1
# Minimum wait time
MIN_WAIT = 0
# The host and port where the tested app should listen.
HOST = '127.0.0.1'
PORT = 443
# The host alias set in the /etc/hosts file.
# The actual tests will navigate selenium browser to this host.
# This is necessary because some providers don't support localhost as the
# callback url.
HOST_ALIAS = 'authomatic.org'
# Only frameworks included here will be tested.
INCLUDE_FRAMEWORKS = [
# 'django',
'flask', # Runs with https
'pyramid', # Runs with https
]
# Only providers included here will be tested.
# Leave commented-out entries (with explanation) to prevent trying to re-add tests for services
# which aren't testable in an automated environment.
INCLUDE_PROVIDERS = [
    # OAuth 1.0a - This is mostly deprecated as a service 'in the wild' - we should drop support.
# 'bitbucket',
# 'flickr',
# 'plurk',
# 'twitter',
# 'tumblr',
# 'ubuntuone', # UbuntuOne service is no longer available
# 'vimeo',
# Xero requires creation of a new trial project every month which makes
# the setup of the automated test too laborious to support it.
# 'xero',
# 'xing',
# 'yahoo',
# OAuth 2.0
# 'amazon', # Asks for a captcha (cannot be automated)
# 'behance', # doesn't support third party authorization anymore.
# 'bitly', # deprecated for test suite refactoring - consider re-enabling
# 'deviantart', # deprecated for test suite refactoring - consider re-enabling
'facebook',
# 'foursquare', # deprecated for test suite refactoring - consider re-enabling
# 'google', # deprecated for test suite refactoring - consider re-enabling
# 'github', # Asks for 2FA/one-time-pass verification in Travis CI environment.
# 'linkedin', # # Asks for verification (captcha) in the login form in Travis CI environment.
# 'paypal', # deprecated for test suite refactoring - consider re-enabling
# 'reddit', # deprecated for test suite refactoring - consider re-enabling
# 'vk', # deprecated for test suite refactoring - consider re-enabling
# 'windowslive', # Asks for verification (captcha) in the login form in Travis CI environment.
# 'yammer', # deprecated for test suite refactoring - consider re-enabling
# 'yandex', # deprecated for test suite refactoring - consider re-enabling
# OpenID
# 'openid_livejournal', # Login and password elements are not visible.
# 'openid_verisignlabs', # deprecated for test suite refactoring - consider re-enabling
# 'openid_wordpress', # deprecated for test suite refactoring - consider re-enabling
# 'openid_yahoo', # deprecated for test suite refactoring - consider re-enabling
]
# Recommended setup for Travis CI environment.
if os.environ.get('TRAVIS'):
MAX_LOGIN_ATTEMPTS = 20
WAIT_MULTIPLIER = 2
MIN_WAIT = 2
# Use these constants if you have the same user info by all tested providers.
EMAIL = 'authomaticproject@protonmail.com'
FIRST_NAME = 'Authomatic'
LAST_NAME = 'Testuser'
NAME = FIRST_NAME + ' ' + LAST_NAME
USERNAME = 'authomaticproject'
USERNAME_REVERSE = 'projectauthomatic'
NICKNAME = 'Mr. AP'
BIRTH_YEAR = 2000
BIRTH_MONTH = 5
BIRTH_DAY = 5
BIRTH_DATE = datetime.datetime(BIRTH_YEAR, BIRTH_MONTH, BIRTH_DAY)
CITY = 'London'
COUNTRY = 'Great Britain'
COUNTRY_ISO2 = 'gb'
POSTAL_CODE = 'EC1A1DH'
PHONE = '??????????'
PHONE_INTERNATIONAL = '0044??????????'
GENDER = constants.GENDER_MALE
LOCALE = 'en_UK'
LOCATION = CITY + ', ' + COUNTRY
# Common values for all providers
COMMON = {
# Could be same if the user sets it so
'user_birth_date': BIRTH_DATE,
'user_birth_day': BIRTH_DAY,
'user_birth_month': BIRTH_MONTH,
'user_birth_year': BIRTH_YEAR,
'user_login': EMAIL,
'user_email': EMAIL,
'user_first_name': FIRST_NAME,
'user_last_name': LAST_NAME,
'user_name': NAME,
'user_username': USERNAME,
'user_username_reverse': USERNAME_REVERSE,
'user_nickname': NICKNAME,
'user_city': CITY,
'user_country': COUNTRY,
'user_gender': GENDER,
'user_phone': PHONE,
'user_postal_code': POSTAL_CODE,
'user_locale': LOCALE,
'user_location': LOCATION,
# It is not a good idea to have the same password for all providers
# 'user_password': '##########',
# Provider and user specific value
# 'user_id': '',
# 'user_locale': None,
# 'user_timezone': None,
# Provider specific format
# 'user_picture': '',
# 'user_link': '',
# Provider specific value
# 'consumer_key': '',
# 'consumer_secret': '',
}
# Values from COMMON will be overridden by values from PROVIDERS[provider_name]
# if set.
# Since this file is public, only put providers in here if they aren't secret.
# Otherwise, secret providers should be added to config_secret.py[.enc]
PROVIDERS = {
# # OAuth 2.0
# 'facebook': {
# 'consumer_key': '##########',
# 'consumer_secret': '##########',
# 'user_password': '##########',
# 'user_id': '??????????',
# },
}
|
6e1dc2a23d8d460040013011702418d14708eccb
|
61673ab9a42f7151de7337608c442fa6247f13bb
|
/tkinter/label/label-FormatLabel/main.py
|
0194cc2c8617181b7dc3e1cf96669ab4a5aea248
|
[
"MIT"
] |
permissive
|
furas/python-examples
|
22d101670ecd667a29376d7c7d7d86f8ec71f6cf
|
95cb53b664f312e0830f010c0c96be94d4a4db90
|
refs/heads/master
| 2022-08-23T23:55:08.313936
| 2022-08-01T14:48:33
| 2022-08-01T14:48:33
| 45,575,296
| 176
| 91
|
MIT
| 2021-02-17T23:33:37
| 2015-11-04T23:54:32
|
Python
|
UTF-8
|
Python
| false
| false
| 1,518
|
py
|
main.py
|
import tkinter as tk
# --- class ---
class FormatLabel(tk.Label):
def __init__(self, master=None, cnf={}, **kw):
# default values
self._format = '{}'
self._textvariable = None
# get `format` and remove from `kw` so later `Label.__init__` doesn't get it
if 'format' in kw:
self._format = kw['format']
del kw['format']
# get `textvariable` and remove from `kw` so later `Label.__init__` doesn't get it
# Assign own function to format text in Label when variable change value
if 'textvariable' in kw:
self._textvariable = kw['textvariable']
self._textvariable.trace('w', self._update_text)
del kw['textvariable']
# run `Label.__init__` without `format` and `textvariable`
super().__init__(master, cnf={}, **kw)
# update text after running `Label.__init__`
if self._textvariable:
self._update_text(self._textvariable, '', 'w')
def _update_text(self, a, b, c):
"""update text in label when variable change value"""
#print(f'|{a}|{b}|{c}|')
self["text"] = self._format.format(self._textvariable.get())
# --- main ---
root = tk.Tk()
myvar = tk.DoubleVar(value=0.05)
label = FormatLabel(root, textvariable=myvar, format="Today: {:.0%} and growing")
#label = FormatLabel(root, textvariable=myvar, format="{:.0%}")
label.pack()
myvar.set(0.1)
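# With the values above, the label is created showing "Today: 5% and growing"
# and is updated to "Today: 10% and growing" by `myvar.set(0.1)` before the
# window appears.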
root.mainloop()
|
9e7707487e1a0ef73a17595201a703920cfbc772
|
fc8ffd2f74d63024145e0628c700461765797c1c
|
/custom_components/edgeos/core/helpers/enums.py
|
ba787db69154b819ecc45489681abb99d6205223
|
[] |
no_license
|
elad-bar/ha-edgeos
|
a22edeb4429c2bd581e74cc535ce5ce3583ce84f
|
62e66ac5d67d2704cd8d602db9b5f1f490ed8f1f
|
refs/heads/master
| 2023-08-23T17:57:52.953761
| 2023-03-30T07:58:02
| 2023-03-30T07:58:02
| 169,467,285
| 129
| 31
| null | 2023-08-13T08:46:35
| 2019-02-06T19:53:18
|
Python
|
UTF-8
|
Python
| false
| false
| 1,056
|
py
|
enums.py
|
import logging
from homeassistant.backports.enum import StrEnum
class ConnectivityStatus(StrEnum):
NotConnected = "Not connected"
Connecting = "Establishing connection to API"
Connected = "Connected to the API"
TemporaryConnected = "Connected with temporary API key"
Failed = "Failed to access API"
InvalidCredentials = "Invalid credentials"
MissingAPIKey = "Permanent API Key was not found"
Disconnected = "Disconnected by the system"
NotFound = "API Not found"
@staticmethod
def get_log_level(status: StrEnum) -> int:
if status == ConnectivityStatus.Connected:
return logging.DEBUG
elif status in [ConnectivityStatus.Disconnected]:
return logging.INFO
elif status in [ConnectivityStatus.NotConnected, ConnectivityStatus.Connecting]:
return logging.WARNING
else:
return logging.ERROR
class EntityStatus(StrEnum):
EMPTY = "empty"
READY = "ready"
CREATED = "created"
DELETED = "deleted"
UPDATED = "updated"
|
288c80ae70fdcbe0784be00bb87b6e7a471daaaf
|
2481cde6506743565dff2b405a2396daf208ab3e
|
/src/utils/chart.py
|
5c92aa94eda34e5a7e4b6319ba3d780ecde25ce9
|
[
"Apache-2.0"
] |
permissive
|
aropan/clist
|
4819a3036d179595e4df8c646aff2ed593b9dad3
|
5c805b2af71acee97f993f19d8d4e229f7f5b411
|
refs/heads/master
| 2023-08-31T11:15:17.987776
| 2023-08-27T21:51:14
| 2023-08-27T21:52:16
| 187,111,853
| 276
| 35
|
Apache-2.0
| 2023-09-06T18:42:53
| 2019-05-16T22:57:03
|
Python
|
UTF-8
|
Python
| false
| false
| 6,130
|
py
|
chart.py
|
import re
from datetime import datetime
from django.db.models import Case, F, FloatField, IntegerField, Q, Value, When
from django.db.models.fields.related import RelatedField
from django.db.models.functions import Cast
from django_pivot.histogram import get_column_values, histogram
from clist.templatetags.extras import title_field
from utils.json_field import JSONF
from utils.math import get_divisors
def make_bins(src, dst, n_bins, logger=None, field=None, step=None):
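    """Build histogram bin edges (normally ``n_bins + 1`` of them) spanning
    ``src``..``dst``.

    A worked example, derived by hand rather than taken from the original
    module: ``make_bins(3, 17, 5)`` returns ``[3, 6, 9, 11, 14, 17]``, i.e.
    six integer edges delimiting five bins. String bounds are binned by
    character code instead.
    """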
n_bins += 1
if isinstance(src, str):
if not dst:
logger and logger.warning(f'One of border is empty, field = {field}')
return
st = ord(src[0]) + 1 if src else 32
fn = ord(dst[0])
bins = [src] + [chr(int(round(st + (fn - st) * i / (n_bins - 1)))) for i in range(n_bins)] + [dst]
else:
if step is not None:
for divisor in get_divisors(step, reverse=True):
n_src = src - src % divisor
n_dst = dst + (divisor - dst % divisor) % divisor
delta = (n_dst - n_src) / (n_bins - 1)
if divisor <= delta:
src, dst = n_src, n_dst
n_bins = (n_dst - n_src) // divisor + 1
break
bins = [src + (dst - src) * i / (n_bins - 1) for i in range(n_bins)]
if isinstance(src, int):
bins = [int(round(b)) for b in bins]
elif isinstance(src, float):
bins = [round(b, 2) for b in bins]
bins = list(sorted(set(bins)))
if isinstance(src, int) and len(bins) < n_bins:
bins.append(bins[-1] + 1)
elif len(bins) == 1:
bins.append(bins[-1])
return bins
def make_histogram(values, n_bins=None, bins=None, src=None, dst=None, deltas=None):
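    """Count ``values`` (optionally weighted by ``deltas``) into bins.

    A worked example, derived by hand: ``make_histogram([1, 2, 2, 3], n_bins=2)``
    returns ``([1, 3], [1, 2, 3])``. The last bin is closed on the right, so
    the maximum value 3 still lands in the second bin.
    """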
if bins is None:
if src is None:
src = min(values)
if dst is None:
dst = max(values)
bins = make_bins(src, dst, n_bins)
idx = 0
ret = [0] * (len(bins) - 1)
if deltas is None:
deltas = [1] * len(values)
for x, delta in sorted(zip(values, deltas)):
while idx + 1 < len(bins) and bins[idx + 1] <= x:
idx += 1
if idx == len(ret):
if bins[idx] == x:
idx -= 1
else:
break
ret[idx] += delta
return ret, bins
def make_beetween(column, value, start, end=None):
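    """Build a Django ``When`` clause that maps ``column`` values in the
    half-open interval ``[start, end)`` (or ``[start, +inf)`` when ``end`` is
    ``None``) to ``value``."""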
if end is None:
return When(Q(**{column + '__gte': start}), then=Value(value))
else:
return When(Q(**{column + '__gte': start, column + '__lt': end}), then=Value(value))
def make_chart(qs, field, groupby=None, logger=None, n_bins=42, cast=None, step=None, aggregations=None, bins=None,
norm_value=None):
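    """Build the template context for a histogram chart of ``field`` over the
    queryset ``qs``, optionally sliced by resource or country and extended
    with extra aggregations. Returns ``None`` if the field is invalid or no
    values are available.
    """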
context = {'title': title_field(field) + (f' (slice by {groupby})' if groupby else '')}
if cast == 'int':
cast = IntegerField()
elif cast == 'float':
cast = FloatField()
else:
cast = None
if '__' in field:
related_fields = set()
for f in qs.model._meta.related_objects:
related_fields.add(f.name)
for f in qs.model._meta.many_to_many:
related_fields.add(f.name)
for f in qs.model._meta.fields:
if isinstance(f, RelatedField):
related_fields.add(f.name)
related_field = field.split('__')[0]
if related_field in related_fields or '___' in field:
logger and logger.error(f'use of an invalid field = {field}')
return
cast = cast or IntegerField()
qs = qs.annotate(value=Cast(JSONF(field), cast))
else:
if cast:
qs = qs.annotate(value=Cast(F(field), cast))
else:
qs = qs.annotate(value=F(field))
context['queryset'] = qs
context['field'] = field
qs = qs.filter(value__isnull=False)
slice_on = None
if groupby == 'resource':
slice_on = 'resource__host'
elif groupby == 'country':
slice_on = 'country'
if slice_on:
values = get_column_values(qs, slice_on, choices='minimum')
fields = [f for f, v in values]
n_bins = max(2 * n_bins // len(fields) + 1, 4)
context['fields'] = fields
context['slice'] = slice_on
if not qs.exists():
logger and logger.warning(f'Empty histogram, field = {field}')
return
src = qs.earliest('value').value
dst = qs.latest('value').value
bins = bins or make_bins(src=src, dst=dst, n_bins=n_bins, logger=logger, field=field, step=step)
context['bins'] = bins.copy()
if isinstance(src, datetime):
context['x_type'] = 'time'
bins.pop(-1)
context['data'] = histogram(qs, 'value', bins=bins, slice_on=slice_on, choices='minimum')
for idx, row in enumerate(context['data']):
if isinstance(src, datetime):
st = re.findall('([0-9]+|.)', str(context['bins'][idx]))
fn = re.findall('([0-9]+|.)', str(context['bins'][idx + 1]))
title = ''
n_diff = 0
for lhs, rhs in zip(st, fn):
title += lhs
if lhs != rhs:
n_diff += 1
if n_diff == 2:
break
row['title'] = title
else:
interval = ']' if idx + 1 == len(context['data']) else ')'
row['title'] = f"[{context['bins'][idx]}..{context['bins'][idx + 1]}{interval}"
if norm_value:
interval = context['bins'][idx + 1] - context['bins'][idx]
if interval:
row['value'] *= norm_value / interval
if aggregations:
whens = [
make_beetween('value', k, bins[k], bins[k + 1] if k + 1 < len(bins) else None)
for k in range(len(bins))
]
qs = qs.annotate(k=Case(*whens, output_field=IntegerField()))
qs = qs.order_by('k').values('k')
for field, aggregate in aggregations.items():
result = qs.annotate(**{field: aggregate})
for record in result:
context['data'][record['k']][field] = record[field]
return context
|
da8e29130f513411b5ccb865515c1a8912fbfa86
|
31766b4e629cd57db879d976b1ddda30e2901368
|
/kombu/asynchronous/timer.py
|
a3b7fb777675f77d59d53fda8349cc7aa502becd
|
[
"BSD-3-Clause"
] |
permissive
|
celery/kombu
|
980bbdf819eccc05952139f29386b69087725d80
|
c18e9626e1dec56fa58cfe384403b25f34fe4473
|
refs/heads/main
| 2023-09-03T23:25:04.599675
| 2023-09-01T12:44:44
| 2023-09-03T20:16:00
| 735,498
| 2,232
| 928
|
BSD-3-Clause
| 2023-09-14T06:56:34
| 2010-06-23T10:10:24
|
Python
|
UTF-8
|
Python
| false
| false
| 6,940
|
py
|
timer.py
|
"""Timer scheduling Python callbacks."""
from __future__ import annotations
import heapq
import sys
from collections import namedtuple
from datetime import datetime
from functools import total_ordering
from time import monotonic
from time import time as _time
from typing import TYPE_CHECKING
from weakref import proxy as weakrefproxy
from vine.utils import wraps
from kombu.log import get_logger
if sys.version_info >= (3, 9):
from zoneinfo import ZoneInfo
else:
from backports.zoneinfo import ZoneInfo
if TYPE_CHECKING:
from types import TracebackType
__all__ = ('Entry', 'Timer', 'to_timestamp')
logger = get_logger(__name__)
DEFAULT_MAX_INTERVAL = 2
EPOCH = datetime.utcfromtimestamp(0).replace(tzinfo=ZoneInfo("UTC"))
IS_PYPY = hasattr(sys, 'pypy_version_info')
scheduled = namedtuple('scheduled', ('eta', 'priority', 'entry'))
def to_timestamp(d, default_timezone=ZoneInfo("UTC"), time=monotonic):
"""Convert datetime to timestamp.
    If `d` is already a timestamp, then that will be used.
"""
if isinstance(d, datetime):
if d.tzinfo is None:
d = d.replace(tzinfo=default_timezone)
diff = _time() - time()
return max((d - EPOCH).total_seconds() - diff, 0)
return d
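# Hedged illustration (editor's addition): non-datetime inputs pass through
# unchanged, while naive datetimes are assumed to be UTC, e.g.
#   to_timestamp(42.0)                  # -> 42.0
#   to_timestamp(datetime(2100, 1, 1))  # -> the monotonic-clock timestamp for that instant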
@total_ordering
class Entry:
"""Schedule Entry."""
if not IS_PYPY: # pragma: no cover
__slots__ = (
'fun', 'args', 'kwargs', 'tref', 'canceled',
'_last_run', '__weakref__',
)
def __init__(self, fun, args=None, kwargs=None):
self.fun = fun
self.args = args or []
self.kwargs = kwargs or {}
self.tref = weakrefproxy(self)
self._last_run = None
self.canceled = False
def __call__(self):
return self.fun(*self.args, **self.kwargs)
def cancel(self):
try:
self.tref.canceled = True
except ReferenceError: # pragma: no cover
pass
def __repr__(self):
        return '<TimerEntry: {}(*{!r}, **{!r})>'.format(
            self.fun.__name__, self.args, self.kwargs)
# must not use hash() to order entries
def __lt__(self, other):
return id(self) < id(other)
@property
def cancelled(self):
return self.canceled
@cancelled.setter
def cancelled(self, value):
self.canceled = value
class Timer:
"""Async timer implementation."""
Entry = Entry
on_error = None
def __init__(self, max_interval=None, on_error=None, **kwargs):
self.max_interval = float(max_interval or DEFAULT_MAX_INTERVAL)
self.on_error = on_error or self.on_error
self._queue = []
def __enter__(self):
return self
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None
) -> None:
self.stop()
def call_at(self, eta, fun, args=(), kwargs=None, priority=0):
kwargs = {} if not kwargs else kwargs
return self.enter_at(self.Entry(fun, args, kwargs), eta, priority)
def call_after(self, secs, fun, args=(), kwargs=None, priority=0):
kwargs = {} if not kwargs else kwargs
return self.enter_after(secs, self.Entry(fun, args, kwargs), priority)
def call_repeatedly(self, secs, fun, args=(), kwargs=None, priority=0):
kwargs = {} if not kwargs else kwargs
tref = self.Entry(fun, args, kwargs)
@wraps(fun)
def _reschedules(*args, **kwargs):
last, now = tref._last_run, monotonic()
lsince = (now - tref._last_run) if last else secs
try:
if lsince and lsince >= secs:
tref._last_run = now
return fun(*args, **kwargs)
finally:
if not tref.canceled:
last = tref._last_run
next = secs - (now - last) if last else secs
self.enter_after(next, tref, priority)
tref.fun = _reschedules
tref._last_run = None
return self.enter_after(secs, tref, priority)
def enter_at(self, entry, eta=None, priority=0, time=monotonic):
"""Enter function into the scheduler.
Arguments:
---------
entry (~kombu.asynchronous.timer.Entry): Item to enter.
eta (datetime.datetime): Scheduled time.
priority (int): Unused.
"""
if eta is None:
eta = time()
if isinstance(eta, datetime):
try:
eta = to_timestamp(eta)
except Exception as exc:
if not self.handle_error(exc):
raise
return
return self._enter(eta, priority, entry)
def enter_after(self, secs, entry, priority=0, time=monotonic):
return self.enter_at(entry, time() + float(secs), priority)
def _enter(self, eta, priority, entry, push=heapq.heappush):
push(self._queue, scheduled(eta, priority, entry))
return entry
def apply_entry(self, entry):
try:
entry()
except Exception as exc:
if not self.handle_error(exc):
logger.error('Error in timer: %r', exc, exc_info=True)
def handle_error(self, exc_info):
if self.on_error:
self.on_error(exc_info)
return True
def stop(self):
pass
def __iter__(self, min=min, nowfun=monotonic,
pop=heapq.heappop, push=heapq.heappush):
"""Iterate over schedule.
This iterator yields a tuple of ``(wait_seconds, entry)``,
where if entry is :const:`None` the caller should wait
for ``wait_seconds`` until it polls the schedule again.
"""
max_interval = self.max_interval
queue = self._queue
while 1:
if queue:
eventA = queue[0]
now, eta = nowfun(), eventA[0]
if now < eta:
yield min(eta - now, max_interval), None
else:
eventB = pop(queue)
if eventB is eventA:
entry = eventA[2]
if not entry.canceled:
yield None, entry
continue
else:
push(queue, eventB)
else:
yield None, None
def clear(self):
self._queue[:] = [] # atomic, without creating a new list.
def cancel(self, tref):
tref.cancel()
def __len__(self):
return len(self._queue)
def __nonzero__(self):
return True
@property
def queue(self, _pop=heapq.heappop):
"""Snapshot of underlying datastructure."""
events = list(self._queue)
return [_pop(v) for v in [events] * len(events)]
@property
def schedule(self):
return self
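# --- Hedged usage sketch (editor's addition, not part of kombu) ---
# Demonstrates the (wait_seconds, entry) protocol documented on ``Timer.__iter__``:
# the iterator yields an entry when one is due, otherwise how long to wait.
if __name__ == '__main__':  # pragma: no cover
    from time import sleep
    demo_timer = Timer()
    demo_timer.call_after(0.1, print, ('entry fired after ~0.1s',))
    for wait, entry in demo_timer:
        if entry is not None:
            demo_timer.apply_entry(entry)
            break
        sleep(wait or 0.01)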
|
652e4b3a7f7ceebac5b7705ebbb2248f59549aa9
|
57e77c8a46104b1675ebd8e9b0439f4434eabe8f
|
/algnuth/ideals.py
|
5ab27aa21b1ddec66fdb98576abb46fec7ea5bca
|
[
"MIT"
] |
permissive
|
louisabraham/algnuth
|
7e0a9b98e5cd399fb79478427c4e29ca60635418
|
47d2cd88d59d76110c1807ec5e036e10103c3f62
|
refs/heads/master
| 2022-09-11T21:55:23.812494
| 2021-06-23T16:05:47
| 2021-06-23T16:05:47
| 120,233,707
| 295
| 9
|
MIT
| 2018-02-24T16:26:45
| 2018-02-04T23:37:54
|
Python
|
UTF-8
|
Python
| false
| false
| 1,279
|
py
|
ideals.py
|
"""
This module provides functions to manipulate
extension fields given their minimal polynomial.
"""
from math import pi, factorial
from .polynom import Polynomial
from .jacobi import sieve
def minkowski_bound(P):
"""
Any ideal of the ring of integers
of the algebraic number field whose
minimal polynomial is P contains
an integer N such that
1 ≤ N ≤ minkowski_bound(P)
"""
return (4 / pi) ** P.r2 * factorial(P.deg) / P.deg ** P.deg * abs(P.disc) ** .5
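# For reference (editor's addition): the expression above is the classical
# Minkowski bound
#   M = (4/pi)^{r_2} * (n! / n^n) * sqrt(|disc(P)|),  n = deg(P),
# where r_2 is the number of pairs of complex embeddings.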
def idealsContaining(P, p):
"""
Ideals of the extension field of minimal
polynomial P containing the prime p
"""
Pmodp = P.reduceP(p)
c, Ds = Pmodp.factor()
print('%s mod %s = %s' % (P, p, Polynomial.ppfactors((c, Ds))))
print("(%s) = " % p +
'⋅'.join(("(%s, %s)" % (p, D)
+ (v > 1) * ("^%s" % v))
if sum(Ds.values()) > 1 else "(%s)" % p
for D, v in Ds.items()).replace("X", "α"))
def factorIdeals(P):
"""
Finds the ideals of the ring of integers
of the algebraic number field whose
minimal polynomial is P
"""
b = int(minkowski_bound(P))
if b == 1:
print('Principal!')
for p in sieve(b + 1):
idealsContaining(P, p)
print()
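# Hedged usage sketch (editor's addition): assuming Polynomial can be built
# from a coefficient list (the constructor is not shown in this module),
# factoring the ideals of Q(sqrt(-5)) would look roughly like:
#   P = Polynomial([5, 0, 1])   # X^2 + 5; hypothetical constructor call
#   factorIdeals(P)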
|
df3b81ed9a209d27ab5525a8235dc3005c58838b
|
df87814cb32990ad8c27d0b13a821aabce012819
|
/kolibri/core/exams/migrations/0001_initial.py
|
aa7de52bd58f2bce1c23d005ae754439c25a8016
|
[
"MIT"
] |
permissive
|
learningequality/kolibri
|
26812d4ae771f3b389d3317a586bc032fc84866b
|
cc9da2a6acd139acac3cd71c4cb05c15d4465712
|
refs/heads/release-v0.16.x
| 2023-09-01T18:07:29.720772
| 2023-08-31T15:43:47
| 2023-08-31T15:43:47
| 49,976,939
| 689
| 682
|
MIT
| 2023-09-14T20:02:29
| 2016-01-19T19:22:07
|
Python
|
UTF-8
|
Python
| false
| false
| 4,450
|
py
|
0001_initial.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2017-05-16 22:34
from __future__ import unicode_literals
import django.db.models.deletion
import morango.models
from django.db import migrations
from django.db import models
import kolibri.core.fields
class Migration(migrations.Migration):
initial = True
dependencies = [("kolibriauth", "0001_initial")]
operations = [
migrations.CreateModel(
name="Exam",
fields=[
(
"id",
morango.models.UUIDField(
editable=False, primary_key=True, serialize=False
),
),
(
"_morango_dirty_bit",
models.BooleanField(default=True, editable=False),
),
("_morango_source_id", models.CharField(editable=False, max_length=96)),
(
"_morango_partition",
models.CharField(editable=False, max_length=128),
),
("title", models.CharField(max_length=200)),
("channel_id", models.CharField(max_length=32, blank=True)),
("question_count", models.IntegerField()),
(
"question_sources",
kolibri.core.fields.JSONField(blank=True, default=[]),
),
("seed", models.IntegerField(default=1)),
("active", models.BooleanField(default=False)),
("archive", models.BooleanField(default=False)),
(
"collection",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="exams",
to="kolibriauth.Collection",
),
),
(
"creator",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="exams",
to="kolibriauth.FacilityUser",
),
),
(
"dataset",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="kolibriauth.FacilityDataset",
),
),
],
options={"abstract": False},
),
migrations.CreateModel(
name="ExamAssignment",
fields=[
(
"id",
morango.models.UUIDField(
editable=False, primary_key=True, serialize=False
),
),
(
"_morango_dirty_bit",
models.BooleanField(default=True, editable=False),
),
("_morango_source_id", models.CharField(editable=False, max_length=96)),
(
"_morango_partition",
models.CharField(editable=False, max_length=128),
),
(
"assigned_by",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="assigned_exams",
to="kolibriauth.FacilityUser",
),
),
(
"collection",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="assigned_exams",
to="kolibriauth.Collection",
),
),
(
"dataset",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="kolibriauth.FacilityDataset",
),
),
(
"exam",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="assignments",
to="exams.Exam",
),
),
],
options={"abstract": False},
),
]
|
d73d2017edd7b663177b21448293402f440c0232
|
e6ef43f3b0cdc6f2c973c01f3c204c72b8c09da4
|
/tests/roots/test-incremental_js/conf.py
|
4ec9236cfd1e9225a479e6a0f73f0f03cde7c485
|
[
"MIT"
] |
permissive
|
mozilla/sphinx-js
|
5c72ed088216d6d875a186779e3f93ab3fc23984
|
ba54cc570e3455661fb297394d17a76aa1a0b286
|
refs/heads/master
| 2023-08-10T23:34:51.861866
| 2023-01-30T13:18:41
| 2023-02-27T14:58:32
| 80,552,521
| 133
| 52
|
MIT
| 2023-04-10T08:32:43
| 2017-01-31T19:11:59
|
Python
|
UTF-8
|
Python
| false
| false
| 438
|
py
|
conf.py
|
extensions = [
'sphinx_js'
]
# Minimal stuff needed for Sphinx to work:
source_suffix = '.rst'
master_doc = 'index'
author = u'Nick Alexander'
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
js_source_path = ['.', 'inner']
root_for_relative_js_paths = '.'
# Temp directories on macOS have internal directories starting with
# "_", running afoul of https://github.com/jsdoc/jsdoc/issues/1328.
jsdoc_config_path = 'jsdoc.json'
|
ad20f8883cf37727d2758719b7124a5df1bf7d68
|
549270020f6c8724e2ef1b12e38d11b025579f8d
|
/recipes/wyhash/all/conanfile.py
|
34e8d0350c888cdbc6d636efc89e519d29198198
|
[
"MIT"
] |
permissive
|
conan-io/conan-center-index
|
1bcec065ccd65aa38b1fed93fbd94d9d5fe6bc43
|
3b17e69bb4e5601a850b6e006e44775e690bac33
|
refs/heads/master
| 2023-08-31T11:34:45.403978
| 2023-08-31T11:13:23
| 2023-08-31T11:13:23
| 204,671,232
| 844
| 1,820
|
MIT
| 2023-09-14T21:22:42
| 2019-08-27T09:43:58
|
Python
|
UTF-8
|
Python
| false
| false
| 1,272
|
py
|
conanfile.py
|
from conan import ConanFile
from conan.tools.files import get, copy
from conan.tools.layout import basic_layout
import os
required_conan_version = ">=1.52.0"
class WyhashConan(ConanFile):
name = "wyhash"
description = "The FASTEST QUALITY hash function, random number generators (PRNG) and hash map."
license = "Unlicense"
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://github.com/wangyi-fudan/wyhash"
topics = ("bloom-filter", "hash", "random-number-generators", "header-only")
settings = "os", "arch", "compiler", "build_type"
no_copy_source = True
def layout(self):
basic_layout(self, src_folder="src")
def package_id(self):
self.info.clear()
def source(self):
get(self, **self.conan_data["sources"][self.version], destination=self.source_folder, strip_root=True)
def package(self):
copy(self, pattern="LICENSE", dst=os.path.join(self.package_folder, "licenses"), src=self.source_folder)
copy(
self,
pattern="*.h",
dst=os.path.join(self.package_folder, "include"),
src=self.source_folder,
)
def package_info(self):
self.cpp_info.bindirs = []
self.cpp_info.libdirs = []
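# Hedged usage note (editor's addition): header-only recipes like this one are
# typically exercised locally with something along the lines of
#   conan create . --version=<wyhash version>
# (the exact flags depend on the Conan client version; not stated in this recipe).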
|
23f22b54d1874b1e560b7f7699a5922bd7df1de6
|
fa1ad2e2ac7e376fc7cb3b3a6e1bb88eed3e80be
|
/studio/micro-services/cube-studio/install/kubernetes/prometheus/alertmanater/make-alert-wechat-config.py
|
7c42dc6fa07108407f53dcefe497a88fdbbe15f7
|
[
"Apache-2.0",
"BSD-3-Clause",
"EPL-2.0",
"MIT",
"LGPL-2.1-only",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
] |
permissive
|
alldatacenter/alldata
|
7bc7713c9f1d56ad6b8e59ea03206d1073b7e047
|
8d5f9a2d49ab8f9e85ccf058cb02c2fda287afc6
|
refs/heads/master
| 2023-08-05T07:32:25.442740
| 2023-08-03T13:17:24
| 2023-08-03T13:17:24
| 213,321,771
| 774
| 250
|
Apache-2.0
| 2023-09-06T17:35:32
| 2019-10-07T07:36:18
| null |
UTF-8
|
Python
| false
| false
| 977
|
py
|
make-alert-wechat-config.py
|
import base64
# Secret used for the alertmanager alert configuration.
# Note: any change to the line breaks changes the base64-encoded output.
config='''
global:
resolve_timeout: 5m
templates:
- '/etc/alertmanager/template/*.tmpl'
route:
group_by: ['alertname', 'cluster', 'service']
group_wait: 30s
group_interval: 5m
repeat_interval: 1h
receiver: 'null'
routes:
- match_re:
namespace: ".*"
receiver: 'default'
# Alert inhibition, to avoid duplicate alerts.
# https://yunlzheng.gitbook.io/prometheus-book/parti-prometheus-ji-chu/alert/alert-manager-inhibit
inhibit_rules:
- source_match:
severity: 'critical'
target_match:
severity: 'warning'
# Apply inhibition if the alertname is the same.
equal: ['alertname', 'cluster', 'service']
receivers:
- name: 'default'
webhook_configs:
- send_resolved: true
url: 'http://xx.xx.xx.xx/'
- name: 'null'
'''
base64str = base64.b64encode(bytes(config,encoding='utf-8'))
print(str(base64str,encoding='utf-8'))
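# Hedged usage note (editor's addition): the printed base64 string is the form
# expected in a Kubernetes Secret's `data:` field, e.g. (illustrative only):
#   apiVersion: v1
#   kind: Secret
#   data:
#     alertmanager.yaml: <base64 output from this script>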
|
c509e53f48ff50ee9f3f66d011144274590facf8
|
fdbfbcf4d6a0ef6f3c1b600e7b8037eed0f03f9e
|
/tools/workspace/ipopt_internal_fromsource/test/lint_test.py
|
f76d9e6dbd9687d4d557d5e0360b71d1577767eb
|
[
"BSD-3-Clause"
] |
permissive
|
RobotLocomotion/drake
|
4529c397f8424145623dd70665531b5e246749a0
|
3905758e8e99b0f2332461b1cb630907245e0572
|
refs/heads/master
| 2023-08-30T21:45:12.782437
| 2023-08-30T15:59:07
| 2023-08-30T15:59:07
| 16,256,144
| 2,904
| 1,270
|
NOASSERTION
| 2023-09-14T20:51:30
| 2014-01-26T16:11:05
|
C++
|
UTF-8
|
Python
| false
| false
| 4,244
|
py
|
lint_test.py
|
import json
import unittest
class IpoptLintTest(unittest.TestCase):
def _read(self, filename):
"""Returns the contents of the given filename."""
with open(filename, encoding="utf-8") as f:
return f.read()
def _parse_build(self, varname):
"""Parses a constant list of filenames from a BUILD file.
The only supported format is like this:
{varname} = [
"path/to/file1",
"path/to/file2",
]
"""
result = []
contents = self._read(
"tools/workspace/ipopt_internal_fromsource/package.BUILD.bazel")
lines = contents.splitlines()
start_line = f"{varname} = ["
end_line = "]"
start_index = lines.index(start_line)
end_index = lines.index(end_line, start_index + 1)
for i in range(start_index + 1, end_index):
line = lines[i]
prefix = ' "'
suffix = '",'
self.assertTrue(line.startswith(prefix), line)
self.assertTrue(line.endswith(suffix), line)
result.append(line[len(prefix):-len(suffix)])
return set(result)
def _parse_make(self, varname, *, guard_line=None):
"""Parses a constant list of filenames from Makefile.am.
The only supported formats are like this (with no `guard_line`):
{varname} = \
path/to/file1 \
path/to/file2
Or this (with a `guard_line`):
{guard_line}
{varname} += \
path/to/file1 \
path/to/file2
"""
result = []
contents = self._read(
"external/ipopt_internal_fromsource/src/Makefile.am")
lines = contents.splitlines()
if guard_line is None:
start_line = f"{varname} = \\"
index = lines.index(start_line) + 1
else:
index = lines.index(guard_line) + 1
self.assertEqual(lines[index], f"{varname} += \\")
index += 1
while True:
line = lines[index]
has_slash = line.endswith("\\")
if has_slash:
line = line[:-1]
result.append("src/" + line.strip())
index += 1
if not has_slash:
break
return set(result)
def test_hdrs_public(self):
"""Checks that _HDRS_PUBLIC matches includeipopt_HEADERS."""
self.assertSetEqual(self._parse_build("_HDRS_PUBLIC"),
self._parse_make("includeipopt_HEADERS"))
def test_srcs_initial(self):
"""Checks that _SRCS_INITIAL matches libipopt_la_SOURCES, except for
two specific unwanted sources.
"""
make_sources = self._parse_make("libipopt_la_SOURCES")
make_sources.remove("src/Interfaces/IpStdCInterface.cpp")
make_sources.remove("src/Interfaces/IpStdFInterface.c")
self.assertSetEqual(self._parse_build("_SRCS_INITIAL"),
make_sources)
def test_srcs_solver_int32(self):
"""Checks that _SRCS_SOLVER_INT32 matches !IPOPT_INT64's effect."""
self.assertSetEqual(self._parse_build("_SRCS_SOLVER_INT32"),
self._parse_make("libipopt_la_SOURCES",
guard_line="if !IPOPT_INT64"))
    def test_wheel_version_pin(self):
"""Checks that the repository rule and wheel agree on which version of
IPOPT we should be using.
"""
# Parse the Bazel version.
commit = json.loads(self._read(
"external/ipopt_internal_fromsource/"
"drake_repository_metadata.json"))["commit"]
prefix = "releases/"
self.assertTrue(commit.startswith(prefix), commit)
bazel_version = commit[len(prefix):]
# Parse the Wheel version from the line `set(ipopt_version #.#.#)`.
projects = self._read(
"tools/wheel/image/dependencies/projects.cmake")
prefix = "set(ipopt_version "
start = projects.index(prefix) + len(prefix)
end = projects.index(")", start)
wheel_version = projects[start:end]
# Exact string match.
self.assertEqual(wheel_version, bazel_version)
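# Hedged note (editor's addition): in Drake this suite is normally driven by
# Bazel, e.g. something like
#   bazel test //tools/workspace/ipopt_internal_fromsource/...
# (the exact target label is an assumption, not stated in this file).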
|
82eded0580f1256a67375e337dc89f1d9052b60b
|
ec85250addb7357dfe7bb3e0680d53fc7b0fd8fb
|
/examples/docs_snippets/docs_snippets/concepts/configuration/env_vars_config.py
|
d85bf7e74be09c1eddc23968b3bb833d083f8268
|
[
"Apache-2.0"
] |
permissive
|
dagster-io/dagster
|
6adb5deee8bcf3ea1866a6a64f2ed81e1db5e73a
|
fe21995e0402878437a828c6a4244025eac8c43b
|
refs/heads/master
| 2023-09-05T20:46:08.203794
| 2023-09-05T19:54:52
| 2023-09-05T19:54:52
| 131,619,646
| 8,565
| 1,154
|
Apache-2.0
| 2023-09-14T21:57:37
| 2018-04-30T16:30:04
|
Python
|
UTF-8
|
Python
| false
| false
| 711
|
py
|
env_vars_config.py
|
# start_database_example
from dagster import StringSource, job, op, resource
@resource(config_schema={"username": StringSource, "password": StringSource})
def database_client(context):
username = context.resource_config["username"]
password = context.resource_config["password"]
...
@op(required_resource_keys={"database"})
def get_one(context):
context.resources.database.execute_query("SELECT 1")
@job(
resource_defs={
"database": database_client.configured(
{
"username": {"env": "SYSTEM_USER"},
"password": {"env": "SYSTEM_PASSWORD"},
}
)
}
)
def get_one_from_db():
get_one()
# end_database_example
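# Hedged usage note (editor's addition): with the StringSource config above,
# SYSTEM_USER and SYSTEM_PASSWORD must be present in the process environment
# when the job launches, e.g. (shell command is illustrative, not from the source):
#   SYSTEM_USER=admin SYSTEM_PASSWORD=secret dagster job execute -f env_vars_config.py -j get_one_from_db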
|
07e62072363ca30653e71e494dfe4a70ab2fcdb7
|
0c118414382c8d875a39bea82a227f1f5e6e8f07
|
/logo/logo.py
|
56fe6714e9768876d3ebca83e9ecb9c0ce86a426
|
[
"CC0-1.0",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"LGPL-2.0-or-later",
"MIT",
"LGPL-2.1-only",
"GPL-2.0-only",
"GPL-1.0-or-later",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"GPL-3.0-only",
"AGPL-3.0-only",
"LGPL-3.0-only"
] |
permissive
|
nschloe/awesome-scientific-computing
|
e1eccb4559ec1ce73383b2fd7d679170fd075132
|
8ea7bd7c8f3c0e3391971e73c57aeed7e34bb188
|
refs/heads/main
| 2023-08-19T14:13:47.621917
| 2023-04-12T15:29:54
| 2023-04-12T15:29:54
| 132,509,274
| 1,086
| 138
|
CC0-1.0
| 2023-08-15T11:38:22
| 2018-05-07T19:49:42
|
Python
|
UTF-8
|
Python
| false
| false
| 2,443
|
py
|
logo.py
|
# -*- coding: utf-8 -*-
#
import os.path
import dmsh
import meshio
import optimesh
import numpy
# from dolfin import (
# Mesh,
# XDMFFile,
# mpi_comm_world,
# Constant,
# DirichletBC,
# inner,
# grad,
# dx,
# FunctionSpace,
# TestFunction,
# TrialFunction,
# Function,
# solve
# )
def _create_mesh(filename):
poly = dmsh.Polygon(
[
[-295.0, 0.0],
[-160.0, 0.0],
[-50.0, 110.0],
[-50.0, 190.0],
[+50.0, 190.0],
[+50.0, 110.0],
[+160.0, 0.0],
[+295.0, 0.0],
[+405.0, 110.0],
[+405.0, 235.0],
[+200.0, 430.0],
[+170.0, 400.0],
[+355.0, 235.0],
[-355.0, 235.0],
[-170.0, 400.0],
[-200.0, 430.0],
[-405.0, 235.0],
[-405.0, 110.0],
]
)
geo = dmsh.Union(
[
poly,
dmsh.Circle([-295.0, 110.0], 110.0),
dmsh.Circle([+295.0, 110.0], 110.0),
dmsh.Circle([-160.0, 110.0], 110.0),
dmsh.Circle([+160.0, 110.0], 110.0),
]
)
X, cells = dmsh.generate(
geo,
35.0,
# show=True
)
X, cells = optimesh.cvt.quasi_newton_uniform_full(
X, cells, 1.0e-3, 100, verbose=True
)
X = numpy.column_stack([X[:, 0], X[:, 1], numpy.zeros(X.shape[0])])
meshio.write_points_cells(filename, X, {"triangle": cells})
return
def _main():
this_dir = os.path.dirname(os.path.abspath(__file__))
filename = os.path.join(this_dir, "sunglasses.xdmf")
_create_mesh(filename)
# mesh = Mesh()
# with XDMFFile(mpi_comm_world(), filename) as f:
# f.read(mesh)
# # Create mesh and define function space
# V = FunctionSpace(mesh, "Lagrange", 1)
# def boundary(x):
# return x[1] < 1.0e-10
# # Define boundary condition
# u0 = Constant(0.0)
# bc = DirichletBC(V, u0, "on_boundary")
# # Define variational problem
# u = TrialFunction(V)
# v = TestFunction(V)
# f = Constant(1.0)
# a = inner(grad(u), grad(v)) * dx
# L = f * v * dx
# # Compute solution
# u = Function(V)
# solve(a == L, u, bc)
# filename = os.path.join(this_dir, "solution.xdmf")
# xf = XDMFFile(filename)
# xf.write(u)
return
if __name__ == "__main__":
_main()
|
d2696d6188475be0a5f1462fd315d73c68c30b04
|
091a6200be74bf6577c86f623665bcc24e16b02b
|
/LED_Snowboard/code.py
|
2473806a920d7cfcfc978b6c4ebcd67f5e5e1d38
|
[
"MIT"
] |
permissive
|
adafruit/Adafruit_Learning_System_Guides
|
b5f7bce40a16da64e7a79d4b39de032f2cca41d4
|
5eaa7a15a437c533b89f359a25983e24bb6b5438
|
refs/heads/main
| 2023-09-05T18:31:41.621956
| 2023-09-05T15:36:09
| 2023-09-05T15:36:09
| 105,065,494
| 937
| 937
|
MIT
| 2023-09-12T18:48:53
| 2017-09-27T20:22:44
|
C
|
UTF-8
|
Python
| false
| false
| 10,608
|
py
|
code.py
|
# SPDX-FileCopyrightText: 2021 Erin St Blaine for Adafruit Industries
# SPDX-FileCopyrightText: 2021 Limor Fried for Adafruit Industries
#
# SPDX-License-Identifier: MIT
"""
LED Snowboard with Feather M4 and PropMaker Wing
Adafruit invests time and resources providing this open source code.
Please support Adafruit and open source hardware by purchasing
products from Adafruit!
Written by Erin St Blaine & Limor Fried for Adafruit Industries
Copyright (c) 2020-2021 Adafruit Industries
Licensed under the MIT license.
All text above must be included in any redistribution.
"""
# pylint: disable=import-error
# pylint: disable=no-member
import time
import board
import digitalio
from digitalio import DigitalInOut, Direction, Pull
from rainbowio import colorwheel
import adafruit_lis3dh
import neopixel
from adafruit_led_animation.helper import PixelMap
from adafruit_led_animation.sequence import AnimationSequence
from adafruit_led_animation.group import AnimationGroup
from adafruit_led_animation.animation.sparkle import Sparkle
from adafruit_led_animation.animation.rainbow import Rainbow
from adafruit_led_animation.animation.rainbowchase import RainbowChase
from adafruit_led_animation.animation.rainbowcomet import RainbowComet
from adafruit_led_animation.animation.solid import Solid
from adafruit_led_animation.animation.chase import Chase
from adafruit_led_animation.animation.comet import Comet
from adafruit_led_animation.color import (
BLACK,
RED,
GREEN,
ORANGE,
BLUE,
PURPLE,
WHITE,
)
btn = DigitalInOut(board.A1)
btn.direction = Direction.INPUT
btn.pull = Pull.UP
prev_state = btn.value
THRESHOLD = -1  # shake threshold
CARVE_THRESHOLD = 5
# Set to the length in seconds for the animations
POWER_ON_DURATION = 2
NUM_PIXELS = 211 # Number of pixels used in project
NEOPIXEL_PIN = board.D5
POWER_PIN = board.D10
enable = digitalio.DigitalInOut(POWER_PIN)
enable.direction = digitalio.Direction.OUTPUT
enable.value = False
# Set up accelerometer on I2C bus, 4G range:
i2c = board.I2C() # uses board.SCL and board.SDA
# i2c = board.STEMMA_I2C() # For using the built-in STEMMA QT connector on a microcontroller
accel = adafruit_lis3dh.LIS3DH_I2C(i2c)
accel.range = adafruit_lis3dh.RANGE_4_G
accel.set_tap(2, 15)
pixels = neopixel.NeoPixel(NEOPIXEL_PIN, NUM_PIXELS, brightness=1, auto_write=False)
pixels.fill(0) # NeoPixels off ASAP on startup
pixels.show()
#PIXEL MAPS: Used for reordering pixels so the animations can run in different configurations.
#My LED strips inside the neck are accidentally swapped left-right,
#so these maps also correct for that
#Bottom up along both sides at once
pixel_map_right = PixelMap(pixels, [
150, 151, 152, 149, 148, 147, 146, 145, 144, 143, 142,
141, 140, 139, 138, 137, 136, 135, 134, 133, 132, 131,
130, 129, 128, 127, 126, 125, 124, 123, 122, 121, 120,
119, 118, 117, 116, 115, 114, 113, 112, 111, 110, 109,
108, 107, 106, 105, 104, 103, 102, 101, 100, 99, 98, 97,
96, 95, 94, 93, 92, 91, 90, 89, 88, 87, 86, 85, 84, 83,
82, 81, 80, 79, 78, 77, 76, 75, 74, 73, 72, 71, 70, 69,
68, 67, 66, 65, 64, 63, 62, 61, 60, 59, 58, 57, 56, 55,
54, 53, 52, 51, 50, 49, 48, 47, 46,
], individual_pixels=True)
#Starts at the bottom and goes around clockwise
pixel_map_left = PixelMap(pixels, [
153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164,
165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176,
177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189,
190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202,
203, 204, 205, 206, 207, 208, 209, 210, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45,
], individual_pixels=True)
#Radiates from the center outwards like a starburst
pixel_map_radiate = PixelMap(pixels, [
206, 98, 205, 99, 207, 97, 204, 100, 208, 96, 203, 101, 209,
95, 202, 102, 210, 94, 0, 93, 92, 201, 103, 1, 91, 200, 104,
2, 90, 199, 105, 3, 89, 198, 106, 4, 88, 197, 107, 5, 87, 196,
108, 6, 86, 195, 109, 7, 85, 194, 110, 8, 84, 193, 111, 9, 83,
192, 112, 10, 82, 191, 113, 11, 81, 190, 114, 12, 80, 189, 115,
13, 79, 188, 116, 14, 78, 187, 117, 15, 77, 186, 118, 16, 76, 185,
119, 17, 75, 184, 120, 18, 74, 183, 121, 19, 73, 182, 122, 20, 72,
181, 123, 21, 71, 180, 124, 22, 70, 179, 125, 23, 69, 178, 126, 24,
68, 177, 127, 25, 67, 176, 128, 26, 66, 175, 129, 27, 65, 174, 130,
28, 64, 173, 131, 29, 63, 172, 132, 30, 62, 171, 133, 31, 61, 170, 134, 32,
60, 169, 135, 33, 69, 168, 136, 34, 58, 167, 137, 35, 57, 166, 138, 36, 56,
165, 139, 37, 55, 164, 140, 38, 54, 163, 141, 39, 53, 162, 142, 40, 52, 161,
143, 41, 51, 160, 144, 42, 50, 159, 145, 43, 49, 158, 146, 44, 48, 157, 147,
45, 47, 156, 148, 46, 46, 155, 149, 47, 154, 149, 153, 150, 152, 151,
], individual_pixels=True)
#Top down along both sides at once
pixel_map_sweep = PixelMap(pixels, [
151, 152, 150, 153, 149, 154, 148, 155, 147, 156, 146, 157, 145, 158,
144, 159, 143, 160, 142, 161, 141, 162, 140, 163, 139, 164, 138, 165,
137, 166, 136, 167, 135, 168, 134, 169, 133, 170, 132, 171, 131, 172,
130, 173, 129, 174, 128, 175, 127, 176, 126, 177, 125, 178, 124, 179,
123, 180, 122, 181, 121, 182, 120, 183, 119, 184, 118, 185, 117, 186,
116, 187, 115, 188, 114, 189, 113, 190, 112, 191, 111, 192, 110, 193,
109, 194, 108, 195, 107, 196, 106, 197, 105, 198, 104, 199, 103, 200,
102, 201, 101, 202, 100, 203, 99, 204, 98, 205, 97, 206, 96, 207, 95,
208, 94, 209, 93, 210, 92, 91, 0, 90, 1, 89, 2, 88, 3, 87, 4, 86, 5, 85,
6, 84, 7, 83, 8, 82, 9, 81, 10, 80, 11, 79, 12, 78, 13, 77, 14, 76, 15,
75, 16, 74, 17, 73, 18, 72, 19, 71, 20, 70, 21, 69, 22, 68, 23, 67, 24,
66, 25, 65, 24, 64, 25, 63, 26, 62, 27, 61, 28, 60, 29, 59, 30, 58, 31,
57, 32, 56, 33, 55, 34, 54, 35, 53, 36, 52, 37, 51, 38, 50, 39, 49, 40,
48, 41, 47, 42, 46, 43, 45, 44,
], individual_pixels=True)
pixel_map_tail = PixelMap(pixels, [
15, 75, 16, 74, 17, 73, 18, 72, 19, 71, 20, 70, 21, 69, 22, 68, 23, 67,
24, 66, 25, 65, 24, 64, 25, 63, 26, 62, 27, 61, 28, 60, 29, 59, 30, 58,
31, 57, 32, 56, 33, 55, 34, 54, 35, 53, 36, 52, 37, 51, 38, 50, 39, 49,
40, 48, 41, 47, 42, 46, 43, 45, 44,
], individual_pixels=True)
pixel_map = [
pixel_map_right,
pixel_map_left,
pixel_map_radiate,
pixel_map_sweep,
pixel_map_tail,
]
def power_on(duration):
"""
Animate NeoPixels for power on.
"""
start_time = time.monotonic() # Save start time
while True:
elapsed = time.monotonic() - start_time # Time spent
if elapsed > duration: # Past duration?
break # Stop animating
powerup.animate()
# Customize LED Animations ------------------------------------------------------
powerup = RainbowComet(pixel_map[3], speed=0, tail_length=25, bounce=False)
rainbow = Rainbow(pixel_map[2], speed=0, period=6, name="rainbow", step=2.4)
rainbow_chase = RainbowChase(pixels, speed=0, size=6, spacing=15, step=10)
rainbow_chase2 = RainbowChase(pixels, speed=0, size=10, spacing=1, step=18, name="rainbow_chase2")
chase = RainbowChase(pixel_map[3], speed=0, size=20, spacing=20)
chase2 = Chase(pixels, speed=0.1, color=ORANGE, size=6, spacing=6)
rainbow_comet = RainbowComet(pixel_map[2], speed=0, tail_length=200, bounce=True)
rainbow_comet2 = RainbowComet(
pixels, speed=0, tail_length=104, colorwheel_offset=80, bounce=True
)
rainbow_comet3 = RainbowComet(
pixel_map[2], speed=0, tail_length=80, colorwheel_offset=80, step=4, bounce=False
)
strum = RainbowComet(
pixel_map[3], speed=0, tail_length=50, bounce=False, colorwheel_offset=50, step=4
)
fuego = RainbowComet(
pixel_map[4], speed=0.05, colorwheel_offset=40, step=2, tail_length=40
)
fuego2 = RainbowComet(
pixel_map[4], speed=0.02, colorwheel_offset=40, step=2, tail_length=40
)
lava = Comet(pixel_map[4], speed=0, color=ORANGE, tail_length=40, bounce=False)
sparkle = Sparkle(pixel_map[3], speed=0.05, color=BLUE, num_sparkles=10)
sparkle2 = Sparkle(pixels, speed=0.05, color=PURPLE, num_sparkles=4)
sparkle3 = Sparkle(pixels, speed=0, color=WHITE, num_sparkles=1)
carve_left = Solid(pixel_map[0], color=GREEN)
carve_right = Solid(pixel_map[1], color=RED)
black_left = Solid(pixel_map[0], color=BLACK, name="BLACK")
black_right = Solid(pixel_map[1], color=BLACK)
# Animations Playlist - reorder as desired. AnimationGroups play at the same time
animations = AnimationSequence(
AnimationGroup(
fuego,
fuego2,
lava,
sparkle,
),
chase,
rainbow_chase2,
rainbow,
AnimationGroup(
rainbow_comet,
sparkle3,
),
AnimationGroup(
rainbow_comet2,
sparkle3,
),
AnimationGroup(
sparkle,
strum,
),
AnimationGroup(
sparkle2,
rainbow_comet3,
),
AnimationGroup(
black_left,
black_right,
),
black_left,
auto_clear=True,
auto_reset=True,
)
MODE = 0
LASTMODE = 1
i = 0
# Main loop
while True:
i = (i + 0.5) % 256 # run from 0 to 255
TILT_COLOR = colorwheel(i)
if MODE == 0: # If currently off...
enable.value = True
power_on(POWER_ON_DURATION) # Power up!
MODE = LASTMODE
elif MODE >= 1: # If not OFF MODE...
# Read button
cur_state = btn.value
if cur_state != prev_state:
if not cur_state:
animations.next()
print("BTN is down")
else:
print("BTN is up")
prev_state = cur_state
# Read accelerometer
x, y, z = accel.acceleration
accel_total = z # x=tilt, y=rotate
accel_total_y = y # x=tilt, y=rotate
print(accel_total_y)
if accel_total > THRESHOLD:
print("THRESHOLD: ", accel_total)
if MODE == 1:
animations.animate()
if animations.current_animation.name == "BLACK":
MODE = 2
if MODE == 2:
if y > CARVE_THRESHOLD:
black_right.animate()
carve_left.animate()
if y < (CARVE_THRESHOLD * -1):
black_left.animate()
carve_right.animate()
#print (MODE)
cur_state = btn.value
if cur_state != prev_state:
if not cur_state:
MODE = 1
|
eea13a23bdd37867d4da92a45d94f774c38503cd
|
5f097436903a0a8fe83fc1a5f7b19f417b47c6b1
|
/lib/utils/tless/tless_config.py
|
8b78ea2f8fb478be777afefd235861a4f2b11ca4
|
[
"Apache-2.0"
] |
permissive
|
zju3dv/clean-pvnet
|
e2fba2e0ca470d7068b977360b7aacd45f1e3a3d
|
6382aa9e3340f421c2434ad7089fc91ca725ebf1
|
refs/heads/master
| 2023-06-26T17:38:42.052848
| 2023-06-21T08:09:40
| 2023-06-21T08:09:40
| 227,833,303
| 375
| 107
|
Apache-2.0
| 2023-04-19T02:04:27
| 2019-12-13T12:17:00
|
C++
|
UTF-8
|
Python
| false
| false
| 951
|
py
|
tless_config.py
|
import numpy as np
from lib.config import cfg
mean = np.array([0.40789654, 0.44719302, 0.47026115],
dtype=np.float32).reshape(1, 1, 3)
std = np.array([0.28863828, 0.27408164, 0.27809835],
dtype=np.float32).reshape(1, 1, 3)
data_rng = np.random.RandomState(123)
eig_val = np.array([0.2141788, 0.01817699, 0.00341571],
dtype=np.float32)
eig_vec = np.array([
[-0.58752847, -0.69563484, 0.41340352],
[-0.5832747, 0.00994535, -0.81221408],
[-0.56089297, 0.71832671, 0.41158938]
], dtype=np.float32)
down_ratio = 4
num_obj_in_training_image = 6
train_w, train_h = 720, 540
ct_score = 0.2
visib_gt_min = 0.1
vsd_cost = 'step'
vsd_delta = 15
vsd_tau = 20
error_thresh_vsd = 0.3
pvnet_input_scale = cfg.tless.pvnet_input_scale
scale_train_ratio = cfg.tless.scale_train_ratio
scale_ratio = cfg.tless.scale_ratio
box_train_ratio = cfg.tless.box_train_ratio
box_ratio = cfg.tless.box_ratio
|
e5bf4c22e05f8f7a259906d667687afc020f403c
|
6cb76008db278ecc757f96b4a5c0042d9c14195c
|
/pyshopee2/client.py
|
689ca428dbd02a46deab4b4eacf3a205d218ccfd
|
[
"MIT"
] |
permissive
|
JimCurryWang/python-shopee
|
afb0414164e2a3dbc8204e356d5a04bc30c542d7
|
00a52575bff19e736973eca910c77a86b9fbf367
|
refs/heads/master
| 2022-07-11T02:32:16.084787
| 2022-06-22T09:22:15
| 2022-06-22T09:22:15
| 119,407,345
| 202
| 79
|
MIT
| 2022-06-22T09:22:16
| 2018-01-29T16:16:55
|
Python
|
UTF-8
|
Python
| false
| false
| 10,066
|
py
|
client.py
|
import os
import time
import json
import hmac, hashlib
from requests import Request, Session, exceptions
from selenium import webdriver
from urllib3.util import parse_url
import requests
from .product import Product
from .mediaspace import MediaSpace
from .shop import Shop
from .merchant import Merchant
from .order import Order
from .logistics import Logistics
from .firstmile import FirstMile
from .payment import Payment
from .discount import Discount
from .bundledeal import BundleDeal
from .addondeal import AddonDeal
from .voucher import Voucher
from .followprize import FollowPrize
from .toppicks import Toppicks
from .returns import Returns
from .accounthealth import AccountHealth
from .chat import Chat
from .public import Public
from .push import Push
# installed sub-module
registered_module = {
"product": Product,
"mediaspace": MediaSpace,
"shop": Shop,
"merchant": Merchant,
"order": Order,
"logistics": Logistics,
"firstmile" : FirstMile,
"payment" : Payment,
"discount" : Discount,
"bundledeal": BundleDeal,
"addondeal": AddonDeal,
"voucher" : Voucher,
"followprize" : FollowPrize,
"toppicks": Toppicks,
"returns" : Returns,
"accounthealth" : AccountHealth,
"chat" : Chat,
"public" : Public,
"push" : Push
# "shopcategory": ShopCategory,
# "item": Item,
# "image": Image,
# "discount": Discount,
# "order": Order,
# "logistic": Logistic,
# "returns": Returns,
# "rma": RMA,
# "public": Public,
}
class ClientMeta(type):
def __new__(mcs, name, bases, dct):
klass = super(ClientMeta, mcs).__new__(mcs, name, bases, dct)
setattr(klass, "registered_module", registered_module)
return klass
class Client(object, metaclass=ClientMeta):
__metaclass__ = ClientMeta
### declare CACHED_MODULE in __init__
# CACHED_MODULE = {}
BASE_URL = "https://partner.shopeemobile.com"
BASE_TEST_URL = "https://partner.test-stable.shopeemobile.com"
BASE_API_URL = "/api/v2/"
# PER_MINUTE_API_RATE = 1000
def __init__(self, shop_id, partner_id, partner_key, redirect_url, test_env=False, code = None ,access_token = None, refresh_token = None):
''' initialize basic params and cache class
'''
if test_env:
self.BASE_URL = self.BASE_TEST_URL
self.partner_id = int(partner_id)
self.partner_key = partner_key
self.redirect_url = redirect_url
self.host = self.BASE_URL
self.shop_id = int(shop_id)
self.code = code
self.access_token = access_token
self.refresh_token = refresh_token
self.timeout = None
self.CACHED_MODULE = {}
def __getattr__(self, name):
try:
value = super(Client, self).__getattribute__(name)
except AttributeError as e:
value = self._get_cached_module(name)
if not value:
raise e
return value
def _make_timestamp(self):
return int(time.time())
def set_access_token(self, access_token):
self.access_token = access_token
# def set_additional_parameter(self, parameter, timest, access_token, sign):
# add_param = {
# "Sign" : sign,
# "Timestamp" : timest,
# "access_token" : access_token
# }
# parameter.update(add_param)
# return parameter
def _make_default_parameter(self, timest, sign):
return {
"Partner_id": self.partner_id,
"Timestamp": timest,
"Access_token": self.access_token,
"Shop_id": self.shop_id,
"Sign": sign
}
def _make_short_default_parameter(self, timest, sign):
return {
"Partner_id": self.partner_id,
"Sign": sign,
"Timestamp": timest
}
def _api_sign(self, path, timest):
base_string = f'{self.partner_id}{path}{timest}{self.access_token}{self.shop_id}'.encode()
sign = hmac.new(self.partner_key.encode(), base_string, hashlib.sha256).hexdigest()
return sign
def _api_short_sign(self, path, timest):
base_string = f'{self.partner_id}{path}{timest}'.encode()
sign = hmac.new(self.partner_key.encode(), base_string, hashlib.sha256).hexdigest()
return sign
def _api_url(self, path):
url = self.host + path
return url
def _create_parameter_url(self, url, parameter):
        if parameter is not None:
url = url + "?"
par = ""
for param in parameter:
if par != "":
par = par + "&"
par = par + f"{param.lower()}={parameter[param]}"
return url + par
return url
def _build_request(self, uri, method, body):
method = method.upper()
headers = {'Content-Type': 'application/json'}
timest = self._make_timestamp()
uri = self.BASE_API_URL + uri
url = self.host + uri
if ("/public/" in uri) or ("/push/" in uri):
sign = self._api_short_sign(uri, timest)
parameter = self._make_short_default_parameter(timest, sign)
else:
sign = self._api_sign(uri, timest)
parameter = self._make_default_parameter(timest, sign)
# parameter = self.set_additional_parameter(parameter, sign, timest, self.access_token)
req = Request(method, url, headers=headers)
if body:
if method in ["POST", "PUT", "PATH"]:
req.json = body
else:
parameter.update(body)
req.url = self._create_parameter_url(url, parameter)
return req
def _build_response(self, resp):
'''Decoding JSON - Decode json string to python object
JSONDecodeError can happen when requests have an HTTP error code like 404 and try to parse the response as JSON
'''
        if resp.status_code // 100 == 2:  # any 2xx response counts as success
body = json.loads(resp.text)
else:
body = {"request_id": None, "error": resp.status_code, "msg": "http error code"}
return body
# if "error" not in body:
# return body
# else:
# raise AttributeError(body["error"])
def _get_cached_module(self, key):
CACHED_MODULE = self.CACHED_MODULE.get(key)
if not CACHED_MODULE:
installed = self.registered_module.get(key)
if not installed:
return None
CACHED_MODULE = installed(self)
self.CACHED_MODULE.setdefault(key, CACHED_MODULE)
return CACHED_MODULE
def execute(self, uri, method, body=None):
        ''' default timeout value will be 10 seconds
'''
#parameter = self._make_default_parameter()
if body.get("timeout"):
timeout = body.get("timeout")
body.pop("timeout")
else:
timeout = 10
#if body is not None:
#parameter.update(body)
req = self._build_request(uri, method, body)
print(req.params)
print(req.url)
prepped = req.prepare()
s = Session()
resp = s.send(prepped, timeout=timeout)
resp = self._build_response(resp)
return resp
def _sign(self, path, timest):
base_string = f'{self.partner_id}{path}{timest}'.encode()
sign = hmac.new(self.partner_key.encode(), base_string, hashlib.sha256).hexdigest()
return sign
def auth_url(self, path):
timest = self._make_timestamp()
#base_string = f'{self.partner_id}{path}{timest}'.encode()
#sign = hmac.new(self.partner_key.encode(), base_string, hashlib.sha256).hexdigest()
sign = self._sign(path, timest)
        url = self.host + path + f'?partner_id={self.partner_id}&timestamp={timest}&sign={sign}'
return sign, url
def shop_authorization(self, redirect_url):
'''
        The difference between hmac and hashlib:
        hmac uses the provided key to generate a salt and make the hash stronger, while hashlib only hashes the provided message.
        In the Shopee partner API, hmac is used for general request signing, while hashlib is used for the Authorize and CancelAuthorize modules.
'''
path = "/api/v2/shop/auth_partner"
url = self.auth_url(path)[1] + f'&redirect={redirect_url}'
return url
def get_code(self):
url = self.shop_authorization(self.redirect_url)
browser = webdriver.Chrome('c:\\chromedriver\\chromedriver.exe')
browser.get(url)
while self.redirect_url not in browser.current_url:
pass
code = parse_url(browser.current_url).query.split('&')
browser.close()
self.code = code[0] = code[0].replace('code=', '')
self.shop_id = int(code[1].replace('shop_id=', ''))
return self.code, self.shop_id
def get_token(self):
body = {'code': self.code, 'shop_id': int(self.shop_id), 'partner_id': int(self.partner_id)}
url = self.auth_url('/api/v2/auth/token/get')[1]
headers = {'Content-Type': 'application/json'}
resp = requests.post(url, json=body, headers=headers).json()
print(resp)
self.access_token = resp['access_token']
self.refresh_token = resp['refresh_token']
self.timeout = resp['expire_in']
return self.access_token, self.timeout, self.refresh_token
def get_access_token(self, shop_id, partner_id, partner_key, refresh_token):
body = {'shop_id': int(shop_id), 'partner_id': int(partner_id), 'refresh_token': refresh_token}
url = self.auth_url('/api/v2/auth/access_token/get')[1]
headers = {'Content-Type': 'application/json'}
resp = requests.post(url, json=body, headers=headers).json()
print(resp)
self.access_token = resp['access_token']
self.refresh_token = resp['refresh_token']
self.timeout = resp['expire_in']
return self.access_token, self.timeout, self.refresh_token
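# --- Hedged usage sketch (editor's addition; all credentials are placeholders) ---
# client = Client(shop_id=123456, partner_id=654321, partner_key='secret',
#                 redirect_url='https://example.com/callback', test_env=True)
# client.set_access_token('token-from-oauth-flow')
# resp = client.execute('shop/get_shop_info', 'GET', {'timeout': 15})
# The endpoint path above is an assumption based on Shopee's v2 API naming.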
|
0f216211e702d7e459323d30ed049f37c86b972e
|
952dc66c61966f099756cdb6c2d13b40352f63cc
|
/zerver/migrations/0439_fix_deleteduser_email.py
|
b9792714642b696abe1957ff8ef6ccb1d00e3637
|
[
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
zulip/zulip
|
5ae6aad35fd9f72996c0a2a9cdd674400966ebf6
|
965a25d91b6ee2db54038f5df855215fa25146b0
|
refs/heads/main
| 2023-08-28T23:43:00.971110
| 2023-08-28T16:47:09
| 2023-08-28T19:33:02
| 43,160,685
| 20,239
| 8,996
|
Apache-2.0
| 2023-09-14T20:57:47
| 2015-09-25T16:37:25
|
Python
|
UTF-8
|
Python
| false
| false
| 2,545
|
py
|
0439_fix_deleteduser_email.py
|
# Generated by Django 4.2 on 2023-04-17 18:25
from email.headerregistry import Address
from functools import lru_cache
from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.validators import validate_email
from django.db import migrations
from django.db.backends.base.schema import BaseDatabaseSchemaEditor
from django.db.migrations.state import StateApps
from django.db.models import Q
@lru_cache
def get_fake_email_domain(apps: StateApps, realm_id: int) -> str:
Realm = apps.get_model("zerver", "Realm")
realm = Realm.objects.get(id=realm_id)
# Build out realm.host
subdomain = realm.string_id
if subdomain == "":
host = settings.EXTERNAL_HOST
else:
default_host = f"{subdomain}.{settings.EXTERNAL_HOST}"
host = settings.REALM_HOSTS.get(subdomain, default_host)
# Implement get_fake_email_domain
try:
validate_email(Address(username="bot", domain=host).addr_spec)
return host
except ValidationError:
pass
try:
# Check that the fake email domain can be used to form valid email addresses.
validate_email(Address(username="bot", domain=settings.FAKE_EMAIL_DOMAIN).addr_spec)
return settings.FAKE_EMAIL_DOMAIN
except ValidationError:
raise Exception(
settings.FAKE_EMAIL_DOMAIN + " is not a valid domain. "
"Consider setting the FAKE_EMAIL_DOMAIN setting."
)
def fix_invalid_emails(apps: StateApps, schema_editor: BaseDatabaseSchemaEditor) -> None:
"""Find deleted users prior to the fix in 208c0c303405, which have
invalid delivery_email values; fixing them allows them to survive
an export/import.
"""
UserProfile = apps.get_model("zerver", "UserProfile")
invalid_users = UserProfile.objects.filter(is_active=False).filter(
Q(delivery_email__icontains="@https://") | Q(delivery_email__icontains="@http://")
)
for invalid_user in invalid_users:
local_part = invalid_user.delivery_email.split("@")[0]
invalid_user.delivery_email = (
local_part + "@" + get_fake_email_domain(apps, invalid_user.realm_id)
)
invalid_user.save(update_fields=["delivery_email"])
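# Hedged illustration (editor's addition): a broken record such as
#   "jdoe@https://chat.example.com/"
# keeps its local part and becomes "jdoe@<fake email domain>" after this fix.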
class Migration(migrations.Migration):
dependencies = [
("zerver", "0438_add_web_mark_read_on_scroll_policy_setting"),
]
operations = [
migrations.RunPython(
fix_invalid_emails, reverse_code=migrations.RunPython.noop, elidable=True
),
]
|
142924ae0a533bf207fc7287c1f211facd2d1fba
|
d5a3aa96b30a5a6a355b4e004e494a6ef41a339c
|
/dataviz/euoscar.py
|
be20c807bf5bfbe453829c308b37e3070872d6f6
|
[
"MIT"
] |
permissive
|
Udzu/pudzu
|
4c1c134503f62fd1cc08a56e257b864033b38561
|
df5019802bc32064870f31cda8397ad14868cda0
|
refs/heads/master
| 2023-07-10T06:16:35.342990
| 2023-07-04T06:28:00
| 2023-07-04T06:28:00
| 97,936,607
| 120
| 28
|
MIT
| 2021-02-21T16:15:31
| 2017-07-21T10:34:16
|
Roff
|
UTF-8
|
Python
| false
| false
| 1,891
|
py
|
euoscar.py
|
from pudzu.charts import *
from pudzu.sandbox.bamboo import *
import seaborn as sns
df = pd.read_csv("datasets/euoscar.csv").set_index('country')
winners = tmap(RGBA, sns.color_palette("Blues", 6))
# nominated = RGBA(204,85,85).brighten(0.25)  #= tmap(RGBA, sns.color_palette("Reds", 6))[1]
nominated = RGBA(255,200,200).darken(0.2)
never = RGBA(215,215,215)
def colorfn(c):
if c not in df.index:
return "white" if c in ['Sea', 'Borders'] else never
n = int(float(df.n[c].strip("*")))
if n == 0:
return nominated
elif n > 4:
return winners[-1]
else:
return winners[n]
@ignoring_exceptions
def labelfn(c):
return df.n[c].lstrip("0")
map = map_chart("maps/Europe2.png", colorfn, labelfn, label_font=arial(28, bold=True))
legend = generate_legend(
list(winners[5:0:-1]) + [nominated], [
"5+ winners", "4 winners", "3 winners", "2 winners", "1 winner", "1+ nominations",
],
font_family=partial(arial, 24),
)
chart = map.place(legend, align=(1,0), padding=10)
# title
title = Image.from_column([
Image.from_text("best international film oscar".upper(), arial(64, bold=True)),
Image.from_text("winners of the Academy Award for non-English language films produced outside the US", arial(28, italics=True)),
],
bg="white")
footer = """* includes 3 Russian-produced Soviet winners and 1 Ukrainian-produced nominee; 1 Slovak and 1 Czech winner from
Czechoslovakia; 1 winner from West Germany; and Yugoslav nominations produced in both Serbia and Croatia."""
subtitle = Image.from_text(footer, arial(24, italics=True), align="center", max_width=chart.width - 100, padding=10)
img = Image.from_column([title, chart, subtitle], bg="white", padding=2)
img.place(Image.from_text("/u/Udzu", arial(16), fg="black", bg="white", padding=5).pad((1,1,0,0), "black"), align=1, padding=10, copy=False)
img.save(f"output/euoscar.png")
|
3b1735a7cc475a6444f513299fb21cb0b13bec08
|
5bb8e08e2d2bff503b4425cffe2e60dc32499089
|
/menpo/visualize/viewmatplotlib.py
|
25f4841b5c7a3254f3067744f1be2d461b58cf07
|
[
"BSD-3-Clause"
] |
permissive
|
menpo/menpo
|
2e731560bfd480bf3008317368af35612e55ddc2
|
a61500656c4fc2eea82497684f13cc31a605550b
|
refs/heads/master
| 2023-08-15T21:04:41.768318
| 2022-01-02T22:10:04
| 2022-01-02T22:10:04
| 9,731,325
| 332
| 89
|
BSD-3-Clause
| 2022-01-02T22:10:05
| 2013-04-28T14:52:38
|
Python
|
UTF-8
|
Python
| false
| false
| 42,386
|
py
|
viewmatplotlib.py
|
import numpy as np
from menpo.visualize.base import Renderer
# The colour map used for all lines and markers
GLOBAL_CMAP = "jet"
class MatplotlibRenderer(Renderer):
r"""
Abstract class for rendering visualizations using Matplotlib.
Parameters
----------
figure_id : `int` or ``None``
A figure id or ``None``. ``None`` assumes we maintain the Matplotlib
state machine and use `plt.gcf()`.
new_figure : `bool`
If ``True``, it creates a new figure to render on.
"""
def __init__(self, figure_id, new_figure):
super(MatplotlibRenderer, self).__init__(figure_id, new_figure)
# Set up data for saving
self._supported_ext = self.figure.canvas.get_supported_filetypes().keys()
# Create the extensions map, have to add . in front of the extensions
# and map every extension to the savefig method
n_ext = len(self._supported_ext)
func_list = [lambda obj, fp, **kwargs: self.figure.savefig(fp, **obj)] * n_ext
self._extensions_map = dict(
zip(["." + s for s in self._supported_ext], func_list)
)
def get_figure(self):
r"""
Gets the figure specified by the combination of ``self.figure_id`` and
``self.new_figure``. If ``self.figure_id == None`` then ``plt.gcf()``
is used. ``self.figure_id`` is also set to the correct id of the figure
if a new figure is created.
Returns
-------
figure : Matplotlib figure object
The figure we will be rendering on.
"""
import matplotlib.pyplot as plt
if self.new_figure or self.figure_id is not None:
self.figure = plt.figure(self.figure_id)
else:
self.figure = plt.gcf()
self.figure_id = self.figure.number
return self.figure
def save_figure(
self,
filename,
format="png",
dpi=None,
face_colour="w",
edge_colour="w",
orientation="portrait",
paper_type="letter",
transparent=False,
pad_inches=0.1,
overwrite=False,
):
r"""
Method for saving the figure of the current `figure_id` to file.
Parameters
----------
filename : `str` or `file`-like object
The string path or file-like object to save the figure at/into.
format : `str`
The format to use. This must match the file path if the file path is
a `str`.
dpi : `int` > 0 or ``None``, optional
The resolution in dots per inch.
face_colour : See Below, optional
The face colour of the figure rectangle.
Example options ::
{``r``, ``g``, ``b``, ``c``, ``m``, ``k``, ``w``}
or
``(3, )`` `ndarray`
or
`list` of len 3
edge_colour : See Below, optional
The edge colour of the figure rectangle.
Example options ::
{``r``, ``g``, ``b``, ``c``, ``m``, ``k``, ``w``}
or
``(3, )`` `ndarray`
or
`list` of len 3
orientation : {``portrait``, ``landscape``}, optional
The page orientation.
paper_type : See Below, optional
The type of the paper.
Example options ::
{``letter``, ``legal``, ``executive``, ``ledger``,
``a0`` through ``a10``, ``b0` through ``b10``}
transparent : `bool`, optional
If ``True``, the axes patches will all be transparent; the figure
patch will also be transparent unless `face_colour` and/or
`edge_colour` are specified. This is useful, for example, for
displaying a plot on top of a coloured background on a web page.
The transparency of these patches will be restored to their original
values upon exit of this function.
pad_inches : `float`, optional
Amount of padding around the figure.
overwrite : `bool`, optional
If ``True``, the file will be overwritten if it already exists.
"""
from menpo.io.output.base import _export
save_fig_args = {
"dpi": dpi,
"facecolour": face_colour,
"edgecolour": edge_colour,
"orientation": orientation,
"papertype": paper_type,
"format": format,
"transparent": transparent,
"pad_inches": pad_inches,
"bbox_inches": "tight",
"frameon": None,
}
# Use the export code so that we have a consistent interface
_export(
save_fig_args, filename, self._extensions_map, format, overwrite=overwrite
)
def clear_figure(self):
r"""
Method for clearing the current figure.
"""
self.figure.clf()
def force_draw(self):
r"""
Method for forcing the current figure to render.
"""
import matplotlib.pyplot as plt
plt.show()
class MatplotlibSubplots(object):
def _subplot_layout(self, num_subplots):
if num_subplots < 2:
return [1, 1]
while self._is_prime(num_subplots) and num_subplots > 4:
num_subplots += 1
p = self._factor(num_subplots)
if len(p) == 1:
p.insert(0, 1)
return p
while len(p) > 2:
if len(p) >= 4:
p[0] = p[0] * p[-2]
p[1] = p[1] * p[-1]
del p[-2:]
else:
p[0] = p[0] * p[1]
del p[1]
p.sort()
# Reformat if the column/row ratio is too large: we want a roughly
# square design
while (p[1] / p[0]) > 2.5:
p = self._subplot_layout(num_subplots + 1)
return p
def _factor(self, n):
gaps = [1, 2, 2, 4, 2, 4, 2, 4, 6, 2, 6]
length, cycle = 11, 3
f, fs, next_ind = 2, [], 0
while f * f <= n:
while n % f == 0:
fs.append(f)
n /= f
f += gaps[next_ind]
next_ind += 1
if next_ind == length:
next_ind = cycle
if n > 1:
fs.append(n)
return fs
def _is_prime(self, n):
if n == 2 or n == 3:
return True
if n < 2 or n % 2 == 0:
return False
if n < 9:
return True
if n % 3 == 0:
return False
r = int(n ** 0.5)
f = 5
while f <= r:
if n % f == 0:
return False
if n % (f + 2) == 0:
return False
f += 6
return True
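# Hedged illustration (editor's addition): _subplot_layout above chooses a
# near-square grid for multi-channel images, e.g.
#   MatplotlibSubplots()._subplot_layout(6)  # -> [2, 3]
#   MatplotlibSubplots()._subplot_layout(3)  # -> [1, 3]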
def _parse_cmap(cmap_name=None, image_shape_len=3):
import matplotlib.cm as cm
if cmap_name is not None:
return cm.get_cmap(cmap_name)
else:
if image_shape_len == 2:
# Single channels are viewed in Gray by default
return cm.gray
else:
return None
def _parse_axes_limits(min_x, max_x, min_y, max_y, axes_x_limits, axes_y_limits):
if isinstance(axes_x_limits, int):
axes_x_limits = float(axes_x_limits)
if isinstance(axes_y_limits, int):
axes_y_limits = float(axes_y_limits)
if isinstance(axes_x_limits, float):
pad = (max_x - min_x) * axes_x_limits
axes_x_limits = [min_x - pad, max_x + pad]
if isinstance(axes_y_limits, float):
pad = (max_y - min_y) * axes_y_limits
axes_y_limits = [min_y - pad, max_y + pad]
return axes_x_limits, axes_y_limits
def _set_axes_options(
ax,
render_axes=True,
inverted_y_axis=False,
axes_font_name="sans-serif",
axes_font_size=10,
axes_font_style="normal",
axes_font_weight="normal",
axes_x_limits=None,
axes_y_limits=None,
axes_x_ticks=None,
axes_y_ticks=None,
axes_x_label=None,
axes_y_label=None,
title=None,
):
if render_axes:
# render axes
ax.set_axis_on()
# set font options
for l in ax.get_xticklabels() + ax.get_yticklabels():
l.set_fontsize(axes_font_size)
l.set_fontname(axes_font_name)
l.set_fontstyle(axes_font_style)
l.set_fontweight(axes_font_weight)
# set ticks
if axes_x_ticks is not None:
ax.set_xticks(axes_x_ticks)
if axes_y_ticks is not None:
ax.set_yticks(axes_y_ticks)
# set labels and title
if axes_x_label is None:
axes_x_label = ""
if axes_y_label is None:
axes_y_label = ""
if title is None:
title = ""
ax.set_xlabel(
axes_x_label,
fontsize=axes_font_size,
fontname=axes_font_name,
fontstyle=axes_font_style,
fontweight=axes_font_weight,
)
ax.set_ylabel(
axes_y_label,
fontsize=axes_font_size,
fontname=axes_font_name,
fontstyle=axes_font_style,
fontweight=axes_font_weight,
)
ax.set_title(
title,
fontsize=axes_font_size,
fontname=axes_font_name,
fontstyle=axes_font_style,
fontweight=axes_font_weight,
)
else:
# do not render axes
ax.set_axis_off()
# also remove the ticks to get rid of the white area
ax.set_xticks([])
ax.set_yticks([])
# set axes limits
if axes_x_limits is not None:
ax.set_xlim(np.sort(axes_x_limits))
if axes_y_limits is None:
axes_y_limits = ax.get_ylim()
if inverted_y_axis:
ax.set_ylim(np.sort(axes_y_limits)[::-1])
else:
ax.set_ylim(np.sort(axes_y_limits))
def _set_grid_options(render_grid=True, grid_line_style="--", grid_line_width=2):
    import matplotlib.pyplot as plt
    if render_grid:
        # Pass a real boolean: the strings "on"/"off" are both truthy, so
        # plt.grid("off") would actually enable the grid on newer matplotlib.
        plt.grid(True, linestyle=grid_line_style, linewidth=grid_line_width)
    else:
        plt.grid(False)
def _set_figure_size(fig, figure_size=(7, 7)):
if figure_size is not None:
fig.set_size_inches(np.asarray(figure_size))
def _set_numbering(
ax,
centers,
render_numbering=True,
numbers_horizontal_align="center",
numbers_vertical_align="bottom",
numbers_font_name="sans-serif",
numbers_font_size=10,
numbers_font_style="normal",
numbers_font_weight="normal",
numbers_font_colour="k",
):
if render_numbering:
for k, p in enumerate(centers):
ax.annotate(
str(k),
xy=(p[0], p[1]),
horizontalalignment=numbers_horizontal_align,
verticalalignment=numbers_vertical_align,
size=numbers_font_size,
family=numbers_font_name,
fontstyle=numbers_font_style,
fontweight=numbers_font_weight,
color=numbers_font_colour,
)
def _set_legend(
ax,
legend_handles,
render_legend=True,
legend_title="",
legend_font_name="sans-serif",
legend_font_style="normal",
legend_font_size=10,
legend_font_weight="normal",
legend_marker_scale=None,
legend_location=2,
legend_bbox_to_anchor=(1.05, 1.0),
legend_border_axes_pad=None,
legend_n_columns=1,
legend_horizontal_spacing=None,
legend_vertical_spacing=None,
legend_border=True,
legend_border_padding=None,
legend_shadow=False,
legend_rounded_corners=False,
):
if render_legend:
# Options related to legend's font
prop = {
"family": legend_font_name,
"size": legend_font_size,
"style": legend_font_style,
"weight": legend_font_weight,
}
# Render legend
ax.legend(
handles=legend_handles,
title=legend_title,
prop=prop,
loc=legend_location,
bbox_to_anchor=legend_bbox_to_anchor,
borderaxespad=legend_border_axes_pad,
ncol=legend_n_columns,
columnspacing=legend_horizontal_spacing,
labelspacing=legend_vertical_spacing,
frameon=legend_border,
borderpad=legend_border_padding,
shadow=legend_shadow,
fancybox=legend_rounded_corners,
markerscale=legend_marker_scale,
)
class MatplotlibImageViewer2d(MatplotlibRenderer):
def __init__(self, figure_id, new_figure, image):
super(MatplotlibImageViewer2d, self).__init__(figure_id, new_figure)
self.image = image
self.axes_list = []
def render(
self,
interpolation="bilinear",
cmap_name=None,
alpha=1.0,
render_axes=False,
axes_font_name="sans-serif",
axes_font_size=10,
axes_font_style="normal",
axes_font_weight="normal",
axes_x_limits=None,
axes_y_limits=None,
axes_x_ticks=None,
axes_y_ticks=None,
figure_size=(7, 7),
):
import matplotlib.pyplot as plt
# parse colour map argument
cmap = _parse_cmap(cmap_name=cmap_name, image_shape_len=len(self.image.shape))
# parse axes limits
axes_x_limits, axes_y_limits = _parse_axes_limits(
0.0,
self.image.shape[1],
0.0,
self.image.shape[0],
axes_x_limits,
axes_y_limits,
)
# render image
plt.imshow(self.image, cmap=cmap, interpolation=interpolation, alpha=alpha)
# store axes object
ax = plt.gca()
self.axes_list = [ax]
# set axes options
_set_axes_options(
ax,
render_axes=render_axes,
inverted_y_axis=True,
axes_font_name=axes_font_name,
axes_font_size=axes_font_size,
axes_font_style=axes_font_style,
axes_font_weight=axes_font_weight,
axes_x_limits=axes_x_limits,
axes_y_limits=axes_y_limits,
axes_x_ticks=axes_x_ticks,
axes_y_ticks=axes_y_ticks,
)
# set figure size
_set_figure_size(self.figure, figure_size)
return self
class MatplotlibImageSubplotsViewer2d(MatplotlibRenderer, MatplotlibSubplots):
def __init__(self, figure_id, new_figure, image):
super(MatplotlibImageSubplotsViewer2d, self).__init__(figure_id, new_figure)
self.image = image
self.num_subplots = self.image.shape[2]
self.plot_layout = self._subplot_layout(self.num_subplots)
self.axes_list = []
def render(
self,
interpolation="bilinear",
cmap_name=None,
alpha=1.0,
render_axes=False,
axes_font_name="sans-serif",
axes_font_size=10,
axes_font_style="normal",
axes_font_weight="normal",
axes_x_limits=None,
axes_y_limits=None,
axes_x_ticks=None,
axes_y_ticks=None,
figure_size=(7, 7),
):
import matplotlib.pyplot as plt
# parse colour map argument
cmap = _parse_cmap(cmap_name=cmap_name, image_shape_len=2)
# parse axes limits
axes_x_limits, axes_y_limits = _parse_axes_limits(
0.0,
self.image.shape[1],
0.0,
self.image.shape[0],
axes_x_limits,
axes_y_limits,
)
p = self.plot_layout
for i in range(self.image.shape[2]):
# create subplot and append the axes object
ax = plt.subplot(p[0], p[1], 1 + i)
self.axes_list.append(ax)
# render image
plt.imshow(
self.image[:, :, i], cmap=cmap, interpolation=interpolation, alpha=alpha
)
# set axes options
_set_axes_options(
ax,
render_axes=render_axes,
inverted_y_axis=True,
axes_font_name=axes_font_name,
axes_font_size=axes_font_size,
axes_font_style=axes_font_style,
axes_font_weight=axes_font_weight,
axes_x_limits=axes_x_limits,
axes_y_limits=axes_y_limits,
axes_x_ticks=axes_x_ticks,
axes_y_ticks=axes_y_ticks,
)
# set figure size
_set_figure_size(self.figure, figure_size)
return self
class MatplotlibPointGraphViewer2d(MatplotlibRenderer):
def __init__(self, figure_id, new_figure, points, edges):
super(MatplotlibPointGraphViewer2d, self).__init__(figure_id, new_figure)
self.points = points
self.edges = edges
def render(
self,
image_view=False,
render_lines=True,
line_colour="r",
line_style="-",
line_width=1,
render_markers=True,
marker_style="o",
marker_size=5,
marker_face_colour="r",
marker_edge_colour="k",
marker_edge_width=1.0,
render_numbering=False,
numbers_horizontal_align="center",
numbers_vertical_align="bottom",
numbers_font_name="sans-serif",
numbers_font_size=10,
numbers_font_style="normal",
numbers_font_weight="normal",
numbers_font_colour="k",
render_axes=True,
axes_font_name="sans-serif",
axes_font_size=10,
axes_font_style="normal",
axes_font_weight="normal",
axes_x_limits=None,
axes_y_limits=None,
axes_x_ticks=None,
axes_y_ticks=None,
figure_size=(7, 7),
label=None,
):
from matplotlib import collections as mc
import matplotlib.pyplot as plt
# Flip x and y for viewing if points are tied to an image
points = self.points[:, ::-1] if image_view else self.points
# parse axes limits
min_x, min_y = np.min(points, axis=0)
max_x, max_y = np.max(points, axis=0)
axes_x_limits, axes_y_limits = _parse_axes_limits(
min_x, max_x, min_y, max_y, axes_x_limits, axes_y_limits
)
# get current axes object
ax = plt.gca()
        # Check if the graph has edges to be rendered (for example a
        # PointCloud won't have any edges)
if render_lines and np.array(self.edges).shape[0] > 0:
# Get edges to be rendered
lines = zip(points[self.edges[:, 0], :], points[self.edges[:, 1], :])
# Draw line objects
lc = mc.LineCollection(
lines,
colors=line_colour,
linestyles=line_style,
linewidths=line_width,
cmap=GLOBAL_CMAP,
label=label,
)
ax.add_collection(lc)
            # A label should apply only to the lines of a PointGraph (one line
            # set per label); clear it here so the markers below don't add a
            # duplicate legend entry. A PointCloud has no edges, so its label
            # falls through to the markers.
label = None
ax.autoscale()
if render_markers:
plt.plot(
points[:, 0],
points[:, 1],
linewidth=0,
markersize=marker_size,
marker=marker_style,
markeredgewidth=marker_edge_width,
markeredgecolor=marker_edge_colour,
markerfacecolor=marker_face_colour,
label=label,
)
# set numbering
_set_numbering(
ax,
points,
render_numbering=render_numbering,
numbers_horizontal_align=numbers_horizontal_align,
numbers_vertical_align=numbers_vertical_align,
numbers_font_name=numbers_font_name,
numbers_font_size=numbers_font_size,
numbers_font_style=numbers_font_style,
numbers_font_weight=numbers_font_weight,
numbers_font_colour=numbers_font_colour,
)
# set axes options
_set_axes_options(
ax,
render_axes=render_axes,
inverted_y_axis=image_view,
axes_font_name=axes_font_name,
axes_font_size=axes_font_size,
axes_font_style=axes_font_style,
axes_font_weight=axes_font_weight,
axes_x_limits=axes_x_limits,
axes_y_limits=axes_y_limits,
axes_x_ticks=axes_x_ticks,
axes_y_ticks=axes_y_ticks,
)
# set equal aspect ratio
ax.set_aspect("equal", adjustable="box")
# set figure size
_set_figure_size(self.figure, figure_size)
return self
class MatplotlibLandmarkViewer2d(MatplotlibRenderer):
def __init__(self, figure_id, new_figure, group, landmark_group):
super(MatplotlibLandmarkViewer2d, self).__init__(figure_id, new_figure)
self.group = group
self.landmark_group = landmark_group
def render(
self,
image_view=False,
render_lines=True,
line_colour="r",
line_style="-",
line_width=1,
render_markers=True,
marker_style="o",
marker_size=5,
marker_face_colour="r",
marker_edge_colour="k",
marker_edge_width=1.0,
render_numbering=False,
numbers_horizontal_align="center",
numbers_vertical_align="bottom",
numbers_font_name="sans-serif",
numbers_font_size=10,
numbers_font_style="normal",
numbers_font_weight="normal",
numbers_font_colour="k",
render_legend=True,
legend_title="",
legend_font_name="sans-serif",
legend_font_style="normal",
legend_font_size=10,
legend_font_weight="normal",
legend_marker_scale=None,
legend_location=2,
legend_bbox_to_anchor=(1.05, 1.0),
legend_border_axes_pad=None,
legend_n_columns=1,
legend_horizontal_spacing=None,
legend_vertical_spacing=None,
legend_border=True,
legend_border_padding=None,
legend_shadow=False,
legend_rounded_corners=False,
render_axes=True,
axes_font_name="sans-serif",
axes_font_size=10,
axes_font_style="normal",
axes_font_weight="normal",
axes_x_limits=None,
axes_y_limits=None,
axes_x_ticks=None,
axes_y_ticks=None,
figure_size=(7, 7),
):
import matplotlib.lines as mlines
import matplotlib.pyplot as plt
        # For the per-label colours we may be passed nothing (in which case
        # colours are sampled from the global colour map), a single colour
        # for all labels, or one colour per label.
# TODO: All marker and line options could be defined as lists...
n_labels = self.landmark_group.n_labels
line_colour = _check_colours_list(
render_lines,
line_colour,
n_labels,
"Must pass a list of line colours with length n_labels or a single "
"line colour for all labels.",
)
marker_face_colour = _check_colours_list(
render_markers,
marker_face_colour,
n_labels,
"Must pass a list of marker face colours with length n_labels or "
"a single marker face colour for all labels.",
)
marker_edge_colour = _check_colours_list(
render_markers,
marker_edge_colour,
n_labels,
"Must pass a list of marker edge colours with length n_labels or "
"a single marker edge colour for all labels.",
)
# check axes limits
points = self.landmark_group.points
if image_view:
min_y, min_x = np.min(points, axis=0)
max_y, max_x = np.max(points, axis=0)
else:
min_x, min_y = np.min(points, axis=0)
max_x, max_y = np.max(points, axis=0)
axes_x_limits, axes_y_limits = _parse_axes_limits(
min_x, max_x, min_y, max_y, axes_x_limits, axes_y_limits
)
# get pointcloud of each label
sub_pointclouds = self._build_sub_pointclouds()
# initialize legend_handles list
legend_handles = []
# for each pointcloud
for i, (label, pc) in enumerate(sub_pointclouds):
# render pointcloud
pc.view(
figure_id=self.figure_id,
image_view=image_view,
render_lines=render_lines,
line_colour=line_colour[i],
line_style=line_style,
line_width=line_width,
render_markers=render_markers,
marker_style=marker_style,
marker_size=marker_size,
marker_face_colour=marker_face_colour[i],
marker_edge_colour=marker_edge_colour[i],
marker_edge_width=marker_edge_width,
render_numbering=render_numbering,
numbers_horizontal_align=numbers_horizontal_align,
numbers_vertical_align=numbers_vertical_align,
numbers_font_name=numbers_font_name,
numbers_font_size=numbers_font_size,
numbers_font_style=numbers_font_style,
numbers_font_weight=numbers_font_weight,
numbers_font_colour=numbers_font_colour,
render_axes=render_axes,
axes_font_name=axes_font_name,
axes_font_size=axes_font_size,
axes_font_style=axes_font_style,
axes_font_weight=axes_font_weight,
axes_x_limits=axes_x_limits,
axes_y_limits=axes_y_limits,
axes_x_ticks=axes_x_ticks,
axes_y_ticks=axes_y_ticks,
figure_size=None,
)
# set legend entry
if render_legend:
tmp_line = line_style
tmp_marker = marker_style if render_markers else "None"
legend_handles.append(
mlines.Line2D(
[],
[],
linewidth=line_width,
linestyle=tmp_line,
color=line_colour[i],
marker=tmp_marker,
markersize=marker_size ** 0.5,
markeredgewidth=marker_edge_width,
markeredgecolor=marker_edge_colour[i],
markerfacecolor=marker_face_colour[i],
label="{0}: {1}".format(self.group, label),
)
)
# set legend
_set_legend(
plt.gca(),
legend_handles,
render_legend=render_legend,
legend_title=legend_title,
legend_font_name=legend_font_name,
legend_font_style=legend_font_style,
legend_font_size=legend_font_size,
legend_font_weight=legend_font_weight,
legend_marker_scale=legend_marker_scale,
legend_location=legend_location,
legend_bbox_to_anchor=legend_bbox_to_anchor,
legend_border_axes_pad=legend_border_axes_pad,
legend_n_columns=legend_n_columns,
legend_horizontal_spacing=legend_horizontal_spacing,
legend_vertical_spacing=legend_vertical_spacing,
legend_border=legend_border,
legend_border_padding=legend_border_padding,
legend_shadow=legend_shadow,
legend_rounded_corners=legend_rounded_corners,
)
# set figure size
_set_figure_size(self.figure, figure_size)
# If no limits are set then ensure that all sub-pointclouds fit in the
# view
if axes_x_limits is None and axes_y_limits is None:
plt.autoscale()
return self
def _build_sub_pointclouds(self):
return [
(label, self.landmark_group.get_label(label))
for label in self.landmark_group.labels
]
class MatplotlibAlignmentViewer2d(MatplotlibRenderer):
def __init__(self, figure_id, new_figure, alignment_transform):
super(MatplotlibAlignmentViewer2d, self).__init__(figure_id, new_figure)
self.alignment_transform = alignment_transform
def render(self, image=False, **kwargs):
r"""
Visualize how points are affected by the warp in 2 dimensions.
"""
import matplotlib.pyplot as plt
source = self.alignment_transform.source.points
target = self.alignment_transform.target.points
        # factors by which the x and y ranges of the warp are expanded
x_margin_factor, y_margin_factor = 0.5, 0.5
# the number of x and y samples to take
n_x, n_y = 50, 50
        # {x y}_{min max} are the actual bounds over the source and target
        # landmarks
x_min, y_min = np.vstack([target.min(0), source.min(0)]).min(0)
x_max, y_max = np.vstack([target.max(0), source.max(0)]).max(0)
x_margin = x_margin_factor * (x_max - x_min)
y_margin = y_margin_factor * (y_max - y_min)
        # {x y}_{min max}_m are the bounds grown by the margin factor times
        # the spread in that dimension
x_min_m = x_min - x_margin
x_max_m = x_max + x_margin
y_min_m = y_min - y_margin
y_max_m = y_max + y_margin
# build sample points for the selected region
x = np.linspace(x_min_m, x_max_m, n_x)
y = np.linspace(y_min_m, y_max_m, n_y)
xx, yy = np.meshgrid(x, y)
sample_points = np.concatenate(
[xx.reshape([-1, 1]), yy.reshape([-1, 1])], axis=1
)
warped_points = self.alignment_transform.apply(sample_points)
delta = warped_points - sample_points
# plot the sample points result
        x, y = 0, 1
if image:
# if we are overlaying points onto an image,
# we have to account for the fact that axis 0 is typically
# called 'y' and axis 1 is typically called 'x'. Flip them here
x, y = y, x
plt.quiver(sample_points[:, x], sample_points[:, y], delta[:, x], delta[:, y])
delta = target - source
# plot how the landmarks move from source to target
plt.quiver(
source[:, x],
source[:, y],
delta[:, x],
delta[:, y],
angles="xy",
scale_units="xy",
scale=1,
)
# rescale to the bounds
plt.xlim((x_min_m, x_max_m))
plt.ylim((y_min_m, y_max_m))
if image:
            # if we are overlaying points on an image, axis 0 (the 'y' axis)
            # is flipped.
plt.gca().invert_yaxis()
return self
class MatplotlibGraphPlotter(MatplotlibRenderer):
def __init__(
self,
figure_id,
new_figure,
x_axis,
y_axis,
title=None,
legend_entries=None,
x_label=None,
y_label=None,
x_axis_limits=None,
y_axis_limits=None,
x_axis_ticks=None,
y_axis_ticks=None,
):
super(MatplotlibGraphPlotter, self).__init__(figure_id, new_figure)
self.x_axis = x_axis
self.y_axis = y_axis
if legend_entries is None:
legend_entries = ["Curve {}".format(i) for i in range(len(y_axis))]
self.legend_entries = legend_entries
self.title = title
self.x_label = x_label
self.y_label = y_label
self.x_axis_ticks = x_axis_ticks
self.y_axis_ticks = y_axis_ticks
# parse axes limits
min_x = np.min(x_axis)
max_x = np.max(x_axis)
min_y = np.min([np.min(l) for l in y_axis])
max_y = np.max([np.max(l) for l in y_axis])
self.x_axis_limits, self.y_axis_limits = _parse_axes_limits(
min_x, max_x, min_y, max_y, x_axis_limits, y_axis_limits
)
def render(
self,
render_lines=True,
line_colour="r",
line_style="-",
line_width=1,
render_markers=True,
marker_style="o",
marker_size=6,
marker_face_colour="r",
marker_edge_colour="k",
marker_edge_width=1.0,
render_legend=True,
legend_title="",
legend_font_name="sans-serif",
legend_font_style="normal",
legend_font_size=10,
legend_font_weight="normal",
legend_marker_scale=None,
legend_location=2,
legend_bbox_to_anchor=(1.05, 1.0),
legend_border_axes_pad=None,
legend_n_columns=1,
legend_horizontal_spacing=None,
legend_vertical_spacing=None,
legend_border=True,
legend_border_padding=None,
legend_shadow=False,
legend_rounded_corners=False,
render_axes=True,
axes_font_name="sans-serif",
axes_font_size=10,
axes_font_style="normal",
axes_font_weight="normal",
figure_size=(7, 7),
render_grid=True,
grid_line_style="--",
grid_line_width=1,
):
import matplotlib.pyplot as plt
# Check the viewer options that can be different for each plotted curve
n_curves = len(self.y_axis)
render_lines = _check_render_flag(
render_lines,
n_curves,
"Must pass a list of different "
"render_lines flag for each curve or "
"a single render_lines flag for all "
"curves.",
)
render_markers = _check_render_flag(
render_markers,
n_curves,
"Must pass a list of different "
"render_markers flag for each "
"curve or a single render_markers "
"flag for all curves.",
)
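        # NB: despite its name, _check_colours_list is a generic per-curve
        # option validator, so it is reused below for styles, widths and sizes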
line_colour = _check_colours_list(
True,
line_colour,
n_curves,
"Must pass a list of line colours with length n_curves or a single "
"line colour for all curves.",
)
line_style = _check_colours_list(
True,
line_style,
n_curves,
"Must pass a list of line styles with length n_curves or a single "
"line style for all curves.",
)
line_width = _check_colours_list(
True,
line_width,
n_curves,
"Must pass a list of line widths with length n_curves or a single "
"line width for all curves.",
)
marker_style = _check_colours_list(
True,
marker_style,
n_curves,
"Must pass a list of marker styles with length n_curves or a "
"single marker style for all curves.",
)
marker_size = _check_colours_list(
True,
marker_size,
n_curves,
"Must pass a list of marker sizes with length n_curves or a single "
"marker size for all curves.",
)
marker_face_colour = _check_colours_list(
True,
marker_face_colour,
n_curves,
"Must pass a list of marker face colours with length n_curves or a "
"single marker face colour for all curves.",
)
marker_edge_colour = _check_colours_list(
True,
marker_edge_colour,
n_curves,
"Must pass a list of marker edge colours with length n_curves or a "
"single marker edge colour for all curves.",
)
marker_edge_width = _check_colours_list(
True,
marker_edge_width,
n_curves,
"Must pass a list of marker edge widths with length n_curves or a "
"single marker edge width for all curves.",
)
# plot all curves
ax = plt.gca()
for i, y in enumerate(self.y_axis):
linestyle = line_style[i]
if not render_lines[i]:
linestyle = "None"
marker = marker_style[i]
if not render_markers[i]:
marker = "None"
plt.plot(
self.x_axis,
y,
color=line_colour[i],
linestyle=linestyle,
linewidth=line_width[i],
marker=marker,
markeredgecolor=marker_edge_colour[i],
markerfacecolor=marker_face_colour[i],
markeredgewidth=marker_edge_width[i],
markersize=marker_size[i],
label=self.legend_entries[i],
)
# set legend
_set_legend(
ax,
legend_handles=None,
render_legend=render_legend,
legend_title=legend_title,
legend_font_name=legend_font_name,
legend_font_style=legend_font_style,
legend_font_size=legend_font_size,
legend_font_weight=legend_font_weight,
legend_marker_scale=legend_marker_scale,
legend_location=legend_location,
legend_bbox_to_anchor=legend_bbox_to_anchor,
legend_border_axes_pad=legend_border_axes_pad,
legend_n_columns=legend_n_columns,
legend_horizontal_spacing=legend_horizontal_spacing,
legend_vertical_spacing=legend_vertical_spacing,
legend_border=legend_border,
legend_border_padding=legend_border_padding,
legend_shadow=legend_shadow,
legend_rounded_corners=legend_rounded_corners,
)
# set axes options
_set_axes_options(
ax,
render_axes=render_axes,
inverted_y_axis=False,
axes_font_name=axes_font_name,
axes_font_size=axes_font_size,
axes_font_style=axes_font_style,
axes_font_weight=axes_font_weight,
axes_x_limits=self.x_axis_limits,
axes_y_limits=self.y_axis_limits,
axes_x_ticks=self.x_axis_ticks,
axes_y_ticks=self.y_axis_ticks,
axes_x_label=self.x_label,
axes_y_label=self.y_label,
title=self.title,
)
# set grid options
_set_grid_options(
render_grid=render_grid,
grid_line_style=grid_line_style,
grid_line_width=grid_line_width,
)
# set figure size
_set_figure_size(self.figure, figure_size)
return self
class MatplotlibMultiImageViewer2d(MatplotlibRenderer):
def __init__(self, figure_id, new_figure, image_list):
super(MatplotlibMultiImageViewer2d, self).__init__(figure_id, new_figure)
self.image_list = image_list
def render(self, interval=50, **kwargs):
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.animation as animation
if len(self.image_list[0].shape) == 2:
# Single channels are viewed in Gray
_ax = plt.imshow(self.image_list[0], cmap=cm.Greys_r, **kwargs)
else:
_ax = plt.imshow(self.image_list[0], **kwargs)
def init():
return (_ax,)
def animate(j):
_ax.set_data(self.image_list[j])
return (_ax,)
self._ani = animation.FuncAnimation(
self.figure,
animate,
init_func=init,
frames=len(self.image_list),
interval=interval,
blit=True,
)
return self
class MatplotlibMultiImageSubplotsViewer2d(MatplotlibRenderer, MatplotlibSubplots):
def __init__(self, figure_id, new_figure, image_list):
super(MatplotlibMultiImageSubplotsViewer2d, self).__init__(
figure_id, new_figure
)
self.image_list = image_list
self.num_subplots = self.image_list[0].shape[2]
self.plot_layout = self._subplot_layout(self.num_subplots)
def render(self, interval=50, **kwargs):
import matplotlib.cm as cm
import matplotlib.animation as animation
import matplotlib.pyplot as plt
p = self.plot_layout
_axs = []
for i in range(self.image_list[0].shape[2]):
plt.subplot(p[0], p[1], 1 + i)
# Hide the x and y labels
plt.axis("off")
_ax = plt.imshow(self.image_list[0][:, :, i], cmap=cm.Greys_r, **kwargs)
_axs.append(_ax)
def init():
return _axs
def animate(j):
for k, _ax in enumerate(_axs):
_ax.set_data(self.image_list[j][:, :, k])
return _axs
self._ani = animation.FuncAnimation(
self.figure,
animate,
init_func=init,
frames=len(self.image_list),
interval=interval,
blit=True,
)
return self
def sample_colours_from_colourmap(n_colours, colour_map):
import matplotlib.pyplot as plt
cm = plt.get_cmap(colour_map)
return [cm(1.0 * i / n_colours)[:3] for i in range(n_colours)]
def _check_colours_list(render_flag, colours_list, n_objects, error_str):
if render_flag:
if colours_list is None:
            # sample colours from the global colour map (GLOBAL_CMAP)
colours_list = sample_colours_from_colourmap(n_objects, GLOBAL_CMAP)
if isinstance(colours_list, list):
if len(colours_list) == 1:
colours_list *= n_objects
elif len(colours_list) != n_objects:
raise ValueError(error_str)
else:
colours_list = [colours_list] * n_objects
else:
colours_list = [None] * n_objects
return colours_list
def _check_render_flag(render_flag, n_objects, error_str):
if isinstance(render_flag, bool):
render_flag = [render_flag] * n_objects
elif isinstance(render_flag, list):
if len(render_flag) == 1:
render_flag *= n_objects
elif len(render_flag) != n_objects:
raise ValueError(error_str)
else:
raise ValueError(error_str)
return render_flag
|
7c2ac3c03f741e7be371cce54bb0fc4affe8b007
|
6e71db141e30aab7a8b3f32650646dc55ba6cc2a
|
/src/pkgcore/ebuild/profiles.py
|
660fcea8a49e62923bc22299e8bd4d1804ccc9b3
|
[
"BSD-3-Clause"
] |
permissive
|
pkgcore/pkgcore
|
69e7692863979a86c656c7af7937df1fbee89fd2
|
ad4c3d51a2aff49ed898382c58baa852f47e17b9
|
refs/heads/master
| 2023-08-14T23:51:47.935851
| 2023-03-20T04:09:43
| 2023-08-05T19:24:15
| 13,432,875
| 107
| 35
|
BSD-3-Clause
| 2023-09-14T17:03:19
| 2013-10-09T04:16:59
|
Python
|
UTF-8
|
Python
| false
| false
| 32,300
|
py
|
profiles.py
|
__all__ = (
"ProfileError",
"ProfileNode",
"EmptyRootNode",
"OnDiskProfile",
"UserProfile",
)
import os
from collections import defaultdict, namedtuple
from functools import partial
from itertools import chain
from snakeoil import caching, klass
from snakeoil.bash import iter_read_bash, read_bash_dict
from snakeoil.data_source import local_source
from snakeoil.fileutils import readlines_utf8
from snakeoil.mappings import ImmutableDict
from snakeoil.osutils import abspath, pjoin
from snakeoil.sequences import split_negations, stable_unique
from ..config import errors
from ..config.hint import ConfigHint
from ..fs.livefs import sorted_scan
from ..log import logger
from . import const, cpv
from . import errors as ebuild_errors
from . import misc, repo_objs
from .atom import atom
from .eapi import EAPI, get_eapi
class ProfileError(errors.ParsingError):
def __init__(self, path, filename, error):
self.path, self.filename, self.error = path, filename, error
def __str__(self):
if self.filename:
return f"failed parsing {self.filename!r} in {self.path!r}: {self.error}"
return f"failed parsing {self.path!r}: {self.error}"
class NonexistentProfile(ProfileError):
"""Profile for a nonexistent directory."""
def __init__(self, path):
super().__init__(path, "", "nonexistent profile directory")
def _read_profile_files(files, allow_line_cont=False):
"""Read all the given data files."""
for path in files:
# determine file path relative to the profiles dir
try:
relpath = path.split("/profiles/")[1]
except IndexError:
# profiles base path
relpath = os.path.basename(path)
for lineno, line in iter_read_bash(
path, allow_line_cont=allow_line_cont, enum_line=True
):
yield line, lineno, relpath
def load_property(
filename,
*,
read_func=_read_profile_files,
fallback=(),
parse_func=lambda x: x,
allow_line_cont=False,
allow_recurse=False,
eapi_optional=None,
):
"""Decorator simplifying parsing profile files to generate a profile property.
:param filename: The filename to parse within that profile directory.
:keyword read_func: An invokable used to read the specified file.
:keyword fallback: What to return if the file does not exist for this profile. Must be immutable.
:keyword parse_func: An invokable used to parse the data.
:keyword allow_line_cont: Controls whether line continuations are respected.
    :keyword allow_recurse: Controls whether this specific content can be a directory
        of files rather than just a file. Only consulted when the profile is parsed
        in non-PMS-strict mode.
:keyword eapi_optional: If given, the EAPI for this profile node is checked to see if
the given optional evaluates to True; if so, then parsing occurs. If False, then
the fallback is returned and no ondisk activity occurs.
    :return: A :py:func:`klass.jit_attr_named` property instance.
"""
def f(func):
f2 = klass.jit_attr_named(f"_{func.__name__}")
return f2(
partial(
_load_and_invoke,
func,
filename,
read_func,
fallback,
allow_recurse,
allow_line_cont,
parse_func,
eapi_optional,
)
)
return f
def _load_and_invoke(
func,
filename,
read_func,
fallback,
allow_recurse,
allow_line_cont,
parse_func,
eapi_optional,
self,
):
if eapi_optional is not None and not getattr(
self.eapi.options, eapi_optional, None
):
return func(self, fallback)
profile_path = self.path.rstrip("/")
base = pjoin(profile_path, filename)
files = []
if self.pms_strict or not allow_recurse:
if os.path.exists(base):
files.append(base)
else:
# Skip hidden files and backup files, those beginning with '.' or
# ending with '~', respectively.
files.extend(sorted_scan(base, hidden=False, backup=False))
try:
if files:
if read_func is None:
data = parse_func(files)
else:
data = parse_func(read_func(files, allow_line_cont=allow_line_cont))
else:
data = fallback
return func(self, data)
except (ValueError, IndexError, EnvironmentError) as e:
raise ProfileError(profile_path, filename, e) from e
except IsADirectoryError as e:
raise ProfileError(
self.path,
filename,
"path is a directory, but this profile is PMS format- "
"directories aren't allowed. See layout.conf profile-formats "
"to enable directory support",
) from e
_make_incrementals_dict = partial(misc.IncrementalsDict, const.incrementals)
_Packages = namedtuple("_Packages", ("system", "profile"))
class ProfileNode(metaclass=caching.WeakInstMeta):
__inst_caching__ = True
_repo_map = None
def __init__(self, path, pms_strict=True):
self.path = str(path).rstrip(os.path.sep)
if not os.path.isdir(self.path):
raise NonexistentProfile(self.path)
self.pms_strict = pms_strict
def __str__(self):
return f"profile at {self.path!r}"
def __repr__(self):
return "<%s path=%r, @%#8x>" % (self.__class__.__name__, self.path, id(self))
system = klass.alias_attr("packages.system")
profile_set = klass.alias_attr("packages.profile")
@klass.jit_attr
def name(self):
"""Relative path to the profile from the profiles directory."""
try:
return self.path.split("/profiles/")[1]
except IndexError:
# profiles base path
return ""
@load_property("packages")
def packages(self, data):
repo_config = self.repoconfig
# TODO: get profile-set support into PMS
profile_set = (
repo_config is not None and "profile-set" in repo_config.profile_formats
)
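        # Example "packages" entries (atoms are illustrative) and how the
        # branches below classify them:
        #   *sys-apps/baselayout   -> system set addition
        #   -*sys-libs/obsolete    -> system set removal
        #   -*                     -> negated wildcard, clears inherited sets
        #   app-misc/foo           -> profile set addition (profile-set format)
        #   -app-misc/foo          -> profile set removal (profile-set format)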
sys, neg_sys, pro, neg_pro = [], [], [], []
neg_wildcard = False
for line, lineno, relpath in data:
try:
if line[0] == "-":
if line == "-*":
neg_wildcard = True
elif line[1] == "*":
neg_sys.append(self.eapi_atom(line[2:]))
elif profile_set:
neg_pro.append(self.eapi_atom(line[1:]))
else:
logger.error(
f"{relpath!r}: invalid line format, line {lineno}: {line!r}"
)
else:
if line[0] == "*":
sys.append(self.eapi_atom(line[1:]))
elif profile_set:
pro.append(self.eapi_atom(line))
else:
logger.error(
f"{relpath!r}: invalid line format, line {lineno}: {line!r}"
)
except ebuild_errors.MalformedAtom as e:
logger.error(f"{relpath!r}, line {lineno}: parsing error: {e}")
system = [tuple(neg_sys), tuple(sys)]
profile = [tuple(neg_pro), tuple(pro)]
if neg_wildcard:
system.append(neg_wildcard)
profile.append(neg_wildcard)
return _Packages(tuple(system), tuple(profile))
@load_property("parent")
def parent_paths(self, data):
repo_config = self.repoconfig
if repo_config is not None and "portage-2" in repo_config.profile_formats:
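            # in "portage-2" format a parent line is either a plain relative
            # path or a "repo_id:profile/path" reference into another repo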
l = []
for line, lineno, relpath in data:
repo_id, separator, profile_path = line.partition(":")
if separator:
if repo_id:
try:
location = self._repo_map[repo_id]
except KeyError:
# check if requested repo ID matches the current
# repo which could be the case when running against
# unconfigured, external repos.
if repo_id == repo_config.repo_id:
location = repo_config.location
else:
logger.error(
f"repo {repo_config.repo_id!r}: "
f"{relpath!r} (line {lineno}), "
f"bad profile parent {line!r}: "
f"unknown repo {repo_id!r}"
)
continue
l.append(
(
abspath(pjoin(location, "profiles", profile_path)),
line,
lineno,
)
)
else:
l.append((abspath(pjoin(self.path, repo_id)), line, lineno))
return tuple(l)
return tuple(
(abspath(pjoin(self.path, line)), line, lineno)
for line, lineno, relpath in data
)
@klass.jit_attr
def parents(self):
kls = getattr(self, "parent_node_kls", self.__class__)
parents = []
for path, line, lineno in self.parent_paths:
try:
parents.append(kls(path))
except ProfileError as e:
repo_id = self.repoconfig.repo_id
logger.error(
f"repo {repo_id!r}: '{self.name}/parent' (line {lineno}), "
f"bad profile parent {line!r}: {e.error}"
)
continue
return tuple(parents)
@load_property(
"package.provided", allow_recurse=True, eapi_optional="profile_pkg_provided"
)
def pkg_provided(self, data):
def _parse_cpv(s):
try:
return cpv.VersionedCPV(s)
except cpv.InvalidCPV:
logger.error(f"invalid package.provided entry: {s!r}")
data = (x[0] for x in data)
return split_negations(data, _parse_cpv)
def _parse_atom_negations(self, data):
"""Parse files containing optionally negated package atoms."""
neg, pos = [], []
for line, lineno, relpath in data:
if line[0] == "-":
line = line[1:]
if not line:
logger.error(
f"{relpath!r}, line {lineno}: '-' negation without an atom"
)
continue
l = neg
else:
l = pos
try:
l.append(self.eapi_atom(line))
except ebuild_errors.MalformedAtom as e:
logger.error(f"{relpath!r}, line {lineno}: parsing error: {e}")
return tuple(neg), tuple(pos)
def _package_keywords_splitter(self, iterable):
"""Parse package keywords files."""
for line, lineno, relpath in iterable:
v = line.split()
try:
yield (atom(v[0]), tuple(stable_unique(v[1:])))
except ebuild_errors.MalformedAtom as e:
logger.error(f"{relpath!r}, line {lineno}: parsing error: {e}")
@load_property("package.mask", allow_recurse=True)
def masks(self, data):
return self._parse_atom_negations(data)
@load_property("package.unmask", allow_recurse=True)
def unmasks(self, data):
return self._parse_atom_negations(data)
@load_property("package.deprecated", allow_recurse=True)
def pkg_deprecated(self, data):
return self._parse_atom_negations(data)
@load_property("package.keywords", allow_recurse=True)
def keywords(self, data):
return tuple(self._package_keywords_splitter(data))
@load_property("package.accept_keywords", allow_recurse=True)
def accept_keywords(self, data):
return tuple(self._package_keywords_splitter(data))
@load_property("package.use", allow_recurse=True)
def pkg_use(self, data):
c = misc.ChunkedDataDict()
c.update_from_stream(
chain.from_iterable(self._parse_package_use(data).values())
)
c.freeze()
return c
@load_property("deprecated", read_func=None, fallback=None)
def deprecated(self, data):
if data is not None:
data = iter(readlines_utf8(data[0]))
try:
replacement = next(data).strip()
msg = "\n".join(x.lstrip("#").strip() for x in data)
data = (replacement, msg)
except StopIteration:
data = None
return data
def _parse_package_use(self, data):
d = defaultdict(list)
# split the data down ordered cat/pkg lines
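        # each line has the form "<atom> <flag> [-flag ...]",
        # e.g. "dev-lang/python sqlite -tk" (illustrative atom and flags)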
for line, lineno, relpath in data:
l = line.split()
try:
a = self.eapi_atom(l[0])
except ebuild_errors.MalformedAtom as e:
logger.error(f"{relpath!r}, line {lineno}: parsing error: {e}")
continue
if len(l) == 1:
logger.error(
f"{relpath!r}, line {lineno}: missing USE flag(s): {line!r}"
)
continue
if any(s.endswith(":") for s in l[1:]):
logger.error(
f"{relpath!r}, line {lineno}: USE_EXPAND syntax is invalid in this context: {line!r}"
)
continue
d[a.key].append(misc.chunked_data(a, *split_negations(l[1:])))
return ImmutableDict(
(k, misc._build_cp_atom_payload(v, atom(k))) for k, v in d.items()
)
def _parse_use(self, data):
c = misc.ChunkedDataDict()
data = (x[0] for x in data)
neg, pos = split_negations(data)
if neg or pos:
c.add_bare_global(neg, pos)
c.freeze()
return c
@load_property("use.force", allow_recurse=True)
def use_force(self, data):
return self._parse_use(data)
@load_property(
"use.stable.force", allow_recurse=True, eapi_optional="profile_stable_use"
)
def use_stable_force(self, data):
return self._parse_use(data)
@load_property("package.use.force", allow_recurse=True)
def pkg_use_force(self, data):
return self._parse_package_use(data)
@load_property(
"package.use.stable.force",
allow_recurse=True,
eapi_optional="profile_stable_use",
)
def pkg_use_stable_force(self, data):
return self._parse_package_use(data)
@load_property("use.mask", allow_recurse=True)
def use_mask(self, data):
return self._parse_use(data)
@load_property(
"use.stable.mask", allow_recurse=True, eapi_optional="profile_stable_use"
)
def use_stable_mask(self, data):
return self._parse_use(data)
@load_property("package.use.mask", allow_recurse=True)
def pkg_use_mask(self, data):
return self._parse_package_use(data)
@load_property(
"package.use.stable.mask",
allow_recurse=True,
eapi_optional="profile_stable_use",
)
def pkg_use_stable_mask(self, data):
return self._parse_package_use(data)
@klass.jit_attr
def masked_use(self):
c = self.use_mask
if self.pkg_use_mask:
c = c.clone(unfreeze=True)
c.update_from_stream(chain.from_iterable(self.pkg_use_mask.values()))
c.freeze()
return c
@klass.jit_attr
def stable_masked_use(self):
c = self.use_mask.clone(unfreeze=True)
if self.use_stable_mask:
c.merge(self.use_stable_mask)
if self.pkg_use_mask:
c.update_from_stream(chain.from_iterable(self.pkg_use_mask.values()))
if self.pkg_use_stable_mask:
c.update_from_stream(chain.from_iterable(self.pkg_use_stable_mask.values()))
c.freeze()
return c
@klass.jit_attr
def forced_use(self):
c = self.use_force
if self.pkg_use_force:
c = c.clone(unfreeze=True)
c.update_from_stream(chain.from_iterable(self.pkg_use_force.values()))
c.freeze()
return c
@klass.jit_attr
def stable_forced_use(self):
c = self.use_force.clone(unfreeze=True)
if self.use_stable_force:
c.merge(self.use_stable_force)
if self.pkg_use_force:
c.update_from_stream(chain.from_iterable(self.pkg_use_force.values()))
if self.pkg_use_stable_force:
c.update_from_stream(
chain.from_iterable(self.pkg_use_stable_force.values())
)
c.freeze()
return c
@load_property("make.defaults", read_func=None, fallback=None)
def make_defaults(self, data):
d = {}
if data is not None:
d.update(read_bash_dict(data[0]))
return ImmutableDict(d)
@load_property("make.defaults", read_func=None, fallback=None)
def default_env(self, data):
rendered = _make_incrementals_dict()
for parent in self.parents:
rendered.update(parent.default_env.items())
if data is not None:
data = read_bash_dict(data[0], vars_dict=rendered)
rendered.update(data.items())
return ImmutableDict(rendered)
@klass.jit_attr
def bashrc(self):
path = pjoin(self.path, "profile.bashrc")
if os.path.exists(path):
return local_source(path)
return None
@load_property("package.bashrc", allow_recurse=True)
def pkg_bashrc(self, data):
repo_config = self.repoconfig
if repo_config is None or "profile-bashrcs" not in repo_config.profile_formats:
return ()
d = defaultdict(list)
for line, lineno, relpath in data:
l = line.split()
try:
a = self.eapi_atom(l[0])
except ebuild_errors.MalformedAtom as exc:
logger.error(f"{relpath!r}, line {lineno}: parsing error: {exc}")
continue
if len(l) == 1:
logger.error(
f"{relpath!r}, line {lineno}: missing bashrc files: {line!r}"
)
continue
for filename in l[1:]:
d[a].append(local_source(pjoin(self.path, "bashrc", filename)))
return tuple((k, tuple(v)) for k, v in d.items())
@load_property("eapi", fallback="0")
def eapi(self, data):
# handle fallback
if isinstance(data, str):
return get_eapi(data)
try:
line, lineno, relpath = next(data)
except StopIteration:
relpath = pjoin(self.name, "eapi")
logger.error(f"{relpath!r}: empty file")
return get_eapi("0")
try:
next(data)
logger.error(f"{relpath!r}: multiple lines detected")
except StopIteration:
pass
eapi_str = line.strip()
if eapi_str not in EAPI.known_eapis:
logger.error(f"{relpath!r}: unknown EAPI {eapi_str!r}")
return get_eapi(eapi_str)
eapi_atom = klass.alias_attr("eapi.atom_kls")
@klass.jit_attr
def repoconfig(self):
return self._load_repoconfig_from_path(self.path)
@staticmethod
def _load_repoconfig_from_path(path):
path = abspath(path)
# strip '/' so we don't get '/usr/portage' == ('', 'usr', 'portage')
chunks = path.lstrip("/").split("/")
try:
pindex = max(idx for idx, x in enumerate(chunks) if x == "profiles")
except ValueError:
# not in a repo...
return None
repo_path = pjoin("/", *chunks[:pindex])
return repo_objs.RepoConfig(repo_path)
@classmethod
def _autodetect_and_create(cls, path):
repo_config = cls._load_repoconfig_from_path(path)
        # Note: this else looks pointless, but we avoid passing the argument
        # unless needed. Instance caching is a bit overprotective: even though
        # pms_strict defaults to True, cls(path) is not cached as the same
        # instance as cls(path, pms_strict=True).
if repo_config is not None and "pms" not in repo_config.profile_formats:
profile = cls(path, pms_strict=False)
else:
profile = cls(path)
# optimization to avoid re-parsing what we already did.
object.__setattr__(profile, "_repoconfig", repo_config)
return profile
class EmptyRootNode(ProfileNode):
__inst_caching__ = True
parents = ()
deprecated = None
    pkg_use = masked_use = stable_masked_use = forced_use = stable_forced_use = misc.ChunkedDataDict()
forced_use.freeze()
pkg_bashrc = ()
pkg_use_force = pkg_use_mask = ImmutableDict()
pkg_provided = system = profile_set = ((), ())
class ProfileStack:
_node_kls = ProfileNode
def __init__(self, profile):
self.profile = profile
self.node = self._node_kls._autodetect_and_create(profile)
@property
def arch(self):
return self.default_env.get("ARCH")
deprecated = klass.alias_attr("node.deprecated")
eapi = klass.alias_attr("node.eapi")
name = klass.alias_attr("node.name")
@klass.jit_attr
def stack(self):
def f(node):
for path, line, lineno in node.parent_paths:
try:
x = self._node_kls._autodetect_and_create(path)
except ProfileError as exc:
repo_id = node.repoconfig.repo_id
logger.error(
f"repo {repo_id!r}: '{self.name}/parent' (line {lineno}), "
f"bad profile parent {line!r}: {exc.error}"
)
continue
yield from f(x)
yield node
return tuple(f(self.node))
@klass.jit_attr
def _system_profile(self):
"""User-selected system profile.
This should map directly to the profile linked to /etc/portage/make.profile.
"""
        # prefer the main system profile; otherwise, fall back to a custom user profile
for profile in reversed(self.stack):
if not isinstance(profile, UserProfileNode):
break
return profile
def _collapse_use_dict(self, attr):
stack = (getattr(x, attr) for x in self.stack)
d = misc.ChunkedDataDict()
for mapping in stack:
d.merge(mapping)
d.freeze()
return d
@klass.jit_attr
def forced_use(self):
return self._collapse_use_dict("forced_use")
@klass.jit_attr
def masked_use(self):
return self._collapse_use_dict("masked_use")
@klass.jit_attr
def stable_forced_use(self):
return self._collapse_use_dict("stable_forced_use")
@klass.jit_attr
def stable_masked_use(self):
return self._collapse_use_dict("stable_masked_use")
@klass.jit_attr
def pkg_use(self):
return self._collapse_use_dict("pkg_use")
def _collapse_generic(self, attr, clear=False):
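        # each node value is (negations, additions[, neg_wildcard]); a truthy
        # trailing neg_wildcard (from a "-*" line) clears everything
        # accumulated so far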
s = set()
for node in self.stack:
val = getattr(node, attr)
if clear and len(val) > 2 and val[2]:
s.clear()
s.difference_update(val[0])
s.update(val[1])
return s
@klass.jit_attr
def default_env(self):
d = dict(self.node.default_env.items())
for incremental in const.incrementals:
v = d.pop(incremental, "").split()
if v:
if incremental in const.incrementals_unfinalized:
d[incremental] = tuple(v)
else:
v = misc.incremental_expansion(
v, msg_prefix=f"While expanding {incremental}, value {v!r}: "
)
if v:
d[incremental] = tuple(v)
return ImmutableDict(d.items())
@property
def profile_only_variables(self):
if "PROFILE_ONLY_VARIABLES" in const.incrementals:
return frozenset(self.default_env.get("PROFILE_ONLY_VARIABLES", ()))
return frozenset(self.default_env.get("PROFILE_ONLY_VARIABLES", "").split())
@klass.jit_attr
def use_expand(self):
"""USE_EXPAND variables defined by the profile."""
if "USE_EXPAND" in const.incrementals:
return frozenset(self.default_env.get("USE_EXPAND", ()))
return frozenset(self.default_env.get("USE_EXPAND", "").split())
@klass.jit_attr
def use(self):
"""USE flag settings for the profile."""
return tuple(list(self.default_env.get("USE", ())) + list(self.expand_use()))
def expand_use(self, env=None):
"""Expand USE_EXPAND settings to USE flags."""
if env is None:
env = self.default_env
use = []
for u in self.use_expand:
value = env.get(u)
if value is None:
continue
u2 = u.lower() + "_"
use.extend(u2 + x for x in value.split())
return tuple(use)
@property
def use_expand_hidden(self):
if "USE_EXPAND_HIDDEN" in const.incrementals:
return frozenset(self.default_env.get("USE_EXPAND_HIDDEN", ()))
return frozenset(self.default_env.get("USE_EXPAND_HIDDEN", "").split())
@property
def iuse_implicit(self):
if "IUSE_IMPLICIT" in const.incrementals:
return frozenset(self.default_env.get("IUSE_IMPLICIT", ()))
return frozenset(self.default_env.get("IUSE_IMPLICIT", "").split())
@property
def use_expand_implicit(self):
if "USE_EXPAND_IMPLICIT" in const.incrementals:
return frozenset(self.default_env.get("USE_EXPAND_IMPLICIT", ()))
return frozenset(self.default_env.get("USE_EXPAND_IMPLICIT", "").split())
@property
def use_expand_unprefixed(self):
if "USE_EXPAND_UNPREFIXED" in const.incrementals:
return frozenset(self.default_env.get("USE_EXPAND_UNPREFIXED", ()))
return frozenset(self.default_env.get("USE_EXPAND_UNPREFIXED", "").split())
@klass.jit_attr
def iuse_effective(self):
iuse_effective = []
# EAPI 5 and above allow profile defined IUSE injection (see PMS)
if self._system_profile.eapi.options.profile_iuse_injection:
iuse_effective.extend(self.iuse_implicit)
for v in self.use_expand_implicit.intersection(self.use_expand_unprefixed):
iuse_effective.extend(
self.default_env.get("USE_EXPAND_VALUES_" + v, "").split()
)
for v in self.use_expand.intersection(self.use_expand_implicit):
for x in self.default_env.get("USE_EXPAND_VALUES_" + v, "").split():
iuse_effective.append(v.lower() + "_" + x)
else:
iuse_effective.extend(self._system_profile.repoconfig.known_arches)
for v in self.use_expand:
for x in self.default_env.get("USE_EXPAND_VALUES_" + v, "").split():
iuse_effective.append(v.lower() + "_" + x)
return frozenset(iuse_effective)
@klass.jit_attr
def provides_repo(self):
# delay importing to avoid circular imports
from .repository import ProvidesRepo
pkgs = self._collapse_generic("pkg_provided")
try:
arches = self._system_profile.repoconfig.known_arches
except AttributeError:
# TODO: repoconfig is None when using fake repos
arches = ()
return ProvidesRepo(pkgs, arches)
@klass.jit_attr
def masks(self):
return frozenset(chain(self._collapse_generic("masks")))
@klass.jit_attr
def unmasks(self):
return frozenset(self._collapse_generic("unmasks"))
@klass.jit_attr
def pkg_deprecated(self):
return frozenset(chain(self._collapse_generic("pkg_deprecated")))
@klass.jit_attr
def keywords(self):
return tuple(chain.from_iterable(x.keywords for x in self.stack))
@klass.jit_attr
def accept_keywords(self):
return tuple(chain.from_iterable(x.accept_keywords for x in self.stack))
def _incremental_masks(self, stack_override=None):
if stack_override is None:
stack_override = self.stack
return tuple(node.masks for node in stack_override if any(node.masks))
def _incremental_unmasks(self, stack_override=None):
if stack_override is None:
stack_override = self.stack
return tuple(node.unmasks for node in stack_override if any(node.unmasks))
@klass.jit_attr
def bashrcs(self):
return tuple(x.bashrc for x in self.stack if x.bashrc is not None)
@klass.jit_attr
def pkg_bashrcs(self):
return tuple(chain.from_iterable(x.pkg_bashrc for x in self.stack))
bashrc = klass.alias_attr("bashrcs")
path = klass.alias_attr("node.path")
@klass.jit_attr
def system(self):
return frozenset(self._collapse_generic("system", clear=True))
@klass.jit_attr
def profile_set(self):
return frozenset(self._collapse_generic("profile_set", clear=True))
class OnDiskProfile(ProfileStack):
pkgcore_config_type = ConfigHint(
types={"basepath": "str", "profile": "str"},
required=("basepath", "profile"),
typename="profile",
)
def __init__(self, basepath, profile, load_profile_base=True):
super().__init__(pjoin(basepath, profile))
self.basepath = basepath
self.load_profile_base = load_profile_base
@staticmethod
def split_abspath(path):
path = abspath(path)
        # dropping empty chunks handles repeated '/' while also
        # suppressing the leading '/'
chunks = [x for x in path.split("/") if x]
try:
            # poor man's rindex.
pbase = max(idx for idx, x in enumerate(chunks) if x == "profiles")
except ValueError:
# no base found.
return None
return pjoin("/", *chunks[: pbase + 1]), "/".join(chunks[pbase + 1 :])
@classmethod
def from_abspath(cls, path):
vals = cls.split_abspath(path)
if vals is not None:
vals = cls(load_profile_base=True, *vals)
return vals
@klass.jit_attr
def stack(self):
l = ProfileStack.stack.function(self)
if self.load_profile_base:
l = (EmptyRootNode._autodetect_and_create(self.basepath),) + l
return l
@klass.jit_attr
def _incremental_masks(self):
stack = self.stack
if self.load_profile_base:
stack = stack[1:]
return ProfileStack._incremental_masks(self, stack_override=stack)
@klass.jit_attr
def _incremental_unmasks(self):
stack = self.stack
if self.load_profile_base:
stack = stack[1:]
return ProfileStack._incremental_unmasks(self, stack_override=stack)
class UserProfileNode(ProfileNode):
parent_node_kls = ProfileNode
def __init__(self, path, parent_path):
self.override_path = pjoin(path, parent_path)
super().__init__(path, pms_strict=False)
@klass.jit_attr
def parents(self):
return (ProfileNode(self.override_path),)
@klass.jit_attr
def parent_paths(self):
return ((self.override_path, None, None),)
class UserProfile(OnDiskProfile):
pkgcore_config_type = ConfigHint(
types={"user_path": "str", "parent_path": "str", "parent_profile": "str"},
required=("user_path", "parent_path", "parent_profile"),
typename="profile",
)
def __init__(self, user_path, parent_path, parent_profile, load_profile_base=True):
super().__init__(parent_path, parent_profile, load_profile_base)
self.node = UserProfileNode(user_path, pjoin(parent_path, parent_profile))
|
243436f618ce9bf1093ba078049a83e093ff7f37
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/security/azure-mgmt-security/azure/mgmt/security/models.py
|
47d82a3301e26bc86f008b6e6eca87aedb86651c
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 1,060
|
py
|
models.py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from .v2015_06_01_preview.models import *
from .v2017_08_01.models import *
from .v2017_08_01_preview.models import *
from .v2019_01_01.models import *
from .v2019_01_01_preview.models import *
from .v2019_08_01.models import *
from .v2020_01_01.models import *
from .v2020_01_01_preview.models import *
from .v2021_01_15_preview.models import *
from .v2021_05_01_preview.models import *
from .v2021_06_01.models import *
from .v2021_07_01_preview.models import *
from .v2021_10_01_preview.models import *
from .v2022_01_01.models import *
from .v2022_01_01_preview.models import *
from .v2022_05_01.models import *
from .v2022_07_01_preview.models import *
from .v2022_11_20_preview.models import *
from .v2023_01_01.models import *
|
75f826b56a25bf4f12a2ecca206cbda227e0a149
|
49b25a6c420048e630884a3446eb0fe2eb7a0cfc
|
/tests/commands/pcustom.py
|
1d435c8dfa0826aba6b175da57cee8e848b8357e
|
[
"MIT",
"Python-2.0"
] |
permissive
|
hugsy/gef
|
dc7a90e33bd2bcc2fada6a81dc32534e1b07461b
|
6a6e2a05ca8e08ac6845dce655a432fc4e029486
|
refs/heads/main
| 2023-09-02T08:18:02.038475
| 2023-08-26T17:00:13
| 2023-08-26T17:00:13
| 32,955,140
| 6,152
| 920
|
MIT
| 2023-09-12T14:02:46
| 2015-03-26T22:25:45
|
Python
|
UTF-8
|
Python
| false
| false
| 4,626
|
py
|
pcustom.py
|
"""
pcustom command test module
"""
import tempfile
import pathlib
from tests.utils import (
gdb_run_cmd,
gdb_run_silent_cmd,
is_64b,
_target,
GEF_DEFAULT_TEMPDIR,
GefUnitTestGeneric,
)
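# ctypes structure definitions written to a temporary .py file below so the
# `pcustom` command can discover and render them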
struct = b"""from ctypes import *
class foo_t(Structure):
_fields_ = [("a", c_int32),("b", c_int32),]
class goo_t(Structure):
_fields_ = [("a", c_int32), ("b", c_int32), ("c", POINTER(foo_t)), ("d", c_int32), ("e", c_int32),]
"""
class PcustomCommand(GefUnitTestGeneric):
"""`pcustom` command test module"""
def test_cmd_pcustom(self):
with tempfile.TemporaryDirectory(prefix=GEF_DEFAULT_TEMPDIR) as dd:
dirpath = pathlib.Path(dd).absolute()
            with tempfile.NamedTemporaryFile(dir=dirpath, suffix=".py") as fd:
fd.write(struct)
fd.seek(0)
fd.flush()
res = gdb_run_cmd("gef config pcustom.struct_path",
before=[f"gef config pcustom.struct_path {dirpath}",])
self.assertNoException(res)
self.assertIn(f"pcustom.struct_path (str) = \"{dirpath}\"", res)
res = gdb_run_cmd("pcustom", before=[f"gef config pcustom.struct_path {dirpath}",])
self.assertNoException(res)
                structline = [x for x in res.splitlines() if x.startswith(f" → {dirpath}")][0]
self.assertIn("goo_t", structline)
self.assertIn("foo_t", structline)
# bad structure name with address
res = gdb_run_cmd("pcustom meh_t 0x1337100",
before=[f"gef config pcustom.struct_path {dirpath}",])
self.assertNoException(res)
self.assertIn("Session is not active", res)
def test_cmd_pcustom_show(self):
with tempfile.TemporaryDirectory(prefix=GEF_DEFAULT_TEMPDIR) as dd:
dirpath = pathlib.Path(dd).absolute()
            with tempfile.NamedTemporaryFile(dir=dirpath, suffix=".py") as fd:
fd.write(struct)
fd.seek(0)
fd.flush()
# no address
res = gdb_run_cmd("pcustom foo_t",
before=[f"gef config pcustom.struct_path {dirpath}",])
self.assertNoException(res)
if is_64b():
self.assertIn("0000 a c_int /* size=0x4 */", res)
self.assertIn("0004 b c_int /* size=0x4 */", res)
else:
self.assertIn("0000 a c_long /* size=0x4 */", res)
self.assertIn("0004 b c_long /* size=0x4 */", res)
# with address
res = gdb_run_silent_cmd("pcustom goo_t 0x1337100", target=_target("pcustom"),
before=[f"gef config pcustom.struct_path {dirpath}",])
self.assertNoException(res)
if is_64b():
self.assertIn(f"""0x1337100+0x00 a : 3 (c_int)
0x1337100+0x04 b : 4 (c_int)
0x1337100+0x08 c : """, res)
self.assertIn(f""" 0x1337000+0x00 a : 1 (c_int)
0x1337000+0x04 b : 2 (c_int)
0x1337100+0x10 d : 12 (c_int)
0x1337100+0x14 e : 13 (c_int)""", res)
else:
self.assertIn(f"""0x1337100+0x00 a : 3 (c_long)
0x1337100+0x04 b : 4 (c_long)
0x1337100+0x08 c : """, res)
self.assertIn(f""" 0x1337000+0x00 a : 1 (c_long)
0x1337000+0x04 b : 2 (c_long)
0x1337100+0x0c d : 12 (c_long)
0x1337100+0x10 e : 13 (c_long)""", res)
# bad structure name
res = gdb_run_cmd("pcustom meh_t",
before=[f"gef config pcustom.struct_path {dirpath}",])
self.assertNoException(res)
self.assertIn("No structure named 'meh_t' found", res)
# bad structure name with address
res = gdb_run_silent_cmd("pcustom meh_t 0x1337100", target=_target("pcustom"),
before=[f"gef config pcustom.struct_path {dirpath}",])
self.assertNoException(res)
self.assertIn("No structure named 'meh_t' found", res)
|
01bd7e6c97d6e33a3bab0713f8cd6d6427bc80d7
|
94ea884a9f15f49d5f60713a2255be4f30a859cb
|
/setup.py
|
98524dcc3699b2c89c7e9a3c313999a9afd7e15f
|
[] |
no_license
|
coleifer/simpledb
|
f4780176827746c642993224deb768bd3c5ba4b2
|
65bb82531b2088a2ae87c33e3525f3f984e25c72
|
refs/heads/master
| 2022-12-22T19:39:29.536641
| 2022-12-14T02:42:14
| 2022-12-14T02:42:14
| 117,564,565
| 144
| 27
| null | 2018-02-08T14:06:27
| 2018-01-15T15:48:28
|
Python
|
UTF-8
|
Python
| false
| false
| 775
|
py
|
setup.py
|
import os
from setuptools import setup
with open(os.path.join(os.path.dirname(__file__), 'README.md')) as fh:
readme = fh.read()
setup(
name='simpledb',
version=__import__('simpledb').__version__,
description='simpledb',
long_description=readme,
author='Charles Leifer',
author_email='coleifer@gmail.com',
url='http://github.com/coleifer/simpledb/',
packages=[],
py_modules=['simpledb'],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
],
scripts=['simpledb.py'],
test_suite='tests')
|
1a1071532fdd266c041e23214ca277ba6db9d6c8
|
0588960cdda458d1f3d631921af96eb017591c59
|
/paperboy/resources/config.py
|
f71609666f0531ac8c06b96d69a4042a2ac3a5bc
|
[
"Apache-2.0"
] |
permissive
|
timkpaine/paperboy
|
c9383ca9d1582637847a211c37f72822b2ba423b
|
b27bfdbb4ed27dea597ff1d6346eb831542ae81f
|
refs/heads/main
| 2022-02-11T03:02:45.425983
| 2022-02-02T18:57:50
| 2022-02-02T18:57:50
| 152,772,708
| 245
| 29
|
Apache-2.0
| 2022-02-09T18:41:39
| 2018-10-12T15:37:49
|
Python
|
UTF-8
|
Python
| false
| false
| 853
|
py
|
config.py
|
import falcon
import json
from .base import BaseResource
class ConfigResource(BaseResource):
'''Falcon resource to get form entries'''
def __init__(self, *args, **kwargs):
super(ConfigResource, self).__init__(*args, **kwargs)
def on_get(self, req, resp):
        '''Return the JSON configuration used to create a new notebook/job/report'''
resp.content_type = 'application/json'
type = req.params.get('type', None)
if type is None:
resp.body = json.dumps(self.config.to_dict())
elif type == 'notebooks':
resp.body = json.dumps(self.db.notebooks.form())
elif type == 'jobs':
resp.body = json.dumps(self.db.jobs.form())
elif type == 'reports':
resp.body = json.dumps(self.db.reports.form())
else:
resp.status = falcon.HTTP_404
|
8b53fa21e0cd4c33363064f47bd3de6372060695
|
689475bb04dab2677d5ec30fbbbddc480876579f
|
/configs/kfiou/rotated_retinanet_hbb_kfiou_r50_fpn_1x_dota_le135.py
|
ee3d40620940cc67ab3ae13241e3c8bfbf725d0a
|
[
"Apache-2.0"
] |
permissive
|
open-mmlab/mmrotate
|
07e539936f59b0033d648dc805f0f7f6da8dc2eb
|
9ea1aeeef2da8b2cd5161b72f4e33e1e8293dcb2
|
refs/heads/main
| 2023-08-31T21:23:33.304594
| 2023-07-02T07:24:56
| 2023-07-02T07:24:56
| 460,236,391
| 1,473
| 449
|
Apache-2.0
| 2023-09-08T08:49:03
| 2022-02-17T01:19:05
|
Python
|
UTF-8
|
Python
| false
| false
| 1,070
|
py
|
rotated_retinanet_hbb_kfiou_r50_fpn_1x_dota_le135.py
|
_base_ = [
'../rotated_retinanet/rotated_retinanet_obb_r50_fpn_1x_dota_le135.py'
]
angle_version = 'le135'
model = dict(
bbox_head=dict(
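        # _delete_=True replaces the inherited bbox_head config from _base_
        # instead of merging with it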
_delete_=True,
type='KFIoURRetinaHead',
num_classes=15,
in_channels=256,
stacked_convs=4,
feat_channels=256,
assign_by_circumhbbox=angle_version,
anchor_generator=dict(
type='RotatedAnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[1.0, 0.5, 2.0],
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHAOBBoxCoder',
angle_range=angle_version,
norm_factor=1,
edge_swap=False,
proj_xy=True,
target_means=(.0, .0, .0, .0, .0),
target_stds=(1.0, 1.0, 1.0, 1.0, 1.0)),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='KFLoss', loss_weight=5.0)))
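For readers unfamiliar with the _base_ mechanism: mmcv-style configs merge dicts recursively, and _delete_=True discards the inherited keys instead of merging into them. A simplified sketch of the merge rule (the real implementation lives in mmcv/mmengine's Config class; this only illustrates the semantics):

def merge(base: dict, override: dict) -> dict:
    # Simplified sketch of mmcv-style config merging.
    override = dict(override)            # don't mutate the caller's dict
    if override.pop('_delete_', False):
        return override                  # drop the inherited dict entirely
    out = dict(base)
    for key, value in override.items():
        if isinstance(value, dict) and isinstance(out.get(key), dict):
            out[key] = merge(out[key], value)   # recurse into nested dicts
        else:
            out[key] = value                    # plain override
    return out

base = {'bbox_head': {'type': 'RRetinaHead', 'reg_decoded_bbox': True}}
child = {'bbox_head': {'_delete_': True, 'type': 'KFIoURRetinaHead'}}
assert merge(base, child)['bbox_head'] == {'type': 'KFIoURRetinaHead'}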
|
033ea91666992ed07b2661e22ccfe869b400d65b
|
a0736beb3269a71b2f5b13cafe8fb5e7f6f540f4
|
/src/lib/concurrent.py
|
44c5f37b17385cfca87609ef1e55288c101a67ca
|
[
"Apache-2.0",
"CC-BY-4.0"
] |
permissive
|
GoogleCloudPlatform/covid-19-open-data
|
d9e467fadbfc71686e30e28f3ce1d438e8fd92ba
|
1123ce02a0b4059d6acd4c4446f3f9b8335018f1
|
refs/heads/main
| 2023-08-02T23:57:12.785363
| 2022-10-23T22:26:29
| 2022-10-23T22:26:29
| 282,079,576
| 489
| 146
|
Apache-2.0
| 2022-09-05T12:00:37
| 2020-07-23T23:43:51
|
Python
|
UTF-8
|
Python
| false
| false
| 3,368
|
py
|
concurrent.py
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from concurrent.futures import ProcessPoolExecutor as Pool
from concurrent.futures import ThreadPoolExecutor as ThreadPool
from functools import partial
from multiprocessing import cpu_count, get_context
from typing import Any, Callable, Dict, Iterable, Type, Union

from pandas import DataFrame, Series

from .io import pbar


def _get_pool(pool_type: Type, max_workers: int) -> Pool:
    # Both executors get an `imap` alias so _parallel_map can treat them
    # uniformly via a single call site.
    if pool_type == ThreadPool:
        pool = ThreadPool(max_workers)
        setattr(pool, "imap", pool.map)
        return pool
    elif pool_type == Pool:
        # Spawn fresh interpreters per worker to avoid fork-related issues.
        pool = Pool(max_workers, mp_context=get_context("spawn"))
        setattr(pool, "imap", pool.map)
        return pool
    else:
        raise TypeError(f"Unknown pool type: {pool_type}")


def _parallel_map(
    pool_type: Type, map_func: Callable, map_iter: Iterable[Any], **tqdm_kwargs
) -> Iterable[Any]:
    chunk_size = tqdm_kwargs.pop("chunk_size", 1)
    max_workers = tqdm_kwargs.pop("max_workers", min(32, cpu_count() + 4))
    total = tqdm_kwargs.pop("total", len(map_iter) if hasattr(map_iter, "__len__") else None)
    progress_bar = pbar(total=total, **tqdm_kwargs)
    with _get_pool(pool_type, max_workers) as pool:
        for result in pool.imap(map_func, map_iter, chunksize=chunk_size):
            progress_bar.update(1)
            yield result
    progress_bar.close()


def process_map(map_func: Callable, map_iter: Iterable[Any], **tqdm_kwargs):
    return _parallel_map(Pool, map_func, map_iter, **tqdm_kwargs)


def thread_map(map_func: Callable, map_iter: Iterable[Any], **tqdm_kwargs):
    return _parallel_map(ThreadPool, map_func, map_iter, **tqdm_kwargs)


def parallel_apply(
    data: Union[DataFrame, Series], map_func: Callable, index: bool = False, **tqdm_kwargs
) -> Iterable[Any]:
    # Mirror Series/DataFrame apply semantics: optionally pass the index along.
    if isinstance(data, Series):
        map_iter = data.items() if index else data.values
    elif isinstance(data, DataFrame):
        map_iter = data.iterrows() if index else data.values
    else:
        raise TypeError(f"Expected Series or DataFrame, found {type(data)}")
    return thread_map(map_func, map_iter, total=len(data), **tqdm_kwargs)


def _parallel_column_func(
    data: DataFrame, map_func_dict: Dict[str, Callable], column: str
) -> Iterable[Any]:
    return data[column].apply(map_func_dict[column])


def parallel_column_process(
    data: DataFrame, map_func_dict: Dict[str, Callable], **tqdm_kwargs
) -> DataFrame:
    # Apply one function per column, each column running on its own thread.
    map_iter = list(map_func_dict.keys())
    map_func = partial(_parallel_column_func, data, map_func_dict)
    map_opts = {"total": len(map_iter), "disable": True, **tqdm_kwargs}
    data_out = DataFrame(index=data.index, columns=map_iter)
    for name, values in zip(map_iter, thread_map(map_func, map_iter, **map_opts)):
        data_out[name] = values
    return data_out
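A quick usage sketch of parallel_apply (the data is hypothetical, and it assumes pbar forwards keyword arguments such as desc to tqdm, which the import suggests):

import pandas as pd

df = pd.DataFrame({"value": range(100)})
# Runs the lambda across a thread pool while updating a progress bar.
squares = list(parallel_apply(df["value"], lambda x: x * x, desc="squaring"))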
|