id int64 0 300k | label stringlengths 1 74 ⌀ | text stringlengths 4k 8k |
|---|---|---|
1,000 | get converter | import numpy as np
from qutip import convert_unit, clebsch, n_thermal
import qutip.utilities as utils
from functools import partial
import pytest
# n_thermal(w, w_th) is a thermal occupation number: the tabulated cases show
# w = log(2) -> 1, log(3) -> 1/2, log(4) -> 1/3 (i.e. 1 / (exp(w/w_th) - 1)),
# while zero energy or negative temperature yield 0, and arrays broadcast.
@pytest.mark.parametrize(['w', 'w_th', 'expected'], [
    pytest.param(np.log(2), 1, 1, id='log(2)'),
    pytest.param(np.log(2)*5, 5, 1, id='5*log(2)'),
    pytest.param(0, 1, 0, id="0_energy"),
    pytest.param(1, -1, 0, id="neg_temp"),
    pytest.param(np.array([np.log(2), np.log(3), np.log(4)]), 1,
                 np.array([1, 1/2, 1/3]), id="array"),
])
def test_n_thermal(w, w_th, expected):
    """Check thermal occupation numbers against hand-computed values."""
    np.testing.assert_allclose(n_thermal(w, w_th), expected)
def METHOD_NAME(orig, target):
    """Return a unit-conversion callable from *orig* to *target*.

    Prefers the dedicated ``convert_{orig}_to_{target}`` helper from
    ``qutip.utilities`` when it exists (so those helpers get coverage);
    otherwise falls back to a ``partial`` of the generic ``convert_unit``.
    """
    try:
        func = getattr(utils, f'convert_{orig}_to_{target}')
    except AttributeError:
        # No dedicated helper for this unit pair; use the generic converter.
        func = partial(convert_unit, orig=orig, to=target)
    return func
@pytest.mark.parametrize('orig', ["J", "eV", "meV", "GHz", "mK"])
@pytest.mark.parametrize('target', ["J", "eV", "meV", "GHz", "mK"])
def test_unit_conversions(orig, target):
    """A random value converted to another unit and back must round-trip."""
    value = np.random.rand() * 100.0
    # Round trip through the generic converter.
    converted = convert_unit(value, orig=orig, to=target)
    restored = convert_unit(converted, orig=target, to=orig)
    assert value == pytest.approx(restored)
    # Round trip through the dedicated helpers (when available).
    converted = METHOD_NAME(orig=orig, target=target)(value)
    restored = METHOD_NAME(orig=target, target=orig)(converted)
    assert value == pytest.approx(restored)
@pytest.mark.parametrize('orig', ["J", "eV", "meV", "GHz", "mK"])
@pytest.mark.parametrize('middle', ["J", "eV", "meV", "GHz", "mK"])
@pytest.mark.parametrize('target', ["J", "eV", "meV", "GHz", "mK"])
def test_unit_conversions_loop(orig, middle, target):
    """Converting via an intermediate unit must still round-trip exactly."""
    value = np.random.rand() * 100.0
    via_middle = convert_unit(value, orig=orig, to=middle)
    at_target = convert_unit(via_middle, orig=middle, to=target)
    restored = convert_unit(at_target, orig=target, to=orig)
    assert value == pytest.approx(restored)
def test_unit_conversions_bad_unit():
    """convert_unit raises TypeError for an unknown unit on either side."""
    with pytest.raises(TypeError):
        convert_unit(10, orig="bad", to="J")
    with pytest.raises(TypeError):
        convert_unit(10, orig="J", to="bad")
@pytest.mark.parametrize('j1', [0.5, 1.0, 1.5, 2.0, 5, 7.5, 10, 12.5])
@pytest.mark.parametrize('j2', [0.5, 1.0, 1.5, 2.0, 5, 7.5, 10, 12.5])
def test_unit_clebsch_delta_j(j1, j2):
    """sum_m1 sum_m2 C(j1,j2,j3,m1,m2,m3) * C(j1,j2,j3',m1,m2,m3') =
    delta j3,j3' delta m3,m3'"""
    for _ in range(10):
        # Randomly sample two allowed (j3, m3) pairs from the triangle rule
        # range |j1-j2| .. j1+j2 and the projection range -j3 .. j3.
        j3 = np.random.choice(np.arange(abs(j1-j2), j1+j2+1))
        j3p = np.random.choice(np.arange(abs(j1-j2), j1+j2+1))
        m3 = np.random.choice(np.arange(-j3, j3+1))
        m3p = np.random.choice(np.arange(-j3p, j3p+1))
        sum_match = 0
        sum_differ = 0
        # Accumulate the orthogonality sums over all m1, m2 projections.
        for m1 in np.arange(-j1, j1+1):
            for m2 in np.arange(-j2, j2+1):
                c1 = clebsch(j1, j2, j3, m1, m2, m3)
                c2 = clebsch(j1, j2, j3p, m1, m2, m3p)
                sum_match += c1**2
                sum_differ += c1*c2
        # Same-state sum must be 1; cross-state sum is the Kronecker delta.
        assert sum_match == pytest.approx(1)
        assert sum_differ == pytest.approx(int(j3 == j3p and m3 == m3p))
@pytest.mark.parametrize('j1', [0.5, 1.0, 1.5, 2.0, 5, 7.5, 10, 12.5])
@pytest.mark.parametrize('j2', [0.5, 1.0, 1.5, 2.0, 5, 7.5, 10, 12.5])
def test_unit_clebsch_delta_m(j1, j2):
    """sum_j3 sum_m3 C(j1,j2,j3,m1,m2,m3)*C(j1,j2,j3,m1',m2',m3) =
    delta m1,m1' delta m2,m2'"""
    for _ in range(10):
        # Randomly sample two (m1, m2) projection pairs in range.
        m1 = np.random.choice(np.arange(-j1, j1+1))
        m1p = np.random.choice(np.arange(-j1, j1+1))
        m2 = np.random.choice(np.arange(-j2, j2+1))
        m2p = np.random.choice(np.arange(-j2, j2+1))
        sum_match = 0
        sum_differ = 0
        # Accumulate over all coupled (j3, m3) states allowed by the
        # triangle rule.
        for j3 in np.arange(abs(j1-j2), j1+j2+1):
            for m3 in np.arange(-j3, j3+1):
                c1 = clebsch(j1, j2, j3, m1, m2, m3)
                c2 = clebsch(j1, j2, j3, m1p, m2p, m3)
                sum_match += c1**2
                sum_differ += c1*c2
        # Same-state sum must be 1; cross-state sum is the Kronecker delta.
        assert sum_match == pytest.approx(1)
        assert sum_differ == pytest.approx(int(m1 == m1p and m2 == m2p))
def test_cpu_count(monkeypatch):
    """available_cpu_count honours the QUTIP_NUM_PROCESSES override."""
    from qutip.settings import available_cpu_count
    ncpus = available_cpu_count()
    assert isinstance(ncpus, int)
    assert ncpus >= 1
    # An explicit positive override is returned verbatim.
    monkeypatch.setenv("QUTIP_NUM_PROCESSES", str(ncpus + 2))
    new_ncpus = available_cpu_count()
    assert new_ncpus == ncpus + 2
    # A zero override falls back to autodetection (always at least one CPU).
    monkeypatch.setenv("QUTIP_NUM_PROCESSES", str(0))
    new_ncpus = available_cpu_count()
    assert new_ncpus >= 1
1,001 | kind | from .context import Context as Context
from .query import (
AND as AND, OR as OR, Query as Query, Node as Node,
ConjunctionNode as ConjunctionNode, DisjunctionNode as DisjunctionNode,
FilterNode as FilterNode)
from google.cloud import datastore
from redis import StrictRedis
from typing import (
Any, Callable, Dict, Generic, Iterable, Iterator, List, Literal, Optional,
Sequence, Type, TypeVar, Tuple, Union, overload)
# Generic type variable so classmethods return the concrete Model subclass.
TYPE_MODEL = TypeVar('TYPE_MODEL', bound='Model')
class Client:
    # Stub of the NDB client entry point: context() opens a runtime context
    # in which datastore operations may be performed.
    def context(
        self,
        namespace: Optional[str],
        global_cache: Optional[RedisCache]
    ) -> Context: ...
# Model Stubs
class Model(type):
    # NOTE(review): subclassing ``type`` makes Model a metaclass in this stub;
    # the runtime ndb.Model is a plain class — confirm this is intentional.
    key: Key = ...  # public entity key
    _key: Key = ...  # internal key attribute
    _values: Dict[str, Any] = ...  # raw property values by name
    _properties: Dict[str, Any] = ...  # property descriptors by name
    def __init__(*args: Any, **kwds: Any) -> None: ...
    def populate(self, **constructor_options: Any) -> None: ...
    def to_dict(
        self, exclude: Optional[List[str]] = None
    ) -> Dict[str, Any]: ...
    @classmethod
    def query(cls: Type[TYPE_MODEL], *args: Any, **kwds: Any) -> Query: ...
    def put(self, **ctx_options: Any) -> None: ...
    @classmethod
    def get_by_id(
        cls: Type[TYPE_MODEL], id: str, **ctx_options: Any
    ) -> TYPE_MODEL: ...
    def _pre_put_hook(self) -> None: ...
    @classmethod
    def _lookup_model(cls, METHOD_NAME: Optional[str]) -> TYPE_MODEL: ...
    @classmethod
    def _get_kind(cls) -> str: ...
# Module-level convenience functions mirroring the google.cloud.ndb API.
def get_context(**kwds: Any) -> Context: ...
def get_multi(
    keys: List[Key], **ctx_options: Any) -> List[Optional[TYPE_MODEL]]: ...
def put_multi(
    entities: (List[TYPE_MODEL]), **ctx_options: Any) -> List[str]: ...
def delete_multi(keys: Sequence[Key], **ctx_options: Any) -> List[None]: ...
# Property Stubs
class Property(object):
    # Base descriptor stub for all model properties.
    _name: str  # datastore field name
    _repeated: bool  # True when the property stores a list of values
    def __init__(
        self, name: Optional[str] = ..., indexed: Optional[bool] = ...,
        repeated: Optional[bool] = ..., required: Optional[bool] = ...,
        default: Optional[Any] = ...,
        choices: Union[List[Any], Tuple[Any, ...], None] = ...,
        validator: Optional[Callable[..., Any]] = ...,
        verbose_name: Optional[str] = ...
    ) -> None: ...
    # NOTE(review): in ndb these comparisons build query filter nodes; the
    # declared bool return may be a stub simplification — confirm.
    def __eq__(self, value: Any) -> bool: ...
    def __ne__(self, value: Any) -> bool: ...
    def __lt__(self, value: Any) -> bool: ...
    def __le__(self, value: Any) -> bool: ...
    def __gt__(self, value: Any) -> bool: ...
    def __ge__(self, value: Any) -> bool: ...
    IN: Any = ...
    def __neg__(self) -> Any: ...
    def __pos__(self) -> Any: ...
    # Descriptor protocol: attribute access on entity instances.
    def __get__(self, entity: Any, unused_cls: Optional[Any] = ...) -> Any: ...
    def __set__(self, entity: Any, value: Any) -> None: ...
    def __delete__(self, entity: Any) -> None: ...
# Typed property subclasses; ellipsis bodies are stub placeholders.
class BooleanProperty(Property): ...
class DateTimeProperty(Property):
    def __init__(
        self, name: Optional[str] = ..., auto_now: bool = ...,
        auto_now_add: bool = ..., **kwds: Any
    ) -> None: ...
class DateProperty(DateTimeProperty): ...
class ComputedProperty(Property): ...
class IntegerProperty(Property): ...
class FloatProperty(Property): ...
# Properties with extra constructor options, plus the string/text hierarchy.
class JsonProperty(Property):
    def __init__(
        self, name: Optional[str] = ..., compressed: bool = ...,
        json_type: Optional[Any] = ..., **kwds: Any
    ) -> None: ...
class UserProperty(Property):
    def __init__(
        self, name: Optional[str] = ..., auto_current_user: bool = ...,
        auto_current_user_add: bool = ..., **kwds: Any
    ) -> None: ...
class TextProperty(Property): ...
class StringProperty(TextProperty): ...
class Cursor:
    # Opaque query cursor, constructed from a urlsafe-encoded string.
    def __init__(
        self, urlsafe: Optional[str]
    ) -> None: ...
    def urlsafe(self) -> bytes: ...
# Key Stubs
class Key:
    # Stub of the immutable datastore key.
    def __new__(cls, *_args: Any, **kwargs: Any) -> Key: ...
    def namespace(self) -> Optional[str]: ...
    def app(self) -> Optional[str]: ...
    def project(self) -> Optional[str]: ...
    # NOTE(review): ndb key ids may also be int — confirm the str-only stub.
    def id(self) -> str: ...
    def flat(self) -> Optional[Iterable[Union[str, int]]]: ...
    def METHOD_NAME(self) -> Optional[str]: ...  # kind accessor (masked name)
    def get(self, **ctx_options: Any) -> Optional[Model]: ...
    def delete(self, **ctx_options: Any) -> None: ...
    @classmethod
    def _from_ds_key(cls, ds_key: datastore.Key) -> Key: ...
class RedisCache:
    # Global-cache adapter backed by a Redis client instance.
    def __init__(self, redis_instance: StrictRedis[str]): ...
# Transaction Options Stubs
class TransactionOptions(object):
    # Transaction propagation policies; trailing comments record whether the
    # policy joins an existing transaction.
    NESTED = 1  # join=False
    MANDATORY = 2  # join=True
    ALLOWED = 3  # join=True
    INDEPENDENT = 4  # join=False
1,002 | get stream info | from logging import debug, info, warning, error, exception
import re
from datetime import datetime, timedelta
from .. import AbstractServiceHandler
from data.models import Episode, UnprocessedStream
class ServiceHandler(AbstractServiceHandler):
    """Service handler for Crunchyroll: episode feeds, stream info, lineup."""

    _show_url = "http://crunchyroll.com/{id}"
    # Raw string: "\w" in a plain literal is an invalid escape sequence.
    _show_re = re.compile(r"crunchyroll.com/([\w-]+)", re.I)
    _episode_rss = "http://crunchyroll.com/{id}.rss"
    _backup_rss = "http://crunchyroll.com/rss/anime"
    _season_url = "http://crunchyroll.com/lineup"

    def __init__(self):
        super().__init__("crunchyroll", "Crunchyroll", False)

    # Episode finding

    def get_all_episodes(self, stream, **kwargs):
        """Return digested Episode objects for all valid entries in the feed."""
        info("Getting live episodes for Crunchyroll/{}".format(stream.show_key))
        episode_datas = self._get_feed_episodes(stream.show_key, **kwargs)
        # Check data validity and digest
        episodes = []
        for episode_data in episode_datas:
            if _is_valid_episode(episode_data, stream.show_key):
                try:
                    episodes.append(_digest_episode(episode_data))
                except Exception:
                    # Narrowed from a bare except so KeyboardInterrupt and
                    # SystemExit are no longer swallowed; failures are logged.
                    exception("Problem digesting episode for Crunchyroll/{}".format(stream.show_key))
        if len(episode_datas) > 0:
            debug(" {} episodes found, {} valid".format(len(episode_datas), len(episodes)))
        else:
            debug(" No episodes found")
        return episodes

    def _get_feed_episodes(self, show_key, **kwargs):
        """
        Always returns a list.
        """
        info("Getting episodes for Crunchyroll/{}".format(show_key))
        url = self._get_feed_url(show_key)
        # Send request
        response = self.request(url, rss=True, **kwargs)
        if response is None:
            error("Cannot get latest show for Crunchyroll/{}".format(show_key))
            return list()
        # Parse RSS feed
        if not _verify_feed(response):
            warning("Parsed feed could not be verified, may have unexpected results")
        return response.get("entries", list())

    @classmethod
    def _get_feed_url(cls, show_key):
        # Sometimes shows don't have an RSS feed
        # Use the backup global feed when it doesn't
        if show_key is not None:
            return cls._episode_rss.format(id=show_key)
        else:
            debug(" Using backup feed")
            return cls._backup_rss

    # Remote info getting

    # Raw strings here too (no escapes today, but keeps the patterns safe).
    _title_fix = re.compile(r"(.*) Episodes", re.I)
    _title_fix_fr = re.compile(r"(.*) Épisodes", re.I)

    def get_stream_info(self, stream, **kwargs):
        """Fetch the stream's feed and normalise its display name in place."""
        info("Getting stream info for Crunchyroll/{}".format(stream.show_key))
        url = self._get_feed_url(stream.show_key)
        response = self.request(url, rss=True, **kwargs)
        if response is None:
            error("Cannot get feed")
            return None
        if not _verify_feed(response):
            warning("Parsed feed could not be verified, may have unexpected results")
        stream.name = response.feed.title
        # Strip the trailing " Episodes" / " Épisodes" suffix from the title.
        match = self._title_fix.match(stream.name)
        if match:
            stream.name = match.group(1)
        match = self._title_fix_fr.match(stream.name)
        if match:
            stream.name = match.group(1)
        return stream

    def get_seasonal_streams(self, **kwargs):
        """Scrape the season lineup page and return UnprocessedStream items."""
        debug("Getting season shows")
        # Request page
        response = self.request(self._season_url, html=True, **kwargs)
        if response is None:
            error("Failed to get seasonal streams page")
            return list()
        # Find sections (continuing simulcast, new simulcast, new catalog)
        lists = response.find_all(class_="lineup-grid")
        if len(lists) < 2:
            error("Unsupported structure of lineup page")
            return list()
        elif len(lists) > 3:
            # The former "len(lists) < 2 or" half of this condition was
            # unreachable after the guard above and has been removed.
            warning("Unexpected number of lineup grids")
        # Parse individual shows
        # WARNING: Some may be dramas and there's nothing distinguishing them from anime
        show_elements = lists[1].find_all(class_="element-lineup-anime")
        raw_streams = list()
        for show in show_elements:
            title = show["title"]
            if "to be announced" not in title.lower():
                debug(" Show: {}".format(title))
                url = show["href"]
                debug(" URL: {}".format(url))
                url_match = self._show_re.search(url)
                if not url_match:
                    error("Failed to parse show URL: {}".format(url))
                    continue
                key = url_match.group(1)
                debug(" Key: {}".format(key))
                remote_offset, display_offset = self.METHOD_NAME(key)
                raw_stream = UnprocessedStream(self.key, key, None, title, remote_offset, display_offset)
                raw_streams.append(raw_stream)
        return raw_streams

    def METHOD_NAME(self, show_key):
        # TODO: load show page and figure out offsets based on contents
        return 0, 0

    # Local info formatting

    def get_stream_link(self, stream):
        # Just going to assume it's the correct service
        return self._show_url.format(id=stream.show_key)

    def extract_show_key(self, url):
        """Return the show key embedded in a Crunchyroll URL, or None."""
        match = self._show_re.search(url)
        if match:
            # "series" is a URL path segment, not a real show key.
            if match.group(1) != 'series':
                return match.group(1)
        return None
# Episode feeds
def _verify_feed(feed):
    """Return True when the parsed feed is well-formed Crunchyroll RSS."""
    debug("Verifying feed")
    if feed.bozo:
        debug(" Feed was malformed")
        return False
    ns = feed.namespaces
    if "crunchyroll" not in ns or ns["crunchyroll"] != "http://www.crunchyroll.com/rss":
        debug(" Crunchyroll namespace not found or invalid")
        return False
    if feed.feed.language != "en-us":
        debug(" Language not en-us")
        return False
    debug(" Feed verified")
    return True
def _is_valid_episode(feed_episode, show_id):
    """Reject clips/PVs and episodes published more than two days ago."""
    # We don't want non-episodes (PVs, VA interviews, etc.)
    is_clip = feed_episode.get("crunchyroll_isclip", False)
    has_number = hasattr(feed_episode, "crunchyroll_episodenumber")
    if is_clip or not has_number:
        debug("Is PV, ignoring")
        return False
    # Don't check really old episodes
    episode_date = datetime(*feed_episode.published_parsed[:6])
    if datetime.utcnow() - episode_date >= timedelta(days=2):
        debug(" Episode too old")
        return False
    return True
# Raw strings: "\d" in a plain literal is an invalid escape sequence
# (SyntaxWarning on modern Python); the patterns themselves are unchanged.
_episode_name_correct = re.compile(r"Episode \d+ - (.*)")
_episode_count_fix = re.compile(r"([0-9]+)[abc]?", re.I)
def _digest_episode(feed_episode):
    """Convert a parsed feed entry into an Episode model object."""
    debug("Digesting episode")
    # Get data
    num_match = _episode_count_fix.match(feed_episode.crunchyroll_episodenumber)
    if num_match:
        num = int(num_match.group(1))
    else:
        # Unparseable episode number: fall back to 0 rather than failing.
        warning("Unknown episode number format \"{}\"".format(feed_episode.crunchyroll_episodenumber))
        num = 0
    debug(" num={}".format(num))
    name = feed_episode.title
    # Strip the redundant "Episode N - " prefix from the title when present.
    match = _episode_name_correct.match(name)
    if match:
        debug(" Corrected title from \"{}\"".format(name))
        name = match.group(1)
    debug(" name={}".format(name))
    link = feed_episode.link
    debug(" link={}".format(link))
    date = feed_episode.published_parsed
    debug(" date={}".format(date))
    return Episode(num, name, link, date)
_slug_regex = re.compile(r"crunchyroll.com/([a-z0-9-]+)/", re.I)

def _get_slug(episode_link):
    """Return the show slug from an episode URL, or None when it doesn't match."""
    match = _slug_regex.search(episode_link)
    return match.group(1) if match else None
# Season page |
1,003 | test cite class | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import pytest
from ..core import *
# skip function tests utilizing ads.ExportQuery for now as it is unstable
# get file path of a static data file for testing
# def data_path(filename):
# data_dir = os.path.join(os.path.dirname(__file__), 'data')
# return os.path.join(data_dir, filename)
# @pytest.mark.remote_data
# def test_text():
# reset()
# track()
# neatm = NEATM()
# assert (['sbpy.thermal.NEATM:', 'method:', 'Harris', '1998',
# 'Icarus', 'Vol', '131', '2', '291'] ==
# to_text().replace(',', '').split())
# reset()
# stop()
# time.sleep(1)
# @pytest.mark.remote_data
# def test_bibtex():
# reset()
# track()
# neatm = NEATM()
# with open(data_path('neatm.bib')) as bib_file:
# assert to_bibtex().strip() == bib_file.read().strip()
# reset()
# stop()
# time.sleep(1)
# @pytest.mark.remote_data
# def test_aastex():
# reset()
# track()
# register('faketask', {'fakesubtask': '2018ApJS..238...22H'})
# with open(data_path('hora.aas')) as aas_file:
# assert to_aastex().strip() == aas_file.read().strip()
# reset()
# stop()
# time.sleep(1)
# @pytest.mark.remote_data
# def test_icarus():
# reset()
# track()
# register('faketask', {'fakesubtask': '1996DPS....28.2504G'})
# with open(data_path('giorgini.icar')) as icar_file:
# assert to_icarus().strip() == icar_file.read().strip()
# reset()
# stop()
# print(to_text().split())
def test_register_single():
    """A single registered bibcode shows up beside the sbpy software citation."""
    reset()
    with Tracking():
        register('test1', {'track_this': 'bibcode1'})
    expected = {'sbpy:', 'software:', '2019JOSS....4.1426M',
                'test1:', 'track_this:', 'bibcode1'}
    assert expected == set(show().split())
    stop()
    reset()
def test_register_list():
    """Bibcodes registered as a list all appear in the report."""
    reset()
    with Tracking():
        register('test1', {'track_this': ['bibcode1', 'bibcode2']})
    expected = {'sbpy:', 'software:', '2019JOSS....4.1426M',
                'test1:', 'track_this:', 'bibcode1', 'bibcode2'}
    assert expected == set(show().split())
    stop()
    reset()
def test_register_double():
    """Registering the same bibcode more than once must not duplicate it."""
    reset()
    with Tracking():
        register('test1', {'track_this': ['bibcode1', 'bibcode2']})
        register('test1', {'track_this': ['bibcode2']})
        register('test1', {'track_this': ['bibcode3']})
    # bibcode2 was registered twice but must be reported exactly once.
    assert show().count('bibcode2') == 1
    stop()
    reset()
def test_Tracking():
    """Citations register inside a Tracking context; outside they are ignored."""
    reset()
    with Tracking():
        assert status()
        register('test1', {'track_this': 'bibcode1'})
        register('test1', {'track_this': 'bibcode2'})
        register('test1', {'track_this_too': 'bibcode'})
        register('test2', {'track_this': 'bibcode'})
        register('test3', {'track_this': 'bibcode',
                           'and_track_that': 'bibcode'})
    # Tracking is inactive once the context exits; this registration is lost.
    assert not status()
    register('test', {'do not track this': 'bibcode'})
    assert set(['sbpy:', 'software:', '2019JOSS....4.1426M',
                'test1:', 'track_this:', 'bibcode1', 'bibcode2',
                'track_this_too:', 'bibcode', 'test2:', 'track_this:',
                'bibcode', 'test3:', 'track_this:', 'bibcode',
                'and_track_that:', 'bibcode']) == set(show().split())
    # different builds will have different orders for bibcode 1 and 2, to
    # avoid the build failing because of this we use sets
    stop()
    reset()
def test_Tracking_issue_64():
    """Regression test: both photolysis lookups are cited (GitHub issue #64)."""
    from sbpy.activity import photo_lengthscale
    reset()
    with Tracking():
        gamma_H2O = photo_lengthscale('H2O')
        gamma_OH = photo_lengthscale('OH')
    words = show().split()
    assert 'OH' in words
    assert 'H2O' in words
    stop()
    reset()
def test_Tracking_reporter(capsys):
    """A reporter callable passed to Tracking is invoked on context exit."""
    reset()
    with Tracking(reporter=show):
        register('test1', {'track_this': 'bibcode1'})
    # The reporter (show) printed to stdout; capture and verify its output.
    captured = capsys.readouterr()
    assert (set(['sbpy:', 'software:', '2019JOSS....4.1426M',
                 'test1:', 'track_this:', 'bibcode1'])
            == set(captured.out.split()))
    stop()
    reset()
def test_cite_function():
    """@cite on a function registers its bibcode when the function is called."""
    @cite({'method': '1687pnpm.book.....N'})
    def force(mass, acceleration):
        return mass * acceleration
    reset()
    track()
    force(1, 2)
    # The citation is keyed by the decorated function's qualified name.
    assert (set(
        ['sbpy:', 'software:', '2019JOSS....4.1426M',
         'sbpy.bib.tests.test_bib.test_cite_function.<locals>.force:',
         'method:', '1687pnpm.book.....N'])
        == set(show().split()))
    stop()
    reset()
def test_cite_function_twice():
    """Stacked @cite decorators each contribute their citations on call."""
    @cite({'method': '1687pnpm.book.....N'})
    @cite({'interpretation': 'philosophical reference'})
    def force(mass, acceleration):
        return mass * acceleration
    reset()
    track()
    force(1, 2)
    assert (set(
        ['sbpy:', 'software:', '2019JOSS....4.1426M',
         'sbpy.bib.tests.test_bib.test_cite_function_twice.<locals>.force:',
         'method:', '1687pnpm.book.....N', 'interpretation:',
         'philosophical', 'reference'])
        == set(show().split()))
    stop()
    reset()
def test_cite_class_method():
    """@cite stacked under @staticmethod registers the method's bibcode."""
    reset()
    class Physics:
        @staticmethod
        @cite({'method': '1687pnpm.book.....N'})
        def force(mass, acceleration):
            return mass * acceleration
    with Tracking():
        p = Physics()
        p.force(1, 2)
    assert (set([
        'sbpy:', 'software:', '2019JOSS....4.1426M',
        'sbpy.bib.tests.test_bib.test_cite_class_method'
        '.<locals>.Physics.force:',
        'method:', '1687pnpm.book.....N'])
        == set(show().split()))
    reset()
def METHOD_NAME():
    """@cite on a class registers its bibcode when an instance is called.

    NOTE(review): the expected qualname below contains
    'test_cite_class.<locals>.Force', implying this function was originally
    named ``test_cite_class`` (and needs a ``test_`` prefix to be collected
    by pytest) — confirm the intended name before renaming.
    """
    reset()
    @cite({'method': '1687pnpm.book.....N'})
    class Force:
        def __call__(self, mass, acceleration):
            return mass * acceleration
    with Tracking():
        f = Force()
        f(1, 2)
    assert (set([
        'sbpy:', 'software:', '2019JOSS....4.1426M',
        'sbpy.bib.tests.test_bib.test_cite_class'
        '.<locals>.Force:', 'method:', '1687pnpm.book.....N'])
        == set(show().split()))
    reset()
def test_filter():
    """show(filter=...) restricts the report to the given citation category."""
    reset()
    with Tracking():
        register('test1', {'track_this': 'bibcode1'})
        register('test1', {'software': 'bibcode2'})
        register('test1', {'track_this_too': 'bibcode'})
        register('test2', {'software': 'bibcode'})
        register('test3', {'track_this': 'bibcode',
                           'software': 'bibcode'})
    # Only 'software' entries (plus sbpy's own citation) should survive.
    assert set(['sbpy:', 'software:', '2019JOSS....4.1426M',
                'test1:', 'software:', 'bibcode2',
                'test2:', 'software:', 'bibcode',
                'test3:', 'software:',
                'bibcode']) == set(show(filter='software').split())
    # different builds will have different orders for bibcode 1 and 2, to
    # avoid the build failing because of this we use sets
    stop()
    reset()
1,004 | installed solvers | """
Copyright 2013 Steven Diamond
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import scipy # For version checks
import cvxpy.settings as s
from cvxpy.reductions.solvers.conic_solvers.cbc_conif import CBC as CBC_con
from cvxpy.reductions.solvers.conic_solvers.clarabel_conif import CLARABEL as CLARABEL_con
from cvxpy.reductions.solvers.conic_solvers.copt_conif import COPT as COPT_con
from cvxpy.reductions.solvers.conic_solvers.cplex_conif import CPLEX as CPLEX_con
from cvxpy.reductions.solvers.conic_solvers.cvxopt_conif import CVXOPT as CVXOPT_con
# Conic interfaces
from cvxpy.reductions.solvers.conic_solvers.diffcp_conif import DIFFCP as DIFFCP_con
from cvxpy.reductions.solvers.conic_solvers.ecos_bb_conif import ECOS_BB as ECOS_BB_con
from cvxpy.reductions.solvers.conic_solvers.ecos_conif import ECOS as ECOS_con
from cvxpy.reductions.solvers.conic_solvers.glop_conif import GLOP as GLOP_con
from cvxpy.reductions.solvers.conic_solvers.glpk_conif import GLPK as GLPK_con
from cvxpy.reductions.solvers.conic_solvers.glpk_mi_conif import GLPK_MI as GLPK_MI_con
from cvxpy.reductions.solvers.conic_solvers.gurobi_conif import GUROBI as GUROBI_con
from cvxpy.reductions.solvers.conic_solvers.mosek_conif import MOSEK as MOSEK_con
from cvxpy.reductions.solvers.conic_solvers.nag_conif import NAG as NAG_con
from cvxpy.reductions.solvers.conic_solvers.pdlp_conif import PDLP as PDLP_con
from cvxpy.reductions.solvers.conic_solvers.scip_conif import SCIP as SCIP_con
from cvxpy.reductions.solvers.conic_solvers.scipy_conif import SCIPY as SCIPY_con
from cvxpy.reductions.solvers.conic_solvers.scs_conif import SCS as SCS_con
from cvxpy.reductions.solvers.conic_solvers.sdpa_conif import SDPA as SDPA_con
from cvxpy.reductions.solvers.conic_solvers.xpress_conif import XPRESS as XPRESS_con
from cvxpy.reductions.solvers.qp_solvers.copt_qpif import COPT as COPT_qp
from cvxpy.reductions.solvers.qp_solvers.cplex_qpif import CPLEX as CPLEX_qp
from cvxpy.reductions.solvers.qp_solvers.gurobi_qpif import GUROBI as GUROBI_qp
# QP interfaces
from cvxpy.reductions.solvers.qp_solvers.osqp_qpif import OSQP as OSQP_qp
from cvxpy.reductions.solvers.qp_solvers.piqp_qpif import PIQP as PIQP_qp
from cvxpy.reductions.solvers.qp_solvers.proxqp_qpif import PROXQP as PROXQP_qp
from cvxpy.reductions.solvers.qp_solvers.xpress_qpif import XPRESS as XPRESS_qp
from cvxpy.utilities.versioning import Version
# Instantiated interface objects, used below to build name -> solver maps.
solver_conic_intf = [DIFFCP_con(), ECOS_con(),
                     CVXOPT_con(), GLPK_con(), COPT_con(),
                     GLPK_MI_con(), CBC_con(), CLARABEL_con(), SCS_con(), SDPA_con(),
                     GUROBI_con(), MOSEK_con(), CPLEX_con(), NAG_con(), XPRESS_con(),
                     SCIP_con(), SCIPY_con(), GLOP_con(), PDLP_con(),
                     ECOS_BB_con()]
solver_qp_intf = [OSQP_qp(),
                  GUROBI_qp(),
                  CPLEX_qp(),
                  XPRESS_qp(),
                  COPT_qp(),
                  PIQP_qp(),
                  PROXQP_qp()
                  ]
# Name -> interface-object lookup tables.
SOLVER_MAP_CONIC = {solver.name(): solver for solver in solver_conic_intf}
SOLVER_MAP_QP = {solver.name(): solver for solver in solver_qp_intf}
# CONIC_SOLVERS and QP_SOLVERS are sorted in order of decreasing solver
# preference. QP_SOLVERS are those for which we have written interfaces
# and are supported by QpSolver.
CONIC_SOLVERS = [s.MOSEK, s.ECOS, s.CLARABEL, s.SCS, s.SDPA,
                 s.CPLEX, s.GUROBI, s.COPT, s.GLPK, s.NAG,
                 s.GLPK_MI, s.CBC, s.CVXOPT, s.XPRESS, s.DIFFCP,
                 s.SCIP, s.SCIPY, s.GLOP, s.PDLP, s.ECOS_BB]
QP_SOLVERS = [s.OSQP,
              s.GUROBI,
              s.CPLEX,
              s.XPRESS,
              s.COPT,
              s.PIQP,
              s.PROXQP]
# Solvers capable of mixed-integer problems, and of mixed-integer SOCPs.
MI_SOLVERS = [s.GLPK_MI, s.MOSEK, s.GUROBI, s.CPLEX,
              s.XPRESS, s.CBC, s.SCIP, s.COPT, s.ECOS_BB]
MI_SOCP_SOLVERS = [s.MOSEK, s.GUROBI, s.CPLEX, s.XPRESS,
                   s.SCIP, s.ECOS_BB]
# Acknowledge MI solver support for SciPy >= 1.9.
if not (Version(scipy.__version__) < Version('1.9.0')):
    MI_SOLVERS.append(s.SCIPY)
def METHOD_NAME():
    """List the installed solvers.

    Returns
    -------
    list of str
        Sorted, de-duplicated names of every solver (conic or QP) whose
        backing package is importable.
    """
    installed = set()
    # Check conic solvers
    for name, solver in SOLVER_MAP_CONIC.items():
        if solver.is_installed():
            installed.add(name)
    # Check QP solvers
    for name, solver in SOLVER_MAP_QP.items():
        if solver.is_installed():
            installed.add(name)
    # The set already removes duplicates (solvers with both conic and QP
    # interfaces); sorting reproduces np.unique's sorted output without
    # needing a numpy round-trip.
    return sorted(installed)
# Computed once at import time; packages installed afterwards require a
# reimport of this module to be picked up.
INSTALLED_SOLVERS = METHOD_NAME()
INSTALLED_CONIC_SOLVERS = [
    slv for slv in INSTALLED_SOLVERS if slv in CONIC_SOLVERS]
INSTALLED_MI_SOLVERS = [
    slv for slv in INSTALLED_SOLVERS if slv in MI_SOLVERS]
1,005 | build tf graph | # Copyright 2023 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for melspec_input TF/tflite input feature library."""
import tempfile
from magenta.models.onsets_frames_transcription import melspec_input
import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
def _TmpFilePath(suffix):
    """Returns the path to a new temporary file.

    The file is created with delete=False so it outlives this call; the
    context manager closes the handle immediately (the original leaked the
    open file object until garbage collection).
    """
    with tempfile.NamedTemporaryFile(suffix=suffix, delete=False) as f:
        return f.name
class MelspecInputTest(tf.test.TestCase):
    """End-to-end checks of melspec_input's TF and TFLite feature graphs."""

    # We make this a class method so we can keep it close to Verify...
    def MakeTestWaveform(self):  # self is unused.
        """Generate a 1 sec sweep waveform as a test input."""
        sample_rate = 16000.0
        duration = 0.975  # Exactly 96 frames at 25 ms / 10 ms.
        start_freq = 400.0
        end_freq = 1600.0
        times = np.arange(0, duration, 1 / sample_rate)
        # Exponential frequency sweep.
        frequencies = start_freq * np.exp(
            times / duration * np.log(end_freq / start_freq))
        delta_phases = frequencies * 1 / sample_rate * 2 * np.pi
        phases = np.cumsum(delta_phases)
        # Raised cosine envelope.
        envelope = 0.5 * (1.0 - np.cos(2 * np.pi * times / duration))
        # Final test waveform.
        return envelope * np.cos(phases)

    # We make this a class method so it can use the TestCase assert methods.
    def VerifyMelSpectrumPatch(self, features):
        """Perform tests on melspectrum as calculated for test waveform."""
        expected_time_steps = 96
        expected_mel_bands = 64
        self.assertEqual((expected_time_steps, expected_mel_bands), features.shape)
        # Expect peak magnitude to be somewhere near expected_time_steps/2 == 48
        # (due to raised cosine envelope.) It isn't *exactly* in the middle because
        # the interaction between the sweeping tone and the mel bands causes some
        # ripple atop the columnwise max. The peak is actually at frame 43.
        peak_frame = np.argmax(np.max(features, axis=1))
        self.assertGreater(peak_frame, 40)
        self.assertLess(peak_frame, 56)
        # Expect peak frequencies to move up with sweep. These are "golden",
        # but agree with predictions from instantaneous frequencies and mel scale.
        self.assertEqual(np.argmax(features[20]), 11)
        self.assertEqual(np.argmax(features[42]), 15)
        self.assertEqual(np.argmax(features[64]), 20)

    def METHOD_NAME(self, tflite_compatible=False):
        """Setup the TF graph using the single function under test."""
        if tflite_compatible:
            # tflite requires explicit input sizing.
            input_length = len(self._test_waveform)
        else:
            input_length = None
        with self._graph.as_default():
            waveform_input = tf.placeholder(tf.float32, [input_length])
            # This is the single function provided by the library.
            features = melspec_input.build_mel_calculation_graph(
                waveform_input, tflite_compatible=tflite_compatible)
        self._input = waveform_input
        self._output = features

    def RunTfGraph(self):
        """Return output of running the current graph under TF."""
        feature_output = self._session.run(
            self._output, feed_dict={self._input: self._test_waveform})
        return feature_output

    def BuildAndRunTfGraph(self, tflite_compatible=False):
        """Build the graph then run it."""
        self.METHOD_NAME(tflite_compatible)
        return self.RunTfGraph()

    def setUp(self):
        super().setUp()
        self._test_waveform = self.MakeTestWaveform()
        # Initialize TensorFlow.
        self._graph = tf.Graph()
        self._session = tf.Session(graph=self._graph)

    def testPlainTfFeatureCalculation(self):
        """Test simple TF feature calculation."""
        feature_output = self.BuildAndRunTfGraph(tflite_compatible=False)
        # Only one patch returned.
        self.assertEqual(1, feature_output.shape[0])
        self.VerifyMelSpectrumPatch(feature_output[0])

    def testTfLiteGraphAgainstPlainTf(self):
        """Test the tflite graph running under plain TF."""
        plain_tf_output = self.BuildAndRunTfGraph(tflite_compatible=False)
        tflite_output = self.BuildAndRunTfGraph(tflite_compatible=True)
        # Results don't match to 6 decimals, 1 is OK.
        # TODO(fjord): Eventually switch to implementation that has fewer
        # differences.
        np.testing.assert_allclose(
            tflite_output[0], plain_tf_output[0], rtol=.05, atol=.3)

    def RunTfliteCompiler(self):
        """Convert the current session's graph to a .tflite file; return path."""
        # Attempt to run the tflite-style conversion to the current graph.
        converter = tf.lite.TFLiteConverter.from_session(self._session,
                                                         [self._input],
                                                         [self._output])
        converter.inference_type = tf.lite.constants.FLOAT
        tflite_model = converter.convert()
        output_filename = _TmpFilePath(suffix='.tflite')
        # Write via a context manager so the handle is closed promptly
        # (the original open(...).write(...) leaked the file object).
        with open(output_filename, 'wb') as output_file:
            output_file.write(tflite_model)
        return output_filename

    def testTfLiteCompiles(self):
        """Check that we can compile the tflite graph (i.e., no invalid ops)."""
        self.METHOD_NAME(tflite_compatible=True)
        self.RunTfliteCompiler()

    def testTfLiteCompilesWithDynamicShape(self):
        """The non-tflite graph (dynamic input shape) must also convert."""
        self.METHOD_NAME(tflite_compatible=False)
        self.RunTfliteCompiler()

    def RunTfliteModel(self, tflite_model_path):
        """Load and run TFLite model under the interpreter."""
        interpreter = tf.lite.Interpreter(tflite_model_path)
        interpreter.allocate_tensors()
        input_details = interpreter.get_input_details()
        interpreter.set_tensor(input_details[0]['index'],
                               np.array(self._test_waveform, dtype=np.float32))
        interpreter.invoke()
        output_details = interpreter.get_output_details()
        output_data = interpreter.get_tensor(output_details[0]['index'])
        return output_data

    def testTfLiteGraphUnderTfLite(self):
        """Verify output of tflite interpreter matches plain TF output."""
        self.METHOD_NAME(tflite_compatible=True)
        tf_output = self.RunTfGraph()
        # Graph is now built in the current session, ready for tflite conversion.
        tflite_filename = self.RunTfliteCompiler()
        # Run the tflite model with the tflite interpreter.
        tflite_output = self.RunTfliteModel(tflite_filename)
        # Be satisfied with 1 d.p. (i.e., 2 sf) agreement.
        # At 2 d.p., we got 0.07% disagreement, probably just 1 value.)
        np.testing.assert_array_almost_equal(
            tflite_output[0], tf_output[0], decimal=1)
# Allow running this test module directly.
if __name__ == '__main__':
    tf.test.main()
1,006 | test search acquisition date | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 University of Dundee & Open Microscopy Environment.
# All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from builtins import str
import pytest
from datetime import date
from datetime import datetime
from datetime import timedelta
from omero.testlib.cli import CLITest
from omero.cli import NonZeroReturnCode
from omero.model import DatasetI
from omero.plugins.search import SearchControl
from omero.rtypes import rstring
class TestSearch(CLITest):
    """CLI tests for the ``omero search`` plugin."""

    def mkimage(self, with_acquisitionDate=False):
        """Import a fake image with a unique (dash-less) name and index it."""
        self._uuid = self.uuid().replace("-", "")
        if with_acquisitionDate:
            filename_date = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
            self._image = self.import_fake_file(
                name=self._uuid, acquisitionDate=filename_date)[0]
        else:
            self._image = self.import_fake_file(name=self._uuid)[0]
        # Index as root so the object is immediately searchable.
        self.root.sf.getUpdateService().indexObject(self._image)

    def mkdataset(self):
        """Create and index a dataset with a unique name."""
        self._uuid_ds = self.uuid().replace("-", "")
        self._dataset = DatasetI()
        self._dataset.name = rstring(self._uuid_ds)
        update = self.client.sf.getUpdateService()
        self._dataset = update.saveAndReturnObject(self._dataset)
        self.root.sf.getUpdateService().indexObject(self._dataset)

    def short(self):
        """Return the first 8 characters of the image uuid (wildcard stem)."""
        return self._uuid[0:8]

    def days_ago(self, ago=1):
        """Return the date ``ago`` days in the past, formatted YYYY-MM-DD."""
        t = date.today() - timedelta(ago)
        t = t.strftime("%Y-%m-%d")
        return t

    def setup_method(self, method):
        super(TestSearch, self).setup_method(method)
        self.cli.register("search", SearchControl, "TEST")
        self.args += ["search"]
        self.setup_mock()

    def go(self):
        """Invoke the CLI with the accumulated args and return the results."""
        self.cli.invoke(self.args, strict=True)
        return self.cli.get("search.results")

    def assertSearch(self, args, success=True, name=None):
        """Run a search and assert exactly one hit named *name* (or failure)."""
        if name is None:
            name = self._uuid
        self.args.extend(list(args))
        if success:
            results = self.go()
            assert 1 == len(results)
            assert name in results[0].name.val
        else:
            with pytest.raises(NonZeroReturnCode):
                results = self.go()

    def test_search_basic(self):
        self.mkimage()
        self.assertSearch(("Image", self._uuid + "*"))

    def test_search_wildcard(self):
        self.mkimage()
        short = self.short()
        self.assertSearch(("Image", short + "*"))

    def test_search_name_field(self):
        self.mkimage()
        short = self.short()
        self.assertSearch(("Image", short + "*", "--field=name"))

    def test_search_description_field(self):
        self.mkimage()
        short = self.short()
        with pytest.raises(NonZeroReturnCode):
            # Not set on description
            self.assertSearch(("Image", short + "*",
                               "--field=description"))

    def test_search_style(self, capsys):
        self.mkimage()
        short = self.short()
        self.assertSearch(("Image", short + "*", "--style=plain"))
        o, e = capsys.readouterr()
        # Plain style is comma-separated: index, class, id, ...
        parts = o.split(",")
        assert "ImageI" == parts[1]
        assert ("%s" % self._image.id.val) == parts[2]

    def test_search_ids_only(self, capsys):
        self.mkimage()
        short = self.short()
        self.assertSearch(("Image", short + "*", "--ids-only"))
        o, e = capsys.readouterr()
        assert ("ImageI:%s" % self._image.id.val) in o

    @pytest.mark.parametrize("data", (
        # (from_ago, to_ago, with_acquisitionDate, success)
        (1, None, True, True),
        (1, None, False, False),
    ))
    def METHOD_NAME(self, data):
        """Search by acquisitionDate only matches images that have one."""
        from_ago, to_ago, with_acquisitionDate, success = data
        self.mkimage(with_acquisitionDate=with_acquisitionDate)
        short = self.short()
        args = ["Image", short + "*"]
        if from_ago:
            args += ["--from=%s" % self.days_ago(from_ago)]
        if to_ago:
            args += ["--to=%s" % self.days_ago(to_ago)]
        args += ["--date-type=acquisitionDate"]
        self.assertSearch(args, success=success)

    @pytest.mark.parametrize("data", (
        # (from_ago, to_ago, date_type, success); negative ago = future date
        (1, None, None, True),
        (1, None, "import", True),
        (1, -1, None, True),
        (None, 1, None, False),
        (-1, None, None, False),
    ))
    def test_search_other_dates(self, data):
        from_ago, to_ago, date_type, success = data
        self.mkimage()
        short = self.short()
        args = ["Image", short + "*"]
        if from_ago:
            args += ["--from=%s" % self.days_ago(from_ago)]
        if to_ago:
            args += ["--to=%s" % self.days_ago(to_ago)]
        if date_type:
            args += ["--date-type=%s" % date_type]
        self.assertSearch(args, success=success)

    def test_search_no_parse(self):
        self.mkimage()
        short = self.short()
        args = ["Image", short + "*", "--no-parse"]
        self.assertSearch(args)

    def test_search_dataset_acquisition(self):
        self.mkdataset()
        txt = self._uuid_ds[0:8] + "*"
        _from = "--from=%s" % self.days_ago(1)
        _to = "--to=%s" % self.days_ago(-1)
        args = ["Dataset", txt, _from, _to]
        self.assertSearch(args, name=self._uuid_ds)

    def test_search_index_by_user(self, capsys):
        """--index requires admin; a plain user gets an error message."""
        self.mkimage()
        short = self.short()
        self.args.extend(("Image", short + "*", "--index"))
        self.cli.invoke(self.args, strict=False)
        o, e = capsys.readouterr()
        assert 'Only admin can index object' in str(e)
#!/usr/bin/env python3
# Copyright (c) 2015-2020 The Dash Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import DashTestFramework
from test_framework.util import *
'''
llmq-connections.py
Checks intra quorum connections
'''
class LLMQConnections(DashTestFramework):
    """Functional test checking intra-quorum masternode connections."""

    def set_test_params(self):
        # 15 nodes, 14 of them masternodes; LLMQ of size 5, threshold 3.
        self.set_dash_test_params(15, 14, fast_dip3_enforcement=True)
        self.set_dash_llmq_test_params(5, 3)

    def run_test(self):
        self.nodes[0].spork("SPORK_17_QUORUM_DKG_ENABLED", 0)
        self.wait_for_sporks_same()

        q = self.mine_quorum(expected_connections=2)

        self.log.info("checking for old intra quorum connections")
        total_count = 0
        for mn in self.get_quorum_masternodes(q):
            count = self.METHOD_NAME(mn.node)
            total_count += count
            # Each member should hold at least 2 verified MN connections.
            assert_greater_than_or_equal(count, 2)
        assert(total_count < 40)

        self.check_reconnects(2)

        self.log.info("activating SPORK_21_QUORUM_ALL_CONNECTED")
        self.nodes[0].spork("SPORK_21_QUORUM_ALL_CONNECTED", 0)
        self.wait_for_sporks_same()

        self.log.info("mining one block and waiting for all members to connect to each other")
        self.nodes[0].generate(1)
        for mn in self.get_quorum_masternodes(q):
            # With the spork on, each of the 5 members connects to the other 4.
            self.wait_for_mnauth(mn.node, 4)

        self.log.info("mine a new quorum and verify that all members connect to each other")
        q = self.mine_quorum(expected_connections=4)

        self.log.info("checking that all MNs got probed")
        for mn in self.get_quorum_masternodes(q):
            wait_until(lambda: self.get_mn_probe_count(mn.node, q, False) == 4)

        self.log.info("checking that probes age")
        self.bump_mocktime(60)
        for mn in self.get_quorum_masternodes(q):
            wait_until(lambda: self.get_mn_probe_count(mn.node, q, False) == 0)

        self.log.info("mine a new quorum and re-check probes")
        q = self.mine_quorum(expected_connections=4)
        for mn in self.get_quorum_masternodes(q):
            wait_until(lambda: self.get_mn_probe_count(mn.node, q, True) == 4)

        self.check_reconnects(4)

    def check_reconnects(self, expected_connection_count):
        """Bounce networking on all MNs and verify they re-establish links."""
        self.log.info("disable and re-enable networking on all masternodes")
        for mn in self.mninfo:
            mn.node.setnetworkactive(False)
        for mn in self.mninfo:
            wait_until(lambda: len(mn.node.getpeerinfo()) == 0)
        for mn in self.mninfo:
            mn.node.setnetworkactive(True)
        self.bump_mocktime(60)

        self.log.info("verify that all masternodes re-connected")
        for q in self.nodes[0].quorum('list')['llmq_test']:
            for mn in self.get_quorum_masternodes(q):
                self.wait_for_mnauth(mn.node, expected_connection_count)

        # Also re-connect non-masternode connections
        for i in range(1, len(self.nodes)):
            connect_nodes(self.nodes[i], 0)
            self.nodes[i].ping()
        # wait for ping/pong so that we can be sure that spork propagation works
        time.sleep(1)  # needed to make sure we don't check before the ping is actually sent (fPingQueued might be true but SendMessages still not called)
        for i in range(1, len(self.nodes)):
            wait_until(lambda: all('pingwait' not in peer for peer in self.nodes[i].getpeerinfo()))

    def METHOD_NAME(self, node):
        """Count peers of *node* with a verified MN (proregtx) handshake."""
        peers = node.getpeerinfo()
        count = 0
        for p in peers:
            if 'verified_proregtx_hash' in p and p['verified_proregtx_hash'] != '':
                count += 1
        return count

    def get_mn_probe_count(self, node, q, check_peers):
        """Count quorum members recently probed (or connected, if
        *check_peers*) as seen from *node*."""
        count = 0
        mnList = node.protx('list', 'registered', 1)
        peerList = node.getpeerinfo()
        mnMap = {}
        peerMap = {}
        for mn in mnList:
            mnMap[mn['proTxHash']] = mn
        for p in peerList:
            if 'verified_proregtx_hash' in p and p['verified_proregtx_hash'] != '':
                peerMap[p['verified_proregtx_hash']] = p
        for mn in self.get_quorum_masternodes(q):
            pi = mnMap[mn.proTxHash]
            # A probe within the last 60s counts; otherwise an open verified
            # connection counts when check_peers is set.
            if pi['metaInfo']['lastOutboundSuccessElapsed'] < 60:
                count += 1
            elif check_peers and mn.proTxHash in peerMap:
                count += 1
        return count
if __name__ == '__main__':
    # Entry point for the functional test runner.
    # Fix: removed a stray trailing "|" token after the call, which was a
    # syntax error (extraction residue, not code).
    LLMQConnections().main()
from traitlets import Any, Bool, List, observe
from jdaviz.core.registries import tray_registry
from jdaviz.core.template_mixin import PluginTemplateMixin, DatasetSelectMixin
from jdaviz.core.user_api import PluginUserApi
from jdaviz.utils import PRIHDR_KEY, COMMENTCARD_KEY
__all__ = ['MetadataViewer']
@tray_registry('g-metadata-viewer', label="Metadata")
class MetadataViewer(PluginTemplateMixin, DatasetSelectMixin):
    """
    See the :ref:`Metadata Viewer Plugin Documentation <imviz_metadata-viewer>` for more details.

    Only the following attributes and methods are available through the
    :ref:`public plugin API <plugin-apis>`:

    * :meth:`~jdaviz.core.template_mixin.PluginTemplateMixin.show`
    * :meth:`~jdaviz.core.template_mixin.PluginTemplateMixin.open_in_tray`
    * ``dataset`` (:class:`~jdaviz.core.template_mixin.DatasetSelect`):
      Dataset to expose the metadata.
    * :attr:`show_primary`:
      Whether to show MEF primary header metadata instead.
    * :attr:`metadata`:
      Read-only metadata. If the data is loaded from a multi-extension FITS file,
      this can be the extension header or the primary header, depending on
      ``show_primary`` setting.
    """
    template_file = __file__, "metadata_viewer.vue"

    # Synced traitlets backing the Vue template state.
    has_metadata = Bool(False).tag(sync=True)
    has_primary = Bool(False).tag(sync=True)
    show_primary = Bool(False).tag(sync=True)
    has_comments = Bool(False).tag(sync=True)
    metadata = List([]).tag(sync=True)
    metadata_filter = Any().tag(sync=True)  # string or None

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # override the default filters on dataset entries to require metadata in entries
        self.dataset.add_filter('not_from_plugin')

    @property
    def user_api(self):
        return PluginUserApi(self, expose=('dataset', 'show_primary'), readonly=('metadata',))

    def reset(self):
        """Clear all displayed metadata state."""
        self.has_metadata = False
        self.has_primary = False
        self.show_primary = False
        self.has_comments = False
        self.metadata = []

    @observe("dataset_selected")
    def show_metadata(self, event):
        """Refresh displayed metadata when the selected dataset changes."""
        data = self.dataset.selected_dc_item
        if (data is None or not hasattr(data, 'meta') or not isinstance(data.meta, dict)
                or len(data.meta) < 1):
            self.reset()
            return

        if PRIHDR_KEY in data.meta:
            self.has_primary = True
        else:
            self.has_primary = False
            self.show_primary = False

        self.find_public_metadata(data.meta, primary_only=self.show_primary)

    @observe("show_primary")
    def handle_show_primary(self, event):
        """Toggle between extension metadata and the MEF primary header."""
        if not self.show_primary:
            self.show_metadata(event)
            return

        data = self.dataset.selected_dc_item
        if (data is None or not hasattr(data, 'meta') or not isinstance(data.meta, dict)
                or len(data.meta) < 1):
            self.reset()
            return

        self.find_public_metadata(data.meta, primary_only=True)

    def find_public_metadata(self, meta, primary_only=False):
        """Flatten *meta*, drop non-public keys and publish it to the UI."""
        if primary_only:
            if PRIHDR_KEY in meta:
                meta = meta[PRIHDR_KEY]
            else:
                self.reset()
                return

        d = flatten_nested_dict(meta)
        # Some FITS keywords cause "# ipykernel cannot clean for JSON" messages.
        # Also, we want to hide internal metadata that starts with underscore.
        badkeys = ['COMMENT', 'HISTORY', ''] + [k for k in d if k.startswith('_')]
        for badkey in badkeys:
            if badkey in d:
                del d[badkey]

        if COMMENTCARD_KEY in meta:
            has_comments = True

            def get_comment(key):
                if key in meta[COMMENTCARD_KEY]._header:
                    val = meta[COMMENTCARD_KEY][key]
                else:
                    val = ''
                return val
        else:
            has_comments = False

            def get_comment(key):
                return ''

        # TODO: Option to not sort?
        public_meta = sorted(zip(d.keys(), map(str, d.values()), map(get_comment, d.keys())))
        if len(public_meta) > 0:
            self.metadata = public_meta
            self.has_metadata = True
            self.has_comments = has_comments
        else:
            self.reset()
# TODO: If this generalized in stdatamodels in the future, replace with native function.
# See https://github.com/spacetelescope/stdatamodels/issues/131
# This code below is taken code from stdatamodels/model_base.py, and the method to_flat_dict()
def flatten_nested_dict(asdfnode, include_arrays=True):
    """
    Returns a dictionary of all of the schema items as a flat dictionary.

    Each dictionary key is a dot-separated name. For example, the
    schema element ``meta.observation.date`` at the root node
    will end up in the dictionary as::

        { "meta.observation.date": "2012-04-22T03:22:05.432" }
    """
    import datetime
    import numpy as np
    from astropy.time import Time

    def METHOD_NAME(val):
        # Render datetime/Time leaves as strings; everything else passes through.
        if isinstance(val, datetime.datetime):  # pragma: no cover
            return val.isoformat()
        if isinstance(val, Time):  # pragma: no cover
            return str(val)
        return val

    pairs = _iteritems(asdfnode)
    if include_arrays:
        return {key: METHOD_NAME(val) for key, val in pairs}
    return {key: METHOD_NAME(val)  # pragma: no cover
            for key, val in pairs if not isinstance(val, np.ndarray)}
def _iteritems(asdfnode):
"""
Iterates over all of the schema items in a flat way.
Each element is a pair (`key`, `value`). Each `key` is a
dot-separated name. For example, the schema element
`meta.observation.date` will end up in the result as::
("meta.observation.date": "2012-04-22T03:22:05.432")
"""
def recurse(asdfnode, path=[]):
if isinstance(asdfnode, dict):
for key, val in asdfnode.items():
for x in recurse(val, path + [key]):
yield x
elif isinstance(asdfnode, (list, tuple)):
for i, val in enumerate(asdfnode):
for x in recurse(val, path + [i]):
yield x
else:
yield ('.'.join(str(x) for x in path), asdfnode)
for x in recurse(asdfnode):
yield x |
import logging
from mock import (
patch, Mock, MagicMock
)
from pytest import (
raises, fixture
)
from kiwi.defaults import Defaults
from kiwi.solver.sat import Sat
from kiwi.exceptions import (
KiwiSatSolverPluginError,
KiwiSatSolverJobProblems,
KiwiSatSolverJobError
)
class TestSat:
    """Unit tests for the libsolv-based ``Sat`` solver wrapper."""

    @fixture(autouse=True)
    def inject_fixtures(self, caplog):
        # Capture log records so tests can assert on emitted messages.
        self._caplog = caplog

    @patch('importlib.import_module')
    def setup(self, mock_import_module):
        """Create a Sat instance with a fully mocked solv module and pool."""
        self.sat = Sat()
        self.solver = MagicMock()
        self.transaction = Mock()
        self.transaction.newpackages = Mock(
            return_value=[Mock()]
        )
        self.selection = Mock()
        self.solver.transaction = Mock(
            return_value=self.transaction
        )
        self.sat.pool.Solver = Mock(
            return_value=self.solver
        )
        self.sat.pool.select = Mock(
            return_value=self.selection
        )
        # Sat() must lazily import the 'solv' binding exactly once.
        mock_import_module.assert_called_once_with('solv')
        self.solv = mock_import_module.return_value
        self.sat.pool.setarch.assert_called_once_with()
        # Reset so individual tests can assert their own setarch calls.
        self.sat.pool.setarch.reset_mock()

    @patch('importlib.import_module')
    def setup_method(self, cls, mock_import_module):
        self.setup()

    @patch('importlib.import_module')
    def test_setup_no_sat_plugin(self, mock_import_module):
        # Import failure of 'solv' must surface as a plugin error.
        mock_import_module.side_effect = Exception
        with raises(KiwiSatSolverPluginError):
            Sat()

    def test_set_dist_type_raises(self):
        Defaults.set_platform_name('x86_64')
        # Negative return code from libsolv signals an unsupported dist type.
        self.sat.pool.setdisttype.return_value = -1
        with raises(KiwiSatSolverPluginError):
            self.sat.set_dist_type('deb')

    def METHOD_NAME(self):
        Defaults.set_platform_name('x86_64')
        self.sat.pool.setdisttype.return_value = 0
        self.sat.set_dist_type('deb')
        self.sat.pool.setdisttype.assert_called_once_with(
            self.solv.Pool.DISTTYPE_DEB
        )
        # Debian naming: x86_64 maps to 'amd64'.
        self.sat.pool.setarch.assert_called_once_with(
            'amd64'
        )

    def test_add_repository(self):
        solver_repository = Mock()
        solver_repository.uri.uri = 'some-uri'
        solvable = Mock()
        solver_repository.create_repository_solvable.return_value = solvable
        pool_repository = Mock()
        self.sat.pool.add_repo.return_value = pool_repository
        self.sat.add_repository(solver_repository)
        solver_repository.create_repository_solvable.assert_called_once_with()
        self.sat.pool.add_repo.assert_called_once_with('some-uri')
        pool_repository.add_solv.assert_called_once_with(solvable)
        self.sat.pool.addfileprovides.assert_called_once_with()
        self.sat.pool.createwhatprovides.assert_called_once_with()

    @patch.object(Sat, '_setup_jobs')
    def test_solve_has_problems(self, mock_setup_jobs):
        """Solver problems must raise with the problem/solution details."""
        packages = ['vim']
        problem = Mock()
        problem.id = 42
        info = Mock()
        info.problemstr = Mock(
            return_value='some-problem'
        )
        findproblemrule = Mock()
        findproblemrule.info = Mock(
            return_value=info
        )
        problem.findproblemrule.return_value = findproblemrule
        option = Mock()
        option.str = Mock(
            return_value='some-option'
        )
        solution = Mock()
        solution.id = 42
        solution.elements = Mock(
            return_value=[option]
        )
        problem.solutions.return_value = [solution]
        self.solver.solve = Mock(
            return_value=[problem]
        )
        with raises(KiwiSatSolverJobProblems):
            self.sat.solve(packages)

    def test_solve_package_not_found_and_skipped(self):
        packages = ['vim']
        self.solver.solve = Mock(
            return_value=None
        )
        self.sat.solv.Selection.SELECTION_PROVIDES = 0
        self.selection.flags = 0
        # Empty selection means the package could not be resolved.
        self.selection.isempty = Mock(
            return_value=True
        )
        with self._caplog.at_level(logging.INFO):
            self.sat.solve(packages, skip_missing=True)
            assert '--> Package vim not found: skipped' in self._caplog.text

    def test_solve_package_not_found_raises(self):
        packages = ['vim']
        self.solver.solve = Mock(
            return_value=None
        )
        self.selection.isempty = Mock(
            return_value=True
        )
        # Without skip_missing an unresolved package is a hard error.
        with raises(KiwiSatSolverJobError):
            self.sat.solve(packages)

    def test_solve(self):
        packages = ['vim']
        self.solver.solve = Mock(
            return_value=None
        )
        self.selection.isempty = Mock(
            return_value=False
        )
        self.selection.jobs = Mock(
            return_value=packages
        )
        self.sat.solve(packages)
        self.solver.solve.assert_called_once_with(['vim'])
        self.solver.transaction.assert_called_once_with()

    def test_solve_with_capabilities(self):
        packages = ['kernel-base']
        self.solver.solve = Mock(
            return_value=None
        )
        # SELECTION_PROVIDES flag set: match is done via capabilities.
        self.sat.solv.Selection.SELECTION_PROVIDES = 1
        self.selection.flags = 1
        self.selection.isempty = Mock(
            return_value=False
        )
        self.selection.jobs = Mock(
            return_value=packages
        )
        with self._caplog.at_level(logging.INFO):
            self.sat.solve(packages)
            assert '--> Using capability match for kernel-base' in \
                self._caplog.text
# coding=utf-8
"""
This file is part of OpenSesame.
OpenSesame is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenSesame is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with OpenSesame. If not, see <http://www.gnu.org/licenses/>.
"""
from libopensesame.py3compat import *
import copy
from libopensesame.exceptions import InvalidValue
from functools import partial
from openexp.color import Color
INF = float('inf')
# Element properties that must validate as finite numbers (position, size,
# radius).
NUMERIC_PROPERTIES = 'x', 'y', 'w', 'h', 'r'
# Element properties that are coerced to Color objects on assignment.
COLOR_PROPERTIES = 'color', 'col1', 'col2'
class Element:
    """A base class for sketchpad elements.

    Parameters
    ----------
    canvas : Canvas
        The canvas of which this element is part.
    **properties : dict
        A dict with style arguments such as color, fill, etc.
    """

    # A property that indicates whether style properties (color etc) can be
    # changed or not.
    read_only = False

    def __init__(self, canvas, **properties):
        self._canvas = canvas
        if 'visible' not in properties:
            properties['visible'] = True
        # Coerce all color-like style arguments to Color objects up front.
        for prop in COLOR_PROPERTIES:
            if prop in properties:
                properties[prop] = Color(self.experiment, properties[prop])
        self._assert_numeric(**{
            prop: val
            for prop, val in properties.items()
            if prop in NUMERIC_PROPERTIES
        })
        self._properties = properties
        for prop in self.property_names:
            self._create_property(prop)
        if canvas.auto_prepare and self.visible:
            self.prepare()

    def __contains__(self, xy):
        # True if the (x, y) point falls within this element's bounding rect.
        x, y, w, h = self.rect
        return x <= xy[0] and x + w >= xy[0] and y <= xy[1] and y + h >= xy[1]

    def __iter__(self):
        """Elements are iterable, but by default contain only themselves.
        However, Group objects can contain other elements.
        """
        yield self

    def __len__(self):
        """Elements have a length, but by default this is 1. However, Group
        objects can have a different length.
        """
        return 1

    def __add__(self, element):
        """Implements the + syntax, which combines Element objects into Group
        objects.

        Parameters
        ----------
        element : Element
            The element to add.

        Returns
        -------
        Group
            A group of elements.
        """
        from openexp._canvas._element.group import Group
        # NOTE(review): relies on a dynamically exposed ``canvas`` attribute
        # (see _create_property) — confirm the canvas configurables provide it.
        return Group(self.canvas, [self, element])

    def copy(self, canvas):
        """Creates a deep copy of the current element. This new copy becomes
        part of the provided canvas.

        Parameters
        ----------
        canvas : Canvas
            The canvas of which the copied element is part.

        Returns
        -------
        Element
            A copy of the current element.
        """
        try:
            e = copy.deepcopy(self)
        except (ValueError, TypeError):
            # Silently fall back to shallow copies if deep copies are not
            # possible. This happens for example with the c-types-based
            # PsychoPy stimuli.
            e = copy.copy(self)
        e._canvas = canvas
        return e

    def prepare(self):
        """Is called when the canvas is prepared. This should be implemented
        by backend-specific element objects.
        """
        pass

    def show(self):
        """Is called when the canvas is shown. This should be implemented by
        backend-specfic element objects.
        """
        pass

    def _on_attribute_change(self, **kwargs):
        """Is called when an attribute, such as color, is changed.

        Parameters
        ----------
        **kwargs : dict
            A dict with changed attributes.
        """
        pass

    @property
    def experiment(self):
        return self._canvas.experiment

    @property
    def to_xy(self):
        return self._canvas.to_xy

    @property
    def METHOD_NAME(self):
        return self._canvas.METHOD_NAME

    @property
    def property_names(self):
        # Element-specific properties plus all canvas configurables.
        return (
            set(self._properties.keys())
            | set(self._canvas.configurables.keys())
        )

    @property
    def rect(self):
        # Subclasses must provide their (x, y, w, h) bounding rectangle.
        raise NotImplementedError()

    @property
    def top(self):
        return self.rect[1]

    @property
    def left(self):
        return self.rect[0]

    @property
    def width(self):
        return self.rect[2]

    @property
    def height(self):
        return self.rect[3]

    @property
    def size(self):
        return self.rect[2:]

    @property
    def position(self):
        return self.rect[:2]

    def _create_property(self, key):
        """Dynamically creates a getter/setter property. This is used to make
        style arguments such as color get-able and set-able.
        """
        setattr(self.__class__, key, property(
            partial(self._getter, key),
            partial(self._setter, key),
            self._deller, ''))

    @staticmethod
    def _getter(key, self):
        """A getter for dynamically created properties.

        Parameters
        ----------
        key : str
            A property name.
        self : Element.
            The Element instance. For technical reasons this is passed as the
            second argument.
        """
        try:
            return self._properties[key]
        except KeyError:
            # Fall back to the canvas-wide configuration value.
            return self._canvas.__cfg__[key]

    @staticmethod
    def _setter(key, self, val):
        """A setter for dynamically created properties.

        Parameters
        ----------
        key : str
            A property name.
        self : Element.
            The Element instance. For technical reasons this is passed as the
            second argument.
        val : Any
            A property value.
        """
        if key in NUMERIC_PROPERTIES:
            self._assert_numeric(**{key: val})
        # Fix: this previously called the undefined name ``color(...)``,
        # raising NameError on any color assignment; use the imported Color
        # class. Also coerce all COLOR_PROPERTIES ('color', 'col1', 'col2')
        # for consistency with __init__.
        if key in COLOR_PROPERTIES:
            val = Color(self.experiment, val)
        self._properties[key] = val
        self._on_attribute_change(**{key: val})

    @staticmethod
    def _deller(self, key=None):
        """A deller for dynamically created properties.

        Fix: a property's fdel is invoked with only the instance, so ``key``
        must be optional; previously ``del element.prop`` raised TypeError.

        Parameters
        ----------
        key : str, optional
            A property name (unused; deletion is a no-op).
        """
        pass

    @staticmethod
    def _assert_numeric(**kwdict):
        """Raise InvalidValue unless every value is a finite number."""
        for name, v in kwdict.items():
            try:
                v = float(v)
            except ValueError:
                raise InvalidValue(
                    '%s should be int or float, not %s' % (name, v))
            if v != v:  # NaN is the only value not equal to itself
                raise InvalidValue(
                    '%s should be int or float, not nan' % name)
            if v == INF:
                raise InvalidValue(
                    '%s should be int or float, not inf' % name)

    @staticmethod
    def _rect(x, y, w, h):
        """Fixes negative width and heights when defining a rect

        Parameters
        ----------
        x
            An X coordinate
        y
            A Y coordinate
        w
            A width
        h
            A height

        Returns
        -------
        tuple
            A normalized (x, y, w, h) with non-negative w and h.
        """
        Element._assert_numeric(x=x, y=y, w=w, h=h)
        if w < 0:
            x += w
            w = abs(w)
        if h < 0:
            y += h
            h = abs(h)
        return x, y, w, h
""" An example of how to use Chaco to render a visual TraitsUI editor.
This particular editor allows the user to set two endpoints of an
interval.
"""
from traits.api import HasTraits
from traits.etsconfig.api import ETSConfig
if ETSConfig.toolkit == "wx":
from traitsui.wx.editor import Editor
else:
from traitsui.qt4.editor import Editor
from traitsui.api import EditorFactory, Item, View
from enable.api import ColorTrait, Window
from chaco.api import OverlayPlotContainer, create_line_plot, LinePlot
from chaco.tools.api import RangeSelection, RangeSelectionOverlay
from traits.api import Int, TraitType, Instance, Float
from math import pi
class Interval(TraitType):
    """Trait that represents an interval."""

    info_text = "an interval (x,y) where x < y"

    def __init__(self, low=0, high=1, **metadata):
        # The default trait value is the (low, high) tuple itself.
        value = (low, high)
        TraitType.__init__(self, value, **metadata)
        self.value = (low, high)

    def validate(self, object, name, value):
        """Accept (low, high) tuples with low <= high; otherwise error."""
        low, high = value
        if low <= high:
            return value
        self.error(object, name, value)

    def create_editor(self):
        # Use the custom Chaco-based interval editor by default.
        return IntervalEditor()
class IntervalEditorFactory(EditorFactory):
    """Factory that creates IntervalEditorImpl editors of a fixed pixel size."""

    width = Int(300)
    height = Int(40)

    def simple_editor(self, ui, object, name, description, parent):
        """Build an editor seeded with the trait's declared (low, high)."""
        trait = object.trait(name).trait_type
        low, high = trait.value
        return IntervalEditorImpl(
            parent,
            factory=self,
            ui=ui,
            object=object,
            name=name,
            description=description,
            low=low,
            high=high,
        )
class RangeKnobsOverlay(RangeSelectionOverlay):
    """Overlay that draws the selection as a bar with circular end knobs."""

    radius = Float(3)
    low_color = ColorTrait("red")
    high_color = ColorTrait("red")
    # Override the default alpha and border color, inherited from
    # RangeSelectionOverlay; these are more appropriate for our application.
    alpha = Float(0.8)
    border_color = ColorTrait("black")

    def overlay(self, component, gc, view_bounds=None, mode="normal"):
        """Render the selection bar and knobs at the component's mid height."""
        mid_y = component.position[1] + component.bounds[1] / 2
        # Draw each of a possibly disjoint set of selections
        coords = self._get_selection_screencoords()
        for coord in coords:
            start, end = coord
            with gc:
                gc.set_alpha(self.alpha)
                gc.set_stroke_color(self.border_color_)
                gc.set_line_width(self.border_width)
                # A thin horizontal bar between the two knobs.
                gc.rect(
                    start + self.radius,
                    mid_y - 1,
                    (end - start - 2 * self.radius),
                    2,
                )
                gc.draw_path()
                gc.set_fill_color(self.low_color_)
                self.METHOD_NAME(gc, start, mid_y, self.radius)
                # Have to stroke/fill the path before we change out the
                # fill color
                gc.draw_path()
                gc.set_fill_color(self.high_color_)
                self.METHOD_NAME(gc, end, mid_y, self.radius)
                gc.draw_path()

    def METHOD_NAME(self, gc, x, y, radius):
        # Trace a circle centered at (x, y); the caller fills/strokes it.
        with gc:
            gc.translate_ctm(x, y)
            gc.arc(0, 0, 2 * radius, 0, 2 * pi)
class IntervalEditorImpl(Editor):
    """TraitsUI editor that renders the interval as a Chaco line plot with
    draggable range-selection knobs."""

    low = Int
    high = Int
    plot = Instance(LinePlot)

    def init(self, parent):
        """Build the Chaco plot, selection tool and overlay inside *parent*."""
        factory = self.factory
        container = OverlayPlotContainer(
            bgcolor="transparent", padding=0, spacing=0
        )
        window = Window(parent, component=container)
        interval = self.high - self.low
        data = ([self.low, self.high], [0.5] * 2)
        plot = create_line_plot(data, color="black", bgcolor="sys_window")
        # Pad the x range by 10% of the interval on each side.
        plot.x_mapper.range.low = self.low - interval * 0.1
        plot.x_mapper.range.high = self.high + interval * 0.1
        plot.y_mapper.range.high = 1.0
        plot.y_mapper.range.low = 0.0
        range_selection = RangeSelection(plot, left_button_selects=True)
        # Do not allow the user to reset the range
        range_selection.event_state = "selected"
        range_selection.deselect = lambda x: None
        range_selection.observe(self.update_interval, "selection")
        plot.tools.append(range_selection)
        plot.overlays.append(RangeKnobsOverlay(plot))
        self.plot = plot
        container.add(self.plot)
        # To set the low and high, we're actually going to set the
        # 'selection' metadata on the line plot to the tuple (low,high).
        plot.index.metadata["selections"] = (0, 1.0)
        # Tell the editor what to display
        self.control = window.control
        if ETSConfig.toolkit == "wx":
            self.control.SetSize((factory.width, factory.height))
        else:
            self.control.setMaximumSize(factory.width, factory.height)

    def update_interval(self, event):
        """Clamp the dragged selection to [0, 1] and push it to the trait."""
        value = event.new
        low, high = value
        low = max(low, 0)
        high = min(high, 1)
        self.plot.index.metadata["selections"] = (low, high)
        self.value = (low, high)

    def update_editor(self):
        # The plot updates itself via the selection observer; nothing to do.
        pass
# The user normally uses the factory as if it were an editor, e.g.:
#
#     View(Item('interval', editor=IntervalEditor()))
#
IntervalEditor = IntervalEditorFactory


class IntervalTest(HasTraits):
    """Small demo model with a single Interval trait."""

    interval = Interval(low=0, high=1)

    traits_view = View(
        Item("interval", editor=IntervalEditor()), resizable=True
    )


demo = IntervalTest()

if __name__ == "__main__":
    demo.configure_traits()
"""Tools for working with NIDM-Experiment files"""
import click
from rdflib import Graph, util
from nidm.core import Constants
from nidm.experiment.Query import GetParticipantIDs
from nidm.experiment.tools.click_base import cli
# adding click argument parsing
@cli.command()
@click.option(
    "--nidm_file_list",
    "-nl",
    required=True,
    help="A comma separated list of NIDM files with full path",
)
@click.option(
    "--s",
    "-s",
    required=False,
    is_flag=True,
    help="If parameter set then files will be merged by ndar:src_subjec_id of prov:agents",
)
@click.option(
    "--out_file", "-o", required=True, help="File to write concatenated NIDM files"
)
def METHOD_NAME(nidm_file_list, s, out_file):
    """
    This function will merge NIDM files. See command line parameters for supported merge operations.
    """
    # graph = Graph()
    # for nidm_file in nidm_file_list.split(','):
    #     graph.parse(nidm_file,format=util.guess_format(nidm_file))

    # create empty graph
    graph = Graph()
    # start with the first NIDM file and merge the rest into the first
    first = True
    for nidm_file in nidm_file_list.split(","):
        # if merging by subject:
        # NOTE(review): when the -s flag is NOT set, no file is parsed and an
        # empty graph is serialized — confirm this is intended.
        if s:
            if first:
                # get list of all subject IDs
                first_file_subjids = GetParticipantIDs([nidm_file])
                first = False
                first_graph = Graph()
                first_graph.parse(nidm_file, format=util.guess_format(nidm_file))
            else:
                # load second graph
                graph.parse(nidm_file, format=util.guess_format(nidm_file))
                # get list of second file subject IDs
                # NOTE(review): the return value is discarded — dead call?
                GetParticipantIDs([nidm_file])
                # for each UUID / subject ID look in graph and see if you can find the same ID. If so get the UUID of
                # that prov:agent and change all the UUIDs in nidm_file to match then concatenate the two graphs.
                query = f"""
                    PREFIX prov:<http://www.w3.org/ns/prov#>
                    PREFIX sio: <http://semanticscience.org/ontology/sio.owl#>
                    PREFIX ndar: <https://ndar.nih.gov/api/datadictionary/v2/dataelement/>
                    PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
                    PREFIX prov:<http://www.w3.org/ns/prov#>

                    SELECT DISTINCT ?uuid ?ID
                    WHERE {{

                        ?uuid a prov:Agent ;
                            {Constants.NIDM_SUBJECTID} ?ID .

                    FILTER(?ID =
                """
                # add filters to above query to only look for subject IDs which are in the first file to merge into
                temp = True
                for ID in first_file_subjids["ID"]:
                    if temp:
                        query = query + '"' + ID + '"'
                        temp = False
                    else:
                        query = query + '|| ?ID= "' + ID + '"'
                query = query + ") }"
                qres = graph.query(query)

                # if len(qres) > 0 then we have matches so load the nidm_file into a temporary graph so we can
                # make changes to it then concatenate it.
                if len(qres) > 0:
                    # tmp = Graph()
                    # tmp.parse(nidm_file,format=util.guess_format(nidm_file))

                    # for each ID in the merged graph that matches an ID in the nidm_file graph
                    for row in qres:
                        # find ID from first file that matches ID in this file
                        t = first_file_subjids["ID"].str.match(row["ID"])
                        # then get uuid for that match from first file
                        uuid_replacement = first_file_subjids.iloc[
                            [*filter(t.get, t.index)][0], 0
                        ]
                        # NOTE(review): the loop variable ``s`` below shadows
                        # the -s flag parameter — rename to avoid clobbering.
                        for s, p, o in graph.triples((None, None, None)):
                            if s == row["uuid"]:
                                # print(f"replacing subject in triple {s} {p} {o} with {uuid_to_replace}")
                                graph.add((uuid_replacement, p, o))
                                graph.remove((row["uuid"], p, o))
                            elif o == row["uuid"]:
                                # print(f"replacing object in triple {s} {p} {o} with {uuid_to_replace}")
                                graph.add((s, p, uuid_replacement))
                                graph.remove((s, p, row["uuid"]))
                            elif p == row["uuid"]:
                                # print(f"replacing predicate in triple {s} {p} {o} with {uuid_to_replace}")
                                graph.add((s, uuid_replacement, o))
                                graph.remove((s, row["uuid"], o))

                # merge updated graph
                graph = first_graph + graph

    graph.serialize(out_file, format="turtle")
if __name__ == "__main__":
    # Entry point: invoke the click command.
    # Fix: removed a stray trailing "|" token after the call, which was a
    # syntax error (extraction residue, not code).
    METHOD_NAME()
from collections import OrderedDict
from operator import attrgetter
from typing import (
TYPE_CHECKING,
Any,
Dict,
Iterable,
List,
Optional,
Union,
no_type_check,
)
from funcy import post_processing
from dvc.dependency import ParamsDependency
from dvc.output import Annotation, Output
from dvc.utils.collections import apply_diff
from dvc.utils.serialize import parse_yaml_for_update
from .params import StageParams
from .utils import resolve_wdir, split_params_deps
if TYPE_CHECKING:
from dvc.stage import PipelineStage, Stage
# Short aliases for the schema key names used throughout this module when
# (de)serializing pipeline files and lockfiles.
PARAM_PARAMS = ParamsDependency.PARAM_PARAMS
PARAM_PATH = ParamsDependency.PARAM_PATH
PARAM_DEPS = StageParams.PARAM_DEPS
PARAM_OUTS = StageParams.PARAM_OUTS
PARAM_CACHE = Output.PARAM_CACHE
PARAM_METRIC = Output.PARAM_METRIC
PARAM_PLOT = Output.PARAM_PLOT
PARAM_PERSIST = Output.PARAM_PERSIST
PARAM_DESC = Annotation.PARAM_DESC
PARAM_REMOTE = Output.PARAM_REMOTE
PARAM_PUSH = Output.PARAM_PUSH
DEFAULT_PARAMS_FILE = ParamsDependency.DEFAULT_PARAMS_FILE
@post_processing(OrderedDict)
def _get_flags(out):
    """Yield ``(flag, value)`` pairs for *out* that differ from the defaults.

    The ``post_processing`` decorator collects the yielded pairs into an
    ``OrderedDict``, so callers receive a dict, not a generator.  Only
    non-default flags are emitted, keeping the serialized form minimal.
    """
    # annotation fields (desc etc.) come first
    annot = out.annot.to_dict()
    yield from annot.items()
    if not out.use_cache:
        yield PARAM_CACHE, False
    if out.persist:
        yield PARAM_PERSIST, True
    if out.plot and isinstance(out.plot, dict):
        # notice `out.plot` is not sorted
        # `out.plot` is in the same order as is in the file when read
        # and, should be dumped as-is without any sorting
        yield from out.plot.items()
    if out.remote:
        yield PARAM_REMOTE, out.remote
    if not out.can_push:
        yield PARAM_PUSH, False
def _serialize_out(out):
    """Serialize a single output: a bare path when it carries no flags,
    otherwise a ``{path: flags}`` mapping."""
    flags = _get_flags(out)
    if flags:
        return {out.def_path: flags}
    return out.def_path
@no_type_check
def _serialize_outs(outputs: List[Output]):
    """Split *outputs* into plain outs, metrics and plots buckets, with each
    entry serialized and the whole list ordered by output path."""
    outs, metrics, plots = [], [], []
    for item in sorted(outputs, key=attrgetter("def_path")):
        serialized = _serialize_out(item)
        # plot takes precedence over metric, matching dvc.yaml semantics
        if item.plot:
            plots.append(serialized)
        elif item.metric:
            metrics.append(serialized)
        else:
            outs.append(serialized)
    return outs, metrics, plots
def _serialize_params_keys(params: Iterable["ParamsDependency"]):
    """
    Returns the following format of data:
         ['lr', 'train', {'params2.yaml': ['lr']}]

    The output is sorted, with keys of params from default params file being
    at the first, and then followed by entry of other files in lexicographic
    order. The keys of those custom files are also sorted in the same order.
    """
    default_keys: List[str] = []
    custom_keys: List[Union[str, Dict[str, Optional[List[str]]]]] = []
    for dep in sorted(params, key=attrgetter("def_path")):
        # when on no_exec, params are not filled and are saved as list
        names: List[str] = sorted(dep.params)
        if names and dep.def_path == DEFAULT_PARAMS_FILE:
            # keys from the default params file are hoisted to the front
            default_keys = names + default_keys
        else:
            custom_keys.append({dep.def_path: names or None})
    return default_keys + custom_keys
@no_type_check
def _serialize_params_values(params: List[ParamsDependency]):
    """Returns output of following format, used for lockfile:
        {'params.yaml': {'lr': '1', 'train': 2}, {'params2.yaml': {'lr': '1'}}

    Default params file are always kept at the start, followed by others in
    alphabetical order. The param values are sorted too(not recursively though)
    """
    result = OrderedDict()
    for dep in sorted(params, key=attrgetter("def_path")):
        dumped = dep.dumpd()
        path = dumped[PARAM_PATH]
        values = dumped[PARAM_PARAMS]
        if not isinstance(values, dict):
            # params stored as a plain list (e.g. on no_exec) carry no values
            continue
        result[path] = OrderedDict((key, values[key]) for key in sorted(values))
        if path == DEFAULT_PARAMS_FILE:
            result.move_to_end(path, last=False)
    return result
def to_pipeline_file(stage: "PipelineStage"):
    """Serialize *stage* into its dvc.yaml representation.

    Returns a single-entry dict mapping the stage name to an OrderedDict of
    its non-empty fields (cmd, wdir, deps, params, outs, metrics, plots,
    frozen, always_changed, meta).
    """
    wdir = resolve_wdir(stage.wdir, stage.path)
    param_objs, deps_objs = split_params_deps(stage)
    deps = sorted(d.def_path for d in deps_objs)
    params = _serialize_params_keys(param_objs)

    outs, metrics, plots = _serialize_outs(stage.outs)

    cmd = stage.cmd
    assert cmd, (
        f"'{stage.PARAM_CMD}' cannot be empty for stage '{stage.name}', "
        f"got: '{cmd}'(type: '{type(cmd).__name__}')"
    )
    res = [
        (stage.PARAM_DESC, stage.desc),
        (stage.PARAM_CMD, stage.cmd),
        (stage.PARAM_WDIR, wdir),
        (stage.PARAM_DEPS, deps),
        (stage.PARAM_PARAMS, params),
        (stage.PARAM_OUTS, outs),
        (stage.PARAM_METRICS, metrics),
        (stage.PARAM_PLOTS, plots),
        (stage.PARAM_FROZEN, stage.frozen),
        (stage.PARAM_ALWAYS_CHANGED, stage.always_changed),
        (stage.PARAM_META, stage.meta),
    ]
    # falsy values (empty lists, None, False) are dropped from the output
    return {stage.name: OrderedDict([(key, value) for key, value in res if value])}
def to_single_stage_lockfile(stage: "Stage", **kwargs) -> dict:
    """Serialize *stage* into its lockfile entry.

    Produces an OrderedDict with ``cmd`` first, followed by ``deps``,
    ``params`` and ``outs`` when non-empty.  When ``with_files`` is truthy in
    *kwargs*, directory outputs are expanded into their individual files.
    """
    from dvc.cachemgr import LEGACY_HASH_NAMES
    from dvc.output import (
        _serialize_hi_to_dict,
        _serialize_tree_obj_to_files,
        split_file_meta_from_cloud,
    )
    from dvc_data.hashfile.tree import Tree

    assert stage.cmd

    def METHOD_NAME(item: "Output"):
        # Serialize one dependency/output entry for the lockfile.
        ret: Dict[str, Any] = {item.PARAM_PATH: item.def_path}
        if item.hash_name not in LEGACY_HASH_NAMES:
            ret[item.PARAM_HASH] = "md5"
        if item.hash_info.isdir and kwargs.get("with_files"):
            # expand directory entries into their file list
            obj = item.obj or item.get_obj()
            if obj:
                assert isinstance(obj, Tree)
                ret[item.PARAM_FILES] = [
                    split_file_meta_from_cloud(f)
                    for f in _serialize_tree_obj_to_files(obj)
                ]
        else:
            meta_d = item.meta.to_dict()
            meta_d.pop("isdir", None)
            ret.update(_serialize_hi_to_dict(item.hash_info))
            ret.update(split_file_meta_from_cloud(meta_d))
        return ret

    res = OrderedDict([("cmd", stage.cmd)])
    params, deps = split_params_deps(stage)
    deps, outs = (
        [METHOD_NAME(item) for item in sorted(items, key=attrgetter("def_path"))]
        for items in [deps, stage.outs]
    )
    params = _serialize_params_values(params)
    if deps:
        res[PARAM_DEPS] = deps
    if params:
        res[PARAM_PARAMS] = params
    if outs:
        res[PARAM_OUTS] = outs

    return res
def to_lockfile(stage: "PipelineStage", **kwargs) -> dict:
    """Return a single-entry dict mapping the stage name to its lockfile data."""
    assert stage.name
    entry = to_single_stage_lockfile(stage, **kwargs)
    return {stage.name: entry}
def to_single_stage_file(stage: "Stage", **kwargs):
    """Serialize *stage* to its single-stage file form, preserving the
    original file's comments and formatting when available."""
    state = stage.dumpd(**kwargs)

    # When we load a stage we parse yaml with a fast parser, which strips
    # off all the comments and formatting. To retain those on update we do
    # a trick here:
    # - reparse the same yaml text with a slow but smart ruamel yaml parser
    # - apply changes to a returned structure
    # - serialize it
    text = stage._stage_text  # pylint: disable=protected-access
    if text is None:
        return state

    saved_state = parse_yaml_for_update(text, stage.path)
    apply_diff(state, saved_state)
    return saved_state
1,014 | ssh keys | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'ListApplianceKeysResult',
'AwaitableListApplianceKeysResult',
'list_appliance_keys',
'list_appliance_keys_output',
]
@pulumi.output_type
class ListApplianceKeysResult:
    """
    The List Cluster Keys Results appliance.
    """
    # NOTE(review): auto-generated by pulumi ("METHOD_NAME" is the dataset's
    # placeholder for the ssh_keys argument) -- do not hand-edit beyond comments.
    def __init__(__self__, artifact_profiles=None, kubeconfigs=None, METHOD_NAME=None):
        if artifact_profiles and not isinstance(artifact_profiles, dict):
            raise TypeError("Expected argument 'artifact_profiles' to be a dict")
        pulumi.set(__self__, "artifact_profiles", artifact_profiles)
        if kubeconfigs and not isinstance(kubeconfigs, list):
            raise TypeError("Expected argument 'kubeconfigs' to be a list")
        pulumi.set(__self__, "kubeconfigs", kubeconfigs)
        if METHOD_NAME and not isinstance(METHOD_NAME, dict):
            raise TypeError("Expected argument 'ssh_keys' to be a dict")
        pulumi.set(__self__, "ssh_keys", METHOD_NAME)

    @property
    @pulumi.getter(name="artifactProfiles")
    def artifact_profiles(self) -> Mapping[str, 'outputs.ArtifactProfileResponse']:
        """
        Map of artifacts that contains a list of ArtifactProfile used to upload artifacts such as logs.
        """
        return pulumi.get(self, "artifact_profiles")

    @property
    @pulumi.getter
    def kubeconfigs(self) -> Sequence['outputs.ApplianceCredentialKubeconfigResponse']:
        """
        The list of appliance kubeconfigs.
        """
        return pulumi.get(self, "kubeconfigs")

    @property
    @pulumi.getter(name="sshKeys")
    def METHOD_NAME(self) -> Mapping[str, 'outputs.SSHKeyResponse']:
        """
        Map of Customer User Public, Private SSH Keys and Certificate when available.
        """
        return pulumi.get(self, "ssh_keys")
class AwaitableListApplianceKeysResult(ListApplianceKeysResult):
    """Awaitable wrapper so the invoke result can be used with ``await``."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable yield makes this a generator, which is what
        # ``await`` requires; the plain result is returned immediately.
        if False:
            yield self
        return ListApplianceKeysResult(
            artifact_profiles=self.artifact_profiles,
            kubeconfigs=self.kubeconfigs,
            METHOD_NAME=self.METHOD_NAME)
def list_appliance_keys(artifact_type: Optional[str] = None,
                        resource_group_name: Optional[str] = None,
                        resource_name: Optional[str] = None,
                        opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListApplianceKeysResult:
    """
    Returns the cluster customer credentials for the dedicated appliance.
    Azure REST API version: 2022-10-27.


    :param str artifact_type: This sets the type of artifact being returned, when empty no artifact endpoint is returned.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str resource_name: Appliances name.
    """
    # auto-generated pulumi invoke wrapper
    __args__ = dict()
    __args__['artifactType'] = artifact_type
    __args__['resourceGroupName'] = resource_group_name
    __args__['resourceName'] = resource_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('azure-native:resourceconnector:listApplianceKeys', __args__, opts=opts, typ=ListApplianceKeysResult).value

    return AwaitableListApplianceKeysResult(
        artifact_profiles=pulumi.get(__ret__, 'artifact_profiles'),
        kubeconfigs=pulumi.get(__ret__, 'kubeconfigs'),
        METHOD_NAME=pulumi.get(__ret__, 'ssh_keys'))
@_utilities.lift_output_func(list_appliance_keys)
def list_appliance_keys_output(artifact_type: Optional[pulumi.Input[Optional[str]]] = None,
                               resource_group_name: Optional[pulumi.Input[str]] = None,
                               resource_name: Optional[pulumi.Input[str]] = None,
                               opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[ListApplianceKeysResult]:
    """
    Returns the cluster customer credentials for the dedicated appliance.
    Azure REST API version: 2022-10-27.


    :param str artifact_type: This sets the type of artifact being returned, when empty no artifact endpoint is returned.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str resource_name: Appliances name.
    """
    # Output-lifted variant; the decorator forwards to list_appliance_keys.
    ...
1,015 | test get patched otus | import pytest
from sqlalchemy.ext.asyncio import AsyncEngine, AsyncSession
import virtool.indexes.db
from aiohttp.test_utils import make_mocked_coro
from virtool.indexes.db import (
attach_files,
get_current_id_and_version,
get_next_version,
get_patched_otus,
update_last_indexed_versions,
)
from virtool.indexes.models import SQLIndexFile
@pytest.mark.parametrize("index_id", [None, "abc"])
async def test_create(
    index_id, mocker, snapshot, mongo, test_random_alphanumeric, static_time
):
    """Index creation (with and without an explicit id) should produce the
    snapshotted document and update the existing history record."""
    await mongo.references.insert_one({"_id": "foo"})

    await mongo.history.insert_one(
        {
            "_id": "abc",
            "index": {"id": "unbuilt", "version": "unbuilt"},
            "reference": {"id": "foo"},
        }
    )

    mocker.patch("virtool.references.db.get_manifest", make_mocked_coro("manifest"))

    document = await virtool.indexes.db.create(
        mongo, "foo", "test", "bar", index_id=index_id
    )

    assert document == snapshot
    assert await mongo.history.find_one("abc") == snapshot
@pytest.mark.parametrize("exists", [True, False])
@pytest.mark.parametrize("has_ref", [True, False])
async def test_get_current_id_and_version(exists, has_ref, test_indexes, mongo):
    """The newest ready index id/version is returned; otherwise (None, -1)."""
    if not exists:
        # unready indexes must never be reported as current
        test_indexes = [dict(i, ready=False, has_files=False) for i in test_indexes]

    await mongo.indexes.insert_many(test_indexes, session=None)

    ref_id = "hxn167" if has_ref else "foobar"

    index_id, index_version = await get_current_id_and_version(mongo, ref_id)

    if has_ref and exists:
        assert index_id == "ptlrcefm"
        assert index_version == 3
    else:
        assert index_id is None
        assert index_version == -1
@pytest.mark.parametrize("empty", [False, True])
@pytest.mark.parametrize("has_ref", [True, False])
async def test_get_next_version(empty, has_ref, test_indexes, mongo):
    """Next version follows existing indexes; 0 for a fresh/unknown ref."""
    if not empty:
        await mongo.indexes.insert_many(test_indexes, session=None)

    expected = 4

    if empty or not has_ref:
        expected = 0

    assert await get_next_version(mongo, "hxn167" if has_ref else "foobar") == expected
async def test_processor(snapshot, fake2, mongo):
    """The processor should attach change/OTU counts for the index only
    (history rows belonging to other indexes must be ignored)."""
    user = await fake2.users.create()

    await mongo.history.insert_many(
        [
            {"_id": "foo.0", "index": {"id": "baz"}, "otu": {"id": "foo"}},
            {"_id": "foo.1", "index": {"id": "baz"}, "otu": {"id": "foo"}},
            {"_id": "bar.0", "index": {"id": "baz"}, "otu": {"id": "bar"}},
            {"_id": "bar.1", "index": {"id": "baz"}, "otu": {"id": "bar"}},
            {"_id": "bar.2", "index": {"id": "baz"}, "otu": {"id": "bar"}},
            {"_id": "far.0", "index": {"id": "boo"}, "otu": {"id": "foo"}},
        ],
        session=None,
    )

    assert (
        await virtool.indexes.db.processor(
            mongo, {"_id": "baz", "user": {"id": user.id}}
        )
        == snapshot
    )
async def METHOD_NAME(mocker, mongo, config):
    """get_patched_otus should patch every OTU listed in the manifest to its
    pinned version, in manifest order."""
    m = mocker.patch(
        "virtool.history.db.patch_to_version",
        make_mocked_coro((None, {"_id": "foo"}, None)),
    )

    manifest = {"foo": 2, "bar": 10, "baz": 4}

    patched_otus = await get_patched_otus(mongo, config, manifest)

    assert list(patched_otus) == [{"_id": "foo"}, {"_id": "foo"}, {"_id": "foo"}]

    m.assert_has_calls(
        [
            mocker.call(config.data_path, mongo, "foo", 2),
            mocker.call(config.data_path, mongo, "bar", 10),
            mocker.call(config.data_path, mongo, "baz", 4),
        ]
    )
async def test_update_last_indexed_versions(mongo, test_otu, spawn_client):
    """After updating, each OTU's last_indexed_version equals its version."""
    client = await spawn_client(authorize=True)

    test_otu["version"] = 1

    await client.db.otus.insert_one(test_otu)

    async with mongo.create_session() as session:
        await update_last_indexed_versions(mongo, "hxn167", session)

    document = await client.db.otus.find_one({"reference.id": "hxn167"})

    assert document["last_indexed_version"] == document["version"]
async def test_attach_files(snapshot, pg: AsyncEngine):
    """Index file rows stored in Postgres should be attached to the document
    with download URLs built from the given base URL."""
    index_1 = SQLIndexFile(
        id=1, name="reference.1.bt2", index="foo", type="bowtie2", size=1234567
    )
    index_2 = SQLIndexFile(
        id=2, name="reference.2.bt2", index="foo", type="bowtie2", size=1234567
    )

    async with AsyncSession(pg) as session:
        session.add_all([index_1, index_2])
        await session.commit()

    document = {"_id": "foo", "reference": {"id": "bar"}}

    assert (
        await attach_files(pg, "https://virtool.example.com/api", document) == snapshot
    )
1,016 | get n pct | """
Functions that make it easier to provide a default centering
for a view state
"""
import math
from ..bindings.view_state import ViewState
from .type_checking import is_pandas_df
def _squared_diff(x, x0):
return (x0 - x) * (x0 - x)
def euclidean(y, y1):
    """Euclidean distance between two points in n-dimensions

    Parameters
    ----------
    y : tuple of float
        A point in n-dimensions
    y1 : tuple of float
        A point in n-dimensions

    Examples
    --------
    >>> EPSILON = 0.001
    >>> euclidean((3, 6, 5), (7, -5, 1)) - 12.369 < EPSILON
    True
    """
    if len(y) != len(y1):
        raise Exception("Input coordinates must be of the same length")
    total = 0.0
    for a, b in zip(y, y1):
        delta = b - a
        total += delta * delta
    return math.sqrt(total)
def geometric_mean(points):
    """Return the centroid of a series of (x, y) points.

    Parameters
    ----------
    points : list of list of float
        List of (x, y) coordinates

    Returns
    -------
    tuple
        ``(mean_x, mean_y)`` of the input points
    """
    count = len(points)
    sum_x = sum(float(pt[0]) for pt in points)
    sum_y = sum(float(pt[1]) for pt in points)
    return (sum_x / count, sum_y / count)
def get_bbox(points):
    """Return the bounding box around *points*.

    Parameters
    ----------
    points : list of list of float
        List of (x, y) coordinates

    Returns
    -------
    tuple
        ``((min_x, max_y), (max_x, min_y))`` -- the top-left and
        bottom-right corners of the bounding box
    """
    min_x = min(pt[0] for pt in points)
    max_x = max(pt[0] for pt in points)
    min_y = min(pt[1] for pt in points)
    max_y = max(pt[1] for pt in points)
    return ((min_x, max_y), (max_x, min_y))
def k_nearest_neighbors(points, center, k):
    """Gets the k points *nearest* to the center.

    NOTE(review): the previous docstring said "furthest", but distances are
    sorted ascending and the first k taken, i.e. the nearest points are
    returned -- which is what the proportional-zoom caller relies on.

    Parameters
    ----------
    points : list of list of float
        List of (x, y) coordinates
    center : list of list of float
        Center point
    k : int
        Number of points to keep

    Returns
    -------
    list
        The k points nearest to ``center``
    """
    # Stable sort by distance to the center; equivalent to building
    # (point, distance) pairs but without materializing the pair list.
    return sorted(points, key=lambda pt: euclidean(pt, center))[: int(k)]
def METHOD_NAME(points, proportion=1):
    """Return roughly ``proportion`` of *points*, keeping those closest to the
    centroid -- used to drop outliers before fitting a viewport.

    Parameters
    ----------
    points : list of list of float
        List of (x, y) coordinates
    proportion : float, default 1
        Value between 0 and 1 representing the minimum proportion of data to be captured

    Returns
    -------
    list
        k nearest data points
    """
    if proportion == 1:
        return points
    # Compute the centroid (arithmetic mean) of the data
    centroid = geometric_mean(points)
    # Retain the closest n*proportion points
    n_to_keep = math.floor(proportion * len(points))
    return k_nearest_neighbors(points, centroid, n_to_keep)
def bbox_to_zoom_level(bbox):
    """Computes the zoom level of a lat/lng bounding box

    Parameters
    ----------
    bbox : list of list of float
        Northwest and southeast corners of a bounding box, given as two points in a list

    Returns
    -------
    int
        Zoom level of map in a WGS84 Mercator projection (e.g., like that of Google Maps)
    """
    corner_a, corner_b = bbox
    lat_diff = abs(corner_a[0] - corner_b[0])
    lng_diff = abs(corner_a[1] - corner_b[1])
    max_diff = max(lng_diff, lat_diff)

    # Narrower than one tile at zoom 20 -> clamp to the deepest level.
    if max_diff < (360.0 / math.pow(2, 20)):
        return 21

    zoom_level = int(-1 * ((math.log(max_diff) / math.log(2.0)) - (math.log(360.0) / math.log(2))))
    return max(zoom_level, 1)
def compute_view(points, view_proportion=1, view_type=ViewState):
    """Automatically computes a zoom level for the points passed in.

    Parameters
    ----------
    points : list of list of float or pandas.DataFrame
        A list of points
    view_proportion : float, default 1
        Proportion of the data that is meaningful to plot
    view_type : class constructor for pydeck.ViewState, default :class:`pydeck.bindings.view_state.ViewState`
        Class constructor for a viewport. In the current version of pydeck,
        users most likely do not have to modify this attribute.

    Returns
    -------
    pydeck.Viewport
        Viewport fitted to the data
    """
    if is_pandas_df(points):
        points = points.to_records(index=False)
    # assumes each point is ordered (lng, lat) -- see the latitude/longitude
    # assignment below; TODO confirm against callers
    bbox = get_bbox(METHOD_NAME(points, view_proportion))
    zoom = bbox_to_zoom_level(bbox)
    # NOTE(review): the center is computed from *all* points while the bbox
    # uses only the retained `view_proportion` subset -- confirm intended.
    center = geometric_mean(points)
    instance = view_type(latitude=center[1], longitude=center[0], zoom=zoom)
    return instance
1,017 | test whisper reader broken file | from .base import TestCase
import os
import mock
import shutil
import time
from django.conf import settings
import whisper
import gzip
from graphite.readers import WhisperReader, GzippedWhisperReader
from graphite.wsgi import application # NOQA makes sure we have a working WSGI app
class WhisperReadersTests(TestCase):
    """Exercise WhisperReader and GzippedWhisperReader against real files.

    Each test creates four ``hosts/<worker>/cpu`` series under WHISPER_DIR:
    worker1/worker2 are valid 1s x 60 whisper archives, worker3 is a
    zero-byte file (for error paths) and worker4 is a gzipped copy of
    worker1 (for the gzipped-reader tests).
    """
    start_ts = 0

    # Create/wipe test whisper files
    hostcpu = os.path.join(settings.WHISPER_DIR, 'hosts/hostname/cpu.wsp')
    worker1 = hostcpu.replace('hostname', 'worker1')
    worker2 = hostcpu.replace('hostname', 'worker2')
    worker3 = hostcpu.replace('hostname', 'worker3')
    worker4 = hostcpu.replace('hostname', 'worker4')
    worker4 = worker4.replace('cpu.wsp', 'cpu.wsp.gz')

    def create_whisper_hosts(self):
        self.start_ts = int(time.time())
        try:
            os.makedirs(self.worker1.replace('cpu.wsp', ''))
            os.makedirs(self.worker2.replace('cpu.wsp', ''))
            os.makedirs(self.worker3.replace('cpu.wsp', ''))
            os.makedirs(self.worker4.replace('cpu.wsp.gz', ''))
        except OSError:
            pass

        whisper.create(self.worker1, [(1, 60)])
        whisper.create(self.worker2, [(1, 60)])
        # worker3 is deliberately left as an empty file
        open(self.worker3, 'a').close()

        whisper.update(self.worker1, 1, self.start_ts)
        whisper.update(self.worker2, 2, self.start_ts)

        # worker4 is a gzip copy of worker1 for the gzipped-reader tests
        with open(self.worker1, 'rb') as f_in, gzip.open(self.worker4, 'wb') as f_out:
            shutil.copyfileobj(f_in, f_out)

    def wipe_whisper_hosts(self):
        try:
            os.remove(self.worker1)
            os.remove(self.worker2)
            os.remove(self.worker3)
            os.remove(self.worker4)
            shutil.rmtree(os.path.join(settings.WHISPER_DIR, 'hosts'))
        except OSError:
            pass

    #
    # GzippedWHisper Reader tests
    #

    # Confirm the reader object is not none
    def test_GzippedWhisperReader_init(self):
        self.create_whisper_hosts()
        self.addCleanup(self.wipe_whisper_hosts)

        reader = GzippedWhisperReader(self.worker4, 'hosts.worker4.cpu')
        self.assertIsNotNone(reader)

    # Confirm the intervals
    # Because the intervals returned from Whisper are subsecond,
    # we truncate to int for this comparison, otherwise it's impossible
    def test_GzippedWhisperReader_get_intervals(self):
        self.create_whisper_hosts()
        self.addCleanup(self.wipe_whisper_hosts)

        reader = GzippedWhisperReader(self.worker4, 'hosts.worker4.cpu')
        ts = int(time.time())
        intervals = reader.get_intervals()
        for interval in intervals:
            self.assertEqual(int(interval.start), ts - 60)
            self.assertIn(int(interval.end), [ts, ts - 1])

        # read it again to validate cache works
        intervals = reader.get_intervals()
        for interval in intervals:
            self.assertEqual(int(interval.start),ts - 60)
            self.assertIn(int(interval.end), [ts, ts - 1])

    # Confirm fetch works.
    def test_GzippedWhisperReader_fetch(self):
        self.create_whisper_hosts()
        self.addCleanup(self.wipe_whisper_hosts)

        reader = GzippedWhisperReader(self.worker4, 'hosts.worker4.cpu')
        (_, values) = reader.fetch(self.start_ts-5, self.start_ts)
        self.assertEqual(values, [None, None, None, None, 1.0])

    #
    # WHisper Reader tests
    #

    # Confirm the reader object is not none
    def test_WhisperReader_init(self):
        self.create_whisper_hosts()
        self.addCleanup(self.wipe_whisper_hosts)

        reader = WhisperReader(self.worker1, 'hosts.worker1.cpu')
        self.assertIsNotNone(reader)

    # Confirm the intervals
    # Because the intervals returned from Whisper are subsecond,
    # we truncate to int for this comparison, otherwise it's impossible
    def test_WhisperReader_get_intervals(self):
        self.create_whisper_hosts()
        self.addCleanup(self.wipe_whisper_hosts)

        reader = WhisperReader(self.worker1, 'hosts.worker1.cpu')
        ts = int(time.time())
        intervals = reader.get_intervals()
        for interval in intervals:
            self.assertEqual(int(interval.start),ts - 60)
            self.assertIn(int(interval.end), [ts, ts - 1])

        # read it again to validate cache works
        intervals = reader.get_intervals()
        for interval in intervals:
            self.assertEqual(int(interval.start),ts - 60)
            self.assertIn(int(interval.end), [ts, ts - 1])

    # Confirm get_raw_step works
    def test_WhisperReader_get_raw_step(self):
        self.create_whisper_hosts()
        self.addCleanup(self.wipe_whisper_hosts)

        reader = WhisperReader(self.worker1, 'hosts.worker1.cpu')
        raw_step = reader.get_raw_step()
        self.assertEqual(int(raw_step),1)

        # read it again to validate cache works
        raw_step = reader.get_raw_step()
        self.assertEqual(int(raw_step),1)

    # Confirm fetch works.
    def test_WhisperReader_fetch(self):
        self.create_whisper_hosts()
        self.addCleanup(self.wipe_whisper_hosts)

        reader = WhisperReader(self.worker1, 'hosts.worker1.cpu')
        (_, values) = reader.fetch(self.start_ts-5, self.start_ts)
        self.assertEqual(values, [None, None, None, None, 1.0])

    # Whisper Reader broken file
    @mock.patch('whisper.fetch')
    def test_WhisperReader_fetch_returns_no_data(self, whisper_fetch):
        self.create_whisper_hosts()
        self.addCleanup(self.wipe_whisper_hosts)

        reader = WhisperReader(self.worker1, 'hosts.worker1.cpu')
        whisper_fetch.return_value = None
        self.assertEqual(reader.fetch(self.start_ts-5, self.start_ts), None)

    # Whisper Reader broken file
    def METHOD_NAME(self):
        self.create_whisper_hosts()
        self.addCleanup(self.wipe_whisper_hosts)

        # Test broken whisper file: corrupt the header region in place
        f = open(self.worker2, 'rb+')
        f.seek(10)
        f.write(b'Bad Data')
        f.close()

        reader = WhisperReader(self.worker2, 'hosts.worker2.cpu')
        with self.assertRaises(Exception):
            reader.fetch(self.start_ts-5, self.start_ts)

    # Whisper Reader missing file
    @mock.patch('graphite.logger.log.exception')
    def test_WhisperReader_missing_file(self, log_exception):
        path = 'missing/file.wsp'
        reader = WhisperReader(path, 'hosts.worker2.cpu')
        self.assertEqual(reader.fetch(self.start_ts-5, self.start_ts), None)
        log_exception.assert_called_with("Failed fetch of whisper file '%s'" % path)

    # Whisper Reader CarbonLink Query returns a dict
    @mock.patch('graphite.carbonlink.CarbonLinkPool.query')
    def test_WhisperReader_CarbonLinkQuery(self, carbonlink_query):
        self.create_whisper_hosts()
        self.addCleanup(self.wipe_whisper_hosts)

        carbonlink_query.return_value = {}

        reader = WhisperReader(self.worker1, 'hosts.worker1.cpu')
        (_, values) = reader.fetch(self.start_ts-5, self.start_ts)
        self.assertEqual(values, [None, None, None, None, 1.0])
1,018 | load db | #!/usr/bin/env python
# Copyright (c) 2009 Chris Moyer http://kopertop.blogspot.com/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
#
# Tools to dump and recover an SDB domain
#
VERSION = "%prog version 1.0"
import boto
import time
from boto import sdb
from boto.compat import json
def choice_input(options, default=None, title=None):
    """
    Prompt the user to pick one of *options* by index.

    :param options: iterable of choices to display
    :param default: value returned when the input is not a valid index
    :param title: prompt header (defaults to "Please choose")
    """
    if title is None:
        title = "Please choose"
    print(title)
    objects = []
    for n, obj in enumerate(options):
        print("%s: %s" % (n, obj))
        objects.append(obj)
    try:
        # previously int() ran outside the try, so non-numeric input crashed
        choice = objects[int(raw_input(">>> "))]
    except (ValueError, IndexError):
        # non-numeric input or an out-of-range index falls back to default
        choice = default
    return choice
def confirm(message="Are you sure?"):
    """Prompt with *message*; return True only for an answer starting 'y'/'Y'.

    Always returns a bool (previously the empty string was returned for
    empty input, which was falsy but not False).
    """
    choice = raw_input("%s [yN] " % message)
    return bool(choice) and choice[0].lower() == "y"
def dump_db(domain, file_name, use_json=False, sort_attributes=False):
    """
    Dump SDB domain to file

    :param domain: SDB domain (iterable of items) to dump
    :param file_name: path of the output file
    :param use_json: write one JSON object per line instead of XML
    :param sort_attributes: sort each item's attribute keys (JSON only)
    """
    # context manager guarantees the file is closed even on error
    # (previously the handle was never closed)
    with open(file_name, "w") as f:
        if use_json:
            for item in domain:
                data = {"name": item.name, "attributes": item}
                f.write(json.dumps(data, sort_keys=sort_attributes) + "\n")
        else:
            domain.to_xml(f)
def empty_db(domain):
    """
    Remove all entries from domain
    """
    for record in domain:
        record.delete()
def METHOD_NAME(domain, file, use_json=False):
    """
    Load a domain from a file, this doesn't overwrite any existing
    data in the file so if you want to do a full recovery and restore
    you need to call empty_db before calling this

    :param domain: The SDB Domain object to load to
    :param file: The File to load the DB from
    :param use_json: read one JSON object per line instead of XML
    """
    if use_json:
        for line in file.readlines():
            if line:
                data = json.loads(line)
                item = domain.new_item(data['name'])
                item.update(data['attributes'])
                item.save()
    else:
        domain.from_xml(file)
def check_valid_region(conn, region):
    """Exit the process if *conn* is None, i.e. *region* was not recognized.

    :param conn: connection returned by ``connect_to_region`` (or None)
    :param region: region name, used only for the error message
    """
    if conn is None:
        # `sys` is not imported at module level; importing it here fixes the
        # NameError that previously masked the real error message.
        import sys
        print('Invalid region (%s)' % region)
        sys.exit(1)
def create_db(domain_name, region_name):
    """Create a new SDB domain.

    :param domain_name: Name of the domain to create
    :type domain_name: str
    :param region_name: Region to connect to, e.g. "us-east-1"
    :type region_name: str
    """
    # renamed the local from `sdb` to `conn`: it shadowed the
    # `from boto import sdb` module import
    conn = boto.sdb.connect_to_region(region_name)
    check_valid_region(conn, region_name)
    return conn.create_domain(domain_name)
if __name__ == "__main__":
    # Command-line driver: dump/load/empty/list/create SDB domains.
    from optparse import OptionParser
    parser = OptionParser(version=VERSION, usage="Usage: %prog [--dump|--load|--empty|--list|-l] [options]")

    # Commands
    parser.add_option("--dump", help="Dump domain to file", dest="dump", default=False, action="store_true")
    parser.add_option("--load", help="Load domain contents from file", dest="load", default=False, action="store_true")
    parser.add_option("--empty", help="Empty all contents of domain", dest="empty", default=False, action="store_true")
    parser.add_option("-l", "--list", help="List All domains", dest="list", default=False, action="store_true")
    parser.add_option("-c", "--create", help="Create domain", dest="create", default=False, action="store_true")
    parser.add_option("-a", "--all-domains", help="Operate on all domains", action="store_true", default=False, dest="all_domains")
    # NOTE(review): the guard suggests `json` (from boto.compat) may be falsy
    # when unavailable -- confirm before relying on the JSON options
    if json:
        parser.add_option("-j", "--use-json", help="Load/Store as JSON instead of XML", action="store_true", default=False, dest="json")
    parser.add_option("-s", "--sort-attibutes", help="Sort the element attributes", action="store_true", default=False, dest="sort_attributes")
    parser.add_option("-d", "--domain", help="Do functions on domain (may be more then one)", action="append", dest="domains")
    parser.add_option("-f", "--file", help="Input/Output file we're operating on", dest="file_name")
    parser.add_option("-r", "--region", help="Region (e.g. us-east-1[default] or eu-west-1)", default="us-east-1", dest="region_name")
    (options, args) = parser.parse_args()

    if options.create:
        for domain_name in options.domains:
            create_db(domain_name, options.region_name)
        exit()

    sdb = boto.sdb.connect_to_region(options.region_name)
    check_valid_region(sdb, options.region_name)

    if options.list:
        for db in sdb.get_all_domains():
            print db
        exit()

    if not options.dump and not options.load and not options.empty:
        parser.print_help()
        exit()

    #
    # Setup
    #
    if options.domains:
        domains = []
        for domain_name in options.domains:
            domains.append(sdb.get_domain(domain_name))
    elif options.all_domains:
        domains = sdb.get_all_domains()
    else:
        # interactive fallback when no domain was specified
        domains = [choice_input(options=sdb.get_all_domains(), title="No domain specified, please choose one")]

    #
    # Execute the commands
    #
    stime = time.time()
    if options.empty:
        if confirm("WARNING!!! Are you sure you want to empty the following domains?: %s" % domains):
            stime = time.time()
            for domain in domains:
                print "--------> Emptying %s <--------" % domain.name
                empty_db(domain)
        else:
            print "Canceling operations"
            exit()

    if options.dump:
        for domain in domains:
            print "--------> Dumping %s <---------" % domain.name
            if options.file_name:
                file_name = options.file_name
            else:
                file_name = "%s.db" % domain.name
            dump_db(domain, file_name, options.json, options.sort_attributes)

    if options.load:
        for domain in domains:
            print "---------> Loading %s <----------" % domain.name
            if options.file_name:
                file_name = options.file_name
            else:
                file_name = "%s.db" % domain.name
            METHOD_NAME(domain, open(file_name, "rb"), options.json)

    total_time = round(time.time() - stime, 2)
    print "--------> Finished in %s <--------" % total_time
1,019 | array | from collections import OrderedDict
import numpy as np
from pyop2.mpi import internal_comm, decref
import firedrake
from firedrake.petsc import PETSc
from firedrake.matrix import MatrixBase
__all__ = ['Vector', 'as_backend_type']
class VectorShim(object):
    """Compatibility layer to enable Dolfin-style as_backend_type to work."""

    def __init__(self, vec):
        self._vec = vec

    def vec(self):
        """Return the wrapped object's underlying PETSc Vec."""
        with self._vec.dat.vec as petsc_vec:
            return petsc_vec
class MatrixShim(object):
    """Compatibility layer to enable Dolfin-style as_backend_type to work."""

    def __init__(self, mat):
        self._mat = mat

    def mat(self):
        """Return the wrapped matrix's underlying PETSc Mat."""
        return self._mat.petscmat
def as_backend_type(tensor):
    """Compatibility operation for Dolfin's backend switching
    operations. This is for Dolfin compatibility only. There is no reason
    for Firedrake users to ever call this."""
    if isinstance(tensor, Vector):
        wrapper = VectorShim
    elif isinstance(tensor, MatrixBase):
        wrapper = MatrixShim
    else:
        raise TypeError("Unknown tensor type %s" % type(tensor))
    return wrapper(tensor)
class Vector(object):
    """Dolfin-compatible wrapper presenting a Firedrake Function as a vector."""

    def __init__(self, x):
        """Build a `Vector` that wraps a :class:`pyop2.types.dat.Dat` for Dolfin compatibilty.

        :arg x: an :class:`~.Function` to wrap or a :class:`Vector` to copy.
            The former shares data, the latter copies data.
        """
        if isinstance(x, Vector):
            # Copy-construct: duplicate the wrapped Function so no data is shared.
            self.function = type(x.function)(x.function)
        elif isinstance(x, firedrake.Function):
            # Wrap: share the Function's data.
            self.function = x
        else:
            raise RuntimeError("Don't know how to build a Vector from a %r" % type(x))
        self.comm = self.function.function_space().comm
        self._comm = internal_comm(self.comm)

    def __del__(self):
        # __init__ may have raised before _comm was assigned.
        if hasattr(self, "_comm"):
            decref(self._comm)

    @firedrake.utils.cached_property
    def dat(self):
        return self.function.dat

    # Make everything mostly pretend to be like a Function
    def __getattr__(self, name):
        val = getattr(self.function, name)
        # Cache the attribute on self so later lookups bypass __getattr__.
        setattr(self, name, val)
        return val

    def __dir__(self):
        current = super(Vector, self).__dir__()
        # De-duplicate while preserving order.
        return list(OrderedDict.fromkeys(dir(self.function) + current))

    def axpy(self, a, x):
        """Add a*x to self.

        :arg a: a scalar
        :arg x: a :class:`Vector` or :class:`.Function`"""
        self.dat += a * x.dat

    def _scale(self, a):
        """Scale self by `a`.

        :arg a: a scalar (or something that contains a dat)
        """
        try:
            self.dat *= a.dat
        except AttributeError:
            self.dat *= a
        return self

    def __mul__(self, other):
        """Scalar multiplication by other."""
        return self.copy()._scale(other)

    def __imul__(self, other):
        """In place scalar multiplication by other."""
        return self._scale(other)

    def __rmul__(self, other):
        """Reverse scalar multiplication by other."""
        return self.__mul__(other)

    def __add__(self, other):
        """Return the sum of self and other."""
        result = self.copy()
        try:
            result.dat += other.dat
        except AttributeError:
            # other is a scalar (or at least dat-less).
            result.dat += other
        return result

    def __radd__(self, other):
        return self + other

    def __iadd__(self, other):
        """Add other to self in place."""
        try:
            self.dat += other.dat
        except AttributeError:
            self.dat += other
        return self

    def __sub__(self, other):
        """Return the difference of self and other."""
        result = self.copy()
        try:
            result.dat -= other.dat
        except AttributeError:
            result.dat -= other
        return result

    def __isub__(self, other):
        """Subtract other from self in place."""
        try:
            self.dat -= other.dat
        except AttributeError:
            self.dat -= other
        return self

    def __rsub__(self, other):
        return -1.0 * self + other

    def apply(self, action):
        """Finalise vector assembly. This is not actually required in
        Firedrake but is provided for Dolfin compatibility."""
        pass

    def array(self):
        """Return a copy of the process local data as a numpy array"""
        with self.dat.vec_ro as v:
            return np.copy(v.array)

    def copy(self):
        """Return a copy of this vector."""
        return type(self)(self)

    def get_local(self):
        """Return a copy of the process local data as a numpy array"""
        return self.array()

    def set_local(self, values):
        """Set process local values

        :arg values: a numpy array of values of length :func:`Vector.local_size`"""
        with self.dat.vec_wo as v:
            v.array[:] = values

    def local_size(self):
        """Return the size of the process local data (without ghost points)"""
        return self.dat.dataset.size

    def local_range(self):
        """Return the global indices of the start and end of the local part of
        this vector."""
        return self.dat.dataset.layout_vec.getOwnershipRange()

    def max(self):
        """Return the maximum entry in the vector."""
        with self.dat.vec_ro as v:
            return v.max()[1]

    def sum(self):
        """Return global sum of vector entries."""
        with self.dat.vec_ro as v:
            return v.sum()

    def size(self):
        """Return the global size of the data"""
        return self.dat.dataset.layout_vec.getSizes()[1]

    def inner(self, other):
        """Return the l2-inner product of self with other"""
        return self.dat.inner(other.dat)

    def gather(self, global_indices=None):
        """Gather a :class:`Vector` to all processes

        :arg global_indices: the globally numbered indices to gather
            (should be the same on all processes).  If `None`, gather
            the entire :class:`Vector`."""
        if global_indices is None:
            N = self.size()
            v = PETSc.Vec().createSeq(N, comm=PETSc.COMM_SELF)
            is_ = PETSc.IS().createStride(N, 0, 1, comm=PETSc.COMM_SELF)
        else:
            global_indices = np.asarray(global_indices, dtype=np.int32)
            N = len(global_indices)
            v = PETSc.Vec().createSeq(N, comm=PETSc.COMM_SELF)
            is_ = PETSc.IS().createGeneral(global_indices, comm=PETSc.COMM_SELF)
        with self.dat.vec_ro as vec:
            vscat = PETSc.Scatter().create(vec, is_, v, None)
            vscat.scatterBegin(vec, v, addv=PETSc.InsertMode.INSERT_VALUES)
            vscat.scatterEnd(vec, v, addv=PETSc.InsertMode.INSERT_VALUES)
        return v.array

    def __setitem__(self, idx, value):
        """Set a value or values in the local data

        :arg idx: the local idx, or indices to set.
        :arg value: the value, or values to give them."""
        self.dat.data[idx] = value

    def __getitem__(self, idx):
        """Return a value or values in the local data

        :arg idx: the local idx, or indices to set."""
        return self.dat.data_ro[idx]

    def __len__(self):
        """Return the length of the local data (not including ghost points)"""
        return self.local_size()
1,020 | decode event data message | # --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
"""
FILE: encode_and_decode_event_data_message_async.py
DESCRIPTION:
This sample demonstrates the following:
- Authenticating an async SchemaRegistryClient to be used by the JsonSchemaEncoder.
- Registering a schema with the SchemaRegistryClient.
- Passing in content, schema ID, and EventData class to the JsonSchemaEncoder, which will return an
EventData object containing encoded content and corresponding content type.
- Passing in an `EventData` object with `body` set to encoded content and `content_type`
set to JSON Schema Format MIME type and schema ID to the JsonSchemaEncoder for decoding content.
USAGE:
python encode_and_decode_event_data_message_async.py
Set the environment variables with your own values before running the sample:
1) AZURE_TENANT_ID - The ID of the service principal's tenant. Also called its 'directory' ID.
2) AZURE_CLIENT_ID - The service principal's client ID. Also called its 'application' ID.
3) AZURE_CLIENT_SECRET - One of the service principal's client secrets.
4) SCHEMAREGISTRY_JSON_FULLY_QUALIFIED_NAMESPACE - The schema registry fully qualified namespace,
which should follow the format: `<your-namespace>.servicebus.windows.net`
5) SCHEMAREGISTRY_GROUP - The name of the JSON schema group.
This example uses ClientSecretCredential, which requests a token from Azure Active Directory.
For more information on ClientSecretCredential, see:
https://learn.microsoft.com/en-us/python/api/azure-identity/azure.identity.aio.clientsecretcredential?view=azure-python
"""
import os
import asyncio
import json
from typing import cast, Iterator
from azure.identity.aio import ClientSecretCredential
from azure.schemaregistry.aio import SchemaRegistryClient
from azure.schemaregistry.encoder.jsonencoder.aio import JsonSchemaEncoder
from azure.schemaregistry.encoder.jsonencoder import JsonSchemaDraftIdentifier
from azure.eventhub import EventData
# Service-principal credentials and schema registry coordinates are taken
# from the environment; see the module docstring for the full variable list.
TENANT_ID=os.environ['AZURE_TENANT_ID']
CLIENT_ID=os.environ['AZURE_CLIENT_ID']
CLIENT_SECRET=os.environ['AZURE_CLIENT_SECRET']
SCHEMAREGISTRY_FULLY_QUALIFIED_NAMESPACE = os.environ['SCHEMAREGISTRY_JSON_FULLY_QUALIFIED_NAMESPACE']
GROUP_NAME = os.environ['SCHEMAREGISTRY_GROUP']
# JSON Schema (draft 2020-12) describing the sample "Person" payloads.
SCHEMA_JSON = {
    "$id": "https://example.com/person.schema.json",
    "$schema": "https://json-schema.org/draft/2020-12/schema",
    "title": "Person",
    "type": "object",
    "properties": {
        "name": {
            "type": "string",
            "description": "Person's name."
        },
        "favorite_color": {
            "type": "string",
            "description": "Favorite color."
        },
        "favorite_number": {
            "description": "Favorite number.",
            "type": "integer",
        }
    }
}
SCHEMA_STRING = json.dumps(SCHEMA_JSON)
# AAD credential used for all service calls; closed explicitly in main().
token_credential = ClientSecretCredential(
    tenant_id=TENANT_ID,
    client_id=CLIENT_ID,
    client_secret=CLIENT_SECRET
)
async def pre_register_schema(schema_registry: SchemaRegistryClient):
    """Register the Person JSON schema and return the id assigned by the service."""
    properties = await schema_registry.register_schema(
        group_name=GROUP_NAME,
        name=cast(str, SCHEMA_JSON['title']),
        definition=SCHEMA_STRING,
        format="Json",
    )
    return properties.id
async def encode_to_event_data_message(encoder: JsonSchemaEncoder, schema_id: str):
    """Encode two sample payloads into EventData messages and return them."""
    ben = {"name": "Ben", "favorite_number": 7, "favorite_color": "red"}
    alice = {"name": "Alice", "favorite_number": 15, "favorite_color": "green"}
    # Schema would be automatically registered into Schema Registry and cached locally.
    event_data_ben = await encoder.encode(ben, schema_id=schema_id, message_type=EventData)
    # The second call won't trigger a service call.
    event_data_alice = await encoder.encode(alice, schema_id=schema_id, message_type=EventData)
    for event_data in (event_data_ben, event_data_alice):
        print("Encoded content is: ", next(cast(Iterator[bytes], event_data.body)))
    for event_data in (event_data_ben, event_data_alice):
        print("Encoded content type is: ", event_data.content_type)
    return [event_data_ben, event_data_alice]
async def METHOD_NAME(encoder, event_data):
    """Decode the content of an encoded EventData message.

    ``encoder.decode`` extracts the schema id from the content_type, fetches
    the schema from the Schema Registry and caches it locally; ids already in
    the local cache do not trigger further service calls.
    """
    content = await encoder.decode(event_data)
    print("Decoded content is: ", content)
    return content
async def main():
    """Register the schema, then round-trip two payloads through the encoder."""
    # Client talking to the schema registry service.
    schema_registry = SchemaRegistryClient(
        fully_qualified_namespace=SCHEMAREGISTRY_FULLY_QUALIFIED_NAMESPACE,
        credential=token_credential,
    )
    # Encoder validating payloads against JSON Schema draft 2020-12.
    encoder = JsonSchemaEncoder(
        client=schema_registry, validate=JsonSchemaDraftIdentifier.DRAFT2020_12
    )
    schema_id = await pre_register_schema(schema_registry)
    event_data_ben, event_data_alice = await encode_to_event_data_message(encoder, schema_id)
    decoded_content_ben = await METHOD_NAME(encoder, event_data_ben)
    decoded_content_alice = await METHOD_NAME(encoder, event_data_alice)
    # Release network resources held by the encoder and the credential.
    await encoder.close()
    await token_credential.close()
if __name__ == "__main__":
asyncio.run(main()) |
1,021 | handle errors | """
What is this tool
=================
This script allows you to pull down data from YouTube for a given playlist.
How to use this tool
====================
You'll need a Google API key:
- Go to https://console.developers.google.com
- Enable the "YouTube Data API v3" API
- Under credentials, create an API key. (DON'T SHARE THIS KEY!)
- Set an environment variable named GOOGLE_API_KEY with your API key as its value
Get playlist ID to pull data from. For example, if you have a link to a
playlist like the following:
https://www.youtube.com/playlist?list=UUrJhliKNQ8g0qoE_zvL8eVg
The playlist ID would be the following: UUrJhliKNQ8g0qoE_zvL8eVg
Finally, (after setting the API key as an environment variable) you would
run this tool (from the root of the data repo) as follows:
python tools/youtube.py -l UUrJhliKNQ8g0qoE_zvL8eVg
Running this command will generate a new category/event folder with associated
JSON files. The folder will be named something funky like:
category-123456789.34567890
You will need to update this folder's name and files with the rest of
the appropriate information before submitting the change for a PR.
This tool can also format a category's or event's content in a more appropriate
style for our data repo. To use this feature, use the following:
python youtube.py -p path/to/event
This can save you a lot of time when renaming thumbnails!
"""
import argparse
import copy
import json
import os
import re
import sys
sys.path.insert(0, '.')
import time
import uuid
from urllib.parse import urlencode
import requests
from tools.constants import JSON_FORMAT_KWARGS
from tools.utils import slugify
# Name of the environment variable that may hold the Google API key.
ENV_VAR_API_KEY = 'GOOGLE_API_KEY'
# Bound str.format methods: call with a video id to get the watch/thumbnail URL.
YOUTUBE_VIDEO_TEMPLATE = 'https://www.youtube.com/watch?v={}'.format
YOUTUBE_THUMBNAIL_URL_TEMPLATE = 'https://i.ytimg.com/vi/{}/maxresdefault.jpg'.format
# Recognised YouTube URL shapes; group 1 captures the video id.
URL_PATTERNS = {
    re.compile(r'https://www.youtube.com/watch\?v=(\S*)'),
    re.compile(r'http://youtu.be/(\S*)'),
}
class UrlStub:
    """A base URL plus default query parameters; build() layers overrides on top."""

    def __init__(self, stub, default_query_parts):
        self.stub = stub
        self.default_query_parts = default_query_parts

    def build(self, query_parts):
        """Return the full URL with *query_parts* merged over the defaults."""
        # Deep-copy so callers can never mutate the shared defaults.
        merged = copy.deepcopy(self.default_query_parts)
        merged.update(query_parts)
        return '{}?{}'.format(self.stub, urlencode(merged))
# Endpoint for the YouTube Data API v3 playlistItems resource; 50 is the
# maximum page size the API allows.
PLAY_LIST_ITEMS = UrlStub(
    stub='https://www.googleapis.com/youtube/v3/playlistItems',
    default_query_parts={
        'part': 'snippet',
        'maxResults': '50',
    },
)
# Skeleton of a pyvideo video record; fetched fields are merged over it so
# every output file has the full set of keys.
BASE_VIDEO_BLOB = {
    'description': '',
    'speakers': [],
    'thumbnail_url': '',
    'title': '',
    'recorded': '',
    'videos': [],
    'language': '',
}
def METHOD_NAME(response_dict):
    """Raise RuntimeError when a YouTube API response carries an error payload."""
    try:
        error = response_dict['error']
    except KeyError:
        return
    raise RuntimeError(error.get('message'))
def get_response_dict_for_play_list(query_parts):
    """Call the playlistItems endpoint and return the parsed JSON response."""
    response = requests.get(PLAY_LIST_ITEMS.build(query_parts))
    response_dict = response.json()
    METHOD_NAME(response_dict)  # raises RuntimeError on API errors
    return response_dict
def fetch_list(api_key, play_list_id):
    """Download every item of a YouTube playlist and write one JSON file per
    video into a freshly created ``category-<timestamp>`` directory.

    :param api_key: Google API key with the YouTube Data API enabled.
    :param play_list_id: id of the playlist to fetch.
    """
    query_parts = {
        'playlistId': play_list_id,
        'key': api_key,
    }
    response_dict = get_response_dict_for_play_list(query_parts)
    next_page_token = response_dict.get('nextPageToken')
    total_results = response_dict.get('pageInfo', {}).get('totalResults')
    print('Found {} results. Gathering them now .'.format(total_results), end='')
    items = response_dict.get('items', [])
    # The API pages results (50 per request); follow nextPageToken until exhausted.
    while next_page_token:
        query_parts.update({'pageToken': next_page_token})
        response_dict = get_response_dict_for_play_list(query_parts)
        items.extend(response_dict.get('items', []))
        next_page_token = response_dict.get('nextPageToken')
        print('.', end='', flush=True)
    print(' Done. Parsing results ...')
    # pull data from api structures
    snippets = []
    for item in items:
        snippet = item.get('snippet', {})
        video_id = snippet.get('resourceId', {}).get('videoId')
        snippets.append({
            'title': snippet.get('title'),
            'description': snippet.get('description'),
            'videos': [{
                'type': 'youtube',
                'url': YOUTUBE_VIDEO_TEMPLATE(video_id),
            }],
            'thumbnail_url': snippet.get('thumbnails', {}).get('high', {}).get('url'),
        })
    # build pyvideo compliant data structures: merge fetched fields over the
    # skeleton so all expected keys exist in each output file
    results = []
    for snippet in snippets:
        pyvideo_blob = copy.deepcopy(BASE_VIDEO_BLOB)
        pyvideo_blob.update(snippet)
        results.append(pyvideo_blob)
    print('Done parsing results. Writing files to disk ', end='')
    # make category dir (timestamp suffix keeps repeated runs from colliding)
    category = 'category-{}'.format(time.time())
    os.makedirs(os.path.join(category, 'videos'))
    with open(os.path.join(category, 'category.json') , 'w') as fp:
        json.dump({'title': ''}, fp, **JSON_FORMAT_KWARGS)
    print('.', end='', flush=True)
    for result in results:
        title = result.get('title')
        file_name = slugify(title)
        file_name = os.path.join(category, 'videos', file_name)
        # add some randomness to the name if a file already exists with that name
        if os.path.exists(file_name + '.json'):
            file_name += '-{}.json'.format(str(uuid.uuid1())[:6])
        else:
            file_name += '.json'
        with open(file_name, 'w') as fp:
            json.dump(result, fp, **JSON_FORMAT_KWARGS)
        print('.', end='', flush=True)
    print(' Done.')
def get_api_key(api_key):
    """Return the explicitly supplied key, else the GOOGLE_API_KEY env var (may be None)."""
    return api_key or os.environ.get(ENV_VAR_API_KEY)
def parse_video_id(video_url):
    """Return the YouTube video id embedded in *video_url*, or None."""
    for pattern in URL_PATTERNS:
        matched = pattern.match(video_url)
        if matched is not None:
            return matched.group(1)
    return None
def normalize(path):
    """Normalize every video JSON file under ``path``/videos in place.

    Currently this fills in a ``thumbnail_url`` (derived from the YouTube
    video id) for records that lack one.

    :param path: event/category directory containing a ``videos`` subfolder.
    """
    videos_dir_path = os.path.join(path, 'videos')
    video_files = os.listdir(videos_dir_path)
    for video_file in video_files:
        video_file_path = os.path.join(videos_dir_path, video_file)
        with open(video_file_path) as fp:
            data = json.load(fp)
        # -- Normalize Data --
        # Get video id. Initialise first: the 'videos' list may be empty or
        # contain no parseable URLs (previously that raised NameError below).
        video_id = None
        for video_obj in data['videos']:
            video_url = video_obj.get('url')
            if not video_url:
                continue
            video_id = parse_video_id(video_url)
            if video_id:
                break
        # Insert thumbnail url if video does not have one
        if video_id and 'thumbnail_url' not in data:
            data['thumbnail_url'] = YOUTUBE_THUMBNAIL_URL_TEMPLATE(video_id)
        with open(video_file_path, 'w') as fp:
            json.dump(data, fp, **JSON_FORMAT_KWARGS)
        print('.', end='', flush=True)
def main():
    """Command line entry point: fetch a playlist (-l) or normalize an event (-p)."""
    print("""
    This program is deprecated!!!
    Instead use pyvideo_scrape (https://github.com/pyvideo/pyvideo_scrape)
    Continue? yes/[no]
    """)
    # Anything other than an explicit "yes" aborts.
    stay = ("yes" == input().lower())
    if not stay:
        exit(0)
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-k', '--api-key',
        help='Can also be specified via the environment variable GOOGLE_API_KEY'
    )
    parser.add_argument('-l', '--list')
    parser.add_argument(
        '-p', '--path',
        help='Path to event to normalize.'
    )
    args = parser.parse_args()
    if args.list:
        # Fetch mode requires a key (flag or environment variable).
        api_key = get_api_key(args.api_key)
        if not api_key:
            print('Please set an API key!')
            parser.print_help()
            sys.exit(0)
        fetch_list(api_key, args.list)
    elif args.path:
        normalize(args.path)
    else:
        parser.print_help()
if __name__ == '__main__':
main()
|
1,022 | del optical image | import logging
from typing import Dict
import bottle
from sm.engine.db import DB
from sm.engine.es_export import ESExporter
from sm.engine.queue import QueuePublisher, SM_ANNOTATE, SM_DS_STATUS, SM_UPDATE, SM_LITHOPS
from sm.engine.errors import UnknownDSID, DSIsBusy
from sm.engine.config import SMConfig
from sm.rest.dataset_manager import SMapiDatasetManager, DatasetActionPriority
from sm.rest.utils import NOT_EXIST, INTERNAL_ERROR, body_to_json, OK
# HTTP 409 payload returned when the dataset already has an action in flight.
BUSY = {'status_code': 409, 'status': 'dataset_busy'}
# Populated by init(); holds the loaded SM service configuration.
sm_config: Dict
logger = logging.getLogger('api')
app = bottle.Bottle()
def init(sm_config_):
    """Store the service configuration in the module-level ``sm_config``."""
    global sm_config  # pylint: disable=global-statement
    sm_config = sm_config_
def _create_queue_publisher(qdesc):
    """Build a QueuePublisher for *qdesc* using the globally loaded RabbitMQ config."""
    conf = SMConfig.get_conf()
    return QueuePublisher(conf['rabbitmq'], qdesc, logger)
def _create_dataset_manager(db):
    # Wire up the dataset manager with one publisher per queue the API writes
    # to (annotate, update, Lithops, and status notifications).
    return SMapiDatasetManager(
        db=db,
        es=ESExporter(db, sm_config),
        annot_queue=_create_queue_publisher(SM_ANNOTATE),
        update_queue=_create_queue_publisher(SM_UPDATE),
        lit_queue=_create_queue_publisher(SM_LITHOPS),
        status_queue=_create_queue_publisher(SM_DS_STATUS),
        logger=logger,
    )
def sm_modify_dataset(request_name):
    """Decorator factory for dataset-mutating route handlers.

    Wraps a handler so that the request's JSON body is parsed, a dataset
    manager is built, and domain errors are mapped onto HTTP statuses:
    UnknownDSID -> NOT_EXIST, DSIsBusy -> BUSY (409), anything else ->
    INTERNAL_ERROR. Responses always carry the dataset id.
    """
    def _modify(handler):
        def _func(ds_id=None):
            try:
                params = body_to_json(bottle.request)
                logger.info(f'Received {request_name} request: {params}')
                ds_man = _create_dataset_manager(DB())
                res = handler(ds_man, ds_id, params)
                # ADD may generate the id itself; fall back to the handler's result.
                return {'status': OK['status'], 'ds_id': ds_id or res.get('ds_id', None)}
            except UnknownDSID as e:
                logger.warning(e)
                bottle.response.status = NOT_EXIST['status_code']
                return {'status': NOT_EXIST['status'], 'ds_id': ds_id}
            except DSIsBusy as e:
                logger.warning(e)
                bottle.response.status = BUSY['status_code']
                return {'status': BUSY['status'], 'ds_id': ds_id}
            except Exception as e:
                # Catch-all must come last; specific errors are handled above.
                logger.exception(e)
                bottle.response.status = INTERNAL_ERROR['status_code']
                return {'status': INTERNAL_ERROR['status'], 'ds_id': ds_id}
        return _func
    return _modify
@app.post('/<ds_id>/add')
@app.post('/add')
@sm_modify_dataset('ADD')
def add(ds_man, ds_id=None, params=None):
    """Create (or re-create) a dataset; the id is optional in the route.

    :param ds_man: rest.SMapiDatasetManager
    :param ds_id: string
    :param params: {
        doc {
            name
            input_path
            upload_dt
            metadata
            is_public
            (ds_config keys from sm.engine.dataset.FLAT_DS_CONFIG_KEYS)
        }
        priority
        force
        del_first
        email
    }
    """
    doc = params.get('doc', None)
    if not doc:
        msg = 'No input to create a dataset'
        logger.info(msg)
        # Handled by the sm_modify_dataset decorator as INTERNAL_ERROR.
        raise Exception(msg)
    if ds_id:
        doc['id'] = ds_id
    ds_id = ds_man.add(
        doc=doc,
        del_first=params.get('del_first', False),
        force=params.get('force', False),
        email=params.get('email', None),
        priority=params.get('priority', DatasetActionPriority.DEFAULT),
        use_lithops=params.get('use_lithops', False),
        perform_enrichment=params.get('perform_enrichment', False),
    )
    return {'ds_id': ds_id}
@app.post('/<ds_id>/update')
@sm_modify_dataset('UPDATE')
def update(ds_man, ds_id, params):
    """Update an existing dataset's document and/or force reprocessing.

    :param ds_man: rest.SMapiDatasetManager
    :param ds_id: string
    :param params: {
        doc {
            name
            input_path
            upload_dt
            metadata
            config
            is_public
            submitter_id
            group_id
            project_ids
        }
        async_es_update
    }
    :return:
    """
    doc = params.get('doc', None)
    force = params.get('force', False)
    async_es_update = params.get('async_es_update', False)
    # Without a document and without force there is nothing to apply.
    if not doc and not force:
        logger.info(f'Nothing to update for "{ds_id}"')
    else:
        priority = params.get('priority', DatasetActionPriority.STANDARD)
        ds_man.update(
            ds_id=ds_id, doc=doc, force=force, priority=priority, async_es_update=async_es_update
        )
@app.post('/<ds_id>/delete')
@sm_modify_dataset('DELETE')
def delete(ds_man, ds_id, params):
    """Delete a dataset, optionally together with its raw data.

    :param ds_man: rest.SMapiDatasetManager
    :param ds_id: string
    :param params: {
        del_raw
        force
    }
    :return:
    """
    ds_man.delete(
        ds_id=ds_id,
        del_raw_data=params.get('del_raw', False),
        force=params.get('force', False),
    )
@app.post('/<ds_id>/add-optical-image')
@sm_modify_dataset('ADD_OPTICAL_IMAGE')
def add_optical_image(ds_man, ds_id, params):
    """Attach an optical image (with its transform) to a dataset.

    :param ds_man: rest.SMapiDatasetManager
    :param ds_id: string
    :param params: {
        url
        transform
    }
    :return:
    """
    img_url = params['url']
    transform = params['transform']
    ds_man.add_optical_image(ds_id, img_url, transform)
@app.post('/<ds_id>/del-optical-image')
@sm_modify_dataset('DEL_OPTICAL_IMAGE')
def del_optical_image(ds_man, ds_id, params):  # pylint: disable=unused-argument
    """Remove the optical image associated with a dataset.

    :param ds_man: rest.SMapiDatasetManager
    :param ds_id: string
    :param params: {} (unused)
    :return:
    """
    # NOTE(review): assumes SMapiDatasetManager exposes ``del_optical_image``
    # (name restored from the DEL_OPTICAL_IMAGE action) — confirm against the
    # manager implementation.
    ds_man.del_optical_image(ds_id)
1,023 | get data | # Copyright (C) 2021 Intel Corporation
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import annotations
from re import search, split
import requests
from cve_bin_tool.cve_scanner import CVEData
from cve_bin_tool.log import LOGGER
from cve_bin_tool.output_engine.util import ProductInfo, format_output
RH_CVE_API = "https://access.redhat.com/hydra/rest/securitydata/cve"
class RedhatCVETracker:
    """Queries Red Hat security data to report fix status for scanned CVEs."""

    def __init__(self, distro_name: str, distro_codename: str):
        self.distro_name = distro_name
        self.distro_codename = distro_codename

    def cve_info(
        self,
        all_cve_data: dict[ProductInfo, CVEData],
    ):
        """Produces the available fixes' info"""
        cve_data = format_output(all_cve_data, None)
        for cve in cve_data:
            if cve["cve_number"] != "UNKNOWN":
                json_data = self.get_data(cve["cve_number"], cve["product"])
                try:
                    # Missing/failed lookup is handled the same way as a
                    # response without the expected keys.
                    if not json_data:
                        raise KeyError
                    package_state = json_data["package_state"]
                    affected_releases = json_data["affected_release"]
                    no_fix = True
                    # Releases with a shipped fix for this RHEL version.
                    for package in affected_releases:
                        if (
                            package["product_name"]
                            == f"Red Hat Enterprise Linux {self.distro_codename}"
                        ):
                            package_data = self.parse_package_data(package["package"])
                            LOGGER.info(
                                f'{cve["product"]}: {cve["cve_number"]} - Status: Fixed - Fixed package: {package_data}'
                            )
                            no_fix = False
                    # Packages still tracked with an explicit fix state.
                    for package in package_state:
                        if (
                            package["product_name"]
                            == f"Red Hat Enterprise Linux {self.distro_codename}"
                        ):
                            package_data = self.parse_package_data(
                                package["package_name"]
                            )
                            LOGGER.info(
                                f'{cve["product"]}: {cve["cve_number"]} - Status: {package["fix_state"]} - Related package: {package_data}'
                            )
                            no_fix = False
                    if no_fix:
                        LOGGER.info(
                            f'{cve["product"]}: No known fix for {cve["cve_number"]}.'
                        )
                except (KeyError, TypeError):
                    if cve["cve_number"] != "UNKNOWN":
                        LOGGER.info(
                            f'{cve["product"]}: No known fix for {cve["cve_number"]}.'
                        )

    def get_data(self, cve_number: str, product: str):
        """Fetch the Red Hat security-data JSON for *cve_number*.

        Returns the parsed JSON dict, or None on HTTP error. *product* is
        currently unused but kept for interface compatibility.
        """
        try:
            full_query = f"{RH_CVE_API}/{cve_number}.json"
            # timeout = 300s = 5 minutes. This is total guess for a valid timeout.
            return requests.get(full_query, timeout=300).json()
        except requests.HTTPError as e:
            LOGGER.debug(e)

    def parse_package_data(self, package_data: str) -> str:
        """
        Parses package name and version data from the package data provided by Red Hat.
        Sample input:
        nodejs:12-8040020210817133458.522a0ee4
        edk2-0:20210527gite1999b264f1f-3.el8
        dnsmasq-0:2.79-13.el8_3.1
        Sample output:
        nodejs v12
        edk2
        dnsmasq v2.79
        """
        parsed_package_data = ""
        # Name is everything before the first "-<digit>" (epoch/version start).
        # maxsplit passed by keyword: positional use is deprecated since 3.13.
        package_name = split(r"-\d", package_data, maxsplit=1)[0]
        if ":" in package_name:
            # "name:stream" form (module streams): the stream is the version.
            package_name, package_version = split(":", package_name)
            package_version = search(r"\d+", package_version)
            if package_version:
                package_version = package_version.group(0)
            parsed_package_data = f"{package_name} v{package_version}"
        else:
            parsed_package_data = package_name
            # Fall back to the first dotted number anywhere in the string.
            match = search(r"\d+\.\d+", package_data)
            if match:
                package_version = match.group(0)
                parsed_package_data += f" v{package_version}"
        return parsed_package_data
1,024 | add brightness | # -*- coding: utf-8 -*-
"""
.. _vision__image_dataset_drift:
Image Dataset Drift
*******************
This notebook provides an overview for using and understanding the image dataset
drift check, used to detect drift in simple image properties between train and
test datasets.
**Structure:**
* `What Is Image Dataset Drift? <#what-is-image-dataset-drift>`__
* `Which Image Properties Are Used? <#which-image-properties-are-used>`__
* `Loading The Data <#loading-the-data>`__
* `Run The Check <#run-the-check>`__
* `Define a Condition <#define-a-condition>`__
What Is Image Dataset Drift?
----------------------------
Drift is simply a change in the distribution of data over time, and it is
also one of the top reasons why machine learning model's performance degrades
over time.
Image dataset drift is a drift that occurs in more than one image property at a time,
and may even affect the relationships between those properties, which are undetectable by
univariate drift methods.
For more information on drift, please visit our :ref:`Drift Guide <drift_user_guide>`.
How Deepchecks Detects Dataset Drift
------------------------------------
This check detects multivariate drift by using :ref:`a domain classifier <drift_detection_by_domain_classifier>`.
Other methods to detect drift include :ref:`univariate measures <drift_detection_by_univariate_measure>`
which is used in other checks, such as :ref:`vision__image_property_drift` check.
Using Properties to Detect Image Drift
--------------------------------------
In computer vision specifically, we can't measure drift on the images directly, as the individual pixel has little
value when estimating drift. Therefore, we calculate drift on different
:ref:`properties of the image<vision__properties_guide>`,
on which we can directly measure drift.
Which Image Properties Are Used?
--------------------------------
============================== ==========
Property name What is it
============================== ==========
Aspect Ratio Ratio between height and width of image (height / width)
Area Area of image in pixels (height * width)
Brightness Average intensity of image pixels. Color channels have different weights according to
RGB-to-Grayscale formula
RMS Contrast Contrast of image, calculated by standard deviation of pixels
Mean Red Relative Intensity Mean over all pixels of the red channel, scaled to their relative intensity in
comparison to the other channels [r / (r + g + b)].
Mean Green Relative Intensity Mean over all pixels of the green channel, scaled to their relative intensity in
comparison to the other channels [g / (r + g + b)].
Mean Blue Relative Intensity Mean over all pixels of the blue channel, scaled to their relative intensity in
comparison to the other channels [b / (r + g + b)].
============================== ==========
"""
#%%
# Imports
# -------
#
# .. note::
# In this example, we use the pytorch version of the coco dataset and model. In order to run this example using
# tensorflow, please change the import statements to::
#
# from deepchecks.vision.datasets.detection.coco_tensorflow import load_dataset
import numpy as np
from deepchecks.vision.checks import ImageDatasetDrift
from deepchecks.vision.datasets.detection.coco_torch import load_dataset
#%%
# Loading the data
# ----------------
train_ds = load_dataset(train=True, object_type='VisionData')
test_ds = load_dataset(train=False, object_type='VisionData')
#%%
# Run the check
# -------------
# without drift
# ^^^^^^^^^^^^^
check = ImageDatasetDrift()
result = check.run(train_dataset=train_ds, test_dataset=test_ds)
result
#%%
# To display the results in an IDE like PyCharm, you can use the following code:
# result.show_in_window()
#%%
# The result will be displayed in a new window.
#%%
# Insert drift
# ^^^^^^^^^^^^
#
# Now, we will define a custom collate function that will insert a drift to the training set.
def METHOD_NAME(img):
    """Brighten *img* by adding 20% of each pixel's headroom below 255."""
    headroom = 255 - img
    boost = (headroom * 0.2).astype(int)
    return img + boost
#%%
drifted_train_ds = load_dataset(train=True, object_type='VisionData')
def created_drifted_collate_function(collate_fn):
    """Wrap *collate_fn* so every image in each collated batch is brightened."""
    def drifted_collate_function(batch):
        batch_dict = collate_fn(batch)
        batch_dict['images'] = [
            METHOD_NAME(np.array(image)) for image in batch_dict['images']
        ]
        return batch_dict
    return drifted_collate_function
#%%
# Run the check again
# ^^^^^^^^^^^^^^^^^^^
check = ImageDatasetDrift()
result = check.run(train_dataset=drifted_train_ds, test_dataset=test_ds)
result
#%%
# Define a Condition
# ------------------
# Now, we will define a condition that the maximum drift score is less than a certain threshold. In this example we will
# set the threshold at 0.2.
# In order to demonstrate the condition, we will use again the original (not drifted) train dataset.
check = ImageDatasetDrift().add_condition_drift_score_less_than(0.2)
result = check.run(train_dataset=train_ds, test_dataset=test_ds).show(show_additional_outputs=False)
result |
1,025 | bytes available | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# @File: F2TTL\ARCOM.py
# @Author: Niccolo' Bonacchi (@nbonacchi)
# @Date: Tuesday, December 7th 2021, 12:01:15 pm
"""
----------------------------------------------------------------------------
This file is part of the Sanworks ArCOM repository
Copyright (C) 2021 Sanworks LLC, Rochester, New York, USA
----------------------------------------------------------------------------
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 3.
This program is distributed WITHOUT ANY WARRANTY and without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import numpy as np
import serial
class ArCOM(object):
    """Wrapper around a pyserial port that writes/reads typed numeric arrays
    following the Sanworks ArCOM conventions (native byte order, numpy dtypes)."""

    def __init__(self, serialPortName, baudRate):
        self.serialObject = 0
        # The three tuples below are index-aligned: name -> byte width -> struct symbol.
        self.typeNames = (
            "uint8",
            "int8",
            "char",
            "uint16",
            "int16",
            "uint32",
            "int32",
            "single",
        )
        # bugfix: "single" (float32) previously had no width/symbol entry, so
        # read(..., "single") crashed with IndexError.
        self.typeBytes = (1, 1, 1, 2, 2, 4, 4, 4)
        self.typeSymbols = ("B", "b", "c", "H", "h", "L", "l", "f")
        # bugfix: baudRate was accepted but never passed to the port, so the
        # connection silently used the driver default baud rate.
        self.serialObject = serial.Serial(serialPortName, baudRate, timeout=10, rtscts=True)

    def open(self, serialPortName, baudRate):
        """(Re)open the serial connection on the given port and baud rate."""
        self.serialObject = serial.Serial(serialPortName, baudRate, timeout=10)

    def close(self):
        """Close the serial connection."""
        self.serialObject.close()

    def bytes_available(self):
        """Return the number of bytes waiting in the input buffer."""
        return self.serialObject.inWaiting()

    def write(self, *arg):
        """Write one or more (data, typeName) pairs to the port as raw bytes.

        Raises:
            ArCOMError: if a type name is not supported.
        """
        nTypes = int(len(arg) / 2)
        argPos = 0
        messageBytes = b""
        for i in range(0, nTypes):
            data = arg[argPos]
            argPos += 1
            datatype = arg[argPos]
            argPos += 1  # advance past the type name so the next pair is read
            if datatype not in self.typeNames:
                raise ArCOMError("Error: " + datatype + " is not a data type supported by ArCOM.")
            # Coerce to a numpy array of the requested dtype and append its
            # raw (native byte order) representation.
            if type(data).__module__ == np.__name__:
                NPdata = data.astype(datatype)
            else:
                NPdata = np.array(data, dtype=datatype)
            messageBytes += NPdata.tobytes()
        self.serialObject.write(messageBytes)

    def read(self, *arg):  # Read an array of values
        """Read one or more (count, typeName) pairs; returns numpy array(s).

        Raises:
            ArCOMError: on unsupported type names or serial read timeout.
        """
        nTypes = int(len(arg) / 2)
        argPos = 0
        outputs = []
        for i in range(0, nTypes):
            nValues = arg[argPos]
            argPos += 1
            datatype = arg[argPos]
            if (datatype in self.typeNames) is False:
                raise ArCOMError("Error: " + datatype + " is not a data type supported by ArCOM.")
            argPos += 1
            typeIndex = self.typeNames.index(datatype)
            byteWidth = self.typeBytes[typeIndex]
            nBytes2Read = nValues * byteWidth
            messageBytes = self.serialObject.read(nBytes2Read)
            nBytesRead = len(messageBytes)
            if nBytesRead < nBytes2Read:
                raise ArCOMError(
                    "Error: serial port timed out. "
                    + str(nBytesRead)
                    + " bytes read. Expected "
                    + str(nBytes2Read)
                    + " byte(s)."
                )
            thisOutput = np.frombuffer(messageBytes, datatype)
            outputs.append(thisOutput)
        if nTypes == 1:
            outputs = thisOutput
        return outputs

    def __del__(self):
        # Close the port if it was opened. During interpreter shutdown, or if
        # __init__ failed before opening, serialObject may still be the int 0.
        try:
            self.serialObject.close()
        except AttributeError:
            pass
class ArCOMError(Exception):
    """Raised for ArCOM protocol errors (unsupported data types, serial timeouts)."""
    pass
if __name__ == "__main__":
    # Manual smoke test against real hardware: first talk to the device with
    # raw pyserial, then repeat the same exchange through the ArCOM wrapper.
    import struct

    port = "/dev/ttyACM3"
    nsamples = 6
    # Hello ser
    ser = serial.Serial(port, 115200, timeout=1)
    ser.write(b"C")
    print(int.from_bytes(ser.read(1), byteorder="little", signed=False))
    ser.write(struct.pack("c", b"#"))
    print(int.from_bytes(ser.read(1), byteorder="little", signed=False))
    # Poll one 4-byte value per "V" command, nsamples times.
    s = 0
    samples = []
    while s < nsamples:
        ser.write(b"V")
        response = ser.read(4)
        samples.append(int.from_bytes(response, byteorder="little", signed=False))
        s += 1
    print(samples)
    # ser.write(struct.pack('cI', b"V", nsamples))
    # Batched variant: request nsamples uint16 values in one command.
    ser.write(b"V" + int.to_bytes(nsamples, 4, byteorder="little", signed=False))
    serout = ser.read(nsamples * 2)
    print(serout)
    print(np.frombuffer(serout, "uint16"))
    ser.close()
    # Hello arc
    arc = ArCOM(port, 115200)
    arc.write(ord("C"), "uint8")
    print(arc.read(1, "uint8"))
    arc.write(ord("#"), "uint8")
    print(arc.read(1, "uint8"))
    arc.read(1, "uint8")
    # arc.write(ord("V"), "uint8", nsamples, "uint32")
    arc.write(ord("V"), "uint8")
    arcout = arc.read(1, "uint16")
    print(arcout)
    del arc
from fastapi import APIRouter, Depends, HTTPException, status
from pydantic.error_wrappers import ErrorWrapper, ValidationError
from dispatch.config import DISPATCH_AUTH_REGISTRATION_ENABLED
from dispatch.auth.permissions import (
OrganizationMemberPermission,
PermissionsDependency,
)
from dispatch.auth.service import CurrentUser
from dispatch.exceptions import (
InvalidConfigurationError,
InvalidPasswordError,
InvalidUsernameError,
)
from dispatch.database.core import DbSession
from dispatch.database.service import CommonParameters, search_filter_sort_paginate
from dispatch.enums import UserRoles
from dispatch.models import OrganizationSlug, PrimaryKey
from dispatch.organization.models import OrganizationRead
from .models import (
UserLogin,
UserLoginResponse,
UserOrganization,
UserPagination,
UserRead,
UserRegister,
UserRegisterResponse,
UserCreate,
UserUpdate,
)
from .service import get, get_by_email, update, create
# `auth_router` hosts authentication endpoints; `user_router` hosts user CRUD.
auth_router = APIRouter()
user_router = APIRouter()
@user_router.get(
    "",
    dependencies=[
        Depends(
            PermissionsDependency(
                [
                    OrganizationMemberPermission,
                ]
            )
        )
    ],
    response_model=UserPagination,
)
def get_users(organization: OrganizationSlug, common: CommonParameters):
    """Gets all organization users."""
    # Restrict the query to members of the requested organization.
    common["filter_spec"] = {
        "and": [{"model": "Organization", "op": "==", "field": "slug", "value": organization}]
    }
    paginated = search_filter_sort_paginate(model="DispatchUser", **common)
    members = []
    for member in paginated["items"]:
        members.append(
            {
                "id": member.id,
                "email": member.email,
                "projects": member.projects,
                "role": member.get_organization_role(organization),
            }
        )
    return {
        "items": members,
        "itemsPerPage": paginated["itemsPerPage"],
        "page": paginated["page"],
        "total": paginated["total"],
    }
@user_router.post(
    "",
    response_model=UserRead,
)
def create_user(
    user_in: UserCreate,
    organization: OrganizationSlug,
    db_session: DbSession,
    current_user: CurrentUser,
):
    """Creates a new user."""
    # Reject duplicate email addresses before checking permissions.
    existing = get_by_email(db_session=db_session, email=user_in.email)
    if existing:
        raise ValidationError(
            [
                ErrorWrapper(
                    InvalidConfigurationError(msg="A user with this email already exists."),
                    loc="email",
                )
            ],
            model=UserCreate,
        )
    # Only owners of the organization may create users.
    if current_user.get_organization_role(organization) != UserRoles.owner:
        raise HTTPException(
            status_code=status.HTTP_403_FORBIDDEN,
            detail=[
                {
                    "msg": "You don't have permissions to create a new user for this organization. Please, contact the organization's owner."
                }
            ],
        )
    return create(db_session=db_session, organization=organization, user_in=user_in)
@user_router.get("/{user_id}", response_model=UserRead)
def get_user(db_session: DbSession, user_id: PrimaryKey):
    """Get a user."""
    found = get(db_session=db_session, user_id=user_id)
    if found:
        return found
    # 404 with the project's list-of-dicts detail convention.
    raise HTTPException(
        status_code=status.HTTP_404_NOT_FOUND,
        detail=[{"msg": "A user with this id does not exist."}],
    )
@user_router.put(
    "/{user_id}",
    response_model=UserRead,
)
def update_user(
    db_session: DbSession,
    user_id: PrimaryKey,
    organization: OrganizationSlug,
    user_in: UserUpdate,
    current_user: CurrentUser,
):
    """Update a user.

    A role change is only permitted when the current user is an owner of the
    organization; other profile fields can be updated by anyone reaching this
    endpoint.
    """
    user = get(db_session=db_session, user_id=user_id)
    if not user:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=[{"msg": "A user with this id does not exist."}],
        )
    if user_in.role:
        # Only enforce the owner check when the role would actually change.
        user_organization_role = user.get_organization_role(organization)
        if user_organization_role != user_in.role:
            current_user_organization_role = current_user.get_organization_role(organization)
            if current_user_organization_role != UserRoles.owner:
                raise HTTPException(
                    status_code=status.HTTP_403_FORBIDDEN,
                    detail=[
                        {
                            "msg": "You don't have permissions to update the user's role. Please, contact the organization's owner."
                        }
                    ],
                )
    # add organization information
    user_in.organizations = [
        UserOrganization(role=user_in.role, organization=OrganizationRead(name=organization))
    ]
    return update(db_session=db_session, user=user, user_in=user_in)
@auth_router.get("/me", response_model=UserRead)
def METHOD_NAME(
    *,
    db_session: DbSession,  # NOTE(review): unused here; kept for DI-signature consistency
    current_user: CurrentUser,
):
    """Return the currently authenticated user."""
    return current_user
@auth_router.post("/login", response_model=UserLoginResponse)
def login_user(
    user_in: UserLogin,
    organization: OrganizationSlug,
    db_session: DbSession,
):
    """Authenticate a user and return their project memberships and token."""
    user = get_by_email(db_session=db_session, email=user_in.email)
    if user and user.check_password(user_in.password):
        projects = [
            {
                "project": membership.project,
                "default": membership.default,
                "role": membership.role,
            }
            for membership in user.projects
        ]
        return {"projects": projects, "token": user.token}
    # Deliberately report both fields as invalid on any failure so the
    # response does not reveal which one was wrong.
    raise ValidationError(
        [
            ErrorWrapper(
                InvalidUsernameError(msg="Invalid username."),
                loc="username",
            ),
            ErrorWrapper(
                InvalidPasswordError(msg="Invalid password."),
                loc="password",
            ),
        ],
        model=UserLogin,
    )
def register_user(
    user_in: UserRegister,
    organization: OrganizationSlug,
    db_session: DbSession,
):
    """Register a new account, rejecting duplicate email addresses."""
    if get_by_email(db_session=db_session, email=user_in.email):
        raise ValidationError(
            [
                ErrorWrapper(
                    InvalidConfigurationError(msg="A user with this email already exists."),
                    loc="email",
                )
            ],
            model=UserRegister,
        )
    return create(db_session=db_session, organization=organization, user_in=user_in)
# Expose the registration endpoint only when self-service registration is
# enabled; the route decorator is applied manually so the function itself can
# always be defined above.
if DISPATCH_AUTH_REGISTRATION_ENABLED:
    register_user = auth_router.post("/register", response_model=UserRegisterResponse)(
        register_user
    )
# SPDX-License-Identifier: MIT
import struct
from ..common import *
from ...utils import *
from ..asc.base import *
class AFKEPMessage(Register64):
    # Base class for AFK endpoint messages; bits 63..48 carry the message type.
    TYPE = 63, 48
class AFKEP_GetBuf(AFKEPMessage):
    # Device asks the host to allocate a shared buffer of SIZE blocks.
    TYPE = 63, 48, Constant(0x89)
    SIZE = 31, 16
    TAG = 15, 0
class AFKEP_GetBuf_Ack(AFKEPMessage):
    # Host reply carrying the DVA of the allocated buffer.
    TYPE = 63, 48, Constant(0xa1)
    DVA = 47, 0
class AFKEP_InitRB(AFKEPMessage):
    # Ring-buffer init; no TYPE constant because the same layout is reused by
    # the separate TX (0x8a) and RX (0x8b) handlers below.
    OFFSET = 47, 32
    SIZE = 31, 16
    TAG = 15, 0
class AFKEP_Send(AFKEPMessage):
    # Host -> device: announce a new TX write pointer.
    TYPE = 63, 48, Constant(0xa2)
    WPTR = 31, 0
class AFKEP_Recv(AFKEPMessage):
    # Device -> host: announce a new RX write pointer.
    TYPE = 63, 48, Constant(0x85)
    WPTR = 31, 0
class AFKEP_Init(AFKEPMessage):
    TYPE = 63, 48, Constant(0x80)
class AFKEP_Init_Ack(AFKEPMessage):
    TYPE = 63, 48, Constant(0xa0)
class AFKEP_Start(AFKEPMessage):
    TYPE = 63, 48, Constant(0xa3)
class AFKEP_Start_Ack(AFKEPMessage):
    TYPE = 63, 48, Constant(0x86)
class AFKEP_Shutdown(AFKEPMessage):
    TYPE = 63, 48, Constant(0xc0)
class AFKEP_Shutdown_Ack(AFKEPMessage):
    TYPE = 63, 48, Constant(0xc1)
class AFKError(Exception):
    """Raised on AFK ring-buffer protocol errors (e.g. a full ring)."""
    pass
class AFKRingBuf(Reloadable):
    """One direction of an AFK shared-memory ring buffer.

    Layout: three bookkeeping blocks of BLOCK_SIZE bytes each (data-area
    size, read pointer, write pointer), followed by the data area.
    """

    BLOCK_SIZE = 0x40

    def __init__(self, ep, base, size):
        self.ep = ep
        self.base = base
        # The first block stores the data-area size; check it is consistent
        # with the overall allocation (data + 3 bookkeeping blocks).
        bs, unk = struct.unpack("<II", self.read_buf(0, 8))
        assert (bs + 3 * self.BLOCK_SIZE) == size
        self.bufsize = bs
        self.rptr = 0
        self.wptr = 0

    def read_buf(self, off, size):
        """Read raw bytes from the shared buffer at *off*."""
        return self.ep.iface.readmem(self.base + off, size)

    def write_buf(self, off, data):
        """Write raw bytes into the shared buffer at *off*."""
        return self.ep.iface.writemem(self.base + off, data)

    def get_rptr(self):
        """Read the shared read pointer (bookkeeping block 1)."""
        return self.ep.asc.p.read32(self.base + self.BLOCK_SIZE)

    def get_wptr(self):
        """Read the shared write pointer (bookkeeping block 2)."""
        return self.ep.asc.p.read32(self.base + 2 * self.BLOCK_SIZE)

    def update_rptr(self, rptr):
        """Publish *rptr* as the shared read pointer.

        BUG FIX: the original ignored the argument and always wrote
        ``self.rptr``. All in-file callers pass ``self.rptr``, so behavior is
        unchanged for them, but the parameter is now honored.
        """
        self.ep.asc.p.write32(self.base + self.BLOCK_SIZE, rptr)

    def update_wptr(self, wptr):
        """Publish *wptr* as the shared write pointer (see update_rptr)."""
        self.ep.asc.p.write32(self.base + 2 * self.BLOCK_SIZE, wptr)

    def read(self):
        """Yield every complete message currently in the ring."""
        self.wptr = self.get_wptr()

        while self.wptr != self.rptr:
            # Each message starts with a 16-byte header: 4-byte magic,
            # 4-byte payload size, 8 bytes passed through to the consumer.
            hdr = self.read_buf(3 * self.BLOCK_SIZE + self.rptr, 16)
            self.rptr += 16
            magic, size = struct.unpack("<4sI", hdr[:8])
            assert magic in [b"IOP ", b"AOP "]
            if size > (self.bufsize - self.rptr):
                # Message would not fit before the end: it was written
                # wrapped to the start of the data area instead.
                hdr = self.read_buf(3 * self.BLOCK_SIZE, 16)
                self.rptr = 16
                magic, size = struct.unpack("<4sI", hdr[:8])
                assert magic in [b"IOP ", b"AOP "]

            payload = self.read_buf(3 * self.BLOCK_SIZE + self.rptr, size)
            self.rptr = (align_up(self.rptr + size, self.BLOCK_SIZE)) % self.bufsize
            self.update_rptr(self.rptr)
            yield hdr[8:] + payload
            self.wptr = self.get_wptr()

        self.update_rptr(self.rptr)

    def write(self, data):
        """Append one message (8-byte user header + payload); return new wptr.

        Raises:
            AFKError: if the ring has no room for the message.
        """
        hdr2, data = data[:8], data[8:]

        self.rptr = self.get_rptr()

        if self.wptr < self.rptr and self.wptr + 0x10 >= self.rptr:
            raise AFKError("Ring buffer is full")
        hdr = struct.pack("<4sI", b"IOP ", len(data)) + hdr2
        self.write_buf(3 * self.BLOCK_SIZE + self.wptr, hdr)
        if len(data) > (self.bufsize - self.wptr - 16):
            # Not enough room before the end: rewrite the header at the start
            # of the data area and wrap the write pointer.
            if self.rptr < 0x10:
                raise AFKError("Ring buffer is full")
            self.write_buf(3 * self.BLOCK_SIZE, hdr)
            self.wptr = 0

        if self.wptr < self.rptr and self.wptr + 0x10 + len(data) >= self.rptr:
            raise AFKError("Ring buffer is full")
        self.write_buf(3 * self.BLOCK_SIZE + self.wptr + 0x10, data)
        self.wptr = align_up(self.wptr + 0x10 + len(data), self.BLOCK_SIZE) % self.bufsize
        self.update_wptr(self.wptr)
        return self.wptr
class AFKRingBufEndpoint(ASCBaseEndpoint):
    # Endpoint implementing the AFK ring-buffer transport: an Init/GetBuf
    # handshake allocates one shared buffer, TX/RX rings are carved out of it,
    # then Start enables message flow.
    BASE_MESSAGE = AFKEPMessage
    SHORT = "afkep"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.txq = None          # host -> device ring (AFKRingBuf)
        self.rxq = None          # device -> host ring (AFKRingBuf)
        self.iface = self.asc.iface
        self.alive = False       # set on Init_Ack, cleared on Shutdown_Ack
        self.started = False     # set on Start_Ack
        self.iobuffer = None     # shared buffer backing both rings
        self.verbose = 2
        self.msgid = 0

    def start(self):
        # Kick off the handshake; the device answers with Init_Ack.
        self.send(AFKEP_Init())

    @msg_handler(0xa0, AFKEP_Init_Ack)
    def Init_Ack(self, msg):
        self.alive = True
        return True

    @msg_handler(0x89, AFKEP_GetBuf)
    def GetBuf(self, msg):
        # Device requests a shared buffer; allocate and reply with its DVA.
        size = msg.SIZE * AFKRingBuf.BLOCK_SIZE

        if self.iobuffer:
            print("WARNING: trying to reset iobuffer!")

        self.iobuffer, self.iobuffer_dva = self.asc.ioalloc(size)
        self.asc.p.write32(self.iobuffer, 0xdeadbeef)

        self.send(AFKEP_GetBuf_Ack(DVA=self.iobuffer_dva))
        self.log(f"Buffer: phys={self.iobuffer:#x} dva={self.iobuffer_dva:#x} size={size:#x}")
        return True

    def stop(self):
        # Request shutdown and pump the mailbox until the ack clears `alive`.
        self.log("Shutting down")
        self.send(AFKEP_Shutdown())
        while self.alive:
            self.asc.work()

    @msg_handler(0xc1, AFKEP_Shutdown_Ack)
    def Shutdown_Ack(self, msg):
        self.alive = False
        self.log("Shutdown ACKed")
        return True

    @msg_handler(0x8a, AFKEP_InitRB)
    def InitTX(self, msg):
        # TX and RX rings arrive in either order; start once both exist.
        self.txq = self.init_rb(msg)
        if self.rxq and self.txq:
            self.start_queues()
        return True

    @msg_handler(0x8b, AFKEP_InitRB)
    def InitRX(self, msg):
        self.rxq = self.init_rb(msg)
        if self.rxq and self.txq:
            self.start_queues()
        return True

    def init_rb(self, msg):
        # Carve a ring out of the shared iobuffer at the given block offset.
        off = msg.OFFSET * AFKRingBuf.BLOCK_SIZE
        size = msg.SIZE * AFKRingBuf.BLOCK_SIZE

        return AFKRingBuf(self, self.iobuffer + off, size)

    def start_queues(self):
        self.send(AFKEP_Start())

    @msg_handler(0x86, AFKEP_Start_Ack)
    def Start_Ack(self, msg):
        self.started = True
        return True

    @msg_handler(0x85, AFKEP_Recv)
    def Recv(self, msg):
        # Drain every complete message from the RX ring.
        for data in self.rxq.read():
            if self.verbose >= 3:
                self.log(f"<RX rptr={self.rxq.rptr:#x}")
                chexdump(data)
            self.handle_ipc(data)
        return True

    def handle_ipc(self, data):
        # Hook for subclasses; the base endpoint discards received messages.
        pass

    def METHOD_NAME(self, data):
        # Write one message to the TX ring and notify the peer of the new
        # write pointer.
        wptr = self.txq.write(data)
        self.send(AFKEP_Send(WPTR = wptr))
# coding=utf-8
"""Provider code for Beyond-hd."""
from __future__ import unicode_literals
import logging
from medusa import tv
from medusa.bs4_parser import BS4Parser
from medusa.helper.common import convert_size
from medusa.logger.adapters.style import BraceAdapter
from medusa.providers.torrent.torrent_provider import TorrentProvider
from requests.compat import urljoin
# Module logger wrapped for brace-style ("{}") formatting.
log = BraceAdapter(logging.getLogger(__name__))
log.logger.addHandler(logging.NullHandler())
class BeyondHDProvider(TorrentProvider):
    """Beyond-hd Torrent provider."""

    def __init__(self):
        """Initialize the class."""
        super(BeyondHDProvider, self).__init__('Beyond-HD')

        # Authentication is cookie-based (no username/password form login).
        self.enable_cookies = True
        self.cookies = ''
        self.required_cookies = ('remember_web_[**long_hash**]',)

        self.url = 'https://beyond-hd.me'
        self.urls = {
            'login': urljoin(self.url, 'login'),
            'search': urljoin(self.url, 'torrents'),
        }

        # Proper Strings
        self.proper_strings = ['PROPER', 'REPACK', 'REAL', 'RERIP']

        # Miscellaneous Options

        # Cache
        self.cache = tv.Cache(self)

    def search(self, search_strings, *args, **kwargs):
        """
        Search a provider and parse the results.

        :param search_strings: A dict with mode (key) and the search value (value)
        :returns: A list of search results (structure)
        """
        results = []
        if not self.login():
            return results

        for mode in search_strings:
            log.debug('Search mode: {0}', mode)

            for search_string in search_strings[mode]:
                # Base query: newest first, 100 results per page.
                # NOTE(review): category id 2 presumably means TV — confirm
                # against the site's category list.
                search_params = {
                    'categories[]': 2,
                    'sorting': 'created_at',
                    'qty': '100',
                    'direction': 'desc',
                    'doSearch': 'Search'
                }
                if mode != 'RSS':
                    log.debug('Search string: {search}',
                              {'search': search_string})
                    search_params['search'] = search_string

                if mode == 'season':
                    # Restrict season-mode searches to season packs.
                    search_params['pack'] = 1

                response = self.session.get(self.urls['search'], params=search_params)
                if not response or not response.text:
                    log.debug('No data returned from provider')
                    continue

                results += self.METHOD_NAME(response.text, mode)

        return results

    def METHOD_NAME(self, data, mode):
        """
        Parse search results for items.

        :param data: The raw response from a search
        :param mode: The current mode used to search, e.g. RSS

        :return: A list of items found
        """
        # Units
        units = ['B', 'KIB', 'MIB', 'GIB', 'TIB', 'PIB']

        items = []

        with BS4Parser(data, 'html5lib') as html:
            # The site ships two themes with different table markup; detect
            # which one was returned before locating the results table.
            if html.find('div', class_='table-torrents'):
                theme = 'modern'
                torrent_table = html.find('div', class_='table-torrents').find('table')
            else:
                theme = 'classic'
                torrent_table = html.find('div', class_='table-responsive').find('table')

            torrent_rows = torrent_table('tr') if torrent_table else []

            labels = [label.get_text(strip=True) for label in torrent_rows[0]('th')]
            # For the classic theme, the tr don't match the td.
            if theme == 'classic':
                del labels[3]

            # Continue only if one release is found
            if len(torrent_rows) < 2:
                log.debug('Data returned from provider does not contain any torrents')
                return items

            for result in torrent_rows[1:]:
                cells = result('td')

                try:
                    if len(cells) < 3:
                        continue

                    # Column 1 holds the title link, column 2 the download link.
                    link = cells[1].find('a')
                    download_url = urljoin(self.url, cells[2].find('a')['href'])
                    title = link.get_text(strip=True)
                    if not all([title, download_url]):
                        continue

                    seeders = int(cells[labels.index('S')].find('span').get_text())
                    leechers = int(cells[labels.index('L')].find('span').get_text())

                    # Filter unseeded torrent
                    if seeders < self.minseed:
                        if mode != 'RSS':
                            log.debug("Discarding torrent because it doesn't meet the"
                                      ' minimum seeders: {0}. Seeders: {1}',
                                      title, seeders)
                        continue

                    torrent_size = cells[labels.index('Size')].find('span').get_text()
                    size = convert_size(torrent_size, units=units) or -1

                    pubdate_raw = cells[labels.index('Age')].find('span').get_text()
                    pubdate = self.parse_pubdate(pubdate_raw, human_time=True)

                    item = {
                        'title': title,
                        'link': download_url,
                        'size': size,
                        'seeders': seeders,
                        'leechers': leechers,
                        'pubdate': pubdate,
                    }
                    if mode != 'RSS':
                        log.debug('Found result: {0} with {1} seeders and {2} leechers',
                                  title, seeders, leechers)

                    items.append(item)
                except (AttributeError, TypeError, KeyError, ValueError, IndexError):
                    # Row layout changed or a cell was missing; skip this row.
                    log.exception('Failed parsing provider.')

        return items

    def login(self):
        """Login method used for logging in before doing search and torrent downloads."""
        return self.cookie_login('Login now')

    def check_required_cookies(self):
        """
        Check if we have the required cookies in the requests sessions object.

        Meaning that we've already successfully authenticated once, and we don't need
        to go through this again.
        Note! This doesn't mean the cookies are correct!
        """
        # NOTE(review): always returning False forces cookie validation on
        # every login attempt — presumably intentional for this provider.
        return False
provider = BeyondHDProvider()
# Copyright 2020-2023 OpenDR European Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import gc
import cv2
import shutil
import os
import numpy as np
from opendr.engine.datasets import ExternalDataset
from opendr.engine.target import TrackingAnnotation
from opendr.perception.object_tracking_2d import SiamRPNLearner
from opendr.perception.object_tracking_2d.datasets import OTBTrainDataset
# Device used by the tests, taken from the TEST_DEVICE env var (default CPU).
device = os.getenv('TEST_DEVICE') if os.getenv('TEST_DEVICE') else 'cpu'
def rmfile(path):
    """Delete *path*, printing (not raising) on failure."""
    try:
        os.remove(path)
    except OSError as err:
        print("Error: %s - %s." % (err.filename, err.strerror))
def METHOD_NAME(_dir):
    """Recursively delete *_dir*, printing (not raising) on failure."""
    try:
        shutil.rmtree(_dir)
    except OSError as err:
        print("Error: %s - %s." % (err.filename, err.strerror))
class TestSiamRPNLearner(unittest.TestCase):
    """End-to-end tests for SiamRPNLearner: fit, eval, infer, save/load."""

    @classmethod
    def setUpClass(cls):
        print("\n\n**********************************\nTEST SiamRPN Learner\n"
              "**********************************")
        cls.temp_dir = os.path.join(".", "tests", "sources", "tools", "perception", "object_tracking_2d",
                                    "siamrpn", "siamrpn_temp")
        cls.learner = SiamRPNLearner(device=device, temp_path=cls.temp_dir, batch_size=1, n_epochs=1,
                                     lr=1e-4, num_workers=1)
        # Download all required files for testing
        cls.learner.download(cls.temp_dir, mode="pretrained")
        cls.learner.download(os.path.join(cls.temp_dir, "test_data"), mode="test_data")

    @classmethod
    def tearDownClass(cls):
        print('Removing temporary directories for SiamRPN...')
        # Clean up downloaded files
        METHOD_NAME(os.path.join(cls.temp_dir, "siamrpn_opendr"))
        METHOD_NAME(os.path.join(cls.temp_dir, "test_data"))
        METHOD_NAME(os.path.join(cls.temp_dir))
        del cls.learner
        gc.collect()
        print('Finished cleaning for SiamRPN...')

    def test_fit(self):
        print('Starting training test for SiamRPN...')
        print(os.listdir(os.path.join(self.temp_dir, "test_data")))
        training_dataset = OTBTrainDataset(root=os.path.join(self.temp_dir, "test_data"),
                                           json_path=os.path.join(self.temp_dir, "test_data", "OTBtest.json"))
        # Snapshot one parameter tensor before/after fit to verify training
        # actually updated the model.
        m = list(self.learner._model.collect_params().values())[1].data().asnumpy().copy()
        self.learner.fit(dataset=training_dataset, verbose=True)
        n = list(self.learner._model.collect_params().values())[1].data().asnumpy()
        self.assertFalse(np.array_equal(m, n),
                         msg="Model parameters did not change after running fit.")
        del training_dataset, m, n
        gc.collect()
        print('Finished training test for SiamRPN...')

    def test_eval(self):
        print('Starting evaluation test for SiamRPN...')
        eval_dataset = ExternalDataset(os.path.join(self.temp_dir, "test_data"),
                                       dataset_type="OTBtest")
        self.learner.load(os.path.join(self.temp_dir, "siamrpn_opendr"))
        results_dict = self.learner.eval(eval_dataset)
        self.assertIsNotNone(results_dict['success'],
                             msg="Eval results dictionary not returned.")
        del eval_dataset, results_dict
        gc.collect()
        print('Finished evaluation test for SiamRPN...')

    def test_infer(self):
        print('Starting inference test for SiamRPN...')
        # Reset the model so load() is exercised from scratch.
        self.learner._model = None
        self.learner.load(os.path.join(self.temp_dir, "siamrpn_opendr"))
        img = cv2.imread(os.path.join(self.temp_dir, "test_data", "Basketball", "img", "0001.jpg"))
        init_box = TrackingAnnotation(left=198, top=214, width=34, height=81, id=0, name=0)
        self.assertIsNotNone(self.learner.infer(img, init_box=init_box),
                             msg="Returned empty TrackingAnnotation.")
        del img
        gc.collect()
        print('Finished inference test for SiamRPN...')

    def test_save_load(self):
        print('Starting save/load test for SiamRPN...')
        self.learner.save(os.path.join(self.temp_dir, "test_model"))
        self.learner._model = None
        self.learner.load(os.path.join(self.temp_dir, "test_model"))
        self.assertIsNotNone(self.learner._model, "model is None after loading model.")
        # Cleanup
        METHOD_NAME(os.path.join(self.temp_dir, "test_model"))
        print('Finished save/load test for SiamRPN...')
if __name__ == "__main__":
    unittest.main()
from collections import deque
from typing import Deque, Dict, List, Optional, Tuple
from twisted.internet import defer
from twisted.internet.base import ReactorBase
from twisted.internet.defer import Deferred
from twisted.internet.endpoints import HostnameEndpoint
from twisted.python.failure import Failure
from twisted.web.client import (
URI,
BrowserLikePolicyForHTTPS,
ResponseFailed,
_StandardEndpointFactory,
)
from twisted.web.error import SchemeNotSupported
from scrapy.core.downloader.contextfactory import AcceptableProtocolsContextFactory
from scrapy.core.http2.protocol import H2ClientFactory, H2ClientProtocol
from scrapy.http.request import Request
from scrapy.settings import Settings
from scrapy.spiders import Spider
class H2ConnectionPool:
    """Pool of HTTP/2 connections keyed by (scheme, hostname, port)."""

    def __init__(self, reactor: ReactorBase, settings: Settings) -> None:
        self._reactor = reactor
        self.settings = settings

        # Store a dictionary which is used to get the respective
        # H2ClientProtocolInstance using the key as Tuple(scheme, hostname, port)
        self._connections: Dict[Tuple, H2ClientProtocol] = {}

        # Save all requests that arrive before the connection is established
        self._pending_requests: Dict[Tuple, Deque[Deferred]] = {}

    def get_connection(
        self, key: Tuple, uri: URI, endpoint: HostnameEndpoint
    ) -> Deferred:
        """Return a Deferred that fires with an established H2ClientProtocol."""
        if key in self._pending_requests:
            # Received a request while connecting to remote
            # Create a deferred which will fire with the H2ClientProtocol
            # instance
            d: Deferred = Deferred()
            self._pending_requests[key].append(d)
            return d

        # Check if we already have a connection to the remote
        conn = self._connections.get(key, None)
        if conn:
            # Return this connection instance wrapped inside a deferred
            return defer.succeed(conn)

        # No connection is established for the given URI
        return self._new_connection(key, uri, endpoint)

    def _new_connection(
        self, key: Tuple, uri: URI, endpoint: HostnameEndpoint
    ) -> Deferred:
        """Start a new connection attempt and queue the caller until ready."""
        self._pending_requests[key] = deque()

        # When the connection is eventually lost, purge it from the pool.
        conn_lost_deferred: Deferred = Deferred()
        conn_lost_deferred.addCallback(self._remove_connection, key)

        factory = H2ClientFactory(uri, self.settings, conn_lost_deferred)
        conn_d = endpoint.connect(factory)
        conn_d.addCallback(self.METHOD_NAME, key)

        d: Deferred = Deferred()
        self._pending_requests[key].append(d)
        return d

    def METHOD_NAME(self, conn: H2ClientProtocol, key: Tuple) -> H2ClientProtocol:
        """Register an established connection and flush queued waiters."""
        self._connections[key] = conn

        # Now as we have established a proper HTTP/2 connection
        # we fire all the deferred's with the connection instance
        pending_requests = self._pending_requests.pop(key, None)
        while pending_requests:
            d = pending_requests.popleft()
            d.callback(conn)

        return conn

    def _remove_connection(self, errors: List[BaseException], key: Tuple) -> None:
        """Drop a lost connection and fail any requests still waiting on it."""
        self._connections.pop(key)

        # Call the errback of all the pending requests for this connection
        pending_requests = self._pending_requests.pop(key, None)
        while pending_requests:
            d = pending_requests.popleft()
            d.errback(ResponseFailed(errors))

    def close_connections(self) -> None:
        """Abort all pooled HTTP/2 connections immediately.

        Connections are torn down synchronously via abortConnection();
        this method returns None.
        """
        for conn in self._connections.values():
            assert conn.transport is not None  # typing
            conn.transport.abortConnection()
class H2Agent:
    """Agent that issues HTTP/2 requests over pooled connections."""

    def __init__(
        self,
        reactor: ReactorBase,
        pool: H2ConnectionPool,
        context_factory: BrowserLikePolicyForHTTPS = BrowserLikePolicyForHTTPS(),
        connect_timeout: Optional[float] = None,
        bind_address: Optional[bytes] = None,
    ) -> None:
        self._reactor = reactor
        self._pool = pool
        # Restrict TLS negotiation to the "h2" ALPN protocol.
        self._context_factory = AcceptableProtocolsContextFactory(
            context_factory, acceptable_protocols=[b"h2"]
        )
        self.endpoint_factory = _StandardEndpointFactory(
            self._reactor, self._context_factory, connect_timeout, bind_address
        )

    def get_endpoint(self, uri: URI):
        """Build the connection endpoint for *uri*."""
        return self.endpoint_factory.endpointForURI(uri)

    def get_key(self, uri: URI) -> Tuple:
        """
        Arguments:
            uri - URI obtained directly from request URL
        """
        return uri.scheme, uri.host, uri.port

    def request(self, request: Request, spider: Spider) -> Deferred:
        """Send *request* over a pooled HTTP/2 connection."""
        uri = URI.fromBytes(bytes(request.url, encoding="utf-8"))
        try:
            endpoint = self.get_endpoint(uri)
        except SchemeNotSupported:
            # Propagate the failure through the Deferred chain.
            return defer.fail(Failure())

        pending = self._pool.get_connection(self.get_key(uri), uri, endpoint)
        pending.addCallback(lambda conn: conn.request(request, spider))
        return pending
class ScrapyProxyH2Agent(H2Agent):
    """H2Agent variant that tunnels every request through one HTTP proxy."""

    def __init__(
        self,
        reactor: ReactorBase,
        proxy_uri: URI,
        pool: H2ConnectionPool,
        context_factory: BrowserLikePolicyForHTTPS = BrowserLikePolicyForHTTPS(),
        connect_timeout: Optional[float] = None,
        bind_address: Optional[bytes] = None,
    ) -> None:
        super().__init__(
            reactor=reactor,
            pool=pool,
            context_factory=context_factory,
            connect_timeout=connect_timeout,
            bind_address=bind_address,
        )
        self._proxy_uri = proxy_uri

    def get_endpoint(self, uri: URI):
        # Always connect to the proxy, regardless of the request URI.
        return self.endpoint_factory.endpointForURI(self._proxy_uri)

    def get_key(self, uri: URI) -> Tuple:
        """We use the proxy uri instead of uri obtained from request url"""
        return "http-proxy", self._proxy_uri.host, self._proxy_uri.port
"""
===============
=== Purpose ===
===============
Extract a useful subset of the CDC data. Reads from `cdc` and `cdc_meta` (all
pages, and daily resolution) and writes to `cdc_extract` (selected pages, and
weekly resolution). The Epidata API is then used to read from `cdc_extract` and
update the `sensors` and `nowcasts` tables.
This is necessary because the `cdc` table is huge, and string matching is slow.
If runtime was not an issue, everything could (in theory) be done in pure SQL
using just the `cdc` and `cdc_meta` tables.
A similar preprocessing step is performed for the wikipedia dataset in
signal_update.py.
=======================
=== Data Dictionary ===
=======================
`cdc_extract` is the table where the data is stored.
+---------+---------+------+-----+---------+----------------+
| Field | Type | Null | Key | Default | Extra |
+---------+---------+------+-----+---------+----------------+
| id | int(11) | NO | PRI | NULL | auto_increment |
| epiweek | int(11) | NO | MUL | NULL | |
| state | char(2) | NO | MUL | NULL | |
| num1 | int(11) | NO | | NULL | |
| num2 | int(11) | NO | | NULL | |
| num3 | int(11) | NO | | NULL | |
| num4 | int(11) | NO | | NULL | |
| num5 | int(11) | NO | | NULL | |
| num6 | int(11) | NO | | NULL | |
| num7 | int(11) | NO | | NULL | |
| num8 | int(11) | NO | | NULL | |
| total | int(11) | NO | | NULL | |
+---------+---------+------+-----+---------+----------------+
id: unique identifier for each record
epiweek: the epiweek during which the data was collected
state: where the data was collected (51 states, including DC)
num1: hits for pages like '%What You Should Know for the % Influenza Season%'
num2: hits for pages like '%What To Do If You Get Sick%'
num3: hits for pages like '%Flu Symptoms & Severity%'
num4: hits for pages like '%How Flu Spreads%'
num5: hits for pages like '%What You Should Know About Flu Antiviral Drugs%'
num6: hits for pages like '%Weekly US Map%'
num7: hits for pages like '%Basics%'
num8: hits for pages like '%Flu Activity & Surveillance%'
total: total number of hits for all CDC pages
=================
=== Changelog ===
=================
2017-02-23
* secrets and minor cleanup
2016-04-16
+ initial version
"""
# standard library
import argparse
import sys
# third party
import mysql.connector
# first party
import delphi.operations.secrets as secrets
import delphi.utils.epiweek as flu
from . import cdc_upload
def get_num_hits(cur, epiweek, state, page):
    """Return the weekly hit count for CDC pages matching *page*.

    Returns 0 when there are no matching rows (or the SQL sum is NULL).
    """
    sql = """
    SELECT
      sum(c.`num`) `num`
    FROM
      `cdc` c
    JOIN
      `cdc_meta` m
    ON
      m.`date` = c.`date` AND m.`state` = c.`state`
    WHERE
      m.`epiweek` = %s AND c.`state` = %s AND c.`page` LIKE %s
    """
    cur.execute(sql, (epiweek, state, page))
    num = None
    for row in cur:
        num = row[0]
    return 0 if num is None else num
def METHOD_NAME(cur, epiweek, state):
    """Return the total weekly hits for (epiweek, state).

    Raises:
        Exception: when no total is available for the given week/state.
    """
    sql = """
    SELECT
      sum(m.`total`) `total`
    FROM
      `cdc_meta` m
    WHERE
      m.`epiweek` = %s AND m.`state` = %s
    """
    cur.execute(sql, (epiweek, state))
    total = None
    for row in cur:
        total = row[0]
    if total is None:
        raise Exception(f"missing data for {int(epiweek)}-{state}")
    return total
def store_result(cur, epiweek, state, num1, num2, num3, num4, num5, num6, num7, num8, total):
    """Upsert one (epiweek, state) row into `cdc_extract`."""
    sql = """
    INSERT INTO
      `cdc_extract` (`epiweek`, `state`, `num1`, `num2`, `num3`, `num4`, `num5`, `num6`, `num7`, `num8`, `total`)
    VALUES
      (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
    ON DUPLICATE KEY UPDATE
      `num1` = %s,
      `num2` = %s,
      `num3` = %s,
      `num4` = %s,
      `num5` = %s,
      `num6` = %s,
      `num7` = %s,
      `num8` = %s,
      `total` = %s
    """
    # The value list appears twice: once for INSERT, once for the UPDATE part.
    values = (num1, num2, num3, num4, num5, num6, num7, num8, total)
    cur.execute(sql, (epiweek, state) + values + values)
def extract(first_week=None, last_week=None, test_mode=False):
    """Aggregate daily CDC page hits into weekly per-state rows in `cdc_extract`.

    Args:
        first_week: first epiweek to update (default: latest in `cdc_extract`)
        last_week: last epiweek to update (default: latest in `cdc_meta`)
        test_mode: when True, do everything except commit the transaction
    """
    # page title templates
    pages = [
        "%What You Should Know for the % Influenza Season%",
        "%What To Do If You Get Sick%",
        "%Flu Symptoms & Severity%",
        "%How Flu Spreads%",
        "%What You Should Know About Flu Antiviral Drugs%",
        "%Weekly US Map%",
        "%Basics%",
        "%Flu Activity & Surveillance%",
    ]
    # location information
    states = sorted(cdc_upload.STATES.values())
    # connect
    u, p = secrets.db.epi
    cnx = mysql.connector.connect(user=u, password=p, database="epidata")
    cur = cnx.cursor()
    # weeks to update
    if first_week is None:
        cur.execute("SELECT max(`epiweek`) FROM `cdc_extract`")
        for (first_week,) in cur:
            pass
    if last_week is None:
        cur.execute("SELECT max(`epiweek`) FROM `cdc_meta`")
        for (last_week,) in cur:
            pass
    print(f"extracting {int(first_week)}--{int(last_week)}")
    # update each epiweek
    for epiweek in flu.range_epiweeks(first_week, last_week, inclusive=True):
        # update each state
        for state in states:
            try:
                # BUG FIX: the original did `nums = []` then `nums[i] = ...`,
                # which raised IndexError on the first page and made every
                # (week, state) pair fall into the except branch below.
                nums = [get_num_hits(cur, epiweek, state, page) for page in pages]
                total = METHOD_NAME(cur, epiweek, state)
                store_result(cur, epiweek, state, *nums, total)
                print(f" {epiweek}-{state}: {' '.join(str(n) for n in nums)} ({total})")
            except Exception as ex:
                print(f" {int(epiweek)}-{state}: failed", ex)
                # raise ex
            sys.stdout.flush()
    # disconnect
    cur.close()
    if not test_mode:
        cnx.commit()
    cnx.close()
def main():
    """Parse command-line arguments, validate epiweeks, and run the extraction."""
    # args and usage
    parser = argparse.ArgumentParser()
    parser.add_argument("--first", "-f", default=None, type=int, help="first epiweek override")
    parser.add_argument("--last", "-l", default=None, type=int, help="last epiweek override")
    parser.add_argument("--epiweek", "-w", default=None, type=int, help="epiweek override")
    parser.add_argument("--test", "-t", default=False, action="store_true", help="dry run only")
    args = parser.parse_args()

    # Validate every provided epiweek before using any of them.
    first_week, last_week, single_week = args.first, args.last, args.epiweek
    for candidate in (first_week, last_week, single_week):
        if candidate is not None:
            flu.check_epiweek(candidate)
    if first_week is not None and last_week is not None and first_week > last_week:
        raise Exception("epiweeks in the wrong order")
    if single_week is not None:
        # A single -w week overrides both endpoints.
        first_week = last_week = single_week

    # extract the page hits for all states on the specified weeks
    extract(first_week, last_week, args.test)
if __name__ == "__main__":
    main()
##########################################################################
#
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import re
import fnmatch
import Gaffer
import GafferUI
class NodeFinderDialogue( GafferUI.Dialogue ) :
    """Dialogue for finding and selecting nodes within a scope by wildcard match.

    Matching modes are pluggable: each mode pairs a label with a callable that
    extracts the string to match from a node (see the class-level registry
    populated via METHOD_NAME). Patterns use fnmatch syntax (* and ?).
    """

    def __init__( self, scope, **kw ) :

        # Title is set later by setScope(), so start with an empty one.
        GafferUI.Dialogue.__init__( self, "", sizeMode = self.SizeMode.Automatic, **kw )

        with GafferUI.GridContainer( spacing = 4 ) as grid :

            # criteria row
            GafferUI.Label(
                "Find",
                parenting = {
                    "index" : ( 0, 0 ),
                    "alignment" : ( GafferUI.HorizontalAlignment.Right, GafferUI.VerticalAlignment.Center ),
                }
            )
            # Single-selection menu of the registered match modes. Its contents
            # are (re)populated in __visibilityChanged so that modes registered
            # after construction are still picked up.
            self.__matchString = GafferUI.MultiSelectionMenu(
                allowMultipleSelection = False,
                allowEmptySelection = False,
                parenting = { "index" : ( 1, 0 ) }
            )

            # match text row
            GafferUI.Label(
                "Matching",
                parenting = {
                    "index" : ( 0, 2 ),
                    "alignment" : ( GafferUI.HorizontalAlignment.Right, GafferUI.VerticalAlignment.Center ),
                }
            )
            self.__matchPattern = GafferUI.TextWidget( parenting = { "index" : ( 1, 2 ) } )
            self.__matchPattern.setToolTip( "Use * to match any text and ? to match any single character.\nDrag a node here to get the text for selecting similar nodes." )
            # Accept node drags so that dropping a node pre-fills the pattern
            # with that node's extracted match string.
            self.__matchPattern.dragEnterSignal().connect( Gaffer.WeakMethod( self.__dragEnter ), scoped = False )
            self.__matchPattern.dragLeaveSignal().connect( Gaffer.WeakMethod( self.__dragLeave ), scoped = False )
            self.__matchPattern.dropSignal().connect( Gaffer.WeakMethod( self.__drop ), scoped = False )

        self._setWidget( grid )

        self.__cancelButton = self._addButton( "Cancel" )
        self.__selectNextButton = self._addButton( "Select Next" )
        self.__selectAllButton = self._addButton( "Select All" )

        # Pressing Enter in the pattern field behaves like "Select All".
        self.__matchPattern.activatedSignal().connect( Gaffer.WeakMethod( self.__activated ), scoped = False )
        self.__cancelButton.clickedSignal().connect( Gaffer.WeakMethod( self.__buttonClicked ), scoped = False )
        self.__selectNextButton.clickedSignal().connect( Gaffer.WeakMethod( self.__buttonClicked ), scoped = False )
        self.__selectAllButton.clickedSignal().connect( Gaffer.WeakMethod( self.__buttonClicked ), scoped = False )

        self.visibilityChangedSignal().connect( Gaffer.WeakMethod( self.__visibilityChanged ), scoped = False )

        # Start with no scope so the first setScope() call always takes effect.
        # NOTE(review): assumes isSame() accepts None on the first call — confirm.
        self.__scope = None
        self.setScope( scope )

    def setScope( self, scope ) :
        """Set the node graph (ScriptNode or nested scope) to search within."""

        if scope.isSame( self.__scope ) :
            return

        self.__scope = scope

        if isinstance( self.__scope, Gaffer.ScriptNode ) :
            self.setTitle( "Find nodes" )
        else :
            self.setTitle( "Find nodes in %s" % self.__scope.getName() )

    def getScope( self ) :
        """Return the scope currently being searched."""

        return self.__scope

    # Class-level registry of ( label, stringExtractor ) match modes, shared
    # by all instances.
    __modes = []
    @classmethod
    def METHOD_NAME( cls, label, stringExtractor ) :
        """Register a match mode: a menu label plus a callable mapping a node
        to the string the pattern is matched against."""

        cls.__modes.append( ( label, stringExtractor ) )

    def __visibilityChanged( self, widget ) :

        if self.visible() :
            # update modes in case more have been added
            self.__matchString[:] = [ m[0] for m in self.__modes ]
            self.__matchPattern.grabFocus()
            self.__matchPattern.setSelection( None, None ) # all text

    def __dragEnter( self, widget, event ) :

        # Highlight only when the drag payload can be interpreted as a node.
        if self.__nodeFromDragData( event.data ) is not None :
            widget.setHighlighted( True )
            return True

        return False

    def __dragLeave( self, widget, event ) :

        widget.setHighlighted( False )

    def __drop( self, widget, event ) :

        # Replace the pattern with the dropped node's match string.
        widget.setText( self.__matchStringExtractor()( self.__nodeFromDragData( event.data ) ) )
        widget.setSelection( None, None ) # all text
        widget.setHighlighted( False )

    def __nodeFromDragData( self, dragData ) :
        """Return the node represented by drag data, or None. Accepts either a
        bare node or a set containing exactly one node."""

        if isinstance( dragData, Gaffer.Node ) :
            return dragData
        elif isinstance( dragData, Gaffer.Set ) and len( dragData ) == 1 and isinstance( dragData[0], Gaffer.Node ) :
            return dragData[0]

        return None

    def __matchStringExtractor( self ) :
        """Return the extractor callable for the currently selected mode."""

        for m in self.__modes :
            if m[0] == self.__matchString.getSelection()[0] :
                return m[1]

        # The menu is populated from __modes, so a selection must match a mode.
        assert( False )

    def __buttonClicked( self, button ) :

        if button is self.__cancelButton :
            self.setVisible( False )
        elif button is self.__selectAllButton :
            self.__selectAll()
        elif button is self.__selectNextButton :
            self.__selectNext()

    def __activated( self, text ) :

        self.__selectAll()

    def __selectAll( self ) :
        """Select every node in the scope whose match string fits the pattern."""

        # Selection always lives on the owning ScriptNode, even when searching
        # a nested scope.
        script = self.__scope.scriptNode() if not isinstance( self.__scope, Gaffer.ScriptNode ) else self.__scope
        selection = script.selection()

        extractor = self.__matchStringExtractor()
        regex = re.compile( fnmatch.translate( self.__matchPattern.getText() ) )

        newSelection = Gaffer.StandardSet()
        for node in self.__scope.children( Gaffer.Node ) :
            if regex.match( extractor( node ) ) :
                newSelection.add( node )

        # Only replace the existing selection when something matched.
        if len( newSelection ) :
            selection.clear()
            selection.add( newSelection )
            self.__frameSelection()

    def __selectNext( self ) :
        """Select the next matching node after the last-selected one, wrapping
        around the scope's children."""

        script = self.__scope.scriptNode() if not isinstance( self.__scope, Gaffer.ScriptNode ) else self.__scope
        selection = script.selection()

        extractor = self.__matchStringExtractor()
        regex = re.compile( fnmatch.translate( self.__matchPattern.getText() ) )

        # Start searching just after the last selected node, if it lives
        # directly in this scope; otherwise start from the beginning.
        startIndex = 0
        if len( selection ) :
            lastSelectedNode = selection[-1]
            if self.__scope.isSame( lastSelectedNode.parent() ) :
                for i, c in enumerate( self.__scope.children() ) :
                    if c.isSame( lastSelectedNode ) :
                        startIndex = i + 1
                        break

        # Walk all children once, wrapping via modulo, and select the first match.
        for i in range( startIndex, startIndex + len( self.__scope ) ) :
            c = self.__scope[ i % len( self.__scope ) ]
            if isinstance( c, Gaffer.Node ) :
                if regex.match( extractor( c ) ) :
                    selection.clear()
                    selection.add( c )
                    self.__frameSelection()
                    break

    def __frameSelection( self ) :
        """Frame the current selection in every GraphEditor viewing this scope."""

        scriptWindow = self.ancestor( GafferUI.ScriptWindow )
        graphEditors = scriptWindow.getLayout().editors( GafferUI.GraphEditor )

        for graphEditor in graphEditors :
            if graphEditor.graphGadget().getRoot().isSame( self.__scope ) :
                graphEditor.frame( scriptWindow.scriptNode().selection() )
def __nodeNameExtractor( node ) :
    # Match mode extractor: the node's name.
    nodeName = node.getName()
    return nodeName

def __nodeTypeExtractor( node ) :
    # Match mode extractor: the node's type name.
    typeName = node.typeName()
    return typeName

# Register the built-in match modes, in menu order.
for __label, __extractor in (
    ( "Node Names", __nodeNameExtractor ),
    ( "Node Types", __nodeTypeExtractor ),
) :
    NodeFinderDialogue.METHOD_NAME( __label, __extractor )
1,033 | fake profile read only | import os
import json
try:
from urllib.parse import parse_qs
except ImportError:
from urlparse import parse_qs
import boto3.session
from chalice import Chalice, BadRequestError, NotFoundError, Response,\
CORSConfig, UnauthorizedError, AuthResponse, AuthRoute
# This is a test app that is used by integration tests.
# This app exercises all the major features of chalice
# and helps prevent regressions.
# The app name comes from the environment so the integration-test harness can
# deploy uniquely named stacks.
app = Chalice(app_name=os.environ['APP_NAME'])
# Give the websocket API its own boto3 session for sending messages back to
# connected clients.
app.websocket_api.session = boto3.session.Session()
# Websocket support is gated behind an experimental feature flag.
app.experimental_feature_flags.update([
    'WEBSOCKETS'
])
# Register a custom content type to be treated as binary (exercised by the
# /custom-binary routes below).
app.api.binary_types.append('application/binary')
@app.authorizer(ttl_seconds=300)
def dummy_auth(auth_request):
    """Trivial token authorizer: the literal token 'yes' is authorized for the
    /builtin-auth route and POSTs to /fake-profile; anything else is rejected."""
    if auth_request.token != 'yes':
        raise UnauthorizedError('Authorization failed')
    return AuthResponse(
        routes=[
            '/builtin-auth',
            AuthRoute('/fake-profile', methods=['POST']),
        ],
        context={'foo': 'bar'},
        principal_id='foo',
    )
@app.route('/')
def index():
    # Basic GET dispatch smoke test.
    return {'hello': 'world'}

@app.route('/a/b/c/d/e/f/g')
def nested_route():
    # Deeply nested static path.
    return {'nested': True}

@app.route('/path/{name}')
def supports_path_params(name):
    # URL path parameter is passed through as a function argument.
    return {'path': name}

@app.route('/singledoc')
def single_doc():
    """Single line docstring."""
    return {'docstring': 'single'}

@app.route('/multidoc')
def multi_doc():
    """Multi-line docstring.
    And here is another line.
    """
    return {'docstring': 'multi'}

@app.route('/post', methods=['POST'])
def supports_only_post():
    # Method restriction: only POST is routed here.
    return {'success': True}

@app.route('/put', methods=['PUT'])
def supports_only_put():
    # Method restriction: only PUT is routed here.
    return {'success': True}

@app.route('/jsonpost', methods=['POST'])
def supports_post_body_as_json():
    # Echo back the parsed JSON request body.
    json_body = app.current_request.json_body
    return {'json_body': json_body}

@app.route('/multimethod', methods=['GET', 'POST'])
def multiple_methods():
    # One view handling two methods; reports which one was used.
    return {'method': app.current_request.method}

@app.route('/badrequest')
def bad_request_error():
    # Exercises the 400 error translation.
    raise BadRequestError("Bad request.")

@app.route('/notfound')
def not_found_error():
    # Exercises the 404 error translation.
    raise NotFoundError("Not found")

@app.route('/arbitrary-error')
def raise_arbitrary_error():
    # An unhandled exception type, to exercise the generic 500 path.
    raise TypeError("Uncaught exception")

@app.route('/formencoded', methods=['POST'],
           content_types=['application/x-www-form-urlencoded'])
def form_encoded():
    # Echo back a form-encoded body, parsed into a dict of lists.
    parsed = parse_qs(app.current_request.raw_body.decode('utf-8'))
    return {
        'parsed': parsed
    }
@app.route('/json-only', content_types=['application/json'])
def json_only():
    # Content-type restriction: only application/json bodies are accepted.
    return {'success': True}

@app.route('/cors', methods=['GET', 'POST', 'PUT'], cors=True)
def supports_cors():
    # It doesn't really matter what we return here because
    # we'll be checking the response headers to verify CORS support.
    return {'cors': True}

@app.route('/custom_cors', methods=['GET', 'POST', 'PUT'], cors=CORSConfig(
    allow_origin='https://foo.example.com',
    allow_headers=['X-Special-Header'],
    max_age=600,
    expose_headers=['X-Special-Header'],
    allow_credentials=True))
def supports_custom_cors():
    # Same as /cors, but with a fully customised CORS configuration.
    return {'cors': True}

@app.route('/todict', methods=['GET'])
def todict():
    # Expose the full request object for inspection by the tests.
    return app.current_request.to_dict()

@app.route('/multifile')
def multifile():
    # Verifies that vendored package code (chalicelib) is deployed alongside
    # the app; import is deliberately done inside the handler.
    from chalicelib import MESSAGE
    return {"message": MESSAGE}

@app.route('/custom-response', methods=['GET'])
def custom_response():
    # Non-default status code plus multiple Set-Cookie headers.
    return Response(
        status_code=204,
        body='',
        headers={
            'Content-Type': 'text/plain',
            'Set-Cookie': ['key=value', 'foo=bar'],
        },
    )

@app.route('/api-key-required', methods=['GET'], api_key_required=True)
def api_key_required():
    # Should only be reachable with a valid API key.
    return {"success": True}

@app.route('/binary', methods=['POST'],
           content_types=['application/octet-stream'])
def binary_round_trip():
    # Echo binary request bodies back unchanged.
    return Response(
        app.current_request.raw_body,
        headers={
            'Content-Type': 'application/octet-stream'
        },
        status_code=200)

@app.route('/custom-binary', methods=['POST'],
           content_types=['application/binary'])
def custom_binary_round_trip():
    # Same round trip, but for the custom binary type registered at the top
    # of this module via app.api.binary_types.
    return Response(
        app.current_request.raw_body,
        headers={
            'Content-Type': 'application/binary'
        },
        status_code=200)
@app.route('/get-binary', methods=['GET'])
def binary_response():
    # Serve a fixed binary payload so the client can verify binary GETs.
    return Response(
        body=b'\xDE\xAD\xBE\xEF',
        headers={
            'Content-Type': 'application/octet-stream'
        },
        status_code=200)

@app.route('/shared', methods=['GET'])
def shared_get():
    # Two separate view functions sharing one URL, split by method.
    return {'method': 'GET'}

@app.route('/shared', methods=['POST'])
def shared_post():
    return {'method': 'POST'}

@app.route('/builtin-auth', authorizer=dummy_auth)
def builtin_auth():
    # Protected by the dummy_auth authorizer defined above; returns the
    # authorizer-provided context for the tests to inspect.
    return {'success': True, 'context': app.current_request.context}

# Testing a common use case where you can have read only GET access
# but you need to be auth'd to POST.
@app.route('/fake-profile', methods=['GET'])
def METHOD_NAME():
    # Unauthenticated read access to the fake profile.
    return {'success': True, 'context': app.current_request.context}

@app.route('/fake-profile', authorizer=dummy_auth,
           methods=['POST'])
def fake_profile_post():
    # Writes require authorization (see dummy_auth's AuthRoute).
    return {'success': True, 'context': app.current_request.context}

@app.route('/repr-raw-body', methods=['POST'])
def repr_raw_body():
    # Echo the raw request body decoded as UTF-8.
    return {'repr-raw-body': app.current_request.raw_body.decode('utf-8')}
# In-memory log of (connection_id, body) pairs received over the websocket.
# NOTE(review): module-level state only persists within a single Lambda
# container — presumably sufficient for these integration tests; confirm.
SOCKET_MESSAGES = []

@app.on_ws_connect()
def connect(event):
    # Connections need no bookkeeping for these tests.
    pass

@app.on_ws_message()
def message(event):
    # Record the message, then echo the whole accumulated log back to the
    # sender so the test can verify ordering and delivery.
    SOCKET_MESSAGES.append((event.connection_id, event.body))
    app.websocket_api.send(event.connection_id, json.dumps(SOCKET_MESSAGES))

@app.on_ws_disconnect()
def disconnect(event):
    # Disconnections need no bookkeeping either.
    pass
1,034 | hook mem fetch unmapped | #!/usr/bin/env python
# Mariano Graziano
from unicorn import *
from unicorn.x86_const import *
import regress
#echo -ne "\x48\x31\xc0\x48\xb8\x04\x00\x00\x00\x00\x00\x00\x00\x48\x3d\x05\x00\x00\x00\x74\x05\xe9\x0f\x00\x00\x00\x48\xba\xbe\xba\x00\x00\x00\x00\x00\x00\xe9\x0f\x00\x00\x00\x48\xba\xca\xc0\x00\x00\x00\x00\x00\x00\xe9\x00\x00\x00\x00\x90" | ndisasm - -b64
#00000000 4831C0 xor rax,rax
#00000003 48B8040000000000 mov rax,0x4
# -0000
#0000000D 483D05000000 cmp rax,0x5
#00000013 7405 jz 0x1a
#00000015 E90F000000 jmp qword 0x29
#0000001A 48BABEBA00000000 mov rdx,0xbabe
# -0000
#00000024 E90F000000 jmp qword 0x38
#00000029 48BACAC000000000 mov rdx,0xc0ca
# -0000
#00000033 E900000000 jmp qword 0x38
#00000038 90 nop
# Module-level state shared with the Init test's hook callbacks:
# mu is replaced with the Uc emulator instance in runTest(); zf selects the
# branch to force on every instruction.
mu = 0
zf = 1 # (0:clear, 1:set)
class Init(regress.RegressTest):
    """Regression test: steer a conditional branch by rewriting EFLAGS.ZF.

    The emulated shellcode (disassembly in the comment block above) loads
    RAX=4, compares it against 5, and branches on ZF into one of two RDX
    loads (0xbabe when "equal", 0xc0ca otherwise).  hook_code() forces ZF on
    every instruction, so with the module-level zf=1 the "equal" path must be
    taken and RDX must end up as 0xbabe, which runTest() asserts.

    Fix: the original mixed Python 2 ``print`` statements with ``print()``
    calls, so the file could not even be parsed by Python 3.  All output now
    uses the function form (valid on both 2 and 3), and the shellcode is a
    bytes literal as required by mem_write() on Python 3.
    """

    def clear_zf(self):
        # Clear bit 6 (ZF) of EFLAGS; write back only if it actually changed.
        eflags_cur = mu.reg_read(UC_X86_REG_EFLAGS)
        eflags = eflags_cur & ~(1 << 6)
        print("[clear_zf] - eflags from %x to %x" % (eflags_cur, eflags))
        if eflags != eflags_cur:
            print("[clear_zf] - writing new eflags...")
            mu.reg_write(UC_X86_REG_EFLAGS, eflags)

    def set_zf(self):
        # Set bit 6 (ZF) of EFLAGS; write back only if it actually changed.
        eflags_cur = mu.reg_read(UC_X86_REG_EFLAGS)
        eflags = eflags_cur | (1 << 6)
        print("[set_zf] - eflags from %x to %x" % (eflags_cur, eflags))
        if eflags != eflags_cur:
            print("[set_zf] - writing new eflags...")
            mu.reg_write(UC_X86_REG_EFLAGS, eflags)

    def handle_zf(self, zf):
        # Dispatch to clear_zf/set_zf based on the requested ZF value.
        print("[handle_zf] - eflags %s" % zf)
        if zf == 0:
            self.clear_zf()
        else:
            self.set_zf()

    def multipath(self):
        # Force ZF to the module-level default on every instruction.
        print("[multipath] - handling ZF (%s) - default" % zf)
        self.handle_zf(zf)

    # callback for tracing basic blocks
    def hook_block(self, uc, address, size, user_data):
        print(">>> Tracing basic block at 0x%x, block size = 0x%x" % (address, size))

    # callback for tracing instructions
    def hook_code(self, uc, address, size, user_data):
        # Dump the full GPR state, then rewrite ZF before the next instruction.
        print(">>> Tracing instruction at 0x%x, instruction size = %u" % (address, size))
        rax = mu.reg_read(UC_X86_REG_RAX)
        rbx = mu.reg_read(UC_X86_REG_RBX)
        rcx = mu.reg_read(UC_X86_REG_RCX)
        rdx = mu.reg_read(UC_X86_REG_RDX)
        rsi = mu.reg_read(UC_X86_REG_RSI)
        rdi = mu.reg_read(UC_X86_REG_RDI)
        r8 = mu.reg_read(UC_X86_REG_R8)
        r9 = mu.reg_read(UC_X86_REG_R9)
        r10 = mu.reg_read(UC_X86_REG_R10)
        r11 = mu.reg_read(UC_X86_REG_R11)
        r12 = mu.reg_read(UC_X86_REG_R12)
        r13 = mu.reg_read(UC_X86_REG_R13)
        r14 = mu.reg_read(UC_X86_REG_R14)
        r15 = mu.reg_read(UC_X86_REG_R15)
        eflags = mu.reg_read(UC_X86_REG_EFLAGS)
        print(">>> RAX = %x" % rax)
        print(">>> RBX = %x" % rbx)
        print(">>> RCX = %x" % rcx)
        print(">>> RDX = %x" % rdx)
        print(">>> RSI = %x" % rsi)
        print(">>> RDI = %x" % rdi)
        print(">>> R8 = %x" % r8)
        print(">>> R9 = %x" % r9)
        print(">>> R10 = %x" % r10)
        print(">>> R11 = %x" % r11)
        print(">>> R12 = %x" % r12)
        print(">>> R13 = %x" % r13)
        print(">>> R14 = %x" % r14)
        print(">>> R15 = %x" % r15)
        # (label typo "ELAGS" fixed)
        print(">>> EFLAGS = %x" % eflags)
        print("-" * 11)
        self.multipath()
        print("-" * 11)

    # callback for tracing memory access (READ or WRITE)
    def hook_mem_access(self, uc, access, address, size, value, user_data):
        if access == UC_MEM_WRITE:
            print(">>> Memory is being WRITE at 0x%x, data size = %u, data value = 0x%x" \
                    % (address, size, value))
        else:  # READ
            print(">>> Memory is being READ at 0x%x, data size = %u" \
                    % (address, size))

    # callback for tracing invalid memory access (READ or WRITE)
    def hook_mem_invalid(self, uc, access, address, size, value, user_data):
        # Returning True tells Unicorn the fault was handled and emulation
        # should continue.
        print("[ HOOK_MEM_INVALID - Address: %s ]" % hex(address))
        if access == UC_MEM_WRITE_UNMAPPED:
            print(">>> Missing memory is being WRITE at 0x%x, data size = %u, data value = 0x%x" % (address, size, value))
            return True
        else:
            print(">>> Missing memory is being READ at 0x%x, data size = %u, data value = 0x%x" % (address, size, value))
            return True

    def METHOD_NAME(self, uc, access, address, size, value, user_data):
        # Unmapped-fetch hook; .strip("L") removes the Python 2 long suffix
        # (a no-op on Python 3).
        print("[ HOOK_MEM_FETCH - Address: %s ]" % hex(address))
        print("[ mem_fetch_unmapped: faulting address at %s ]" % hex(address).strip("L"))
        return True

    def runTest(self):
        global mu

        # Shellcode under test; see the ndisasm dump at the top of the file.
        JUMP = b"\x48\x31\xc0\x48\xb8\x04\x00\x00\x00\x00\x00\x00\x00\x48\x3d\x05\x00\x00\x00\x74\x05\xe9\x0f\x00\x00\x00\x48\xba\xbe\xba\x00\x00\x00\x00\x00\x00\xe9\x0f\x00\x00\x00\x48\xba\xca\xc0\x00\x00\x00\x00\x00\x00\xe9\x00\x00\x00\x00\x90"
        ADDRESS = 0x1000000

        print("Emulate x86_64 code")
        # Initialize emulator in X86-64bit mode
        mu = Uc(UC_ARCH_X86, UC_MODE_64)

        # map 2MB memory for this emulation
        mu.mem_map(ADDRESS, 2 * 1024 * 1024)

        # write machine code to be emulated to memory
        mu.mem_write(ADDRESS, JUMP)

        # setup stack
        mu.reg_write(UC_X86_REG_RSP, ADDRESS + 0x200000)

        # tracing all basic blocks with customized callback
        mu.hook_add(UC_HOOK_BLOCK, self.hook_block)

        # tracing all instructions in range [ADDRESS, ADDRESS+0x60]
        mu.hook_add(UC_HOOK_CODE, self.hook_code, None, ADDRESS, ADDRESS + 0x60)

        # tracing all memory READ & WRITE access
        mu.hook_add(UC_HOOK_MEM_WRITE, self.hook_mem_access)
        mu.hook_add(UC_HOOK_MEM_READ, self.hook_mem_access)
        mu.hook_add(UC_HOOK_MEM_FETCH_UNMAPPED, self.METHOD_NAME)
        mu.hook_add(UC_HOOK_MEM_READ_UNMAPPED | UC_HOOK_MEM_WRITE_UNMAPPED, self.hook_mem_invalid)

        try:
            # emulate machine code in infinite time
            mu.emu_start(ADDRESS, ADDRESS + len(JUMP))
        except UcError as e:
            print("ERROR: %s" % e)

        # With ZF forced to 1 the jz is taken and RDX must hold 0xbabe.
        rdx = mu.reg_read(UC_X86_REG_RDX)
        self.assertEqual(rdx, 0xbabe, "RDX contains the wrong value. Eflags modification failed.")
# Entry point when run directly by the regression harness.
if __name__ == '__main__':
    regress.main()
1,035 | get survey id | # -*- coding: utf-8 -*-
# SDAPS - Scripts for data acquisition with paper based surveys
# Copyright(C) 2012 Benjamin Berg <benjamin@sipsolutions.net>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from sdaps import defs
from sdaps import model
from sdaps.utils.exceptions import RecognitionError
from sdaps.utils.barcode import read_barcode
# Reading the metainformation of CODE-128 style questionnaires. See classic.py
# for some more information.
class Image(model.buddy.Buddy, metaclass=model.buddy.Register):
    """Buddy that reads CODE-128 metadata barcodes off a scanned sheet image.

    All readers return None when the expected barcode cannot be found or does
    not have the expected format.  The barcode at the bottom right encodes
    survey_id * 10000 + page_number; separate barcodes at the bottom left and
    bottom center carry the questionnaire ID and global ID respectively.

    Fix: get_page_number() validated its code with
    ``not code.isdigit() and len(code) < 4`` although (per its own comment)
    the code must be entirely numeric AND at least 4 characters — so e.g. a
    5-character alphanumeric code slipped through and ``int(code[-4:])``
    raised ValueError.  The condition now uses ``or``, matching the
    correct check already used in METHOD_NAME().
    """

    name = 'style'
    obj_class = model.sheet.Image

    def get_page_rotation(self):
        # Returns page rotation or "None" if it cannot be retrieved
        # Figure out wether the page is rotated by looking for a barcode first
        # at the bottom right, then at the top left.
        # Note that we do not care about the value of the barcode, we are happy
        # to simply know that it exists.
        paper_width = self.obj.sheet.survey.defs.paper_width
        paper_height = self.obj.sheet.survey.defs.paper_height

        # Search for the barcode in the lower right corner.
        # Note that we cannot find another barcode this way, because the one in the
        # center of the page is not complete
        code = \
            read_barcode(self.obj.surface.surface, self.obj.matrix.mm_to_px(),
                         paper_width / 2,
                         paper_height - self.obj.sheet.survey.defs.corner_mark_bottom - defs.code128_vpad - defs.code128_height - 5,
                         paper_width / 2,
                         self.obj.sheet.survey.defs.corner_mark_bottom + defs.code128_vpad + defs.code128_height + 5)

        if code is None:
            # Well, that failed, so try to search the upper left corner instead
            code = \
                read_barcode(self.obj.surface.surface, self.obj.matrix.mm_to_px(),
                             0, 0,
                             paper_width / 2,
                             self.obj.sheet.survey.defs.corner_mark_bottom + defs.code128_vpad + defs.code128_height + 5)

            if code is not None:
                # Found only in the (rotated) top-left position: page is rotated.
                return True
            else:
                return None
        else:
            return False

    def get_page_number(self):
        # Returns page number or "None" if it cannot be retrieved
        # In this function assume that the rotation is correct already.
        paper_width = self.obj.sheet.survey.defs.paper_width
        paper_height = self.obj.sheet.survey.defs.paper_height

        # Search for the barcode in the lower right corner.
        code = \
            read_barcode(self.obj.surface.surface, self.obj.matrix.mm_to_px(),
                         paper_width / 2,
                         paper_height - self.obj.sheet.survey.defs.corner_mark_bottom - defs.code128_vpad - defs.code128_height - 5,
                         paper_width / 2,
                         self.obj.sheet.survey.defs.corner_mark_bottom + defs.code128_vpad + defs.code128_height + 5)

        # The code needs to be entirely numeric and at least 4 characters for
        # the page.  (Bug fix: this previously used "and", letting through
        # codes that were non-numeric *or* too short, but not both.)
        if code is None or not code.isdigit() or len(code) < 4:
            return None

        # The page number is in the lower four digits, simply extract it and convert
        # to integer
        return int(code[-4:])

    def METHOD_NAME(self):
        # Returns the survey ID or "None" if it cannot be retrieved
        # In this function assume that the rotation is correct already.
        paper_width = self.obj.sheet.survey.defs.paper_width
        paper_height = self.obj.sheet.survey.defs.paper_height

        # Read the same lower-right barcode as get_page_number(); the survey
        # ID is encoded in the digits above the four page-number digits.
        code = \
            read_barcode(self.obj.surface.surface, self.obj.matrix.mm_to_px(),
                         paper_width / 2,
                         paper_height - self.obj.sheet.survey.defs.corner_mark_bottom - defs.code128_vpad - defs.code128_height - 5,
                         paper_width / 2,
                         self.obj.sheet.survey.defs.corner_mark_bottom + defs.code128_vpad + defs.code128_height + 5)

        # Need more than 4 digits so that something remains once the page
        # number is stripped off.
        if code is None or not code.isdigit() or len(code) <= 4:
            return None

        return int(code[:-4])

    def get_questionnaire_id(self):
        # Returns the questionnaire ID or "None" if it cannot be retrieved
        # In this function assume that the rotation is correct already.
        paper_width = self.obj.sheet.survey.defs.paper_width
        paper_height = self.obj.sheet.survey.defs.paper_height

        # Search for the barcode on the bottom left of the page
        code = \
            read_barcode(self.obj.surface.surface, self.obj.matrix.mm_to_px(),
                         0,
                         paper_height - self.obj.sheet.survey.defs.corner_mark_bottom - defs.code128_vpad - defs.code128_height - 5,
                         paper_width / 2,
                         self.obj.sheet.survey.defs.corner_mark_bottom + defs.code128_vpad + defs.code128_height + 5)

        # Simply return the code, it may be alphanumeric, we don't care here
        # XXX: Is that assumption sane?
        return code

    def get_global_id(self):
        # Returns the global ID or "None" if it cannot be retrieved
        # In this function assume that the rotation is correct already.
        paper_width = self.obj.sheet.survey.defs.paper_width
        paper_height = self.obj.sheet.survey.defs.paper_height

        # Search for the barcode in the bottom center of the page
        code = \
            read_barcode(self.obj.surface.surface, self.obj.matrix.mm_to_px(),
                         paper_width / 4,
                         paper_height - self.obj.sheet.survey.defs.corner_mark_bottom - defs.code128_vpad - defs.code128_height - 5,
                         paper_width / 2,
                         self.obj.sheet.survey.defs.corner_mark_bottom + defs.code128_vpad + defs.code128_height + 5)

        # Simply return the code, it may be alphanumeric, we don't care here
        return code
1,036 | test project form | from django.test.client import RequestFactory
from django.test.testcases import TestCase
from mediathread.factories import MediathreadTestMixin, ProjectFactory
from mediathread.main.course_details import ALLOW_PUBLIC_COMPOSITIONS_KEY, \
SELECTION_VISIBILITY_KEY
from mediathread.projects.forms import ProjectForm
from mediathread.projects.models import RESPONSE_VIEW_NEVER, \
RESPONSE_VIEW_SUBMITTED, RESPONSE_VIEW_ALWAYS, PROJECT_TYPE_ASSIGNMENT, \
PUBLISH_DRAFT, PUBLISH_WHOLE_CLASS, PROJECT_TYPE_SELECTION_ASSIGNMENT
class TestProjectForms(MediathreadTestMixin, TestCase):
    """Tests for ProjectForm field choices across user roles and course
    settings.

    Fixes: the deprecated ``assertEquals`` alias is replaced by
    ``assertEqual`` throughout (same behavior; the alias is removed in
    Python 3.12), and the two misleadingly swapped local names in
    test_bound_assignment_response_policy_options are corrected (the plain
    assignment was bound to ``selection_assignment`` and vice versa).
    """

    def setUp(self):
        self.setup_sample_course()
        # A bare GET request carrying the course and user the form reads.
        self.request = RequestFactory().get('/')
        self.request.course = self.sample_course
        self.request.user = self.instructor_one

    def test_publish_options(self):
        # faculty
        self.request.user = self.instructor_one
        frm = ProjectForm(self.request, instance=None, data={})
        lst = frm.fields['publish'].choices
        self.assertEqual(len(lst), 2)
        self.assertEqual(lst[0][0], PUBLISH_DRAFT[0])
        self.assertEqual(lst[1][0], PUBLISH_WHOLE_CLASS[0])

        # student
        self.request.user = self.student_one
        frm = ProjectForm(self.request, instance=None, data={})
        lst = frm.fields['publish'].choices
        self.assertEqual(len(lst), 3)
        self.assertEqual(lst[0][0], PUBLISH_DRAFT[0])
        self.assertEqual(lst[1][0], 'InstructorShared')
        self.assertEqual(lst[2][0], PUBLISH_WHOLE_CLASS[0])

        # maybe public option
        self.sample_course.add_detail(ALLOW_PUBLIC_COMPOSITIONS_KEY, 1)
        frm = ProjectForm(self.request, instance=None, data={})
        lst = frm.fields['publish'].choices
        self.assertEqual(len(lst), 3)
        self.assertEqual(lst[0][0], PUBLISH_DRAFT[0])
        self.assertEqual(lst[1][0], 'InstructorShared')
        self.assertEqual(lst[2][0], PUBLISH_WHOLE_CLASS[0])

    def METHOD_NAME(self):
        # Participants are listed alphabetically and nothing is required on
        # an unbound form.
        frm = ProjectForm(self.request, instance=None, data={})
        lst = frm.fields['participants'].choices
        self.assertEqual(lst[0][1], 'Instructor One')
        self.assertEqual(lst[1][1], 'Instructor Two')
        self.assertEqual(lst[2][1], 'Student One')
        self.assertEqual(lst[3][1], 'Student Three')
        self.assertEqual(lst[4][1], 'Student Two')

        self.assertFalse(frm.fields['participants'].required)
        self.assertFalse(frm.fields['body'].required)
        self.assertFalse(frm.fields['submit'].required)
        self.assertFalse(frm.fields['publish'].required)
        self.assertFalse(frm.fields['response_view_policy'].required)

    def test_bound_composition_form(self):
        self.sample_course.add_detail(ALLOW_PUBLIC_COMPOSITIONS_KEY, 1)
        project = ProjectFactory.create(
            course=self.sample_course, author=self.student_one,
            policy=PUBLISH_DRAFT[0], response_view_policy='always')

        data = {}
        frm = ProjectForm(self.request, instance=project, data=data)
        self.assertEqual(frm.initial['publish'], PUBLISH_DRAFT[0])
        lst = frm.fields['publish'].choices
        self.assertEqual(len(lst), 3)
        self.assertEqual(lst[0][0], PUBLISH_DRAFT[0])
        self.assertEqual(lst[1][0], PUBLISH_WHOLE_CLASS[0])
        self.assertEqual(lst[2][0], 'PublicEditorsAreOwners')

    def test_bound_assignment_form(self):
        self.sample_course.add_detail(ALLOW_PUBLIC_COMPOSITIONS_KEY, 1)
        assignment = ProjectFactory.create(
            course=self.sample_course, author=self.student_one,
            policy=PUBLISH_DRAFT[0],
            project_type=PROJECT_TYPE_ASSIGNMENT)

        data = {}
        frm = ProjectForm(self.request, instance=assignment, data=data)
        self.assertEqual(frm.initial['publish'], PUBLISH_DRAFT[0])
        lst = frm.fields['publish'].choices
        self.assertEqual(len(lst), 2)
        self.assertEqual(lst[0][0], PUBLISH_DRAFT[0])
        self.assertEqual(lst[1][0], PUBLISH_WHOLE_CLASS[0])

    def test_bound_assignment_form_with_responses(self):
        # Once an assignment has responses, it can no longer be unpublished.
        self.sample_course.add_detail(ALLOW_PUBLIC_COMPOSITIONS_KEY, 1)
        assignment = ProjectFactory.create(
            course=self.sample_course, author=self.student_one,
            policy=PUBLISH_WHOLE_CLASS[0],
            project_type=PROJECT_TYPE_ASSIGNMENT)
        ProjectFactory.create(
            course=self.sample_course, author=self.student_one,
            title="Student One Response",
            policy=PUBLISH_WHOLE_CLASS[0], parent=assignment)

        data = {}
        frm = ProjectForm(self.request, instance=assignment, data=data)
        self.assertEqual(frm.initial['publish'], PUBLISH_WHOLE_CLASS[0])
        lst = frm.fields['publish'].choices
        self.assertEqual(len(lst), 1)
        self.assertEqual(lst[0][0], PUBLISH_WHOLE_CLASS[0])

    def test_response_policy_options(self):
        frm = ProjectForm(self.request, instance=None, data={})
        lst = frm.fields['response_view_policy'].choices
        self.assertEqual(len(lst), 3)
        self.assertEqual(lst[0], RESPONSE_VIEW_NEVER)
        self.assertEqual(lst[1], RESPONSE_VIEW_SUBMITTED)
        self.assertEqual(lst[2], RESPONSE_VIEW_ALWAYS)

        # With selection visibility disabled, only "never" remains.
        self.sample_course.add_detail(SELECTION_VISIBILITY_KEY, 0)
        frm = ProjectForm(self.request, instance=None, data={})
        lst = frm.fields['response_view_policy'].choices
        self.assertEqual(len(lst), 1)
        self.assertEqual(lst[0], RESPONSE_VIEW_NEVER)

    def test_bound_assignment_response_policy_options(self):
        self.sample_course.add_detail(SELECTION_VISIBILITY_KEY, 0)

        # (Names fixed: this one really is a plain assignment.)
        assignment = ProjectFactory.create(
            course=self.sample_course, author=self.student_one,
            policy=PUBLISH_WHOLE_CLASS[0],
            project_type=PROJECT_TYPE_ASSIGNMENT)
        frm = ProjectForm(self.request, instance=assignment, data={})
        lst = frm.fields['response_view_policy'].choices
        self.assertEqual(len(lst), 1)
        self.assertEqual(lst[0], RESPONSE_VIEW_NEVER)

        selection_assignment = ProjectFactory.create(
            course=self.sample_course, author=self.instructor_one,
            policy=PUBLISH_WHOLE_CLASS[0],
            project_type=PROJECT_TYPE_SELECTION_ASSIGNMENT)
        frm = ProjectForm(self.request, instance=selection_assignment, data={})
        lst = frm.fields['response_view_policy'].choices
        self.assertEqual(len(lst), 1)
        self.assertEqual(lst[0], RESPONSE_VIEW_NEVER)
1,037 | verify tags | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Purpose
Shows how to use the AWS SDK for Python (Boto3) with Amazon Simple Email Service
(Amazon SES) to manage email templates that contain replaceable tags.
"""
import logging
from pprint import pprint
import re
import boto3
from botocore.exceptions import ClientError
# Defines template tags, which are enclosed in two curly braces, such as {{tag}}.
TEMPLATE_REGEX = r'(?<={{).+?(?=}})'
logger = logging.getLogger(__name__)
# snippet-start:[python.example_code.ses.SesTemplate]
class SesTemplate:
    """Encapsulates Amazon SES template functions.

    Fix: delete_template() previously reset ``template_tags`` to None while
    __init__ and _extract_tags maintain it as a set; calling METHOD_NAME()
    after a deletion therefore raised TypeError.  It is now reset to an
    empty set, keeping the attribute's type invariant.
    """
    def __init__(self, ses_client):
        """
        :param ses_client: A Boto3 Amazon SES client.
        """
        self.ses_client = ses_client
        # Currently loaded template dict (or None) and the set of replaceable
        # tags extracted from it.
        self.template = None
        self.template_tags = set()

    def _extract_tags(self, subject, text, html):
        """
        Extracts tags from a template as a set of unique values.

        :param subject: The subject of the email.
        :param text: The text version of the email.
        :param html: The html version of the email.
        """
        self.template_tags = set(re.findall(TEMPLATE_REGEX, subject + text + html))
        logger.info("Extracted template tags: %s", self.template_tags)
# snippet-end:[python.example_code.ses.SesTemplate]

    def METHOD_NAME(self, template_data):
        """
        Verifies that the tags in the template data are part of the template.

        :param template_data: Template data formed of key-value pairs of tags and
                              replacement text.
        :return: True when all of the tags in the template data are usable with the
                 template; otherwise, False.
        """
        diff = set(template_data) - self.template_tags
        if diff:
            logger.warning(
                "Template data contains tags that aren't in the template: %s", diff)
            return False
        else:
            return True

    def name(self):
        """
        :return: Gets the name of the template, if a template has been loaded.
        """
        return self.template['TemplateName'] if self.template is not None else None

# snippet-start:[python.example_code.ses.CreateTemplate]
    def create_template(self, name, subject, text, html):
        """
        Creates an email template.

        :param name: The name of the template.
        :param subject: The subject of the email.
        :param text: The plain text version of the email.
        :param html: The HTML version of the email.
        """
        try:
            template = {
                'TemplateName': name,
                'SubjectPart': subject,
                'TextPart': text,
                'HtmlPart': html}
            self.ses_client.create_template(Template=template)
            logger.info("Created template %s.", name)
            self.template = template
            self._extract_tags(subject, text, html)
        except ClientError:
            logger.exception("Couldn't create template %s.", name)
            raise
# snippet-end:[python.example_code.ses.CreateTemplate]

# snippet-start:[python.example_code.ses.DeleteTemplate]
    def delete_template(self):
        """
        Deletes an email template.
        """
        try:
            self.ses_client.delete_template(TemplateName=self.template['TemplateName'])
            logger.info("Deleted template %s.", self.template['TemplateName'])
            self.template = None
            # Reset to an empty set (not None) so METHOD_NAME() remains
            # callable after deletion.
            self.template_tags = set()
        except ClientError:
            logger.exception(
                "Couldn't delete template %s.", self.template['TemplateName'])
            raise
# snippet-end:[python.example_code.ses.DeleteTemplate]

# snippet-start:[python.example_code.ses.GetTemplate]
    def get_template(self, name):
        """
        Gets a previously created email template.

        :param name: The name of the template to retrieve.
        :return: The retrieved email template.
        """
        try:
            response = self.ses_client.get_template(TemplateName=name)
            self.template = response['Template']
            logger.info("Got template %s.", name)
            self._extract_tags(
                self.template['SubjectPart'], self.template['TextPart'],
                self.template['HtmlPart'])
        except ClientError:
            logger.exception("Couldn't get template %s.", name)
            raise
        else:
            return self.template
# snippet-end:[python.example_code.ses.GetTemplate]

# snippet-start:[python.example_code.ses.ListTemplates]
    def list_templates(self):
        """
        Gets a list of all email templates for the current account.

        :return: The list of retrieved email templates.
        """
        try:
            response = self.ses_client.list_templates()
            templates = response['TemplatesMetadata']
            logger.info("Got %s templates.", len(templates))
        except ClientError:
            logger.exception("Couldn't get templates.")
            raise
        else:
            return templates
# snippet-end:[python.example_code.ses.ListTemplates]

# snippet-start:[python.example_code.ses.UpdateTemplate]
    def update_template(self, name, subject, text, html):
        """
        Updates a previously created email template.

        :param name: The name of the template.
        :param subject: The subject of the email.
        :param text: The plain text version of the email.
        :param html: The HTML version of the email.
        """
        try:
            template = {
                'TemplateName': name,
                'SubjectPart': subject,
                'TextPart': text,
                'HtmlPart': html}
            self.ses_client.update_template(Template=template)
            logger.info("Updated template %s.", name)
            self.template = template
            self._extract_tags(subject, text, html)
        except ClientError:
            logger.exception("Couldn't update template %s.", name)
            raise
# snippet-end:[python.example_code.ses.UpdateTemplate]
# snippet-start:[python.example_code.ses.Scenario_Templates]
def usage_demo():
    """Run a short end-to-end demo of Amazon SES email template management."""
    print('-' * 88)
    print("Welcome to the Amazon Simple Email Service (Amazon SES) email template "
          "demo!")
    print('-' * 88)
    logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
    ses_template = SesTemplate(boto3.client('ses'))
    template = {
        'name': 'doc-example-template',
        'subject': 'Example of an email template.',
        'text': "This is what {{name}} will {{action}} if {{name}} can't display HTML.",
        'html': "<p><i>This</i> is what {{name}} will {{action}} if {{name}} "
                "<b>can</b> display HTML.</p>"}
    print("Creating an email template.")
    ses_template.create_template(**template)
    print("Getting the list of template metadata.")
    for temp_meta in ses_template.list_templates():
        print(f"Got template {temp_meta['Name']}:")
        pprint(ses_template.get_template(temp_meta['Name']))
    print(f"Deleting template {template['name']}.")
    ses_template.delete_template()
    print("Thanks for watching!")
    print('-' * 88)
# snippet-end:[python.example_code.ses.Scenario_Templates]
# Run the demo only when this file is executed as a script (not on import).
if __name__ == '__main__':
    usage_demo()
1,038 | get account address | #!/bin/env python3
# Send a raw transaction
from web3 import Web3
from web3.middleware import geth_poa_middleware
from eth_account import Account
import os, sys
import json
import requests
def getFileContent(file_name):
    """Return the contents of *file_name* with all newline characters removed.

    :param file_name: Path of the file to read.
    :return: The file's text with every ``\\n`` stripped out.
    """
    # Context manager guarantees the handle is closed even if read() raises;
    # the original leaked the handle on error.
    with open(file_name, "r") as file:
        data = file.read()
    return data.replace("\n", "")
def getContent(file_name):
    """Return the contents of *file_name* with all newline characters removed.

    NOTE(review): this is an exact duplicate of ``getFileContent``; it is kept
    for backward compatibility -- consider consolidating callers on one name.

    :param file_name: Path of the file to read.
    :return: The file's text with every ``\\n`` stripped out.
    """
    # Context manager closes the handle even on read errors.
    with open(file_name, "r") as file:
        data = file.read()
    return data.replace("\n", "")
# Connect to a geth node
def connect_to_geth(url, consensus):
    """Connect to a geth node using the given consensus mechanism.

    :param url: HTTP endpoint of the geth node.
    :param consensus: One of ``'POA'``, ``'POS'`` or ``'POW'``.
    :return: A connected ``Web3`` instance.
    :raises ValueError: If *consensus* is not a recognized mechanism.
    """
    if consensus == 'POA':
        return connect_to_geth_poa(url)
    elif consensus == 'POS':
        return connect_to_geth_pos(url)
    elif consensus == 'POW':
        return connect_to_geth_pow(url)
    # The original silently returned None on a typo; fail loudly instead.
    raise ValueError("unknown consensus mechanism: {}".format(consensus))
# Connect to a geth node
def connect_to_geth_pos(url):
    """Connect to a proof-of-stake geth node, exiting the process on failure."""
    web3 = Web3(Web3.HTTPProvider(url))
    if web3.isConnected():
        return web3
    sys.exit("Connection failed!")
# Connect to a geth node
def connect_to_geth_poa(url):
    """Connect to a proof-of-authority geth node, exiting the process on failure."""
    provider = Web3.HTTPProvider(url)
    web3 = Web3(provider)
    if not web3.isConnected():
        sys.exit("Connection failed!")
    # POA chains need this middleware to parse their block headers.
    web3.middleware_onion.inject(geth_poa_middleware, layer=0)
    return web3
# Connect to a geth node
def connect_to_geth_pow(url):
    """Connect to a proof-of-work geth node, exiting the process on failure."""
    web3 = Web3(Web3.HTTPProvider(url))
    if web3.isConnected():
        return web3
    sys.exit("Connection failed!")
# Select an account address from the key store
# Return: an address
def METHOD_NAME(index):
    """Return the checksummed address of the key-store entry at *index*.

    ``index.json`` is bookkeeping, not an account key file, so it is skipped.
    """
    key_files = [f for f in os.listdir('keystore/eth') if f != 'index.json']
    with open('keystore/eth/{}'.format(key_files[index])) as keyfile:
        content = json.loads(keyfile.read())
    return Web3.toChecksumAddress(content['address'])
# Return how many account addresses are in the keystore folder
def get_account_total():
    """Return the number of account key files in the keystore folder."""
    entries = os.listdir('keystore/eth')
    # index.json is bookkeeping, not an account key file.
    return len([name for name in entries if name != 'index.json'])
# Get all the account addresses from the key store
# Return: a list of addresses
def get_all_account_addresses():
    """Return the checksummed address of every account in the keystore."""
    addresses = []
    for fname in os.listdir('keystore/eth'):
        if fname == 'index.json':
            # Bookkeeping file, not an account key file.
            continue
        with open('keystore/eth/{}'.format(fname)) as keyfile:
            content = json.loads(keyfile.read())
        if 'address' in content:
            addresses.append(Web3.toChecksumAddress(content['address']))
    return addresses
def get_all_accounts_with_node_info():
    """Return the account/node mapping stored in ``keystore/eth/index.json``.

    :return: The parsed JSON content of the index file.
    """
    # The original left the file handle open; a context manager closes it.
    with open('keystore/eth/index.json') as f:
        return json.load(f)
# Print balance
def print_balance(web3, account):
    """Print an account address together with its current balance."""
    balance = web3.eth.get_balance(account)
    print("{}: {}".format(account, balance))
# Construct a transaction
def construct_raw_transaction(sender, recipient, nonce, amount, data):
    """Build a transaction dict suitable for local signing and raw submission.

    :param sender: Address the transaction is sent from.
    :param recipient: Address the transaction is sent to (None for contract
        creation).
    :param nonce: Sender's current transaction count.
    :param amount: Value to transfer, in ether.
    :param data: Hex-encoded payload (e.g. contract bytecode or call data).
    """
    return {
        'nonce': nonce,
        'from': sender,
        'to': recipient,
        'value': Web3.toWei(amount, 'ether'),
        'gas': 2000000,
        'chainId': 10,  # Must match with the value used in the emulator
        'gasPrice': Web3.toWei('50', 'gwei'),
        'data': data,
    }
# Send raw transaction
def send_raw_transaction(web3, sender, recipient, amount, data):
    """Sign a transaction locally, submit it, and wait for its receipt."""
    print("---------Sending Raw Transaction ---------------")
    nonce = web3.eth.getTransactionCount(sender.address)
    tx = construct_raw_transaction(sender.address, recipient, nonce, amount, data)
    signed = web3.eth.account.sign_transaction(tx, sender.key)
    tx_hash = web3.eth.sendRawTransaction(signed.rawTransaction)
    print("Transaction Hash: {}".format(tx_hash.hex()))
    receipt = web3.eth.wait_for_transaction_receipt(tx_hash)
    print("Transaction Receipt: {}".format(receipt))
    return receipt
# Send raw transaction (no wait)
def send_raw_transaction_no_wait(web3, sender, recipient, amount, data):
    """Sign and submit a transaction without waiting for it to be mined."""
    print("---------Sending Raw Transaction ---------------")
    nonce = web3.eth.getTransactionCount(sender.address)
    tx = construct_raw_transaction(sender.address, recipient, nonce, amount, data)
    signed = web3.eth.account.sign_transaction(tx, sender.key)
    tx_hash = web3.eth.sendRawTransaction(signed.rawTransaction)
    print("Transaction Hash: {}".format(tx_hash.hex()))
    return tx_hash
# Send transaction
def send_transaction_via_geth(node, recipient, amount, data):
    """Send a transaction from the node's coinbase and wait for the receipt."""
    print("---------Sending Transaction from a geth node ---------------")
    params = {
        'from': node.eth.coinbase,
        'to': recipient,
        'value': amount,
        'data': data,
    }
    tx_hash = node.eth.send_transaction(params)
    print("Transaction Hash: {}".format(tx_hash.hex()))
    receipt = node.eth.wait_for_transaction_receipt(tx_hash)
    print("Transaction Receipt: {}".format(receipt))
    return receipt
# Deploy contract (high-level)
def deploy_contract_via_geth(node, abi_file, bin_file):
    """Deploy a contract through the geth node and return its address."""
    print("---------Deploying Contract from a geth node ----------------")
    contract = node.eth.contract(abi=getContent(abi_file),
                                 bytecode=getContent(bin_file))
    tx_hash = contract.constructor().transact({'from': node.eth.coinbase})
    print("... Waiting for block")
    receipt = node.eth.wait_for_transaction_receipt(tx_hash)
    print("Transaction Hash: {}".format(receipt.transactionHash.hex()))
    print("Transaction Receipt: {}".format(receipt))
    address = receipt.contractAddress
    print("Contract Address: {}".format(address))
    return address
# Deploy contract (low-level): directly construct a transaction
# Using None as the target address, so the transaction will not have
# the 'to' field.
def deploy_contract_low_level_via_geth(node, abi_file, bin_file):
    """Deploy a contract by sending its bytecode in a plain transaction.

    The recipient is None, so the transaction has no 'to' field -- that is
    how contract creation is signalled.
    """
    print("---------Deploying Contract from a geth node (low level) ----------")
    receipt = send_transaction_via_geth(node, None, 0, getContent(bin_file))
    address = receipt.contractAddress
    print("Contract Address: {}".format(address))
    return address
# Deploy a contract using raw transaction
def deploy_contract_raw(web3, sender, bin_file):
    """Deploy a contract with a locally signed raw transaction."""
    print("---------Deploying Raw Contract (low level) ----------")
    receipt = send_raw_transaction(web3, sender, None, 0, getContent(bin_file))
    address = receipt.contractAddress
    print("Contract Address: {}".format(address))
    return address
# Invoke contract
def invoke_contract_via_geth(node, contract_address, abi_file, function, arg):
    """Invoke *function* on a deployed contract, first locally, then as a tx.

    :param node: Connected Web3 instance for a geth node.
    :param contract_address: Address of the deployed contract.
    :param abi_file: Path to the file containing the contract ABI.
    :param function: Name of the contract function to call.
    :param arg: Single argument passed to the function.
    :return: The receipt of the on-chain transaction invocation.
    """
    print("---------Invoking Contract Function via a geth node --------")
    new_address = Web3.toChecksumAddress(contract_address)
    contract = node.eth.contract(address=new_address, abi=getContent(abi_file))
    contract_func = contract.functions[function]
    # Invoke the function locally. We can immediately get the return value
    r = contract_func(arg).call()
    print("Return value: {}".format(r))
    # Invoke the function as a transaction. We cannot get the return value.
    # The function emits return value using an event, which is included in
    # the logs array of the transaction receipt.
    tx_hash = contract_func(arg).transact({ 'from': node.eth.coinbase })
    tx_receipt = node.eth.wait_for_transaction_receipt(tx_hash)
    print("Transaction Hash: {}".format(tx_receipt.transactionHash.hex()))
    print("Transaction Receipt: {}".format(tx_receipt))
    return tx_receipt
# Send RPC to geth node
def send_geth_rpc(url, method, params):
    """POST a JSON-RPC request to a geth node and return its 'result' field."""
    payload = {"jsonrpc": "2.0", "id": 1, "method": method, "params": params}
    response = requests.post(url, json=payload)
    return json.loads(response.text)["result"]
|
1,039 | is symlink | #!/usr/bin/env python
"""A module with filesystem-related utility functions and classes."""
import array
import os
import platform
import stat
from typing import Dict
from typing import NamedTuple
from typing import Optional
from typing import Text
from grr_response_core.lib.util import precondition
class Stat(object):
  """A wrapper around standard `os.[l]stat` function.
  The standard API for using `stat` results is very clunky and unpythonic.
  This is an attempt to create a more familiar and consistent interface to make
  the code look cleaner.
  Moreover, standard `stat` does not properly support extended flags - even
  though the documentation mentions that `stat.st_flags` should work on macOS
  and Linux it works only on macOS and raises an error on Linux (and Windows).
  This class handles that and fetches these flags lazily (as it can be costly
  operation on Linux).
  """
  @classmethod
  def FromPath(cls, path: Text, follow_symlink: bool = True) -> "Stat":
    """Returns stat information about the given OS path, calling os.[l]stat.
    Args:
      path: A path to perform `stat` on.
      follow_symlink: True if `stat` of a file that a symlink points to should
        be returned instead of the symlink itself. For non-symlinks this setting
        has no effect.
    Returns:
      Stat instance, with information about the given path.
    """
    # Note that we do not add type assertion for `path` here. The reason is that
    # many of the existing system calls (e.g. `os.listdir`) return results as
    # bytestrings in Python 2. This is fine because it also means that they also
    # accept bytestring paths as arguments in Python 2 (e.g. `os.stat`). Having
    # consistent types in both versions is certainly desired but it might be too
    # much work for too little benefit.
    precondition.AssertType(follow_symlink, bool)
    if follow_symlink:
      stat_obj = os.stat(path)
    else:
      stat_obj = os.lstat(path)
    # Resolve the symlink target eagerly; it is cheap compared to the flags.
    try:
      target = os.readlink(path)
    # `os.readlink` raises `ValueError` on Windows and `OSError` on UNIX.
    except (OSError, ValueError):
      target = None
    return cls(path=path, stat_obj=stat_obj, symlink_target=target)
  def __init__(self,
               path: Text,
               stat_obj: os.stat_result,
               symlink_target: Optional[Text] = None) -> None:
    """Wrap an existing stat result in a `filesystem.Stat` instance.
    Args:
      path: the path of `stat_obj`.
      stat_obj: an instance of os.stat_result with information about `path`.
      symlink_target: Path of the original file that symlink refers to.
    """
    self._path = path
    self._stat = stat_obj
    self._symlink_target = symlink_target
    # Extended flags are fetched lazily; None means "not fetched yet".
    self._flags_linux = None
    self._flags_osx = None
  def GetRaw(self) -> os.stat_result:
    """Returns the wrapped `os.stat_result` object."""
    return self._stat
  def GetPath(self) -> Text:
    """Returns the path this stat result was taken for."""
    return self._path
  def GetLinuxFlags(self) -> int:
    """Returns Linux extended file flags, fetching them on first use."""
    if self._flags_linux is None:
      self._flags_linux = self._FetchLinuxFlags()
    return self._flags_linux
  def GetOsxFlags(self) -> int:
    """Returns macOS extended file flags, fetching them on first use."""
    if self._flags_osx is None:
      self._flags_osx = self._FetchOsxFlags()
    return self._flags_osx
  def GetSize(self) -> int:
    return self._stat.st_size
  # Timestamps are exposed in microseconds, derived from the ns-precision
  # fields of `os.stat_result`.
  def GetAccessTime(self) -> int:
    return _NanosecondsToMicroseconds(self._stat.st_atime_ns)
  def GetModificationTime(self) -> int:
    return _NanosecondsToMicroseconds(self._stat.st_mtime_ns)
  def GetChangeTime(self) -> int:
    return _NanosecondsToMicroseconds(self._stat.st_ctime_ns)
  def GetDevice(self) -> int:
    return self._stat.st_dev
  def GetSymlinkTarget(self) -> Optional[Text]:
    """Returns the symlink target path, or None if not a symlink."""
    return self._symlink_target
  def IsDirectory(self) -> bool:
    return stat.S_ISDIR(self._stat.st_mode)
  def IsRegular(self) -> bool:
    return stat.S_ISREG(self._stat.st_mode)
  def IsSocket(self) -> bool:
    return stat.S_ISSOCK(self._stat.st_mode)
  def METHOD_NAME(self) -> bool:
    """Returns True if the stat-ed path is a symbolic link."""
    return stat.S_ISLNK(self._stat.st_mode)
  # http://manpages.courier-mta.org/htmlman2/ioctl_list.2.html
  FS_IOC_GETFLAGS = 0x80086601
  def _FetchLinuxFlags(self) -> int:
    """Fetches Linux extended file flags."""
    if platform.system() != "Linux":
      return 0
    # Since we open a file in the next step we do not want to open a symlink.
    # `lsattr` returns an error when trying to check flags of a symlink, so we
    # assume that symlinks cannot have them.
    if self.METHOD_NAME():
      return 0
    # Some files (e.g. sockets) cannot be opened. For these we do not really
    # care about extended flags (they should have none). `lsattr` does not seem
    # to support such cases anyway. It is also possible that a file has been
    # deleted (because this method is used lazily).
    try:
      fd = os.open(self._path, os.O_RDONLY)
    except (IOError, OSError):
      return 0
    try:
      # This import is Linux-specific.
      import fcntl  # pylint: disable=g-import-not-at-top
      buf = array.array("l", [0])
      # TODO(user):pytype: incorrect type spec for fcntl.ioctl
      # pytype: disable=wrong-arg-types
      fcntl.ioctl(fd, self.FS_IOC_GETFLAGS, buf)
      # pytype: enable=wrong-arg-types
      return buf[0]
    except (IOError, OSError):
      # File system does not support extended attributes.
      return 0
    finally:
      os.close(fd)
  def _FetchOsxFlags(self) -> int:
    """Fetches macOS extended file flags."""
    if platform.system() != "Darwin":
      return 0
    # `st_flags` exists only on macOS stat results, hence the pytype waiver.
    return self._stat.st_flags  # pytype: disable=attribute-error
class StatCache(object):
  """An utility class for avoiding unnecessary syscalls to `[l]stat`.
  This class is useful in situations where manual bookkeeping of stat results
  in order to prevent extra system calls becomes tedious and complicates control
  flow. This class makes sure that no unnecessary system calls are made and is
  smart enough to cache symlink results when a file is not a symlink.
  """
  # Cache key: the path plus whether symlinks were followed for the stat.
  _Key = NamedTuple("_Key", (("path", Text), ("follow_symlink", bool)))  # pylint: disable=invalid-name
  def __init__(self):
    # Maps (path, follow_symlink) keys to their cached `Stat` results.
    self._cache: Dict[StatCache._Key, Stat] = {}
  def Get(self, path: Text, follow_symlink: bool = True) -> Stat:
    """Stats given file or returns a cached result if available.
    Args:
      path: A path to the file to perform `stat` on.
      follow_symlink: True if `stat` of a file that a symlink points to should
        be returned instead of the symlink itself. For non-symlinks this setting
        has no effect.
    Returns:
      `Stat` object corresponding to the given path.
    """
    key = self._Key(path=path, follow_symlink=follow_symlink)
    try:
      return self._cache[key]
    except KeyError:
      value = Stat.FromPath(path, follow_symlink=follow_symlink)
      self._cache[key] = value
      # If we are not following symlinks and the file is a not symlink then
      # the stat result for this file stays the same even if we want to follow
      # symlinks.
      if not follow_symlink and not value.METHOD_NAME():
        self._cache[self._Key(path=path, follow_symlink=True)] = value
      return value
def _NanosecondsToMicroseconds(ns: int) -> int:
"""Converts nanoseconds to microseconds."""
return ns // 1000 |
1,040 | configure | import pathlib
import os
from conan import ConanFile
from conan.errors import ConanInvalidConfiguration
from conan.tools.microsoft import check_min_vs, is_msvc
from conan.tools.apple import is_apple_os
from conan.tools.files import apply_conandata_patches, get, copy, rm
from conan.tools.build import check_min_cppstd
from conan.tools.scm import Version
from conan.tools.cmake import CMake, CMakeDeps, CMakeToolchain, cmake_layout
from conan.tools.env import VirtualBuildEnv
required_conan_version = ">=1.53.0"
class PackageConan(ConanFile):
    """Conan recipe for OpenAssetIO (core library plus optional Python bridge)."""
    name = "openassetio"
    description = "An open-source interoperability standard for tools and content management systems used in media production."
    license = "Apache-2.0"
    url = "https://github.com/conan-io/conan-center-index"
    homepage = "https://github.com/OpenAssetIO/OpenAssetIO"
    topics = ("asset-pipeline", "vfx", "cg", "assetmanager", "vfx-pipeline")
    package_type = "library"
    settings = "os", "arch", "compiler", "build_type"
    options = {
        "shared": [True, False],
        "with_python": [True, False],
    }
    default_options = {
        "shared": False,
        "with_python": True,
    }
    # Avoid long-path build failures on Windows.
    short_paths = True
    @property
    def _min_cppstd(self):
        # Minimum C++ standard required by the project.
        return 17
    @property
    def _compilers_minimum_version(self):
        # Oldest compiler versions known to support the required C++17 features.
        return {
            "gcc": "9",
            "clang": "12",
            "apple-clang": "12",
        }
    def METHOD_NAME(self):
        if self.options.with_python:
            if is_msvc(self):
                # Required to create import .lib for building extension module.
                self.options["cpython"].shared = True
    def layout(self):
        cmake_layout(self, src_folder="src")
    def requirements(self):
        self.requires("tomlplusplus/3.2.0")
        if self.options.with_python:
            # TODO: cpython requires ncurses/6.2 but no pre-built package exists.
            self.requires("ncurses/6.3")
            self.requires("cpython/3.9.7")
            self.requires("pybind11/2.10.1")
    def validate(self):
        # Reject unsupported platform/compiler combinations early.
        if is_apple_os(self):
            raise ConanInvalidConfiguration(
                f"{self.ref} does not support MacOS at this time"
            )
        if self.settings.compiler.cppstd:
            check_min_cppstd(self, self._min_cppstd)
        if is_msvc(self) and not self.dependencies["cpython"].options.shared:
            raise ConanInvalidConfiguration(f"{self.ref} requires cpython:shared=True when using MSVC compiler")
        check_min_vs(self, 191)
        if not is_msvc(self):
            minimum_version = self._compilers_minimum_version.get(str(self.settings.compiler), False)
            if minimum_version and Version(self.settings.compiler.version) < minimum_version:
                raise ConanInvalidConfiguration(
                    f"{self.ref} requires C++{self._min_cppstd}, which your compiler does not support."
                )
    def build_requirements(self):
        self.tool_requires("cmake/3.25.3")
    def source(self):
        get(self, **self.conan_data["sources"][self.version], strip_root=True)
    def generate(self):
        # CMake cache variables consumed by the OpenAssetIO build scripts.
        tc = CMakeToolchain(self)
        tc.variables["OPENASSETIO_ENABLE_TESTS"] = not self.conf.get("tools.build:skip_test", default=True, check_type=bool)
        tc.variables["OPENASSETIO_GLIBCXX_USE_CXX11_ABI"] = self.settings.get_safe("compiler.libcxx") == "libstdc++11"
        tc.variables["OPENASSETIO_ENABLE_PYTHON"] = self.options.with_python
        if self.options.with_python:
            tc.variables["Python_EXECUTABLE"] = self._python_exe
            if is_msvc(self):
                tc.variables["Python_LIBRARY"] = self._python_windows_lib
        tc.generate()
        tc = CMakeDeps(self)
        tc.generate()
        tc = VirtualBuildEnv(self)
        tc.generate()
    @property
    def _python_exe(self):
        # Path to the cpython interpreter provided by the dependency.
        # TODO: update to V2 once cpython is updated
        return pathlib.Path(self.deps_user_info["cpython"].python).as_posix()
    @property
    def _python_windows_lib(self):
        # Import library (.lib) of the embedded cpython, needed by MSVC.
        pth = pathlib.Path(
            self.dependencies["cpython"].package_folder,
            self.dependencies["cpython"].cpp_info.components["embed"].libdirs[0],
            self.dependencies["cpython"].cpp_info.components["embed"].libs[0])
        pth = pth.with_suffix(".lib")
        return pth.as_posix()
    def build(self):
        apply_conandata_patches(self)
        cmake = CMake(self)
        cmake.METHOD_NAME()
        cmake.build()
    def package_id(self):
        if self.options.with_python:
            # Rebuild only when the cpython minor version changes.
            self.info.requires["cpython"].minor_mode()
    def package(self):
        copy(self, pattern="LICENSE", dst=os.path.join(self.package_folder, "licenses"), src=self.source_folder)
        cmake = CMake(self)
        cmake.install()
        # Drop the generated CMake config files; Conan generates its own.
        rm(self, "OpenAssetIOConfig*.cmake", os.path.join(self.package_folder, "lib", "cmake", "OpenAssetIO"))
        rm(self, "OpenAssetIOTargets*.cmake", os.path.join(self.package_folder, "lib", "cmake", "OpenAssetIO"))
        rm(self, "*.pdb", os.path.join(self.package_folder, "lib"))
        rm(self, "*.pdb", os.path.join(self.package_folder, "bin"))
    def package_info(self):
        self.cpp_info.libs = []
        self.cpp_info.set_property("cmake_file_name", "OpenAssetIO")
        self.cpp_info.set_property("cmake_target_name", "OpenAssetIO::OpenAssetIO")
        self.cpp_info.set_property("cmake_build_modules", [os.path.join("lib", "cmake", "OpenAssetIO", "OpenAssetIOVariables.cmake")])
        self.cpp_info.builddirs = [os.path.join("lib", "cmake")]
        self.cpp_info.components["openassetio-core"].set_property("cmake_target_name", "OpenAssetIO::openassetio-core")
        self.cpp_info.components["openassetio-core"].libs = ["openassetio"]
        if self.options.with_python:
            self.cpp_info.components["openassetio-python-bridge"].set_property("cmake_target_name", "OpenAssetIO::openassetio-python-bridge")
            self.cpp_info.components["openassetio-python-bridge"].requires = ["openassetio-core"]
            self.cpp_info.components["openassetio-python-bridge"].libs = ["openassetio-python"]
        # TODO: to remove in conan v2 once cmake_find_package_* generators removed
        self.cpp_info.names["cmake_find_package"] = "OpenAssetIO"
        self.cpp_info.names["cmake_find_package_multi"] = "OpenAssetIO"
1,041 | time strided assign | from .common import Benchmark, get_squares, get_squares_
import numpy as np
from io import SEEK_SET, StringIO, BytesIO
class Copy(Benchmark):
    """Benchmarks for element-wise copy/assignment across several dtypes."""
    params = ["int8", "int16", "float32", "float64",
              "complex64", "complex128"]
    param_names = ['type']
    def setup(self, typename):
        dtype = np.dtype(typename)
        self.d = np.arange((50 * 500), dtype=dtype).reshape((500, 50))
        self.e = np.arange((50 * 500), dtype=dtype).reshape((50, 500))
        # Same data as self.e but shaped to match self.d for direct copy.
        self.e_d = self.e.reshape(self.d.shape)
        self.dflat = np.arange((50 * 500), dtype=dtype)
    def time_memcpy(self, typename):
        self.d[...] = self.e_d
    def time_memcpy_large_out_of_place(self, typename):
        l = np.ones(1024**2, dtype=np.dtype(typename))
        l.copy()
    def time_cont_assign(self, typename):
        self.d[...] = 1
    def time_strided_copy(self, typename):
        # Transpose makes the source strided rather than contiguous.
        self.d[...] = self.e.T
    def METHOD_NAME(self, typename):
        # Strided (every-other-element) scalar assignment.
        self.dflat[::2] = 2
class CopyTo(Benchmark):
    """Benchmarks for np.copyto with dense and sparse boolean masks."""
    def setup(self):
        self.d = np.ones(50000)
        self.e = self.d.copy()
        # All-True mask and its all-False inverse.
        self.m = (self.d == 1)
        self.im = (~ self.m)
        # Mask with every 8th element flipped, and its inverse.
        self.m8 = self.m.copy()
        self.m8[::8] = (~ self.m[::8])
        self.im8 = (~ self.m8)
    def time_copyto(self):
        np.copyto(self.d, self.e)
    def time_copyto_sparse(self):
        np.copyto(self.d, self.e, where=self.m)
    def time_copyto_dense(self):
        np.copyto(self.d, self.e, where=self.im)
    def time_copyto_8_sparse(self):
        np.copyto(self.d, self.e, where=self.m8)
    def time_copyto_8_dense(self):
        np.copyto(self.d, self.e, where=self.im8)
class Savez(Benchmark):
    """Benchmark np.savez on the shared squares test data."""
    def setup(self):
        self.squares = get_squares()
    def time_vb_savez_squares(self):
        np.savez('tmp.npz', **self.squares)
class LoadNpyOverhead(Benchmark):
    """Benchmark the fixed overhead of np.load on an in-memory .npy buffer."""
    def setup(self):
        self.buffer = BytesIO()
        np.save(self.buffer, get_squares_()['float32'])
    def time_loadnpy_overhead(self):
        # Rewind the buffer so each iteration reads the same data.
        self.buffer.seek(0, SEEK_SET)
        np.load(self.buffer)
class LoadtxtCSVComments(Benchmark):
    # benchmarks for np.loadtxt comment handling
    # when reading in CSV files
    params = [10, int(1e2), int(1e4), int(1e5)]
    param_names = ['num_lines']
    def setup(self, num_lines):
        data = ['1,2,3 # comment'] * num_lines
        # unfortunately, timeit will only run setup()
        # between repeat events, but not for iterations
        # within repeats, so the StringIO object
        # will have to be rewound in the benchmark proper
        self.data_comments = StringIO('\n'.join(data))
    def time_comment_loadtxt_csv(self, num_lines):
        # benchmark handling of lines with comments
        # when loading in from csv files
        # inspired by similar benchmark in pandas
        # for read_csv
        # need to rewind StringIO object (unfortunately
        # confounding timing result somewhat) for every
        # call to timing test proper
        np.loadtxt(self.data_comments,
                   delimiter=',')
        self.data_comments.seek(0)
class LoadtxtCSVdtypes(Benchmark):
    # benchmarks for np.loadtxt operating with
    # different dtypes parsed / cast from CSV files
    params = (['float32', 'float64', 'int32', 'int64',
               'complex128', 'str', 'object'],
              [10, int(1e2), int(1e4), int(1e5)])
    param_names = ['dtype', 'num_lines']
    def setup(self, dtype, num_lines):
        data = ['5, 7, 888'] * num_lines
        self.csv_data = StringIO('\n'.join(data))
    def time_loadtxt_dtypes_csv(self, dtype, num_lines):
        # benchmark loading arrays of various dtypes
        # from csv files
        # state-dependent timing benchmark requires
        # rewind of StringIO object
        np.loadtxt(self.csv_data,
                   delimiter=',',
                   dtype=dtype)
        self.csv_data.seek(0)
class LoadtxtCSVStructured(Benchmark):
    # benchmarks for np.loadtxt operating with
    # a structured data type & CSV file
    def setup(self):
        num_lines = 50000
        # Mixed string / int / float columns matching the dtype below.
        data = ["M, 21, 72, X, 155"] * num_lines
        self.csv_data = StringIO('\n'.join(data))
    def time_loadtxt_csv_struct_dtype(self):
        # obligate rewind of StringIO object
        # between iterations of a repeat:
        np.loadtxt(self.csv_data,
                   delimiter=',',
                   dtype=[('category_1', 'S1'),
                          ('category_2', 'i4'),
                          ('category_3', 'f8'),
                          ('category_4', 'S1'),
                          ('category_5', 'f8')])
        self.csv_data.seek(0)
class LoadtxtCSVSkipRows(Benchmark):
    # benchmarks for loadtxt row skipping when
    # reading in csv file data; a similar benchmark
    # is present in the pandas asv suite
    params = [0, 500, 10000]
    param_names = ['skiprows']
    def setup(self, skiprows):
        # Fixed seed keeps the generated file identical across runs.
        np.random.seed(123)
        test_array = np.random.rand(100000, 3)
        self.fname = 'test_array.csv'
        np.savetxt(fname=self.fname,
                   X=test_array,
                   delimiter=',')
    def time_skiprows_csv(self, skiprows):
        np.loadtxt(self.fname,
                   delimiter=',',
                   skiprows=skiprows)
class LoadtxtReadUint64Integers(Benchmark):
    # pandas has a similar CSV reading benchmark
    # modified to suit np.loadtxt
    params = [550, 1000, 10000]
    param_names = ['size']
    def setup(self, size):
        # Values beyond int64 range force the uint64 parsing path.
        arr = np.arange(size).astype('uint64') + 2**63
        self.data1 = StringIO('\n'.join(arr.astype(str).tolist()))
        # Inject one negative value (requires size > 500, true for all params).
        arr = arr.astype(object)
        arr[500] = -1
        self.data2 = StringIO('\n'.join(arr.astype(str).tolist()))
    def time_read_uint64(self, size):
        # mandatory rewind of StringIO object
        # between iterations of a repeat:
        np.loadtxt(self.data1)
        self.data1.seek(0)
    def time_read_uint64_neg_values(self, size):
        # mandatory rewind of StringIO object
        # between iterations of a repeat:
        np.loadtxt(self.data2)
        self.data2.seek(0)
class LoadtxtUseColsCSV(Benchmark):
    # benchmark selective column reading from CSV files
    # using np.loadtxt
    params = [2, [1, 3], [1, 3, 5, 7]]
    param_names = ['usecols']
    def setup(self, usecols):
        num_lines = 5000
        data = ['0, 1, 2, 3, 4, 5, 6, 7, 8, 9'] * num_lines
        self.csv_data = StringIO('\n'.join(data))
    def time_loadtxt_usecols_csv(self, usecols):
        # must rewind StringIO because of state
        # dependence of file reading
        np.loadtxt(self.csv_data,
                   delimiter=',',
                   usecols=usecols)
        self.csv_data.seek(0)
class LoadtxtCSVDateTime(Benchmark):
    # benchmarks for np.loadtxt operating with
    # datetime data in a CSV file
    params = [20, 200, 2000, 20000]
    param_names = ['num_lines']
    def setup(self, num_lines):
        # create the equivalent of a two-column CSV file
        # with date strings in the first column and random
        # floating point data in the second column
        dates = np.arange('today', 20, dtype=np.datetime64)
        np.random.seed(123)
        values = np.random.rand(20)
        date_line = ''
        for date, value in zip(dates, values):
            date_line += (str(date) + ',' + str(value) + '\n')
        # expand data to specified number of lines
        data = date_line * (num_lines // 20)
        self.csv_data = StringIO(data)
    def time_loadtxt_csv_datetime(self, num_lines):
        # rewind StringIO object -- the timing iterations
        # are state-dependent
        X = np.loadtxt(self.csv_data,
                       delimiter=',',
                       dtype=([('dates', 'M8[us]'),
                               ('values', 'float64')]))
        self.csv_data.seek(0)
1,042 | activate | """
Manage modjk workers
====================
Send commands to a :strong:`modjk` load balancer via the peer system.
This module can be used with the :ref:`prereq <requisites-prereq>`
requisite to remove/add the worker from the load balancer before
deploying/restarting service.
Mandatory Settings:
- The minion needs to have permission to publish the :strong:`modjk.*`
functions (see :ref:`here <peer>` for information on configuring
peer publishing permissions)
- The modjk load balancer must be configured as stated in the :strong:`modjk`
execution module :mod:`documentation <salt.modules.modjk>`
"""
def __virtual__():
"""
Check if we have peer access ?
"""
return True
def _send_command(cmd, worker, lbn, target, profile="default", tgt_type="glob"):
    """
    Send a command to the modjk load balancer via peer publishing.

    The minion needs to be able to publish the commands to the load balancer.

    cmd:
        worker_stop - won't get any traffic from the lbn
        worker_activate - activate the worker
        worker_disable - will get traffic only for current sessions
    """
    ret = {
        "code": False,
        "msg": "OK",
        "minions": [],
    }
    # Publish the command to the targeted balancers
    func = "modjk.{}".format(cmd)
    args = [worker, lbn, profile]
    response = __salt__["publish.publish"](target, func, args, tgt_type)
    # Collect errors and the list of affected minions
    errors = []
    minions = []
    for minion in response:
        minions.append(minion)
        if not response[minion]:
            errors.append(minion)
    # Parse the responses
    if not response:
        ret["msg"] = "no servers answered the published command {}".format(cmd)
    elif errors:
        ret["msg"] = "the following minions return False"
        ret["minions"] = errors
    else:
        ret["code"] = True
        # Fixed typo in the original message ("commad").
        ret["msg"] = "the command was published successfully"
        ret["minions"] = minions
    return ret
def _worker_status(target, worker, activation, profile="default", tgt_type="glob"):
    """
    Check whether *worker* is in the given `activation` state on the
    targeted load balancers.

    Returns a dictionary with:
        result - False if no server returned from the published command
        errors - list of servers that couldn't find the worker
        wrong_state - list of servers where the worker was in the wrong
            state (not activation)
    """
    ret = {
        "result": True,
        "errors": [],
        "wrong_state": [],
    }
    status = __salt__["publish.publish"](
        target, "modjk.worker_status", [worker, profile], tgt_type
    )
    # Nobody responded to the published command.
    if not status:
        ret["result"] = False
        return ret
    # Partition the balancers into "worker unknown" and "wrong state".
    for balancer, state in status.items():
        if not state:
            ret["errors"].append(balancer)
        elif state["activation"] != activation:
            ret["wrong_state"].append(balancer)
    return ret
def _talk2modjk(name, lbn, target, action, profile="default", tgt_type="glob"):
    """
    Wrapper function for the stop/disable/activate functions.

    Checks the worker's current state on the targeted balancers and only
    publishes *action* to the balancers where the state actually differs.
    Returns a standard Salt state dict (name/result/changes/comment).
    """
    ret = {"name": name, "result": True, "changes": {}, "comment": ""}
    # Map the modjk command name to the activation code it should produce.
    action_map = {
        "worker_stop": "STP",
        "worker_disable": "DIS",
        "worker_activate": "ACT",
    }
    # Check what needs to be done
    status = _worker_status(target, name, action_map[action], profile, tgt_type)
    if not status["result"]:
        ret["result"] = False
        ret["comment"] = "no servers answered the published command modjk.worker_status"
        return ret
    if status["errors"]:
        ret["result"] = False
        ret[
            "comment"
        ] = "the following balancers could not find the worker {}: {}".format(
            name, status["errors"]
        )
        return ret
    if not status["wrong_state"]:
        # Nothing to do: every balancer already has the desired state.
        ret[
            "comment"
        ] = "the worker is in the desired activation state on all the balancers"
        return ret
    else:
        ret["comment"] = "the action {} will be sent to the balancers {}".format(
            action, status["wrong_state"]
        )
        ret["changes"] = {action: status["wrong_state"]}
    # In test mode report the pending change without applying it.
    if __opts__["test"]:
        ret["result"] = None
        return ret
    # Send the action command to target
    response = _send_command(action, name, lbn, target, profile, tgt_type)
    ret["comment"] = response["msg"]
    ret["result"] = response["code"]
    return ret
def stop(name, lbn, target, profile="default", tgt_type="glob"):
    """
    Stop the named worker on the lbn load balancers at the targeted minions.
    The worker won't receive any traffic from the lbn.

    .. versionchanged:: 2017.7.0
        The ``expr_form`` argument has been renamed to ``tgt_type``, earlier
        releases must use ``expr_form``.

    Example:

    .. code-block:: yaml

        disable-before-deploy:
          modjk_worker.stop:
            - name: {{ grains['id'] }}
            - lbn: application
            - target: 'roles:balancer'
            - tgt_type: grain
    """
    return _talk2modjk(name, lbn, target, "worker_stop", profile, tgt_type)
def METHOD_NAME(name, lbn, target, profile="default", tgt_type="glob"):
    """
    Activate the named worker on the lbn load balancers at the targeted
    minions.

    .. versionchanged:: 2017.7.0
        The ``expr_form`` argument has been renamed to ``tgt_type``, earlier
        releases must use ``expr_form``.

    Example:

    .. code-block:: yaml

        disable-before-deploy:
          modjk_worker.activate:
            - name: {{ grains['id'] }}
            - lbn: application
            - target: 'roles:balancer'
            - tgt_type: grain
    """
    return _talk2modjk(name, lbn, target, "worker_activate", profile, tgt_type)
def disable(name, lbn, target, profile="default", tgt_type="glob"):
    """
    Disable the named worker on the lbn load balancers at the targeted
    minions. The worker will keep serving current sessions but receive no
    new ones.

    .. versionchanged:: 2017.7.0
        The ``expr_form`` argument has been renamed to ``tgt_type``, earlier
        releases must use ``expr_form``.

    Example:

    .. code-block:: yaml

        disable-before-deploy:
          modjk_worker.disable:
            - name: {{ grains['id'] }}
            - lbn: application
            - target: 'roles:balancer'
            - tgt_type: grain
    """
    return _talk2modjk(name, lbn, target, "worker_disable", profile, tgt_type)
1,043 | readinto | import sys
from _typeshed import ReadableBuffer
from collections.abc import Callable, Set as AbstractSet
from typing import Protocol
from typing_extensions import Self, final
# Names re-exported by this stub.  Python 3.11 added ``file_digest``;
# otherwise the public API is the same across the supported versions.
if sys.version_info >= (3, 11):
    __all__ = (
        "md5",
        "sha1",
        "sha224",
        "sha256",
        "sha384",
        "sha512",
        "blake2b",
        "blake2s",
        "sha3_224",
        "sha3_256",
        "sha3_384",
        "sha3_512",
        "shake_128",
        "shake_256",
        "new",
        "algorithms_guaranteed",
        "algorithms_available",
        "pbkdf2_hmac",
        "file_digest",
    )
else:
    __all__ = (
        "md5",
        "sha1",
        "sha224",
        "sha256",
        "sha384",
        "sha512",
        "blake2b",
        "blake2s",
        "sha3_224",
        "sha3_256",
        "sha3_384",
        "sha3_512",
        "shake_128",
        "shake_256",
        "new",
        "algorithms_guaranteed",
        "algorithms_available",
        "pbkdf2_hmac",
    )
class _Hash:
    # Stand-in type for the fixed-length hash objects returned by ``new()``
    # and the named constructors (md5, sha1, ...); it is not an importable
    # runtime name in hashlib.
    @property
    def digest_size(self) -> int: ...
    @property
    def block_size(self) -> int: ...
    @property
    def name(self) -> str: ...
    def __init__(self, data: ReadableBuffer = ...) -> None: ...
    def copy(self) -> Self: ...
    def digest(self) -> bytes: ...
    def hexdigest(self) -> str: ...
    def update(self, __data: ReadableBuffer) -> None: ...
# Constructor signatures differ by Python version: 3.9 added the
# keyword-only ``usedforsecurity`` flag, 3.8 named the positional
# argument ``string``, older versions used a positional-only parameter.
if sys.version_info >= (3, 9):
    def new(name: str, data: ReadableBuffer = b"", *, usedforsecurity: bool = ...) -> _Hash: ...
    def md5(string: ReadableBuffer = b"", *, usedforsecurity: bool = True) -> _Hash: ...
    def sha1(string: ReadableBuffer = b"", *, usedforsecurity: bool = True) -> _Hash: ...
    def sha224(string: ReadableBuffer = b"", *, usedforsecurity: bool = True) -> _Hash: ...
    def sha256(string: ReadableBuffer = b"", *, usedforsecurity: bool = True) -> _Hash: ...
    def sha384(string: ReadableBuffer = b"", *, usedforsecurity: bool = True) -> _Hash: ...
    def sha512(string: ReadableBuffer = b"", *, usedforsecurity: bool = True) -> _Hash: ...

elif sys.version_info >= (3, 8):
    def new(name: str, data: ReadableBuffer = b"") -> _Hash: ...
    def md5(string: ReadableBuffer = b"") -> _Hash: ...
    def sha1(string: ReadableBuffer = b"") -> _Hash: ...
    def sha224(string: ReadableBuffer = b"") -> _Hash: ...
    def sha256(string: ReadableBuffer = b"") -> _Hash: ...
    def sha384(string: ReadableBuffer = b"") -> _Hash: ...
    def sha512(string: ReadableBuffer = b"") -> _Hash: ...

else:
    def new(name: str, data: ReadableBuffer = b"") -> _Hash: ...
    def md5(__string: ReadableBuffer = ...) -> _Hash: ...
    def sha1(__string: ReadableBuffer = ...) -> _Hash: ...
    def sha224(__string: ReadableBuffer = ...) -> _Hash: ...
    def sha256(__string: ReadableBuffer = ...) -> _Hash: ...
    def sha384(__string: ReadableBuffer = ...) -> _Hash: ...
    def sha512(__string: ReadableBuffer = ...) -> _Hash: ...
# ``algorithms_guaranteed`` is the portable subset present on every
# platform; ``algorithms_available`` also includes backend-provided names.
algorithms_guaranteed: AbstractSet[str]
algorithms_available: AbstractSet[str]

def pbkdf2_hmac(
    hash_name: str, password: ReadableBuffer, salt: ReadableBuffer, iterations: int, dklen: int | None = None
) -> bytes: ...
class _VarLenHash:
    # Stand-in type for the variable-length (XOF) hashes shake_128 and
    # shake_256: unlike _Hash, digest()/hexdigest() take the desired
    # output length.
    digest_size: int
    block_size: int
    name: str
    def __init__(self, data: ReadableBuffer = ...) -> None: ...
    def copy(self) -> _VarLenHash: ...
    def digest(self, __length: int) -> bytes: ...
    def hexdigest(self, __length: int) -> str: ...
    def update(self, __data: ReadableBuffer) -> None: ...
# SHA-3 constructors behave like the fixed-length hashes; SHAKE
# constructors produce variable-length digests.
sha3_224 = _Hash
sha3_256 = _Hash
sha3_384 = _Hash
sha3_512 = _Hash
shake_128 = _VarLenHash
shake_256 = _VarLenHash
# scrypt password-based key-derivation function; availability depends on
# the OpenSSL build at runtime.
def scrypt(
    password: ReadableBuffer,
    *,
    salt: ReadableBuffer | None = None,
    n: int | None = None,
    r: int | None = None,
    p: int | None = None,
    maxmem: int = 0,
    dklen: int = 64,
) -> bytes: ...
@final
class _BlakeHash(_Hash):
    # Stand-in for blake2b/blake2s: extends the common hash interface with
    # keyed hashing, salting, personalization and tree-hashing parameters.
    MAX_DIGEST_SIZE: int
    MAX_KEY_SIZE: int
    PERSON_SIZE: int
    SALT_SIZE: int
    # Python 3.9 added the keyword-only ``usedforsecurity`` flag.
    if sys.version_info >= (3, 9):
        def __init__(
            self,
            __data: ReadableBuffer = ...,
            *,
            digest_size: int = ...,
            key: ReadableBuffer = ...,
            salt: ReadableBuffer = ...,
            person: ReadableBuffer = ...,
            fanout: int = ...,
            depth: int = ...,
            leaf_size: int = ...,
            node_offset: int = ...,
            node_depth: int = ...,
            inner_size: int = ...,
            last_node: bool = ...,
            usedforsecurity: bool = ...,
        ) -> None: ...
    else:
        def __init__(
            self,
            __data: ReadableBuffer = ...,
            *,
            digest_size: int = ...,
            key: ReadableBuffer = ...,
            salt: ReadableBuffer = ...,
            person: ReadableBuffer = ...,
            fanout: int = ...,
            depth: int = ...,
            leaf_size: int = ...,
            node_offset: int = ...,
            node_depth: int = ...,
            inner_size: int = ...,
            last_node: bool = ...,
        ) -> None: ...

blake2b = _BlakeHash
blake2s = _BlakeHash
# file_digest() and its supporting protocols were added in Python 3.11.
if sys.version_info >= (3, 11):
    class _BytesIOLike(Protocol):
        # Objects exposing a zero-copy buffer of their contents,
        # e.g. io.BytesIO.
        def getbuffer(self) -> ReadableBuffer: ...

    class _FileDigestFileObj(Protocol):
        # NOTE(review): METHOD_NAME is presumably ``readinto`` — the
        # read-into-buffer method file_digest() uses; confirm against the
        # original stub before relying on the name.
        def METHOD_NAME(self, __buf: bytearray) -> int: ...
        def readable(self) -> bool: ...

    def file_digest(
        __fileobj: _BytesIOLike | _FileDigestFileObj, __digest: str | Callable[[], _Hash], *, _bufsize: int = 262144
    ) -> _Hash: ...
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import os
import shutil
from twisted.internet import defer
from buildbot.process.buildstep import BuildStep
from buildbot.process.results import FAILURE
from buildbot.process.results import SUCCESS
from buildbot.steps.transfer import DirectoryUpload
from buildbot.steps.transfer import FileDownload
from buildbot.steps.transfer import FileUpload
from buildbot.steps.transfer import MultipleFileUpload
from buildbot.steps.transfer import StringDownload
from buildbot.steps.worker import CompositeStepMixin
from buildbot.test.util.decorators import flaky
from buildbot.test.util.integration import RunMasterBase
# This integration test creates a master and worker environment
# and make sure the transfer steps are working
# When new protocols are added, make sure you update this test to exercise
# your proto implementation
class TransferStepsMasterPb(RunMasterBase):
    """Integration tests for the transfer steps over the ``pb`` protocol.

    Spins up a real master/worker pair and makes sure the transfer steps
    work end-to-end.  When a new worker protocol is added, subclass this
    (see ``TransferStepsMasterNull``) so the protocol gets the same
    coverage.
    """

    proto = "pb"

    @defer.inlineCallbacks
    def setup_config(self, bigfilename):
        """Configure a builder that transfers files in both directions,
        including an ~8 MB file written to ``bigfilename`` to exercise
        chunked transfers."""
        c = {}
        from buildbot.config import BuilderConfig
        from buildbot.plugins import schedulers
        from buildbot.process.factory import BuildFactory

        c['schedulers'] = [
            schedulers.ForceScheduler(
                name="force",
                builderNames=["testy"])]

        f = BuildFactory()
        # do a bunch of transfer to exercise the protocol
        f.addStep(StringDownload("filecontent", workerdest="dir/file1.txt"))
        f.addStep(StringDownload("filecontent2", workerdest="dir/file2.txt"))

        # create 8 MB file
        with open(bigfilename, 'w', encoding='utf-8') as o:
            buf = "xxxxxxxx" * 1024
            for _ in range(1000):
                o.write(buf)
        f.addStep(FileDownload(mastersrc=bigfilename, workerdest="bigfile.txt"))
        f.addStep(FileUpload(workersrc="dir/file2.txt", masterdest="master.txt"))
        f.addStep(FileDownload(mastersrc="master.txt", workerdest="dir/file3.txt"))
        f.addStep(DirectoryUpload(workersrc="dir", masterdest="dir"))
        c['builders'] = [
            BuilderConfig(name="testy", workernames=["local1"], factory=f)
        ]
        yield self.setup_master(c)

    @defer.inlineCallbacks
    def setup_config_glob(self):
        """Configure a builder exercising MultipleFileUpload with glob
        patterns plus a CompositeStepMixin file read."""
        c = {}
        from buildbot.config import BuilderConfig
        from buildbot.plugins import schedulers
        from buildbot.process.factory import BuildFactory

        class CustomStep(BuildStep, CompositeStepMixin):
            @defer.inlineCallbacks
            def run(self):
                # Read back a file we just downloaded to the worker to
                # exercise getFileContentFromWorker over the protocol.
                content = yield self.getFileContentFromWorker(
                    "dir/file1.txt", abandonOnFailure=True)
                assert content == "filecontent"
                return SUCCESS

        c['schedulers'] = [
            schedulers.ForceScheduler(
                name="force", builderNames=["testy"])
        ]

        f = BuildFactory()
        f.addStep(StringDownload("filecontent", workerdest="dir/file1.txt"))
        f.addStep(StringDownload("filecontent2", workerdest="dir/notafile1.txt"))
        f.addStep(StringDownload("filecontent2", workerdest="dir/only1.txt"))
        f.addStep(
            MultipleFileUpload(
                workersrcs=["dir/file*.txt", "dir/not*.txt", "dir/only?.txt"],
                masterdest="dest/",
                glob=True))
        f.addStep(CustomStep())
        c['builders'] = [
            BuilderConfig(name="testy", workernames=["local1"], factory=f)
        ]
        yield self.setup_master(c)

    @defer.inlineCallbacks
    def setup_config_single_step(self, step):
        """Configure a builder whose factory runs exactly the given step."""
        c = {}
        from buildbot.config import BuilderConfig
        from buildbot.plugins import schedulers
        from buildbot.process.factory import BuildFactory

        c['schedulers'] = [
            schedulers.ForceScheduler(
                name="force",
                builderNames=["testy"])]

        f = BuildFactory()
        # Fix: the ``step`` argument used to be ignored in favour of a
        # hard-coded FileUpload, so the DirectoryUpload/MultipleFileUpload
        # failure tests below were not actually exercising their steps.
        f.addStep(step)
        c['builders'] = [
            BuilderConfig(name="testy",
                          workernames=["local1"],
                          factory=f)
        ]
        yield self.setup_master(c)

    def readMasterDirContents(self, top):
        """Return {path: text} for every file under ``top`` on the master."""
        contents = {}
        for root, _, files in os.walk(top):
            for name in files:
                fn = os.path.join(root, name)
                with open(fn, encoding='utf-8') as f:
                    contents[fn] = f.read()
        return contents

    @flaky(bugNumber=4407, onPlatform='win32')
    @defer.inlineCallbacks
    def METHOD_NAME(self):
        yield self.setup_config(bigfilename=self.mktemp())

        build = yield self.doForceBuild(wantSteps=True, wantLogs=True)
        self.assertEqual(build['results'], SUCCESS)
        dirContents = self.readMasterDirContents("dir")
        self.assertEqual(
            dirContents,
            {os.path.join('dir', 'file1.txt'): 'filecontent',
             os.path.join('dir', 'file2.txt'): 'filecontent2',
             os.path.join('dir', 'file3.txt'): 'filecontent2'})

        # cleanup our mess (worker is cleaned up by parent class)
        shutil.rmtree("dir")
        os.unlink("master.txt")

    @defer.inlineCallbacks
    def test_globTransfer(self):
        yield self.setup_config_glob()
        build = yield self.doForceBuild(wantSteps=True, wantLogs=True)
        self.assertEqual(build['results'], SUCCESS)
        dirContents = self.readMasterDirContents("dest")
        self.assertEqual(dirContents, {
            os.path.join('dest', 'file1.txt'): 'filecontent',
            os.path.join('dest', 'notafile1.txt'): 'filecontent2',
            os.path.join('dest', 'only1.txt'): 'filecontent2'
        })

        # cleanup
        shutil.rmtree("dest")

    @defer.inlineCallbacks
    def test_no_exist_file_upload(self):
        step = FileUpload(workersrc="dir/noexist_path", masterdest="master_dest")
        yield self.setup_config_single_step(step)

        build = yield self.doForceBuild(wantSteps=True, wantLogs=True)
        self.assertEqual(build['results'], FAILURE)
        res = yield self.checkBuildStepLogExist(build, "Cannot open file")
        self.assertTrue(res)

    @defer.inlineCallbacks
    def test_no_exist_directory_upload(self):
        step = DirectoryUpload(workersrc="dir/noexist_path", masterdest="master_dest")
        yield self.setup_config_single_step(step)

        build = yield self.doForceBuild(wantSteps=True, wantLogs=True)
        self.assertEqual(build['results'], FAILURE)
        res = yield self.checkBuildStepLogExist(build, "Cannot open file")
        self.assertTrue(res)

    @defer.inlineCallbacks
    def test_no_exist_multiple_file_upload(self):
        step = MultipleFileUpload(workersrcs=["dir/noexist_path"], masterdest="master_dest")
        yield self.setup_config_single_step(step)

        build = yield self.doForceBuild(wantSteps=True, wantLogs=True)
        self.assertEqual(build['results'], FAILURE)
        res = yield self.checkBuildStepLogExist(build, "Cannot open file")
        self.assertTrue(res)
class TransferStepsMasterNull(TransferStepsMasterPb):
    # Re-run the entire transfer suite over the in-process "null" protocol.
    proto = "null"
import vtk
import slicer
from slicer.ScriptedLoadableModule import *
#
# PlotsSelfTest
#
class PlotsSelfTest(ScriptedLoadableModule):
    """Scripted module registering the plot node/widget self-test with Slicer."""

    def __init__(self, parent):
        ScriptedLoadableModule.__init__(self, parent)
        self.parent.title = "PlotsSelfTest"
        self.parent.categories = ["Testing.TestCases"]
        self.parent.dependencies = ["Plots"]
        self.parent.contributors = ["Andras Lasso (PerkLab, Queen's)"]
        self.parent.helpText = """This is a self test for plot nodes and widgets."""
        # ``parent`` and ``self.parent`` refer to the same object here.
        parent.acknowledgementText = """This file was originally developed by Andras Lasso, PerkLab, Queen's University
and was supported through Canada CANARIE's Research Software Program."""
#
# PlotsSelfTestWidget
#
class PlotsSelfTestWidget(ScriptedLoadableModuleWidget):
    # No custom GUI: the default scripted-module widget is sufficient for
    # this self-test.
    def setup(self):
        ScriptedLoadableModuleWidget.setup(self)
#
# PlotsSelfTestLogic
#
class PlotsSelfTestLogic(ScriptedLoadableModuleLogic):
    """This class should implement all the actual
    computation done by your module. The interface
    should be such that other python code can import
    this class and make use of the functionality without
    requiring an instance of the Widget
    """

    def __init__(self):
        # No state needed; the test logic lives in PlotsSelfTestTest.
        pass
class PlotsSelfTestTest(ScriptedLoadableModuleTest):
    """
    This is the test case for your scripted module.
    """

    def setUp(self):
        """ Do whatever is needed to reset the state - typically a scene clear will be enough.
        """
        slicer.mrmlScene.Clear(0)

    def runTest(self):
        """Run as few or as many tests as needed here.
        """
        self.setUp()
        self.test_PlotsSelfTest_FullTest1()

    # ------------------------------------------------------------------------------
    def test_PlotsSelfTest_FullTest1(self):
        # Check for Plots module
        self.assertTrue(slicer.modules.plots)

        # Run the sections in dependency order: names, data table, plot
        # nodes, then the views/widgets that display them.
        self.section_SetupPathsAndNames()
        self.section_CreateTable()
        self.section_CreatePlots()
        self.METHOD_NAME()
        self.delayDisplay("Test passed")

    # ------------------------------------------------------------------------------
    def section_SetupPathsAndNames(self):
        # Set constants shared by the following sections.
        self.tableName = 'SampleTable'
        self.xColumnName = 'x'
        self.y1ColumnName = 'cos'
        self.y2ColumnName = 'sin'
        self.series1Name = "Cosine"
        self.series2Name = "Sine"
        self.chartName = "My Chart"

    # ------------------------------------------------------------------------------
    def section_CreateTable(self):
        """Create a table node holding x, cos(x) and sin(x) sample columns."""
        self.delayDisplay("Create table")
        tableNode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLTableNode", self.tableName)
        self.assertIsNotNone(tableNode)
        table = tableNode.GetTable()
        self.assertIsNotNone(table)

        # Create X, Y1, and Y2 series
        arrX = vtk.vtkFloatArray()
        arrX.SetName(self.xColumnName)
        table.AddColumn(arrX)
        arrY1 = vtk.vtkFloatArray()
        arrY1.SetName(self.y1ColumnName)
        table.AddColumn(arrY1)
        arrY2 = vtk.vtkFloatArray()
        arrY2.SetName(self.y2ColumnName)
        table.AddColumn(arrY2)

        # Fill in the table with some example values
        import math
        numPoints = 69
        inc = 7.5 / (numPoints - 1)
        table.SetNumberOfRows(numPoints)
        for i in range(numPoints):
            table.SetValue(i, 0, i * inc)
            table.SetValue(i, 1, math.cos(i * inc))
            table.SetValue(i, 2, math.sin(i * inc))

    # ------------------------------------------------------------------------------
    def section_CreatePlots(self):
        """Create two plot series from the table and a chart containing them."""
        self.delayDisplay("Create plots")
        tableNode = slicer.util.getNode(self.tableName)

        # Create plot data series nodes
        plotSeriesNode1 = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLPlotSeriesNode", self.series1Name)
        plotSeriesNode1.SetAndObserveTableNodeID(tableNode.GetID())
        plotSeriesNode1.SetXColumnName(self.xColumnName)
        plotSeriesNode1.SetYColumnName(self.y1ColumnName)
        plotSeriesNode1.SetLineStyle(slicer.vtkMRMLPlotSeriesNode.LineStyleDash)
        plotSeriesNode1.SetMarkerStyle(slicer.vtkMRMLPlotSeriesNode.MarkerStyleSquare)
        plotSeriesNode2 = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLPlotSeriesNode", self.series2Name)
        plotSeriesNode2.SetAndObserveTableNodeID(tableNode.GetID())
        plotSeriesNode2.SetXColumnName(self.xColumnName)
        plotSeriesNode2.SetYColumnName(self.y2ColumnName)
        plotSeriesNode2.SetUniqueColor()

        # Create plot chart node
        plotChartNode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLPlotChartNode", self.chartName)
        plotChartNode.AddAndObservePlotSeriesNodeID(plotSeriesNode1.GetID())
        plotChartNode.AddAndObservePlotSeriesNodeID(plotSeriesNode2.GetID())
        plotChartNode.SetTitle('A simple plot with 2 curves')
        # NOTE(review): the X axis title repeats the chart title — looks like
        # a copy-paste slip ('This is the X axis' was presumably intended);
        # confirm before changing since the test does not assert on it.
        plotChartNode.SetXAxisTitle('A simple plot with 2 curves')
        plotChartNode.SetYAxisTitle('This is the Y axis')

    # ------------------------------------------------------------------------------
    def METHOD_NAME(self):
        """Create a plot view node and show it in both a plot widget and a
        plot view, keeping references on the slicer namespace for debugging."""
        self.delayDisplay("Test plot view")
        plotChartNode = slicer.util.getNode(self.chartName)

        # Create plot view node
        plotViewNode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLPlotViewNode")
        plotViewNode.SetPlotChartNodeID(plotChartNode.GetID())

        # Create plotWidget
        plotWidget = slicer.qMRMLPlotWidget()
        plotWidget.setMRMLScene(slicer.mrmlScene)
        plotWidget.setMRMLPlotViewNode(plotViewNode)
        plotWidget.show()

        # Create plotView
        plotView = slicer.qMRMLPlotView()
        plotView.setMRMLScene(slicer.mrmlScene)
        plotView.setMRMLPlotViewNode(plotViewNode)
        plotView.show()

        # Save variables into slicer namespace for debugging
        slicer.plotWidget = plotWidget
        slicer.plotView = plotView
import logging
import os
import unittest
import angr
import claripy
# Module logger and the shared location of the precompiled test binaries.
l = logging.getLogger("angr_tests")
test_location = os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "..", "binaries", "tests")
# pylint: disable=missing-class-docstring
# pylint: disable=no-self-use
class TestArgv(unittest.TestCase):
    """Check that command-line arguments — concrete and symbolic — are laid
    out correctly for the target program on every supported architecture.

    All six architecture tests follow the same scenario, so the shared body
    is factored into :meth:`_run_argv_test`; only the binary directory and
    the success address differ per architecture.
    """

    def _run_argv_test(self, arch, r_addr):
        """Run the shared argv scenario for one architecture.

        :param arch: subdirectory of the binaries tree holding ``argv_test``.
        :param r_addr: address reached only when argv[2] == "Yan is a noob".
        """
        proj = angr.Project(os.path.join(test_location, arch, "argv_test"), auto_load_libs=False)

        # Concrete matching argument: the success address must be reachable.
        s = proj.factory.entry_state(args=["aaa", "Yan is a noob"], env={"HOME": "/home/angr"})
        xpl = proj.factory.simulation_manager(s).explore(find=r_addr)
        assert len(xpl.found) == 1

        # Concrete non-matching argument: the success address is unreachable.
        s = proj.factory.entry_state(args=["aaa", "Yan is not a noob"], env={"HOME": "/home/angr"})
        xpl = proj.factory.simulation_manager(s).explore(find=r_addr)
        assert len(xpl.found) == 0

        # Symbolic command line argument: the solver must concretize it to
        # the magic string somewhere on the stack.
        s = proj.factory.entry_state(args=["aaa", claripy.BVS("arg_2", 50 * 8)], env={"HOME": "/home/angr"})
        xpl = proj.factory.simulation_manager(s).explore(find=r_addr)
        found = xpl.found[0]
        conc = found.solver.eval(found.memory.load(found.registers.load("sp"), 400), cast_to=bytes)
        assert b"Yan is a noob" in conc

    def METHOD_NAME(self):
        self._run_argv_test("mips", 0x400768)

    def test_mipsel(self):
        self._run_argv_test("mipsel", 0x400768)

    def test_i386(self):
        self._run_argv_test("i386", 0x804845B)

    def test_amd64(self):
        self._run_argv_test("x86_64", 0x400571)

    def test_arm(self):
        self._run_argv_test("armel", 0x1048C)

    def test_ppc32(self):
        self._run_argv_test("ppc", 0x10000498)
# Allow running this test module directly without a pytest/nose runner.
if __name__ == "__main__":
    unittest.main()
import ast
import logging
import pytest
from tests.common.helpers.assertions import pytest_assert
from tests.common.utilities import wait_until
from tests.common.helpers.dut_utils import verify_orchagent_running_or_assert
from tests.generic_config_updater.gu_utils import apply_patch, expect_op_success, expect_op_failure
from tests.generic_config_updater.gu_utils import generate_tmpfile, delete_tmpfile
from tests.generic_config_updater.gu_utils import create_checkpoint, delete_checkpoint, rollback_or_reload
from tests.generic_config_updater.gu_utils import is_valid_platform_and_version
pytestmark = [
    pytest.mark.topology('any'),
]

logger = logging.getLogger(__name__)

# Polling budget (seconds) for reading back WRED attributes from ASIC_DB.
READ_ASICDB_TIMEOUT = 20
READ_ASICDB_INTERVAL = 5
# Maps CONFIG_DB WRED_PROFILE field names to the corresponding SAI
# attribute names stored in ASIC_DB.
WRED_MAPPING = {'green_min_threshold': 'SAI_WRED_ATTR_GREEN_MIN_THRESHOLD',
                'green_max_threshold': 'SAI_WRED_ATTR_GREEN_MAX_THRESHOLD',
                'green_drop_probability': 'SAI_WRED_ATTR_GREEN_DROP_PROBABILITY'}
@pytest.fixture(scope="function")
def METHOD_NAME(duthost):
    """
    Setup/teardown fixture for the ECN config update tests.

    Verifies orchagent is running, checkpoints the current config, and on
    teardown rolls back to that checkpoint before deleting it.

    Args:
        duthost: DUT host object
    """
    verify_orchagent_running_or_assert(duthost)
    create_checkpoint(duthost)

    yield

    try:
        verify_orchagent_running_or_assert(duthost)
        rollback_or_reload(duthost)
        # Fix: previously this was logged *before* the rollback was even
        # attempted, which was misleading when the rollback failed.
        logger.info("Rolled back to original checkpoint")
    finally:
        delete_checkpoint(duthost)
def ensure_application_of_updated_config(duthost, configdb_field, values):
    """
    Ensures application of the JSON patch config update by polling ASIC_DB.

    Args:
        duthost: DUT host object
        configdb_field: comma-separated config db field(s) under test
        values: comma-separated expected value(s) of configdb_field
    """
    def _confirm_value_in_asic_db():
        # Returns True once ASIC_DB reflects the expected WRED values.
        if(duthost.facts['asic_type'] == 'cisco-8000'):
            # cisco-8000 exposes several WRED objects; skip the empty
            # ('NULL') ones and check the first populated object found.
            wred_objects = duthost.shell('sonic-db-cli ASIC_DB keys *WRED*')["stdout"]
            wred_objects = wred_objects.split("\n")
            for wred_object in wred_objects:
                wred_data = duthost.shell('sonic-db-cli ASIC_DB hgetall {}'.format(wred_object))["stdout"]
                if('NULL' in wred_data):
                    continue
                wred_data = ast.literal_eval(wred_data)
                for field, value in zip(configdb_field.split(','), values.split(',')):
                    if value != wred_data[WRED_MAPPING[field]]:
                        return False
                return True
            return False
        else:
            # assumes exactly one WRED key exists on other platforms — the
            # raw ``keys`` output is fed straight into hgetall; TODO confirm
            # for platforms with multiple WRED objects.
            table_name = duthost.shell('sonic-db-cli ASIC_DB keys *WRED*')["stdout"]
            wred_data = duthost.shell('sonic-db-cli ASIC_DB hgetall {}'.format(table_name))["stdout"]
            wred_data = ast.literal_eval(wred_data)
            for field, value in zip(configdb_field.split(','), values.split(',')):
                if value != wred_data[WRED_MAPPING[field]]:
                    return False
            return True

    logger.info("Validating fields in ASIC DB...")
    pytest_assert(
        wait_until(READ_ASICDB_TIMEOUT, READ_ASICDB_INTERVAL, 0, _confirm_value_in_asic_db),
        "ASIC DB does not properly reflect newly configured field(s): {} expected value(s): {}"
        .format(configdb_field, values)
    )
@pytest.mark.parametrize("configdb_field", ["green_min_threshold", "green_max_threshold", "green_drop_probability",
                                            "green_min_threshold,green_max_threshold,green_drop_probability"])
@pytest.mark.parametrize("operation", ["replace"])
def test_ecn_config_updates(duthost, METHOD_NAME, configdb_field, operation):
    # METHOD_NAME is the setup/teardown fixture defined above (checkpoint +
    # rollback); it is requested only for its side effects.
    tmpfile = generate_tmpfile(duthost)
    logger.info("tmpfile {} created for json patch of field: {} and operation: {}"
                .format(tmpfile, configdb_field, operation))
    json_patch = list()
    values = list()

    # Bump each field's current CONFIG_DB value by 1 so the patch always
    # changes something observable.
    ecn_data = duthost.shell('sonic-db-cli CONFIG_DB hgetall "WRED_PROFILE|AZURE_LOSSLESS"')['stdout']
    ecn_data = ast.literal_eval(ecn_data)
    for field in configdb_field.split(','):
        value = int(ecn_data[field]) + 1
        values.append(str(value))
        logger.info("value to be added to json patch: {}, operation: {}, field: {}"
                    .format(value, operation, field))
        json_patch.append(
            {"op": "{}".format(operation),
             "path": "/WRED_PROFILE/AZURE_LOSSLESS/{}".format(field),
             "value": "{}".format(value)})

    try:
        output = apply_patch(duthost, json_data=json_patch, dest_file=tmpfile)
        # ECN tuning is only supported on certain platforms/versions; on
        # others the patch is expected to be rejected.
        if is_valid_platform_and_version(duthost, "WRED_PROFILE", "ECN tuning"):
            expect_op_success(duthost, output)
            ensure_application_of_updated_config(duthost, configdb_field, ",".join(values))
        else:
            expect_op_failure(output)
    finally:
        delete_tmpfile(duthost, tmpfile)
# Copyright 2020 Least Authority TFA GmbH
# See COPYING for details.
"""
Tests for ``_zkapauthorizer.schema``.
"""
from testtools import (
TestCase,
ExpectedException,
)
from testtools.matchers import (
Equals,
MatchesStructure,
MatchesAll,
)
from hypothesis import (
given,
)
from hypothesis.strategies import (
integers,
data,
lists,
)
from sqlite3 import (
connect,
)
from .._schema import (
MAXIMUM_UPGRADES,
DatabaseSchemaTooNew,
SchemaUpgrade,
Schema,
change_user_version,
)
class SchemaTests(TestCase):
    """
    Tests for ``Schema``.
    """

    def test_exception_str(self):
        """
        ``str(DatabaseSchemaTooNew(...))`` returns a string identifying the
        exception and its details.
        """
        exc = DatabaseSchemaTooNew(1, 2)
        self.assertThat(
            "DatabaseSchemaTooNew(software_version=1, database_version=2)",
            MatchesAll(
                Equals(repr(exc)),
                Equals(str(exc)),
            ),
        )

    @given(
        integers(
            min_value=MAXIMUM_UPGRADES + 1,
            max_value=MAXIMUM_UPGRADES * 100,
        ),
    )
    def test_too_many_upgrades(self, num_upgrades):
        """
        ``Schema`` raises ``ValueError`` if initialized with a schema with more
        than ``MAXIMUM_UPGRADES`` upgrades.
        """
        with ExpectedException(ValueError):
            Schema(
                upgrades=dummy_upgrades(num_upgrades),
            )

    @given(
        integers(min_value=0, max_value=MAXIMUM_UPGRADES),
    )
    def test_version(self, num_upgrades):
        """
        ``Schema.version`` evaluates to the version that the schema itself
        defines.
        """
        upgrades = dummy_upgrades(num_upgrades)
        schema = Schema(upgrades=upgrades)
        self.assertThat(
            schema,
            MatchesStructure(
                version=Equals(num_upgrades),
            ),
        )

    @given(integers(min_value=0, max_value=MAXIMUM_UPGRADES))
    def test_get_version_before_upgrades(self, num_upgrades):
        """
        ``Schema.get_version`` returns 0 when run against a new database.
        """
        db = connect(":memory:")
        cursor = db.cursor()
        self.assertThat(
            Schema(upgrades=dummy_upgrades(num_upgrades)).get_version(cursor),
            Equals(0),
        )

    @given(
        integers(min_value=0, max_value=MAXIMUM_UPGRADES),
    )
    def test_get_version(self, num_upgrades):
        """
        ``Schema.get_version`` returns the version number to which the schema has
        been upgraded.
        """
        upgrades = dummy_upgrades(num_upgrades)
        schema = Schema(upgrades=upgrades)
        db = connect(":memory:")
        cursor = db.cursor()
        schema.run_upgrades(cursor)
        self.assertThat(
            schema.get_version(cursor),
            Equals(num_upgrades),
        )

    @given(
        integers(min_value=0, max_value=MAXIMUM_UPGRADES),
        integers(min_value=1, max_value=2 ** 31 - 1),
    )
    def test_database_newer_than_schema(self, num_upgrades, additional_versions):
        """
        ``Schema.run_upgrades`` raises ``DatabaseSchemaTooNew`` if initialized
        with a schema with a version that is less than the version recorded in
        the database.
        """
        schema = Schema(upgrades=dummy_upgrades(num_upgrades))
        db = connect(":memory:")
        cursor = db.cursor()
        # Advance to a version newer than we have.
        change_user_version(
            cursor,
            # Don't overflow SQLite3 user_version field.
            lambda old_version: min(
                2 ** 31 - 1,
                num_upgrades + additional_versions,
            ),
        )
        with ExpectedException(DatabaseSchemaTooNew):
            schema.run_upgrades(cursor)

    @given(
        lists(
            integers(
                min_value=-2 ** 63,
                max_value=2 ** 63,
            ),
            unique=True,
            min_size=1,
            max_size=MAXIMUM_UPGRADES,
        ),
        data(),
    )
    def test_upgrades_run(self, values, data):
        """
        ``Schema.run_upgrades`` executes all of the statements from the given
        ``SchemaUpgrade`` instances.
        """
        # Pick a version at which to start the database.
        current_version = data.draw(
            integers(min_value=0, max_value=len(values)),
        )

        upgrades = list(
            # Interpolating into SQL here ... bad form but I don't want to
            # hand-code a bunch of unique SQL statements for this test.  A
            # schema upgrade would normally not have a variable in it like
            # this.
            SchemaUpgrade(["INSERT INTO [a] ([b]) VALUES ({})".format(value)])
            for value
            in values
        )
        schema = Schema(upgrades=upgrades)
        db = connect(":memory:")
        cursor = db.cursor()
        # Create the table we're going to mess with.
        cursor.execute("CREATE TABLE [a] ([b] INTEGER)")
        # Fast-forward to the state we're going to pretend the database is at.
        change_user_version(cursor, lambda old_version: current_version)
        # Run whatever upgrades remain appropriate.
        schema.run_upgrades(cursor)

        cursor.execute("SELECT [b] FROM [a]")
        selected_values = list(b for (b,) in cursor.fetchall())
        # Only the upgrades *after* the pretend current version should have
        # executed, so only their values appear in the table.
        self.assertThat(
            selected_values,
            Equals(values[current_version:]),
        )

    @given(
        lists(
            integers(min_value=1, max_value=10),
            min_size=2,
            max_size=100,
        ),
    )
    def METHOD_NAME(self, upgrade_groups):
        """
        A database can be upgraded repeatedly over time with newer and newer
        schemas.
        """
        db = connect(":memory:")
        cursor = db.cursor()

        all_upgrades = dummy_upgrades(sum(upgrade_groups))
        some_upgrades = []
        # Grow the schema one group at a time, re-running the upgrades after
        # each growth to simulate successive software releases.
        for more_upgrades in upgrade_groups:
            some_upgrades = some_upgrades + all_upgrades[:more_upgrades]
            del all_upgrades[:more_upgrades]
            schema = Schema(some_upgrades)
            schema.run_upgrades(cursor)
            self.assertThat(
                schema.get_version(cursor),
                Equals(schema.version),
            )
def dummy_upgrades(count):
    """
    Create ``count`` valid, executable schema upgrade objects.

    The exact schema changes made aren't meant to be significant themselves.
    Instead, what's interesting is the fact that they can really be executed
    against a database and that each can only ever run successfully once
    against a particular database (each creates a uniquely named table).

    :return [SchemaUpgrade]: The requested number of upgrades.
    """
    upgrades = []
    for index in range(count):
        statement = "CREATE TABLE [foo_{}] ( [a] INT )".format(index)
        upgrades.append(SchemaUpgrade([statement]))
    return upgrades
# Copyright (c) 2017 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
import os.path
import sys
# Make the upgrade-plugin package importable when the tests run from this
# directory.
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), ".."))

import configparser  # To check whether the appropriate exceptions are raised.
import pytest  # To register tests with.

import VersionUpgrade25to26  # The module we're testing.
## Creates an instance of the upgrader to test with.
@pytest.fixture
def METHOD_NAME():
    """Provide a fresh VersionUpgrade25to26 instance for each test."""
    instance = VersionUpgrade25to26.VersionUpgrade25to26()
    return instance
# Valid CFG inputs for version-detection tests. Each case pairs raw
# ``file_data`` with the combined version number expected from it
# (apparently main version * 1000000 + setting_version — TODO confirm against
# the upgrader's getCfgVersion implementation).
test_cfg_version_good_data = [
    {
        "test_name": "Simple",
        "file_data": """[general]
version = 1
""",
        "version": 1000000
    },
    {
        # Unrelated sections must not confuse version parsing.
        "test_name": "Other Data Around",
        "file_data": """[nonsense]
life = good
[general]
version = 3
[values]
layer_height = 0.12
infill_sparse_density = 42
""",
        "version": 3000000
    },
    {
        # Parsing is purely arithmetic, so negative versions pass through.
        "test_name": "Negative Version", #Why not?
        "file_data": """[general]
version = -20
""",
        "version": -20000000
    },
    {
        # setting_version contributes the low-order component.
        "test_name": "Setting Version",
        "file_data": """[general]
version = 1
[metadata]
setting_version = 1
""",
        "version": 1000001
    },
    {
        # 1 * 1000000 + (-3) = 999997.
        "test_name": "Negative Setting Version",
        "file_data": """[general]
version = 1
[metadata]
setting_version = -3
""",
        "version": 999997
    }
]
# Preference-file inputs for the removed-settings upgrade test: files with a
# removed setting in visible_settings, without one, and with the key or the
# whole [general] section absent.
test_upgrade_preferences_removed_settings_data = [
    {
        "test_name": "Removed Setting",
        "file_data": """[general]
visible_settings = baby;you;know;how;I;like;to;start_layers_at_same_position
""",
    },
    {
        "test_name": "No Removed Setting",
        "file_data": """[general]
visible_settings = baby;you;now;how;I;like;to;eat;chocolate;muffins
"""
    },
    {
        "test_name": "No Visible Settings Key",
        "file_data": """[general]
cura = cool
"""
    },
    {
        "test_name": "No General Category",
        "file_data": """[foos]
foo = bar
"""
    }
]
## Tests whether the settings that should be removed are removed for the 2.6
#  version of preferences.
@pytest.mark.parametrize("data", test_upgrade_preferences_removed_settings_data)
def test_upgradePreferencesRemovedSettings(data, METHOD_NAME):
    #Collect the visible settings listed in the original file, if any.
    before = configparser.ConfigParser(interpolation = None)
    before.read_string(data["file_data"])
    if before.has_section("general") and "visible_settings" in before["general"]:
        visible = set(before["general"]["visible_settings"].split(";"))
    else:
        visible = set()
    #Run the upgrade on the raw file contents.
    _, result = METHOD_NAME.upgradePreferences(data["file_data"], "<string>")
    upgraded_text = result[0]
    #Only settings that survived the 2.6 removal may remain visible.
    visible -= VersionUpgrade25to26._removed_settings
    after = configparser.ConfigParser(interpolation = None)
    after.read_string(upgraded_text)
    has_preference = after.has_section("general") and "visible_settings" in after["general"]
    #If there are settings, there must also be a preference.
    assert has_preference == (len(visible) > 0)
    if visible:
        assert visible == set(after["general"]["visible_settings"].split(";"))
# Instance-container inputs for the removed-settings upgrade test: a [values]
# section with a removed setting, one without, and no [values] section at all.
test_upgrade_instance_container_removed_settings_data = [
    {
        "test_name": "Removed Setting",
        "file_data": """[values]
layer_height = 0.1337
start_layers_at_same_position = True
"""
    },
    {
        "test_name": "No Removed Setting",
        "file_data": """[values]
oceans_number = 11
"""
    },
    {
        "test_name": "No Values Category",
        "file_data": """[general]
type = instance_container
"""
    }
]
## Tests whether the settings that should be removed are removed for the 2.6
#  version of instance containers.
@pytest.mark.parametrize("data", test_upgrade_instance_container_removed_settings_data)
def test_upgradeInstanceContainerRemovedSettings(data, METHOD_NAME):
    #Collect the setting keys present in the original container, if any.
    before = configparser.ConfigParser(interpolation = None)
    before.read_string(data["file_data"])
    if before.has_section("values"):
        keys = set(before["values"])
    else:
        keys = set()
    #Run the upgrade on the raw file contents.
    _, result = METHOD_NAME.upgradeInstanceContainer(data["file_data"], "<string>")
    upgraded_text = result[0]
    #Only settings that survived the 2.6 removal may remain in the container.
    keys -= VersionUpgrade25to26._removed_settings
    after = configparser.ConfigParser(interpolation = None)
    after.read_string(upgraded_text)
    #If there are settings, there must also be the values category.
    assert after.has_section("values") == (len(keys) > 0)
    if keys:
        assert keys == set(after["values"])
1,050 | test convert string to bool | # Copyright 2021 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for various errors raised by validate method of payload validator."""
from __future__ import annotations
from core.controllers import payload_validator
from core.tests import test_utils
from typing import Any, Dict, List, Tuple
class PayloadValidationUnitTests(test_utils.GenericTestBase):
    """Error and success paths of
    payload_validator.validate_arguments_against_schema."""

    def test_invalid_args_raises_exceptions(self) -> None:
        # List of 3-tuples, where the first element is an invalid argument dict,
        # the second element is a schema dict and the third element
        # is a list of errors.
        list_of_invalid_args_with_schema_and_errors: List[
            Tuple[
                # Here we use type Any because the first element of tuple
                # represents the argument dict and those argument dicts
                # can have various types of values.
                Dict[str, Any],
                # Here we use type Any because the second element of tuple
                # represents the schema dict and those schema dicts
                # can have different types of values.
                Dict[str, Any],
                List[str]
            ]
        ] = [
            # Wrong type: int supplied where the schema expects a string.
            ({
                'exploration_id': 2
            }, {
                'exploration_id': {
                    'schema': {
                        'type': 'basestring'
                    }
                }
            }, [
                'Schema validation for \'exploration_id\' failed: '
                'Expected string, received 2']),
            # Unconvertible string supplied where the schema expects an int.
            ({
                'version': 'random_string'
            }, {
                'version': {
                    'schema': {
                        'type': 'int'
                    }
                }
            }, [
                'Schema validation for \'version\' failed: '
                'Could not convert str to int: random_string']),
            # Argument present that no schema entry declares.
            ({
                'exploration_id': 'any_exp_id'
            }, {}, [
                'Found extra args: [\'exploration_id\'].']),
            # Schema requires an argument that is missing from the payload.
            ({}, {
                'exploration_id': {
                    'schema': {
                        'type': 'basestring'
                    }
                }
            }, [
                'Missing key in handler args: exploration_id.'])
        ]
        for handler_args, handler_args_schema, error_msg in (
                list_of_invalid_args_with_schema_and_errors):
            normalized_value, errors = (
                payload_validator.validate_arguments_against_schema(
                    handler_args,
                    handler_args_schema,
                    allowed_extra_args=False,
                    allow_string_to_bool_conversion=False
                )
            )
            # Invalid input normalizes to nothing and reports exactly the
            # expected error strings.
            self.assertEqual(normalized_value, {})
            self.assertEqual(error_msg, errors)

    def test_valid_args_do_not_raises_exception(self) -> None:
        # List of 3-tuples, where the first element is a valid argument dict,
        # the second element is a schema dict and the third element is the
        # normalized value of the corresponding argument.
        list_of_valid_args_with_schema: List[
            Tuple[
                # Here we use type Any because the first element of tuple
                # represents the argument dict and those argument dicts
                # can have various types of values.
                Dict[str, Any],
                # Here we use type Any because the second element of tuple
                # represents the schema dict and those schema dicts
                # can have different types of values.
                Dict[str, Any],
                # Here we use type Any because the third element of tuple
                # represents the normalized value of the corresponding
                # argument.
                Dict[str, Any]
            ]
        ] = [
            # Missing optional arg whose default is None normalizes to {}.
            ({}, {
                'exploration_id': {
                    'schema': {
                        'type': 'basestring'
                    },
                    'default_value': None
                }
            }, {}),
            # Missing optional arg picks up its non-None default value.
            ({}, {
                'exploration_id': {
                    'schema': {
                        'type': 'basestring'
                    },
                    'default_value': 'default_exp_id'
                }
            }, {
                'exploration_id': 'default_exp_id'
            }),
            # A valid string passes through unchanged.
            ({
                'exploration_id': 'any_exp_id'
            }, {
                'exploration_id': {
                    'schema': {
                        'type': 'basestring'
                    }
                }
            }, {
                'exploration_id': 'any_exp_id'
            }),
            # 'true' is converted to bool and the argument is renamed via
            # new_key_for_argument.
            ({
                'apply_draft': 'true'
            }, {
                'apply_draft': {
                    'schema': {
                        'type': 'bool',
                        'new_key_for_argument': 'new_key_for_apply_draft'
                    }
                }
            }, {
                'new_key_for_apply_draft': True
            })
        ]
        for handler_args, handler_args_schema, normalized_value_for_args in (
            list_of_valid_args_with_schema
        ):
            normalized_value, errors = (
                payload_validator.validate_arguments_against_schema(
                    handler_args,
                    handler_args_schema,
                    allowed_extra_args=False,
                    allow_string_to_bool_conversion=True
                )
            )
            self.assertEqual(normalized_value, normalized_value_for_args)
            self.assertEqual(errors, [])
class CheckConversionOfStringToBool(test_utils.GenericTestBase):
    """Test class to check behaviour of convert_string_to_bool method."""

    def METHOD_NAME(self) -> None:
        """Test case to check behaviour of convert_string_to_bool method."""
        self.assertTrue(
            payload_validator.convert_string_to_bool('true'))
        self.assertFalse(
            payload_validator.convert_string_to_bool('false'))
        # Anything other than 'true'/'false' is passed through unchanged.
        self.assertEqual(
            payload_validator.convert_string_to_bool('any_other_value'),
            'any_other_value'
        )
class CheckGetCorrespondingKeyForObjectMethod(test_utils.GenericTestBase):
    """Test class to check behaviour of get_corresponding_key_for_object
    method."""

    def test_get_new_arg_key_from_schema(self) -> None:
        """Test case to check behaviour of new arg key name."""
        # Schema that renames the argument via 'new_key_for_argument'.
        sample_arg_schema = {
            'schema': {
                'new_key_for_argument': 'sample_new_arg_name'
            }
        }
        new_key_name = payload_validator.get_corresponding_key_for_object(
            sample_arg_schema)
        self.assertEqual(new_key_name, 'sample_new_arg_name')
class CheckGetSchemaTypeMethod(test_utils.GenericTestBase):
    """Test class to check behaviour of get_schema_type method."""

    def test_get_schema_type_from_schema(self) -> None:
        """Test case to check behaviour of get_schema_type method."""
        # get_schema_type should pull the nested 'type' value out of a schema.
        sample_arg_schema = {
            'schema': {
                'type': 'bool'
            }
        }
        schema_type = payload_validator.get_schema_type(sample_arg_schema)
        self.assertEqual(schema_type, 'bool')
1,051 | write header | # -*- coding: utf-8 -*-
"""
Created on Fri Dec 09 15:50:53 2016
@author: Alison Kirkby
read and write gocad objects
"""
import numpy as np
import os.path as op
import os
class Sgrid():
    """
    class to read and write gocad sgrid files

    need to provide:
    workdir = working directory
    fn = filename for the sgrid
    resistivity = 3d numpy array containing resistivity values, shape (ny,nx,nz)
    grid_xyz = tuple containing x,y,z locations of edges of cells for each
    resistivity value. Each item in tuple has shape (ny+1,nx+1,nz+1)
    """

    def __init__(self, **kwargs):
        self.workdir = kwargs.pop('workdir', None)
        self.fn = kwargs.pop('fn', 'model')
        # set workdir to directory of fn if not None
        if self.workdir is None:
            print("workdir is None")
            if self.fn is not None:
                try:
                    # NOTE(review): uses os.path directly although the module
                    # is also imported as ``op`` elsewhere; behaviour is the
                    # same, but the style is inconsistent.
                    self.workdir = os.path.dirname(self.fn)
                    print("setting filepath to fn path")
                except:
                    # NOTE(review): bare except — os.path.dirname rarely
                    # raises for strings; consider narrowing this clause.
                    self.workdir = '.'
        # Companion ASCII data file name derived from the header file name.
        self.ascii_data_file = self.fn.replace('.sg', '') + '__ascii@@'
        self.property_name = 'Resistivity'
        self.grid_xyz = kwargs.pop('grid_xyz', None)
        if self.grid_xyz is not None:
            # Node counts per axis (one more than the number of cells).
            self.ncells = self.grid_xyz[0].shape
        self.resistivity = kwargs.pop('resistivity', None)
        # Sentinel written for the padding cells that carry no property value.
        self.no_data_value = -99999

    def _read_header(self, headerfn=None):
        """
        read header, get the following attributes and store in object
        - ascii data file name
        - number of cells in x, y and z direction
        """
        if headerfn is not None:
            # Reading a specific header also resets workdir/fn on the object.
            self.workdir = op.dirname(headerfn)
            self.fn = headerfn
        if self.fn is None:
            print("Cannot read, no header file name provided")
            return
        with open(op.join(self.workdir, self.fn)) as header:
            for line in header.readlines():
                # AXIS_N holds the node counts for the three axes.
                if line.startswith('AXIS_N '):
                    self.ncells = [int(val)
                                   for val in line.strip().split()[1:]]
                # Store selected header parameters as lowercase attributes
                # (currently only ASCII_DATA_FILE -> self.ascii_data_file).
                for param in ['ASCII_DATA_FILE']:
                    if line.startswith(param):
                        setattr(
                            self,
                            str.lower(param),
                            line.strip().split()[1].replace('"', ''))

    def _read_ascii_data(self, ascii_data_file=None):
        # NOTE(review): the ascii_data_file parameter is currently unused;
        # the name stored on the instance (possibly set by _read_header) is
        # what gets loaded.
        if self.ascii_data_file is None:
            self._read_header()
        # '*' marks comment/header lines in the ASCII data file.
        asciidata = np.loadtxt(
            op.join(
                self.workdir,
                self.ascii_data_file),
            comments='*')
        # Columns 0-2 are x, y, z node coordinates; reshape each back to the
        # (ny, nx, nz) node grid.
        self.grid_xyz = [
            asciidata[:, i].reshape(*self.ncells[::-1]).transpose(2, 1, 0)
            for i in range(3)]
        # Column 3 is resistivity; drop the one-cell padding of no-data
        # values appended by _write_data along each axis.
        self.resistivity = asciidata[:, 3].reshape(
            *self.ncells[::-1]).transpose(2, 1, 0)[:-1, :-1, :-1]

    def read_sgrid_file(self, headerfn=None):
        """Read an sgrid model: header (.sg) first, then its ASCII data."""
        self._read_header(headerfn=headerfn)
        self._read_ascii_data()

    def METHOD_NAME(self):
        """Write the GOCAD SGrid header (.sg) file for the current model."""
        # Node counts are one more than cell counts along each axis.
        ny, nx, nz = np.array(self.resistivity.shape) + 1
        headerlines = [
            r'' + item + '\n' for item in [
                'GOCAD SGrid 1 ',
                'HEADER {',
                'name:{}'.format(op.basename(self.fn)),
                'ascii:on',
                'double_precision_binary:off',
                '}',
                'GOCAD_ORIGINAL_COORDINATE_SYSTEM',
                'NAME Default',
                'AXIS_NAME "X" "Y" "Z"',
                'AXIS_UNIT "m" "m" "m"',
                'ZPOSITIVE Elevation',
                'END_ORIGINAL_COORDINATE_SYSTEM',
                'AXIS_N {} {} {} '.format(ny, nx, nz),
                'PROP_ALIGNMENT CELLS',
                'ASCII_DATA_FILE {}'.format(op.basename(self.ascii_data_file)),
                '',
                '',
                'PROPERTY 1 "{}"'.format(self.property_name),
                'PROPERTY_CLASS 1 "{}"'.format(self.property_name),
                'PROPERTY_KIND 1 "Resistivity"',
                'PROPERTY_CLASS_HEADER 1 "{}" '.format(
                    str.lower(self.property_name)) + '{',
                'low_clip:1',
                'high_clip:10000',
                'pclip:99',
                'colormap:flag',
                'last_selected_folder:Property',
                'scale_function:log10',
                '*colormap*reverse:true',
                '}',
                'PROPERTY_SUBCLASS 1 QUANTITY Float',
                'PROP_ORIGINAL_UNIT 1 ohm*m',
                'PROP_UNIT 1 ohm*m',
                'PROP_NO_DATA_VALUE 1 {}'.format(self.no_data_value),
                'PROP_ESIZE 1 4',
                'END']]
        hdrfn = os.path.join(self.workdir, self.fn)
        if not hdrfn.endswith('.sg'):
            hdrfn += '.sg'
        print("saving sgrid to ", hdrfn)
        with open(hdrfn, 'w') as hdrfile:
            hdrfile.writelines(headerlines)

    def _write_data(self):
        """Write node coordinates, property values and i,j,k indices to the
        ASCII data file referenced by the header."""
        # Pad the cell property array by one along each axis with the
        # no-data value so it matches the node grid shape.
        resmodel = np.ones(
            np.array(
                self.resistivity.shape) + 1) * self.no_data_value
        resmodel[:-1, :-1, :-1] = self.resistivity
        resvals = resmodel.transpose(2, 1, 0).flatten()
        x, y, z = [arr.transpose(2, 1, 0).flatten() for arr in self.grid_xyz]
        ny, nx, nz = self.grid_xyz[0].shape
        # i, j, k node index columns expected by the SGrid ASCII format.
        j, i, k = [arr.transpose(2, 1, 0).flatten() for arr in
                   np.meshgrid(*[np.arange(ll) for ll in [nx, ny, nz]])]
        # make an array containing the data
        data = np.vstack([x, y, z, resvals, i, j, k]).T
        # make data header
        datahdr = '\n X Y Z {} I J K\n'.format(self.property_name)
        # write property values
        np.savetxt(
            os.path.join(self.workdir, self.ascii_data_file),
            data,
            header=datahdr,
            comments='*',
            fmt=['%10.6f'] * 4 + ['%10i'] * 3)

    def write_sgrid_file(self):
        """Write both the header (.sg) file and its ASCII data file."""
        self.METHOD_NAME()
        self._write_data()
1,052 | test list plugins disabled | """
Tests for `kolibri.utils.cli` module.
"""
from __future__ import absolute_import
from __future__ import print_function
import logging
import os
import tempfile
import pytest
from mock import patch
from kolibri.plugins.utils import autoremove_unavailable_plugins
from kolibri.utils import cli
from kolibri.utils import options
logger = logging.getLogger(__name__)

# Global scratch list that log_logger() appends (level, message) tuples to.
LOG_LOGGER = []
def log_logger(logger_instance, LEVEL, msg, args, **kwargs):
    """
    Monkeypatching for logging.Logger._log to scoop up log messages if we wanna
    test something specific was logged.
    """
    LOG_LOGGER.append((LEVEL, msg))
    # Call the original function.  ``__log`` is where activate_log_logger
    # stashed the real Logger._log (no name mangling applies at module level).
    logger_instance.__log(LEVEL, msg, args, **kwargs)
def activate_log_logger(monkeypatch):
    """
    Activates logging everything to ``LOG_LOGGER`` with the monkeypatch pattern
    of py.test (test accepts a ``monkeypatch`` argument)
    """
    # Stash the original _log under a new attribute, then divert _log to the
    # recording spy above.  monkeypatch undoes both at test teardown.
    monkeypatch.setattr(logging.Logger, "__log", logging.Logger._log, raising=False)
    monkeypatch.setattr(logging.Logger, "_log", log_logger)
@pytest.fixture
def plugins():
    """
    Point the kolibri plugin configuration at a throwaway temp file for the
    duration of a test, restoring the original conf_file (and removing the
    temp file) afterwards.
    """
    from kolibri import plugins

    # mkstemp returns an open OS-level file descriptor as well as the path;
    # close it immediately so the fixture does not leak a descriptor per test.
    fd, config_file = tempfile.mkstemp(suffix="json")
    os.close(fd)
    old_config_file = plugins.conf_file
    plugins.conf_file = config_file
    plugins.config.set_defaults()
    try:
        yield plugins
    finally:
        # Restore state even if the test body raised, and clean up the file.
        plugins.conf_file = old_config_file
        try:
            os.remove(config_file)
        except OSError:
            pass
@patch("kolibri.plugins.registry.is_initialized", return_value=False)
def test_bogus_plugin_autoremove(is_initialized_mock, plugins):
    """
    Checks that a plugin is auto-removed when it cannot be imported.

    The ``@patch`` decorator passes its MagicMock as the first positional
    argument, so the signature must accept it before the ``plugins`` fixture
    (same pattern as test_list_plugins below); previously the mock was bound
    to ``plugins`` and the fixture was never used.
    """
    plugin_name = "giraffe.horse"
    plugins.config["INSTALLED_PLUGINS"].add(plugin_name)
    plugins.config.save()
    autoremove_unavailable_plugins()
    assert plugin_name not in plugins.config["INSTALLED_PLUGINS"]
@patch("kolibri.plugins.registry.is_initialized", return_value=False)
def test_bogus_plugin_autoremove_no_path(is_initialized_mock, plugins):
    """
    Checks that a plugin without a dotted path is also auto-removed.

    As above, the ``@patch`` mock must be accepted as the first argument so
    the ``plugins`` fixture is actually injected by pytest.
    """
    plugin_name = "giraffehorse"
    plugins.config["INSTALLED_PLUGINS"].add(plugin_name)
    plugins.config.save()
    autoremove_unavailable_plugins()
    assert plugin_name not in plugins.config["INSTALLED_PLUGINS"]
def test_bogus_plugin_disable(plugins):
    """Disabling a nonexistent plugin must leave the configuration untouched."""
    before_installed = plugins.config["INSTALLED_PLUGINS"].copy()
    before_disabled = plugins.config["DISABLED_PLUGINS"].copy()
    try:
        cli.disable.callback(("i_do_not_exist",), False)
    except Exception:
        # The command may complain; only the resulting state matters here.
        pass
    assert plugins.config["INSTALLED_PLUGINS"] == before_installed
    assert plugins.config["DISABLED_PLUGINS"] == before_disabled
def test_plugin_cannot_be_imported_disable(plugins):
    """
    A plugin may be in plugins.config['INSTALLED_PLUGINS'] but broken or uninstalled
    """
    plugin_name = "giraffe.horse"
    plugins.config["INSTALLED_PLUGINS"].add(plugin_name)
    plugins.config.save()
    try:
        cli.disable.callback((plugin_name,), False)
    except Exception:
        # Disabling may raise because the plugin cannot be imported; the
        # interesting assertions are about the resulting config state below.
        pass
    assert plugin_name not in plugins.config["INSTALLED_PLUGINS"]
    # We also don't want to endlessly add cruft to the disabled apps
    assert plugin_name not in plugins.config["DISABLED_PLUGINS"]
def test_real_plugin_disable(plugins):
    """Disabling a real, installed plugin moves it from INSTALLED to DISABLED."""
    installed_apps_before = plugins.config["INSTALLED_PLUGINS"].copy()
    test_plugin = "kolibri.plugins.media_player"
    assert test_plugin in installed_apps_before
    # Because RIP example plugin
    cli.disable.callback((test_plugin,), False)
    assert test_plugin not in plugins.config["INSTALLED_PLUGINS"]
    assert test_plugin in plugins.config["DISABLED_PLUGINS"]
def test_real_plugin_disable_twice(plugins):
    """Disabling an already-disabled plugin is a harmless no-op."""
    installed_apps_before = plugins.config["INSTALLED_PLUGINS"].copy()
    test_plugin = "kolibri.plugins.media_player"
    assert test_plugin in installed_apps_before
    cli.disable.callback((test_plugin,), False)
    assert test_plugin not in plugins.config.ACTIVE_PLUGINS
    assert test_plugin not in plugins.config["INSTALLED_PLUGINS"]
    assert test_plugin in plugins.config["DISABLED_PLUGINS"]
    # Disable a second time: every assertion must still hold unchanged.
    installed_apps_before = plugins.config["INSTALLED_PLUGINS"].copy()
    cli.disable.callback((test_plugin,), False)
    assert test_plugin not in plugins.config.ACTIVE_PLUGINS
    assert test_plugin not in plugins.config["INSTALLED_PLUGINS"]
    assert test_plugin in plugins.config["DISABLED_PLUGINS"]
def test_plugin_with_no_plugin_class(plugins):
    """
    Expected behavior is that nothing blows up with exceptions, user just gets
    a warning and nothing is enabled or changed in the configuration.
    """
    # For fun, we pass in a system library
    installed_apps_before = plugins.config["INSTALLED_PLUGINS"].copy()
    try:
        cli.enable.callback(("os.path",), False)
    except Exception:
        pass
    # Enabling a non-plugin module must not modify the installed set.
    assert installed_apps_before == plugins.config["INSTALLED_PLUGINS"]
@pytest.mark.django_db
def test_kolibri_listen_port_env(monkeypatch):
    """
    Starts and stops the server, mocking the actual server.start().
    Checks that the correct fallback port is used from the environment.

    Fix: the SystemExit status is carried on ``excinfo.value.code`` —
    pytest's ExceptionInfo object itself has no ``code`` attribute, so the
    previous ``excinfo.code`` comparisons raised AttributeError.
    """
    with patch("django.core.management.call_command"), patch(
        "kolibri.utils.server.start"
    ) as start:
        from kolibri.utils import server

        def start_mock(port, *args, **kwargs):
            # The mocked server must be asked to start on the env-derived port.
            assert port == test_port
            try:
                os.remove(server.PID_FILE)
            except OSError:
                pass

        activate_log_logger(monkeypatch)
        start.side_effect = start_mock
        test_port = 1234
        test_zip_port = 1432
        os.environ["KOLIBRI_HTTP_PORT"] = str(test_port)
        # force a reload of plugins.OPTIONS so the environment variable will be read in
        from kolibri.utils import conf

        conf.OPTIONS.update(options.read_options_file())
        cli.start.callback(test_port, test_zip_port, False)
        with pytest.raises(SystemExit) as excinfo:
            cli.stop.callback()
        assert excinfo.value.code == 0
        # Stop the server AGAIN, asserting that we can call the stop command
        # on an already stopped server and will be gracefully informed about
        # it.
        with pytest.raises(SystemExit) as excinfo:
            cli.stop.callback()
        assert excinfo.value.code == 0
        assert "Already stopped" in LOG_LOGGER[-1][1]

        def status_starting_up():
            raise server.NotRunning(server.STATUS_STARTING_UP)

        # Ensure that if a server is reported to be 'starting up', it still
        # successfully shuts down.
        monkeypatch.setattr(server, "get_status", status_starting_up)
        with pytest.raises(SystemExit) as excinfo:
            cli.stop.callback()
        assert excinfo.value.code == server.STATUS_STARTING_UP
        assert "successfully been stopped" in LOG_LOGGER[-1][1]
def test_cli_usage():
    """``kolibri -h`` and ``kolibri --version`` exit cleanly with status 0."""
    # Test the -h
    with pytest.raises(SystemExit) as excinfo:
        cli.main("-h")
    # The exit status lives on the caught exception (excinfo.value.code);
    # ExceptionInfo itself has no ``code`` attribute.
    assert excinfo.value.code == 0
    with pytest.raises(SystemExit) as excinfo:
        cli.main("--version")
    assert excinfo.value.code == 0
@patch("kolibri.utils.cli.click.echo")
def test_list_plugins(echo_mock, plugins):
    """The plugin listing must mention media_player as ENABLED."""
    cli.list.callback()
    test_plugin = "kolibri.plugins.media_player"
    # The previous version computed any(...) but never asserted it, so the
    # test passed vacuously regardless of the listing's contents.
    assert any(
        map(
            lambda x: test_plugin in x[0] and "ENABLED" in x[0],
            echo_mock.call_args_list,
        )
    )
@patch("kolibri.utils.cli.click.echo")
def METHOD_NAME(echo_mock, plugins):
    """After disabling media_player, the plugin listing must show it DISABLED."""
    test_plugin = "kolibri.plugins.media_player"
    # Disable first, THEN list: previously the listing ran before the
    # disable, so the captured output could never contain DISABLED — and the
    # any(...) result was never asserted anyway.
    cli.disable.callback((test_plugin,), False)
    cli.list.callback()
    assert any(
        map(
            lambda x: test_plugin in x[0] and "DISABLED" in x[0],
            echo_mock.call_args_list,
        )
    )
1,053 | raise not implemented | import functools
from typing import NoReturn
from dbt.events.functions import warn_or_error
from dbt.events.types import JinjaLogWarning
from dbt.exceptions import (
DbtRuntimeError,
MissingConfigError,
MissingMaterializationError,
MissingRelationError,
AmbiguousAliasError,
AmbiguousCatalogMatchError,
CacheInconsistencyError,
DataclassNotDictError,
CompilationError,
DbtDatabaseError,
DependencyNotFoundError,
DependencyError,
DuplicatePatchPathError,
DuplicateResourceNameError,
PropertyYMLError,
NotImplementedError,
RelationWrongTypeError,
ContractError,
ColumnTypeMissingError,
FailFastError,
scrub_secrets,
env_secrets,
)
def warn(msg, node=None):
    """Emit a Jinja-originated warning; returns '' so it renders as nothing."""
    warn_or_error(JinjaLogWarning(msg=msg), node=node)
    return ""
# Thin module-level shims: each converts a positional-argument call into
# raising the corresponding dbt exception type.  They are collected into
# CONTEXT_EXPORTS for exposure under the Jinja context's `exceptions` key.


def missing_config(model, name) -> NoReturn:
    raise MissingConfigError(unique_id=model.unique_id, name=name)


def missing_materialization(model, adapter_type) -> NoReturn:
    raise MissingMaterializationError(
        materialization=model.config.materialized, adapter_type=adapter_type
    )


def missing_relation(relation, model=None) -> NoReturn:
    raise MissingRelationError(relation, model)


def raise_ambiguous_alias(node_1, node_2, duped_name=None) -> NoReturn:
    raise AmbiguousAliasError(node_1, node_2, duped_name)


def raise_ambiguous_catalog_match(unique_id, match_1, match_2) -> NoReturn:
    raise AmbiguousCatalogMatchError(unique_id, match_1, match_2)


def raise_cache_inconsistent(message) -> NoReturn:
    raise CacheInconsistencyError(message)


def raise_dataclass_not_dict(obj) -> NoReturn:
    raise DataclassNotDictError(obj)


def raise_compiler_error(msg, node=None) -> NoReturn:
    raise CompilationError(msg, node)


def raise_contract_error(yaml_columns, sql_columns) -> NoReturn:
    raise ContractError(yaml_columns, sql_columns)


def raise_database_error(msg, node=None) -> NoReturn:
    raise DbtDatabaseError(msg, node)


def raise_dep_not_found(node, node_description, required_pkg) -> NoReturn:
    raise DependencyNotFoundError(node, node_description, required_pkg)


def raise_dependency_error(msg) -> NoReturn:
    # Dependency messages may embed environment values; scrub secrets first.
    raise DependencyError(scrub_secrets(msg, env_secrets()))


def raise_duplicate_patch_name(patch_1, existing_patch_path) -> NoReturn:
    raise DuplicatePatchPathError(patch_1, existing_patch_path)


def raise_duplicate_resource_name(node_1, node_2) -> NoReturn:
    raise DuplicateResourceNameError(node_1, node_2)


def raise_invalid_property_yml_version(path, issue) -> NoReturn:
    raise PropertyYMLError(path, issue)


def METHOD_NAME(msg) -> NoReturn:
    # NOTE: raises dbt's own NotImplementedError (imported from
    # dbt.exceptions), which shadows the Python builtin of the same name.
    raise NotImplementedError(msg)


def relation_wrong_type(relation, expected_type, model=None) -> NoReturn:
    raise RelationWrongTypeError(relation, expected_type, model)


def column_type_missing(column_names) -> NoReturn:
    raise ColumnTypeMissingError(column_names)


def raise_fail_fast_error(msg, node=None) -> NoReturn:
    raise FailFastError(msg, node=node)
# Update this when a new function should be added to the
# dbt context's `exceptions` key!
# Maps each helper's __name__ to the function itself.
CONTEXT_EXPORTS = {
    fn.__name__: fn
    for fn in [
        warn,
        missing_config,
        missing_materialization,
        missing_relation,
        raise_ambiguous_alias,
        raise_ambiguous_catalog_match,
        raise_cache_inconsistent,
        raise_dataclass_not_dict,
        raise_compiler_error,
        raise_database_error,
        raise_dep_not_found,
        raise_dependency_error,
        raise_duplicate_patch_name,
        raise_duplicate_resource_name,
        raise_invalid_property_yml_version,
        METHOD_NAME,
        relation_wrong_type,
        raise_contract_error,
        column_type_missing,
        raise_fail_fast_error,
    ]
}
# wraps context based exceptions in node info
def wrapper(model):
    """Decorator factory: any DbtRuntimeError raised by the wrapped function
    is tagged with *model* before being re-raised."""
    def decorate(func):
        @functools.wraps(func)
        def wrapped(*args, **kwargs):
            try:
                result = func(*args, **kwargs)
            except DbtRuntimeError as exc:
                exc.add_node(model)
                raise exc
            return result
        return wrapped
    return decorate
def wrapped_exports(model):
    """Return CONTEXT_EXPORTS with every helper wrapped so raised
    DbtRuntimeErrors get *model* attached."""
    decorate = wrapper(model)
    exports = {}
    for name, export in CONTEXT_EXPORTS.items():
        exports[name] = decorate(export)
    return exports
1,054 | method | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
# NOTE(review): aaz-dev-tools generated command class — keep edits minimal and
# regenerate rather than hand-modify where possible.
@register_command(
    "network vnet-gateway delete",
)
class Delete(AAZCommand):
    """Delete a virtual network gateway.
    In order to delete a Virtual Network Gateway, you must first delete ALL Connection objects in Azure that are connected to the Gateway. After deleting the Gateway, proceed to delete other resources now not in use. For more information, follow the order of instructions on this page: https://docs.microsoft.com/azure/vpn-gateway/vpn-gateway-delete-vnet-gateway-portal
    :example: Delete a virtual network gateway.
        az network vnet-gateway delete -g MyResourceGroup -n MyVnetGateway
    """

    _aaz_info = {
        "version": "2017-10-01",
        "resources": [
            ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.network/virtualnetworkgateways/{}", "2017-10-01"],
        ]
    }

    AZ_SUPPORT_NO_WAIT = True

    def _handler(self, command_args):
        # Run the delete as a long-running operation and return a poller.
        super()._handler(command_args)
        return self.build_lro_poller(self._execute_operations, None)

    _args_schema = None

    @classmethod
    def _build_arguments_schema(cls, *args, **kwargs):
        # The schema is cached on the class; build it only once.
        if cls._args_schema is not None:
            return cls._args_schema
        cls._args_schema = super()._build_arguments_schema(*args, **kwargs)

        # define Arg Group ""

        _args_schema = cls._args_schema
        _args_schema.resource_group = AAZResourceGroupNameArg(
            required=True,
        )
        _args_schema.name = AAZStrArg(
            options=["-n", "--name"],
            help="Name of the VNet gateway.",
            required=True,
            id_part="name",
        )
        return cls._args_schema

    def _execute_operations(self):
        self.pre_operations()
        yield self.VirtualNetworkGatewaysDelete(ctx=self.ctx)()
        self.post_operations()

    @register_callback
    def pre_operations(self):
        pass

    @register_callback
    def post_operations(self):
        pass

    class VirtualNetworkGatewaysDelete(AAZHttpOperation):
        CLIENT_TYPE = "MgmtClient"

        def __call__(self, *args, **kwargs):
            request = self.make_request()
            session = self.client.send_request(request=request, stream=False, **kwargs)
            # NOTE(review): the 202 and 200 branches below are identical
            # (generated code); all success codes poll the LRO to completion.
            if session.http_response.status_code in [202]:
                return self.client.build_lro_polling(
                    self.ctx.args.no_wait,
                    session,
                    self.on_200,
                    self.on_error,
                    lro_options={"final-state-via": "location"},
                    path_format_arguments=self.url_parameters,
                )
            if session.http_response.status_code in [200]:
                return self.client.build_lro_polling(
                    self.ctx.args.no_wait,
                    session,
                    self.on_200,
                    self.on_error,
                    lro_options={"final-state-via": "location"},
                    path_format_arguments=self.url_parameters,
                )
            if session.http_response.status_code in [204]:
                return self.client.build_lro_polling(
                    self.ctx.args.no_wait,
                    session,
                    self.on_204,
                    self.on_error,
                    lro_options={"final-state-via": "location"},
                    path_format_arguments=self.url_parameters,
                )
            return self.on_error(session.http_response)

        @property
        def url(self):
            return self.client.format_url(
                "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}",
                **self.url_parameters
            )

        @property
        def METHOD_NAME(self):
            # HTTP verb used for this operation.
            return "DELETE"

        @property
        def error_format(self):
            return "MgmtErrorFormat"

        @property
        def url_parameters(self):
            parameters = {
                **self.serialize_url_param(
                    "resourceGroupName", self.ctx.args.resource_group,
                    required=True,
                ),
                **self.serialize_url_param(
                    "subscriptionId", self.ctx.subscription_id,
                    required=True,
                ),
                **self.serialize_url_param(
                    "virtualNetworkGatewayName", self.ctx.args.name,
                    required=True,
                ),
            }
            return parameters

        @property
        def query_parameters(self):
            parameters = {
                **self.serialize_query_param(
                    "api-version", "2017-10-01",
                    required=True,
                ),
            }
            return parameters

        def on_200(self, session):
            # DELETE returns no body to deserialize.
            pass

        def on_204(self, session):
            pass
class _DeleteHelper:
    """Helper class for Delete"""


# Public API of this generated module.
__all__ = ["Delete"]
1,055 | test asfortranarray3 | import unittest
import pytest
import numpy
import cupy
from cupy import testing
class TestKind(unittest.TestCase):
    """Tests for cupy's array-kind conversion helpers: asarray_chkfinite,
    asfarray, asfortranarray and require."""

    @testing.for_orders('CFAK')
    @testing.for_all_dtypes()
    @testing.numpy_cupy_array_equal()
    def test_asarray_chkfinite(self, xp, dtype, order):
        a = [0, 4, 0, 5]
        return xp.asarray_chkfinite(a, dtype=dtype, order=order)

    @testing.for_orders('CFAK')
    @testing.for_all_dtypes(no_bool=True)
    def test_asarray_chkfinite_non_finite_vals(self, dtype, order):
        a = [-numpy.inf, 0., numpy.inf, numpy.nan]
        for xp in (numpy, cupy):
            # Integer dtypes overflow when converting inf/nan; float dtypes
            # raise ValueError from the finiteness check.
            if xp.issubdtype(dtype, xp.integer):
                error = OverflowError
            else:
                error = ValueError
            with pytest.raises(error):
                xp.asarray_chkfinite(a, dtype=dtype, order=order)

    @testing.for_all_dtypes()
    def test_asfarray(self, dtype):
        a = cupy.asarray([1, 2, 3])
        a_gpu = cupy.asfarray(a, dtype)
        a_cpu = numpy.asfarray(a.get(), dtype)
        # GPU result must promote to the same floating dtype as numpy.
        assert a_cpu.dtype == a_gpu.dtype

    @testing.for_all_dtypes()
    def test_asfortranarray1(self, dtype):
        # C-contiguous 2-D input becomes F-contiguous; strides must match numpy.
        def func(xp):
            x = xp.zeros((2, 3), dtype)
            ret = xp.asfortranarray(x)
            assert x.flags.c_contiguous
            assert ret.flags.f_contiguous
            return ret.strides
        assert func(numpy) == func(cupy)

    @testing.for_all_dtypes()
    def test_asfortranarray2(self, dtype):
        # Same as above for a 3-D input.
        def func(xp):
            x = xp.zeros((2, 3, 4), dtype)
            ret = xp.asfortranarray(x)
            assert x.flags.c_contiguous
            assert ret.flags.f_contiguous
            return ret.strides
        assert func(numpy) == func(cupy)

    @testing.for_all_dtypes()
    def METHOD_NAME(self, dtype):
        # asfortranarray applied twice must be idempotent.
        def func(xp):
            x = xp.zeros((2, 3, 4), dtype)
            ret = xp.asfortranarray(xp.asfortranarray(x))
            assert x.flags.c_contiguous
            assert ret.flags.f_contiguous
            return ret.strides
        assert func(numpy) == func(cupy)

    @testing.for_all_dtypes()
    def test_asfortranarray4(self, dtype):
        # A transposed (already F-ordered) view stays F-contiguous.
        def func(xp):
            x = xp.zeros((2, 3), dtype)
            x = xp.transpose(x, (1, 0))
            ret = xp.asfortranarray(x)
            assert ret.flags.f_contiguous
            return ret.strides
        assert func(numpy) == func(cupy)

    @testing.for_all_dtypes()
    def test_asfortranarray5(self, dtype):
        # Non-zero data round-trips through asfortranarray as well.
        def func(xp):
            x = testing.shaped_arange((2, 3), xp, dtype)
            ret = xp.asfortranarray(x)
            assert x.flags.c_contiguous
            assert ret.flags.f_contiguous
            return ret.strides
        assert func(numpy) == func(cupy)

    @testing.for_all_dtypes()
    def test_require_flag_check(self, dtype):
        possible_flags = [['C_CONTIGUOUS'], ['F_CONTIGUOUS']]
        x = cupy.zeros((2, 3, 4), dtype)
        for flags in possible_flags:
            arr = cupy.require(x, dtype, flags)
            for parameter in flags:
                assert arr.flags[parameter]
            assert arr.dtype == dtype

    @testing.for_all_dtypes()
    def test_require_owndata(self, dtype):
        x = cupy.zeros((2, 3, 4), dtype)
        # A view does not own its data; require(..., ['O']) must copy.
        arr = x.view()
        arr = cupy.require(arr, dtype, ['O'])
        assert arr.flags['OWNDATA']

    @testing.for_all_dtypes()
    def test_require_C_and_F_flags(self, dtype):
        x = cupy.zeros((2, 3, 4), dtype)
        # C and F contiguity are mutually exclusive requirements.
        with pytest.raises(ValueError):
            cupy.require(x, dtype, ['C', 'F'])

    @testing.for_all_dtypes()
    def test_require_incorrect_requirments(self, dtype):
        x = cupy.zeros((2, 3, 4), dtype)
        with pytest.raises(ValueError):
            cupy.require(x, dtype, ['W'])

    @testing.for_all_dtypes()
    def test_require_incorrect_dtype(self, dtype):
        x = cupy.zeros((2, 3, 4), dtype)
        with pytest.raises(ValueError):
            cupy.require(x, 'random', 'C')

    @testing.for_all_dtypes()
    def test_require_empty_requirements(self, dtype):
        x = cupy.zeros((2, 3, 4), dtype)
        x = cupy.require(x, dtype, [])
        assert x.flags['C_CONTIGUOUS']
1,056 | get label | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
import torch
import torch.nn.functional as F
from mmcv.transforms import to_tensor
from mmengine.structures import LabelData
from mmaction.models import CutmixBlending, MixupBlending, RandomBatchAugment
from mmaction.structures import ActionDataSample
def METHOD_NAME(label_):
    """Wrap each raw label in `label_` into an ActionDataSample.

    Args:
        label_: iterable of label tensors (class indices or one-hot rows).

    Returns:
        list[ActionDataSample]: one data sample per input label, with the
        label stored in ``gt_labels``.
    """
    # Fix: iterate the labels directly instead of enumerate() + re-indexing
    # (the loop value was previously unused).
    samples = []
    for one_label in label_:
        data_sample = ActionDataSample()
        data_sample.gt_labels = LabelData(item=one_label)
        samples.append(data_sample)
    return samples
def test_mixup():
    """MixupBlending keeps the batch shape and per-sample label count for
    NCHW batches, NCTHW batches, and one-hot multi-labels."""
    alpha = 0.2
    num_classes = 10
    mixup = MixupBlending(num_classes, alpha)

    # NCHW and NCTHW image batches with integer class labels.
    for img_shape in [(4, 4, 3, 32, 32), (4, 4, 2, 3, 32, 32)]:
        label = METHOD_NAME([to_tensor(x) for x in range(4)])
        imgs = torch.randn(*img_shape)
        mixed_imgs, mixed_label = mixup(imgs, label)
        assert mixed_imgs.shape == torch.Size(img_shape)
        assert len(mixed_label) == 4

    # multi-label with one-hot tensor as label
    imgs = torch.randn(4, 4, 2, 3, 32, 32)
    label = METHOD_NAME(F.one_hot(torch.arange(4), num_classes=num_classes))
    mixed_imgs, mixed_label = mixup(imgs, label)
    assert mixed_imgs.shape == torch.Size((4, 4, 2, 3, 32, 32))
    assert len(mixed_label) == 4
def test_cutmix():
    """CutmixBlending preserves the batch shape and the per-sample label
    count for NCHW batches, NCTHW batches, and one-hot multi-labels."""
    alpha = 0.2
    num_classes = 10
    label = METHOD_NAME([to_tensor(x) for x in range(4)])
    cutmix = CutmixBlending(num_classes, alpha)

    # NCHW imgs
    imgs = torch.randn(4, 4, 3, 32, 32)
    mixed_imgs, mixed_label = cutmix(imgs, label)
    assert mixed_imgs.shape == torch.Size((4, 4, 3, 32, 32))
    assert len(mixed_label) == 4

    # NCTHW imgs (extra temporal dimension)
    imgs = torch.randn(4, 4, 2, 3, 32, 32)
    label = METHOD_NAME([to_tensor(x) for x in range(4)])
    mixed_imgs, mixed_label = cutmix(imgs, label)
    assert mixed_imgs.shape == torch.Size((4, 4, 2, 3, 32, 32))
    assert len(mixed_label) == 4

    # multi-label with one-hot tensor as label
    imgs = torch.randn(4, 4, 2, 3, 32, 32)
    label = METHOD_NAME(F.one_hot(torch.arange(4), num_classes=num_classes))
    mixed_imgs, mixed_label = cutmix(imgs, label)
    assert mixed_imgs.shape == torch.Size((4, 4, 2, 3, 32, 32))
    assert len(mixed_label) == 4
def test_rand_blend():
    """RandomBatchAugment validates its probabilities and applies one of the
    configured blending augmentations to a batch."""
    alpha_mixup = 0.2
    alpha_cutmix = 0.2
    num_classes = 10
    label = METHOD_NAME([to_tensor(x) for x in range(4)])
    blending_augs = [
        dict(type='MixupBlending', alpha=alpha_mixup, num_classes=num_classes),
        dict(
            type='CutmixBlending', alpha=alpha_cutmix, num_classes=num_classes)
    ]

    # test assertion: probs 0.5 + 0.6 exceed 1 and must be rejected.
    with pytest.raises(AssertionError):
        rand_mix = RandomBatchAugment(blending_augs, [0.5, 0.6])

    # mixup, cutmix (no explicit probabilities)
    rand_mix = RandomBatchAugment(blending_augs, probs=None)
    assert rand_mix.probs is None

    # mixup, cutmix and None: the remaining mass (1 - 0.5 - 0.4 = 0.1)
    # is the probability of applying no augmentation.
    probs = [0.5, 0.4]
    rand_mix = RandomBatchAugment(blending_augs, probs)
    np.testing.assert_allclose(rand_mix.probs[-1], 0.1)

    # test call
    imgs = torch.randn(4, 4, 3, 32, 32)  # NCHW imgs
    mixed_imgs, mixed_label = rand_mix(imgs, label)
    assert mixed_imgs.shape == torch.Size((4, 4, 3, 32, 32))
    assert len(mixed_label) == 4

    imgs = torch.randn(4, 4, 2, 3, 32, 32)  # NCTHW imgs
    label = METHOD_NAME([to_tensor(x) for x in range(4)])
    mixed_imgs, mixed_label = rand_mix(imgs, label)
    assert mixed_imgs.shape == torch.Size((4, 4, 2, 3, 32, 32))
    assert len(mixed_label) == 4

    # multi-label with one-hot tensor as label
    imgs = torch.randn(4, 4, 2, 3, 32, 32)
    label = METHOD_NAME(F.one_hot(torch.arange(4), num_classes=num_classes))
    mixed_imgs, mixed_label = rand_mix(imgs, label)
    assert mixed_imgs.shape == torch.Size((4, 4, 2, 3, 32, 32))
    assert len(mixed_label) == 4
1,057 | is match | # -*- coding: utf-8 -*-
import json
import os
import unittest
from SpiffWorkflow.bpmn.parser.BpmnParser import BpmnValidator
from SpiffWorkflow.task import TaskState
from SpiffWorkflow.bpmn.serializer.workflow import BpmnWorkflowSerializer, DEFAULT_SPEC_CONFIG
from .BpmnLoaderForTests import TestUserTaskConverter, TestBpmnParser, TestDataStoreConverter
__author__ = 'matth'
# Register the test-only converters so workflows containing the test task
# specs / data stores can round-trip through the serializer.
DEFAULT_SPEC_CONFIG['task_specs'].append(TestUserTaskConverter)
DEFAULT_SPEC_CONFIG['task_specs'].append(TestDataStoreConverter)

# Spec converter shared by every test case in this module.
wf_spec_converter = BpmnWorkflowSerializer.configure_workflow_spec_converter(spec_config=DEFAULT_SPEC_CONFIG)
class BpmnWorkflowTestCase(unittest.TestCase):
    """Shared helpers for BPMN workflow tests: loading specs from the data
    directory, completing READY tasks by (possibly path-qualified) name, and
    round-tripping workflows through the serializer."""

    # One serializer instance shared by all tests in the class.
    serializer = BpmnWorkflowSerializer(wf_spec_converter)

    def get_parser(self, filename, validate=True):
        """Return a TestBpmnParser loaded with the given file(s) from ./data."""
        f = os.path.join(os.path.dirname(__file__), 'data', filename)
        # Validation can be turned off for deliberately malformed test files.
        validator = BpmnValidator() if validate else None
        parser = TestBpmnParser(validator=validator)
        parser.add_bpmn_files_by_glob(f)
        return parser

    def load_workflow_spec(self, filename, process_name, validate=True):
        """Return (top-level spec, subprocess specs) for the named process."""
        parser = self.get_parser(filename, validate)
        top_level_spec = parser.get_spec(process_name)
        subprocesses = parser.get_subprocess_specs(process_name)
        return top_level_spec, subprocesses

    def load_collaboration(self, filename, collaboration_name):
        """Return the named collaboration parsed from the given data file."""
        f = os.path.join(os.path.dirname(__file__), 'data', filename)
        parser = TestBpmnParser()
        parser.add_bpmn_files_by_glob(f)
        return parser.get_collaboration(collaboration_name)

    def get_all_specs(self, filename):
        """Return every spec found in the given data file."""
        f = os.path.join(os.path.dirname(__file__), 'data', filename)
        parser = TestBpmnParser()
        parser.add_bpmn_files_by_glob(f)
        return parser.find_all_specs()

    def do_next_exclusive_step(self, step_name, with_save_load=False, set_attribs=None, choice=None):
        """Run engine steps, then complete the single READY task named
        `step_name` (fails if there is not exactly one)."""
        if with_save_load:
            # NOTE(review): save_restore_all is not defined in this class;
            # presumably provided by subclasses — confirm before relying on it.
            self.save_restore_all()

        self.workflow.do_engine_steps()
        tasks = self.workflow.get_tasks(TaskState.READY)
        self._do_single_step(step_name, tasks, set_attribs, choice)

    def do_next_named_step(self, step_name, with_save_load=False, set_attribs=None, choice=None, only_one_instance=True):
        """Complete a READY task matching `step_name`, which may be a
        'parent|...|name' path restricting the match by task ancestry."""
        if with_save_load:
            self.save_restore()

        self.workflow.do_engine_steps()
        step_name_path = step_name.split("|")

        def switch_workflow(p):
            # Map a task at the top of a subprocess back to the task in the
            # outer workflow that owns that subprocess.
            for task_id, sp in p.workflow._get_outermost_workflow().subprocesses.items():
                if p in sp.get_tasks(workflow=sp):
                    return p.workflow.get_task_from_id(task_id)

        def METHOD_NAME(t):
            # A task matches when its (bpmn) name equals the last path element
            # and every earlier element appears among its ancestors, crossing
            # subprocess boundaries via switch_workflow() when needed.
            if not (t.task_spec.name == step_name_path[-1] or t.task_spec.bpmn_name == step_name_path[-1]):
                return False
            for parent_name in step_name_path[:-1]:
                p = t.parent
                found = False
                while (p and p != p.parent):
                    if (p.task_spec.name == parent_name or p.task_spec.bpmn_name == parent_name):
                        found = True
                        break
                    if p.parent is None and p.workflow != p.workflow.parent:
                        p = switch_workflow(p)
                    else:
                        p = p.parent
                if not found:
                    return False
            return True

        tasks = [t for t in self.workflow.get_tasks(TaskState.READY) if METHOD_NAME(t)]
        self._do_single_step(
            step_name_path[-1], tasks, set_attribs, choice, only_one_instance=only_one_instance)

    def assertTaskNotReady(self, step_name):
        """Assert that no READY task has the given (bpmn) name."""
        tasks = list([t for t in self.workflow.get_tasks(TaskState.READY)
                      if t.task_spec.name == step_name or t.task_spec.bpmn_name == step_name])
        self.assertEqual([], tasks)

    def _do_single_step(self, step_name, tasks, set_attribs=None, choice=None, only_one_instance=True):
        """Validate the candidate task list, set task data, and run tasks[0]."""
        if only_one_instance:
            self.assertEqual(
                len(tasks), 1, 'Did not find one task for \'%s\' (got %d)' % (step_name, len(tasks)))
        else:
            self.assertNotEqual(
                len(tasks), 0, 'Did not find any tasks for \'%s\'' % (step_name))

        self.assertTrue(
            tasks[0].task_spec.name == step_name or tasks[
                0].task_spec.bpmn_name == step_name,
            'Expected step %s, got %s (%s)' % (step_name, tasks[0].task_spec.bpmn_name, tasks[0].task_spec.name))
        if not set_attribs:
            set_attribs = {}
        if choice:
            set_attribs['choice'] = choice
        if set_attribs:
            tasks[0].set_data(**set_attribs)
        tasks[0].run()

    def save_restore(self):
        """Round-trip the workflow through the serializer and JSON, assert the
        state and dump are unchanged, and adopt the restored workflow."""
        script_engine = self.workflow.script_engine
        before_state = self._get_workflow_state(do_steps=False)
        before_dump = self.workflow.get_dump()
        # Check that we can actually convert this to JSON
        json_str = json.dumps(before_state)
        after = self.serializer.workflow_from_dict(json.loads(json_str))
        # Check that serializing and deserializing results in the same workflow
        after_state = self.serializer.workflow_to_dict(after)
        after_dump = after.get_dump()
        self.maxDiff = None
        self.assertEqual(before_dump, after_dump)
        self.assertEqual(before_state, after_state)
        self.workflow = after
        # The script engine is not part of the serialized state; carry it over.
        self.workflow.script_engine = script_engine

    def restore(self, state):
        """Replace self.workflow with one deserialized from `state`."""
        self.workflow = self.serializer.workflow_from_dict(state)

    def _get_workflow_state(self, do_steps=True):
        """Serialize the current workflow to a dict, optionally after running
        engine steps and refreshing waiting tasks."""
        if do_steps:
            self.workflow.do_engine_steps()
            self.workflow.refresh_waiting_tasks()
        return self.serializer.workflow_to_dict(self.workflow)
1,058 | setup | # -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
from . import lang_EU
ZERO = 'nulla'
class Num2Word_HU(lang_EU.Num2Word_EU):
    """Hungarian number-to-words converter built on the EU base class."""

    # Scale-word suffixes combined by the base class (e.g. m + illió).
    GIGA_SUFFIX = "illiárd"
    MEGA_SUFFIX = "illió"

    def METHOD_NAME(self):
        """Build the Hungarian word tables on top of the EU base setup."""
        super(Num2Word_HU, self).METHOD_NAME()

        self.negword = "mínusz "
        self.pointword = "egész"

        self.mid_numwords = [(1000, "ezer"), (100, "száz"), (90, "kilencven"),
                             (80, "nyolcvan"), (70, "hetven"), (60, "hatvan"),
                             (50, "ötven"), (40, "negyven"), (30, "harminc")]
        low_numwords = ["kilenc", "nyolc", "hét", "hat", "öt", "négy", "három",
                        "kettő", "egy"]
        # 10-19 take the "tizen" prefix, 20-29 the "huszon" prefix; the final
        # table runs from the highest word down to ZERO.
        self.low_numwords = (['tizen' + w for w in low_numwords]
                             + ['tíz']
                             + low_numwords)
        self.low_numwords = (['huszon' + w for w in low_numwords]
                             + ['húsz']
                             + self.low_numwords
                             + [ZERO])
        # Cardinal stem -> ordinal/fraction stem; to_ordinal() appends "ik".
        self.partial_ords = {
            'nulla': 'nullad',
            'egy': 'egyed',
            'kettő': 'ketted',
            'három': 'harmad',
            'négy': 'negyed',
            'öt': 'ötöd',
            'hat': 'hatod',
            'hét': 'heted',
            'nyolc': 'nyolcad',
            'kilenc': 'kilenced',
            'tíz': 'tized',
            'húsz': 'huszad',
            'harminc': 'harmincad',
            'negyven': 'negyvened',
            'ötven': 'ötvened',
            'hatvan': 'hatvanad',
            'hetven': 'hetvened',
            'nyolcvan': 'nyolcvanad',
            'kilencven': 'kilencvened',
            'száz': 'század',
            'ezer': 'ezred',
            'illió': 'milliomod',
            'illiárd': 'milliárdod'
        }

    def to_cardinal(self, value, zero=ZERO):
        """Convert `value` to Hungarian words.

        ``zero`` is the word used for 0; internal recursive calls pass
        zero='' so zero-valued parts contribute nothing to the output.
        """
        if int(value) != value:
            return self.to_cardinal_float(value)
        elif value < 0:
            out = self.negword + self.to_cardinal(-value)
        elif value == 0:
            out = zero
        elif zero == '' and value == 2:
            # An embedded "two" (e.g. in 200, 2000) is "két", not "kettő".
            out = 'két'
        elif value < 30:
            out = self.cards[value]
        elif value < 100:
            out = self.tens_to_cardinal(value)
        elif value < 1000:
            out = self.hundreds_to_cardinal(value)
        elif value < 10**6:
            out = self.thousands_to_cardinal(value)
        else:
            out = self.big_number_to_cardinal(value)
        return out

    def tens_to_cardinal(self, value):
        """Words for 30 <= value < 100: tens word + units word, no space."""
        try:
            return self.cards[value]
        except KeyError:
            return self.cards[value // 10 * 10] + self.to_cardinal(value % 10)

    def hundreds_to_cardinal(self, value):
        """Words for 100 <= value < 1000 ("száz" omits an explicit 1)."""
        hundreds = value // 100
        prefix = "száz"
        if hundreds != 1:
            prefix = self.to_cardinal(hundreds, zero="") + prefix
        postfix = self.to_cardinal(value % 100, zero="")
        return prefix + postfix

    def thousands_to_cardinal(self, value):
        """Words for 1000 <= value < 10**6; values above 2000 take a hyphen
        between the thousands part and the remainder."""
        thousands = value // 1000
        prefix = "ezer"
        if thousands != 1:
            prefix = self.to_cardinal(thousands, zero="") + prefix
        postfix = self.to_cardinal(value % 1000, zero="")
        return prefix + ('' if value <= 2000 or not postfix else '-') + postfix

    def big_number_to_cardinal(self, value):
        """Words for value >= 10**6, split at the largest power-of-1000 scale
        word, joining the remainder with a hyphen when non-empty."""
        digits = len(str(value))
        # Round the digit count down so `exp` is the largest power of 1000
        # that still leaves a leading group of 1-3 digits.
        digits = digits if digits % 3 != 0 else digits - 2
        exp = 10 ** (digits // 3 * 3)
        rest = self.to_cardinal(value % exp, '')
        return (self.to_cardinal(value // exp, '') + self.cards[exp]
                + ('-' + rest if rest else ''))

    def to_ordinal(self, value):
        """Hungarian ordinal: 1 and 2 are irregular; otherwise replace the
        final cardinal stem via partial_ords and append "ik"."""
        if value < 0:
            return self.negword + self.to_ordinal(-value)

        if value == 1:
            return 'első'
        elif value == 2:
            return 'második'
        else:
            out = self.to_cardinal(value)
            for card_word, ord_word in self.partial_ords.items():
                if out[-len(card_word):] == card_word:
                    out = out[:-len(card_word)] + ord_word
                    break
            return out + 'ik'

    def to_ordinal_num(self, value):
        """Digit ordinal, e.g. 5 -> "5."."""
        self.verify_ordinal(value)
        return str(value) + '.'

    def to_year(self, val, suffix=None, longval=True):
        # suffix is prefix here
        prefix = ''
        if val < 0 or suffix is not None:
            val = abs(val)
            # NOTE(review): 'i. e.' appears to be the Hungarian BC marker —
            # confirm against the other locales' to_year() conventions.
            prefix = (suffix + ' ' if suffix is not None else 'i. e. ')
        return prefix + self.to_cardinal(val)

    def to_currency(self, val, currency='HUF', cents=True, separator=',',
                    adjective=False):
        """Delegate to the EU base implementation with HUF defaults."""
        return super(Num2Word_HU, self).to_currency(
            val, currency, cents, separator, adjective)

    def to_cardinal_float(self, value):
        """Non-integer values: "<integer part> egész <numerator> <fraction
        stem>", where the stem is the partial_ords entry for 10**len(frac)."""
        if abs(value) != value:
            return self.negword + self.to_cardinal_float(-value)

        left, right = str(value).split('.')
        return (self.to_cardinal(int(left))
                + ' egész '
                + self.to_cardinal(int(right))
                + ' ' + self.partial_ords[self.cards[10 ** len(right)]])
1,059 | expect none | ##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
import sys
import unittest
from ZPublisher.HTTPRangeSupport import expandRanges
from ZPublisher.HTTPRangeSupport import parseRange
class TestRangeHeaderParse(unittest.TestCase):
    """Tests for parseRange(): HTTP Range header string -> list of
    (start, end) tuples with an exclusive end, or None on invalid input."""

    # Utility methods

    def METHOD_NAME(self, header):
        """Assert that parseRange rejects `header` (returns None)."""
        result = parseRange(header)
        self.assertTrue(result is None, 'Expected None, got %r' % result)

    def expectSets(self, header, sets):
        """Assert that parseRange parses `header` into exactly `sets`."""
        result = parseRange(header)
        self.assertTrue(
            result == sets,
            f'Expected {sets!r}, got {result!r}')

    # Syntactically incorrect headers

    def testGarbage(self):
        self.METHOD_NAME('kjahskjhdfkgkjbnbb ehgdk dsahg wlkjew lew\n =lkdskue')

    def testIllegalSpec(self):
        # Only the "bytes" unit is accepted.
        self.METHOD_NAME('notbytes=0-1000')

    def testNoSets(self):
        self.METHOD_NAME('bytes=')

    def testEmptySets(self):
        self.METHOD_NAME('bytes=,,,')

    def testIllegalRange(self):
        self.METHOD_NAME('bytes=foo-bar')

    def testAlmostIntegers(self):
        self.METHOD_NAME('bytes=1.0-2.0')

    def testEndLowerThanStart(self):
        self.METHOD_NAME('bytes=5-4')

    # Correct headers

    def testSimpleRange(self):
        # Header ends are inclusive; parsed ends are exclusive (20 -> 21).
        self.expectSets('bytes=2-20', [(2, 21)])

    def testSimpleRangeAndEmpty(self):
        self.expectSets('bytes=,2-20,', [(2, 21)])

    def testSuffixRange(self):
        # Suffix ranges keep a negative start and an open end.
        self.expectSets('bytes=-100', [(-100, None)])

    def testOpenEnded(self):
        self.expectSets('bytes=100-', [(100, None)])

    def testStartEqualsEnd(self):
        self.expectSets('bytes=100-100', [(100, 101)])

    def testMultiple(self):
        self.expectSets(
            'bytes=-100,,1-2,20-',
            [(-100, None), (1, 3), (20, None)])

    def testFirstByte(self):
        self.expectSets('bytes=0-0', [(0, 1)])

    def testNegativeZero(self):
        # "-0" asks for the last zero bytes: start is pushed past any
        # plausible resource size so the range is unsatisfiable.
        self.expectSets('bytes=-0', [(sys.maxsize, None)])
class TestExpandRanges(unittest.TestCase):
    """Tests for expandRanges(): resolve parsed (start, end) sets against a
    concrete resource size (absolutize negatives, close open ends, clip)."""

    def expectSets(self, sets, size, expect):
        """Assert expandRanges(sets, size) == expect."""
        result = expandRanges(sets, size)
        self.assertTrue(
            result == expect,
            f'Expected {expect!r}, got {result!r}')

    def testExpandOpenEnd(self):
        self.expectSets([(1, 2), (5, None)], 50, [(1, 2), (5, 50)])

    def testMakeAbsolute(self):
        # Negative starts count from the end of the resource.
        self.expectSets([(1, 2), (-5, None)], 50, [(1, 2), (45, 50)])

    def testNoOverlapInOrder(self):
        self.expectSets(
            [(1, 5), (1000, 2000), (3000, None)], 5000,
            [(1, 5), (1000, 2000), (3000, 5000)])

    def testNoOverlapOutOfOrder(self):
        # Input order is preserved; ranges are not sorted.
        self.expectSets(
            [(1000, 2000), (3000, None), (1, 5)], 5000,
            [(1000, 2000), (3000, 5000), (1, 5)])

    def testOverlapInOrder(self):
        # Overlapping ranges are kept as-is, not merged.
        self.expectSets(
            [(1, 10), (8, 20), (25, None)], 5000,
            [(1, 10), (8, 20), (25, 5000)])

    def testOverlapOutOfOrder(self):
        self.expectSets(
            [(25, 50), (8, None), (1, 10)], 5000,
            [(25, 50), (8, 5000), (1, 10)])

    def testAdjacentInOrder(self):
        self.expectSets(
            [(1, 10), (10, 20), (25, 50)], 5000,
            [(1, 10), (10, 20), (25, 50)])

    def testAdjacentOutOfOrder(self):
        self.expectSets([(-5, None), (40, 45)], 50, [(45, 50), (40, 45)])

    def testOverlapAndOverflow(self):
        # Note that one endpoint lies beyond the end; it is clipped to size.
        self.expectSets([(-5, None), (40, 100)], 50, [(45, 50), (40, 50)])

    def testRemoveUnsatisfiable(self):
        # Ranges starting at/after the resource size are dropped entirely.
        self.expectSets([(sys.maxsize, None), (10, 20)], 50, [(10, 20)])
1,060 | bootstrap cc binary | """Macros that implement bootstrapping for the upb code generator."""
load(
"//bazel:upb_proto_library.bzl",
"upb_proto_library",
)
load(
"//cmake:build_defs.bzl",
"staleness_test",
)
_stages = ["_stage0", "_stage1", ""]
_protoc = "@com_google_protobuf//:protoc"
_upbc_base = "//upbc:protoc-gen-upb"
# begin:google_only
# _is_google3 = True
# _extra_proto_path = ""
# end:google_only
# begin:github_only
_is_google3 = False
_extra_proto_path = "-I$$(dirname $(location @com_google_protobuf//:descriptor_proto_srcs))/../.. "
# end:github_only
def _upbc(stage):
    """Returns the label of the upb code generator for a bootstrap stage."""
    suffix = _stages[stage]
    return "{}{}".format(_upbc_base, suffix)
def bootstrap_cc_library(name, visibility, deps, bootstrap_deps, **kwargs):
    """Defines one cc_library() per bootstrap stage.

    Each stage's target gets the stage suffix appended to `name`, and its
    bootstrap_deps are rewritten to that same stage's targets so every stage
    links against same-stage dependencies.  Only the final (un-suffixed)
    library receives the caller's visibility; staged ones are restricted
    to //upbc.
    """
    for stage in _stages:
        stage_visibility = visibility if stage == "" else ["//upbc:__pkg__"]
        native.cc_library(
            name = name + stage,
            deps = deps + [dep + stage for dep in bootstrap_deps],
            visibility = stage_visibility,
            **kwargs
        )
def METHOD_NAME(name, deps, bootstrap_deps, **kwargs):
    """Defines one cc_binary() per bootstrap stage.

    Mirrors bootstrap_cc_library(): bootstrap_deps are suffixed with the
    stage name so each staged binary links against same-stage dependencies.
    """
    for stage in _stages:
        native.cc_binary(
            name = name + stage,
            deps = deps + [dep + stage for dep in bootstrap_deps],
            **kwargs
        )
def _generated_srcs_for_suffix(prefix, srcs, suffix):
    """Maps each .proto path to prefix/<path minus .proto><suffix>."""
    out = []
    for src in srcs:
        stem = src[:-len(".proto")]
        out.append(prefix + "/" + stem + suffix)
    return out
def _generated_srcs(prefix, srcs):
    """Returns the generated .upb.h and .upb.c paths for srcs (headers first)."""
    headers = _generated_srcs_for_suffix(prefix, srcs, ".upb.h")
    sources = _generated_srcs_for_suffix(prefix, srcs, ".upb.c")
    return headers + sources
def _stage0_proto_staleness_test(name, base_dir, src_files, src_rules, strip_prefix):
    """Checks that the checked-in stage0 generated sources are up to date.

    Declares a genrule that regenerates the stage0 .upb.h/.upb.c files with
    the stage0 code generator, and a staleness_test comparing that output to
    the copies committed under <base_dir>stage0.
    """
    native.genrule(
        name = name + "_generate_bootstrap",
        srcs = src_rules,
        outs = _generated_srcs("bootstrap_generated_sources/" + base_dir + "stage0", src_files),
        tools = [_protoc, _upbc(0)],
        cmd =
            "$(location " + _protoc + ") " +
            "-I$(GENDIR)/" + strip_prefix + " " + _extra_proto_path +
            "--plugin=protoc-gen-upb=$(location " + _upbc(0) + ") " +
            "--upb_out=bootstrap_upb:$(@D)/bootstrap_generated_sources/" + base_dir + "stage0 " +
            " ".join(src_files),
    )
    staleness_test(
        name = name + "_staleness_test",
        outs = _generated_srcs(base_dir + "stage0", src_files),
        generated_pattern = "bootstrap_generated_sources/%s",
        target_files = native.glob([base_dir + "stage0/**"]),
        # To avoid skew problems for descriptor.proto/plugin.proto between
        # GitHub repos. It's not critical that the checked-in protos are up to
        # date for every change, they just need to be complete enough to have
        # everything needed by the code generator itself.
        tags = ["manual"],
    )
def bootstrap_upb_proto_library(
        name,
        base_dir,
        google3_src_files,
        google3_src_rules,
        oss_src_files,
        oss_src_rules,
        oss_strip_prefix,
        proto_lib_deps,
        visibility,
        deps = [],
        **kwargs):
    """A version of upb_proto_library() that is augmented to allow for bootstrapping the compiler.

    Args:
        name: Name of this rule.  This name will resolve to a upb_proto_library().
        base_dir: The directory that all generated files should be placed under.
        google3_src_files: Google3 filenames of .proto files that should be built by this rule.
            The names should be relative to the depot base.
        google3_src_rules: Target names of the Blaze rules that will provide these filenames.
        oss_src_files: OSS filenames of .proto files that should be built by this rule.
        oss_src_rules: Target names of the Bazel rules that will provide these filenames.
        oss_strip_prefix: Prefix that should be stripped from OSS file names.
        proto_lib_deps: proto_library() rules that we will use to build the protos when we are
            not bootstrapping.
        visibility: Visibility list for the final upb_proto_library() rule.  Bootstrapping rules
            will always be hidden, and will not honor the visibility parameter passed here.
        deps: other bootstrap_upb_proto_library() rules that this one depends on.
        **kwargs: Other arguments that will be passed through to cc_library(), genrule(), and
            upb_proto_library().
    """
    _stage0_proto_staleness_test(name, base_dir, oss_src_files, oss_src_rules, oss_strip_prefix)

    # stage0 uses checked-in protos.
    native.cc_library(
        name = name + "_stage0",
        srcs = _generated_srcs_for_suffix(base_dir + "stage0", oss_src_files, ".upb.c"),
        hdrs = _generated_srcs_for_suffix(base_dir + "stage0", oss_src_files, ".upb.h"),
        includes = [base_dir + "stage0"],
        visibility = ["//upbc:__pkg__"],
        # This macro signals to the runtime that it must use OSS APIs for descriptor.proto/plugin.proto.
        defines = ["UPB_BOOTSTRAP_STAGE0"],
        deps = [
            "//:generated_code_support__only_for_generated_code_do_not_use__i_give_permission_to_break_me",
            "//:mini_table",
        ] + [dep + "_stage0" for dep in deps],
        **kwargs
    )

    src_files = google3_src_files if _is_google3 else oss_src_files
    src_rules = google3_src_rules if _is_google3 else oss_src_rules

    # Generate stage1 protos using stage0 compiler.
    native.genrule(
        name = "gen_" + name + "_stage1",
        srcs = src_rules,
        outs = _generated_srcs(base_dir + "stage1", src_files),
        cmd = "$(location " + _protoc + ") " +
              "--plugin=protoc-gen-upb=$(location " + _upbc(0) + ") " + _extra_proto_path +
              "--upb_out=$(@D)/" + base_dir + "stage1 " +
              " ".join(src_files),
        visibility = ["//upbc:__pkg__"],
        tools = [
            _protoc,
            _upbc(0),
        ],
        **kwargs
    )

    # stage1 compiles the freshly generated sources (no STAGE0 define).
    native.cc_library(
        name = name + "_stage1",
        srcs = _generated_srcs_for_suffix(base_dir + "stage1", src_files, ".upb.c"),
        hdrs = _generated_srcs_for_suffix(base_dir + "stage1", src_files, ".upb.h"),
        includes = [base_dir + "stage1"],
        visibility = ["//upbc:__pkg__"],
        deps = [
            "//:generated_code_support__only_for_generated_code_do_not_use__i_give_permission_to_break_me",
        ] + [dep + "_stage1" for dep in deps],
        **kwargs
    )

    # The final protos are generated via normal upb_proto_library().
    upb_proto_library(
        name = name,
        deps = proto_lib_deps,
        visibility = visibility,
        **kwargs
    )
1,061 | test random pauli measurement output types | # Copyright (C) Unitary Fund
#
# This source code is licensed under the GPL license (v3) found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for quantum processing functions for classical shadows."""
import importlib
from typing import Callable, List
from unittest.mock import patch
import cirq
import cirq.testing
import pytest
from qiskit_aer import Aer
import mitiq
from mitiq import MeasurementResult
from mitiq.interface.mitiq_cirq.cirq_utils import (
sample_bitstrings as cirq_sample_bitstrings,
)
from mitiq.interface.mitiq_qiskit.conversions import to_qiskit
from mitiq.interface.mitiq_qiskit.qiskit_utils import (
sample_bitstrings as qiskit_sample_bitstrings,
)
from mitiq.shadows.quantum_processing import (
generate_random_pauli_strings,
get_rotated_circuits,
random_pauli_measurement,
)
def test_tqdm_import_available():
    """When tqdm is installed, quantum_processing binds it at import time."""
    tqdm_module = importlib.import_module("tqdm")
    assert tqdm_module is not None
    assert mitiq.shadows.quantum_processing.tqdm is not None
def test_tqdm_import_not_available():
    """quantum_processing degrades gracefully when tqdm is not installed."""
    with patch.dict("sys.modules", {"tqdm": None}):
        importlib.reload(
            mitiq.shadows.quantum_processing
        )  # Reload the module to trigger the guarded import.
        assert mitiq.shadows.quantum_processing.tqdm is None
    # Fix: reload again outside the patch so the module is restored with the
    # real tqdm; previously it was left degraded (tqdm=None) for later tests.
    importlib.reload(mitiq.shadows.quantum_processing)
def test_generate_random_pauli_strings():
    """generate_random_pauli_strings returns well-formed strings and rejects
    negative sizes."""
    num_qubits, num_strings = 5, 10

    strings = generate_random_pauli_strings(num_qubits, num_strings)

    # A list of `num_strings` strings, each `num_qubits` characters drawn
    # from {X, Y, Z}.
    assert isinstance(strings, List)
    assert len(strings) == num_strings
    for pauli_string in strings:
        assert len(pauli_string) == num_qubits
        assert set(pauli_string) <= {"X", "Y", "Z"}

    # Negative num_qubits or num_strings must be rejected.
    for bad_args in [(-1, num_strings), (num_qubits, -1)]:
        with pytest.raises(ValueError):
            generate_random_pauli_strings(*bad_args)
def cirq_executor(circuit: cirq.Circuit) -> MeasurementResult:
    """Sample one noiseless bitstring from `circuit` via the Cirq simulator."""
    return cirq_sample_bitstrings(
        circuit,
        noise_level=(0,),
        shots=1,
        sampler=cirq.Simulator(),
    )
def qiskit_executor(circuit: cirq.Circuit) -> MeasurementResult:
    """Sample one noiseless bitstring from `circuit` on the Qiskit Aer
    simulator; the circuit is converted from Cirq to Qiskit first."""
    return qiskit_sample_bitstrings(
        to_qiskit(circuit),
        noise_model=None,
        backend=Aer.get_backend("aer_simulator"),
        shots=1,
        measure_all=False,
    )
def test_get_rotated_circuits():
    """Tests that the circuit is rotated into the requested Pauli bases."""
    # define circuit: H on qubit 0 followed by a CNOT chain over 4 qubits.
    circuit = cirq.Circuit()
    qubits = cirq.LineQubit.range(4)
    circuit.append(cirq.H(qubits[0]))
    circuit.append(cirq.CNOT(qubits[0], qubits[1]))
    circuit.append(cirq.CNOT(qubits[1], qubits[2]))
    circuit.append(cirq.CNOT(qubits[2], qubits[3]))
    # define the pauli measurements to be performed on the circuit
    pauli_strings = ["XYZX"]
    # Rotate the circuit.
    rotated_circuits = get_rotated_circuits(circuit, pauli_strings)
    # Verify that the circuit was rotated: per the expected circuit below,
    # an X basis adds H, a Y basis adds S**-1 then H, a Z basis adds no
    # gate, and a terminal measurement covers all qubits.
    circuit_1 = circuit.copy()
    circuit_1.append(cirq.H(qubits[0]))
    circuit_1.append(cirq.S(qubits[1]) ** -1)
    circuit_1.append(cirq.H(qubits[1]))
    circuit_1.append(cirq.H(qubits[3]))
    circuit_1.append(cirq.measure(*qubits))
    assert rotated_circuits[0] == circuit_1
    for rc in rotated_circuits:
        assert isinstance(rc, cirq.Circuit)
# Shared helper: an n-qubit circuit of Hadamards followed by a CNOT chain.
def simple_test_circuit(qubits):
    circ = cirq.Circuit(cirq.H.on_each(*qubits))
    for control, target in zip(qubits, qubits[1:]):
        circ.append(cirq.CNOT(control, target))
    return circ
@pytest.mark.parametrize("n_qubits", [1, 2, 5])
@pytest.mark.parametrize("executor", [cirq_executor, qiskit_executor])
def test_random_pauli_measurement_no_errors(n_qubits, executor):
    """Test that random_pauli_measurement runs without errors."""
    qubits = cirq.LineQubit.range(n_qubits)
    circuit = simple_test_circuit(qubits)
    # Smoke test only: no assertions, just no exceptions for either executor.
    random_pauli_measurement(
        circuit, n_total_measurements=10, executor=executor
    )
@pytest.mark.parametrize("n_qubits", [1, 2, 5])
@pytest.mark.parametrize("executor", [cirq_executor, qiskit_executor])
def test_random_pauli_measurement_output_dimensions(
    n_qubits: int, executor: Callable
):
    """Test that random_pauli_measurement returns the correct output
    dimensions."""
    qubits = cirq.LineQubit.range(n_qubits)
    circuit = simple_test_circuit(qubits)
    n_total_measurements = 10
    shadow_outcomes, pauli_strings = random_pauli_measurement(
        circuit, n_total_measurements, executor=executor
    )
    # Both outputs: one entry per measurement, one character per qubit.
    shadow_outcomes_shape = len(shadow_outcomes), len(shadow_outcomes[0])
    pauli_strings_shape = len(pauli_strings), len(pauli_strings[0])
    assert shadow_outcomes_shape == (n_total_measurements, n_qubits), (
        f"Shadow outcomes have incorrect shape, expected "
        f"{(n_total_measurements, n_qubits)}, got {shadow_outcomes_shape}"
    )
    assert pauli_strings_shape == (n_total_measurements, n_qubits), (
        f"Pauli strings have incorrect shape, expected "
        f"{(n_total_measurements, n_qubits)}, got {pauli_strings_shape}"
    )
@pytest.mark.parametrize("n_qubits", [1, 2, 5])
@pytest.mark.parametrize("executor", [cirq_executor, qiskit_executor])
def METHOD_NAME(
    n_qubits: int, executor: Callable
):
    """Test that random_pauli_measurement returns string-typed outputs."""
    circuit = simple_test_circuit(cirq.LineQubit.range(n_qubits))
    outcomes, paulis = random_pauli_measurement(
        circuit, n_total_measurements=10, executor=executor
    )
    for first_entry in (outcomes[0], paulis[0]):
        assert isinstance(first_entry, str)
1,062 | mock recovery dialog | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2009- Spyder Project Contributors
#
# Distributed under the terms of the MIT License
# (see spyder/__init__.py for details)
# -----------------------------------------------------------------------------
"""Fixtures for the Editor plugin tests."""
import os.path as osp
from unittest.mock import MagicMock, Mock
from spyder.api.plugins import Plugins
from spyder.utils.qthelpers import qapplication
# This is needed to avoid an error because QtAwesome
# needs a QApplication to work correctly.
app = qapplication()
from qtpy.QtWidgets import QMainWindow
import pytest
from spyder.config.manager import CONF
from spyder.plugins.editor.plugin import Editor
@pytest.fixture
def METHOD_NAME(monkeypatch):
    """Patch RecoveryDialog with a MagicMock and hand the mock to the test."""
    dialog_mock = MagicMock()
    monkeypatch.setattr(
        'spyder.plugins.editor.utils.autosave.RecoveryDialog', dialog_mock)
    return dialog_mock
@pytest.fixture
def editor_plugin(qtbot, monkeypatch):
    """Set up the Editor plugin inside a mocked main window; yields the
    plugin and cleans up editor state on teardown."""
    monkeypatch.setattr('spyder.plugins.editor.plugin.add_actions', Mock())

    class MainMock(QMainWindow):
        # Return [] for any '*actions' attribute, a fresh Mock otherwise.
        def __getattr__(self, attr):
            if attr.endswith('actions'):
                return []
            else:
                return Mock()

        def get_plugin(self, plugin_name, error=True):
            # These plugins are reported as absent so the Editor exercises
            # its "plugin unavailable" code paths.
            if plugin_name in [
                    Plugins.IPythonConsole,
                    Plugins.Projects,
                    Plugins.Debugger]:
                return None
            else:
                return Mock()

    window = MainMock()
    editor = Editor(window)
    window.setCentralWidget(editor)
    window.resize(640, 480)
    qtbot.addWidget(window)
    window.show()

    yield editor

    # Teardown: close the editor and drop autosave state left in CONF.
    editor.close()
    CONF.remove_option('editor', 'autosave_mapping')
@pytest.fixture(scope="module")
def python_files(tmpdir_factory):
    """Create and save some python codes in temporary files.

    Returns:
        tuple: ``(filenames, tmpdir)`` — the absolute, case-normalized paths
        of the created files and their containing directory.
    """
    tmpdir = tmpdir_factory.mktemp("files")
    # normcase keeps path comparisons stable on case-insensitive filesystems.
    tmpdir = osp.normcase(tmpdir.strpath)

    filenames = [osp.join(tmpdir, f) for f in
                 ('file1.py', 'file2.py', 'file3.py', 'file4.py',
                  'untitled4.py')]
    for filename in filenames:
        # Fix: write with an explicit encoding so the files' declared utf-8
        # coding cookie matches the on-disk encoding on every platform
        # (previously the locale-dependent default encoding was used).
        with open(filename, 'w', newline='', encoding='utf-8') as f:
            f.write("# -*- coding: utf-8 -*-\n"
                    "print('Hello World!')\n")

    return filenames, tmpdir
@pytest.fixture
def editor_plugin_open_files(request, editor_plugin, python_files):
    """
    Setup an Editor with a set of open files, given a past file in focus.

    If no/None ``last_focused_filename`` is passed, the ``"layout_settings"``
    key is not included in the options dict.
    If no/None ``expected_current_filename``, is assumed to be the first file.
    """
    def _get_editor_open_files(last_focused_filename,
                               expected_current_filename):
        editor = editor_plugin
        expected_filenames, tmpdir = python_files
        if expected_current_filename is None:
            expected_current_filename = expected_filenames[0]
        expected_current_filename = osp.join(tmpdir, expected_current_filename)

        options_dict = {
            # For tests
            'filenames': expected_filenames,
            'max_recent_files': 20,
            # To make tests pass
            'indent_chars': '* *',
            'show_tab_bar': True,
            'code_folding': True,
            'edge_line': True,
            'indent_guides': False,
            'scroll_past_end': False,
            'line_numbers': True,
            'occurrence_highlighting/timeout': 1500,
            'tab_stop_width_spaces': 4,
            'show_class_func_dropdown': False,
        }
        if last_focused_filename is not None:
            # Focus is recorded in the split-layout settings; mark the
            # last-focused file there so the editor restores it.
            splitsettings = [(False,
                              osp.join(tmpdir, last_focused_filename),
                              [1] * len(expected_filenames))]
            layout_dict = {'layout_settings': {'splitsettings': splitsettings}}
            options_dict.update(layout_dict)

        # Patch the plugin's option accessors to read/write this dict
        # instead of the real configuration system.
        def get_option(option, default=None):
            return options_dict.get(option)
        def set_option(option, value):
            options_dict[option] = value
        editor.get_option = get_option
        editor.set_option = set_option

        editor.setup_open_files()
        return editor, expected_filenames, expected_current_filename

    return _get_editor_open_files
1,063 | prop descriptions | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Line(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "scattergeo"
_path_str = "scattergeo.line"
_valid_props = {"color", "dash", "width"}
# color
# -----
@property
def color(self):
"""
Sets the line color.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# dash
# ----
@property
def dash(self):
"""
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
The 'dash' property is an enumeration that may be specified as:
- One of the following dash styles:
['solid', 'dot', 'dash', 'longdash', 'dashdot', 'longdashdot']
- A string containing a dash length list in pixels or percentages
(e.g. '5px 10px 2px 2px', '5, 10, 2, 2', '10% 20% 40%', etc.)
Returns
-------
str
"""
return self["dash"]
@dash.setter
def dash(self, val):
self["dash"] = val
# width
# -----
@property
def width(self):
"""
Sets the line width (in px).
The 'width' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["width"]
@width.setter
def width(self, val):
self["width"] = val
    # Self properties description
    # ---------------------------
    @property
    def METHOD_NAME(self):
        # Human-readable summary of this object's settable properties;
        # consumed when composing docstrings for the parent graph object.
        return """\
        color
            Sets the line color.
        dash
            Sets the dash style of lines. Set to a dash type string
            ("solid", "dot", "dash", "longdash", "dashdot", or
            "longdashdot") or a dash length list in px (eg
            "5px,10px,2px,2px").
        width
            Sets the line width (in px).
        """
def __init__(self, arg=None, color=None, dash=None, width=None, **kwargs):
"""
Construct a new Line object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scattergeo.Line`
color
Sets the line color.
dash
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
width
Sets the line width (in px).
Returns
-------
Line
"""
super(Line, self).__init__("line")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scattergeo.Line
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattergeo.Line`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("dash", None)
_v = dash if dash is not None else _v
if _v is not None:
self["dash"] = _v
_v = arg.pop("width", None)
_v = width if width is not None else _v
if _v is not None:
self["width"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False |
from collections import ChainMap
from .LoopIR import LoopIR
from .memory import DRAM
# --------------------------------------------------------------------------- #
# --------------------------------------------------------------------------- #
# Memory Analysis Pass
class MemoryAnalysis:
    """Insert LoopIR.Free statements for every allocation in a proc.

    Statement lists are walked in reverse while tracking, per lexical
    scope, the allocations that still need a Free (``tofree``) and the
    memory space of every visible buffer (``mem_env``).  The first
    statement encountered in the reverse scan that uses an allocation is
    its last use in program order, so the Free is emitted right after it.
    """

    def __init__(self):
        # Scoped mapping: buffer symbol -> Memory class it resides in.
        self.mem_env = ChainMap()
        # Stack of scopes; each entry lists (sym, type, mem) allocations
        # awaiting a matching Free.
        self.tofree = []

    def run(self, proc):
        """Return a copy of ``proc`` whose body has frees inserted."""
        assert isinstance(proc, LoopIR.proc)
        self.mem_env = ChainMap()
        self.tofree = []
        for a in proc.args:
            if a.type.is_numeric():
                # Numeric arguments without an explicit memory default to DRAM.
                mem = a.mem if a.mem else DRAM
                self.mem_env[a.name] = mem
        self.push()
        body = self.mem_stmts(proc.body)
        self.pop()
        assert len(self.tofree) == 0
        return LoopIR.proc(
            proc.name,
            proc.args,
            proc.preds,
            body,
            proc.instr,
            proc.eff,
            proc.srcinfo,
        )

    def push(self):
        """Enter a new lexical scope."""
        self.mem_env = self.mem_env.new_child()
        self.tofree.append([])

    def pop(self):
        """Leave the current scope; its allocations must all be freed."""
        self.mem_env = self.mem_env.parents
        assert len(self.tofree[-1]) == 0
        self.tofree.pop()

    def add_malloc(self, sym, typ, mem):
        """Record an allocation in the current scope, pending a Free."""
        assert isinstance(self.tofree[-1], list)
        assert isinstance((sym, typ, mem), tuple)
        self.tofree[-1].append((sym, typ, mem))

    def mem_stmts(self, stmts):
        """Process a statement list, inserting frees after last uses."""
        if len(stmts) == 0:
            return stmts

        def used_e(e):
            # Buffer/window symbols read by expression `e`.
            res = []
            if isinstance(e, LoopIR.Read):
                res += [e.name]
                for ei in e.idx:
                    res += used_e(ei)
            elif isinstance(e, LoopIR.USub):
                res += used_e(e.arg)
            elif isinstance(e, LoopIR.BinOp):
                res += used_e(e.lhs)
                res += used_e(e.rhs)
            elif isinstance(e, LoopIR.BuiltIn):
                for ei in e.args:
                    res += used_e(ei)
            elif isinstance(e, (LoopIR.WindowExpr, LoopIR.StrideExpr)):
                res += [e.name]
            return res

        def used_s(s):
            # Symbols referenced (read or written) by statement `s`,
            # recursing into nested bodies.
            res = []
            if isinstance(s, (LoopIR.Assign, LoopIR.Reduce)):
                res += [s.name]
                res += used_e(s.rhs)
            elif isinstance(s, LoopIR.WriteConfig):
                res += used_e(s.rhs)
            elif isinstance(s, LoopIR.If):
                res += used_e(s.cond)
                for b in s.body:
                    res += used_s(b)
                for b in s.orelse:
                    res += used_s(b)
            elif isinstance(s, LoopIR.Seq):
                for b in s.body:
                    res += used_s(b)
            elif isinstance(s, LoopIR.Alloc):
                res += [s.name]
            elif isinstance(s, LoopIR.Call):
                for e in s.args:
                    res += used_e(e)
            elif isinstance(s, LoopIR.WindowStmt):
                res += used_e(s.rhs)
            return res

        body = []
        # Reverse scan: the first statement seen to use an allocation is
        # its last use in program order; emit the Free right after it.
        for b in reversed([self.mem_s(b) for b in stmts]):
            used = used_s(b)
            rm = []
            for (nm, typ, mem) in self.tofree[-1]:
                if nm in used:
                    rm += [(nm, typ, mem)]
            for (nm, typ, mem) in rm:
                body += [LoopIR.Free(nm, typ, mem, None, b.srcinfo)]
                self.tofree[-1].remove((nm, typ, mem))
            body += [b]
        return list(reversed(body))

    def get_e_mem(self, e):
        """Memory space of the buffer an expression refers to."""
        if isinstance(e, (LoopIR.WindowExpr, LoopIR.Read)):
            return self.mem_env[e.name]
        else:
            assert False

    def mem_s(self, s):
        """Process a single statement, recursing into nested bodies."""
        styp = type(s)
        if (
            styp is LoopIR.Pass
            or styp is LoopIR.Assign
            or styp is LoopIR.Reduce
            or styp is LoopIR.WriteConfig
        ):
            return s
        elif styp is LoopIR.WindowStmt:
            # A window aliases the memory of its right-hand-side buffer.
            mem = self.get_e_mem(s.rhs)
            self.mem_env[s.lhs] = mem
            return s
        elif styp is LoopIR.Call:
            # check memory consistency at call boundaries
            for ca, sa in zip(s.args, s.f.args):
                if sa.type.is_numeric():
                    smem = sa.mem if sa.mem else DRAM
                    cmem = self.get_e_mem(ca)
                    if not issubclass(cmem, smem):
                        raise TypeError(
                            f"{ca.srcinfo}: expected "
                            f"argument in {smem.name()} but got an "
                            f"argument in {cmem.name()}"
                        )
            return s
        elif styp is LoopIR.If:
            self.push()
            body = self.mem_stmts(s.body)
            self.pop()
            self.push()
            ebody = self.mem_stmts(s.orelse)
            self.pop()
            return LoopIR.If(s.cond, body, ebody, None, s.srcinfo)
        elif styp is LoopIR.Seq:
            self.push()
            body = self.mem_stmts(s.body)
            self.pop()
            return s.update(body=body)
        elif styp is LoopIR.Alloc:
            mem = s.mem if s.mem else DRAM
            self.mem_env[s.name] = mem
            # NOTE(review): records s.mem (possibly None) rather than the
            # DRAM-defaulted `mem` above, so a Free may carry mem=None --
            # confirm this asymmetry is intended.
            self.add_malloc(s.name, s.type, s.mem)
            return s
        elif styp is LoopIR.Free:
            assert False, "There should not be frees inserted before mem analysis"
        else:
            assert False, f"bad case {styp}"
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Port of NNVM version of MobileNet to Relay.
"""
# pylint: disable=invalid-name
from tvm import relay
from . import layers
from .init import create_workload
def METHOD_NAME(
    data,
    name,
    channels,
    kernel_size=(3, 3),
    strides=(1, 1),
    padding=(1, 1),
    epsilon=1e-5,
    layout="NCHW",
):
    """Build a convolution -> batch-norm -> ReLU block named after ``name``."""
    conv_out = layers.conv2d(
        data=data,
        channels=channels,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        data_layout=layout,
        kernel_layout=layers.conv_kernel_layout(layout),
        name=name + "_conv",
    )
    norm_out = layers.batch_norm_infer(data=conv_out, epsilon=epsilon, name=name + "_bn")
    return relay.nn.relu(data=norm_out)
def separable_conv_block(
    data,
    name,
    depthwise_channels,
    pointwise_channels,
    kernel_size=(3, 3),
    downsample=False,
    padding=(1, 1),
    epsilon=1e-5,
    layout="NCHW",
    dtype="float32",
):
    """Build a depthwise-separable conv block: DW conv+BN+ReLU, PW conv+BN+ReLU."""
    strides = (2, 2) if downsample else (1, 1)
    # Depthwise weight shape depends on the data layout.
    if layout == "NCHW":
        wshape = (depthwise_channels, 1) + kernel_size
    elif layout == "NHWC":
        wshape = kernel_size + (depthwise_channels, 1)
    else:
        raise ValueError("Invalid layout: " + layout)
    bn_axis = layout.index("C")
    dw_weight = relay.var(name + "_weight", shape=wshape, dtype=dtype)
    # Depthwise convolution (groups == channels) + BN + ReLU.
    depthwise = layers.conv2d(
        data=data,
        weight=dw_weight,
        channels=depthwise_channels,
        groups=depthwise_channels,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        data_layout=layout,
        kernel_layout=layers.conv_kernel_layout(layout, True),
        name=name + "_depthwise_conv1",
    )
    dw_act = relay.nn.relu(
        data=layers.batch_norm_infer(
            data=depthwise, epsilon=epsilon, axis=bn_axis, name=name + "_bn1"
        )
    )
    # 1x1 pointwise convolution + BN + ReLU.
    pointwise = layers.conv2d(
        data=dw_act,
        channels=pointwise_channels,
        kernel_size=(1, 1),
        strides=(1, 1),
        padding=(0, 0),
        data_layout=layout,
        kernel_layout=layers.conv_kernel_layout(layout),
        name=name + "_conv2",
    )
    return relay.nn.relu(
        data=layers.batch_norm_infer(
            data=pointwise, epsilon=epsilon, axis=bn_axis, name=name + "_bn2"
        )
    )
def mobile_net(
    num_classes=1000,
    data_shape=(1, 3, 224, 224),
    dtype="float32",
    alpha=1.0,
    is_shallow=False,
    layout="NCHW",
):
    """Construct a MobileNet body from a table of separable-conv specs."""
    data = relay.var("data", shape=data_shape, dtype=dtype)
    body = METHOD_NAME(data, "conv_block_1", int(32 * alpha), strides=(2, 2), layout=layout)
    # (block index, depthwise channels, pointwise channels, downsample?)
    specs = [
        (1, 32, 64, False),
        (2, 64, 128, True),
        (3, 128, 128, False),
        (4, 128, 256, True),
        (5, 256, 256, False),
        (6, 256, 512, True),
    ]
    if is_shallow:
        specs += [(7, 512, 1024, True), (8, 1024, 1024, True)]
    else:
        specs += [(i, 512, 512, False) for i in range(7, 12)]
        specs += [(12, 512, 1024, True), (13, 1024, 1024, False)]
    for idx, dw, pw, down in specs:
        body = separable_conv_block(
            body,
            f"separable_conv_block_{idx}",
            int(dw * alpha),
            int(pw * alpha),
            downsample=down,
            layout=layout,
            dtype=dtype,
        )
    # Classifier head: global average pool -> flatten -> dense -> softmax.
    pool = relay.nn.global_avg_pool2d(data=body, layout=layout)
    flat = relay.nn.batch_flatten(data=pool)
    weight = relay.var("fc_weight")
    bias = relay.var("fc_bias")
    fc = relay.nn.dense(data=flat, weight=weight, units=num_classes)
    fc = relay.nn.bias_add(fc, bias)
    softmax = relay.nn.softmax(data=fc)
    return relay.Function(relay.analysis.free_vars(softmax), softmax)
def get_workload(
    batch_size=1, num_classes=1000, image_shape=(3, 224, 224), dtype="float32", layout="NCHW"
):
    """Get benchmark workload for mobilenet

    Parameters
    ----------
    batch_size : int, optional
        The batch size used in the model

    num_classes : int, optional
        Number of classes

    image_shape : tuple, optional
        The input image shape, cooperate with layout

    dtype : str, optional
        The data type

    layout : str, optional
        The data layout of image_shape and the operators
        cooperate with image_shape

    Returns
    -------
    mod : tvm.IRModule
        The relay module that contains a MobileNet network.

    params : dict of str to NDArray
        The parameters.
    """
    # Prepend the batch dimension to the per-image shape.
    data_shape = (batch_size, *image_shape)
    net = mobile_net(
        num_classes=num_classes,
        data_shape=data_shape,
        dtype=dtype,
        alpha=1.0,
        is_shallow=False,
        layout=layout,
    )
    return create_workload(net)
import pytest
import yaml
import copier
from copier.types import AnyByStrDict
from .helpers import build_file_tree
@pytest.mark.parametrize(
    "subdir, settings",
    [
        (
            "",
            {
                "_exclude": ["includes"],
            },
        ),
        (
            "template",
            {
                "_subdirectory": "template",
            },
        ),
    ],
)
def test_include(
    tmp_path_factory: pytest.TempPathFactory, subdir: str, settings: AnyByStrDict
) -> None:
    """Jinja ``include`` resolves in copier.yml defaults, file content,
    file names, and folder names, for both template layouts (root with
    ``_exclude`` and dedicated ``_subdirectory``)."""
    src, dst = map(tmp_path_factory.mktemp, ("src", "dst"))
    build_file_tree(
        {
            (src / "copier.yml"): yaml.safe_dump(
                {
                    **settings,
                    "name": {
                        "type": "str",
                        "default": "The Name",
                    },
                    "slug": {
                        "type": "str",
                        "default": "{% include 'includes/name-slug.jinja' %}",
                    },
                }
            ),
            # Shared include that slugifies the `name` answer.
            (src / "includes" / "name-slug.jinja"): (
                "{{ name|lower|replace(' ', '-') }}"
            ),
            # File for testing the Jinja include statement in `copier.yml`.
            (src / subdir / "slug-answer.txt.jinja"): "{{ slug }}",
            # File for testing the Jinja include statement as content.
            (src / subdir / "slug-from-include.txt.jinja"): (
                "{% include 'includes/name-slug.jinja' %}"
            ),
            # File for testing the Jinja include statement in the file name.
            (
                src
                / subdir
                / "{% include pathjoin('includes', 'name-slug.jinja') %}.txt"
            ): "",
            # File for testing the Jinja include statement in the folder name.
            (
                src
                / subdir
                / "{% include pathjoin('includes', 'name-slug.jinja') %}"
                / "test.txt"
            ): "",
        }
    )
    copier.run_copy(str(src), dst, defaults=True)
    # Rendered outputs exist and the includes directory itself is excluded.
    assert (dst / "slug-answer.txt").read_text() == "the-name"
    assert (dst / "slug-from-include.txt").read_text() == "the-name"
    assert (dst / "the-name.txt").exists()
    assert (dst / "the-name" / "test.txt").exists()
    assert not (dst / "includes").exists()
@pytest.mark.parametrize(
    "subdir, settings",
    [
        (
            "",
            {
                "_exclude": ["includes"],
            },
        ),
        (
            "template",
            {
                "_subdirectory": "template",
            },
        ),
    ],
)
def METHOD_NAME(
    tmp_path_factory: pytest.TempPathFactory, subdir: str, settings: AnyByStrDict
) -> None:
    """Jinja ``from ... import`` macros resolve in copier.yml defaults,
    file content, file names, and folder names, for both template layouts."""
    src, dst = map(tmp_path_factory.mktemp, ("src", "dst"))
    build_file_tree(
        {
            (src / "copier.yml"): yaml.safe_dump(
                {
                    **settings,
                    "name": {
                        "type": "str",
                        "default": "The Name",
                    },
                    "slug": {
                        "type": "str",
                        "default": (
                            "{% from 'includes/slugify.jinja' import slugify %}"
                            "{{ slugify(name) }}"
                        ),
                    },
                }
            ),
            # Shared macro that slugifies the `name` answer.
            (src / "includes" / "slugify.jinja"): (
                """\
                {% macro slugify(value) -%}
                {{ value|lower|replace(' ', '-') }}
                {%- endmacro %}
                """
            ),
            # File for testing the Jinja import statement in `copier.yml`.
            (src / subdir / "slug-answer.txt.jinja"): "{{ slug }}",
            # File for testing the Jinja import statement as content.
            (src / subdir / "slug-from-macro.txt.jinja"): (
                "{% from 'includes/slugify.jinja' import slugify %}"
                "{{ slugify(name) }}"
            ),
            # File for testing the Jinja import statement in the file name.
            (
                src
                / subdir
                / "{% from pathjoin('includes', 'slugify.jinja') import slugify %}{{ slugify(name) }}.txt"
            ): "",
            # File for testing the Jinja import statement in the folder name.
            (
                src
                / subdir
                / "{% from pathjoin('includes', 'slugify.jinja') import slugify %}{{ slugify(name) }}"
                / "test.txt"
            ): "",
        }
    )
    copier.run_copy(str(src), dst, defaults=True)
    # Rendered outputs exist and the includes directory itself is excluded.
    assert (dst / "slug-answer.txt").read_text() == "the-name"
    assert (dst / "slug-from-macro.txt").read_text() == "the-name"
    assert (dst / "the-name.txt").exists()
    assert (dst / "the-name" / "test.txt").exists()
    assert not (dst / "includes").exists()
"""Base vector store index query."""
from typing import Any, Dict, List, Optional
from llama_index.constants import DEFAULT_SIMILARITY_TOP_K
from llama_index.data_structs.data_structs import IndexDict
from llama_index.indices.base_retriever import BaseRetriever
from llama_index.indices.query.schema import QueryBundle
from llama_index.indices.utils import log_vector_store_query_result
from llama_index.indices.vector_store.base import VectorStoreIndex
from llama_index.schema import NodeWithScore, ObjectType
from llama_index.vector_stores.types import (
MetadataFilters,
VectorStoreQuery,
VectorStoreQueryMode,
VectorStoreQueryResult,
)
class VectorIndexRetriever(BaseRetriever):
    """Vector index retriever.

    Args:
        index (VectorStoreIndex): vector store index.
        similarity_top_k (int): number of top k results to return.
        vector_store_query_mode (str): vector store query mode
            See reference for VectorStoreQueryMode for full list of supported modes.
        filters (Optional[MetadataFilters]): metadata filters, defaults to None
        alpha (float): weight for sparse/dense retrieval, only used for
            hybrid query mode.
        node_ids (Optional[List[str]]): list of nodes to constrain search.
        doc_ids (Optional[List[str]]): list of documents to constrain search.
        sparse_top_k (Optional[int]): top k for the sparse leg of a hybrid query.
        vector_store_kwargs (dict): Additional vector store specific kwargs to pass
            through to the vector store at query time.
    """

    def __init__(
        self,
        index: VectorStoreIndex,
        similarity_top_k: int = DEFAULT_SIMILARITY_TOP_K,
        vector_store_query_mode: VectorStoreQueryMode = VectorStoreQueryMode.DEFAULT,
        filters: Optional[MetadataFilters] = None,
        alpha: Optional[float] = None,
        node_ids: Optional[List[str]] = None,
        doc_ids: Optional[List[str]] = None,
        sparse_top_k: Optional[int] = None,
        **kwargs: Any,
    ) -> None:
        """Initialize params."""
        self._index = index
        self._vector_store = self._index.vector_store
        self._service_context = self._index.service_context
        self._docstore = self._index.docstore
        self._similarity_top_k = similarity_top_k
        # Normalize to the enum so plain strings are accepted too.
        self._vector_store_query_mode = VectorStoreQueryMode(vector_store_query_mode)
        self._alpha = alpha
        self._node_ids = node_ids
        self._doc_ids = doc_ids
        self._filters = filters
        self._sparse_top_k = sparse_top_k
        self._kwargs: Dict[str, Any] = kwargs.get("vector_store_kwargs", {})

    @property
    def similarity_top_k(self) -> int:
        """Return similarity top k."""
        return self._similarity_top_k

    @similarity_top_k.setter
    def similarity_top_k(self, similarity_top_k: int) -> None:
        """Set similarity top k."""
        self._similarity_top_k = similarity_top_k

    def _retrieve(
        self,
        query_bundle: QueryBundle,
    ) -> List[NodeWithScore]:
        if self._vector_store.is_embedding_query:
            # Lazily embed the query only when the store needs embeddings.
            if query_bundle.embedding is None:
                query_bundle.embedding = (
                    self._service_context.embed_model.get_agg_embedding_from_queries(
                        query_bundle.embedding_strs
                    )
                )
        return self._get_nodes_with_embeddings(query_bundle)

    async def _aretrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
        if self._vector_store.is_embedding_query:
            if query_bundle.embedding is None:
                embed_model = self._service_context.embed_model
                query_bundle.embedding = (
                    await embed_model.aget_agg_embedding_from_queries(
                        query_bundle.embedding_strs
                    )
                )
        return await self._aget_nodes_with_embeddings(query_bundle)

    def METHOD_NAME(
        self, query_bundle_with_embeddings: QueryBundle
    ) -> VectorStoreQuery:
        """Translate a query bundle plus retriever config into a VectorStoreQuery."""
        return VectorStoreQuery(
            query_embedding=query_bundle_with_embeddings.embedding,
            similarity_top_k=self._similarity_top_k,
            node_ids=self._node_ids,
            doc_ids=self._doc_ids,
            query_str=query_bundle_with_embeddings.query_str,
            mode=self._vector_store_query_mode,
            alpha=self._alpha,
            filters=self._filters,
            sparse_top_k=self._sparse_top_k,
        )

    def _build_node_list_from_query_result(
        self, query_result: VectorStoreQueryResult
    ) -> List[NodeWithScore]:
        """Resolve a raw query result into scored nodes, via the docstore if needed."""
        if query_result.nodes is None:
            # NOTE: vector store does not keep text and returns node indices.
            # Need to recover all nodes from docstore
            if query_result.ids is None:
                raise ValueError(
                    "Vector store query result should return at "
                    "least one of nodes or ids."
                )
            assert isinstance(self._index.index_struct, IndexDict)
            node_ids = [
                self._index.index_struct.nodes_dict[idx] for idx in query_result.ids
            ]
            nodes = self._docstore.get_nodes(node_ids)
            query_result.nodes = nodes
        else:
            # NOTE: vector store keeps text, returns nodes.
            # Only need to recover image or index nodes from docstore
            for i in range(len(query_result.nodes)):
                source_node = query_result.nodes[i].source_node
                if (not self._vector_store.stores_text) or (
                    source_node is not None and source_node.node_type != ObjectType.TEXT
                ):
                    node_id = query_result.nodes[i].node_id
                    if node_id in self._docstore.docs:
                        query_result.nodes[i] = self._docstore.get_node(  # type: ignore[index]
                            node_id
                        )

        log_vector_store_query_result(query_result)

        node_with_scores: List[NodeWithScore] = []
        for ind, node in enumerate(query_result.nodes):
            score: Optional[float] = None
            if query_result.similarities is not None:
                score = query_result.similarities[ind]
            node_with_scores.append(NodeWithScore(node=node, score=score))
        return node_with_scores

    def _get_nodes_with_embeddings(
        self, query_bundle_with_embeddings: QueryBundle
    ) -> List[NodeWithScore]:
        query = self.METHOD_NAME(query_bundle_with_embeddings)
        query_result = self._vector_store.query(query, **self._kwargs)
        return self._build_node_list_from_query_result(query_result)

    async def _aget_nodes_with_embeddings(
        self, query_bundle_with_embeddings: QueryBundle
    ) -> List[NodeWithScore]:
        query = self.METHOD_NAME(query_bundle_with_embeddings)
        query_result = await self._vector_store.aquery(query, **self._kwargs)
        return self._build_node_list_from_query_result(query_result)
# EPA_FactsAndFigures.py (scripts)
# !/usr/bin/env python3
# coding=utf-8
"""
Scrapes data from EPA's Facts and Figures Data table PDF. Includes
supporting functions.
"""
import io
from tabula.io import read_pdf
import pandas as pd
import numpy as np
from flowsa.location import US_FIPS
from flowsa.flowbyfunctions import assign_fips_location_system
def ff_call(*, resp, year, **_):
    """
    Convert the downloaded Facts & Figures PDF into a pandas dataframe,
    beginning to parse it into FBA format.
    :param resp: response object from the url call; ``resp.content`` holds
        the raw PDF bytes
    :param year: str, data year ('2018' is the only year with page numbers
        defined here)
    :return: pandas dataframe of original source data
    """
    if year == '2018':
        pages = [6, 8, 9]
    pdf_pages = []
    for page_number in pages:
        # Extract the data table from a single PDF page with tabula.
        pdf_page = read_pdf(io.BytesIO(resp.content),
                            pages=page_number,
                            stream=True,
                            guess=True)[0]
        if page_number == 6:
            # skip the first few rows
            pg = pdf_page.loc[2:33].reset_index(drop=True)
            # assign column headers
            pg.columns = pdf_page.loc[0, ]
            pg.columns.values[0] = "FlowName"
            # normalize en-dashes to plain hyphens
            pg['FlowName'] = pg['FlowName'].str.replace("–", "-")
            # split column
            pg[['2000', '2005']] = \
                pg['2000 2005'].str.split(' ', expand=True)
            pg = pg.drop(columns=['2000 2005'])
            # manually address errors generated in df generation - correct 2018
            # values for other food management
            pg.loc[24, "2018"] = "1840"
            pg.loc[26, "2018"] = "5260"
            pg.loc[30, "2018"] = "3740"
            # drop rows with na for 2018
            pg = pg.dropna(subset=['2018']).reset_index(drop=True)
            # assign activity based on location in data table
            # (row positions are fixed by the 2018 PDF table layout)
            pg.loc[0:11, "ActivityConsumedBy"] = "Recycled"
            pg.loc[12:15, "ActivityConsumedBy"] = "Composted"
            pg.loc[16:21, "ActivityConsumedBy"] = pg[
                "FlowName"].str.replace("Food - ", '')
            pg["ActivityConsumedBy"] = pg["ActivityConsumedBy"].str.title()
            pg['FlowName'] = pg['FlowName'].str.replace(
                "( -).*", "", regex=True)
            # melt df and rename cols to standardize before merging with
            # additional tables
            pg = pg.melt(id_vars=["FlowName", "ActivityConsumedBy"],
                         var_name="Year", value_name="FlowAmount")
            pg["Description"] = "Table 2. Materials Recycled, Composted and " \
                                "Managed by Other Food Pathways in the " \
                                "Municipal Waste Stream"
        if page_number in [8, 9]:
            # skip the first few rows
            pg = pdf_page.loc[2:19].reset_index(drop=True)
            # assign column headers
            pg.columns = pdf_page.loc[1, ]
            pg.columns.values[0] = "FlowName"
            # split column
            pg[['2000', '2005', '2010']] = \
                pg['2000 2005 2010'].str.split(' ', expand=True)
            pg = pg.drop(columns=['2000 2005 2010'])
            pg = pg.dropna(subset=['2018']).reset_index(drop=True)
            # melt df and rename cols to standardize before merging with
            # additional tables
            pg = pg.melt(id_vars="FlowName", var_name="Year",
                         value_name="FlowAmount")
            pg = pg.dropna(subset=["FlowAmount"]).reset_index(drop=True)
            if page_number == 8:
                pg["ActivityConsumedBy"] = "Combusted with Energy Recovery"
                pg["Description"] = "Table 3. Materials Combusted with " \
                                    "Energy Recovery* in the Municipal " \
                                    "Waste Stream"
            if page_number == 9:
                pg["ActivityConsumedBy"] = "Landfilled"
                pg["Description"] = "Table 4. Materials Landfilled in the " \
                                    "Municipal Waste Stream"
        # following code used for page 6, 9
        # drop nas and hardcode metals and inorganic wastes back in
        pg["FlowName"] = np.where(pg["FlowName"].str.contains(
            "Ferrous|Aluminum|Other Nonferrous"),
            'Metals, ' + pg["FlowName"], pg["FlowName"])
        pg["FlowName"] = np.where(
            pg["FlowName"] == "Wastes",
            "Miscellaneous Inorganic " + pg["FlowName"], pg["FlowName"])
        # Revise Activity names
        pg["ActivityConsumedBy"] = np.where(
            pg["ActivityConsumedBy"] == "Bio-Based",
            "Bio-Based Materials/Biochemical Processing",
            pg["ActivityConsumedBy"])
        pg["ActivityConsumedBy"] = np.where(
            pg["ActivityConsumedBy"] == "Codigestion/Anaerobic",
            "Codigestion/Anaerobic Digestion", pg["ActivityConsumedBy"])
        pg["ActivityConsumedBy"] = np.where(
            pg["ActivityConsumedBy"] == "Sewer/Wastewater",
            "Sewer/Wastewater Treatment", pg["ActivityConsumedBy"])
        # drop rows with totals to avoid duplication
        pg = pg[~pg["FlowName"].str.contains('Total')].reset_index(
            drop=True)
        pg['Unit'] = "Thousands of Tons"
        pdf_pages.append(pg)
    df = pd.concat(pdf_pages, ignore_index=True)
    return df
def METHOD_NAME(*, df_list, year, **_):
    """
    Combine, parse, and format the provided dataframes
    :param df_list: list of dataframes to concat and format
    :param year: str, the data year to subset to
    :return: df, parsed and partially formatted to
        flowbyactivity specifications
    """
    # concat list of dataframes (info on each page)
    df = pd.concat(df_list, sort=False)
    # subset by df
    df = df[df["Year"] == year]
    # remove non alphanumeric characters
    df["FlowName"] = df["FlowName"].str.replace('[^a-zA-Z0-9, ]', '',
                                                regex=True)
    # strip trailing white spaces
    df["FlowName"] = df["FlowName"].str.strip()
    df['SourceName'] = 'EPA_FactsAndFigures'
    df['Class'] = 'Other'
    df['FlowType'] = "WASTE_FLOW"
    df['Location'] = US_FIPS
    df = assign_fips_location_system(df, year)
    df['Year'] = str(year)
    # drop thousands separators before numeric use downstream
    df["FlowAmount"] = df["FlowAmount"].str.replace(',', '', regex=True)
    # Facts and Figures defines "Neg." as "Less than 5,000 tons or 0.05
    # percent," so replace with 0.  Use a literal (non-regex) replacement:
    # with regex=True the '.' is a wildcard and would also match e.g. "Negx".
    df["FlowAmount"] = df["FlowAmount"].str.replace('Neg.', '0', regex=False)
    df['DataReliability'] = 5  # tmp
    df['DataCollection'] = 5  # tmp
    return df
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
# Public API of this generated module.
__all__ = [
    'GetWorkloadClassifierResult',
    'AwaitableGetWorkloadClassifierResult',
    'get_workload_classifier',
    'get_workload_classifier_output',
]
@pulumi.output_type
class GetWorkloadClassifierResult:
    """
    Workload classifier operations for a data warehouse
    """
    def __init__(__self__, context=None, end_time=None, id=None, importance=None, METHOD_NAME=None, member_name=None, name=None, start_time=None, type=None):
        # Validate and store every field through pulumi's output machinery;
        # all fields are plain strings.
        for key, value in (
            ("context", context),
            ("end_time", end_time),
            ("id", id),
            ("importance", importance),
            ("label", METHOD_NAME),
            ("member_name", member_name),
            ("name", name),
            ("start_time", start_time),
            ("type", type),
        ):
            if value and not isinstance(value, str):
                raise TypeError(f"Expected argument '{key}' to be a str")
            pulumi.set(__self__, key, value)

    @property
    @pulumi.getter
    def context(self) -> Optional[str]:
        """The workload classifier context."""
        return pulumi.get(self, "context")

    @property
    @pulumi.getter(name="endTime")
    def end_time(self) -> Optional[str]:
        """The workload classifier end time for classification."""
        return pulumi.get(self, "end_time")

    @property
    @pulumi.getter
    def id(self) -> str:
        """Resource ID."""
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def importance(self) -> Optional[str]:
        """The workload classifier importance."""
        return pulumi.get(self, "importance")

    @property
    @pulumi.getter
    def METHOD_NAME(self) -> Optional[str]:
        """The workload classifier label."""
        return pulumi.get(self, "label")

    @property
    @pulumi.getter(name="memberName")
    def member_name(self) -> str:
        """The workload classifier member name."""
        return pulumi.get(self, "member_name")

    @property
    @pulumi.getter
    def name(self) -> str:
        """Resource name."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="startTime")
    def start_time(self) -> Optional[str]:
        """The workload classifier start time for classification."""
        return pulumi.get(self, "start_time")

    @property
    @pulumi.getter
    def type(self) -> str:
        """Resource type."""
        return pulumi.get(self, "type")
class AwaitableGetWorkloadClassifierResult(GetWorkloadClassifierResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable `yield` marks this as a generator so the result
        # can be awaited; it completes immediately with a plain copy.
        if False:
            yield self
        return GetWorkloadClassifierResult(
            **{
                field: getattr(self, field)
                for field in (
                    "context",
                    "end_time",
                    "id",
                    "importance",
                    "METHOD_NAME",
                    "member_name",
                    "name",
                    "start_time",
                    "type",
                )
            }
        )
def get_workload_classifier(database_name: Optional[str] = None,
                            resource_group_name: Optional[str] = None,
                            server_name: Optional[str] = None,
                            workload_classifier_name: Optional[str] = None,
                            workload_group_name: Optional[str] = None,
                            opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetWorkloadClassifierResult:
    """
    Gets a workload classifier


    :param str database_name: The name of the database.
    :param str resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
    :param str server_name: The name of the server.
    :param str workload_classifier_name: The name of the workload classifier.
    :param str workload_group_name: The name of the workload group from which to receive the classifier from.
    """
    # Map Python argument names to the provider's camelCase invoke args.
    __args__ = {
        'databaseName': database_name,
        'resourceGroupName': resource_group_name,
        'serverName': server_name,
        'workloadClassifierName': workload_classifier_name,
        'workloadGroupName': workload_group_name,
    }
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('azure-native:sql/v20221101preview:getWorkloadClassifier', __args__, opts=opts, typ=GetWorkloadClassifierResult).value

    # Constructor keyword -> key inside the raw invoke result.
    field_keys = {
        'context': 'context',
        'end_time': 'end_time',
        'id': 'id',
        'importance': 'importance',
        'METHOD_NAME': 'label',
        'member_name': 'member_name',
        'name': 'name',
        'start_time': 'start_time',
        'type': 'type',
    }
    return AwaitableGetWorkloadClassifierResult(
        **{param: pulumi.get(__ret__, key) for param, key in field_keys.items()}
    )
@_utilities.lift_output_func(get_workload_classifier)
def get_workload_classifier_output(database_name: Optional[pulumi.Input[str]] = None,
                                   resource_group_name: Optional[pulumi.Input[str]] = None,
                                   server_name: Optional[pulumi.Input[str]] = None,
                                   workload_classifier_name: Optional[pulumi.Input[str]] = None,
                                   workload_group_name: Optional[pulumi.Input[str]] = None,
                                   opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetWorkloadClassifierResult]:
    """
    Gets a workload classifier
    :param str database_name: The name of the database.
    :param str resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
    :param str server_name: The name of the server.
    :param str workload_classifier_name: The name of the workload classifier.
    :param str workload_group_name: The name of the workload group from which to receive the classifier from.
    """
    # Body intentionally empty: lift_output_func generates the Output-returning
    # implementation by lifting get_workload_classifier.
    # (Fix: removed a stray " |" dump artifact after the ellipsis that made
    # the file syntactically invalid.)
    ...
1,070 | set up | """Test measurement collection."""
import unittest
import mongomock
from database.measurements import create_measurement
from tests.fixtures import METRIC_ID, REPORT_ID, SOURCE_ID, SUBJECT_ID, create_report
class TestMeasurements(unittest.TestCase):
    """Unit tests for the measurements collection."""

    def METHOD_NAME(self) -> None:
        """Set up fixtures."""
        # NOTE(review): "+00:000"/"+00:001" are not valid ISO-8601 offsets;
        # they seem to exist only so that "end" sorts after "start" -- confirm.
        self.measurement_data = {
            "start": "2023-07-19T16:50:47+00:000",
            "end": "2023-07-19T16:50:47+00:001",
            "has_error": False,
            "sources": [
                {
                    "type": "sonarqube",
                    "source_uuid": SOURCE_ID,
                    "name": "Source",
                    "parameters": {"url": "https://url", "password": "password"},
                    "parse_error": None,
                    "connection_error": None,
                    "value": "10",
                    "total": "100",
                    "entities": [{"key": "key", "first_seen": "2023-07-18"}],
                },
            ],
            "metric_uuid": METRIC_ID,
            "report_uuid": REPORT_ID,
        }
        # mongomock provides an in-memory stand-in for the real MongoDB.
        self.client: mongomock.MongoClient = mongomock.MongoClient()
        self.database = self.client["quality_time_db"]

    def test_create_measurement_without_latest_measurement(self):
        """Test that create_measurement without a latest measurement inserts a new measurement."""
        self.database["reports"].insert_one(create_report(report_uuid=REPORT_ID))
        create_measurement(self.database, self.measurement_data)
        self.assertEqual(1, len(list(self.database.measurements.find())))

    def test_create_measurement_with_latest_measurement(self):
        """Test that create_measurement with a latest measurement inserts a new measurement."""
        self.database["reports"].insert_one(create_report(report_uuid=REPORT_ID))
        # Seed a different latest measurement so the new one is not "equal".
        self.database["measurements"].insert_one(
            {
                "metric_uuid": METRIC_ID,
                "sources": [
                    {"source_uuid": SOURCE_ID, "parse_error": None, "connection_error": None, "value": "42"},
                ],
            },
        )
        create_measurement(self.database, self.measurement_data)
        self.assertEqual(2, len(list(self.database.measurements.find())))

    def test_create_measurement_with_no_latest_metric(self):
        """Test that create_measurement does not insert new measurement when the metric does not exist."""
        create_measurement(self.database, self.measurement_data)
        self.assertEqual(0, len(list(self.database.measurements.find())))

    def test_create_measurement_without_source(self):
        """Test that a new measurement is not created if the sources used for the measurement no longer exist."""
        report = create_report(report_uuid=REPORT_ID)
        del report["subjects"][SUBJECT_ID]["metrics"][METRIC_ID]["sources"][SOURCE_ID]
        self.database["reports"].insert_one(report)
        create_measurement(self.database, self.measurement_data)
        self.assertEqual(0, len(list(self.database.measurements.find())))

    def test_create_measurement_when_its_equal(self):
        """Test that create_measurement with equal measurement does not insert new measurement."""
        self.database["reports"].insert_one(create_report(report_uuid=REPORT_ID))
        create_measurement(self.database, self.measurement_data)
        create_measurement(self.database, self.measurement_data)
        self.assertEqual(1, len(list(self.database.measurements.find())))

    def test_copy_first_seen_timestamps(self):
        """Test that the first seen timestamps are copied from the latest successful measurement."""
        # (Fix: removed a stray " |" dump artifact after the final closing
        # paren that made the file syntactically invalid.)
        self.database["reports"].insert_one(create_report(report_uuid=REPORT_ID))
        create_measurement(self.database, self.measurement_data)
        self.measurement_data["sources"][0]["entities"][0]["first_seen"] = "2023-07-19"
        create_measurement(self.database, self.measurement_data)
        self.assertEqual(
            "2023-07-18",
            next(self.database.measurements.find())["sources"][0]["entities"][0]["first_seen"],
        )
# Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import pytest
import numpy as np
import sympy
import cirq
# Parameter domains shared by the parametrised tests below.
_bools = (False, True)
_paulis = (cirq.X, cirq.Y, cirq.Z)
def _all_interaction_gates(exponents=(1,)):
    """Yield every PauliInteractionGate combination for the given exponents."""
    combos = itertools.product(_paulis, _bools, _paulis, _bools, exponents)
    for pauli_a, negate_a, pauli_b, negate_b, exp in combos:
        yield cirq.PauliInteractionGate(pauli_a, negate_a, pauli_b, negate_b, exponent=exp)
@pytest.mark.parametrize('gate', _all_interaction_gates())
def METHOD_NAME(gate):
    # Every interaction gate must pass cirq's cross-protocol consistency
    # checks (unitary, decomposition, repr, etc. agree with each other).
    cirq.testing.assert_implements_consistent_protocols(gate)
def test_eq_ne_and_hash():
    """Gates built from distinct parameter tuples are pairwise unequal."""
    tester = cirq.testing.EqualsTester()
    combos = itertools.product(_paulis, _bools, _paulis, _bools, (0.125, -0.25, 1))
    for pauli_a, negate_a, pauli_b, negate_b, exp in combos:
        gate = cirq.PauliInteractionGate(pauli_a, negate_a, pauli_b, negate_b, exponent=exp)
        tester.add_equality_group(gate)
def test_exponent_shifts_are_equal():
    """Exponents differing by full periods of 2 produce equal gates."""
    tester = cirq.testing.EqualsTester()
    # 0.1, 2.1, -1.9 and 4.1 are all the same exponent modulo the period.
    shifted_exponents = [0.1, 0.1, 2.1, -1.9, 4.1]
    gate_params = [
        (cirq.X, False, cirq.X, False),
        (cirq.X, True, cirq.X, False),
        (cirq.Y, False, cirq.Z, False),
        (cirq.Z, False, cirq.Y, True),
    ]
    for pauli_a, negate_a, pauli_b, negate_b in gate_params:
        tester.add_equality_group(
            cirq.PauliInteractionGate(pauli_a, negate_a, pauli_b, negate_b, exponent=e)
            for e in shifted_exponents
        )
@pytest.mark.parametrize('gate', _all_interaction_gates(exponents=(0.1, -0.25, 0.5, 1)))
def test_interchangeable_qubits(gate):
    """Swapping the qubits changes the op exactly when the unitary changes."""
    qa = cirq.NamedQubit('q0')
    qb = cirq.NamedQubit('q1')
    op_forward = gate(qa, qb)
    op_swapped = gate(qb, qa)
    mat_forward = cirq.Circuit(op_forward).unitary()
    mat_swapped = cirq.Circuit(op_swapped).unitary()
    ops_equal = op_forward == op_swapped
    unitaries_equal = cirq.allclose_up_to_global_phase(mat_forward, mat_swapped)
    assert ops_equal == unitaries_equal
def test_exponent():
    """The square root of CNOT has the expected matrix."""
    cnot_gate = cirq.PauliInteractionGate(cirq.Z, False, cirq.X, False)
    expected = np.array(
        [
            [1, 0, 0, 0],
            [0, 1, 0, 0],
            [0, 0, 0.5 + 0.5j, 0.5 - 0.5j],
            [0, 0, 0.5 - 0.5j, 0.5 + 0.5j],
        ]
    )
    np.testing.assert_almost_equal(cirq.unitary(cnot_gate**0.5), expected)
def test_repr():
    # repr() must round-trip through eval() back to an equal gate.
    cnot = cirq.PauliInteractionGate(cirq.Z, False, cirq.X, False)
    cirq.testing.assert_equivalent_repr(cnot)
def test_decomposes_despite_symbol():
    """A gate with a symbolic exponent still decomposes into operations."""
    qa, qb = cirq.NamedQubit('q0'), cirq.NamedQubit('q1')
    symbolic_gate = cirq.PauliInteractionGate(
        cirq.Z, False, cirq.X, False, exponent=sympy.Symbol('x')
    )
    assert cirq.decompose_once_with_qubits(symbolic_gate, [qa, qb])
def test_text_diagrams():
    """Gates with every inversion combination render the expected symbols."""
    q0, q1 = cirq.NamedQubit('q0'), cirq.NamedQubit('q1')
    circuit = cirq.Circuit(
        cirq.PauliInteractionGate(cirq.X, False, cirq.X, False)(q0, q1),
        cirq.PauliInteractionGate(cirq.X, True, cirq.X, False)(q0, q1),
        cirq.PauliInteractionGate(cirq.X, False, cirq.X, True)(q0, q1),
        cirq.PauliInteractionGate(cirq.X, True, cirq.X, True)(q0, q1),
        cirq.PauliInteractionGate(cirq.X, False, cirq.Y, False)(q0, q1),
        cirq.PauliInteractionGate(cirq.Y, False, cirq.Z, False)(q0, q1),
        cirq.PauliInteractionGate(cirq.Z, False, cirq.Y, False)(q0, q1),
        cirq.PauliInteractionGate(cirq.Y, True, cirq.Z, True)(q0, q1),
        cirq.PauliInteractionGate(cirq.Z, True, cirq.Y, True)(q0, q1),
    )
    # (Fix: removed a stray " |" dump artifact after the closing paren and
    # restored the connector-line column alignment, which the dump collapsed
    # to single spaces -- verify against a live render.)
    assert (
        circuit.to_text_diagram().strip()
        == """
q0: ───X───(-X)───X──────(-X)───X───Y───@───(-Y)───(-@)───
       │   │      │      │      │   │   │   │      │
q1: ───X───X──────(-X)───(-X)───Y───@───Y───(-@)───(-Y)───
""".strip()
    )
from __future__ import annotations
from manim import *
from manim.utils.testing.frames_comparison import frames_comparison
from ..helpers.path_utils import get_svg_resource
__module_test__ = "img_and_svg"  # frames_comparison test-data subdirectory
# Tests break down into two kinds: one where the SVG is simple enough to step through
# and ones where the SVG is realistically complex, and the output should be visually inspected.
# First are the simple tests.
# --- Simple, hand-checkable SVG fixtures --------------------------------
@frames_comparison
def test_Line(scene):
    line_demo = SVGMobject(get_svg_resource("line.svg"))
    scene.add(line_demo)
    scene.wait()
@frames_comparison
def test_CubicPath(scene):
    cubic_demo = SVGMobject(get_svg_resource("cubic_demo.svg"))
    scene.add(cubic_demo)
    scene.wait()
@frames_comparison
def test_CubicAndLineto(scene):
    cubic_lineto = SVGMobject(get_svg_resource("cubic_and_lineto.svg"))
    scene.add(cubic_lineto)
    scene.wait()
@frames_comparison
def test_Rhomboid(scene):
    # Same shape rendered three times: default, filled, and unfilled.
    rhomboid = SVGMobject(get_svg_resource("rhomboid.svg")).scale(0.5)
    rhomboid_fill = rhomboid.copy().set_fill(opacity=1).shift(UP * 2)
    rhomboid_no_fill = rhomboid.copy().set_fill(opacity=0).shift(DOWN * 2)
    scene.add(rhomboid, rhomboid_fill, rhomboid_no_fill)
    scene.wait()
@frames_comparison
def test_Inheritance(scene):
    three_arrows = SVGMobject(get_svg_resource("inheritance_test.svg")).scale(0.5)
    scene.add(three_arrows)
    scene.wait()
@frames_comparison
def test_MultiPartPath(scene):
    mpp = SVGMobject(get_svg_resource("multi_part_path.svg"))
    scene.add(mpp)
    scene.wait()
@frames_comparison
def test_QuadraticPath(scene):
    quad = SVGMobject(get_svg_resource("qcurve_demo.svg"))
    scene.add(quad)
    scene.wait()
@frames_comparison
def test_SmoothCurves(scene):
    smooths = SVGMobject(get_svg_resource("smooth_curves.svg"))
    scene.add(smooths)
    scene.wait()
@frames_comparison
def test_WatchTheDecimals(scene):
    # BUG FIX: the body was wrapped in a nested ``construct`` function that
    # was never called, so the test compared an empty scene. Run directly.
    decimal = SVGMobject(get_svg_resource("watch_the_decimals.svg"))
    scene.add(decimal)
    scene.wait()
@frames_comparison
def test_UseTagInheritance(scene):
    aabbb = SVGMobject(get_svg_resource("aabbb.svg"))
    scene.add(aabbb)
    scene.wait()
@frames_comparison
def test_HalfEllipse(scene):
    half_ellipse = SVGMobject(get_svg_resource("half_ellipse.svg"))
    scene.add(half_ellipse)
    scene.wait()
@frames_comparison
def test_Heart(scene):
    heart = SVGMobject(get_svg_resource("heart.svg"))
    scene.add(heart)
    scene.wait()
@frames_comparison
def test_Arcs01(scene):
    # See: https://www.w3.org/TR/SVG11/images/paths/arcs01.svg
    arcs = SVGMobject(get_svg_resource("arcs01.svg"))
    scene.add(arcs)
    scene.wait()
@frames_comparison(last_frame=False)
def test_Arcs02(scene):
    # See: https://www.w3.org/TR/SVG11/images/paths/arcs02.svg
    arcs = SVGMobject(get_svg_resource("arcs02.svg"))
    scene.add(arcs)
    scene.wait()
# Second are the visual tests - these are probably too complex to verify step-by-step, so
# these are really more of a spot-check
@frames_comparison(last_frame=False)
def test_WeightSVG(scene):
    path = get_svg_resource("weight.svg")
    svg_obj = SVGMobject(path)
    scene.add(svg_obj)
    scene.wait()
@frames_comparison
def test_BrachistochroneCurve(scene):
    brach_curve = SVGMobject(get_svg_resource("curve.svg"))
    scene.add(brach_curve)
    scene.wait()
@frames_comparison
def test_DesmosGraph1(scene):
    dgraph = SVGMobject(get_svg_resource("desmos-graph_1.svg")).scale(3)
    scene.add(dgraph)
    scene.wait()
@frames_comparison
def test_Penrose(scene):
    penrose = SVGMobject(get_svg_resource("penrose.svg"))
    scene.add(penrose)
    scene.wait()
@frames_comparison
def test_ManimLogo(scene):
    # White backdrop so the light logo strokes stay visible.
    background_rect = Rectangle(color=WHITE, fill_opacity=1).scale(2)
    manim_logo = SVGMobject(get_svg_resource("manim-logo-sidebar.svg"))
    scene.add(background_rect, manim_logo)
    scene.wait()
@frames_comparison
def test_UKFlag(scene):
    uk_flag = SVGMobject(get_svg_resource("united-kingdom.svg"))
    scene.add(uk_flag)
    scene.wait()
@frames_comparison
def test_SingleUSState(scene):
    single_state = SVGMobject(get_svg_resource("single_state.svg"))
    scene.add(single_state)
    scene.wait()
@frames_comparison
def test_ContiguousUSMap(scene):
    states = SVGMobject(get_svg_resource("states_map.svg")).scale(3)
    scene.add(states)
    scene.wait()
@frames_comparison
def test_PixelizedText(scene):
    background_rect = Rectangle(color=WHITE, fill_opacity=1).scale(2)
    rgb_svg = SVGMobject(get_svg_resource("pixelated_text.svg"))
    scene.add(background_rect, rgb_svg)
    scene.wait()
@frames_comparison
def test_VideoIcon(scene):
    video_icon = SVGMobject(get_svg_resource("video_icon.svg"))
    scene.add(video_icon)
    scene.wait()
# --- SVG "transform" attribute handling ---------------------------------
@frames_comparison
def test_MultipleTransform(scene):
    svg_obj = SVGMobject(get_svg_resource("multiple_transforms.svg"))
    scene.add(svg_obj)
    scene.wait()
@frames_comparison
def test_MatrixTransform(scene):
    svg_obj = SVGMobject(get_svg_resource("matrix.svg"))
    scene.add(svg_obj)
    scene.wait()
@frames_comparison
def test_ScaleTransform(scene):
    svg_obj = SVGMobject(get_svg_resource("scale.svg"))
    scene.add(svg_obj)
    scene.wait()
@frames_comparison
def test_TranslateTransform(scene):
    svg_obj = SVGMobject(get_svg_resource("translate.svg"))
    scene.add(svg_obj)
    scene.wait()
@frames_comparison
def METHOD_NAME(scene):
    # Covers the skewX transform (fixture: skewX.svg).
    svg_obj = SVGMobject(get_svg_resource("skewX.svg"))
    scene.add(svg_obj)
    scene.wait()
@frames_comparison
def test_SkewYTransform(scene):
    svg_obj = SVGMobject(get_svg_resource("skewY.svg"))
    scene.add(svg_obj)
    scene.wait()
@frames_comparison
def test_RotateTransform(scene):
    svg_obj = SVGMobject(get_svg_resource("rotate.svg"))
    scene.add(svg_obj)
    scene.wait()
@frames_comparison
def test_path_multiple_moves(scene):
    svg_obj = SVGMobject(
        get_svg_resource("path_multiple_moves.svg"),
        fill_color=WHITE,
        stroke_color=WHITE,
        stroke_width=3,
    )
    scene.add(svg_obj)
@frames_comparison
def test_ImageMobject(scene):
    # Same PNG at default / 1080p / 540p resolution scaling, side by side.
    file_path = get_svg_resource("tree_img_640x351.png")
    im1 = ImageMobject(file_path).shift(4 * LEFT + UP)
    im2 = ImageMobject(file_path, scale_to_resolution=1080).shift(4 * LEFT + 2 * DOWN)
    im3 = ImageMobject(file_path, scale_to_resolution=540).shift(4 * RIGHT)
    scene.add(im1, im2, im3)
    scene.wait(1)
@frames_comparison
def test_ImageInterpolation(scene):
    """Render one tiny image under each resampling algorithm, side by side."""
    img = ImageMobject(
        np.uint8([[63, 0, 0, 0], [0, 127, 0, 0], [0, 0, 191, 0], [0, 0, 0, 255]]),
    )
    img.height = 2
    # One copy per resampling algorithm under test.
    copies = [img.copy() for _ in range(5)]
    for mobj, algorithm in zip(copies, ("nearest", "lanczos", "linear", "cubic", "box")):
        mobj.set_resampling_algorithm(RESAMPLING_ALGORITHMS[algorithm])
    scene.add(*copies)
    # Spread the copies horizontally. (Fix: was a list comprehension used
    # only for its side effects; also removed a trailing " |" dump artifact
    # on the final line.)
    for pos, mobj in enumerate(scene.mobjects):
        mobj.shift(4 * LEFT + pos * 2 * RIGHT)
    scene.wait()
# -*- coding: utf-8 -*-
import re
import time
from datetime import timedelta
from ..base.simple_downloader import SimpleDownloader
class MegasharesCom(SimpleDownloader):
    """Megashares.com downloader plugin."""

    __name__ = "MegasharesCom"
    __type__ = "downloader"
    __version__ = "0.37"
    __status__ = "testing"
    __pattern__ = r"http://(?:www\.)?(d\d{2}\.)?megashares\.com/((index\.php)?\?d\d{2}=|dl/)\w+"
    __config__ = [
        ("enabled", "bool", "Activated", True),
        ("use_premium", "bool", "Use premium account if available", True),
        ("fallback", "bool", "Fallback to free download if premium fails", True),
        ("chk_filesize", "bool", "Check file size", True),
        ("max_wait", "int", "Reconnect if waiting time is greater than minutes", 10),
    ]
    __description__ = """Megashares.com downloader plugin"""
    __license__ = "GPLv3"
    __authors__ = [
        ("zoidberg", "zoidberg@mujmail.cz"),
        ("Walter Purcaro", "vuolter@gmail.com"),
    ]

    # Patterns used to scrape the download pages.
    NAME_PATTERN = r'<h1 class="black xxl"[^>]*title="(?P<N>.+?)">'
    SIZE_PATTERN = r'<strong><span class="black">Filesize:</span></strong> (?P<S>[\d.,]+) (?P<U>[\w^_]+)'
    OFFLINE_PATTERN = (
        r'<dd class="red">(Invalid Link Request|Link has been deleted|Invalid link)'
    )
    LINK_PATTERN = r'<div id="show_download_button_{}".*?>\s*<a href="(.+?)">'
    PASSPORT_LEFT_PATTERN = (
        r"Your Download Passport is: <.*?>(\w+).*?You have.*?<.*?>.*?([\d.]+) (\w+)"
    )
    PASSPORT_RENEW_PATTERN = r"(\d+):<strong>(\d+)</strong>:<strong>(\d+)</strong>"
    REACTIVATE_NUM_PATTERN = r'<input[^>]*id="random_num" value="(\d+)" />'
    REACTIVATE_PASSPORT_PATTERN = r'<input[^>]*id="passport_num" value="(\w+)" />'
    REQUEST_URI_PATTERN = r'var request_uri = "(.+?)";'
    NO_SLOTS_PATTERN = (
        r'<dd class="red">All download slots for this link are currently filled'
    )

    def setup(self):
        # Resume is supported; parallel downloads only for premium accounts.
        self.resume_download = True
        self.multi_dl = self.premium

    def handle_premium(self, pyfile):
        self.METHOD_NAME(True)

    def handle_free(self, pyfile):
        """Free download flow: wait for a slot, renew the passport, check traffic."""
        if self.NO_SLOTS_PATTERN in self.data:
            self.retry(wait=timedelta(minutes=5).total_seconds())
        m = re.search(self.REACTIVATE_PASSPORT_PATTERN, self.data)
        if m is not None:
            #: Passport needs to be reactivated via a captcha round-trip.
            passport_num = m.group(1)
            request_uri = re.search(self.REQUEST_URI_PATTERN, self.data).group(1)
            random_num = re.search(self.REACTIVATE_NUM_PATTERN, self.data).group(1)
            verifyinput = self.captcha.decrypt(
                "http://d01.megashares.com/index.php",
                get={"secgfx": "gfx", "random_num": random_num},
            )
            self.log_info(
                self._("Reactivating passport {}: {} {}").format(
                    passport_num, random_num, verifyinput
                )
            )
            # NOTE(review): the repeated "rsargs[]" keys below collapse to a
            # single entry (a dict literal keeps only the last value); if the
            # endpoint needs all four values they must be sent as a sequence.
            # Confirm against the request helper before changing behavior.
            res = self.load(
                "http://d01.megashares.com{}".format(request_uri),
                get={
                    "rs": "check_passport_renewal",
                    "rsargs[]": verifyinput,
                    "rsargs[]": random_num,
                    "rsargs[]": passport_num,
                    "rsargs[]": "replace_sec_pprenewal",
                    "rsrnd[]": str(int(time.time() * 1000)),
                },
            )
            if "Thank you for reactivating your passport" in res:
                self.captcha.correct()
                self.restart()
            else:
                self.retry_captcha(msg=self._("Failed to reactivate passport"))
        m = re.search(self.PASSPORT_RENEW_PATTERN, self.data)
        if m is not None:
            #: Passport is renewing; wait out the advertised countdown.
            # NOTE(review): summing seconds + minutes + minutes mirrors the
            # original code, but the pattern looks like h:m:s -- confirm.
            times = [int(x) for x in m.groups()]
            renew = (
                times[0]
                + timedelta(minutes=times[1]).total_seconds()
                + timedelta(minutes=times[2]).total_seconds()
            )
            self.log_debug(f"Waiting {renew} seconds for a new passport")
            self.retry(wait=renew, msg=self._("Passport renewal"))
        #: Check traffic left on passport
        m = re.search(self.PASSPORT_LEFT_PATTERN, self.data, re.M | re.S)
        if m is None:
            self.fail(self._("Passport not found"))
        self.log_info(self._("Download passport: {}").format(m.group(1)))
        # FIX: the original computed ``float(...) << 10 ** {...}``, which
        # raises TypeError (floats do not support '<<'); convert the reported
        # amount to bytes with 1024 ** unit-exponent instead.
        data_left = float(m.group(2)) * 1024 ** {"B": 0, "KB": 1, "MB": 2, "GB": 3}[
            m.group(3)
        ]
        self.log_info(
            self._("Data left: {} {} ({} MiB needed)").format(
                m.group(2), m.group(3), self.pyfile.size // 1_048_576
            )
        )
        if not data_left:
            self.retry(wait=600, msg=self._("Passport renewal"))
        self.METHOD_NAME(False)

    def METHOD_NAME(self, premium=False):
        """Extract the premium/free download link and store it in ``self.link``."""
        m = re.search(self.LINK_PATTERN.format(1 if premium else 2), self.data)
        msg = self._("{} download URL").format("Premium" if premium else "Free")
        if m is None:
            self.error(msg)
        self.link = m.group(1)
        self.log_debug(f"{msg}: {self.link}")
from pathlib import Path
from typing import cast
import cle
from cle import MachO
from cle.backends.macho.binding import MachOChainedFixup, MachORelocation
# Root of the binaries checkout holding the test fixtures.
TEST_BASE = Path(__file__).resolve().parent.parent.parent / "binaries"
def test_fixups():
    """
    Tests the pointer format DYLD_CHAINED_PTR_64_OFFSET
    :return:
    """
    binary: MachO = cast(MachO, cle.Loader(str(TEST_BASE / "tests" / "aarch64" / "dyld_ios15.macho")).main_object)
    # Expected mapping: rebased fixup location -> rebased pointer target.
    expected = {
        0x100008100: 0x100007A40,
        0x1000081E0: 0x1000072B0,
        0x1000081E8: 0x1000072DC,
        0x1000081F0: 0x1000072E4,
        0x1000081F8: 0x100007310,
        0x100008200: 0x100007350,
        0x100008208: 0x10000735C,
        0x100008210: 0x10000738C,
        0x100008218: 0x1000073E8,
        0x100008238: 0x1000081E0,
        0x100008248: 0x100007A40,
        0x1000082A0: 0x100007AFC,
        0x1000082D8: 0x10000C0E8,
        0x10000C018: 0x100007B90,
        0x10000C060: 0x100007B90,
        0x10000C068: 0x100007998,
        0x10000C090: 0x100007C2A,
        0x10000C0D0: 0x10000C000,
        0x10000C0D8: 0x100007210,
        0x10000C0E8: 0x10000C0B0,
        0x10000C108: 0x10000C04A,
        0x10000C128: 0x1000079F0,
    }
    actual = {r.rebased_addr: r.value for r in binary.relocs if isinstance(r, MachOChainedFixup)}
    assert actual == expected
def METHOD_NAME():
    """Check each MachO relocation resolves to the expected (address, symbol name) pair."""
    loader = cle.Loader(str(TEST_BASE / "tests" / "aarch64" / "dyld_ios15.macho"))
    binary: MachO = cast(MachO, loader.main_object)
    expected = [
        (0x100008000, "_$s10Foundation5NSLogyySS_s7CVarArg_pdtF"),
        (0x100008008, "_$s2os0A4_log_3dso0B04type_ys12StaticStringV_SVSgSo03OS_a1_B0CSo0a1_b1_D2_tas7CVarArg_pdtF"),
        (0x100008010, "_$s7SwiftUI11WindowGroupV7contentACyxGxyXE_tcfC"),
        (0x100008018, "_$s7SwiftUI11WindowGroupVMn"),
        (0x100008020, "_$s7SwiftUI11WindowGroupVyxGAA5SceneAAMc"),
        (0x100008028, "_$s7SwiftUI12SceneBuilderV10buildBlockyxxAA0C0RzlFZ"),
        (0x100008030, "_$s7SwiftUI13_VStackLayoutVMn"),
        (0x100008038, "_$s7SwiftUI13_VariadicViewO4TreeVMn"),
        (0x100008040, "_$s7SwiftUI14_PaddingLayoutVMn"),
        (0x100008048, "_$s7SwiftUI15ModifiedContentVMn"),
        (0x100008050, "_$s7SwiftUI18LocalizedStringKeyV13stringLiteralACSS_tcfC"),
        (0x100008058, "_$s7SwiftUI19HorizontalAlignmentV6centerACvgZ"),
        (0x100008060, "_$s7SwiftUI3AppPAAE4mainyyFZ"),
        (0x100008068, "_$s7SwiftUI4EdgeO3SetV3allAEvgZ"),
        (0x100008070, "_$s7SwiftUI4TextVMn"),
        (
            0x100008078,
            "_$s7SwiftUI4ViewPAAE05_makeC04view6inputsAA01_C7OutputsVAA11_GraphValueVyxG_AA01_C6InputsVtFZ",
        ),
        (
            0x100008080,
            "_$s7SwiftUI4ViewPAAE05_makeC4List4view6inputsAA01_cE7OutputsVAA11_GraphValueVyxG_AA01_cE6InputsVtFZ",
        ),
        (0x100008088, "_$s7SwiftUI4ViewPAAE14_viewListCount6inputsSiSgAA01_ceF6InputsV_tFZ"),
        (0x100008090, "_$s7SwiftUI5StateV12wrappedValueACyxGx_tcfC"),
        (0x100008098, "_$s7SwiftUI5StateVMn"),
        (0x1000080A0, "_$s7SwiftUI6ButtonVA2A4TextVRszrlE_6actionACyAEGAA18LocalizedStringKeyV_yyctcfC"),
        (0x1000080A8, "_$s7SwiftUI6ButtonVMn"),
        (0x1000080B0, "_$s7SwiftUI6VStackVMn"),
        (0x1000080B8, "_$s7SwiftUI6VStackVyxGAA4ViewAAMc"),
        (0x1000080C0, "_$sSiN"),
        (0x1000080C8, "_$sSo13os_log_type_ta0A0E7defaultABvgZ"),
        (0x1000080D0, "_$sSo8NSObjectCs7CVarArg10ObjectiveCMc"),
        (0x1000080D8, "_$sSo8NSStringC10FoundationE13stringLiteralABs12StaticStringV_tcfC"),
        (0x1000080E0, "_$sSo9OS_os_logC0B0E7defaultABvgZ"),
        (0x1000080E8, "_$ss23_ContiguousArrayStorageCMn"),
        (0x1000080F0, "_$ss7CVarArgMp"),
        (0x1000080F8, "___chkstk_darwin"),
        (0x100008108, "__swiftEmptyArrayStorage"),
        (0x100008110, "_objc_opt_self"),
        (0x100008118, "_objc_release"),
        (0x100008120, "_swift_allocObject"),
        (0x100008128, "_swift_deallocClassInstance"),
        (0x100008130, "_swift_getObjCClassMetadata"),
        (0x100008138, "_swift_getOpaqueTypeConformance"),
        (0x100008140, "_swift_getTypeByMangledNameInContext"),
        (0x100008148, "_swift_getTypeByMangledNameInContextInMetadataState"),
        (0x100008150, "_swift_getWitnessTable"),
        (0x100008158, "_swift_release"),
        (0x100008160, "_swift_retain"),
        (0x100008168, "__swift_FORCE_LOAD_$_swiftObjectiveC"),
        (0x100008170, "__swift_FORCE_LOAD_$_swiftDarwin"),
        (0x100008178, "__swift_FORCE_LOAD_$_swiftos"),
        (0x100008180, "__swift_FORCE_LOAD_$_swiftUniformTypeIdentifiers"),
        (0x100008188, "__swift_FORCE_LOAD_$_swiftFoundation"),
        (0x100008190, "__swift_FORCE_LOAD_$_swiftCoreFoundation"),
        (0x100008198, "__swift_FORCE_LOAD_$_swiftDispatch"),
        (0x1000081A0, "__swift_FORCE_LOAD_$_swiftCoreGraphics"),
        (0x1000081A8, "__swift_FORCE_LOAD_$_swiftUIKit"),
        (0x1000081B0, "__swift_FORCE_LOAD_$_swiftCoreImage"),
        (0x1000081B8, "__swift_FORCE_LOAD_$_swiftMetal"),
        (0x1000081C0, "__swift_FORCE_LOAD_$_swiftQuartzCore"),
        (0x1000081C8, "__swift_FORCE_LOAD_$_swiftFileProvider"),
        (0x1000081D0, "__swift_FORCE_LOAD_$_swiftDataDetection"),
        (0x1000081D8, "__swift_FORCE_LOAD_$_swiftCoreData"),
        (0x100008258, "_$s7SwiftUI4ViewMp"),
        (0x100008260, "_$s7SwiftUI4ViewP4BodyAC_AaBTn"),
        (0x100008268, "_$s4Body7SwiftUI4ViewPTl"),
        (
            0x100008270,
            "_$s7SwiftUI4ViewP05_makeC04view6inputsAA01_C7OutputsVAA11_GraphValueVyxG_AA01_C6InputsVtFZTq",
        ),
        (
            0x100008278,
            "_$s7SwiftUI4ViewP05_makeC4List4view6inputsAA01_cE7OutputsVAA11_GraphValueVyxG_AA01_cE6InputsVtFZTq",
        ),
        (0x100008280, "_$s7SwiftUI4ViewP14_viewListCount6inputsSiSgAA01_ceF6InputsV_tFZTq"),
        (0x100008288, "_$s7SwiftUI4ViewP4body4BodyQzvgTq"),
        (0x100008290, "_$sytWV"),
        (0x1000082A8, "_$s7SwiftUI3AppMp"),
        (0x1000082B0, "_$s7SwiftUI3AppP4BodyAC_AA5SceneTn"),
        (0x1000082B8, "_$s4Body7SwiftUI3AppPTl"),
        (0x1000082C0, "_$s7SwiftUI3AppP4body4BodyQzvgTq"),
        (0x1000082C8, "_$s7SwiftUI3AppPxycfCTq"),
        (0x1000082D0, "_$s7SwiftUI5SceneMp"),
        (0x10000C098, "_OBJC_CLASS_$_OS_os_log"),
        (0x10000C0A0, "_OBJC_CLASS_$_NSString"),
        (0x10000C0B0, "_OBJC_METACLASS_$__TtCs12_SwiftObject"),
        (0x10000C0B8, "_OBJC_METACLASS_$__TtCs12_SwiftObject"),
        (0x10000C0C0, "__objc_empty_cache"),
        (0x10000C0E0, "_$sBoWV"),
        (0x10000C0F0, "_OBJC_CLASS_$__TtCs12_SwiftObject"),
        (0x10000C0F8, "__objc_empty_cache"),
        (0x10000C138, "_swift_deletedMethodError"),
        (0x10000C140, "_swift_deletedMethodError"),
    ]
    result = [
        (r.rebased_addr, r.resolvedby.name)
        for r in binary.relocs
        if isinstance(r, MachORelocation)  # only relocs that deal with symbols
    ]
    assert expected == result
if __name__ == "__main__":
    # Allow running both checks directly, outside pytest.
    # (Fix: removed a trailing " |" dump artifact after the last call.)
    test_fixups()
    METHOD_NAME()
# These tests are auto-generated with test data from:
# https://github.com/exercism/problem-specifications/tree/main/exercises/kindergarten-garden/canonical-data.json
# File last updated on 2023-07-19
import unittest
from kindergarten_garden import (
Garden,
)
class KindergartenGardenTest(unittest.TestCase):
    """Canonical-data tests for the kindergarten-garden exercise.

    (Fix: removed a trailing " |" dump artifact after the final closing
    paren that made the file syntactically invalid.)
    """

    # --- Partial gardens (fewer than 12 students) ---
    def test_partial_garden_garden_with_single_student(self):
        garden = Garden("RC\nGG")
        self.assertEqual(
            garden.plants("Alice"), ["Radishes", "Clover", "Grass", "Grass"]
        )

    def test_partial_garden_different_garden_with_single_student(self):
        garden = Garden("VC\nRC")
        self.assertEqual(
            garden.plants("Alice"), ["Violets", "Clover", "Radishes", "Clover"]
        )

    def test_partial_garden_garden_with_two_students(self):
        garden = Garden("VVCG\nVVRC")
        self.assertEqual(
            garden.plants("Bob"), ["Clover", "Grass", "Radishes", "Clover"]
        )

    def test_partial_garden_second_student_s_garden(self):
        garden = Garden("VVCCGG\nVVCCGG")
        self.assertEqual(garden.plants("Bob"), ["Clover", "Clover", "Clover", "Clover"])

    def test_partial_garden_third_student_s_garden(self):
        garden = Garden("VVCCGG\nVVCCGG")
        self.assertEqual(garden.plants("Charlie"), ["Grass", "Grass", "Grass", "Grass"])

    # --- Full garden (all twelve default students) ---
    def test_full_garden_for_alice_first_student_s_garden(self):
        garden = Garden("VRCGVVRVCGGCCGVRGCVCGCGV\nVRCCCGCRRGVCGCRVVCVGCGCV")
        self.assertEqual(
            garden.plants("Alice"), ["Violets", "Radishes", "Violets", "Radishes"]
        )

    def test_full_garden_for_bob_second_student_s_garden(self):
        garden = Garden("VRCGVVRVCGGCCGVRGCVCGCGV\nVRCCCGCRRGVCGCRVVCVGCGCV")
        self.assertEqual(garden.plants("Bob"), ["Clover", "Grass", "Clover", "Clover"])

    def test_full_garden_for_charlie(self):
        garden = Garden("VRCGVVRVCGGCCGVRGCVCGCGV\nVRCCCGCRRGVCGCRVVCVGCGCV")
        self.assertEqual(
            garden.plants("Charlie"), ["Violets", "Violets", "Clover", "Grass"]
        )

    def test_full_garden_for_david(self):
        garden = Garden("VRCGVVRVCGGCCGVRGCVCGCGV\nVRCCCGCRRGVCGCRVVCVGCGCV")
        self.assertEqual(
            garden.plants("David"), ["Radishes", "Violets", "Clover", "Radishes"]
        )

    def test_full_garden_for_eve(self):
        garden = Garden("VRCGVVRVCGGCCGVRGCVCGCGV\nVRCCCGCRRGVCGCRVVCVGCGCV")
        self.assertEqual(garden.plants("Eve"), ["Clover", "Grass", "Radishes", "Grass"])

    def test_full_garden_for_fred(self):
        garden = Garden("VRCGVVRVCGGCCGVRGCVCGCGV\nVRCCCGCRRGVCGCRVVCVGCGCV")
        self.assertEqual(
            garden.plants("Fred"), ["Grass", "Clover", "Violets", "Clover"]
        )

    def test_full_garden_for_ginny(self):
        garden = Garden("VRCGVVRVCGGCCGVRGCVCGCGV\nVRCCCGCRRGVCGCRVVCVGCGCV")
        self.assertEqual(garden.plants("Ginny"), ["Clover", "Grass", "Grass", "Clover"])

    def test_full_garden_for_harriet(self):
        garden = Garden("VRCGVVRVCGGCCGVRGCVCGCGV\nVRCCCGCRRGVCGCRVVCVGCGCV")
        self.assertEqual(
            garden.plants("Harriet"), ["Violets", "Radishes", "Radishes", "Violets"]
        )

    def test_full_garden_for_ileana(self):
        garden = Garden("VRCGVVRVCGGCCGVRGCVCGCGV\nVRCCCGCRRGVCGCRVVCVGCGCV")
        self.assertEqual(
            garden.plants("Ileana"), ["Grass", "Clover", "Violets", "Clover"]
        )

    def test_full_garden_for_joseph(self):
        garden = Garden("VRCGVVRVCGGCCGVRGCVCGCGV\nVRCCCGCRRGVCGCRVVCVGCGCV")
        self.assertEqual(
            garden.plants("Joseph"), ["Violets", "Clover", "Violets", "Grass"]
        )

    def test_full_garden_for_kincaid_second_to_last_student_s_garden(self):
        garden = Garden("VRCGVVRVCGGCCGVRGCVCGCGV\nVRCCCGCRRGVCGCRVVCVGCGCV")
        self.assertEqual(
            garden.plants("Kincaid"), ["Grass", "Clover", "Clover", "Grass"]
        )

    def test_full_garden_for_larry_last_student_s_garden(self):
        garden = Garden("VRCGVVRVCGGCCGVRGCVCGCGV\nVRCCCGCRRGVCGCRVVCVGCGCV")
        self.assertEqual(
            garden.plants("Larry"), ["Grass", "Violets", "Clover", "Violets"]
        )

    # Additional tests for this track
    def METHOD_NAME(self):
        garden = Garden(
            "VCRRGVRG\nRVGCCGCV", students=["Samantha", "Patricia", "Xander", "Roger"]
        )
        self.assertEqual(
            garden.plants("Patricia"), ["Violets", "Clover", "Radishes", "Violets"]
        )

    def test_students_are_unordered_last_student(self):
        garden = Garden(
            "VCRRGVRG\nRVGCCGCV", students=["Samantha", "Patricia", "Xander", "Roger"]
        )
        self.assertEqual(
            garden.plants("Xander"), ["Radishes", "Grass", "Clover", "Violets"]
        )
1,076 | enable | """
Copyright (c) 2008-2011 Volvox Development Team
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Author: Konstantin Lepa <konstantin.lepa@gmail.com>
ANSI color formatting for output in the terminal.
"""
import os
try:
    # POSIX-only modules used for terminal capability/size probing; their
    # absence (e.g. on Windows) is tolerated and handled gracefully below.
    import curses
    import fcntl
    import struct
    import termios
except Exception:
    pass
__all__ = ["colored", "cprint"]
VERSION = (1, 1, 0)
# SGR attribute codes (rendered as ESC[<n>m).
ATTRIBUTES = dict(bold=1, dark=2, underline=4, blink=5, reverse=7, concealed=8)
# Background colour codes.
HIGHLIGHTS = dict(
    on_grey=40,
    on_red=41,
    on_green=42,
    on_yellow=43,
    on_blue=44,
    on_magenta=45,
    on_cyan=46,
    on_white=47,
)
# Foreground colour codes.
COLORS = dict(grey=30, red=31, green=32, yellow=33, blue=34, magenta=35, cyan=36, white=37)
RESET = "\033[0m"
# Module-wide on/off switch, toggled by METHOD_NAME() and read by colored().
__ISON = True
def METHOD_NAME(true_false):
    """Globally switch ANSI colour formatting on or off.

    When disabled, the colouring helpers return/print text unchanged.
    """
    global __ISON  # module-wide toggle read by colored()/colored_map()
    __ISON = true_false
def ison():
    """Return True if ANSI colour formatting is currently enabled."""
    return __ISON
def stream_has_colours(stream):
    """Return True when *stream* is a TTY whose terminal supports colours.

    Python cookbook, #475186.
    """
    # Anything that is not an interactive terminal cannot take colour codes.
    if not (hasattr(stream, "isatty") and stream.isatty()):
        return False
    try:
        curses.setupterm()
        return curses.tigetnum("colors") > 2
    except Exception:
        # Terminfo probing failed: conservatively assume no colour support.
        return False
def colored(text, color=None, on_color=None, attrs=None):
    """Return *text* wrapped in ANSI escape codes.

    Available text colors:
        red, green, yellow, blue, magenta, cyan, white.
    Available text highlights:
        on_red, on_green, on_yellow, on_blue, on_magenta, on_cyan, on_white.
    Available attributes:
        bold, dark, underline, blink, reverse, concealed.
    Example:
        colored('Hello, World!', 'red', 'on_grey', ['blue', 'blink'])
        colored('Hello, World!', 'green')
    """
    # Formatting is skipped entirely when disabled globally or via env var.
    if not __ISON or os.getenv("ANSI_COLORS_DISABLED") is not None:
        return text
    fmt_str = "\033[%dm%s"
    if color is not None:
        text = fmt_str % (COLORS[color], text)
    if on_color is not None:
        text = fmt_str % (HIGHLIGHTS[on_color], text)
    if attrs is not None:
        for attr in attrs:
            text = fmt_str % (ATTRIBUTES[attr], text)
    # Always terminate with a reset so later output is unaffected.
    text += RESET
    return text
def cprint(text, color=None, on_color=None, attrs=None, **kwargs):
    """Colorized drop-in for print().

    All keyword arguments are forwarded to the print function.
    """
    rendered = colored(text, color, on_color, attrs)
    try:
        print(rendered, **kwargs)
    except TypeError:
        # Python 2.7's print() has no "flush" keyword; retry without it.
        kwargs.pop("flush", None)
        print(rendered, **kwargs)
def colored_map(text, cmap):
    """Return *text* with every token found in *cmap* colorized.

    *cmap* maps tokens either to a color name or to a dict of keyword
    options understood by :func:`colored`.

    Example:
        colored_map("foo bar", {"bar": "green"})
        colored_map("foo bar", {"bar": {"color": "green", "on_color": "on_red"}})
    """
    if not __ISON:
        return text
    for token, options in cmap.items():
        kwargs = options if isinstance(options, dict) else {"color": options}
        text = text.replace(token, colored(token, **kwargs))
    return text
def cprint_map(text, cmap, **kwargs):
    """Print *text* with the tokens in *cmap* colorized.

    *cmap* maps tokens to color options; *kwargs* are forwarded to print().

    Example:
        cprint_map("Hello world", {"Hello": "red"})
    """
    rendered = colored_map(text, cmap)
    try:
        print(rendered, **kwargs)
    except TypeError:
        # Python 2.7's print() lacks the "flush" keyword; retry without it.
        kwargs.pop("flush", None)
        print(rendered, **kwargs)
def get_terminal_size():
    """
    Return the size of the terminal as (nrows, ncols).

    Tries, in order: `stty size`, TIOCGWINSZ ioctl on stdin/stdout/stderr,
    the controlling terminal, and finally the LINES/COLUMNS environment
    variables (defaulting to 25x80).

    Based on:
        http://stackoverflow.com/questions/566746/how-to-get-console-window-
        width-in-python
    """
    try:
        rc = os.popen("stty size", "r").read().split()
        return int(rc[0]), int(rc[1])
    except Exception:
        pass
    env = os.environ

    def ioctl_GWINSZ(fd):
        # Query the kernel for the window size of the tty attached to fd.
        try:
            # BUG FIX: fcntl.ioctl requires a bytes buffer on Python 3;
            # the original str "1234" raised TypeError and silently
            # disabled this fallback path.
            rc = struct.unpack("hh", fcntl.ioctl(fd, termios.TIOCGWINSZ, b"1234"))
            return rc
        except Exception:
            return None

    rc = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
    if not rc:
        # stdin/stdout/stderr may all be redirected; try the controlling tty.
        try:
            fd = os.open(os.ctermid(), os.O_RDONLY)
            rc = ioctl_GWINSZ(fd)
            os.close(fd)
        except Exception:
            pass
    if not rc:
        # Last resort: environment variables, then a conventional default.
        rc = (env.get("LINES", 25), env.get("COLUMNS", 80))
    return int(rc[0]), int(rc[1])
""" This provides common functions for ipa """
from sssd.testlib.common.exceptions import SSSDException
from sssd.testlib.common.expect import pexpect_ssh
from sssd.testlib.common.exceptions import SSHLoginException
import subprocess
import pexpect
class ipaTools(object):
    """ Collection of assorted functions for ipa to be used in fixtures

        Attributes:
            Host(obj: `Multihost object type`): Multihost Object
    """
    def __init__(self, Host):
        """ Initialize multihost """
        self.multihost = Host

    def install_common_pkgs(self):
        """ Install common required packages """
        pkgs = 'ldb-tools tcpdump wireshark-cli expect python3-libsss_nss_idmap'
        if '8.' in self.multihost.distro:
            # RHEL 8 ships the IPA client via the idm module stream;
            # reset and enable it before installing.
            enable_idm1 = "dnf -y module reset idm"
            self.multihost.run_command(enable_idm1)
            enable_idm2 = "dnf -y module enable idm:DL1"
            self.multihost.run_command(enable_idm2)
            enable_idm3 = "dnf -y module install idm:DL1/client"
            self.multihost.run_command(enable_idm3)
        if 'Fedora' in self.multihost.distro:
            client_pkgs = ' freeipa-client'
            pkgs = pkgs + client_pkgs
        self.multihost.package_mgmt(pkgs, action='install')

    def setup_chrony(self, ntp_server='pool.ntp.org'):
        """ Synchronize time using chrony

            Attributes:
                ntp_server(str): NTP server. Default ntp_server is pool.ntp.org

            Return: bool

            Raises:
                SSSDException: when the one-shot sync against the server fails
        """
        stop_chrony = 'systemctl stop chronyd'
        self.multihost.run_command(stop_chrony)
        # One-shot sync (-q) against the given server while the daemon is down.
        cmd = "chronyd -q 'server %s iburst'" % ntp_server
        try:
            self.multihost.run_command(cmd)
        except subprocess.CalledProcessError:
            raise SSSDException("Unable to set ntp server")
        start_chrony = 'systemctl start chronyd'
        try:
            self.multihost.run_command(start_chrony)
        except subprocess.CalledProcessError:
            return False
        else:
            return True

    def get_default_nw_uuid(self):
        """ Get default network interface uuid """
        nmcli_cmd = "nmcli con show --active"
        cmd = self.multihost.run_command(nmcli_cmd, raiseonerr=False)
        # First data row of `nmcli con show --active`.
        # NOTE(review): index 2 is the UUID only when the connection name is
        # two words (e.g. "System eth0") -- confirm against the test hosts.
        conn_list = cmd.stdout_text.split('\n')[1].split(' ')
        filtered_list = list(filter(None, conn_list))
        return filtered_list[2]

    def get_interface_ip(self, uuid):
        """ Get IP Address associated with interface using nmcli

            Returns the IPv4 address string, or False when uuid is None
            or nmcli fails.
        """
        if uuid is None:
            return False
        nmcli_cmd = "nmcli -f IP4.ADDRESS conn show %s" % uuid
        cmd = self.multihost.run_command(nmcli_cmd, raiseonerr=False)
        if cmd.returncode == 0:
            # Output looks like "IP4.ADDRESS[1]: a.b.c.d/prefix"; drop prefix.
            ipaddr = cmd.stdout_text.split()[1].split('/')[0]
            return ipaddr
        else:
            return False

    def add_hbac_rule(self, rulename, username, hostname,
                      service, group=False):
        """ Add IPA hbac rule

            Raises:
                SSSDException: when any of the ipa commands fails
        """
        # add rule
        add_rule = "ipa hbacrule-add %s" % rulename
        # add user (or a user group when group=True)
        if group:
            add_user = "ipa hbacrule-add-user --groups %s %s" % (username,
                                                                 rulename)
        else:
            add_user = "ipa hbacrule-add-user --users %s %s" % (username,
                                                                rulename)
        # add host
        add_host = "ipa hbacrule-add-host --hosts %s %s " % (hostname,
                                                             rulename)
        # add service
        add_service = "ipa hbacrule-add-service --hbacsvcs=%s %s" % (service,
                                                                     rulename)
        cmd_list = [add_rule, add_user, add_host, add_service]
        for cmd in cmd_list:
            ret = self.multihost.run_command(cmd, raiseonerr=False)
            if ret.returncode != 0:
                raise SSSDException(ret.stderr_text)

    def del_hbac_rule(self, rulename):
        """ Delete hbac rule """
        # delete rule
        del_rule = "ipa hbacrule-del %s" % rulename
        ret = self.multihost.run_command(del_rule, raiseonerr=False)
        if ret.returncode != 0:
            raise SSSDException(ret.stderr_text)

    def METHOD_NAME(self, username, password, host, command=None):
        """ SSH login to host and optionally run a command.

            Returns True when login (and optional command) succeeded,
            False otherwise.
        """
        pxssh = pexpect_ssh(host, username, password, debug=False)
        try:
            pxssh.login()
        except SSHLoginException:
            return False
        except pexpect.exceptions.EOF:
            return False
        else:
            if command:
                # BUG FIX: the caller-supplied command was previously
                # ignored and 'id' was always executed instead.
                (output, ret) = pxssh.command(command)
                print(output)
                print("Return status: ", ret)
            pxssh.logout()
            del pxssh
            return True

    def create_group(self, group_name, external=False):
        """ Create a group; external=True creates an AD external map group """
        if external:
            grp_add = "ipa group-add --desc='%s users external map' "\
                      "%s --external" % (group_name, group_name)
        else:
            grp_add = "ipa group-add --desc='%s users' %s" % (group_name,
                                                              group_name)
        cmd = self.multihost.run_command(grp_add, raiseonerr=False)
        if cmd.returncode != 0:
            raise SSSDException(cmd.stderr_text)

    def group_add_member(self, source_group, target_group, external=False):
        """ Make source group member of target group """
        if external:
            add_mem = "ipa -n group-add-member %s "\
                      "--external %s" % (target_group, source_group)
        else:
            add_mem = "ipa group-add-member %s --groups %s" % (target_group,
                                                               source_group)
        cmd = self.multihost.run_command(add_mem, raiseonerr=False)
        if cmd.returncode != 0:
            raise SSSDException(cmd.stderr_text)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from astropy.nddata import block_reduce, block_replicate, reshape_as_blocks
class TestReshapeAsBlocks:
    """Tests for ``reshape_as_blocks``: shape handling in 1-3 dimensions,
    view (no-copy) semantics, and rejection of invalid block sizes."""

    def test_1d(self):
        # Every divisor of the length yields shape (n_blocks, block_size).
        data = np.arange(16)
        reshaped = reshape_as_blocks(data, 2)
        assert reshaped.shape == (8, 2)
        reshaped = reshape_as_blocks(data, 4)
        assert reshaped.shape == (4, 4)
        reshaped = reshape_as_blocks(data, 8)
        assert reshaped.shape == (2, 8)

    def test_2d(self):
        # Result shape is (blocks_y, blocks_x, block_y, block_x).
        data = np.arange(16).reshape(4, 4)
        reshaped = reshape_as_blocks(data, (2, 2))
        assert reshaped.shape == (2, 2, 2, 2)
        data = np.arange(64).reshape(8, 8)
        reshaped = reshape_as_blocks(data, (2, 2))
        assert reshaped.shape == (4, 4, 2, 2)
        reshaped = reshape_as_blocks(data, (4, 4))
        assert reshaped.shape == (2, 2, 4, 4)

    def test_3d(self):
        data = np.arange(64).reshape(4, 4, 4)
        reshaped = reshape_as_blocks(data, (2, 2, 2))
        assert reshaped.shape == (2, 2, 2, 2, 2, 2)
        # A block size of 1 along an axis is allowed.
        data = np.arange(2 * 3 * 4).reshape(2, 3, 4)
        reshaped = reshape_as_blocks(data, (2, 1, 2))
        assert reshaped.shape == (1, 3, 2, 2, 1, 2)

    def test_view(self):
        # The result must be a view: mutating the input shows through.
        data = np.arange(16).reshape(4, 4)
        reshaped = reshape_as_blocks(data, (2, 2))
        data[0, 0] = 100
        assert reshaped[0, 0, 0, 0] == 100

    def test_invalid_block_dim(self):
        # block_size rank must match the data rank (or be a scalar).
        data = np.arange(64).reshape(4, 4, 4)
        match = (
            "block_size must be a scalar or have the same "
            "length as the number of data dimensions"
        )
        with pytest.raises(ValueError, match=match):
            reshape_as_blocks(data, (2, 2))

    def METHOD_NAME(self):
        # Each block dimension must divide the data dimension evenly.
        data = np.arange(16).reshape(4, 4)
        match = (
            "Each dimension of block_size must divide evenly "
            "into the corresponding dimension of data"
        )
        with pytest.raises(ValueError, match=match):
            reshape_as_blocks(data, (2, 3))

    def test_invalid_block_value(self):
        # Non-integer and non-positive block sizes are rejected.
        data = np.arange(16).reshape(4, 4)
        match = "block_size elements must be integers"
        with pytest.raises(ValueError, match=match):
            reshape_as_blocks(data, (2.1, 2))
        match = "block_size elements must be strictly positive"
        with pytest.raises(ValueError, match=match):
            reshape_as_blocks(data, (-1, 0))
class TestBlockReduce:
    """Tests for ``block_reduce``: summing/averaging blocks, trimming of
    non-divisible inputs, and block_size validation."""

    def test_1d(self):
        """Test 1D array."""
        data = np.arange(4)
        expected = np.array([1, 5])
        result = block_reduce(data, 2)
        assert np.all(result == expected)

    def test_1d_mean(self):
        """Test 1D array with func=np.mean."""
        data = np.arange(4)
        block_size = 2.0
        # mean-reduction equals sum-reduction divided by the block size
        expected = block_reduce(data, block_size, func=np.sum) / block_size
        result_mean = block_reduce(data, block_size, func=np.mean)
        assert np.all(result_mean == expected)

    def test_2d(self):
        """Test 2D array."""
        data = np.arange(4).reshape(2, 2)
        expected = np.array([[6]])
        result = block_reduce(data, 2)
        assert np.all(result == expected)

    def test_2d_mean(self):
        """Test 2D array with func=np.mean."""
        data = np.arange(4).reshape(2, 2)
        block_size = 2.0
        # in 2D a block contains block_size**2 elements
        expected = block_reduce(data, block_size, func=np.sum) / block_size**2
        result = block_reduce(data, block_size, func=np.mean)
        assert np.all(result == expected)

    def test_2d_trim(self):
        """
        Test trimming of 2D array when size is not perfectly divisible
        by block_size.
        """
        # a 5x3 input must give the same result as its trimmed 4x2 view
        data1 = np.arange(15).reshape(5, 3)
        result1 = block_reduce(data1, 2)
        data2 = data1[0:4, 0:2]
        result2 = block_reduce(data2, 2)
        assert np.all(result1 == result2)

    def test_block_size_broadcasting(self):
        """Test scalar block_size broadcasting."""
        data = np.arange(16).reshape(4, 4)
        result1 = block_reduce(data, 2)
        result2 = block_reduce(data, (2, 2))
        assert np.all(result1 == result2)

    def test_block_size_len(self):
        """Test block_size length."""
        # block_size with more entries than data dimensions must fail
        data = np.ones((2, 2))
        with pytest.raises(ValueError):
            block_reduce(data, (2, 2, 2))
class TestBlockReplicate:
    """Tests for ``block_replicate``: upsampling with and without sum
    conservation, and block_size validation."""

    def test_1d(self):
        """Test 1D array."""
        # conserve_sum=True (default) divides each value across its block
        data = np.arange(2)
        expected = np.array([0, 0, 0.5, 0.5])
        result = block_replicate(data, 2)
        assert np.all(result == expected)

    def test_1d_conserve_sum(self):
        """Test 1D array with conserve_sum=False."""
        data = np.arange(2)
        block_size = 2.0
        # without conservation, values are simply repeated (x block_size)
        expected = block_replicate(data, block_size) * block_size
        result = block_replicate(data, block_size, conserve_sum=False)
        assert np.all(result == expected)

    def test_2d(self):
        """Test 2D array."""
        data = np.arange(2).reshape(2, 1)
        expected = np.array([[0, 0], [0, 0], [0.25, 0.25], [0.25, 0.25]])
        result = block_replicate(data, 2)
        assert np.all(result == expected)

    def test_2d_conserve_sum(self):
        """Test 2D array with conserve_sum=False."""
        data = np.arange(6).reshape(2, 3)
        block_size = 2.0
        # in 2D each value is spread over block_size**2 output elements
        expected = block_replicate(data, block_size) * block_size**2
        result = block_replicate(data, block_size, conserve_sum=False)
        assert np.all(result == expected)

    def test_block_size_broadcasting(self):
        """Test scalar block_size broadcasting."""
        data = np.arange(4).reshape(2, 2)
        result1 = block_replicate(data, 2)
        result2 = block_replicate(data, (2, 2))
        assert np.all(result1 == result2)

    def test_block_size_len(self):
        """Test block_size length."""
        data = np.arange(5)
        with pytest.raises(ValueError):
            block_replicate(data, (2, 2))
import sys
from typing import Optional, Sequence
import requests
import typer
from wasabi import msg
from .. import about
from ..errors import OLD_MODEL_SHORTCUTS
from ..util import get_minor_version, is_package, is_prerelease_version, run_command
from ._util import SDIST_SUFFIX, WHEEL_SUFFIX, Arg, Opt, app
@app.command(
    "download",
    # extra args are collected into ctx.args and forwarded to pip
    context_settings={"allow_extra_args": True, "ignore_unknown_options": True},
)
def download_cli(
    # fmt: off
    ctx: typer.Context,
    model: str = Arg(..., help="Name of pipeline package to download"),
    direct: bool = Opt(False, "--direct", "-d", "-D", help="Force direct download of name + version"),
    sdist: bool = Opt(False, "--sdist", "-S", help="Download sdist (.tar.gz) archive instead of pre-built binary wheel"),
    # fmt: on
):
    """
    Download compatible trained pipeline from the default download path using
    pip. If --direct flag is set, the command expects the full package name with
    version. For direct downloads, the compatibility check will be skipped. All
    additional arguments provided to this command will be passed to `pip install`
    on package installation.

    DOCS: https://spacy.io/api/cli#download
    AVAILABLE PACKAGES: https://spacy.io/models
    """
    # Thin CLI wrapper: all real work happens in download().
    download(model, direct, sdist, *ctx.args)
def download(
    model: str,
    direct: bool = False,
    sdist: bool = False,
    *pip_args,
) -> None:
    """Resolve *model* to a versioned package filename and install it via pip.

    When *direct* is True, *model* must be "<name>-<version>" and the
    compatibility lookup is skipped. Remaining *pip_args* are forwarded
    to `pip install`.
    """
    if (
        not (is_package("spacy") or is_package("spacy-nightly"))
        and "--no-deps" not in pip_args
    ):
        # Installed from source: installing deps would pull in spaCy itself.
        msg.warn(
            "Skipping pipeline package dependencies and setting `--no-deps`. "
            "You don't seem to have the spaCy package itself installed "
            "(maybe because you've built from source?), so installing the "
            "package dependencies would cause spaCy to be downloaded, which "
            "probably isn't what you want. If the pipeline package has other "
            "dependencies, you'll have to install them manually."
        )
        pip_args = pip_args + ("--no-deps",)
    if direct:
        # Split "<name>-<version>"; model names contain no hyphens, so
        # joining all but the last component recovers the name.
        components = model.split("-")
        model_name = "".join(components[:-1])
        version = components[-1]
    else:
        model_name = model
        if model in OLD_MODEL_SHORTCUTS:
            # v2-era shortcut names (e.g. "en") map to full package names.
            msg.warn(
                f"As of spaCy v3.0, shortcuts like '{model}' are deprecated. Please "
                f"use the full pipeline package name '{OLD_MODEL_SHORTCUTS[model]}' instead."
            )
            model_name = OLD_MODEL_SHORTCUTS[model]
        compatibility = get_compatibility()
        version = get_version(model_name, compatibility)
    filename = METHOD_NAME(model_name, version, sdist)
    download_model(filename, pip_args)
    msg.good(
        "Download and installation successful",
        f"You can now load the package via spacy.load('{model_name}')",
    )
def METHOD_NAME(model_name: str, version: str, sdist: bool = False) -> str:
    """Build the relative download path for a pipeline package release.

    Returns "{name}-{version}/{name}-{version}{suffix}" where the suffix
    selects either the sdist archive or the binary wheel.
    """
    if sdist:
        suffix = SDIST_SUFFIX
    else:
        suffix = WHEEL_SUFFIX
    release = f"{model_name}-{version}"
    return f"{release}/{release}{suffix}"
def get_compatibility() -> dict:
    """Fetch the model compatibility table and return the entry for the
    running spaCy version (exits the process when unavailable)."""
    if is_prerelease_version(about.__version__):
        # Pre-releases are listed under their full version string.
        version: Optional[str] = about.__version__
    else:
        version = get_minor_version(about.__version__)
    r = requests.get(about.__compatibility__)
    if r.status_code != 200:
        # msg.fail with exits=1 terminates the process here.
        msg.fail(
            f"Server error ({r.status_code})",
            f"Couldn't fetch compatibility table. Please find a package for your spaCy "
            f"installation (v{about.__version__}), and download it manually. "
            f"For more details, see the documentation: "
            f"https://spacy.io/usage/models",
            exits=1,
        )
    comp_table = r.json()
    comp = comp_table["spacy"]
    if version not in comp:
        msg.fail(f"No compatible packages found for v{version} of spaCy", exits=1)
    return comp[version]
def get_version(model: str, comp: dict) -> str:
    """Return the newest compatible version of *model* from the
    compatibility table *comp* (exits the process when absent)."""
    versions = comp.get(model)
    if versions is None:
        # msg.fail with exits=1 terminates the process here.
        msg.fail(
            f"No compatible package found for '{model}' (spaCy v{about.__version__})",
            exits=1,
        )
    return versions[0]
def get_latest_version(model: str) -> str:
    """Return the most recent compatible version of *model*."""
    return get_version(model, get_compatibility())
def download_model(
    filename: str, user_pip_args: Optional[Sequence[str]] = None
) -> None:
    """Install the package at *filename* (relative to the download URL)
    into the current interpreter via `pip install`."""
    download_url = about.__download_url__ + "/" + filename
    pip_args = [] if user_pip_args is None else list(user_pip_args)
    # Use the running interpreter's pip so the package lands in this env.
    run_command([sys.executable, "-m", "pip", "install", *pip_args, download_url])
"""
Convert an argparse parser to option directives.
Inspired by sphinxcontrib.autoprogram but with a few differences:
- Contains some simple pre-processing on the help messages to make
the Sphinx version a bit prettier.
"""
import argparse
import re
from textwrap import dedent
from docutils import nodes
from docutils.parsers.rst import Directive
from docutils.parsers.rst.directives import unchanged
from docutils.statemachine import ViewList
from sphinx.util.nodes import nested_parse_with_titles
# ":" followed by a blank line and a two-space indent -> literal block start.
_block_re = re.compile(r":\n{2}\s{2}")
# "Default is X." sentences (later bold-ified).
_default_re = re.compile(r"Default is (.+)\.\n")
# "Note: ..." paragraphs (later turned into .. note:: directives).
_note_re = re.compile(r"Note: (.*?)(?:\n\n|\n*$)", re.DOTALL)
# Lines eligible for option cross-linking (skips usage and Example lines).
_option_line_re = re.compile(r"^(?!\s{2,}%\(prog\)s|\s{2,}--\w[\w-]*\w\b|Example: )(.+)$", re.MULTILINE)
# A "--long-option" token with an optional "=VALUE" part.
_option_re = re.compile(r"(?:^|(?<=\s))(?P<arg>--\w[\w-]*\w)(?P<val>=\w+)?\b")
# argparse's "%(prog)s" placeholder.
_prog_re = re.compile(r"%\(prog\)s")
# argparse-escaped percent signs.
_percent_re = re.compile(r"%%")
# `code` spans delimited by single backticks (not double backticks).
_inline_code_block_re = re.compile(r"(?<!`)`([^`]+?)`")
# The remainder of a line starting with "Example: ".
_example_inline_code_block_re = re.compile(r"(?<=^Example: )(.+)$", re.MULTILINE)
def get_parser(module_name, attr):
    """Import *module_name* and return its *attr*.

    When the attribute is callable (a parser factory), call it and
    return the resulting parser instead.
    """
    module = __import__(module_name, globals(), locals(), [attr])
    parser = getattr(module, attr)
    if callable(parser):
        parser = parser()
    return parser
def indent(value, length=4):
    """Indent every line of *value* by *length* spaces."""
    prefix = " " * length
    return "\n".join(f"{prefix}{line}" for line in value.splitlines())
class ArgparseDirective(Directive):
    """Render an argparse parser as Sphinx ``option`` directives.

    Usage::

        .. argparse::
           :module: some.module
           :attr: parser_or_factory
    """
    has_content = True
    option_spec = {
        "module": unchanged,
        "attr": unchanged,
    }
    # Headline underline characters by nesting depth of argument groups.
    _headlines = ["^", "~"]

    def process_help(self, helptext):
        # Pre-process an argparse help string into reST. The substitution
        # order below matters: option links must be created before blocks
        # and notes are indented.
        # Dedent the help to make sure we are always dealing with
        # non-indented text.
        helptext = dedent(helptext)
        # Turn `code` spans into :code:`...` roles (escaping backslashes).
        helptext = _inline_code_block_re.sub(
            lambda m: (
                ":code:`{0}`".format(m.group(1).replace("\\", "\\\\"))
            ),
            helptext,
        )
        helptext = _example_inline_code_block_re.sub(r":code:`\1`", helptext)
        # Replace option references with links.
        # Do this before indenting blocks and notes.
        helptext = _option_line_re.sub(
            lambda m: _option_re.sub(
                lambda m2: f":option:`{m2['arg']}{m2['val'] or ''}`" if m2["arg"] in self._available_options else m2[0],
                m[1],
            ),
            helptext,
        )
        # Create simple blocks.
        helptext = _block_re.sub("::\n\n  ", helptext)
        # Boldify the default value.
        helptext = _default_re.sub(r"Default is: **\1**.\n", helptext)
        # Create note directives from "Note: " paragraphs.
        helptext = _note_re.sub(
            lambda m: ".. note::\n\n" + indent(m.group(1)) + "\n\n",
            helptext,
        )
        # workaround to replace %(prog)s with streamlink
        helptext = _prog_re.sub("streamlink", helptext)
        # fix escaped chars for percent-formatted argparse help strings
        helptext = _percent_re.sub("%", helptext)
        # create cross-links for the "Metadata variables" and "Plugins" sections
        helptext = re.sub(
            r"the \"Metadata variables\" section",
            "the \":ref:`Metadata variables <cli/metadata:Variables>`\" section",
            helptext,
        )
        helptext = re.sub(
            r"the \"Plugins\" section",
            "the \":ref:`Plugins <plugins:Plugins>`\" section",
            helptext,
        )
        return indent(helptext)

    def generate_group_rst(self, group):
        # Yield reST lines for every (non-suppressed) action of a group.
        for action in group._group_actions:
            # don't document suppressed parameters
            if action.help == argparse.SUPPRESS:
                continue
            metavar = action.metavar
            if isinstance(metavar, tuple):
                metavar = " ".join(metavar)
            options = []
            # parameter(s) with metavar
            if action.option_strings and metavar:
                for arg in action.option_strings:
                    # optional parameter value
                    if action.nargs == "?":
                        metavar = f"[{metavar}]"
                    options.append(f"{arg} {metavar}")
            # positional parameter
            elif metavar:
                options.append(metavar)
            # parameter(s) without metavar
            else:
                options += action.option_strings
            directive = ".. option:: "
            # Align continuation lines under the directive text.
            options = f"\n{' ' * len(directive)}".join(options)
            yield f"{directive}{options}"
            yield ""
            for line in self.process_help(action.help).split("\n"):
                yield line
            yield ""

    def METHOD_NAME(self, parser, parent=None, depth=0):
        # Recursively emit a titled section per argument group, nesting
        # sub-groups one headline level deeper.
        if depth >= len(self._headlines):
            return
        for group in parser.NESTED_ARGUMENT_GROUPS[parent]:
            is_parent = group in parser.NESTED_ARGUMENT_GROUPS
            # Exclude empty groups
            if not group._group_actions and not is_parent:
                continue
            title = group.title
            yield ""
            yield title
            yield self._headlines[depth] * len(title)
            yield from self.generate_group_rst(group)
            if is_parent:
                yield ""
                yield from self.METHOD_NAME(parser, group, depth + 1)

    def run(self):
        module = self.options.get("module")
        attr = self.options.get("attr")
        parser = get_parser(module, attr)
        # Collect all known option strings so process_help can decide
        # which "--foo" mentions become :option: cross-links.
        self._available_options = []
        for action in parser._actions:
            # positional parameters have an empty option_strings list
            self._available_options += action.option_strings or [action.dest]
        node = nodes.section()
        node.document = self.state.document
        result = ViewList()
        for line in self.METHOD_NAME(parser):
            result.append(line, "argparse")
        nested_parse_with_titles(self.state, result, node)
        return node.children
def setup(app):
    """Sphinx extension entry point: register the ``argparse`` directive."""
    app.add_directive("argparse", ArgparseDirective)
#!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2006-2018 (ita)
"""
C# support. A simple example::
def configure(conf):
conf.load('cs')
def build(bld):
bld(features='cs', source='main.cs', gen='foo')
Note that the configuration may compile C# snippets::
FRAG = '''
namespace Moo {
public class Test { public static int Main(string[] args) { return 0; } }
}'''
def configure(conf):
conf.check(features='cs', fragment=FRAG, compile_filename='test.cs', gen='test.exe',
bintype='exe', csflags=['-pkg:gtk-sharp-2.0'], msg='Checking for Gtksharp support')
"""
from waflib import Utils, Task, Options, Errors
from waflib.TaskGen import before_method, after_method, feature
from waflib.Tools import ccroot
from waflib.Configure import conf
# Register the env variables consumed by 'cs' task generators and the
# file pattern used to locate foreign C# shared libraries.
ccroot.USELIB_VARS['cs'] = set(['CSFLAGS', 'ASSEMBLIES', 'RESOURCES'])
ccroot.lib_patterns['csshlib'] = ['%s']
@feature('cs')
@before_method('process_source')
def apply_cs(self):
	"""
	Create a C# task bound to the attribute *cs_task*. There can be only one C# task by task generator.
	"""
	cs_nodes = []
	no_nodes = []
	# Split the sources: .cs files feed the mcs task, the rest are put
	# back for other features to process.
	for x in self.to_nodes(self.source):
		if x.name.endswith('.cs'):
			cs_nodes.append(x)
		else:
			no_nodes.append(x)
	self.source = no_nodes
	# Infer the binary type from the target name unless given explicitly.
	bintype = getattr(self, 'bintype', self.gen.endswith('.dll') and 'library' or 'exe')
	self.cs_task = tsk = self.create_task('mcs', cs_nodes, self.path.find_or_declare(self.gen))
	tsk.env.CSTYPE = '/target:%s' % bintype
	tsk.env.OUT = '/out:%s' % tsk.outputs[0].abspath()
	self.env.append_value('CSFLAGS', '/platform:%s' % getattr(self, 'platform', 'anycpu'))
	inst_to = getattr(self, 'install_path', bintype=='exe' and '${BINDIR}' or '${LIBDIR}')
	if inst_to:
		# note: we are making a copy, so the files added to cs_task.outputs won't be installed automatically
		mod = getattr(self, 'chmod', bintype=='exe' and Utils.O755 or Utils.O644)
		self.install_task = self.add_install_files(install_to=inst_to, install_from=self.cs_task.outputs[:], chmod=mod)
@feature('cs')
@after_method('apply_cs')
def use_cs(self):
	"""
	C# applications honor the **use** keyword::

		def build(bld):
			bld(features='cs', source='My.cs', bintype='library', gen='my.dll', name='mylib')
			bld(features='cs', source='Hi.cs', includes='.', bintype='exe', gen='hi.exe', use='mylib', name='hi')
	"""
	names = self.to_list(getattr(self, 'use', []))
	get = self.bld.get_tgen_by_name
	for x in names:
		try:
			y = get(x)
		except Errors.WafError:
			# Not a task generator in this build: treat as an external assembly.
			self.env.append_value('CSFLAGS', '/reference:%s' % x)
			continue
		y.post()
		tsk = getattr(y, 'cs_task', None) or getattr(y, 'link_task', None)
		if not tsk:
			self.bld.fatal('cs task has no link task for use %r' % self)
		self.cs_task.dep_nodes.extend(tsk.outputs) # dependency
		self.cs_task.set_run_after(tsk) # order (redundant, the order is inferred from the nodes inputs/outputs)
		self.env.append_value('CSFLAGS', '/reference:%s' % tsk.outputs[0].abspath())
@feature('cs')
@after_method('apply_cs', 'use_cs')
def METHOD_NAME(self):
	"""
	The C# targets may create .mdb or .pdb files::

		def build(bld):
			bld(features='cs', source='My.cs', bintype='library', gen='my.dll', csdebug='full')
			# csdebug is a value in (True, 'full', 'pdbonly')
	"""
	csdebug = getattr(self, 'csdebug', self.env.CSDEBUG)
	if not csdebug:
		return
	node = self.cs_task.outputs[0]
	# mono produces <assembly>.mdb, csc produces <assembly>.pdb
	if self.env.CS_NAME == 'mono':
		out = node.parent.find_or_declare(node.name + '.mdb')
	else:
		out = node.change_ext('.pdb')
	self.cs_task.outputs.append(out)
	if getattr(self, 'install_task', None):
		self.pdb_install_task = self.add_install_files(
			install_to=self.install_task.install_to, install_from=out)
	if csdebug == 'pdbonly':
		val = ['/debug+', '/debug:pdbonly']
	elif csdebug == 'full':
		val = ['/debug+', '/debug:full']
	else:
		val = ['/debug-']
	self.env.append_value('CSFLAGS', val)
@feature('cs')
@after_method('debug_cs')
def doc_cs(self):
	"""
	The C# targets may create .xml documentation files::

		def build(bld):
			bld(features='cs', source='My.cs', bintype='library', gen='my.dll', csdoc=True)
			# csdoc is a boolean value
	"""
	csdoc = getattr(self, 'csdoc', self.env.CSDOC)
	if not csdoc:
		return
	node = self.cs_task.outputs[0]
	# XML documentation is written next to the assembly.
	out = node.change_ext('.xml')
	self.cs_task.outputs.append(out)
	if getattr(self, 'install_task', None):
		self.doc_install_task = self.add_install_files(
			install_to=self.install_task.install_to, install_from=out)
	self.env.append_value('CSFLAGS', '/doc:%s' % out.abspath())
class mcs(Task.Task):
	"""
	Compile C# files
	"""
	color = 'YELLOW'
	run_str = '${MCS} ${CSTYPE} ${CSFLAGS} ${ASS_ST:ASSEMBLIES} ${RES_ST:RESOURCES} ${OUT} ${SRC}'

	def split_argfile(self, cmd):
		# When the command line is too long, flags are moved into an
		# @argfile; /noconfig must remain on the command line proper.
		inline = [cmd[0]]
		infile = []
		for x in cmd[1:]:
			# csc doesn't want /noconfig in @file
			if x.lower() == '/noconfig':
				inline.append(x)
			else:
				infile.append(self.quote_flag(x))
		return (inline, infile)
def configure(conf):
	"""
	Find a C# compiler, set the variable MCS for the compiler and CS_NAME (mono or csc)
	"""
	csc = getattr(Options.options, 'cscbinary', None)
	if csc:
		# User-provided binary; presumably find_program keeps a pre-set
		# env variable -- verify against the waf version in use.
		conf.env.MCS = csc
	conf.find_program(['csc', 'mcs', 'gmcs'], var='MCS')
	conf.env.ASS_ST = '/r:%s'
	conf.env.RES_ST = '/resource:%s'
	conf.env.CS_NAME = 'csc'
	# Any binary whose name contains "mcs" is assumed to be mono.
	if str(conf.env.MCS).lower().find('mcs') > -1:
		conf.env.CS_NAME = 'mono'
def options(opt):
	"""
	Add a command-line option for the configuration::

		$ waf configure --with-csc-binary=/foo/bar/mcs
	"""
	opt.add_option('--with-csc-binary', type='string', dest='cscbinary')
class fake_csshlib(Task.Task):
	"""
	Task used for reading a foreign .net assembly and adding the dependency on it
	"""
	color = 'YELLOW'
	inst_to = None

	def runnable_status(self):
		# Nothing to build: the assembly already exists on disk.
		return Task.SKIP_ME
@conf
def read_csshlib(self, name, paths=[]):
	"""
	Read a foreign .net assembly for the *use* system::

		def build(bld):
			bld.read_csshlib('ManagedLibrary.dll', paths=[bld.env.mylibrarypath])
			bld(features='cs', source='Hi.cs', bintype='exe', gen='hi.exe', use='ManagedLibrary.dll')

	:param name: Name of the library
	:type name: string
	:param paths: Folders in which the library may be found
	:type paths: list of string
	:return: A task generator having the feature *fake_lib* which will call :py:func:`waflib.Tools.ccroot.process_lib`
	:rtype: :py:class:`waflib.TaskGen.task_gen`
	"""
	return self(name=name, features='fake_lib', lib_paths=paths, lib_type='csshlib')
|
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import re
import sys
import llnl.util.lang
from spack.compiler import Compiler, UnsupportedCompilerFlag
from spack.version import Version
#: compiler symlink mappings for mixed f77 compilers
#: compiler symlink mappings for mixed f77 compilers
f77_mapping = [
    ("gfortran", os.path.join("clang", "gfortran")),
    ("xlf_r", os.path.join("xl_r", "xlf_r")),
    ("xlf", os.path.join("xl", "xlf")),
    ("pgfortran", os.path.join("pgi", "pgfortran")),
    ("ifort", os.path.join("intel", "ifort")),
]

#: compiler symlink mappings for mixed f90/fc compilers
fc_mapping = [
    ("gfortran", os.path.join("clang", "gfortran")),
    ("xlf90_r", os.path.join("xl_r", "xlf90_r")),
    ("xlf90", os.path.join("xl", "xlf90")),
    ("pgfortran", os.path.join("pgi", "pgfortran")),
    ("ifort", os.path.join("intel", "ifort")),
]
class Clang(Compiler):
    """Compiler definition for LLVM clang/clang++, with support for mixing
    in external Fortran compilers (flang, gfortran, xlf, ...)."""

    # Subclasses use possible names of C compiler
    cc_names = ["clang"]

    # Subclasses use possible names of C++ compiler
    cxx_names = ["clang++"]

    # Subclasses use possible names of Fortran 77 compiler
    f77_names = ["flang", "gfortran", "xlf_r"]

    # Subclasses use possible names of Fortran 90 compiler
    fc_names = ["flang", "gfortran", "xlf90_r"]

    version_argument = "--version"

    @property
    def debug_flags(self):
        return [
            "-gcodeview",
            "-gdwarf-2",
            "-gdwarf-3",
            "-gdwarf-4",
            "-gdwarf-5",
            "-gline-tables-only",
            "-gmodules",
            "-gz",
            "-g",
        ]

    @property
    def opt_flags(self):
        return ["-O0", "-O1", "-O2", "-O3", "-Ofast", "-Os", "-Oz", "-Og", "-O", "-O4"]

    # Clang has support for using different fortran compilers with the
    # clang executable.
    @property
    def link_paths(self):
        # clang links are always the same
        link_paths = {
            "cc": os.path.join("clang", "clang"),
            "cxx": os.path.join("clang", "clang++"),
        }

        # fortran links need to look at the actual compiler names from
        # compilers.yaml to figure out which named symlink to use
        for compiler_name, link_path in f77_mapping:
            if self.f77 and compiler_name in self.f77:
                link_paths["f77"] = link_path
                break
        else:
            link_paths["f77"] = os.path.join("clang", "flang")

        for compiler_name, link_path in fc_mapping:
            if self.fc and compiler_name in self.fc:
                link_paths["fc"] = link_path
                break
        else:
            link_paths["fc"] = os.path.join("clang", "flang")

        return link_paths

    @property
    def verbose_flag(self):
        return "-v"

    openmp_flag = "-fopenmp"

    @property
    def METHOD_NAME(self):
        # C++11 support landed in clang 3.3.
        if self.real_version < Version("3.3"):
            raise UnsupportedCompilerFlag(self, "the C++11 standard", "cxx11_flag", "< 3.3")
        return "-std=c++11"

    @property
    def cxx14_flag(self):
        if self.real_version < Version("3.4"):
            # BUG FIX: the message previously claimed "< 3.5", contradicting
            # the "< 3.4" version check above.
            raise UnsupportedCompilerFlag(self, "the C++14 standard", "cxx14_flag", "< 3.4")
        elif self.real_version < Version("3.5"):
            # clang 3.4 only knows the provisional spelling.
            return "-std=c++1y"

        return "-std=c++14"

    @property
    def cxx17_flag(self):
        if self.real_version < Version("3.5"):
            raise UnsupportedCompilerFlag(self, "the C++17 standard", "cxx17_flag", "< 3.5")
        elif self.real_version < Version("5.0"):
            # pre-5.0 clang only knows the provisional spelling.
            return "-std=c++1z"

        return "-std=c++17"

    @property
    def c99_flag(self):
        return "-std=c99"

    @property
    def c11_flag(self):
        if self.real_version < Version("3.0"):
            raise UnsupportedCompilerFlag(self, "the C11 standard", "c11_flag", "< 3.0")
        if self.real_version < Version("3.1"):
            # clang 3.0 only knows the provisional spelling.
            return "-std=c1x"
        return "-std=c11"

    @property
    def c17_flag(self):
        if self.real_version < Version("6.0"):
            raise UnsupportedCompilerFlag(self, "the C17 standard", "c17_flag", "< 6.0")
        return "-std=c17"

    @property
    def c23_flag(self):
        if self.real_version < Version("9.0"):
            raise UnsupportedCompilerFlag(self, "the C23 standard", "c23_flag", "< 9.0")
        return "-std=c2x"

    @property
    def cc_pic_flag(self):
        return "-fPIC"

    @property
    def cxx_pic_flag(self):
        return "-fPIC"

    @property
    def f77_pic_flag(self):
        return "-fPIC"

    @property
    def fc_pic_flag(self):
        return "-fPIC"

    required_libs = ["libclang"]

    @classmethod
    @llnl.util.lang.memoized
    def extract_version_from_output(cls, output):
        """Parse the clang version from `clang --version` output; returns
        "unknown" for Apple clang and AMD (AOCC) builds, which are handled
        by their own compiler classes."""
        ver = "unknown"
        if ("Apple" in output) or ("AMD" in output):
            return ver

        match = re.search(
            # Normal clang compiler versions are left as-is
            r"clang version ([^ )\n]+)-svn[~.\w\d-]*|"
            # Don't include hyphenated patch numbers in the version
            # (see https://github.com/spack/spack/pull/14365 for details)
            r"clang version ([^ )\n]+?)-[~.\w\d-]*|" r"clang version ([^ )\n]+)",
            output,
        )
        if match:
            ver = match.group(match.lastindex)
        return ver

    @classmethod
    def fc_version(cls, fc):
        # We could map from gcc/gfortran version to clang version, but on macOS
        # we normally mix any version of gfortran with any version of clang.
        if sys.platform == "darwin":
            return cls.default_version("clang")
        else:
            return cls.default_version(fc)

    @classmethod
    def f77_version(cls, f77):
        return cls.fc_version(f77)
# Copyright 2020-2023 Capypara and the SkyTemple Contributors
#
# This file is part of SkyTemple.
#
# SkyTemple is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SkyTemple is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SkyTemple. If not, see <https://www.gnu.org/licenses/>.
import logging
import re
from typing import TYPE_CHECKING, Optional, List
from gi.repository import Gtk
from range_typed_integers import i32_checked, i32, u16_checked, u16
from skytemple.core.module_controller import AbstractController
from skytemple.controller.main import MainController
from skytemple.core.message_dialog import SkyTempleMessageDialog
from skytemple.core.string_provider import StringType
from skytemple.core.ui_utils import catch_overflow
from skytemple_files.hardcoded.menus import MenuEntry, MenuType
from skytemple_files.common.i18n_util import _
if TYPE_CHECKING:
from skytemple.module.lists.module import ListsModule
# Matches strings like "Something (#12)" and captures the numeric id.
PATTERN_ITEM_ENTRY = re.compile(r'.*\(#(\d+)\).*')
logger = logging.getLogger(__name__)
class MenuListController(AbstractController):
    """Controller for the hardcoded "Menus" list view.

    Lets the user pick one of the game's hardcoded menus and a language,
    then edit each menu entry's name/description string IDs, the localized
    strings behind those IDs, and the entry's action code.
    """

    def __init__(self, module: 'ListsModule', *args):
        super().__init__(module, *args)
        self.module = module
        # Resolves string IDs to localized text for the selected language.
        self._string_provider = module.project.get_string_provider()

    def METHOD_NAME(self) -> Gtk.Widget:
        """Build the view from its Glade file and return the root widget."""
        self.builder = self._get_builder(__file__, 'menu_list.glade')
        lst: Gtk.Box = self.builder.get_object('box_list')
        # NOTE(review): the rank table is not used anywhere else in this
        # controller; looks copy-pasted from the rank list view -- confirm.
        self._rank_up_table = self.module.get_rank_list()
        self._init_combos()
        # Sets self._current_lang and performs the initial list refresh.
        self.on_lang_changed()
        self.builder.connect_signals(self)
        return lst

    def _init_combos(self):
        """Populate the menu-selection and language-selection combo boxes."""
        # Init available menus
        cb_store: Gtk.ListStore = self.builder.get_object('cb_store_menu')
        cb: Gtk.ComboBoxText = self.builder.get_object('cb_menu')
        # Init combobox
        cb_store.clear()
        for v in sorted(MenuType, key=lambda x:x.menu_name):
            cb_store.append([v.value, v.menu_name])
        cb.set_active(0)
        # Init available languages
        cb_store = self.builder.get_object('cb_store_lang')
        cb = self.builder.get_object('cb_lang')
        # Init combobox
        cb_store.clear()
        for lang in self._string_provider.get_languages():
            cb_store.append([lang.locale, lang.name_localized])
        cb.set_active(0)

    def _get_current_settings(self) -> int:
        """Return the id of the menu currently selected in the combo box."""
        cb_store: Gtk.ListStore = self.builder.get_object('cb_store_menu')
        cb: Gtk.ComboBoxText = self.builder.get_object('cb_menu')
        return cb_store[cb.get_active_iter()][0]

    def on_btn_help_clicked(self, *args):
        """Show a help dialog explaining the rules for editing menus."""
        md = SkyTempleMessageDialog(MainController.window(),
                                    Gtk.DialogFlags.DESTROY_WITH_PARENT, Gtk.MessageType.INFO,
                                    Gtk.ButtonsType.OK,
                                    _("Menus are hardcoded so you can't add any option, but you can edit a few things.\n"
                                      "However, there are some rules: \n"
                                      " - Only the Game Main & Sub menus can have descriptions. \n"
                                      " - The action code tells the game what to do when selecting this menu. \n"
                                      "The meaning of the code values depends on the menu. \n"
                                      "It is also used to determine if a menu should be disabled (or hidden in the main menu).\n"
                                      " - The end of the menu is detected by the game with an entry in which the Name ID is set to 0. \n"
                                      "Also, the action code of that entry is used when pressing the B button (if the game allows it for this menu). \n"
                                      " - Editing a string with a specific ID will result of all strings using that ID to be changed."))
        md.run()
        md.destroy()

    def on_lang_changed(self, *args):
        """Switch the displayed strings to the newly selected language."""
        cb_store: Gtk.ListStore = self.builder.get_object('cb_store_lang')
        cb: Gtk.ComboBoxText = self.builder.get_object('cb_lang')
        self._current_lang = cb_store[cb.get_active_iter()][0]
        self._refresh_list()

    def on_menu_changed(self, *args):
        """Reload the entry list for the newly selected menu."""
        self._refresh_list()

    def _regenerate_list(self):
        """Write the edited tree rows back to the module and refresh the view."""
        menu_id = self._get_current_settings()
        tree_store: Gtk.ListStore = self.builder.get_object('tree_store')
        new_list = []
        for row in tree_store:
            # Tree columns: 0 = name id, 2 = description id, 4 = action code.
            new_list.append(MenuEntry(row[0], row[2], row[4]))
        self.module.set_menu(menu_id, new_list)
        self._refresh_list()

    @catch_overflow(u16)
    def on_id_name_edited(self, widget, path, text):
        """Handle editing of an entry's name string ID."""
        try:
            tree_store: Gtk.ListStore = self.builder.get_object('tree_store')
            tree_store[path][0] = u16_checked(int(text))
        except ValueError:
            # Non-numeric input: ignore the edit.
            return
        self._regenerate_list()

    @catch_overflow(u16)
    def on_id_description_edited(self, widget, path, text):
        """Handle editing of an entry's description string ID."""
        try:
            tree_store: Gtk.ListStore = self.builder.get_object('tree_store')
            tree_store[path][2] = u16_checked(int(text))
        except ValueError:
            return
        self._regenerate_list()

    @catch_overflow(i32)
    def on_action_edited(self, widget, path, text):
        """Handle editing of an entry's action code."""
        try:
            tree_store: Gtk.ListStore = self.builder.get_object('tree_store')
            tree_store[path][4] = i32_checked(int(text))
        except ValueError:
            return
        self._regenerate_list()

    def on_string_name_edited(self, widget, path, text):
        """Handle editing of the localized name string itself."""
        tree_store: Gtk.ListStore = self.builder.get_object('tree_store')
        current_id = int(tree_store[path][0])
        if current_id>0:
            # String IDs are 1-based; 0 means "no string" / end of menu.
            self._string_provider.get_model(self._current_lang).strings[
                current_id-1
            ] = text
        self._regenerate_list()
        self.module.mark_string_as_modified()

    def on_string_description_edited(self, widget, path, text):
        """Handle editing of the localized description string itself."""
        tree_store: Gtk.ListStore = self.builder.get_object('tree_store')
        current_id = int(tree_store[path][2])
        if current_id>0:
            self._string_provider.get_model(self._current_lang).strings[
                current_id-1
            ] = text
        self._regenerate_list()
        self.module.mark_string_as_modified()

    def _refresh_list(self):
        """Rebuild the tree view contents for the current menu and language."""
        tree: Gtk.TreeView = self.builder.get_object('tree')
        self._list_store: Gtk.ListStore = tree.get_model()
        self._list_store.clear()
        # Iterate list
        menu_id = self._get_current_settings()
        menu_list = self.module.get_menu(menu_id)
        tree_store: Gtk.ListStore = self.builder.get_object('tree_store')
        tree_store.clear()
        str_list = self._string_provider.get_model(self._current_lang).strings
        for m in menu_list:
            if m.name_id>0:
                name = str_list[m.name_id-1]
            else:
                name = ""
            if m.description_id>0:
                description = str_list[m.description_id-1]
            else:
                description = ""
            tree_store.append([m.name_id, name, m.description_id, description, m.action])
from pathlib import Path
from torch.autograd.profiler import profile
from .prof_utils import BaseProfiler, _format_time, _format_memory, _format_bandwidth
from typing import List
def METHOD_NAME(dtype: str) -> int:
    """Return the size in bytes of one element of the given precision.

    Args:
        dtype: precision name, either "fp16" or "fp32".

    Raises:
        NotImplementedError: for any other precision name.
    """
    # Mapping is easier to extend than the previous if/elif chain.
    element_sizes = {"fp16": 2, "fp32": 4}
    if dtype not in element_sizes:
        raise NotImplementedError(f"unsupported dtype: {dtype!r}")
    return element_sizes[dtype]
def _get_numel(my_list: List[int]) -> int:
from functools import reduce
from operator import mul
return reduce(mul, my_list)
def _reduce_location(locations: List[str]) -> str:
ret = []
for lo in locations:
ret.append(lo)
ret.append("\n")
ret = ret[:-1]
return ''.join(ret)
class PcieEvent(object):
    """Aggregated statistics for PCIE data-transfer events.

    Tracks how many copies occurred, the total volume moved over PCIE
    (bytes) and the total CUDA time they took.
    """

    # Attribute names accumulated by add(), in a fixed order.
    _FIELDS = ("count", "pcie_vol", "cuda_time")

    def __init__(self, count: int = 0, pcie_vol: int = 0, cuda_time: int = 0):
        self.count = count
        self.pcie_vol = pcie_vol
        self.cuda_time = cuda_time

    def add(self, rhs):
        """Merge another event's statistics into this one, in place."""
        for name in self._FIELDS:
            setattr(self, name, getattr(self, name) + getattr(rhs, name))
class PcieProfiler(BaseProfiler):
    """Pcie profiler. Records all data transmission between CPU and GPU.

    Wraps ``torch.autograd.profiler.profile`` between :meth:`enable` and
    :meth:`disable`, then aggregates copy events per source-code location.

    TODO: Merge pcie profiler into communication profiler
    """

    def __init__(self, dtype: str = "fp32", depth: int = 1):
        # depth: number of stack frames used to attribute an event to a
        # source-code location.
        super().__init__(profiler_name="Pcie", priority=10)
        self.depth = depth
        # Bytes per element; used to estimate transfer volume from shapes.
        self.data_size = METHOD_NAME(dtype)
        self.h2d_count = 0
        self.h2d_time = 0
        self.d2h_count = 0
        self.d2h_time = 0
        self.ops_record = dict()
        self.profiler = None

    def reset(self):
        """Clear all accumulated statistics and drop the current profiler."""
        self.h2d_count = 0
        self.h2d_time = 0
        self.d2h_count = 0
        self.d2h_time = 0
        self.ops_record = dict()
        self.profiler = None

    def enable(self):
        """Start profiling; shapes and stacks are recorded for attribution."""
        self.profiler = profile(enabled=True,
                                use_cuda=True,
                                use_cpu=True,
                                use_kineto=True,
                                record_shapes=True,
                                with_stack=True)
        self.profiler.__enter__()

    def disable(self):
        """Stop profiling and fold the recorded events into the counters."""
        self.profiler.__exit__(None, None, None)
        if self.profiler.enabled:
            events = self.profiler.function_events
            for event in events:
                if event.name == "aten::copy_":
                    t_shape = event.input_shapes[0]
                    # Skip events that cannot be attributed (no shape or no
                    # stack) or that did not spend any CUDA time.
                    if len(t_shape) == 0 or event.cuda_time_total == 0 or len(event.stack) == 0:
                        continue
                    current_comm_event = PcieEvent(1, self.data_size * _get_numel(t_shape), event.cuda_time_total)
                    code_location = _reduce_location(event.stack[:self.depth])
                    if code_location in self.ops_record:
                        self.ops_record[code_location].add(current_comm_event)
                    else:
                        self.ops_record[code_location] = current_comm_event
                elif 'Memcpy HtoD' in event.name:
                    self.h2d_count += 1
                    self.h2d_time += event.cuda_time_total
                elif 'Memcpy DtoH' in event.name:
                    self.d2h_count += 1
                    self.d2h_time += event.cuda_time_total
        self.profiler = None

    def to_tensorboard(self, writer):
        """Write the textual report to a TensorBoard summary writer."""
        writer.add_text(tag="Data Transmission", text_string=self.result_str("\n\n"))

    def to_file(self, filename: Path):
        """Write the textual report to ``filename``."""
        with open(filename, "w") as f:
            f.write(self.result_str())

    def show(self):
        """Print the textual report to stdout."""
        print(self.result_str())

    def result_str(self, sep: str = "\n"):
        """Render the collected statistics as a table, joined with ``sep``."""
        res = []

        def append(s: str = None):
            # Appending None just emits a separator (blank line).
            if s is not None:
                res.append(s)
            res.append(sep)

        append("Pcie profiling result:")
        append("time of data transmission (CPU -> GPU): {}".format(_format_time(self.h2d_time)))
        append("number of transmission (CPU -> GPU): {}".format(self.h2d_count))
        append("time of data transmission (GPU -> CPU): {}".format(_format_time(self.d2h_time)))
        append("number of transmission (GPU -> CPU): {}".format(self.d2h_count))
        append("Possible data transmission events in PCIE:")
        separation = '-' * 62
        row_format = '{:^10}' + '{:^12}' + '{:^16}' + '{:^12}' * 2
        append(separation)
        append(row_format.format('Location', 'GPU time', 'Trans volume', 'Bandwidth', 'Num of calls'))
        append(separation)
        # Busiest locations (largest CUDA time) first.
        show_list = sorted(self.ops_record.items(), key=lambda kv: -kv[1].cuda_time)
        for location, event in show_list:
            append(location)
            append(
                row_format.format('', _format_time(event.cuda_time), _format_memory(event.pcie_vol),
                                  _format_bandwidth(event.pcie_vol, event.cuda_time), event.count))
        append()
        return ''.join(res)
# Copyright (C) 2013-2020 ycmd contributors
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
import threading
from importlib import import_module
from ycmd.completers.general.general_completer_store import (
GeneralCompleterStore )
from ycmd.completers.language_server import generic_lsp_completer
from ycmd.utils import LOGGER
def _GetGenericLSPCompleter( user_options, filetype ):
  """Return a GenericLSPCompleter for |filetype| if the user configured a
  custom language server for it, otherwise None."""
  for server_settings in user_options[ 'language_server' ]:
    if filetype not in server_settings[ 'filetypes' ]:
      continue
    try:
      return generic_lsp_completer.GenericLSPCompleter(
        user_options, server_settings )
    except Exception:
      LOGGER.exception( "Unable to instantiate generic completer for "
                        f"filetype { filetype }" )
  # We might just use a built-in completer
  return None
class ServerState:
  """Per-session server state: user options, the general completer store and
  a lazily-populated cache of semantic completers keyed by filetype."""

  def __init__( self, user_options ):
    self._user_options = user_options
    # filetype -> completer (or None when no completer exists). Guarded by a
    # lock because requests may arrive on multiple threads.
    self._filetype_completers = {}
    self._filetype_completers_lock = threading.Lock()
    self._gencomp = GeneralCompleterStore( self._user_options )

  @property
  def user_options( self ):
    return self._user_options

  def Shutdown( self ):
    """Shut down every loaded completer, semantic and general."""
    with self._filetype_completers_lock:
      for completer in self._filetype_completers.values():
        if completer:
          completer.Shutdown()

    self._gencomp.Shutdown()

  def _GetFiletypeCompleterForFiletype( self, filetype ):
    """Return (creating and caching on first use) the semantic completer for
    |filetype|, or None if no completer supports it. User-configured generic
    LSP completers take precedence over built-in ones."""
    with self._filetype_completers_lock:
      try:
        return self._filetype_completers[ filetype ]
      except KeyError:
        pass

      completer = _GetGenericLSPCompleter( self._user_options, filetype )
      if completer is None:
        try:
          module = import_module( f'ycmd.completers.{ filetype }.hook' )
          completer = module.GetCompleter( self._user_options )
        except ImportError:
          completer = None

      supported_filetypes = { filetype }
      if completer:
        supported_filetypes.update( completer.SupportedFiletypes() )

      # Cache the result (even None) for every filetype the completer
      # claims to support, so future lookups are O(1).
      for supported_filetype in supported_filetypes:
        if supported_filetype not in self._filetype_completers:
          self._filetype_completers[ supported_filetype ] = completer
      return completer

  def GetFiletypeCompleter( self, current_filetypes ):
    """Return the first available semantic completer for |current_filetypes|;
    raises ValueError when none exists."""
    completers = [ self._GetFiletypeCompleterForFiletype( filetype )
                   for filetype in current_filetypes ]

    for completer in completers:
      if completer:
        return completer

    raise ValueError(
      f'No semantic completer exists for filetypes: { current_filetypes }' )

  def GetLoadedFiletypeCompleters( self ):
    """Return the set of distinct completers instantiated so far."""
    with self._filetype_completers_lock:
      return { completer for completer in
               self._filetype_completers.values() if completer }

  def FiletypeCompletionAvailable( self, filetypes, silent = False ):
    """Returns True if there is a ycmd semantic completer defined for any
    filetype in the list |filetypes|. Otherwise, returns False and prints an
    error to the log file, unless silent = True."""
    try:
      self.GetFiletypeCompleter( filetypes )
      return True
    except Exception:
      if not silent:
        LOGGER.exception( 'Semantic completion not available for %s',
                          filetypes )
      return False

  def FiletypeCompletionUsable( self, filetypes, silent = False ):
    """Return True if ycmd supports semantic completion for any filetype in
    the list |filetypes| and those filetypes are not disabled by user
    options."""
    return ( self.METHOD_NAME( filetypes ) and
             self.FiletypeCompletionAvailable( filetypes, silent ) )

  def ShouldUseFiletypeCompleter( self, request_data ):
    """Determines whether or not the semantic completion should be called for
    completion request."""
    filetypes = request_data[ 'filetypes' ]
    if not self.FiletypeCompletionUsable( filetypes ):
      # don't use semantic, ignore whether or not the user requested forced
      # completion as that's not relevant to signatures.
      return False

    if request_data[ 'force_semantic' ]:
      # use semantic, and it was forced
      return True

    filetype_completer = self.GetFiletypeCompleter( filetypes )
    # was not forced. check the conditions for triggering
    return filetype_completer.ShouldUseNow( request_data )

  def GetGeneralCompleter( self ):
    return self._gencomp

  def METHOD_NAME( self, current_filetypes ):
    """Return False if all filetypes in the list |current_filetypes| are
    disabled by the user option 'filetype_specific_completion_to_disable'."""
    filetype_to_disable = self._user_options[
      'filetype_specific_completion_to_disable' ]
    if '*' in filetype_to_disable:
      return False
    else:
      # Enabled as long as at least one requested filetype is not disabled.
      return not all( x in filetype_to_disable for x in current_filetypes )
from collections import defaultdict, deque, OrderedDict
import copy
import datetime
import hashlib
import time
import paddle
import paddle.distributed as dist
import errno
import os
class SmoothedValue(object):
    """Track a series of values and provide access to smoothed values over a
    window or the global series average.
    """

    def __init__(self, window_size=20, fmt=None):
        # fmt: format string used by __str__; may reference median, avg,
        # global_avg, METHOD_NAME and value.
        if fmt is None:
            fmt = "{median:.4f} ({global_avg:.4f})"
        self.deque = deque(maxlen=window_size)  # sliding window of raw values
        self.total = 0.0  # running sum over the whole series
        self.count = 0  # number of samples seen over the whole series
        self.fmt = fmt

    def update(self, value, n=1):
        """Record ``value`` (interpreted as an average over ``n`` samples)."""
        self.deque.append(value)
        self.count += n
        self.total += value * n

    def synchronize_between_processes(self):
        """
        Warning: does not synchronize the deque!
        """
        # NOTE(review): no dist.all_reduce is performed here, so count/total
        # are only round-tripped through a tensor, not actually combined
        # across processes -- confirm whether that is intended.
        t = paddle.to_tensor([self.count, self.total], dtype='float64')
        t = t.numpy().tolist()
        self.count = int(t[0])
        self.total = t[1]

    @property
    def median(self):
        # Median of the values currently in the window.
        d = paddle.to_tensor(list(self.deque))
        return d.median().numpy().item()

    @property
    def avg(self):
        # Mean of the values currently in the window.
        d = paddle.to_tensor(list(self.deque), dtype='float32')
        return d.mean().numpy().item()

    @property
    def global_avg(self):
        # Mean over the whole series; assumes update() was called at least once.
        return self.total / self.count

    @property
    def METHOD_NAME(self):
        # Window maximum (METHOD_NAME in the body resolves to the builtin,
        # since a class body does not form a closure over its own names).
        return METHOD_NAME(self.deque)

    @property
    def value(self):
        # Most recently recorded value.
        return self.deque[-1]

    def __str__(self):
        return self.fmt.format(
            median=self.median,
            avg=self.avg,
            global_avg=self.global_avg,
            METHOD_NAME=self.METHOD_NAME,
            value=self.value)
class MetricLogger(object):
    """Collects named SmoothedValue meters and renders periodic progress logs."""

    def __init__(self, delimiter="\t"):
        # Unknown meter names lazily get a default-configured SmoothedValue.
        self.meters = defaultdict(SmoothedValue)
        self.delimiter = delimiter

    def update(self, **kwargs):
        """Record one value per named meter; 0-d tensors are unwrapped first."""
        for k, v in kwargs.items():
            if isinstance(v, paddle.Tensor):
                v = v.item()
            assert isinstance(v, (float, int))
            self.meters[k].update(v)

    def __getattr__(self, attr):
        # Expose meters as attributes (e.g. logger.loss). Only called when
        # normal attribute lookup fails.
        if attr in self.meters:
            return self.meters[attr]
        if attr in self.__dict__:
            return self.__dict__[attr]
        raise AttributeError("'{}' object has no attribute '{}'".format(
            type(self).__name__, attr))

    def __str__(self):
        loss_str = []
        for name, meter in self.meters.items():
            loss_str.append("{}: {}".format(name, str(meter)))
        return self.delimiter.join(loss_str)

    def synchronize_between_processes(self):
        """Synchronize every meter's global counters across processes."""
        for meter in self.meters.values():
            meter.synchronize_between_processes()

    def add_meter(self, name, meter):
        """Register a meter with custom formatting under ``name``."""
        self.meters[name] = meter

    def log_every(self, iterable, print_freq, header=None):
        """Yield from ``iterable``, printing a progress line with ETA and
        timing stats every ``print_freq`` iterations."""
        i = 0
        if not header:
            header = ''
        start_time = time.time()
        end = time.time()
        iter_time = SmoothedValue(fmt='{avg:.4f}')
        data_time = SmoothedValue(fmt='{avg:.4f}')
        # Pad the iteration counter to the width of the total count.
        space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
        log_msg = self.delimiter.join([
            header, '[{0' + space_fmt + '}/{1}]', 'eta: {eta}', '{meters}',
            'time: {time}', 'data: {data}'
        ])
        for obj in iterable:
            # Time spent waiting for the next item (data loading).
            data_time.update(time.time() - end)
            yield obj
            # Full step time: data loading plus the consumer's work.
            iter_time.update(time.time() - end)
            if i % print_freq == 0:
                eta_seconds = iter_time.global_avg * (len(iterable) - i)
                eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
                print(
                    log_msg.format(
                        i,
                        len(iterable),
                        eta=eta_string,
                        meters=str(self),
                        time=str(iter_time),
                        data=str(data_time)))
            i += 1
            end = time.time()
        total_time = time.time() - start_time
        total_time_str = str(datetime.timedelta(seconds=int(total_time)))
        print('{} Total time: {}'.format(header, total_time_str))
def accuracy(output, target, topk=(1, )):
    """Computes the accuracy over the k top predictions for the specified values of k"""
    with paddle.no_grad():
        # METHOD_NAME here is the builtin: the largest requested k.
        maxk = METHOD_NAME(topk)
        batch_size = target.shape[0]
        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()  # shape becomes (maxk, batch)
        # NOTE(review): the reference implementation expands target to
        # pred's shape before comparing; verify that paddle's equal()
        # broadcasts (maxk, batch) against (batch,) the same way here.
        correct = pred.equal(target.astype("int64"))
        res = []
        for k in topk:
            # Fraction of samples whose label is within the top-k predictions.
            correct_k = correct.astype(paddle.int32)[:k].flatten().sum(
                dtype='float32')
            res.append(correct_k / batch_size)
        return res
def get_world_size():
    """Return the number of processes in the default distributed group."""
    return dist.get_world_size()
def mkdir(path):
    """Create directory ``path`` (including parents), ignoring the error
    raised when it already exists; any other OS error is propagated.
    """
    try:
        os.makedirs(path)
    except FileExistsError:
        # FileExistsError is the OSError subclass for errno.EEXIST, so this
        # is equivalent to the old explicit errno check.
        pass
1,087 | test t5 bundler train | import copy
from unittest.mock import patch
import torch
from torch.nn import functional as F
from torchtext_unittest.common.case_utils import TestBaseMixin
class T5BaseTestModels(TestBaseMixin):
    """Shared T5 bundler tests; device/dtype come from TestBaseMixin."""

    def test_t5_bundler_build_model(self) -> None:
        """build_model must load a user-supplied state dict verbatim for
        encoder-only, encoder-decoder, and generation configurations."""
        from torchtext.models import T5Conf, T5Model, T5Bundle

        # case: user provides encoder checkpoint state dict
        dummy_encoder_conf = T5Conf(
            encoder_only=True,
            vocab_size=10,
            embedding_dim=16,
            ffn_dimension=64,
            num_attention_heads=2,
            num_encoder_layers=2,
            num_decoder_layers=2,
        )
        dummy_t5_encoder = T5Model(dummy_encoder_conf)
        t5_encoder_model = T5Bundle.build_model(config=dummy_encoder_conf, checkpoint=dummy_t5_encoder.state_dict())
        self.assertEqual(t5_encoder_model.state_dict(), dummy_t5_encoder.state_dict())

        # case: user provides encoder-decoder checkpoint state dict
        dummy_t5_conf = T5Conf(
            encoder_only=False,
            vocab_size=10,
            embedding_dim=16,
            ffn_dimension=64,
            num_attention_heads=2,
            num_encoder_layers=2,
            num_decoder_layers=2,
        )
        dummy_t5 = T5Model(dummy_t5_conf)
        t5_model = T5Bundle.build_model(config=dummy_t5_conf, checkpoint=dummy_t5.state_dict())
        self.assertEqual(t5_model.state_dict(), dummy_t5.state_dict())

        # case: user provides checkpoint state dict for encoder-decoder with generation
        dummy_t5_generation_conf = T5Conf(
            encoder_only=False,
            linear_head=True,
            vocab_size=10,
            embedding_dim=16,
            ffn_dimension=64,
            num_attention_heads=2,
            num_encoder_layers=2,
            num_decoder_layers=2,
        )
        dummy_t5_generation = T5Model(dummy_t5_generation_conf)
        t5_generation_model = T5Bundle.build_model(
            config=dummy_t5_generation_conf, checkpoint=dummy_t5_generation.state_dict()
        )
        self.assertEqual(t5_generation_model.state_dict(), dummy_t5_generation.state_dict())

    @patch("logging.Logger.warning")
    def test_t5_bundler_get_model(self, mock):
        """Freezing a model without pre-trained weights must log a warning."""
        from torchtext.models import T5Conf, T5Bundle

        # encoder-decoder with generation
        dummy_t5_generation_conf = T5Conf(
            encoder_only=False,
            linear_head=True,
            vocab_size=10,
            embedding_dim=16,
            ffn_dimension=64,
            num_attention_heads=2,
            num_encoder_layers=2,
            num_decoder_layers=2,
        )
        t5_generation_bundle = T5Bundle(dummy_t5_generation_conf)
        t5_generation_bundle.get_model(load_weights=False, freeze_model=True)
        mock.assert_called_with(
            "The model is not loaded with pre-trained weights. Setting freeze_model to True will hinder model from learning appropriate weights."
        )

    def test_t5_bundler_raise_checkpoint(self) -> None:
        """A checkpoint that is neither a dict nor a path must raise TypeError
        for every configuration variant."""
        from torchtext.models import T5Conf, T5Bundle

        # encoder-only
        with self.assertRaises(TypeError):
            dummy_encoder_conf = T5Conf(
                encoder_only=True,
                vocab_size=10,
                embedding_dim=16,
                ffn_dimension=64,
                num_attention_heads=2,
                num_encoder_layers=2,
                num_decoder_layers=2,
            )
            T5Bundle.build_model(
                config=dummy_encoder_conf,
                freeze_model=True,
                checkpoint=1,
            )

        # encoder-decoder
        with self.assertRaises(TypeError):
            dummy_t5_conf = T5Conf(
                encoder_only=False,
                vocab_size=10,
                embedding_dim=16,
                ffn_dimension=64,
                num_attention_heads=2,
                num_encoder_layers=2,
                num_decoder_layers=2,
            )
            T5Bundle.build_model(
                config=dummy_t5_conf,
                freeze_model=True,
                checkpoint=1,
            )

        # encoder-decoder with generation
        with self.assertRaises(TypeError):
            dummy_t5_generation_conf = T5Conf(
                encoder_only=False,
                linear_head=True,
                vocab_size=10,
                embedding_dim=16,
                ffn_dimension=64,
                num_attention_heads=2,
                num_encoder_layers=2,
                num_decoder_layers=2,
            )
            T5Bundle.build_model(
                config=dummy_t5_generation_conf,
                freeze_model=True,
                checkpoint=1,
            )

    def test_t5_bundler_conf_property(self) -> None:
        """The bundle must expose its configuration via the config property."""
        from torchtext.models import T5Conf, T5Bundle

        dummy_t5_conf = T5Conf(
            encoder_only=False,
            vocab_size=10,
            embedding_dim=16,
            ffn_dimension=64,
            num_attention_heads=2,
            num_encoder_layers=2,
            num_decoder_layers=2,
        )
        t5_bundle = T5Bundle(dummy_t5_conf)
        self.assertTrue(isinstance(t5_bundle.config, T5Conf))

    def METHOD_NAME(self) -> None:
        """An unfrozen model must actually update its weights when trained."""
        from torch.optim import SGD
        from torchtext.models import T5Conf, T5Model, T5Bundle

        # Fixed seed so the single SGD step is deterministic.
        torch.manual_seed(123)

        def _train(model):
            # One SGD step on a tiny fake batch.
            optim = SGD(model.parameters(), lr=1)
            model_input = torch.tensor([[1, 2, 3, 4, 5]]).to(device=self.device)
            target = torch.tensor([1]).to(device=self.device)
            output = model(model_input)["decoder_output"]
            logits = F.log_softmax(output[:, -1], dim=-1)
            loss = F.cross_entropy(logits, target)
            loss.backward()
            optim.step()

        dummy_conf = T5Conf(
            encoder_only=False,
            linear_head=True,
            vocab_size=10,
            embedding_dim=16,
            ffn_dimension=64,
            num_attention_heads=2,
            num_encoder_layers=2,
            num_decoder_layers=2,
            training=True,
        )
        dummy_model = T5Model(dummy_conf)
        model = T5Bundle.build_model(
            config=dummy_conf,
            freeze_model=False,
            checkpoint=dummy_model.state_dict(),
        )
        model.to(device=self.device, dtype=self.dtype)
        current_state_dict = copy.deepcopy(model.state_dict())

        _train(model)
        # Weights must differ after the optimizer step, i.e. not frozen.
        self.assertNotEqual(model.state_dict(), current_state_dict)

    def test_shift_right(self) -> None:
        """_shift_right must prepend padding and drop the last token; empty
        inputs must raise IndexError."""
        from torchtext.models import T5Conf, T5Model

        dummy_encoder_conf = T5Conf()
        dummy_t5_encoder = T5Model(dummy_encoder_conf)
        padding_idx = dummy_t5_encoder.padding_idx

        valid_cases_input = [[[1, 2], [3, 4]], [[1]]]
        valid_cases_expected = [[[padding_idx, 1], [padding_idx, 3]], [[padding_idx]]]
        invalid_cases_input = [[0], [], [[]]]

        for input_ids, expected in zip(valid_cases_input, valid_cases_expected):
            input_ids = torch.Tensor(input_ids)
            expected = torch.Tensor(expected)
            self.assertEqual(dummy_t5_encoder._shift_right(input_ids), expected)

        for input_ids in invalid_cases_input:
            input_ids = torch.Tensor(input_ids)
            with self.assertRaises(IndexError):
                dummy_t5_encoder._shift_right(input_ids)
# Urwid main loop code
# Copyright (C) 2004-2012 Ian Ward
# Copyright (C) 2008 Walter Mundt
# Copyright (C) 2009 Andrew Psaltis
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Urwid web site: https://urwid.org/
"""Abstract shared code for urwid EventLoop implementation."""
from __future__ import annotations
import abc
import signal
import typing
if typing.TYPE_CHECKING:
from collections.abc import Callable
from types import FrameType
# Public API of this module.
__all__ = ("ExitMainLoop", "EventLoop")
class ExitMainLoop(Exception):
    """
    When this exception is raised within a main loop the main loop
    will exit cleanly. Raise it from any callback to request shutdown.
    """
class EventLoop(abc.ABC):
    """
    Abstract class representing an event loop to be used by :class:`MainLoop`.

    Concrete subclasses wire urwid into a specific reactor (select, asyncio,
    twisted, ...) by implementing the abstract methods below.
    """

    @abc.abstractmethod
    def alarm(self, seconds: float, callback: Callable[[], typing.Any]) -> typing.Any:
        """
        Call callback() a given time from now. No parameters are
        passed to callback.

        This method has no default implementation.

        Returns a handle that may be passed to remove_alarm()

        seconds -- floating point time to wait before calling callback
        callback -- function to call from event loop
        """

    @abc.abstractmethod
    def enter_idle(self, callback):
        """
        Add a callback for entering idle.

        This method has no default implementation.

        Returns a handle that may be passed to remove_idle()
        """

    @abc.abstractmethod
    def remove_alarm(self, handle) -> bool:
        """
        Remove an alarm.

        This method has no default implementation.

        Returns True if the alarm exists, False otherwise
        """

    @abc.abstractmethod
    def remove_enter_idle(self, handle) -> bool:
        """
        Remove an idle callback.

        This method has no default implementation.

        Returns True if the handle was removed.
        """

    @abc.abstractmethod
    def remove_watch_file(self, handle) -> bool:
        """
        Remove an input file.

        This method has no default implementation.

        Returns True if the input file exists, False otherwise
        """

    @abc.abstractmethod
    def METHOD_NAME(self) -> None:
        """
        Start the event loop. Exit the loop when any callback raises
        an exception. If ExitMainLoop is raised, exit cleanly.

        This method has no default implementation.
        """

    @abc.abstractmethod
    def watch_file(self, fd: int, callback: Callable[[], typing.Any]):
        """
        Call callback() when fd has some data to read. No parameters
        are passed to callback.

        This method has no default implementation.

        Returns a handle that may be passed to remove_watch_file()

        fd -- file descriptor to watch for input
        callback -- function to call when input is available
        """

    def set_signal_handler(
        self,
        signum: int,
        handler: Callable[[int, FrameType | None], typing.Any] | int | signal.Handlers,
    ) -> Callable[[int, FrameType | None], typing.Any] | int | signal.Handlers | None:
        """
        Sets the signal handler for signal signum.

        The default implementation of :meth:`set_signal_handler`
        is simply a proxy function that calls :func:`signal.signal()`
        and returns the resulting value.

        signum -- signal number
        handler -- function (taking signum as its single argument),
        or `signal.SIG_IGN`, or `signal.SIG_DFL`
        """
        return signal.signal(signum, handler)
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import warnings
from typing import Any, Dict, Optional, Type, Union
import torch.nn as nn
from torch import Tensor
from flash.audio.speech_recognition.backbone import SPEECH_RECOGNITION_BACKBONES
from flash.audio.speech_recognition.collate import DataCollatorCTCWithPadding
from flash.audio.speech_recognition.input import SpeechRecognitionDeserializer
from flash.audio.speech_recognition.output_transform import SpeechRecognitionOutputTransform
from flash.core.data.io.input import ServeInput
from flash.core.data.io.input_transform import InputTransform
from flash.core.data.io.output import Output
from flash.core.model import Task
from flash.core.registry import FlashRegistry
from flash.core.serve import Composition
from flash.core.utilities.imports import _TOPIC_AUDIO_AVAILABLE, requires
from flash.core.utilities.types import INPUT_TRANSFORM_TYPE, LR_SCHEDULER_TYPE, OPTIMIZER_TYPE
# transformers is an optional dependency, only present with the audio extras.
if _TOPIC_AUDIO_AVAILABLE:
    from transformers import AutoProcessor
class SpeechRecognition(Task):
    """The ``SpeechRecognition`` task is a :class:`~flash.Task` for converting speech to text. For more details, see
    :ref:`speech_recognition`.

    Args:
        backbone: Any speech recognition model from `HuggingFace/transformers
            <https://huggingface.co/models?pipeline_tag=automatic-speech-recognition>`_.
        processor_backbone: Optional alternative backbone name used to build the
            processor (collation) and the output transform; defaults to ``backbone``.
        learning_rate: Learning rate to use for training, defaults to ``1e-5``.
        optimizer: Optimizer to use for training.
        lr_scheduler: The LR scheduler to use during training.
    """

    backbones: FlashRegistry = SPEECH_RECOGNITION_BACKBONES
    required_extras = "audio"

    def __init__(
        self,
        backbone: str = "facebook/wav2vec2-base-960h",
        processor_backbone: str = None,
        optimizer: OPTIMIZER_TYPE = "Adam",
        lr_scheduler: LR_SCHEDULER_TYPE = None,
        learning_rate: Optional[float] = None,
    ):
        os.environ["TOKENIZERS_PARALLELISM"] = "TRUE"
        # disable HF thousand warnings
        warnings.simplefilter("ignore")
        # set os environ variable for multiprocesses
        os.environ["PYTHONWARNINGS"] = "ignore"
        model = self.backbones.get(backbone)()
        super().__init__(
            model=model,
            optimizer=optimizer,
            lr_scheduler=lr_scheduler,
            learning_rate=learning_rate,
            output_transform=SpeechRecognitionOutputTransform(backbone)
            if processor_backbone is None
            else SpeechRecognitionOutputTransform(processor_backbone),
        )

        self.save_hyperparameters()

        # The processor pads/collates raw audio and labels into batches.
        self.collate_fn = DataCollatorCTCWithPadding(
            AutoProcessor.from_pretrained(backbone)
            if processor_backbone is None
            else AutoProcessor.from_pretrained(processor_backbone)
        )

    def modules_to_freeze(self) -> Optional[nn.Module]:
        """Return the pretrained base model to be frozen during fine-tuning."""
        return self.model.base_model

    def forward(self, batch: Dict[str, Tensor]):
        # Returns the model's logits for the batch's raw audio inputs.
        return self.model(batch["input_values"]).logits

    def predict_step(self, batch: Any, batch_idx: int, dataloader_idx: int = 0) -> Any:
        return self(batch)

    def METHOD_NAME(self, batch: Any, batch_idx: int, metrics: nn.ModuleDict) -> Any:
        """Shared step: the loss is computed by the HF model from the labels."""
        out = self.model(batch["input_values"], labels=batch["labels"])
        out["logs"] = {"loss": out.loss}
        return out

    @requires("serve")
    def serve(
        self,
        host: str = "127.0.0.1",
        port: int = 8000,
        sanity_check: bool = True,
        input_cls: Optional[Type[ServeInput]] = SpeechRecognitionDeserializer,
        transform: INPUT_TRANSFORM_TYPE = InputTransform,
        transform_kwargs: Optional[Dict] = None,
        output: Optional[Union[str, Output]] = None,
    ) -> Composition:
        """Serve the model over HTTP for inference."""
        return super().serve(host, port, sanity_check, input_cls, transform, transform_kwargs, output)
# MIT License
#
# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2020
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import logging
import unittest
import numpy as np
from art.attacks.poisoning.backdoor_attack import PoisoningAttackBackdoor
from art.attacks.poisoning.perturbations import add_pattern_bd, add_single_bd, insert_image
from art.utils import to_categorical
from tests.utils import TestBase, master_seed, get_image_classifier_kr
logger = logging.getLogger(__name__)

# Fraction of poisoned points desired in each target class.
PP_POISON = 0.33
# Keep training short: these tests only check the attack pipeline runs.
NB_EPOCHS = 3
class TestBackdoorAttack(TestBase):
    """
    A unittest class for testing Backdoor Poisoning attack.
    """

    def setUp(self):
        master_seed(seed=301)
        # Trigger image used by the image-insertion perturbation functions.
        self.backdoor_path = os.path.join(
            os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))),
            "utils",
            "data",
            "backdoors",
            "alert.png",
        )
        super().setUp()

    @staticmethod
    def poison_dataset(x_clean, y_clean, poison_func):
        """Poison samples of each source class i toward target class (i + 1) % 10.

        Returns a tuple ``(is_poison, x_poison, y_poison)`` where ``is_poison``
        is a boolean mask over the concatenated clean + poisoned arrays.
        """
        x_poison = np.copy(x_clean)
        y_poison = np.copy(y_clean)
        is_poison = np.zeros(np.shape(y_poison)[0])
        for i in range(10):
            src = i
            tgt = (i + 1) % 10
            n_points_in_tgt = np.round(np.sum(np.argmax(y_clean, axis=1) == tgt))
            # num_poison chosen so poisoned points are a PP_POISON fraction of
            # the resulting target class.
            num_poison = int((PP_POISON * n_points_in_tgt) / (1 - PP_POISON))
            src_imgs = np.copy(x_clean[np.argmax(y_clean, axis=1) == src])
            n_points_in_src = np.shape(src_imgs)[0]
            if num_poison:
                # NOTE(review): np.random.choice samples WITH replacement here;
                # presumably acceptable for a smoke test -- confirm.
                indices_to_be_poisoned = np.random.choice(n_points_in_src, num_poison)
                imgs_to_be_poisoned = src_imgs[indices_to_be_poisoned]
                backdoor_attack = PoisoningAttackBackdoor(poison_func)
                poison_images, poison_labels = backdoor_attack.poison(
                    imgs_to_be_poisoned, y=to_categorical(np.ones(num_poison) * tgt, 10)
                )
                x_poison = np.append(x_poison, poison_images, axis=0)
                y_poison = np.append(y_poison, poison_labels, axis=0)
                is_poison = np.append(is_poison, np.ones(num_poison))
        is_poison = is_poison != 0
        return is_poison, x_poison, y_poison

    def poison_func_1(self, x):
        # Multi-pixel pattern backdoor.
        max_val = np.max(self.x_train_mnist)
        return np.expand_dims(add_pattern_bd(x.squeeze(3), pixel_value=max_val), axis=3)

    def poison_func_2(self, x):
        # Single-pixel backdoor.
        max_val = np.max(self.x_train_mnist)
        return np.expand_dims(add_single_bd(x.squeeze(3), pixel_value=max_val), axis=3)

    def poison_func_3(self, x):
        # Fixed-position 5x5 image backdoor.
        return insert_image(x, backdoor_path=self.backdoor_path, size=(5, 5), random=False, x_shift=3, y_shift=3)

    def poison_func_4(self, x):
        # Randomly placed 5x5 image backdoor.
        return insert_image(x, backdoor_path=self.backdoor_path, size=(5, 5), random=True)

    def poison_func_5(self, x):
        # Deliberately oversized (100x100) backdoor -- larger than MNIST images.
        return insert_image(x, backdoor_path=self.backdoor_path, random=True, size=(100, 100))

    def poison_func_6(self, x):
        # Renamed from a masked placeholder; same oversized backdoor as
        # poison_func_5, used by the invalid-array-shape failure test.
        return insert_image(x, backdoor_path=self.backdoor_path, random=True, size=(100, 100))

    def test_backdoor_pattern(self):
        """
        Test the backdoor attack with a pattern-based perturbation can be trained on classifier
        """
        krc = get_image_classifier_kr()
        (is_poison_train, x_poisoned_raw, y_poisoned_raw) = self.poison_dataset(
            self.x_train_mnist, self.y_train_mnist, self.poison_func_1
        )
        # Shuffle training data
        n_train = np.shape(y_poisoned_raw)[0]
        shuffled_indices = np.arange(n_train)
        np.random.shuffle(shuffled_indices)
        x_train = x_poisoned_raw[shuffled_indices]
        y_train = y_poisoned_raw[shuffled_indices]
        krc.fit(x_train, y_train, nb_epochs=NB_EPOCHS, batch_size=32)

    def test_backdoor_pixel(self):
        """
        Test the backdoor attack with a pixel-based perturbation can be trained on classifier
        """
        krc = get_image_classifier_kr()
        (is_poison_train, x_poisoned_raw, y_poisoned_raw) = self.poison_dataset(
            self.x_train_mnist, self.y_train_mnist, self.poison_func_2
        )
        # Shuffle training data
        n_train = np.shape(y_poisoned_raw)[0]
        shuffled_indices = np.arange(n_train)
        np.random.shuffle(shuffled_indices)
        x_train = x_poisoned_raw[shuffled_indices]
        y_train = y_poisoned_raw[shuffled_indices]
        krc.fit(x_train, y_train, nb_epochs=NB_EPOCHS, batch_size=32)

    def test_backdoor_image(self):
        """
        Test the backdoor attack with a image-based perturbation can be trained on classifier
        """
        krc = get_image_classifier_kr()
        (is_poison_train, x_poisoned_raw, y_poisoned_raw) = self.poison_dataset(
            self.x_train_mnist, self.y_train_mnist, self.poison_func_3
        )
        # Shuffle training data
        n_train = np.shape(y_poisoned_raw)[0]
        shuffled_indices = np.arange(n_train)
        np.random.shuffle(shuffled_indices)
        x_train = x_poisoned_raw[shuffled_indices]
        y_train = y_poisoned_raw[shuffled_indices]
        krc.fit(x_train, y_train, nb_epochs=NB_EPOCHS, batch_size=32)

    def test_multiple_perturbations(self):
        """
        Test using multiple perturbation functions in the same attack can be trained on classifier
        """
        krc = get_image_classifier_kr()
        (is_poison_train, x_poisoned_raw, y_poisoned_raw) = self.poison_dataset(
            self.x_train_mnist, self.y_train_mnist, [self.poison_func_4, self.poison_func_1]
        )
        # Shuffle training data
        n_train = np.shape(y_poisoned_raw)[0]
        shuffled_indices = np.arange(n_train)
        np.random.shuffle(shuffled_indices)
        x_train = x_poisoned_raw[shuffled_indices]
        y_train = y_poisoned_raw[shuffled_indices]
        krc.fit(x_train, y_train, nb_epochs=NB_EPOCHS, batch_size=32)

    def test_image_failure_modes(self):
        """
        Tests failure modes for image perturbation functions
        """
        backdoor_attack = PoisoningAttackBackdoor(self.poison_func_5)
        # Fixed operator precedence: the original `argmax(...) + 1 % 10`
        # evaluated as `argmax(...) + (1 % 10)`; the intent is the next class
        # label modulo 10.
        adv_target = (np.argmax(self.y_train_mnist) + 1) % 10
        with self.assertRaises(ValueError) as context:
            backdoor_attack.poison(self.x_train_mnist, y=adv_target)
        self.assertIn("Backdoor does not fit inside original image", str(context.exception))
        backdoor_attack = PoisoningAttackBackdoor(self.poison_func_6)
        with self.assertRaises(ValueError) as context:
            backdoor_attack.poison(np.zeros(5), y=np.ones(5))
        self.assertIn("Invalid array shape", str(context.exception))

    def test_check_params(self):
        # The perturbation must be callable, not a string.
        with self.assertRaises(ValueError):
            _ = PoisoningAttackBackdoor("self.poison_func_5")
if __name__ == "__main__":
    unittest.main()  # fixed: removed a stray trailing "|" token
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library to compute order of computations in a graph.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
from tensorflow.contrib.receptive_field.python.util import parse_layer_parameters
from tensorflow.python.platform import tf_logging as logging
def parse_graph_nodes(graph_def):
  """Helper function to parse GraphDef's nodes.

  It returns a dict mapping from node name to NodeDef.

  Args:
    graph_def: A GraphDef object.

  Returns:
    name_to_node: Dict keyed by node name, each entry containing the node's
      NodeDef.
  """
  return {node_def.name: node_def for node_def in graph_def.node}
# Named tuple used to collect information from each node in a computation graph.
# - order: topological order of the node (int).
# - node: the node's NodeDef proto.
# - input_size / output_size: 2D spatial resolutions, or None when unknown.
_node_info = collections.namedtuple(
    'NodeInfo', field_names=['order', 'node', 'input_size', 'output_size'])
def METHOD_NAME(input_spatial_resolution, kernel_size, stride,
                total_padding):
  """Computes output resolution, given input resolution and layer parameters.

  Note that this computation is done only over one dimension (eg, x or y).
  If any of the inputs is None, returns None.

  Args:
    input_spatial_resolution: Input spatial resolution (int).
    kernel_size: Kernel size (int).
    stride: Stride (int).
    total_padding: Total padding to be applied (int).

  Returns:
    output_resolution: Output dimension (int) or None.
  """
  params = (input_spatial_resolution, kernel_size, stride, total_padding)
  if any(p is None for p in params):
    return None
  # Number of valid kernel placements along this dimension.
  covered = input_spatial_resolution + total_padding - kernel_size + 1
  return int(math.ceil(covered / stride))
def _get_computed_nodes(name_to_node,
                        current,
                        node_info,
                        input_node_name='',
                        input_node_size=None):
  """Traverses the graph recursively to compute its topological order.

  Optionally, the function may also compute the input and output feature map
  resolutions at each node. In this case, input_node_name and input_node_size
  must be set. Note that if a node's op type is unknown, the input and output
  resolutions are ignored and set to None.

  Args:
    name_to_node: Dict keyed by node name, each entry containing the node's
      NodeDef.
    current: Current node name.
    node_info: Map of nodes we've already traversed, containing their
      _node_info information.
    input_node_name: Name of node with fixed input resolution (optional).
    input_node_size: Fixed input resolution to use (optional).

  Returns:
    order: Order in topological sort for 'current'.
    input_size: Tensor spatial resolution at input of current node.
    output_size: Tensor spatial resolution at output of current node.
  """
  # Memoization: a node present in node_info has already been processed.
  if current in node_info:
    return (node_info[current].order, node_info[current].input_size,
            node_info[current].output_size)

  node_def = name_to_node[current]

  # The designated input node anchors the recursion with a fixed resolution.
  if current == input_node_name:
    order = 0
    input_size = None
    output_size = input_node_size
    node_info[current] = _node_info(order, node_def, input_size, output_size)
    return (order, input_size, output_size)

  input_size = None
  output_size = None
  order = 0
  number_inputs = 0
  for each in node_def.input:
    # Parses name of input node.
    if each.startswith('^'):
      # The character '^' denotes a control dependency, so this input node can
      # be safely ignored.
      continue
    each = each.split(':')[0]
    # Recursively computes ordering.
    (parent_order, _, parent_output_size) = _get_computed_nodes(
        name_to_node, each, node_info, input_node_name, input_node_size)
    # A node comes strictly after all of its parents in topological order.
    order = max(order, parent_order + 1)
    if number_inputs == 0:
      # For all the types of nodes we consider, the first input corresponds to
      # the feature map.
      input_size = parent_output_size
    number_inputs += 1

  # Figure out output size for this layer.
  logging.vlog(3, 'input_size = %s', input_size)
  if input_size is None:
    output_size = None
  else:
    (kernel_size_x, kernel_size_y, stride_x, stride_y, _, _, total_padding_x,
     total_padding_y) = (
         parse_layer_parameters.get_layer_params(
             node_def, name_to_node, input_size, force=True))
    logging.vlog(3, 'kernel_size_x = %s, kernel_size_y = %s, '
                 'stride_x = %s, stride_y = %s, '
                 'total_padding_x = %s, total_padding_y = %s' %
                 (kernel_size_x, kernel_size_y, stride_x, stride_y,
                  total_padding_x, total_padding_y))
    # Compute each spatial dimension independently.
    output_size = [None] * 2
    output_size[0] = METHOD_NAME(input_size[0], kernel_size_x,
                                 stride_x, total_padding_x)
    output_size[1] = METHOD_NAME(input_size[1], kernel_size_y,
                                 stride_y, total_padding_y)

  logging.vlog(3, 'output_size = %s', output_size)
  node_info[current] = _node_info(order, node_def, input_size, output_size)
  return order, input_size, output_size
def get_compute_order(graph_def, input_node_name='', input_node_size=None):
  """Computes order of computation for a given CNN graph.

  Optionally, the function may also compute the input and output feature map
  resolutions at each node. In this case, input_node_name and input_node_size
  must be set. Note that if a node's op type is unknown, the input and output
  resolutions are ignored and set to None.

  Args:
    graph_def: GraphDef object.
    input_node_name: Name of node with fixed input resolution (optional). This
      is usually the node name for the input image in a CNN.
    input_node_size: 2D list of integers, fixed input resolution to use
      (optional). This is usually the input resolution used for the input image
      in a CNN (common examples are: [224, 224], [299, 299], [321, 321]).

  Returns:
    node_info: Default dict keyed by node name, mapping to a named tuple with
      the following fields:
      - order: Integer denoting topological order;
      - node: NodeDef for the given node;
      - input_size: 2D list of integers, denoting the input spatial resolution
        to the node;
      - output_size: 2D list of integers, denoting the output spatial
        resolution of the node.
    name_to_node: Dict keyed by node name, each entry containing the node's
      NodeDef.
  """
  name_to_node = parse_graph_nodes(graph_def)
  # NOTE(review): _node_info as default_factory would raise TypeError if ever
  # called with no arguments; entries appear to only be assigned explicitly in
  # _get_computed_nodes, so the factory is presumably never invoked -- confirm.
  node_info = collections.defaultdict(_node_info)
  for each in graph_def.node:
    _get_computed_nodes(name_to_node, each.name, node_info, input_node_name,
                        input_node_size)
  # Fixed: removed a stray trailing "|" token from the return statement.
  return node_info, name_to_node
import unittest
import numpy as np
from scipy.constants import mu_0
from SimPEG.electromagnetics.utils.testing_utils import getFDEMProblem
# Toggle the e-formulation and b-formulation test groups independently.
testE = True
testB = True
verbose = False

TOL = 1e-5
FLR = 1e-20  # "zero", so if residual below this --> pass regardless of order

CONDUCTIVITY = 1e1
MU = mu_0
freq = 1e-1
# Perturb the model randomly so tests do not run at a special point.
addrandoms = True

SrcList = ["RawVec", "MagDipole"]  # or 'MAgDipole_Bfield', 'CircularLoop', 'RawVec'
def adjointTest(fdemType, comp):
    """Adjoint test for the FDEM problem: verifies v.(J w) == w.(J^T v)
    within a relative tolerance, for random v (data space) and w (model space).

    Returns True when the discrepancy is below tolerance.
    """
    prb = getFDEMProblem(fdemType, comp, SrcList, freq)
    # prb.solverOpts = dict(check_accuracy=True)
    print("Adjoint {0!s} formulation - {1!s}".format(fdemType, comp))

    # Log-conductivity model and magnetic permeability.
    m = np.log(np.ones(prb.sigmaMap.nP) * CONDUCTIVITY)
    mu = np.ones(prb.mesh.nC) * MU

    if addrandoms is True:
        # Perturb so the test is not run at a special (e.g. constant) point.
        m = m + np.random.randn(prb.sigmaMap.nP) * np.log(CONDUCTIVITY) * 1e-1
        mu = mu + np.random.randn(prb.mesh.nC) * MU * 1e-1

    survey = prb.survey
    # prb.PropMap.PropModel.mu = mu
    # prb.PropMap.PropModel.mui = 1./mu
    u = prb.fields(m)

    v = np.random.rand(survey.nD)
    w = np.random.rand(prb.mesh.nC)

    vJw = v.dot(prb.Jvec(m, w, u))
    wJtv = w.dot(prb.Jtvec(m, v, u))
    # Tolerance scales with the order of magnitude of vJw, floored at FLR.
    tol = np.max([TOL * (10 ** int(np.log10(np.abs(vJw)))), FLR])
    print(vJw, wJtv, vJw - wJtv, tol, np.abs(vJw - wJtv) < tol)
    return np.abs(vJw - wJtv) < tol
class FDEM_AdjointTests(unittest.TestCase):
    """Adjoint tests for the FDEM e- and b-formulations over every receiver
    field type (E, B, J, H), component (x, y, z) and part (real, imaginary).
    """

    if testE:
        # e-formulation

        def test_Jtvec_adjointTest_exr_Eform(self):
            self.assertTrue(adjointTest("e", ["ElectricField", "x", "r"]))

        def test_Jtvec_adjointTest_eyr_Eform(self):
            self.assertTrue(adjointTest("e", ["ElectricField", "y", "r"]))

        def test_Jtvec_adjointTest_ezr_Eform(self):
            self.assertTrue(adjointTest("e", ["ElectricField", "z", "r"]))

        def test_Jtvec_adjointTest_exi_Eform(self):
            self.assertTrue(adjointTest("e", ["ElectricField", "x", "i"]))

        def test_Jtvec_adjointTest_eyi_Eform(self):
            self.assertTrue(adjointTest("e", ["ElectricField", "y", "i"]))

        def test_Jtvec_adjointTest_ezi_Eform(self):
            self.assertTrue(adjointTest("e", ["ElectricField", "z", "i"]))

        def test_Jtvec_adjointTest_bxr_Eform(self):
            self.assertTrue(adjointTest("e", ["MagneticFluxDensity", "x", "r"]))

        def test_Jtvec_adjointTest_byr_Eform(self):
            self.assertTrue(adjointTest("e", ["MagneticFluxDensity", "y", "r"]))

        def test_Jtvec_adjointTest_bzr_Eform(self):
            self.assertTrue(adjointTest("e", ["MagneticFluxDensity", "z", "r"]))

        def test_Jtvec_adjointTest_bxi_Eform(self):
            self.assertTrue(adjointTest("e", ["MagneticFluxDensity", "x", "i"]))

        def test_Jtvec_adjointTest_byi_Eform(self):
            self.assertTrue(adjointTest("e", ["MagneticFluxDensity", "y", "i"]))

        def test_Jtvec_adjointTest_bzi_Eform(self):
            self.assertTrue(adjointTest("e", ["MagneticFluxDensity", "z", "i"]))

        def test_Jtvec_adjointTest_jxr_Eform(self):
            self.assertTrue(adjointTest("e", ["CurrentDensity", "x", "r"]))

        def test_Jtvec_adjointTest_jyr_Eform(self):
            self.assertTrue(adjointTest("e", ["CurrentDensity", "y", "r"]))

        def test_Jtvec_adjointTest_jzr_Eform(self):
            self.assertTrue(adjointTest("e", ["CurrentDensity", "z", "r"]))

        def test_Jtvec_adjointTest_jxi_Eform(self):
            self.assertTrue(adjointTest("e", ["CurrentDensity", "x", "i"]))

        def test_Jtvec_adjointTest_jyi_Eform(self):
            self.assertTrue(adjointTest("e", ["CurrentDensity", "y", "i"]))

        def test_Jtvec_adjointTest_jzi_Eform(self):
            self.assertTrue(adjointTest("e", ["CurrentDensity", "z", "i"]))

        def test_Jtvec_adjointTest_hxr_Eform(self):
            self.assertTrue(adjointTest("e", ["MagneticField", "x", "r"]))

        def test_Jtvec_adjointTest_hyr_Eform(self):
            self.assertTrue(adjointTest("e", ["MagneticField", "y", "r"]))

        def test_Jtvec_adjointTest_hzr_Eform(self):
            self.assertTrue(adjointTest("e", ["MagneticField", "z", "r"]))

        def test_Jtvec_adjointTest_hxi_Eform(self):
            self.assertTrue(adjointTest("e", ["MagneticField", "x", "i"]))

        def test_Jtvec_adjointTest_hyi_Eform(self):
            self.assertTrue(adjointTest("e", ["MagneticField", "y", "i"]))

        def test_Jtvec_adjointTest_hzi_Eform(self):
            self.assertTrue(adjointTest("e", ["MagneticField", "z", "i"]))

    if testB:
        # b-formulation

        def test_Jtvec_adjointTest_exr_Bform(self):
            self.assertTrue(adjointTest("b", ["ElectricField", "x", "r"]))

        def test_Jtvec_adjointTest_eyr_Bform(self):
            self.assertTrue(adjointTest("b", ["ElectricField", "y", "r"]))

        def test_Jtvec_adjointTest_ezr_Bform(self):
            self.assertTrue(adjointTest("b", ["ElectricField", "z", "r"]))

        def test_Jtvec_adjointTest_exi_Bform(self):
            self.assertTrue(adjointTest("b", ["ElectricField", "x", "i"]))

        def test_Jtvec_adjointTest_eyi_Bform(self):
            self.assertTrue(adjointTest("b", ["ElectricField", "y", "i"]))

        def test_Jtvec_adjointTest_ezi_Bform(self):
            self.assertTrue(adjointTest("b", ["ElectricField", "z", "i"]))

        def test_Jtvec_adjointTest_bxr_Bform(self):
            self.assertTrue(adjointTest("b", ["MagneticFluxDensity", "x", "r"]))

        def test_Jtvec_adjointTest_byr_Bform(self):
            self.assertTrue(adjointTest("b", ["MagneticFluxDensity", "y", "r"]))

        def test_Jtvec_adjointTest_bzr_Bform(self):
            self.assertTrue(adjointTest("b", ["MagneticFluxDensity", "z", "r"]))

        def test_Jtvec_adjointTest_bxi_Bform(self):
            self.assertTrue(adjointTest("b", ["MagneticFluxDensity", "x", "i"]))

        def test_Jtvec_adjointTest_byi_Bform(self):
            self.assertTrue(adjointTest("b", ["MagneticFluxDensity", "y", "i"]))

        def test_Jtvec_adjointTest_bzi_Bform(self):
            self.assertTrue(adjointTest("b", ["MagneticFluxDensity", "z", "i"]))

        def test_Jtvec_adjointTest_jxr_Bform(self):
            self.assertTrue(adjointTest("b", ["CurrentDensity", "x", "r"]))

        def test_Jtvec_adjointTest_jyr_Bform(self):
            self.assertTrue(adjointTest("b", ["CurrentDensity", "y", "r"]))

        def test_Jtvec_adjointTest_jzr_Bform(self):
            self.assertTrue(adjointTest("b", ["CurrentDensity", "z", "r"]))

        def test_Jtvec_adjointTest_jxi_Bform(self):
            # Fixed: restored the "test_" prefix (was a masked placeholder) so
            # unittest discovery actually runs this case.
            self.assertTrue(adjointTest("b", ["CurrentDensity", "x", "i"]))

        def test_Jtvec_adjointTest_jyi_Bform(self):
            self.assertTrue(adjointTest("b", ["CurrentDensity", "y", "i"]))

        def test_Jtvec_adjointTest_jzi_Bform(self):
            self.assertTrue(adjointTest("b", ["CurrentDensity", "z", "i"]))

        def test_Jtvec_adjointTest_hxr_Bform(self):
            self.assertTrue(adjointTest("b", ["MagneticField", "x", "r"]))

        def test_Jtvec_adjointTest_hyr_Bform(self):
            self.assertTrue(adjointTest("b", ["MagneticField", "y", "r"]))

        def test_Jtvec_adjointTest_hzr_Bform(self):
            self.assertTrue(adjointTest("b", ["MagneticField", "z", "r"]))

        def test_Jtvec_adjointTest_hxi_Bform(self):
            self.assertTrue(adjointTest("b", ["MagneticField", "x", "i"]))

        def test_Jtvec_adjointTest_hyi_Bform(self):
            self.assertTrue(adjointTest("b", ["MagneticField", "y", "i"]))

        def test_Jtvec_adjointTest_hzi_Bform(self):
            # Fixed: removed a stray trailing "|" token.
            self.assertTrue(adjointTest("b", ["MagneticField", "z", "i"]))
# Copyright 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import os
import shutil
import tempfile
import hashlib
import unittest
from sawtooth_validator.database.native_lmdb import NativeLmdbDatabase
from sawtooth_validator.protobuf.setting_pb2 import Setting
from sawtooth_validator.state.settings_view import SettingsViewFactory
from sawtooth_validator.state.state_view import StateViewFactory
from sawtooth_validator.state.merkle import MerkleDatabase
class TestSettingsView(unittest.TestCase):
    """Tests for SettingsView reads against a seeded Merkle state tree."""

    def __init__(self, test_name):
        super().__init__(test_name)
        self._temp_dir = None
        self._settings_view_factory = None
        self._current_root_hash = None

    def setUp(self):
        self._temp_dir = tempfile.mkdtemp()
        database = NativeLmdbDatabase(
            os.path.join(self._temp_dir, 'test_config_view.lmdb'),
            indexes=MerkleDatabase.create_index_configuration(),
            _size=10 * 1024 * 1024)
        state_view_factory = StateViewFactory(database)
        self._settings_view_factory = SettingsViewFactory(state_view_factory)

        merkle_db = MerkleDatabase(database)
        # Seed the state tree with the three settings exercised below.
        self._current_root_hash = merkle_db.update({
            TestSettingsView._address('my.setting'):
                TestSettingsView._setting_entry('my.setting', '10'),
            TestSettingsView._address('my.setting.list'):
                TestSettingsView._setting_entry('my.setting.list', '10,11,12'),
            TestSettingsView._address('my.other.list'):
                TestSettingsView._setting_entry('my.other.list', '13;14;15')
        }, virtual=False)

    def tearDown(self):
        shutil.rmtree(self._temp_dir)

    def test_get_setting(self):
        """Verifies the correct operation of get_setting() by using it to get
        the config setting stored as "my.setting" and compare it to '10' (the
        value set during setUp()).
        """
        settings_view = self._settings_view_factory.create_settings_view(
            self._current_root_hash)
        self.assertEqual('10', settings_view.get_setting('my.setting'))

    def test_get_setting_with_type_coercion(self):
        """Verifies the correct operation of get_setting() by using it to get
        the config setting stored as "my.setting" with a int type coercion
        function and compare it to the int 10 (the value set during setUp()).
        """
        settings_view = self._settings_view_factory.create_settings_view(
            self._current_root_hash)
        self.assertEqual(10, settings_view.get_setting('my.setting',
                                                       value_type=int))

    def test_get_setting_not_found(self):
        """Verifies the correct operation of get_setting() by using it to
        return None when an unknown setting is requested.
        """
        settings_view = self._settings_view_factory.create_settings_view(
            self._current_root_hash)
        self.assertIsNone(settings_view.get_setting('non-existant.setting'))

    def test_get_setting_not_found_with_default(self):
        """Verifies the correct operation of get_setting() by using it to
        return a default value when an unknown setting is requested.
        """
        # Fixed: restored the "test_" prefix (was a masked placeholder) so
        # unittest discovery actually runs this case.
        settings_view = self._settings_view_factory.create_settings_view(
            self._current_root_hash)
        self.assertEqual('default',
                         settings_view.get_setting('non-existant.setting',
                                                   default_value='default'))

    def test_get_setting_list(self):
        """Verifies the correct operation of get_setting_list() by using it to
        get the config setting stored as "my.setting.list" and compare it to
        ['10', '11', '12'] (the split value set during setUp()).
        """
        settings_view = self._settings_view_factory.create_settings_view(
            self._current_root_hash)
        # Verify we can still get the "raw" setting
        self.assertEqual('10,11,12',
                         settings_view.get_setting('my.setting.list'))
        # And now the split setting
        self.assertEqual(
            ['10', '11', '12'],
            settings_view.get_setting_list('my.setting.list'))

    def test_get_setting_list_not_found(self):
        """Verifies the correct operation of get_setting_list() by using it to
        return None when an unknown setting is requested.
        """
        settings_view = self._settings_view_factory.create_settings_view(
            self._current_root_hash)
        self.assertIsNone(
            settings_view.get_setting_list('non-existant.setting.list'))

    def test_get_setting_list_not_found_with_default(self):
        """Verifies the correct operation of get_setting_list() by using it to
        return a default value when an unknown setting is requested.
        """
        settings_view = self._settings_view_factory.create_settings_view(
            self._current_root_hash)
        self.assertEqual(
            [],
            settings_view.get_setting_list('non-existant.list',
                                           default_value=[]))

    def test_get_setting_list_alternate_delimiter(self):
        """Verifies the correct operation of get_setting_list() by using it to
        get the config setting stored as "my.other.list" and compare it to
        ['13', '14', '15'] (the value, split along an alternate delimiter, set
        during setUp()).
        """
        settings_view = self._settings_view_factory.create_settings_view(
            self._current_root_hash)
        self.assertEqual(
            ['13', '14', '15'],
            settings_view.get_setting_list('my.other.list', delimiter=';'))

    def test_get_setting_list_with_type_coercion(self):
        """Verifies the correct operation of get_setting_list() by using it to
        get the integer type-coerced config setting stored as "my.setting.list"
        and compare it to [10, 11, 12] (the split, type-coerced, value set
        during setUp()).
        """
        settings_view = self._settings_view_factory.create_settings_view(
            self._current_root_hash)
        self.assertEqual(
            [10, 11, 12],
            settings_view.get_setting_list('my.setting.list', value_type=int))

    @staticmethod
    def _address(key):
        # All settings live under the '000000' namespace prefix.
        return '000000' + _key_to_address(key)

    @staticmethod
    def _setting_entry(key, value):
        return Setting(
            entries=[Setting.Entry(key=key, value=value)]
        ).SerializeToString()
_MAX_KEY_PARTS = 4
_ADDRESS_PART_SIZE = 16
def _short_hash(name):
return hashlib.sha256(name.encode()).hexdigest()[:_ADDRESS_PART_SIZE]
def _key_to_address(key):
    """Convert a dotted setting key into its 64-hex-char state address:
    four 16-char hashed segments, padding missing parts with ''.
    """
    key_parts = key.split('.', maxsplit=_MAX_KEY_PARTS - 1)
    # Pad so there are always exactly _MAX_KEY_PARTS segments.
    key_parts.extend([''] * (_MAX_KEY_PARTS - len(key_parts)))
    # Fixed: removed a stray trailing "|" token from the return statement.
    return ''.join(_short_hash(x) for x in key_parts)
import os
from conan import ConanFile
from conan.errors import ConanInvalidConfiguration
# Fixed: check_min_cppstd is provided by conan.tools.build (the module name
# had been replaced by a placeholder).
from conan.tools.build import check_min_cppstd
from conan.tools.cmake import CMake, CMakeDeps, CMakeToolchain, cmake_layout
from conan.tools.files import apply_conandata_patches, export_conandata_patches, replace_in_file, get, copy
from conan.tools.microsoft import is_msvc_static_runtime
from conan.tools.scm import Version

required_conan_version = ">=1.53.0"
class OpenTDFConan(ConanFile):
    """Conan recipe for the openTDF core C++ client library."""

    name = "opentdf-client"
    description = "openTDF core c++ client library for creating and accessing TDF protected data"
    license = "BSD-3-Clause-Clear"
    url = "https://github.com/conan-io/conan-center-index"
    homepage = "https://www.virtru.com"
    topics = ("opentdf", "tdf", "virtru")

    package_type = "library"
    settings = "os", "arch", "compiler", "build_type"
    options = {
        "shared": [True, False],
        "fPIC": [True, False],
    }
    default_options = {
        "shared": False,
        "fPIC": True,
    }

    def export_sources(self):
        copy(self, "conan_cmake_project_include.cmake", self.recipe_folder, os.path.join(self.export_sources_folder, "src"))
        export_conandata_patches(self)

    @property
    def _minimum_cpp_standard(self):
        return 17

    @property
    def _minimum_compilers_version(self):
        return {
            "Visual Studio": "17" if Version(self.version) < "1.1.5" else "15",
            "msvc": "193" if Version(self.version) < "1.1.5" else "191",
            "gcc": "7.5.0",
            "clang": "12",
            "apple-clang": "12.0.0",
        }

    def config_options(self):
        if self.settings.os == "Windows":
            del self.options.fPIC

    def configure(self):
        if self.options.shared:
            self.options.rm_safe("fPIC")

    def layout(self):
        cmake_layout(self, src_folder="src")

    def requirements(self):
        # Uses openssl 3.x for 1.5.0 and newer
        if Version(self.version) >= "1.5.0":
            self.requires("openssl/[>=3.1 <4]")
        else:
            self.requires("openssl/1.1.1u")
        # Uses magic_enum for 1.4.0 and newer
        if Version(self.version) >= "1.4.0":
            self.requires("magic_enum/0.8.2")
        self.requires("ms-gsl/2.1.0")
        self.requires("nlohmann_json/3.11.1")
        self.requires("jwt-cpp/0.4.0")
        self.requires("zlib/1.2.13")
        # Use newer boost+libxml2 after 1.3.6
        if Version(self.version) <= "1.3.6":
            self.requires("boost/1.79.0")
            self.requires("libxml2/2.9.14")
        else:
            self.requires("boost/1.82.0")
            self.requires("libxml2/2.11.4")

    def validate(self):
        if self.settings.compiler.get_safe("cppstd"):
            check_min_cppstd(self, self._minimum_cpp_standard)
        # check minimum version of compiler
        min_version = self._minimum_compilers_version.get(str(self.settings.compiler))
        if not min_version:
            self.output.warning(
                f"{self.name} recipe lacks information about the {self.settings.compiler} compiler support."
            )
        else:
            if Version(self.settings.compiler.version) < min_version:
                # Fixed: the required and found versions were swapped in the
                # original message.
                raise ConanInvalidConfiguration(
                    f"{self.name} requires {self.settings.compiler} {min_version} "
                    f"but found {self.settings.compiler.version}"
                )
        # Disallow MT and MTd
        if is_msvc_static_runtime(self):
            raise ConanInvalidConfiguration(f"{self.name} can not be built with MT or MTd at this time")
        if self.options.shared and self.settings.os == "Windows":
            raise ConanInvalidConfiguration(f"{self.name} does not currently support shared library on Windows")

    def source(self):
        get(self, **self.conan_data["sources"][self.version], strip_root=True)

    def generate(self):
        tc = CMakeToolchain(self)
        if not self.settings.get_safe("compiler.cppstd"):
            tc.variables["CMAKE_CXX_STANDARD"] = 17
        tc.cache_variables["CMAKE_PROJECT_opentdf_INCLUDE"] = os.path.join(self.source_folder, "conan_cmake_project_include.cmake")
        tc.generate()
        tc = CMakeDeps(self)
        tc.generate()

    def _patch_sources(self):
        apply_conandata_patches(self)

    def build(self):
        # Fixed: restored the conventional ConanFile build() method name (and
        # the cmake.build() call), which had been replaced by a placeholder.
        self._patch_sources()
        cmake = CMake(self)
        cmake.configure()
        cmake.build()

    def package(self):
        cmake = CMake(self)
        cmake.install()
        copy(self, "*",
             dst=os.path.join(self.package_folder, "lib"),
             src=os.path.join(os.path.join(self.source_folder, "tdf-lib-cpp"), "lib"),
             keep_path=False)
        copy(self, "*",
             dst=os.path.join(self.package_folder, "include"),
             src=os.path.join(os.path.join(self.source_folder, "tdf-lib-cpp"), "include"),
             keep_path=False)
        copy(self, "LICENSE",
             dst=os.path.join(self.package_folder, "licenses"),
             src=self.source_folder,
             ignore_case=True,
             keep_path=False)

    def package_info(self):
        self.cpp_info.set_property("cmake_file_name", "opentdf-client")
        self.cpp_info.set_property("cmake_target_name", "opentdf-client::opentdf-client")
        self.cpp_info.set_property("pkg_config_name", "opentdf-client")
        self.cpp_info.components["libopentdf"].libs = ["opentdf_static"] if not self.options.shared else ["opentdf"]
        # Fixed: target name typo "copentdf-client::..." -> "opentdf-client::...".
        self.cpp_info.components["libopentdf"].set_property("cmake_target_name", "opentdf-client::opentdf-client")
        self.cpp_info.components["libopentdf"].names["cmake_find_package"] = "opentdf-client"
        self.cpp_info.components["libopentdf"].names["cmake_find_package_multi"] = "opentdf-client"
        self.cpp_info.components["libopentdf"].requires = [
            "openssl::openssl",
            "boost::boost",
            "ms-gsl::ms-gsl",
            "libxml2::libxml2",
            "jwt-cpp::jwt-cpp",
            "nlohmann_json::nlohmann_json",
            "zlib::zlib"
        ]
        if Version(self.version) >= "1.4.0":
            self.cpp_info.components["libopentdf"].requires.append("magic_enum::magic_enum")
        # Fixed: removed a second, identical ">= 1.4.0" magic_enum append that
        # duplicated the entry (and carried a stray trailing "|" token).
        if Version(self.version) < "1.1.0":
            self.cpp_info.components["libopentdf"].requires.append("libarchive::libarchive")
import datetime
import discord
import io
import random
import re
import traceback
from .checks import check_staff
from discord.ext import commands
from discord.utils import format_dt
from typing import Optional
class ConsoleColor(discord.Color):
    """Embed colors associated with Nintendo console families."""

    @classmethod
    def n3ds(cls):
        # Nintendo 3DS red.
        return cls(0xCE181E)

    @classmethod
    def wiiu(cls):
        # Wii U blue.
        return cls(0x009AC7)

    @classmethod
    def switch(cls):
        # Switch red.
        return cls(0xE60012)

    @classmethod
    def wii(cls):
        # Same blue as wiiu above.
        return cls(0x009AC7)

    @classmethod
    def legacy(cls):
        # Neutral grey for legacy consoles.
        return cls(0x707070)
async def send_dm_message(member: discord.Member, message: str, ctx: Optional[commands.Context] = None, **kwargs) -> bool:
    """A helper method for sending a message to a member's DMs.

    Returns a boolean indicating success of the DM
    and notifies of the failure if ctx is supplied."""
    try:
        await member.send(message, **kwargs)
        return True
    # AttributeError covers the case where `member` is None or not messageable.
    except (discord.HTTPException, discord.Forbidden, discord.NotFound, AttributeError):
        if ctx:
            await ctx.send(f"Failed to send DM message to {member.mention}")
        return False
async def get_user(ctx: commands.Context | discord.Interaction, user_id: int) -> Optional[discord.Member | discord.User]:
    """Resolve *user_id* to a Member of the current guild when possible,
    otherwise fetch the User from the API."""
    if ctx.guild and (user := ctx.guild.get_member(user_id)):
        return user
    else:
        # Contexts expose the client as .bot, Interactions as .client.
        bot = ctx.bot if isinstance(ctx, commands.Context) else ctx.client
        return await bot.fetch_user(user_id)
def METHOD_NAME(command, *, prefix=".") -> str:
    """Helper method for a command signature

    Parameters
    -----------
    command: :class:`discord.ext.commands.Command`
        The command to generate a signature for
    prefix: str
        The prefix to include in the signature"""
    escaped_prefix = discord.utils.escape_markdown(prefix)
    return f"{escaped_prefix}{command.qualified_name} {command.signature}"
def gen_color(seed) -> discord.Color:
    """Derive a deterministic pseudo-random color from *seed*.

    Note: seeds the module-level random generator as a side effect.
    """
    random.seed(seed)
    # Draw the three channels in the same order as before so colors are stable.
    red, green, blue = (random.randint(0, 255) for _ in range(3))
    return discord.Color((red << 16) + (green << 8) + blue)
def parse_time(time_string: str) -> int:
    """Parses a time string in dhms format to seconds.

    Returns -1 when the string contains no recognizable components.
    """
    # thanks, Luc#5653
    seconds_per_unit = {"d": 86400, "h": 3600, "m": 60, "s": 1}
    # Thanks to 3dshax server's former bot
    components = re.findall("([0-9]+[smhd])", time_string)
    if not components:
        return -1
    total = 0
    for component in components:
        total += int(component[:-1]) * seconds_per_unit[component[-1]]
    return total
def parse_date(date_string: str) -> Optional[datetime.datetime]:
    """Parse a 'YYYY-MM-DD [HH:MM]' string; returns None on malformed input."""
    parts = date_string.split(' ')
    if len(parts) == 1:
        # Date only: default the time-of-day to midnight.
        parts.append('00:00')
    elif len(parts) != 2:
        return None
    try:
        return datetime.datetime.strptime(' '.join(parts), "%Y-%m-%d %H:%M")
    except ValueError:
        return None
def create_error_embed(ctx: commands.Context | discord.Interaction, exc: Exception) -> discord.Embed:
    """Build an embed describing an unexpected command exception, for error logging."""
    # Slash-command interactions expose the invoker as .user, prefix commands as .author.
    app_command: bool = isinstance(ctx, discord.Interaction)
    author = ctx.user if app_command else ctx.author
    command: str = ctx.command.name if ctx.command else "unknown command"
    embed = discord.Embed(title=f"Unexpected exception in command {command}", color=0xe50730)
    trace = "".join(traceback.format_exception(exc))
    # Embed descriptions cap at 4096 characters; keep the tail of the traceback
    # (the innermost frames) and leave room for the ```py fence.
    if len(trace) > 4080:
        trace = trace[-4080:]
    embed.description = f'```py\n{trace}```'
    embed.add_field(name="Exception Type", value=exc.__class__.__name__)
    # ctx.message only exists for prefix commands, so it is omitted for app commands.
    embed.add_field(name="Information", value=f"channel: {ctx.channel.mention if isinstance(ctx.channel, discord.TextChannel) else 'Direct Message'}\ncommand: {command}\nauthor: {author.mention}\n{f'message: {ctx.message.content}' if not app_command else ''}", inline=False)
    return embed
def paginate_message(msg: str, prefix: str = '```', suffix: str = '```', max_size: int = 2000):
    """Split *msg* into a commands.Paginator so every page fits within max_size."""
    paginator = commands.Paginator(prefix=prefix, suffix=suffix, max_size=max_size)
    # Usable room per page once prefix, suffix and their newlines are accounted for.
    chunk_len = max_size - len(prefix) - len(suffix) - 2
    for start in range(0, len(msg), chunk_len):
        paginator.add_line(msg[start:start + chunk_len])
    return paginator
def text_to_discord_file(text: str, *, name: str = 'output.txt'):
    """Wrap *text* in an in-memory discord.File named *name*."""
    buffer = io.BytesIO(text.encode("utf-8"))
    return discord.File(filename=name, fp=buffer)
# https://stackoverflow.com/questions/9647202/ordinal-numbers-replacement
# rewritten with explicit suffix selection instead of the string-slice trick
def ordinal(n: int) -> str:
    """Return *n* with its English ordinal suffix (1st, 2nd, 3rd, 4th, ...)."""
    if n // 10 % 10 == 1:
        # The teens (11th-13th, 111th-113th, ...) always take 'th'.
        suffix = "th"
    else:
        suffix = {1: "st", 2: "nd", 3: "rd"}.get(n % 10, "th")
    return f'{n}{suffix}'
class KurisuCooldown:
    """Dynamic command cooldown factory that exempts staff helpers."""

    def __init__(self, rate: float, per: float):
        # rate: allowed invocations per window; per: window length in seconds.
        self.rate = rate
        self.per = per

    def __call__(self, ctx: commands.Context) -> Optional[commands.Cooldown]:
        # Helpers (and above) bypass the cooldown entirely.
        if check_staff(ctx.bot, 'Helper', ctx.author.id):
            return None
        return commands.Cooldown(self.rate, self.per)
async def create_userinfo_embed(user: discord.Member | discord.User, guild: discord.Guild) -> discord.Embed:
    """Build a "userinfo" embed.

    Members get guild-specific details (join date, roles, boost status); plain
    Users additionally get a ban lookup against *guild*.
    """
    embed = discord.Embed(color=gen_color(user.id))
    embed.description = (
        f"**User:** {user.mention}\n"
        f"**User's ID:** {user.id}\n"
        f"**Created on:** {format_dt(user.created_at)} ({format_dt(user.created_at, style='R')})\n"
        f"**Default Profile Picture:** {user.default_avatar}\n"
    )
    if isinstance(user, discord.Member):
        member_type = "member"
        # Guild-only fields; joined_at can be None for lurker/partial members.
        embed.description += (
            f"**Join date:** {format_dt(user.joined_at) if user.joined_at else None} ({format_dt(user.joined_at, style='R') if user.joined_at else None})\n"
            f"**Current Status:** {user.status}\n"
            f"**User Activity:** {user.activity}\n"
            f"**Current Display Name:** {user.display_name}\n"
            f"**Nitro Boost Info:** {f'Boosting since {format_dt(user.premium_since)}' if user.premium_since else 'Not a booster'}\n"
            f"**Current Top Role:** {user.top_role}\n"
            f"**Color:** {user.color}\n"
            f"**Profile Picture:** [link]({user.avatar})"
        )
        if user.guild_avatar:
            embed.description += f"\n**Guild Profile Picture:** [link]({user.guild_avatar})"
    else:
        member_type = "user"
        # Not a member of the guild — check whether they were banned from it.
        try:
            ban = await guild.fetch_ban(user)
            embed.description += f"\n**Banned**, reason: {ban.reason}"
        except discord.NotFound:
            pass
    member_type = member_type if not user.bot else "bot"
    embed.title = f"**Userinfo for {member_type} {user}**"
    embed.set_thumbnail(url=user.display_avatar.url)
    return embed
1,096 | name | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetStaticSiteLinkedBackendResult',
'AwaitableGetStaticSiteLinkedBackendResult',
'get_static_site_linked_backend',
'get_static_site_linked_backend_output',
]
@pulumi.output_type
class GetStaticSiteLinkedBackendResult:
    """
    Static Site Linked Backend ARM resource.
    """
    # NOTE: auto-generated by pulumi codegen.  Each constructor argument is
    # validated as a str and stored via pulumi.set(); the @pulumi.getter
    # properties below read the same keys back with pulumi.get().
    def __init__(__self__, backend_resource_id=None, created_on=None, id=None, kind=None, METHOD_NAME=None, provisioning_state=None, region=None, type=None):
        if backend_resource_id and not isinstance(backend_resource_id, str):
            raise TypeError("Expected argument 'backend_resource_id' to be a str")
        pulumi.set(__self__, "backend_resource_id", backend_resource_id)
        if created_on and not isinstance(created_on, str):
            raise TypeError("Expected argument 'created_on' to be a str")
        pulumi.set(__self__, "created_on", created_on)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if kind and not isinstance(kind, str):
            raise TypeError("Expected argument 'kind' to be a str")
        pulumi.set(__self__, "kind", kind)
        if METHOD_NAME and not isinstance(METHOD_NAME, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", METHOD_NAME)
        if provisioning_state and not isinstance(provisioning_state, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if region and not isinstance(region, str):
            raise TypeError("Expected argument 'region' to be a str")
        pulumi.set(__self__, "region", region)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter(METHOD_NAME="backendResourceId")
    def backend_resource_id(self) -> Optional[str]:
        """
        The resource id of the backend linked to the static site
        """
        return pulumi.get(self, "backend_resource_id")
    @property
    @pulumi.getter(METHOD_NAME="createdOn")
    def created_on(self) -> str:
        """
        The date and time on which the backend was linked to the static site.
        """
        return pulumi.get(self, "created_on")
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Resource Id.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def kind(self) -> Optional[str]:
        """
        Kind of resource.
        """
        return pulumi.get(self, "kind")
    @property
    @pulumi.getter
    def METHOD_NAME(self) -> str:
        """
        Resource Name.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(METHOD_NAME="provisioningState")
    def provisioning_state(self) -> str:
        """
        The provisioning state of the linking process.
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter
    def region(self) -> Optional[str]:
        """
        The region of the backend linked to the static site
        """
        return pulumi.get(self, "region")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
class AwaitableGetStaticSiteLinkedBackendResult(GetStaticSiteLinkedBackendResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # Makes the result awaitable: the dead "if False: yield" turns this
        # method into a generator without ever actually suspending, so the
        # await completes immediately with a plain result copy.
        if False:
            yield self
        return GetStaticSiteLinkedBackendResult(
            backend_resource_id=self.backend_resource_id,
            created_on=self.created_on,
            id=self.id,
            kind=self.kind,
            METHOD_NAME=self.METHOD_NAME,
            provisioning_state=self.provisioning_state,
            region=self.region,
            type=self.type)
def get_static_site_linked_backend(linked_backend_name: Optional[str] = None,
                                   METHOD_NAME: Optional[str] = None,
                                   resource_group_name: Optional[str] = None,
                                   opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetStaticSiteLinkedBackendResult:
    """
    Static Site Linked Backend ARM resource.


    :param str linked_backend_name: Name of the linked backend that should be retrieved
    :param str name: Name of the static site
    :param str resource_group_name: Name of the resource group to which the resource belongs.
    """
    # Marshal arguments into the camelCase keys the Azure invoke expects.
    __args__ = dict()
    __args__['linkedBackendName'] = linked_backend_name
    __args__['name'] = METHOD_NAME
    __args__['resourceGroupName'] = resource_group_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    # Synchronous provider invoke; the typ= argument deserializes the reply
    # into GetStaticSiteLinkedBackendResult.
    __ret__ = pulumi.runtime.invoke('azure-native:web/v20220901:getStaticSiteLinkedBackend', __args__, opts=opts, typ=GetStaticSiteLinkedBackendResult).value
    return AwaitableGetStaticSiteLinkedBackendResult(
        backend_resource_id=pulumi.get(__ret__, 'backend_resource_id'),
        created_on=pulumi.get(__ret__, 'created_on'),
        id=pulumi.get(__ret__, 'id'),
        kind=pulumi.get(__ret__, 'kind'),
        METHOD_NAME=pulumi.get(__ret__, 'name'),
        provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
        region=pulumi.get(__ret__, 'region'),
        type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_static_site_linked_backend)
def get_static_site_linked_backend_output(linked_backend_name: Optional[pulumi.Input[str]] = None,
                                          METHOD_NAME: Optional[pulumi.Input[str]] = None,
                                          resource_group_name: Optional[pulumi.Input[str]] = None,
                                          opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetStaticSiteLinkedBackendResult]:
    """
    Static Site Linked Backend ARM resource.


    :param str linked_backend_name: Name of the linked backend that should be retrieved
    :param str name: Name of the static site
    :param str resource_group_name: Name of the resource group to which the resource belongs.
    """
    # Output-lifted variant: the decorator wraps the plain function above so
    # it accepts pulumi Inputs and returns an Output; the body is never run.
    ...
1,097 | option | #!/usr/bin/env pmpython
#
# Copyright (C) 2014-2017 Red Hat.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# pylint: disable=bad-continuation,invalid-name,global-statement
# pylint: disable=line-too-long,too-many-locals
""" Display device mapper cache statistics for the system """
import sys
from pcp import pmapi, pmcc
if sys.version >= '3':
    long = int  # python2 to python3 portability (no long() in python3)
# PMAPI metric names fetched on every sample; disk.dm.* supplies total I/O
# operation counts used for the hit-ratio denominators.
CACHE_METRICS = ['dmcache.cache.used', 'dmcache.cache.total',
                 'dmcache.metadata.used', 'dmcache.metadata.total',
                 'dmcache.read_hits', 'dmcache.read_misses',
                 'dmcache.write_hits', 'dmcache.write_misses',
                 'disk.dm.read', 'disk.dm.write']
# Fixed-width report heading, followed by one of two sub-headings depending
# on whether ratios (-i absent) or raw IOPs (-i) are displayed.
COLUMN_HEADING = \
    ' ---%used--- ---------reads--------- --------writes---------'
SUBHEAD_IOPS = \
    '  meta cache     hit    miss     ops     hit    miss     ops'
SUBHEAD_RATIO = \
    '  meta cache     hit    miss   ratio     hit    miss   ratio'
RATIO = True    # default to displaying cache hit ratios
REPEAT = 10     # repeat heading after every N samples
def METHOD_NAME(opt, optarg, _):
    """ Perform setup for an individual command line option """
    global RATIO
    global REPEAT
    if opt == 'i':
        # -i: report raw IOPs instead of cache hit ratios.
        RATIO = False
    elif opt == 'R':
        # -R N: re-print the heading every N samples.
        REPEAT = int(optarg)
def cache_value(group, device, width, values):
    """ Lookup value for device instance, return it in a short string """
    if device not in values:
        # No sample for this device in the current interval.
        return '?'.rjust(width)
    formatted = group.contextCache.pmNumberStr(values[device])
    return formatted.strip(' ').rjust(width)
def cache_percent(device, width, used, total):
    """ From used and total values (dict), calculate 'percentage used' """
    try:
        numerator = float(used[device])
        denominator = float(total[device])
    except KeyError:
        # Device missing from either dict: value unknown this interval.
        return '?%'.rjust(width)
    if denominator == 0.0:
        return '0%'.rjust(width)
    percent = 100.0 * numerator / denominator
    # Clamp the display at 100% so rounding never shows an impossible value.
    if percent >= 100.0:
        return '100%'.rjust(width)
    return ('%3.1f%%' % percent).rjust(width)
def cache_dict(group, metric):
    """ Create an instance:value dictionary for the given metric """
    values = group[metric].netConvValues
    if not values:
        return {}
    # Each entry is (instid, instname, value); key on the instance name.
    return {entry[1]: entry[2] for entry in values}
def max_lv_length(group):
    """ look at the observation group and return the max length of all the lvnames """
    cache_used = cache_dict(group, 'dmcache.cache.used')
    if not cache_used:
        # No devices sampled yet: no padding needed.
        return 0
    return max(len(lv_name) for lv_name in cache_used)
class DmCachePrinter(pmcc.MetricGroupPrinter):
    """ Report device mapper cache statistics """
    def __init__(self, devices):
        """ Construct object - prepare for command line handling """
        pmcc.MetricGroupPrinter.__init__(self)
        self.hostname = None    # resolved lazily on the first report
        self.devices = devices  # device names from the command line, or empty for "all"
    def report_values(self, group, width=12):
        """ Report values for one of more device mapper cache devices """
        # Build several dictionaries, keyed on cache names, with the values
        cache_used = cache_dict(group, 'dmcache.cache.used')
        cache_total = cache_dict(group, 'dmcache.cache.total')
        meta_used = cache_dict(group, 'dmcache.metadata.used')
        meta_total = cache_dict(group, 'dmcache.metadata.total')
        read_hits = cache_dict(group, 'dmcache.read_hits')
        read_misses = cache_dict(group, 'dmcache.read_misses')
        read_ops = cache_dict(group, 'disk.dm.read')
        write_hits = cache_dict(group, 'dmcache.write_hits')
        write_misses = cache_dict(group, 'dmcache.write_misses')
        write_ops = cache_dict(group, 'disk.dm.write')
        # When no devices were named on the command line, report all observed.
        devicelist = self.devices
        if not devicelist:
            devicelist = cache_used.keys()
        if devicelist:
            for name in sorted(devicelist):
                # The last column pair is either a hit ratio (default) or raw IOPs (-i).
                if RATIO:
                    read_column = cache_percent(name, 7, read_hits, read_ops)
                    write_column = cache_percent(name, 7, write_hits, write_ops)
                else:
                    read_column = cache_value(group, name, 7, read_ops)
                    write_column = cache_value(group, name, 7, write_ops)
                print('%s %s %s %s %s %s %s %s %s' % (name[:width],
                        cache_percent(name, 5, meta_used, meta_total),
                        cache_percent(name, 5, cache_used, cache_total),
                        cache_value(group, name, 7, read_hits),
                        cache_value(group, name, 7, read_misses),
                        read_column,
                        cache_value(group, name, 7, write_hits),
                        cache_value(group, name, 7, write_misses),
                        write_column))
        else:
            print('No values available')
    def report(self, manager):
        """ Report driver routine - headings, sub-headings and values """
        self.convert(manager)
        group = manager['dmcache']
        # Pad the headings so they line up with the widest device name.
        max_lv = max_lv_length(group)
        padding = " "*max_lv
        # Re-print the (timestamped) heading every REPEAT samples.
        if manager.counter % REPEAT == 0:
            if not self.hostname:
                self.hostname = group.contextCache.pmGetContextHostName()
            stamp = group.contextCache.pmCtime(long(group.timestamp))
            title = '@ %s (host %s)' % (stamp.rstrip(), self.hostname)
            if RATIO:
                style = "%s%s" % (padding, SUBHEAD_RATIO)
            else:
                style = "%s%s" % (padding, SUBHEAD_IOPS)
            heading = ' device '.center(max_lv, '-') + COLUMN_HEADING
            print('%s\n%s\n%s' % (title, heading, style))
        self.report_values(group, width=max_lv)
if __name__ == '__main__':
    try:
        # 'i' = show IOPs, 'R:' = heading repeat interval (takes an argument).
        options = pmapi.pmOptions('iR:?')
        options.pmSetShortUsage('[options] [device ...]')
        options.pmSetOptionCallback(METHOD_NAME)
        options.pmSetLongOptionHeader('Options')
        options.pmSetLongOption('repeat', 1, 'R', 'N', 'repeat the header after every N samples')
        options.pmSetLongOption('iops', 0, 'i', '', 'display IOPs instead of cache hit ratio')
        options.pmSetLongOptionVersion()
        options.pmSetLongOptionHelp()
        dmcache = pmcc.MetricGroupManager.builder(options, sys.argv)
        # Bail out early if the host/archive lacks any of the dmcache metrics.
        missing = dmcache.checkMissingMetrics(CACHE_METRICS)
        if missing is not None:
            sys.stderr.write('Error: not all required metrics are available\nMissing: %s\n' % (missing))
            sys.exit(1)
        # Remaining (non-option) operands are the device names to report on.
        dmcache.printer = DmCachePrinter(options.pmGetOperands())
        dmcache['dmcache'] = CACHE_METRICS
        dmcache.run()
    except pmapi.pmErr as error:
        print('%s: %s\n' % (error.progname(), error.message()))
    except pmapi.pmUsageErr as usage:
        usage.message()
    except KeyboardInterrupt:
        pass
1,098 | preprocess | """
Handler for Torchrec DLRM based recommendation system
"""
import json
import logging
import os
from abc import ABC
import torch
from torchrec.datasets.criteo import DEFAULT_CAT_NAMES
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor
from ts.torch_handler.base_handler import BaseHandler
logger = logging.getLogger(__name__)
class TorchRecDLRMHandler(BaseHandler, ABC):
    """
    Handler for TorchRec DLRM example
    """
    def initialize(self, context):
        """Initialize function loads the model.pt file and initializes the model object.
        This version creates and initializes the model on cpu first and transfers it to
        gpu in a second step to prevent GPU OOM.
        Args:
            context (context): It is a JSON Object containing information
            pertaining to the model artifacts parameters.
        Raises:
            RuntimeError: Raises the Runtime error when the model.py is missing
        """
        properties = context.system_properties
        # Set device to cpu to prevent GPU OOM errors
        self.device = "cpu"
        self.manifest = context.manifest
        model_dir = properties.get("model_dir")
        model_pt_path = None
        if "serializedFile" in self.manifest["model"]:
            serialized_file = self.manifest["model"]["serializedFile"]
            model_pt_path = os.path.join(model_dir, serialized_file)
        # model def file
        model_file = self.manifest["model"].get("modelFile", "")
        if not model_file:
            raise RuntimeError("model.py not specified")
        logger.debug("Loading eager model")
        # Model weights are loaded on CPU here; the move to GPU happens below.
        self.model = self._load_pickled_model(model_dir, model_file, model_pt_path)
        self.map_location = (
            "cuda"
            if torch.cuda.is_available() and properties.get("gpu_id") is not None
            else "cpu"
        )
        self.device = torch.device(
            self.map_location + ":" + str(properties.get("gpu_id"))
            if torch.cuda.is_available() and properties.get("gpu_id") is not None
            else self.map_location
        )
        self.model.to(self.device)
        self.model.eval()
        logger.debug("Model file %s loaded successfully", model_pt_path)
        self.initialized = True
    def METHOD_NAME(self, data):
        """
        The input values for the DLRM model are twofold. There is a dense part and a sparse part.
        The sparse part consists of a list of ids where each entry can consist of zero, one or multiple ids.
        Due to the inconsistency in elements, the sparse part is represented by the KeyJaggedTensor class provided by TorchRec.
        Args:
            data (str): The input data is in the form of a string
        Returns:
            Tuple of:
                (Tensor): Dense features
                (KeyJaggedTensor): Sparse features
        """
        float_features, id_list_features_lengths, id_list_features_values = [], [], []
        for row in data:
            # Payload may arrive under "data" or "body" depending on the client.
            input = row.get("data") or row.get("body")
            if not isinstance(input, dict):
                input = json.loads(input)
            # This is the dense feature part
            assert "float_features" in input
            # The sparse input consists of a length vector and the values.
            # The length vector contains the number of elements which are part fo the same entry in the linear list provided as input.
            assert "id_list_features.lengths" in input
            assert "id_list_features.values" in input
            float_features.append(input["float_features"])
            id_list_features_lengths.extend(input["id_list_features.lengths"])
            id_list_features_values.append(input["id_list_features.values"])
        # Reformat the values input for KeyedJaggedTensor
        # NOTE(review): ids pass through a FloatTensor before being handed to the
        # KJT values — assumes they survive the float round-trip; confirm for
        # large id spaces.
        id_list_features_values = torch.FloatTensor(id_list_features_values)
        id_list_features_values = torch.transpose(id_list_features_values, 0, 1)
        id_list_features_values = [value for value in id_list_features_values]
        # Dense and Sparse Features for DLRM model
        dense_features = torch.FloatTensor(float_features)
        sparse_features = KeyedJaggedTensor(
            keys=DEFAULT_CAT_NAMES,
            lengths=torch.LongTensor(id_list_features_lengths),
            values=torch.cat(id_list_features_values),
        )
        return dense_features, sparse_features
    def inference(self, data):
        """
        The inference call moves the elements of the tuple onto the device and calls the model
        Args:
            data (torch tensor): The data is in the form of Torch Tensor
                                 whose shape should match that of the
                                  Model Input shape.
        Returns:
            (Torch Tensor): The predicted response from the model is returned
                            in this function.
        """
        with torch.no_grad():
            # Move both dense and sparse inputs onto the model's device lazily.
            data = map(lambda x: x.to(self.device), data)
            results = self.model(*data)
        return results
    def postprocess(self, data):
        """
        The post process function converts the prediction response into a
        Torchserve compatible format
        Args:
            data (Torch Tensor): The data parameter comes from the prediction output
            output_explain (None): Defaults to None.
        Returns:
            (list): Returns the response containing the predictions which consist of a single score per input entry.
        """
        result = []
        for item in data:
            res = {}
            res["score"] = item.squeeze().float().tolist()
            result.append(res)
        return result
1,099 | get weight index | from .GenericSource import *
from .GANSourceDefaultGenerator import GANSourceDefaultGenerator
import time
import scipy
class GANSourceDefaultPairsGenerator(GANSourceDefaultGenerator):
    """
    Like GANSourceDefaultGenerator but for pairs of particle (PET)
    """
    def __init__(self, user_info):
        super().__init__(user_info)
        self.is_paired = True
    def __getstate__(self):
        # Drop unpicklable members (lock, GAN library handle, GAN model info)
        # before the generator is serialized for multiprocessing.
        self.lock = None
        self.gaga = None
        self.gan_info = None
        return self.__dict__
    def check_parameters(self, g):
        # Paired particles need 3 coordinates per particle, hence 6 keys.
        # position
        if g.position_is_set_by_GAN and len(self.user_info.position_keys) != 6:
            dim = len(self.user_info.position_keys)
            self.fatal(f"you must provide 6 values for position, while it was {dim}")
        # direction
        if g.direction_is_set_by_GAN and len(self.user_info.direction_keys) != 6:
            dim = len(self.user_info.direction_keys)
            self.fatal(f"you must provide 6 values for direction, while it was {dim}")
    def get_energy_index(self, g, the_keys, n):
        # get energy index from GAN (one index per particle of the pair)
        if not g.energy_is_set_by_GAN:
            return
        ek = self.user_info.energy_key
        dim = len(ek)
        if dim != 2:
            self.fatal(f"you must provide 2 values for energy, while it was {dim}")
        g.energy_gan_index = [the_keys.index(ek[0]), the_keys.index(ek[1])]
    def get_time_index(self, g, the_keys, n):
        # get time index from GAN (one index per particle of the pair)
        if not g.time_is_set_by_GAN:
            return
        ek = self.user_info.time_key
        dim = len(ek)
        if dim != 2:
            self.fatal(f"you must provide 2 values for time, while it was {dim}")
        g.time_gan_index = [the_keys.index(ek[0]), the_keys.index(ek[1])]
    def METHOD_NAME(self, g, the_keys, n):
        # get weight index from GAN (one index per particle of the pair)
        if not g.weight_is_set_by_GAN:
            return
        ek = self.user_info.weight_key
        dim = len(ek)
        if dim != 2:
            self.fatal(f"you must provide 2 values for weight, while it was {dim}")
        g.weight_gan_index = [the_keys.index(ek[0]), the_keys.index(ek[1])]
    def generator(self, source):
        """
        Main function that will be called from the cpp side every time a batch
        of particles should be created.
        Once created here, the particles are copied to cpp.
        (Yes maybe the copy could be avoided, but I did not manage to do it)
        """
        # get the info
        g = self.gan_info
        n = self.user_info.batch_size
        start = None
        # verbose and timing ?
        if self.user_info.verbose_generator:
            start = time.time()
            print(f"Generate {n} particles from GAN ", end="")
        # generate samples (this is the most time-consuming part)
        fake = self.gaga.generate_samples2(
            g.params,
            g.G,
            g.D,
            n=n,
            batch_size=n,
            normalize=False,
            to_numpy=True,
            silence=True,
        )
        # consider the names of the output keys position/direction/energy/time/weight
        self.get_output_keys()
        # move particle backward ?
        self.move_backward(g, fake)
        # copy to cpp
        self.copy_generated_particle_to_g4(source, g, fake)
        # verbose
        if self.user_info.verbose_generator:
            end = time.time()
            print(f"in {end - start:0.1f} sec (GPU={g.params.current_gpu})")
    def copy_generated_particle_to_g4(self, source, g, fake):
        """Copy each attribute (position/direction/energy/time/weight) of both
        particles of the pair from the generated batch into the c++ source."""
        # position
        if g.position_is_set_by_GAN:
            pos = []
            dim = len(g.position_gan_index)
            for i in range(dim):
                # position_use_index[i] distinguishes GAN-sampled columns from
                # fixed default values stored directly in position_gan_index.
                if g.position_use_index[i]:
                    pos.append(fake[:, g.position_gan_index[i]])
                else:
                    pos.append(g.position_gan_index[i])
            # copy to c++
            source.fPositionX = pos[0]
            source.fPositionY = pos[1]
            source.fPositionZ = pos[2]
            source.fPositionX2 = pos[3]
            source.fPositionY2 = pos[4]
            source.fPositionZ2 = pos[5]
        # direction
        if g.direction_is_set_by_GAN:
            # NOTE(review): 'dir' shadows the builtin; left unchanged here.
            dir = []
            dim = len(g.direction_gan_index)
            for i in range(dim):
                if g.direction_use_index[i]:
                    dir.append(fake[:, g.direction_gan_index[i]])
                else:
                    dir.append(g.direction_gan_index[i])
            # copy to c++
            source.fDirectionX = dir[0]
            source.fDirectionY = dir[1]
            source.fDirectionZ = dir[2]
            source.fDirectionX2 = dir[3]
            source.fDirectionY2 = dir[4]
            source.fDirectionZ2 = dir[5]
        # energy
        if g.energy_is_set_by_GAN:
            # copy to c++
            source.fEnergy = fake[:, g.energy_gan_index[0]]
            source.fEnergy2 = fake[:, g.energy_gan_index[1]]
        # time
        if g.time_is_set_by_GAN:
            # copy to c++
            source.fTime = fake[:, g.time_gan_index[0]]
            source.fTime2 = fake[:, g.time_gan_index[1]]
        # weight
        if g.weight_is_set_by_GAN:
            # copy to c++
            source.fWeight = fake[:, g.weight_gan_index[0]]
            source.fWeight2 = fake[:, g.weight_gan_index[1]]
    def move_backward(self, g, fake):
        """Translate both particles backward along their direction by
        user_info.backward_distance, adjusting emission times accordingly."""
        # move particle backward ?
        back = self.user_info.backward_distance
        if not back:
            return
        if not g.time_is_set_by_GAN and not self.user_info.backward_force:
            # NOTE(review): 'gate' is not imported in this module explicitly;
            # presumably provided by 'from .GenericSource import *' — verify,
            # and note other methods use self.fatal instead.
            gate.fatal(
                f"If backward is enabled the time is not managed by GAN,"
                f" time is wrong. IT can be forced, however, with the option 'backward_force'"
            )
        # move particle position
        position = fake[:, g.position_gan_index[0] : g.position_gan_index[0] + 3]
        direction = fake[:, g.direction_gan_index[0] : g.direction_gan_index[0] + 3]
        fake[:, g.position_gan_index[0] : g.position_gan_index[0] + 3] = (
            position - back * direction
        )
        # move second particle position
        position = fake[:, g.position_gan_index[3] : g.position_gan_index[3] + 3]
        direction = fake[:, g.direction_gan_index[3] : g.direction_gan_index[3] + 3]
        fake[:, g.position_gan_index[3] : g.position_gan_index[3] + 3] = (
            position - back * direction
        )
        # modify the time because we move the particle backward
        if g.time_is_set_by_GAN:
            c = scipy.constants.speed_of_light * 1000 / 1e9  # in mm/ns
            xt = back / c
            fake[:, g.time_gan_index[0]] -= xt
            fake[:, g.time_gan_index[1]] -= xt
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.