hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
486c690d2ec75c48619b4b301b76eacb54bb9a8b | 276 | py | Python | script.service.hue/service.py | toupeira/script.service.hue | c2811ef792b67084ab2e82c91aaa0947d9cb90d3 | [
"MIT"
] | null | null | null | script.service.hue/service.py | toupeira/script.service.hue | c2811ef792b67084ab2e82c91aaa0947d9cb90d3 | [
"MIT"
] | null | null | null | script.service.hue/service.py | toupeira/script.service.hue | c2811ef792b67084ab2e82c91aaa0947d9cb90d3 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import xbmc
from resources.lib import core
from resources.lib import reporting
# Service add-on entry point: start the Hue service and report any crash
# through the add-on's reporting helper instead of letting it propagate.
try:
    core.core()  # Run the Hue service
except Exception as exc:
    # Top-level service boundary: log to the Kodi log, then hand the
    # exception to the reporting module for processing.
    xbmc.log("[script.service.hue] Core service exception")
    reporting.process_exception(exc)
| 23 | 60 | 0.699275 |
a9102e7065398b7cbae1536ef952c0462a33a51c | 13,443 | py | Python | pex/resolve/lockfile/json_codec.py | sthagen/pantsbuild-pex | bffe6c3641b809cd3b20adbc7fdb2cf7e5f54309 | [
"Apache-2.0"
] | null | null | null | pex/resolve/lockfile/json_codec.py | sthagen/pantsbuild-pex | bffe6c3641b809cd3b20adbc7fdb2cf7e5f54309 | [
"Apache-2.0"
] | null | null | null | pex/resolve/lockfile/json_codec.py | sthagen/pantsbuild-pex | bffe6c3641b809cd3b20adbc7fdb2cf7e5f54309 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import
import json
from pex import compatibility
from pex.dist_metadata import Requirement, RequirementParseError
from pex.enum import Enum
from pex.pep_440 import Version
from pex.pep_503 import ProjectName
from pex.resolve.locked_resolve import Artifact, LockedRequirement, LockedResolve, LockStyle
from pex.resolve.lockfile.model import Lockfile
from pex.resolve.path_mappings import PathMappings
from pex.resolve.resolved_requirement import Fingerprint, Pin
from pex.resolve.resolver_configuration import ResolverVersion
from pex.sorted_tuple import SortedTuple
from pex.third_party.packaging import tags
from pex.third_party.packaging.specifiers import InvalidSpecifier, SpecifierSet
from pex.typing import TYPE_CHECKING, cast
if TYPE_CHECKING:
from typing import (
Any,
Container,
Dict,
List,
Mapping,
Optional,
Text,
Tuple,
Type,
TypeVar,
Union,
)
import attr # vendor:skip
_V = TypeVar("_V", bound=Enum.Value)
else:
from pex.third_party import attr
class ParseError(Exception):
    """Indicates an error parsing a Pex lock file.

    Raised for unreadable files, malformed JSON and schema violations.
    """
@attr.s(frozen=True)
class PathMappingError(ParseError):
    """Indicates missing path mappings when parsing a Pex lock file."""
    # All path mappings declared by the lock file: mapping name -> description.
    required_path_mappings = attr.ib()  # type: Mapping[str, Optional[str]]
    # The subset of required mapping names the caller did not supply.
    unspecified_paths = attr.ib()  # type: Container[str]
def _load_json(
lockfile_contents, # type: Text
source, # type: str
):
# type: (...) -> Mapping
try:
return cast("Mapping", json.loads(lockfile_contents))
except ValueError as e:
raise ParseError(
"The lock file at {source} does not contain valid JSON: "
"{err}".format(source=source, err=e)
)
def loads(
    lockfile_contents, # type: Text
    source="<string>", # type: str
    path_mappings=PathMappings(), # type: PathMappings
):
    # type: (...) -> Lockfile
    """Parse the JSON text of a Pex lock file into a ``Lockfile``.

    :param lockfile_contents: The JSON text of the lock file.
    :param source: Human-readable origin of the contents, used in error messages.
    :param path_mappings: Mappings applied to locally-canonicalized paths in the
        lock (see ``maybe_reify`` calls below).
    :raises ParseError: On malformed JSON or any schema violation.
    :raises PathMappingError: If the lock file requires path mappings that were
        not supplied via *path_mappings*.
    """
    # Schema accessor over the parsed JSON tree. NOTE: the default for `data`
    # is evaluated exactly once, when `get` is defined, so the lock file text
    # is parsed a single time and re-used by every default-`data` call.
    def get(
        key, # type: str
        expected_type=compatibility.string, # type: Union[Type, Tuple[Type, ...]]
        data=_load_json(lockfile_contents, source=source), # type: Mapping
        path=".", # type: str
        optional=False,
    ):
        # type: (...) -> Any
        if not isinstance(data, dict):
            raise ParseError(
                "Cannot retrieve '{path}[\"{key}\"]' in {source} because '{path}' is not a "
                "JSON object but a {type} with value {value}.".format(
                    path=path,
                    key=key,
                    source=source,
                    type=type(data).__name__,
                    value=data,
                )
            )
        if key not in data:
            if optional:
                return None
            raise ParseError(
                "The object at '{path}' in {source} did not have the expected key "
                "{key!r}.".format(path=path, source=source, key=key)
            )
        value = data[key]
        # Optional values skip the type check (they may legitimately be None).
        if not optional and not isinstance(value, expected_type):
            raise ParseError(
                "Expected '{path}[\"{key}\"]' in {source} to be of type {expected_type} "
                "but given {type} with value {value}.".format(
                    path=path,
                    key=key,
                    source=source,
                    expected_type=(
                        " or ".join(t.__name__ for t in expected_type)
                        if isinstance(expected_type, tuple)
                        else expected_type.__name__
                    ),
                    type=type(value).__name__,
                    value=value,
                )
            )
        return value
    # Look up a string value and convert it to a member of the given Enum type.
    def get_enum_value(
        enum_type, # type: Type[Enum[_V]]
        key, # type: str
        path=".", # type: str
    ):
        # type: (...) -> _V
        try:
            return enum_type.for_value(get(key, path=path))
        except ValueError as e:
            raise ParseError(
                "The '{path}[\"{key}\"]' is invalid: {err}".format(key=key, path=path, err=e)
            )
    # Parse a requirement string, reifying any canonicalized paths it contains.
    def parse_requirement(
        raw_requirement, # type: str
        path, # type: str
    ):
        # type: (...) -> Requirement
        try:
            return Requirement.parse(path_mappings.maybe_reify(raw_requirement))
        except RequirementParseError as e:
            raise ParseError(
                "The requirement string at '{path}' is invalid: {err}".format(path=path, err=e)
            )
    # Parse a PEP 440 version-specifier string (e.g. ">=3.7,<4").
    def parse_version_specifier(
        raw_version_specifier, # type: str
        path, # type: str
    ):
        # type: (...) -> SpecifierSet
        try:
            return SpecifierSet(raw_version_specifier)
        except InvalidSpecifier as e:
            raise ParseError(
                "The version specifier at '{path}' is invalid: {err}".format(path=path, err=e)
            )
    # Fail fast if the lock declares path mappings the caller did not provide.
    required_path_mappings = get("path_mappings", dict, optional=True) or {}
    given_mappings = set(mapping.name for mapping in path_mappings.mappings)
    unspecified_paths = set(required_path_mappings) - given_mappings
    if unspecified_paths:
        raise PathMappingError(
            required_path_mappings=required_path_mappings, unspecified_paths=unspecified_paths
        )
    requirements = [
        parse_requirement(req, path=".requirements[{index}]".format(index=index))
        for index, req in enumerate(get("requirements", list))
    ]
    constraints = [
        parse_requirement(constraint, path=".constraints[{index}]".format(index=index))
        for index, constraint in enumerate(get("constraints", list))
    ]
    # Build a packaging Tag from a 3-element [interpreter, abi, platform] list.
    def assemble_tag(
        components, # type: List[str]
        path, # type: str
    ):
        # type: (...) -> tags.Tag
        if len(components) != 3 or not all(isinstance(c, compatibility.string) for c in components):
            raise ParseError(
                "The tag at '{path}' must have 3 string components. Given {count} with types "
                "[{types}]: {components}".format(
                    path=path,
                    count=len(components),
                    types=", ".join(type(c).__name__ for c in components),
                    components=components,
                )
            )
        return tags.Tag(interpreter=components[0], abi=components[1], platform=components[2])
    # Reconstruct each locked resolve: an optional platform tag plus the list
    # of locked requirements (each with at least one artifact).
    locked_resolves = []
    for lock_index, locked_resolve in enumerate(get("locked_resolves", list)):
        lock_path = ".locked_resolves[{lock_index}]".format(lock_index=lock_index)
        platform_tag_components = get(
            "platform_tag", list, data=locked_resolve, path=lock_path, optional=True
        )
        platform_tag = (
            assemble_tag(
                components=platform_tag_components,
                path='{lock_path}["platform_tag"]'.format(lock_path=lock_path),
            )
            if platform_tag_components
            else None
        )
        locked_reqs = []
        for req_index, req in enumerate(
            get("locked_requirements", list, data=locked_resolve, path=lock_path)
        ):
            req_path = "{lock_path}[{req_index}]".format(lock_path=lock_path, req_index=req_index)
            artifacts = []
            for i, artifact in enumerate(get("artifacts", list, data=req, path=req_path)):
                ap = '{path}["artifacts"][{index}]'.format(path=req_path, index=i)
                artifacts.append(
                    Artifact.from_url(
                        url=path_mappings.maybe_reify(get("url", data=artifact, path=ap)),
                        fingerprint=Fingerprint(
                            algorithm=get("algorithm", data=artifact, path=ap),
                            hash=get("hash", data=artifact, path=ap),
                        ),
                    )
                )
            if not artifacts:
                raise ParseError(
                    "Expected '{path}' in {source} to have at least one artifact.".format(
                        path=req_path, source=source
                    )
                )
            requires_python = None
            version_specifier = get("requires_python", data=req, path=req_path, optional=True)
            if version_specifier:
                requires_python = parse_version_specifier(
                    version_specifier, path='{path}["requires_python"]'.format(path=req_path)
                )
            # The first artifact is the primary one; the rest are alternates.
            locked_reqs.append(
                LockedRequirement.create(
                    pin=Pin(
                        project_name=ProjectName(get("project_name", data=req, path=req_path)),
                        version=Version(get("version", data=req, path=req_path)),
                    ),
                    requires_python=requires_python,
                    requires_dists=[
                        parse_requirement(
                            requires_dist,
                            path='{path}["requires_dists"][{index}]'.format(path=req_path, index=i),
                        )
                        for i, requires_dist in enumerate(
                            get("requires_dists", list, data=req, path=req_path)
                        )
                    ],
                    artifact=artifacts[0],
                    additional_artifacts=artifacts[1:],
                )
            )
        locked_resolves.append(
            LockedResolve(locked_requirements=SortedTuple(locked_reqs), platform_tag=platform_tag)
        )
    return Lockfile.create(
        pex_version=get("pex_version"),
        style=get_enum_value(LockStyle, "style"),
        requires_python=get("requires_python", list),
        resolver_version=get_enum_value(ResolverVersion, "resolver_version"),
        requirements=requirements,
        constraints=constraints,
        allow_prereleases=get("allow_prereleases", bool),
        allow_wheels=get("allow_wheels", bool),
        allow_builds=get("allow_builds", bool),
        prefer_older_binary=get("prefer_older_binary", bool),
        use_pep517=get("use_pep517", bool, optional=True),
        build_isolation=get("build_isolation", bool),
        transitive=get("transitive", bool),
        locked_resolves=locked_resolves,
        source=source,
    )
def load(
    lockfile_path, # type: str
    path_mappings=PathMappings(), # type: PathMappings
):
    # type: (...) -> Lockfile
    """Read and parse the Pex lock file at *lockfile_path*.

    :param lockfile_path: Path of the lock file to read.
    :param path_mappings: Mappings applied to canonicalized paths in the lock.
    :raises ParseError: If the file cannot be read or its contents are invalid.
    """
    try:
        with open(lockfile_path) as lock_fp:
            return loads(lock_fp.read(), source=lockfile_path, path_mappings=path_mappings)
    except IOError as e:
        raise ParseError(
            "Failed to read lock file at {path}: {err}".format(path=lockfile_path, err=e)
        )
def as_json_data(
    lockfile, # type: Lockfile
    path_mappings=PathMappings(), # type: PathMappings
):
    # type: (...) -> Dict[str, Any]
    """Render *lockfile* as a JSON-serializable dict.

    Inverse of ``loads``: URLs and requirement strings are canonicalized via
    *path_mappings* so the emitted lock is portable across machines.
    """
    # Serialize one artifact as {url, algorithm, hash}.
    def artifact_data(artifact):
        return {
            "url": path_mappings.maybe_canonicalize(artifact.url),
            "algorithm": artifact.fingerprint.algorithm,
            "hash": artifact.fingerprint.hash,
        }
    # Serialize one locked requirement.
    def requirement_data(req):
        return {
            "project_name": str(req.pin.project_name),
            "version": str(req.pin.version),
            "requires_dists": [
                path_mappings.maybe_canonicalize(str(dependency))
                for dependency in req.requires_dists
            ],
            "requires_python": str(req.requires_python) if req.requires_python else None,
            "artifacts": [artifact_data(artifact) for artifact in req.iter_artifacts()],
        }
    # Serialize one locked resolve: optional platform tag plus requirements.
    def resolve_data(locked_resolve):
        tag = locked_resolve.platform_tag
        return {
            "platform_tag": [tag.interpreter, tag.abi, tag.platform] if tag else None,
            "locked_requirements": [
                requirement_data(req) for req in locked_resolve.locked_requirements
            ],
        }
    return {
        "pex_version": lockfile.pex_version,
        "style": str(lockfile.style),
        "requires_python": list(lockfile.requires_python),
        "resolver_version": str(lockfile.resolver_version),
        "requirements": [
            path_mappings.maybe_canonicalize(str(req)) for req in lockfile.requirements
        ],
        "constraints": [str(constraint) for constraint in lockfile.constraints],
        "allow_prereleases": lockfile.allow_prereleases,
        "allow_wheels": lockfile.allow_wheels,
        "allow_builds": lockfile.allow_builds,
        "prefer_older_binary": lockfile.prefer_older_binary,
        "use_pep517": lockfile.use_pep517,
        "build_isolation": lockfile.build_isolation,
        "transitive": lockfile.transitive,
        "locked_resolves": [
            resolve_data(locked_resolve) for locked_resolve in lockfile.locked_resolves
        ],
        "path_mappings": {
            mapping.name: mapping.description for mapping in path_mappings.mappings
        },
    }
| 37.341667 | 100 | 0.558506 |
f34a177b74cf8e82b816289e7f4e5ee3cf7ffeb3 | 89 | py | Python | backend/entries/apps.py | dwightgunning/daily-writing | 1e39ba5172d6f6a98a4af9553dc8b26c980e4fcc | [
"MIT"
] | 1 | 2019-05-01T12:09:47.000Z | 2019-05-01T12:09:47.000Z | backend/entries/apps.py | dwightgunning/daily-writing | 1e39ba5172d6f6a98a4af9553dc8b26c980e4fcc | [
"MIT"
] | 34 | 2020-02-12T00:21:07.000Z | 2022-03-02T13:43:11.000Z | backend/entries/apps.py | dwightgunning/daily-writing | 1e39ba5172d6f6a98a4af9553dc8b26c980e4fcc | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class EntriesConfig(AppConfig):
    """Django application configuration for the ``entries`` app."""
    name = "entries"
| 14.833333 | 33 | 0.752809 |
f47342001f7c1b1e4f56939c078dcfc1089b8444 | 1,796 | py | Python | docker-app/qfieldcloud/core/migrations/0011_export.py | livelihoods-and-landscapes/qfieldcloud-tcs | 3075e19d89caa3090a0d2027a376336526572764 | [
"MIT"
] | 34 | 2021-06-08T12:06:24.000Z | 2022-03-07T11:45:10.000Z | docker-app/qfieldcloud/core/migrations/0011_export.py | livelihoods-and-landscapes/qfieldcloud-tcs | 3075e19d89caa3090a0d2027a376336526572764 | [
"MIT"
] | 139 | 2021-06-08T00:24:51.000Z | 2022-03-28T09:59:54.000Z | docker-app/qfieldcloud/core/migrations/0011_export.py | livelihoods-and-landscapes/qfieldcloud-tcs | 3075e19d89caa3090a0d2027a376336526572764 | [
"MIT"
] | 8 | 2021-06-11T04:18:36.000Z | 2022-02-15T20:52:58.000Z | # Generated by Django 2.2.17 on 2021-01-06 08:42
import uuid
import django.contrib.postgres.fields.jsonb
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration (Django 2.2.17): creates the ``Export`` model.

    Avoid hand-editing: Django's migration state depends on the recorded
    operations matching the model history.
    """
    dependencies = [
        # Must be applied after the previous core migration.
        ("core", "0010_auto_20210106_1543"),
    ]
    operations = [
        migrations.CreateModel(
            name="Export",
            fields=[
                (
                    # UUID primary key generated client-side.
                    "id",
                    models.UUIDField(
                        default=uuid.uuid4,
                        editable=False,
                        primary_key=True,
                        serialize=False,
                    ),
                ),
                (
                    # Integer status code; choices name each state.
                    "status",
                    models.PositiveSmallIntegerField(
                        choices=[
                            (1, "STATUS_PENDING"),
                            (2, "STATUS_BUSY"),
                            (3, "STATUS_EXPORTED"),
                            (4, "STATUS_ERROR"),
                        ],
                        default=1,
                    ),
                ),
                ("output", models.TextField(null=True)),
                (
                    # PostgreSQL JSONB column holding the export log.
                    "exportlog",
                    django.contrib.postgres.fields.jsonb.JSONField(null=True),
                ),
                ("created_at", models.DateTimeField(auto_now_add=True)),
                ("updated_at", models.DateTimeField(auto_now=True)),
                (
                    # Exports are deleted together with their parent project.
                    "project",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="exports",
                        to="core.Project",
                    ),
                ),
            ],
        ),
    ]
| 30.440678 | 78 | 0.396993 |
b0f7dd3367d180a92052327ae819eb78bd554e38 | 2,443 | py | Python | openliveq/nlp/parser.py | mpkato/openliveq | 3b87778e57c1b4176cecce7a0282b80831ed541b | [
"MIT"
] | 5 | 2016-11-12T16:12:04.000Z | 2017-12-12T02:37:37.000Z | openliveq/nlp/parser.py | mpkato/openliveq | 3b87778e57c1b4176cecce7a0282b80831ed541b | [
"MIT"
] | 1 | 2018-08-01T03:57:48.000Z | 2018-08-01T03:57:48.000Z | openliveq/nlp/parser.py | mpkato/openliveq | 3b87778e57c1b4176cecce7a0282b80831ed541b | [
"MIT"
] | 4 | 2017-06-29T16:34:27.000Z | 2019-10-22T09:19:31.000Z | # -*- coding: utf-8 -*-
import MeCab
class Parser(object):
    '''
    Japanese text tokenizer built on the MeCab morphological analyzer.
    '''
    # POS prefixes treated as content words:
    # 形容詞 (adjective), 名詞 (noun), 動詞 (verb), 副詞 (adverb).
    CONTENT_POS_PREFIXES = ["形容詞", "名詞", "動詞", "副詞"]

    def __init__(self):
        self.tagger = MeCab.Tagger()
        # Priming call: parsing an empty string up front works around a known
        # MeCab Python-binding issue where parseToNode can otherwise return
        # garbled surface strings.
        self.tagger.parse("")

    def word_tokenize(self, sentence):
        '''
        Word tokenization: return every surface token, lowercased.
        '''
        # BUG FIX: previously this extracted w[0] (the surface string) and
        # then ran the result through normalize(), which indexes token[0]
        # again -- so only the first *character* of each token was returned.
        # normalize() expects (surface, feature) tuples, so pass them directly
        # (as noun_tokenize and content_word_tokenize already do).
        return self.normalize(self._mecab_tokenize(sentence))

    def noun_tokenize(self, sentence):
        '''
        Extract only nouns, lowercased.
        '''
        tagged_tokens = self.pos_tokenize(sentence)
        nouns = self.noun_filter(tagged_tokens)
        return self.normalize(nouns)

    def content_word_tokenize(self, sentence):
        '''
        Extract only content words (see CONTENT_POS_PREFIXES), lemmatized and
        lowercased.
        '''
        tagged_tokens = self.pos_tokenize(sentence)
        content_words = self.content_word_filter(tagged_tokens)
        content_words = self.lemmatize(content_words)
        return self.normalize(content_words)

    def pos_tokenize(self, sentence):
        '''
        Parse a sentence and return (surface, feature) tagged tokens.
        '''
        return self._mecab_tokenize(sentence)

    def noun_filter(self, tokens):
        '''
        Keep only noun tokens (MeCab pos starts with 名詞).
        '''
        return [token for token in tokens
                if token[1].startswith('名詞')]

    def content_word_filter(self, tokens):
        '''
        Keep only content-word tokens (pos starts with one of
        CONTENT_POS_PREFIXES).
        '''
        return [token for token in tokens
                if any(token[1].startswith(pos)
                       for pos in self.CONTENT_POS_PREFIXES)]

    def normalize(self, tokens):
        '''
        Convert (surface, feature) tokens to lowercase surface strings.
        '''
        return [token[0].lower() for token in tokens]

    def lemmatize(self, tokens):
        '''
        Replace each surface form with its dictionary (base) form when MeCab
        provides one; keep the surface form otherwise.
        '''
        result = []
        for token in tokens:
            w = token[0]
            fs = token[1].split(",")
            # Feature field 6 holds the base form; '*' means none available.
            if fs[6] != '*':
                w = fs[6]
            result.append((w, token[1]))
        return result

    def _mecab_tokenize(self, sentence):
        '''
        Run MeCab over the sentence and collect (surface, feature) pairs.
        '''
        node = self.tagger.parseToNode(sentence)
        result = []
        while node:
            result.append((node.surface, node.feature))
            node = node.next
        return result
| 27.761364 | 69 | 0.561195 |
9935d5e8479001a869f0471829260ab3a115a305 | 3,707 | py | Python | ucsmsdk/mometa/initiator/InitiatorRequestorGrpEp.py | Kego/ucsmsdk | 244f283a5c295cf746110bb96686d079b19927ce | [
"Apache-2.0"
] | 78 | 2015-11-30T14:10:05.000Z | 2022-02-13T00:29:08.000Z | ucsmsdk/mometa/initiator/InitiatorRequestorGrpEp.py | Kego/ucsmsdk | 244f283a5c295cf746110bb96686d079b19927ce | [
"Apache-2.0"
] | 113 | 2015-11-20T09:42:46.000Z | 2022-03-16T16:53:29.000Z | ucsmsdk/mometa/initiator/InitiatorRequestorGrpEp.py | Kego/ucsmsdk | 244f283a5c295cf746110bb96686d079b19927ce | [
"Apache-2.0"
] | 86 | 2015-12-12T08:22:18.000Z | 2022-01-23T03:56:34.000Z | """This module contains the general information for InitiatorRequestorGrpEp ManagedObject."""
from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class InitiatorRequestorGrpEpConsts:
    """Constant values for InitiatorRequestorGrpEp enumerated properties."""
    # alloc_state values.
    ALLOC_STATE_ALLOCATED = "allocated"
    ALLOC_STATE_ALLOCATING = "allocating"
    ALLOC_STATE_FAILED = "failed"
    ALLOC_STATE_NONE = "none"
    # lc (life-cycle) values.
    LC_ALLOCATED = "allocated"
    LC_AVAILABLE = "available"
    LC_DEALLOCATED = "deallocated"
    LC_REPURPOSED = "repurposed"
    # type values.
    TYPE_DEDICATED = "dedicated"
    TYPE_POLICY = "policy"
    TYPE_SHARED = "shared"
class InitiatorRequestorGrpEp(ManagedObject):
    """This is InitiatorRequestorGrpEp class.

    Auto-generated UCS managed-object metadata (mometa); avoid hand-editing
    the meta tables below.
    """
    consts = InitiatorRequestorGrpEpConsts()
    # Properties that form part of the object's relative name ("req-grp-[id]").
    naming_props = set(['id'])
    mo_meta = MoMeta("InitiatorRequestorGrpEp", "initiatorRequestorGrpEp", "req-grp-[id]", VersionMeta.Version211a, "InputOutput", 0x3f, [], ["read-only"], ['topSystem'], ['initiatorMemberEp', 'initiatorUnitEp'], [None])
    # Per-property metadata: XML attribute name, type, version, access,
    # masks/lengths, validation regex and permitted enum values.
    prop_meta = {
        "alloc_state": MoPropertyMeta("alloc_state", "allocState", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["allocated", "allocating", "failed", "none"], []),
        "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version211a, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
        "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
        "ep_dn": MoPropertyMeta("ep_dn", "epDn", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []),
        "id": MoPropertyMeta("id", "id", "uint", VersionMeta.Version211a, MoPropertyMeta.NAMING, 0x8, None, None, None, [], []),
        "lc": MoPropertyMeta("lc", "lc", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["allocated", "available", "deallocated", "repurposed"], []),
        "pol_dn": MoPropertyMeta("pol_dn", "polDn", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []),
        "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, 0x10, 0, 256, None, [], []),
        "sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
        "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version211a, MoPropertyMeta.READ_WRITE, 0x20, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
        "type": MoPropertyMeta("type", "type", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["dedicated", "policy", "shared"], []),
    }
    # Maps UCS XML attribute names to the Python property names above.
    prop_map = {
        "allocState": "alloc_state",
        "childAction": "child_action",
        "dn": "dn",
        "epDn": "ep_dn",
        "id": "id",
        "lc": "lc",
        "polDn": "pol_dn",
        "rn": "rn",
        "sacl": "sacl",
        "status": "status",
        "type": "type",
    }
    def __init__(self, parent_mo_or_dn, id, **kwargs):
        """Initialize the managed object under *parent_mo_or_dn* with naming id *id*."""
        self._dirty_mask = 0
        self.id = id
        self.alloc_state = None
        self.child_action = None
        self.ep_dn = None
        self.lc = None
        self.pol_dn = None
        self.sacl = None
        self.status = None
        self.type = None
        ManagedObject.__init__(self, "InitiatorRequestorGrpEp", parent_mo_or_dn, **kwargs)
| 52.211268 | 247 | 0.649852 |
6d39eea1e99148c6440a4f98b5eae196f7236683 | 603 | py | Python | datetime/komand_datetime/actions/date_from_epoch/action.py | hashtagcyber/insightconnect-plugins | 8d54973cf51b1c27a26e4100b5a4dd223d1c714c | [
"MIT"
] | null | null | null | datetime/komand_datetime/actions/date_from_epoch/action.py | hashtagcyber/insightconnect-plugins | 8d54973cf51b1c27a26e4100b5a4dd223d1c714c | [
"MIT"
] | 1 | 2021-02-23T23:57:37.000Z | 2021-02-23T23:57:37.000Z | datetime/komand_datetime/actions/date_from_epoch/action.py | hashtagcyber/insightconnect-plugins | 8d54973cf51b1c27a26e4100b5a4dd223d1c714c | [
"MIT"
] | null | null | null | import insightconnect_plugin_runtime
from .schema import DateFromEpochInput, DateFromEpochOutput, Input, Output
import maya
class DateFromEpoch(insightconnect_plugin_runtime.Action):
    """Action that converts a Unix epoch timestamp into an RFC 3339 datetime string."""

    def __init__(self):
        # FIX: super(self.__class__, self) replaced with an explicit class
        # reference -- the self.__class__ form recurses infinitely if this
        # class is ever subclassed, since self.__class__ is then the subclass.
        super(DateFromEpoch, self).__init__(
            name='date_from_epoch',
            description='Convert an Epoch as a float to a Datetime',
            input=DateFromEpochInput(),
            output=DateFromEpochOutput())

    def run(self, params={}):
        """Convert the epoch value under Input.EPOCH into an RFC 3339 string.

        ``params={}`` is the plugin SDK's standard signature; the dict is
        never mutated here.
        """
        new_datetime = maya.MayaDT(params.get(Input.EPOCH)).rfc3339()
        return {Output.DATE: new_datetime}
| 33.5 | 74 | 0.679934 |
a064c99d2a7c2693ca9c1d41368ae5356355e74d | 1,991 | py | Python | sdk/python/pulumi_aws/ec2/placement_group.py | pulumi-bot/pulumi-aws | 756c60135851e015232043c8206567101b8ebd85 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/ec2/placement_group.py | pulumi-bot/pulumi-aws | 756c60135851e015232043c8206567101b8ebd85 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/ec2/placement_group.py | pulumi-bot/pulumi-aws | 756c60135851e015232043c8206567101b8ebd85 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import pulumi
import pulumi.runtime
class PlacementGroup(pulumi.CustomResource):
    """
    Provides an EC2 placement group. Read more about placement groups
    in [AWS Docs](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html).
    """
    def __init__(__self__, __name__, __opts__=None, name=None, strategy=None):
        """Create a PlacementGroup resource with the given unique name, props, and options."""
        # NOTE(review): this generated code targets Python 2 -- `basestring`
        # does not exist on Python 3 and would raise NameError there.
        # Regenerate with a newer provider SDK rather than hand-editing.
        if not __name__:
            raise TypeError('Missing resource name argument (for URN creation)')
        if not isinstance(__name__, basestring):
            raise TypeError('Expected resource name to be a string')
        if __opts__ and not isinstance(__opts__, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        __props__ = dict()
        if name and not isinstance(name, basestring):
            raise TypeError('Expected property name to be a basestring')
        __self__.name = name
        """
        The name of the placement group.
        """
        __props__['name'] = name
        # `strategy` is the only required input property.
        if not strategy:
            raise TypeError('Missing required property strategy')
        elif not isinstance(strategy, basestring):
            raise TypeError('Expected property strategy to be a basestring')
        __self__.strategy = strategy
        """
        The placement strategy.
        """
        __props__['strategy'] = strategy
        super(PlacementGroup, __self__).__init__(
            'aws:ec2/placementGroup:PlacementGroup',
            __name__,
            __props__,
            __opts__)
    def set_outputs(self, outs):
        """Copy resolved output values from the engine onto this resource."""
        if 'name' in outs:
            self.name = outs['name']
        if 'strategy' in outs:
            self.strategy = outs['strategy']
| 37.566038 | 94 | 0.637368 |
fe9c8f5d2ed4e92d93f5f8587638460e00d10c5f | 422 | py | Python | src/python/turicreate/toolkits/image_analysis/__init__.py | cookingcodewithme/turicreate | a89e203d60529d2d72547c03ec9753ea979ee342 | [
"BSD-3-Clause"
] | 11,356 | 2017-12-08T19:42:32.000Z | 2022-03-31T16:55:25.000Z | src/python/turicreate/toolkits/image_analysis/__init__.py | cookingcodewithme/turicreate | a89e203d60529d2d72547c03ec9753ea979ee342 | [
"BSD-3-Clause"
] | 2,402 | 2017-12-08T22:31:01.000Z | 2022-03-28T19:25:52.000Z | src/python/turicreate/toolkits/image_analysis/__init__.py | cookingcodewithme/turicreate | a89e203d60529d2d72547c03ec9753ea979ee342 | [
"BSD-3-Clause"
] | 1,343 | 2017-12-08T19:47:19.000Z | 2022-03-26T11:31:36.000Z | # -*- coding: utf-8 -*-
# Copyright © 2017 Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can
# be found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
from __future__ import print_function as _
from __future__ import division as _
from __future__ import absolute_import as _
# __all__ = ["image_analysis"]
from .image_analysis import * | 35.166667 | 85 | 0.760664 |
07203c84c5ed8916c66cbd8ee05e9e9e3f5e8cfd | 3,441 | py | Python | source/code/services/tagging_service.py | mobri2a/aws-ops-automator | a3564a8eb142b1ca0487f9e91e83d2c1284af8f2 | [
"Apache-2.0"
] | 94 | 2017-08-01T05:28:45.000Z | 2021-09-10T07:18:46.000Z | source/code/services/tagging_service.py | mobri2a/aws-ops-automator | a3564a8eb142b1ca0487f9e91e83d2c1284af8f2 | [
"Apache-2.0"
] | 27 | 2018-02-15T17:14:09.000Z | 2021-04-27T11:28:42.000Z | source/code/services/tagging_service.py | mobri2a/aws-ops-automator | a3564a8eb142b1ca0487f9e91e83d2c1284af8f2 | [
"Apache-2.0"
] | 50 | 2017-08-01T05:29:04.000Z | 2021-08-11T20:09:07.000Z | ######################################################################################################################
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License Version 2.0 (the "License"). You may not use this file except in compliance #
# with the License. A copy of the License is located at #
# #
# http://www.apache.org/licenses/ #
# #
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES #
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions #
# and limitations under the License. #
######################################################################################################################
from services.aws_service import AwsService
# Resource type name exposed by this service wrapper.
RESOURCES = "Resources"
# Result key in the boto response that holds each resource collection.
CUSTOM_RESULT_PATHS = {
    RESOURCES: "ResourceTagMappingList"
}
# All resource types this service wrapper supports.
RESOURCE_NAMES = [
    RESOURCES
]
# Pagination token names used by the resourcegroupstaggingapi client.
NEXT_TOKEN_ARGUMENT = "PaginationToken"
NEXT_TOKEN_RESULT = "PaginationToken"
# No parameter-name remapping is needed for this service.
MAPPED_PARAMETERS = {}
class TaggingService(AwsService):
    """AwsService wrapper for the AWS Resource Groups Tagging API."""

    def __init__(self, role_arn=None, session=None, tags_as_dict=True, as_named_tuple=False, service_retry_strategy=None):
        """
        :param role_arn: Optional (cross account) role to use to retrieve services
        :param session: Optional session to use to retrieve services
        :param tags_as_dict: Set to True true to convert resource tags to dictionaries
        :param as_named_tuple: Set to True to return resources as named tuples instead of a dictionary
        :param service_retry_strategy: service retry strategy for making boto api calls
        """
        AwsService.__init__(
            self,
            service_name="resourcegroupstaggingapi",
            resource_names=RESOURCE_NAMES,
            role_arn=role_arn,
            session=session,
            tags_as_dict=tags_as_dict,
            as_named_tuple=as_named_tuple,
            custom_result_paths=CUSTOM_RESULT_PATHS,
            mapped_parameters=MAPPED_PARAMETERS,
            next_token_argument=NEXT_TOKEN_ARGUMENT,
            next_token_result=NEXT_TOKEN_RESULT,
            service_retry_strategy=service_retry_strategy,
        )

    def describe_resources_function_name(self, resource_name):
        """
        Returns the name of the boto client method call to retrieve the specified resource.
        This API uses "get_*" method names where most AWS services use "describe_*".
        :param resource_name:
        :return: Name of the boto3 client function to retrieve the specified resource type
        """
        base_name = AwsService.describe_resources_function_name(self, resource_name)
        return base_name.replace("describe_", "get_")
| 51.358209 | 122 | 0.503051 |
b4cfd8fdbf1ae4744176283d9547f7a9635b35a6 | 5,887 | py | Python | examples/pytorch/IterativePruning/iterative_prune_mnist.py | yazdipour/mlflow | 5cd5f61ad0157e8f9a19bac3e4499da34f77d2ea | [
"Apache-2.0"
] | null | null | null | examples/pytorch/IterativePruning/iterative_prune_mnist.py | yazdipour/mlflow | 5cd5f61ad0157e8f9a19bac3e4499da34f77d2ea | [
"Apache-2.0"
] | 1 | 2022-02-20T20:09:24.000Z | 2022-02-20T20:09:24.000Z | examples/pytorch/IterativePruning/iterative_prune_mnist.py | yazdipour/mlflow | 5cd5f61ad0157e8f9a19bac3e4499da34f77d2ea | [
"Apache-2.0"
] | null | null | null | import argparse
import copy
import os
import shutil
import tempfile
from pathlib import Path
import pytorch_lightning as pl
import torch
from ax.service.ax_client import AxClient
from prettytable import PrettyTable
from torch.nn.utils import prune
import mlflow.pytorch
from mnist import (
MNISTDataModule,
LightningMNISTClassifier,
)
from mlflow.tracking.artifact_utils import _download_artifact_from_uri
class IterativePrune:
def __init__(self):
self.parser_args = None
self.ax_client = None
self.base_model_path = "base_model"
self.pruning_amount = None
def run_mnist_model(self, base=False):
parser_dict = vars(self.parser_args)
if base:
mlflow.start_run(run_name="BaseModel")
mlflow.pytorch.autolog()
dm = MNISTDataModule(**parser_dict)
dm.setup(stage="fit")
model = LightningMNISTClassifier(**parser_dict)
trainer = pl.Trainer.from_argparse_args(self.parser_args)
trainer.fit(model, dm)
trainer.test(datamodule=dm)
if os.path.exists(self.base_model_path):
shutil.rmtree(self.base_model_path)
mlflow.pytorch.save_model(trainer.lightning_module, self.base_model_path)
return trainer
def load_base_model(self):
path = Path(_download_artifact_from_uri(self.base_model_path))
model_file_path = os.path.join(path, "data/model.pth")
return torch.load(model_file_path)
def initialize_ax_client(self):
self.ax_client = AxClient()
self.ax_client.create_experiment(
parameters=[
{"name": "amount", "type": "range", "bounds": [0.05, 0.15], "value_type": "float"}
],
objective_name="test_accuracy",
)
@staticmethod
def prune_and_save_model(model, amount):
for _, module in model.named_modules():
if isinstance(module, (torch.nn.Conv2d, torch.nn.Linear)):
prune.l1_unstructured(module, name="weight", amount=amount)
prune.remove(module, "weight")
mlflow.pytorch.save_state_dict(model.state_dict(), ".")
model = torch.load("state_dict.pth")
os.remove("state_dict.pth")
return model
@staticmethod
def count_model_parameters(model):
table = PrettyTable(["Modules", "Parameters"])
total_params = 0
for name, parameter in model.named_parameters():
if not parameter.requires_grad:
continue
param = parameter.nonzero(as_tuple=False).size(0)
table.add_row([name, param])
total_params += param
return table, total_params
@staticmethod
def write_prune_summary(summary, params):
tempdir = tempfile.mkdtemp()
try:
summary_file = os.path.join(tempdir, "pruned_model_summary.txt")
params = f'Total Trainable Parameters :{str(params)}'
with open(summary_file, "w") as f:
f.write(str(summary))
f.write("\n")
f.write(str(params))
mlflow.log_artifact(local_path=summary_file)
finally:
shutil.rmtree(tempdir)
    def iterative_prune(self, model, parametrization):
        """Run one pruning iteration, retrain, and return test accuracy.

        The first call takes the pruning fraction suggested by AX
        (``parametrization["amount"]``); every later call increases the
        cumulative fraction by a fixed 0.15 step.

        Args:
            model: Model whose weights are pruned and reloaded in place.
            parametrization: AX trial parameters; provides "amount".

        Returns:
            The ``avg_test_acc`` metric tensor from the retraining run.
        """
        if not self.pruning_amount:
            self.pruning_amount = parametrization.get("amount")
        else:
            self.pruning_amount += 0.15
        mlflow.log_metric("PRUNING PERCENTAGE", self.pruning_amount)
        pruned_model = self.prune_and_save_model(model, self.pruning_amount)
        # prune_and_save_model returns a state dict; load a copy back in.
        model.load_state_dict(copy.deepcopy(pruned_model))
        summary, params = self.count_model_parameters(model)
        self.write_prune_summary(summary, params)
        trainer = self.run_mnist_model()
        metrics = trainer.callback_metrics
        return metrics.get("avg_test_acc")
    def initiate_pruning_process(self, model):
        """Drive ``total_trials`` AX trials of prune → retrain → evaluate.

        Each trial runs as a nested MLflow run tagged with its AX trial
        index; the resulting test accuracy is reported back to AX. The
        enclosing base run is closed once all trials finish.
        """
        total_trials = int(vars(self.parser_args)["total_trials"])

        trial_index = None
        for i in range(total_trials):
            parameters, trial_index = self.ax_client.get_next_trial()
            print("***************************************************************************")
            print("Running Trial {}".format(i + 1))
            print("***************************************************************************")
            with mlflow.start_run(nested=True, run_name=f'Iteration{str(i)}'):
                mlflow.set_tags({"AX_TRIAL": i})

                # calling the model
                test_accuracy = self.iterative_prune(model, parameters)

                # completion of trial
                self.ax_client.complete_trial(trial_index=trial_index, raw_data=test_accuracy.item())

        # Ending the Base run
        mlflow.end_run()
def get_parser_args(self):
parser = argparse.ArgumentParser()
parser = pl.Trainer.add_argparse_args(parent_parser=parser)
parser = LightningMNISTClassifier.add_model_specific_args(parent_parser=parser)
parser.add_argument(
"--total_trials",
default=3,
help="Number of AX trials to be run for the optimization experiment",
)
self.parser_args = parser.parse_args()
if __name__ == "__main__":
    # Orchestration: parse args, train the base model once, then run the
    # AX-driven iterative pruning loop starting from the saved base model.

    # Initializing
    iterative_prune_obj = IterativePrune()

    # Deriving parser arguments
    iterative_prune_obj.get_parser_args()

    # Running the base model
    print("***************************************************************************")
    print("Running Base Model")
    print("***************************************************************************")
    iterative_prune_obj.run_mnist_model(base=True)

    # Iterative Pruning
    iterative_prune_obj.initialize_ax_client()
    base_model = iterative_prune_obj.load_base_model()
    iterative_prune_obj.initiate_pruning_process(base_model)
| 34.629412 | 98 | 0.616443 |
843e051fb45b9e9db8485af3cbb1f7a2d97c1bfc | 36,357 | py | Python | tests/test_constraints.py | rongfengliang/edgedb-pg-expose | 1ddc279511595b4b1a3a1532ea873ed4e05e8b01 | [
"Apache-2.0"
] | null | null | null | tests/test_constraints.py | rongfengliang/edgedb-pg-expose | 1ddc279511595b4b1a3a1532ea873ed4e05e8b01 | [
"Apache-2.0"
] | null | null | null | tests/test_constraints.py | rongfengliang/edgedb-pg-expose | 1ddc279511595b4b1a3a1532ea873ed4e05e8b01 | [
"Apache-2.0"
] | null | null | null | #
# This source file is part of the EdgeDB open source project.
#
# Copyright 2012-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os.path
import unittest
import edgedb
from edb.testbase import server as tb
class TestConstraintsSchema(tb.QueryTestCase):
ISOLATED_METHODS = False
SCHEMA = os.path.join(os.path.dirname(__file__), 'schemas',
'constraints.esdl')
async def _run_link_tests(self, cases, objtype, link):
qry = """
INSERT {objtype} {{{{
{link} := {{value!r}}
}}}};
""".format(
objtype=objtype, link=link
)
for val, expected in cases:
expr = qry.format(value=str(val))
if expected == 'good':
try:
await self.con.execute(expr)
except Exception as ex:
raise AssertionError(f'{expr!r} failed') from ex
else:
with self.assertRaisesRegex(
edgedb.ConstraintViolationError, expected):
await self.con.execute(expr)
async def test_constraints_scalar_length(self):
data = {
# max-length is 10
(10 ** 10, 'must be no longer than 10 characters.'),
(10 ** 10 - 1, 'good'),
(10 ** 7 - 1, 'must be no shorter than 8 characters'),
(10 ** 7, 'good'),
}
await self._run_link_tests(data, 'test::Object', 'c_length')
data = {
(10 ** 10, 'must be no longer than 10 characters.'),
(10 ** 10 - 1, 'good'),
(10 ** 8 - 1, 'must be no shorter than 9 characters'),
(10 ** 8, 'good'),
}
await self._run_link_tests(data, 'test::Object', 'c_length_2')
data = {
(10 ** 10, 'must be no longer than 10 characters.'),
(10 ** 10 - 1, 'good'),
(10 ** 9 - 1, 'must be no shorter than 10 characters'),
}
await self._run_link_tests(data, 'test::Object', 'c_length_3')
async def test_constraints_scalar_minmax(self):
data = {
# max-value is "9999999989"
(10 ** 9 - 1, 'Maximum allowed value for .* is "9999999989".'),
(10 ** 9 - 11, 'good'),
# min-value is "99990000"
(10 ** 8 - 10 ** 4 - 1,
'Minimum allowed value for .* is "99990000".'),
(10 ** 8 - 21, 'good'),
}
await self._run_link_tests(data, 'test::Object', 'c_minmax')
async def test_constraints_scalar_strvalue(self):
data = {
# last digit is 9
(10 ** 9 - 12, 'invalid .*'),
# and the first is 9 too
(10 ** 9 - 10 ** 8 - 1, 'invalid .*'),
# and that all characters are digits
('99900~0009', 'invalid .*'),
# and that first three chars are nines
('9900000009', 'invalid .*'),
('9999000009', 'good'),
}
await self._run_link_tests(data, 'test::Object', 'c_strvalue')
async def test_constraints_scalar_enum_01(self):
data = {
('foobar', 'must be one of:'),
('bar', 'good'),
('foo', 'good'),
}
await self._run_link_tests(data, 'test::Object', 'c_enum')
async def test_constraints_scalar_enum_02(self):
data = {
('foobar', 'invalid'),
('bar', 'good'),
('foo', 'good'),
}
await self._run_link_tests(data, 'test::Object', 'c_my_enum')
async def test_constraints_exclusive_simple(self):
async with self._run_and_rollback():
with self.assertRaisesRegex(
edgedb.ConstraintViolationError,
'name violates exclusivity constraint'):
await self.con.execute("""
INSERT test::UniqueName {
name := 'Test'
};
INSERT test::UniqueName {
name := 'Test'
};
""")
async def test_constraints_exclusive_inherited(self):
async with self._run_and_rollback():
with self.assertRaisesRegex(
edgedb.ConstraintViolationError,
'name violates exclusivity constraint'):
await self.con.execute("""
INSERT test::UniqueNameInherited {
name := 'Test'
};
INSERT test::UniqueNameInherited {
name := 'Test'
};
""")
async def test_constraints_exclusive_across_ancestry(self):
async with self._run_and_rollback():
with self.assertRaisesRegex(
edgedb.ConstraintViolationError,
'name violates exclusivity constraint'):
await self.con.execute("""
INSERT test::UniqueName {
name := 'exclusive_name_across'
};
INSERT test::UniqueNameInherited {
name := 'exclusive_name_across'
};
""")
async with self._run_and_rollback():
await self.con.execute("""
INSERT test::UniqueName {
name := 'exclusive_name_ok'
};
INSERT test::UniqueNameInherited {
name := 'exclusive_name_inherited_ok'
};
""")
with self.assertRaisesRegex(
edgedb.ConstraintViolationError,
'name violates exclusivity constraint'):
await self.con.execute("""
UPDATE
test::UniqueNameInherited
FILTER
test::UniqueNameInherited.name =
'exclusive_name_inherited_ok'
SET {
name := 'exclusive_name_ok'
};
""")
async def test_constraints_exclusive_case_insensitive(self):
async with self._run_and_rollback():
with self.assertRaisesRegex(
edgedb.ConstraintViolationError,
'name violates exclusivity constraint'):
await self.con.execute("""
INSERT test::UniqueName_3 {
name := 'TeSt'
};
INSERT test::UniqueName_3 {
name := 'tEsT'
};
""")
async def test_constraints_exclusive_abstract(self):
async with self._run_and_rollback():
# This is OK, the name exclusivity constraint is abstract
await self.con.execute("""
INSERT test::AbstractConstraintParent {
name := 'exclusive_name_ap'
};
INSERT test::AbstractConstraintParent {
name := 'exclusive_name_ap'
};
""")
# This is OK too
await self.con.execute("""
INSERT test::AbstractConstraintParent {
name := 'exclusive_name_ap1'
};
INSERT test::AbstractConstraintPureChild {
name := 'exclusive_name_ap1'
};
""")
async with self._run_and_rollback():
with self.assertRaisesRegex(
edgedb.ConstraintViolationError,
'name violates exclusivity constraint'):
# Not OK, abstract constraint materializes into a real one
await self.con.execute("""
INSERT test::AbstractConstraintPureChild {
name := 'exclusive_name_ap2'
};
INSERT test::AbstractConstraintPureChild {
name := 'exclusive_name_ap2'
};
""")
async with self._run_and_rollback():
with self.assertRaisesRegex(
edgedb.ConstraintViolationError,
'name violates exclusivity constraint'):
# Not OK, abstract constraint materializes into a real one
await self.con.execute("""
INSERT test::AbstractConstraintMixedChild {
name := 'exclusive_name_ap2'
};
INSERT test::AbstractConstraintMixedChild {
name := 'exclusive_name_AP2'
};
""")
async with self._run_and_rollback():
# This is OK, duplication is in different children
await self.con.execute("""
INSERT test::AbstractConstraintPureChild {
name := 'exclusive_name_ap3'
};
INSERT test::AbstractConstraintMixedChild {
name := 'exclusive_name_ap3'
};
""")
# This is OK, the name exclusivity constraint is abstract again
await self.con.execute("""
INSERT test::AbstractConstraintPropagated {
name := 'exclusive_name_ap4'
};
INSERT test::AbstractConstraintPropagated {
name := 'exclusive_name_ap4'
};
""")
async with self._run_and_rollback():
with self.assertRaisesRegex(
edgedb.ConstraintViolationError,
'name violates exclusivity constraint'):
# Not OK, yet
await self.con.execute("""
INSERT test::BecomingAbstractConstraint {
name := 'exclusive_name_ap5'
};
INSERT test::BecomingAbstractConstraintChild {
name := 'exclusive_name_ap5'
};
""")
async with self._run_and_rollback():
await self.con.execute("""
INSERT test::BecomingConcreteConstraint {
name := 'exclusive_name_ap6'
};
INSERT test::BecomingConcreteConstraintChild {
name := 'exclusive_name_ap6'
};
""")
async with self._run_and_rollback():
with self.assertRaisesRegex(
edgedb.ConstraintViolationError,
'name violates exclusivity constraint'):
await self.con.execute("""
INSERT test::LosingAbstractConstraintParent {
name := 'exclusive_name_ap7'
};
INSERT test::LosingAbstractConstraintParent {
name := 'exclusive_name_ap7'
};
""")
async with self._run_and_rollback():
with self.assertRaisesRegex(
edgedb.ConstraintViolationError,
'name violates exclusivity constraint'):
await self.con.execute("""
INSERT test::AbstractConstraintMultipleParentsFlattening{
name := 'exclusive_name_ap8'
};
INSERT test::AbstractConstraintMultipleParentsFlattening{
name := 'exclusive_name_ap8'
};
""")
async with self._run_and_rollback():
with self.assertRaisesRegex(
edgedb.ConstraintViolationError,
'name violates exclusivity constraint'):
# non-abstract inherited constraint
await self.con.execute("""
INSERT test::AbstractInheritingNonAbstract {
name := 'exclusive_name_ana'
};
INSERT test::AbstractInheritingNonAbstract {
name := 'exclusive_name_ana'
};
""")
async with self._run_and_rollback():
with self.assertRaisesRegex(
edgedb.ConstraintViolationError,
'name violates exclusivity constraint'):
# non-abstract inherited constraint
await self.con.execute("""
INSERT test::AbstractInheritingNonAbstract {
name := 'exclusive_name_ana1'
};
INSERT test::AbstractInheritingNonAbstractChild {
name := 'exclusive_name_ana1'
};
""")
class TestConstraintsSchemaMigration(tb.QueryTestCase):
    """Constraint behavior after a schema migration flips abstractness.

    Applies ``updated_schema.esdl`` on top of the base migration schema and
    then checks that constraints that became delegated stop being enforced
    across the hierarchy, and constraints that became concrete start being
    enforced.
    """

    ISOLATED_METHODS = False

    SCHEMA = os.path.join(os.path.dirname(__file__),
                          'schemas', 'constraints_migration',
                          'schema.esdl')

    async def test_constraints_exclusive_migration(self):
        """Migrate to the updated schema, then re-check every hierarchy."""
        new_schema_f = os.path.join(os.path.dirname(__file__),
                                    'schemas', 'constraints_migration',
                                    'updated_schema.esdl')

        with open(new_schema_f) as f:
            new_schema = f.read()

        async with self.con.transaction():
            await self.con.execute(f'''
                CREATE MIGRATION test::d1 TO {{ {new_schema} }};
                COMMIT MIGRATION test::d1;
            ''')

        async with self._run_and_rollback():
            # This is OK, the name exclusivity constraint is abstract
            await self.con.execute("""
                INSERT test::AbstractConstraintParent {
                    name := 'exclusive_name_ap'
                };

                INSERT test::AbstractConstraintParent {
                    name := 'exclusive_name_ap'
                };
            """)

            # This is OK too
            await self.con.execute("""
                INSERT test::AbstractConstraintParent {
                    name := 'exclusive_name_ap1'
                };

                INSERT test::AbstractConstraintPureChild {
                    name := 'exclusive_name_ap1'
                };
            """)

        async with self._run_and_rollback():
            with self.assertRaisesRegex(
                    edgedb.ConstraintViolationError,
                    'name violates exclusivity constraint'):
                # Not OK, abstract constraint materializes into a real one
                await self.con.execute("""
                    INSERT test::AbstractConstraintPureChild {
                        name := 'exclusive_name_ap2'
                    };

                    INSERT test::AbstractConstraintPureChild {
                        name := 'exclusive_name_ap2'
                    };
                """)

        async with self._run_and_rollback():
            with self.assertRaisesRegex(
                    edgedb.ConstraintViolationError,
                    'name violates exclusivity constraint'):
                # Not OK, abstract constraint materializes into a real one
                await self.con.execute("""
                    INSERT test::AbstractConstraintMixedChild {
                        name := 'exclusive_name_ap2'
                    };

                    INSERT test::AbstractConstraintMixedChild {
                        name := 'exclusive_name_AP2'
                    };
                """)

        async with self._run_and_rollback():
            # This is OK, duplication is in different children
            await self.con.execute("""
                INSERT test::AbstractConstraintMixedChild {
                    name := 'exclusive_name_ap3'
                };

                INSERT test::AbstractConstraintPureChild {
                    name := 'exclusive_name_ap3'
                };
            """)

        async with self._run_and_rollback():
            # This is OK, the name exclusivity constraint is abstract again
            await self.con.execute("""
                INSERT test::AbstractConstraintPropagated {
                    name := 'exclusive_name_ap4'
                };

                INSERT test::AbstractConstraintPropagated {
                    name := 'exclusive_name_ap4'
                };
            """)

        async with self._run_and_rollback():
            # OK, former constraint was turned into an abstract constraint
            await self.con.execute("""
                INSERT test::BecomingAbstractConstraint {
                    name := 'exclusive_name_ap5'
                };

                INSERT test::BecomingAbstractConstraintChild {
                    name := 'exclusive_name_ap5'
                };
            """)

        async with self._run_and_rollback():
            with self.assertRaisesRegex(
                    edgedb.ConstraintViolationError,
                    'name violates exclusivity constraint'):
                # Constraint is no longer abstract
                await self.con.execute("""
                    INSERT test::BecomingConcreteConstraint {
                        name := 'exclusive_name_ap6'
                    };

                    INSERT test::BecomingConcreteConstraintChild {
                        name := 'exclusive_name_ap6'
                    };
                """)

        async with self._run_and_rollback():
            with self.assertRaisesRegex(
                    edgedb.ConstraintViolationError,
                    'name violates exclusivity constraint'):
                # Constraint is no longer abstract
                await self.con.execute("""
                    INSERT test::LosingAbstractConstraintParent {
                        name := 'exclusive_name_ap6'
                    };

                    INSERT test::LosingAbstractConstraintParent {
                        name := 'exclusive_name_ap6'
                    };
                """)

        async with self._run_and_rollback():
            await self.con.execute("""
                INSERT test::LosingAbstractConstraintParent2 {
                    name := 'exclusive_name_ap7'
                };

                INSERT test::LosingAbstractConstraintParent2 {
                    name := 'exclusive_name_ap7'
                };
            """)

        async with self._run_and_rollback():
            with self.assertRaisesRegex(
                    edgedb.ConstraintViolationError,
                    'name violates exclusivity constraint'):
                # Constraint is no longer abstract
                await self.con.execute("""
                    INSERT test::AbstractConstraintMultipleParentsFlattening{
                        name := 'exclusive_name_ap8'
                    };

                    INSERT test::AbstractConstraintMultipleParentsFlattening{
                        name := 'exclusive_name_AP8'
                    };
                """)

        async with self._run_and_rollback():
            # Parent lost its concrete constraint inheritance
            await self.con.execute("""
                INSERT test::AbstractInheritingNonAbstract {
                    name := 'exclusive_name_ana'
                };

                INSERT test::AbstractInheritingNonAbstract {
                    name := 'exclusive_name_ana'
                };
            """)

        async with self._run_and_rollback():
            # Parent lost its concrete constraint inheritance
            await self.con.execute("""
                INSERT test::AbstractInheritingNonAbstract {
                    name := 'exclusive_name_ana1'
                };

                INSERT test::AbstractInheritingNonAbstractChild {
                    name := 'exclusive_name_ana1'
                };
            """)

        async with self._run_and_rollback():
            with self.assertRaisesRegex(
                    edgedb.ConstraintViolationError,
                    'name violates exclusivity constraint'):
                # Child uniqueness is still enforced
                await self.con.execute("""
                    INSERT test::AbstractInheritingNonAbstractChild{
                        name := 'exclusive_name_ana2'
                    };

                    INSERT test::AbstractInheritingNonAbstractChild{
                        name := 'exclusive_name_ana2'
                    };
                """)
class TestConstraintsDDL(tb.NonIsolatedDDLTestCase):
    """Constraints created via DDL, including abstract constraints with
    'ON (...)' subject expressions, custom error messages, and expected
    definition-time errors."""

    async def test_constraints_ddl_01(self):
        """Exclusive constraints created through CREATE TYPE / CREATE LINK."""
        qry = """
            CREATE ABSTRACT LINK test::translated_label {
                CREATE PROPERTY lang -> std::str;
                CREATE PROPERTY prop1 -> std::str;
            };

            CREATE ABSTRACT LINK test::link_with_exclusive_property {
                CREATE PROPERTY exclusive_property -> std::str {
                    CREATE CONSTRAINT std::exclusive;
                };
            };

            CREATE ABSTRACT LINK test::link_with_exclusive_property_inherited
                EXTENDING test::link_with_exclusive_property;

            CREATE TYPE test::UniqueName {
                CREATE PROPERTY name -> std::str {
                    CREATE CONSTRAINT std::exclusive;
                };

                CREATE LINK link_with_exclusive_property -> std::Object;
            };
        """

        await self.con.execute(qry)

        # Simple exclusivity constraint on a link
        #
        async with self._run_and_rollback():
            with self.assertRaisesRegex(
                    edgedb.ConstraintViolationError,
                    'name violates exclusivity constraint'):
                await self.con.execute("""
                    INSERT test::UniqueName {
                        name := 'Test'
                    };

                    INSERT test::UniqueName {
                        name := 'Test'
                    };
                """)

        qry = """
            CREATE TYPE test::AbstractConstraintParent {
                CREATE PROPERTY name -> std::str {
                    CREATE DELEGATED CONSTRAINT std::exclusive;
                };
            };

            CREATE TYPE test::AbstractConstraintPureChild
                EXTENDING test::AbstractConstraintParent;
        """

        await self.con.execute(qry)

        async with self._run_and_rollback():
            # This is OK, the name exclusivity constraint is abstract
            await self.con.execute("""
                INSERT test::AbstractConstraintParent {
                    name := 'exclusive_name_ap'
                };

                INSERT test::AbstractConstraintParent {
                    name := 'exclusive_name_ap'
                };
            """)

            # This is OK too
            await self.con.execute("""
                INSERT test::AbstractConstraintParent {
                    name := 'exclusive_name_ap1'
                };

                INSERT test::AbstractConstraintPureChild {
                    name := 'exclusive_name_ap1'
                };
            """)

    async def test_constraints_ddl_02(self):
        # testing the generalized constraint with 'ON (...)' clause
        qry = r"""
            CREATE ABSTRACT CONSTRAINT test::mymax1(max: std::int64)
                    ON (len(__subject__))
            {
                SET errmessage :=
                    '{__subject__} must be no longer than {max} characters.';
                SET expr := __subject__ <= max;
            };

            CREATE ABSTRACT CONSTRAINT test::mymax_ext1(max: std::int64)
                    ON (len(__subject__)) EXTENDING std::max_value
            {
                SET errmessage :=
                    '{__subject__} must be no longer than {max} characters.';
            };

            CREATE TYPE test::ConstraintOnTest1 {
                CREATE PROPERTY foo -> std::str {
                    CREATE CONSTRAINT test::mymax1(3);
                };

                CREATE PROPERTY bar -> std::str {
                    CREATE CONSTRAINT test::mymax_ext1(3);
                };
            };
        """

        await self.con.execute(qry)

        # Inspect the concrete (subject-bound) constraint's arguments.
        await self.assert_query_result(
            r'''
                SELECT schema::Constraint {
                    name,
                    args: {
                        num,
                        name,
                        kind,
                        type: {
                            name
                        },
                        typemod,
                        @value
                    } ORDER BY .num ASC
                } FILTER .name = 'test::mymax_ext1' AND exists(.subject);
            ''',
            [
                {
                    "name": 'test::mymax_ext1',
                    "args": [
                        {
                            "num": 0,
                            "kind": 'POSITIONAL',
                            "name": 'max',
                            "type": {"name": 'std::int64'},
                            "@value": '3',
                            "typemod": 'SINGLETON'
                        }
                    ],
                },
            ]
        )

        # Inspect the abstract constraint's parameter declaration.
        await self.assert_query_result(
            r'''
                SELECT schema::Constraint {
                    name,
                    params: {
                        num,
                        name,
                        kind,
                        type: {
                            name
                        },
                        typemod
                    } ORDER BY .num ASC
                } FILTER .name = 'test::mymax_ext1' AND NOT exists(.subject);
            ''',
            [
                {
                    "name": 'test::mymax_ext1',
                    "params": [
                        {
                            "num": 0,
                            "kind": 'POSITIONAL',
                            "name": 'max',
                            "type": {"name": 'std::int64'},
                            "typemod": 'SINGLETON'
                        }
                    ],
                },
            ]
        )

        # making sure the constraint was applied successfully
        async with self._run_and_rollback():
            with self.assertRaisesRegex(
                    edgedb.ConstraintViolationError,
                    'foo must be no longer than 3 characters.'):
                await self.con.execute("""
                    INSERT test::ConstraintOnTest1 {
                        foo := 'Test'
                    };
                """)

        async with self._run_and_rollback():
            with self.assertRaisesRegex(
                    edgedb.ConstraintViolationError,
                    'bar must be no longer than 3 characters.'):
                await self.con.execute("""
                    INSERT test::ConstraintOnTest1 {
                        bar := 'Test'
                    };
                """)

        async with self._run_and_rollback():
            # constraint should not fail
            await self.con.execute("""
                INSERT test::ConstraintOnTest1 {
                    foo := '',
                    bar := ''
                };

                INSERT test::ConstraintOnTest1 {
                    foo := 'a',
                    bar := 'q'
                };

                INSERT test::ConstraintOnTest1 {
                    foo := 'ab',
                    bar := 'qw'
                };

                INSERT test::ConstraintOnTest1 {
                    foo := 'abc',
                    bar := 'qwe'
                };

                # a duplicate 'foo' and 'bar' just for good measure
                INSERT test::ConstraintOnTest1 {
                    foo := 'ab',
                    bar := 'qw'
                };
            """)

    async def test_constraints_ddl_03(self):
        # testing the specialized constraint with 'ON (...)' clause
        qry = r"""
            CREATE ABSTRACT CONSTRAINT test::mymax2(max: std::int64) {
                SET errmessage :=
                    '{__subject__} must be no longer than {max} characters.';
                SET expr := __subject__ <= max;
            };

            CREATE TYPE test::ConstraintOnTest2 {
                CREATE PROPERTY foo -> std::str {
                    CREATE CONSTRAINT test::mymax2(3) ON (len(__subject__));
                };

                CREATE PROPERTY bar -> std::str {
                    CREATE CONSTRAINT std::max_value(3) ON (len(__subject__)) {
                        SET errmessage :=
                            # XXX: once simple string concat is possible here
                            #      formatting can be saner
                            '{__subject__} must be no longer than {max} characters.';
                    };
                };
            };
        """

        await self.con.execute(qry)

        # making sure the constraint was applied successfully
        async with self._run_and_rollback():
            with self.assertRaisesRegex(
                    edgedb.ConstraintViolationError,
                    'foo must be no longer than 3 characters.'):
                await self.con.execute("""
                    INSERT test::ConstraintOnTest2 {
                        foo := 'Test'
                    };
                """)

        async with self._run_and_rollback():
            with self.assertRaisesRegex(
                    edgedb.ConstraintViolationError,
                    'bar must be no longer than 3 characters.'):
                await self.con.execute("""
                    INSERT test::ConstraintOnTest2 {
                        bar := 'Test'
                    };
                """)

        async with self._run_and_rollback():
            # constraint should not fail
            await self.con.execute("""
                INSERT test::ConstraintOnTest2 {
                    foo := '',
                    bar := ''
                };

                INSERT test::ConstraintOnTest2 {
                    foo := 'a',
                    bar := 'q'
                };

                INSERT test::ConstraintOnTest2 {
                    foo := 'ab',
                    bar := 'qw'
                };

                INSERT test::ConstraintOnTest2 {
                    foo := 'abc',
                    bar := 'qwe'
                };

                # a duplicate 'foo' and 'bar' just for good measure
                INSERT test::ConstraintOnTest2 {
                    foo := 'ab',
                    bar := 'qw'
                };
            """)

    @unittest.expectedFailure
    # FIXME: the test fails because errmessage is an expression that's
    #        not a simple string literal, but a concatenation of 2
    #        string literals.
    async def test_constraints_ddl_04(self):
        # testing an issue with expressions used for 'errmessage'
        qry = r"""
            CREATE ABSTRACT CONSTRAINT test::mymax3(max: std::int64) {
                SET errmessage :=
                    '{__subject__} must be no longer ' +
                    'than {max} characters.';
                SET expr := __subject__ <= max;
            };

            CREATE TYPE test::ConstraintOnTest3 {
                CREATE PROPERTY foo -> std::str {
                    CREATE CONSTRAINT test::mymax3(3) ON (len(__subject__));
                };
            };
        """

        await self.con.execute(qry)

        # making sure the constraint was applied successfully
        async with self._run_and_rollback():
            with self.assertRaisesRegex(
                    edgedb.ConstraintViolationError,
                    'foo must be no longer than 3 characters.'):
                await self.con.execute("""
                    INSERT test::ConstraintOnTest3 {
                        foo := 'Test'
                    };
                """)

    async def test_constraints_ddl_error_02(self):
        # testing that subjectexpr cannot be overridden after it is
        # specified explicitly
        async with self._run_and_rollback():
            with self.assertRaisesRegex(
                    edgedb.InvalidConstraintDefinitionError,
                    r"subjectexpr is already defined for .+max_int"):
                await self.con.execute(r"""
                    CREATE ABSTRACT CONSTRAINT test::max_int(m: std::int64)
                            ON (<int64>__subject__)
                    {
                        SET errmessage :=
                            # XXX: once simple string concat is possible here
                            #      formatting can be saner
                            '{__subject__} must be no longer than {m} characters.';
                        SET expr := __subject__ <= m;
                    };

                    CREATE TYPE test::InvalidConstraintTest2 {
                        CREATE PROPERTY foo -> std::str {
                            CREATE CONSTRAINT test::max_int(3)
                                ON (len(__subject__));
                        };
                    };
                """)

    async def test_constraints_ddl_error_05(self):
        # Test that constraint expression returns a boolean.
        qry = """
            CREATE MIGRATION test::ddl_error_05 TO {
                type User {
                    required property login -> str {
                        constraint expression on (len(__subject__))
                    }
                };
            };
        """

        with self.assertRaisesRegex(
                edgedb.SchemaDefinitionError,
                "constraint expression expected to return a bool value, "
                "got 'int64'"):
            async with self.con.transaction():
                await self.con.execute(qry)

        qry = """
            CREATE TYPE User {
                CREATE REQUIRED PROPERTY login -> str {
                    CREATE CONSTRAINT expression on (len(__subject__));
                };
            };
        """

        with self.assertRaisesRegex(
                edgedb.SchemaDefinitionError,
                "constraint expression expected to return a bool value, "
                "got 'int64'"):
            await self.con.execute(qry)

        qry = """
            CREATE ABSTRACT CONSTRAINT foo {
                SET expr := __subject__;
            };

            CREATE TYPE User {
                CREATE REQUIRED PROPERTY login -> str {
                    CREATE CONSTRAINT foo;
                };
            };
        """

        with self.assertRaisesRegex(
                edgedb.SchemaDefinitionError,
                "constraint expression expected to return a bool value, "
                "got 'str'"):
            await self.con.execute(qry)

    async def test_constraints_ddl_error_06(self):
        # testing the generalized constraint with 'ON (...)' clause
        qry = r"""
            CREATE ABSTRACT CONSTRAINT test::mymax_er_06(max: std::int64)
                    ON (len(__subject__))
            {
                SET expr := __subject__ <= $max;
            };

            CREATE TYPE test::ConstraintOnTest_err_06 {
                CREATE PROPERTY foo -> std::str {
                    CREATE CONSTRAINT test::mymax_er_06(3);
                };
            };
        """

        try:
            await self.con.execute('START TRANSACTION;')
            with self.assertRaisesRegex(
                    edgedb.InvalidConstraintDefinitionError,
                    r'dollar-prefixed.*not supported'):
                await self.con.execute(qry)
        finally:
            await self.con.execute('ROLLBACK;')
| 35.366732 | 79 | 0.468465 |
f4a4c208810f3892360c1412349ef2dce92fc776 | 1,266 | py | Python | setup.py | riccardocagnasso/jwtauth_aiopyramid_test | c62d5b87cbd13ab4fa62594ff99cf11121fe859a | [
"MIT"
] | null | null | null | setup.py | riccardocagnasso/jwtauth_aiopyramid_test | c62d5b87cbd13ab4fa62594ff99cf11121fe859a | [
"MIT"
] | null | null | null | setup.py | riccardocagnasso/jwtauth_aiopyramid_test | c62d5b87cbd13ab4fa62594ff99cf11121fe859a | [
"MIT"
] | null | null | null | import os
import sys
from setuptools import setup, find_packages
py_version = sys.version_info[:2]
if py_version < (3, 3):
raise Exception("aiopyramid requires Python >= 3.3.")
here = os.path.abspath(os.path.dirname(__file__))
NAME = 'jwtauth_test'
with open(os.path.join(here, 'README.rst')) as readme:
README = readme.read()
with open(os.path.join(here, 'CHANGES.rst')) as changes:
CHANGES = changes.read()
requires = [
'aiopyramid[gunicorn]',
'pyramid_jwtauth',
'pyramid_chameleon'
]
setup(
name=NAME,
version='0.0',
description='jwtauth_test',
long_description=README + '\n\n' + CHANGES,
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Framework :: Pyramid",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
],
author='',
author_email='',
url='',
keywords='aiopyramid asyncio web wsgi pylons pyramid',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
test_suite=NAME,
install_requires=requires,
entry_points="""\
[paste.app_factory]
main = jwtauth_test:main
""",
)
| 24.823529 | 63 | 0.638231 |
f9d5a699acd9fc437a5d95a3ea351f0baedeb17d | 14,122 | py | Python | fairworkflows/rdf_wrapper.py | fair-workflows/fairworkflows | 363a7590326b6b10e1da47eedc6026f4bc382725 | [
"Apache-2.0"
] | 8 | 2020-10-20T14:45:08.000Z | 2021-12-14T08:50:13.000Z | fairworkflows/rdf_wrapper.py | fair-workflows/fairworkflows | 363a7590326b6b10e1da47eedc6026f4bc382725 | [
"Apache-2.0"
] | 76 | 2020-09-24T14:30:40.000Z | 2021-07-01T13:43:29.000Z | fairworkflows/rdf_wrapper.py | fair-workflows/FAIRWorkbench | 363a7590326b6b10e1da47eedc6026f4bc382725 | [
"Apache-2.0"
] | 1 | 2022-02-03T13:43:28.000Z | 2022-02-03T13:43:28.000Z | import warnings
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List
from urllib.parse import urldefrag
import pyshacl
import rdflib
from rdflib import RDF, RDFS, DCTERMS, OWL
from nanopub import Publication, NanopubClient
from rdflib.tools.rdf2dot import rdf2dot
from fairworkflows import namespaces, LinguisticSystem
from fairworkflows.config import PACKAGE_DIR
# Bundled SHACL shapes file used by RdfWrapper.shacl_validate().
PLEX_SHAPES_SHACL_FILEPATH = str(PACKAGE_DIR / 'resources' / 'plex-shapes.ttl')
class RdfWrapper:
    def __init__(self, uri, ref_name='fairobject', derived_from: List[str] = None,
                 language: LinguisticSystem = None):
        """Initialize an empty RDF wrapper.

        Args:
            uri: URI identifying this object; stored as a string, or None.
            ref_name: Name for the blank node used as this object's
                self-reference in the graph.
            derived_from: URIs of the objects this one was derived from.
            language: Optional LinguisticSystem; when given, its triples
                are added to the graph immediately via the `language` setter.
        """
        self._rdf = rdflib.Graph()
        if uri:
            self._uri = str(uri)
        else:
            self._uri = None
        self.self_ref = rdflib.term.BNode(ref_name)
        self._is_modified = False
        self._is_published = False
        self.derived_from = derived_from
        self._bind_namespaces()

        # A blank node to which triples about the linguistic
        # system for this FAIR object can be added
        self.lingsys_ref = rdflib.BNode('LinguisticSystem')
        if language is not None:
            self.language = language
def _bind_namespaces(self):
"""Bind namespaces used often in fair step and fair workflow.
Unused namespaces will be removed upon serialization.
"""
self.rdf.bind("npx", namespaces.NPX)
self.rdf.bind("pplan", namespaces.PPLAN)
self.rdf.bind("prov", namespaces.PROV)
self.rdf.bind("dul", namespaces.DUL)
self.rdf.bind("bpmn", namespaces.BPMN)
self.rdf.bind("pwo", namespaces.PWO)
self.rdf.bind("schema", namespaces.SCHEMAORG)
self.rdf.bind("dc", DCTERMS)
self.rdf.bind("owl", OWL)
    @property
    def rdf(self) -> rdflib.Graph:
        """Get the rdf graph."""
        return self._rdf

    @property
    def uri(self) -> str:
        """Get the URI for this RDF (a string, or None if unset)."""
        return self._uri

    @property
    def is_modified(self) -> bool:
        """Returns true if the RDF has been modified since initialisation"""
        return self._is_modified

    @property
    def derived_from(self) -> List[str]:
        """
        Denotes where this RdfWrapper object was derived from
        """
        return self._derived_from

    @derived_from.setter
    def derived_from(self, uris: List[str]):
        # Plain pass-through; the URI strings are not validated here.
        self._derived_from = uris
    def add_triple(self, s, p, o):
        """ Add any general triple to the rdf i.e. that does not have the self_ref (step, or plan) as subject """
        self._rdf.add((s, p, o))
def get_attribute(self, predicate, return_list=False):
"""Get attribute.
Get attribute of this RDF.
Returns:
The object for which the predicate corresponds to predicate
argument and subject corresponds to this concept. Return None
if no attributes are found, or a list of attributes if multiple
matching objects are found.
"""
objects = list(self._rdf.objects(subject=self.self_ref,
predicate=predicate))
if return_list:
return objects
if len(objects) == 0:
return None
elif len(objects) == 1:
return objects[0]
else:
return objects
    def set_attribute(self, predicate, value, overwrite=True):
        """Set attribute.

        Set attribute of this RDF. I.e. for the given `predicate` argument, set
        the object to the given `value` argument for the subject
        corresponding to this RDF. Optionally overwrite the attribute if it
        already exists (but throw a warning).
        """
        if overwrite and self.get_attribute(predicate) is not None:
            # NOTE(review): the two f-string parts concatenate without a
            # space ("...definedoverwriting...") — confirm intended wording.
            warnings.warn(f'A predicate {predicate} was already defined'
                          f'overwriting {predicate} for {self.self_ref}')
            self.remove_attribute(predicate)
        self._rdf.add((self.self_ref, predicate, value))
        self._is_modified = True
    def remove_attribute(self, predicate, object=None):
        """Remove attribute.

        If `object` arg is None: remove attribute of this RDF. I.e. remove all
        triples from the RDF for the given `predicate` argument that have the
        self-reference subject. Else remove only attributes with the object
        matching the `object` arg.
        """
        # rdflib treats None components of the triple as wildcards.
        self._rdf.remove((self.self_ref, predicate, object))
    @property
    def label(self):
        """Label.

        Returns the rdfs:label of this Fair object (or a list, if more than one matching triple is found)
        """
        return self.get_attribute(RDFS.label)

    @label.setter
    def label(self, value):
        """
        Adds the given text string as an rdfs:label for this Fair object.
        """
        self.set_attribute(RDFS.label, rdflib.term.Literal(value))
    @property
    def description(self):
        """Description.

        Returns the dcterms:description of this Fair object (or a list, if more than
        one matching triple is found)
        """
        return self.get_attribute(DCTERMS.description)

    @description.setter
    def description(self, value):
        """
        Adds the given text string as a dcterms:description for this Fair object.
        """
        self.set_attribute(DCTERMS.description, rdflib.term.Literal(value))
@property
def language(self):
"""Returns the language for this fair objects's description (could be code).
Returns a LinguisticSystem object.
"""
if (None, DCTERMS.language, self.lingsys_ref) in self._rdf:
lingsys_rdf = rdflib.Graph()
for t in self._rdf.triples((self.lingsys_ref, None, None)):
lingsys_rdf.add(t)
return LinguisticSystem.from_rdf(lingsys_rdf)
else:
return None
@language.setter
def language(self, value: LinguisticSystem):
"""Sets the language for this fair object's code (takes a LinguisticSystem).
Removes the existing linguistic system triples from the RDF decription
and replaces them with the new linguistic system."""
if (None, DCTERMS.language, self.lingsys_ref) not in self._rdf:
self._rdf.add((self.self_ref, DCTERMS.language, self.lingsys_ref))
lingsys_triples = list(self._rdf.triples( (self.lingsys_ref, None, None) ))
if len(lingsys_triples) > 0:
self._rdf.remove(lingsys_triples)
self._rdf += value.generate_rdf(self.lingsys_ref)
    def shacl_validate(self):
        """Validate the wrapped RDF against the project's SHACL shapes.

        Loads the shapes graph from PLEX_SHAPES_SHACL_FILEPATH (Turtle) and
        runs pyshacl with RDFS inference. Fails with AssertionError carrying
        the pyshacl report text when the graph does not conform.

        NOTE(review): `assert` is stripped under `python -O`; if validation
        must always run, an explicit exception would be safer.
        """
        sg = rdflib.Graph()
        sg.parse(PLEX_SHAPES_SHACL_FILEPATH, format='ttl')
        conforms, _, results_text = pyshacl.validate(self._rdf, shacl_graph=sg, inference='rdfs')
        assert conforms, results_text
    def anonymise_rdf(self):
        """
        Replace any subjects or objects referring directly to the rdf uri, with a blank node
        """
        # Delegates to the module-level replace_in_rdf helper;
        # self.self_ref is presumably a blank node — TODO confirm.
        replace_in_rdf(self._rdf, oldvalue=rdflib.URIRef(self.uri), newvalue=self.self_ref)
    @classmethod
    def from_rdf(cls, rdf: rdflib.Graph, uri: str, fetch_references: bool = False,
                 force: bool = False, remove_irrelevant_triples: bool = True):
        """Construct RdfWrapper object from rdf graph.

        Abstract hook: concrete subclasses must override this.

        Args:
            rdf: The RDF graph
            uri: Uri of the object
            fetch_references: Boolean toggling whether to fetch objects from nanopub that are
                referred by this object (e.g. FairSteps in a FairWorkflow)
            force: Toggle forcing creation of object even if url is not in any of the subjects of
                the passed RDF
            remove_irrelevant_triples: Toggle removing irrelevant triples from the wrapped rdf.

        Raises:
            NotImplementedError: always, in this base implementation.
        """
        raise NotImplementedError()
@staticmethod
def _uri_is_subject_in_rdf(uri: str, rdf: rdflib.Graph, force: bool):
"""Check whether uri is a subject in the rdf.
Args:
rdf: The RDF graph
uri: Uri of the object
force: Toggle raising an error (force=False) or just a warning (force=True)
"""
if rdflib.URIRef(uri) not in rdf.subjects():
message = (f"Provided URI '{uri}' does not "
f"match any subject in provided rdf graph.")
if force:
warnings.warn(message, UserWarning)
else:
raise ValueError(message + " Use force=True to suppress this error")
@classmethod
def from_nanopub(cls, uri: str, use_test_server=False):
"""Construct RdfWrapper object from an existing nanopublication.
Fetch the nanopublication corresponding to the specified URI. Pass its assertion
graph to from_rdf to construct the object.
Args:
uri: The URI of a nanopublication (e.g.: http://purl.org/np/id) that npx:introduces
the RDF object as a concept or the URI of a nanopublication fragment pointing to a
concept (e.g.: http://purl.org/np/id#concept)
use_test_server: Toggle using the test nanopub server.
"""
# Work out the nanopub URI by defragging the step URI
nanopub_uri, frag = urldefrag(uri)
# Fetch the nanopub
client = NanopubClient(use_test_server=use_test_server)
nanopub = client.fetch(nanopub_uri)
if len(frag) > 0:
# If we found a fragment we can use the passed URI
uri = uri
elif nanopub.introduces_concept:
# Otherwise we try to extract it from 'introduced concept'
uri = str(nanopub.introduces_concept)
else:
raise ValueError('This nanopub does not introduce any concepts. Please provide URI to '
'the FAIR object itself (not just the nanopub).')
self = cls.from_rdf(rdf=nanopub.assertion, uri=uri, fetch_references=True)
self._derived_from = [uri]
# Record that this RDF originates from a published source
self._is_published = True
return self
def _publish_as_nanopub(self, use_test_server=False, **kwargs):
"""
Publishes this rdf as a nanopublication.
Args:
use_test_server (bool): Toggle using the test nanopub server.
kwargs: Keyword arguments to be passed to [nanopub.Publication.from_assertion](
https://nanopub.readthedocs.io/en/latest/reference/publication.html#
nanopub.publication.Publication.from_assertion).
This allows for more control over the nanopublication RDF.
Returns:
a dictionary with publication info, including 'nanopub_uri', and 'concept_uri'
"""
# If this RDF has been modified from something that was previously published,
# include the original URI in the derived_from PROV (if applicable)
if self._is_published and not self._is_modified:
warnings.warn(f'Cannot publish() this Fair object. '
f'This rdf is already published (at {self._uri}) '
f'and has not been modified locally.')
return {'nanopub_uri': None, 'concept_uri': None}
for invalid_kwarg in ['introduces_concept', 'assertion_rdf']:
if invalid_kwarg in kwargs:
raise ValueError(f'{invalid_kwarg} is automatically filled by fairworkflows '
f'library, you cannot set it.')
if 'derived_from' in kwargs:
if self._derived_from is None:
self._derived_from = kwargs.pop('derived_from')
if self._derived_from is not None:
raise ValueError('You are trying to set derived_from for the nanopublication, '
'but there is already a value specified as .derived_from'
f'property of this object: {self._derived_from}')
# Publish the rdf of this step as a nanopublication
nanopub = Publication.from_assertion(assertion_rdf=self.rdf,
introduces_concept=self.self_ref,
derived_from=self._derived_from,
**kwargs)
client = NanopubClient(use_test_server=use_test_server)
publication_info = client.publish(nanopub)
# Set the new, published, URI, which should be whatever the (published) URI of the concept that was introduced is.
# Note that this is NOT the nanopub's URI, since the nanopub is not the step/workflow. The rdf object describing the step/workflow
# is contained in the assertion graph of the nanopub, and has its own URI.
self._uri = publication_info['concept_uri']
self._is_published = True
self._is_modified = False
return publication_info
@staticmethod
def _import_graphviz():
"""Import graphviz.
Raises:
ImportError with appropriate message if import failed
"""
try:
import graphviz
return graphviz
except ImportError:
raise ImportError('Cannot produce visualization of RDF, you need '
'to install graphviz python package. '
'Version 0.14.1 is known to work well.')
    def display_rdf(self):
        """Render the wrapped RDF graph with graphviz.

        Serializes the graph to DOT in a temporary directory using rdflib's
        rdf2dot, then returns a graphviz.Source suitable for notebook display.
        """
        graphviz = self._import_graphviz()
        with TemporaryDirectory() as td:
            filename = Path(td) / 'dag.dot'
            with open(filename, 'w') as f:
                rdf2dot(self._rdf, f)
            # Source.from_file reads the DOT before the tempdir is cleaned up.
            return graphviz.Source.from_file(filename)
def replace_in_rdf(rdf: "rdflib.Graph", oldvalue, newvalue):
    """
    Replace subjects or objects of oldvalue with newvalue.

    Mutates `rdf` in place. Predicates are never touched. The annotation is
    a string so the function can be defined without importing rdflib.

    Bug fixes over the original:
    * The graph was mutated while being iterated, which is undefined
      behaviour — we now snapshot the triples first.
    * A triple whose subject AND object both equalled `oldvalue` only had
      its subject replaced; now both ends are replaced.
    """
    for s, p, o in list(rdf):
        if s == oldvalue or o == oldvalue:
            rdf.remove((s, p, o))
            new_s = newvalue if s == oldvalue else s
            new_o = newvalue if o == oldvalue else o
            rdf.add((new_s, p, new_o))
| 39.01105 | 138 | 0.619742 |
d77bee6cb9b10314fe45171f6c4110c197b88790 | 31 | py | Python | btd6_memory_info/generated/Assets/Scripts/Models/ServerEvents/DailyChallengeModel/daily_challenge_model.py | 56kyle/bloons_auto | 419d55b51d1cddc49099593970adf1c67985b389 | [
"MIT"
] | null | null | null | btd6_memory_info/generated/Assets/Scripts/Models/ServerEvents/DailyChallengeModel/daily_challenge_model.py | 56kyle/bloons_auto | 419d55b51d1cddc49099593970adf1c67985b389 | [
"MIT"
] | null | null | null | btd6_memory_info/generated/Assets/Scripts/Models/ServerEvents/DailyChallengeModel/daily_challenge_model.py | 56kyle/bloons_auto | 419d55b51d1cddc49099593970adf1c67985b389 | [
"MIT"
] | null | null | null | class DailyChallengeModel: pass | 31 | 31 | 0.903226 |
8f1e38a65d328328f8a39c5c79a420f4566b254b | 5,648 | py | Python | pystratis/api/dynamic_contract/dynamic_contract.py | TjadenFroyda/pyStratis | 9cc7620d7506637f8a2b84003d931eceb36ac5f2 | [
"MIT"
] | 8 | 2021-06-30T20:44:22.000Z | 2021-12-07T14:42:22.000Z | pystratis/api/dynamic_contract/dynamic_contract.py | TjadenFroyda/pyStratis | 9cc7620d7506637f8a2b84003d931eceb36ac5f2 | [
"MIT"
] | 2 | 2021-07-01T11:50:18.000Z | 2022-01-25T18:39:49.000Z | pystratis/api/dynamic_contract/dynamic_contract.py | TjadenFroyda/pyStratis | 9cc7620d7506637f8a2b84003d931eceb36ac5f2 | [
"MIT"
] | 4 | 2021-07-01T04:36:42.000Z | 2021-09-17T10:54:19.000Z | import ast
from typing import List, Union
from decimal import Decimal
from pystratis.api import APIRequest, EndpointRegister, endpoint
from pystratis.core.types import Address, Money
from pystratis.api.dynamic_contract.responsemodels import *
class DynamicContract(APIRequest, metaclass=EndpointRegister):
    """Implements the dynamic contract api endpoints."""
    # Fixed docstring: the original said "connectionmanager", a copy-paste
    # slip — this class serves /api/contract.
    route = '/api/contract'

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    @staticmethod
    def _call_headers(wallet_name: str,
                      wallet_password: str,
                      sender: Address,
                      gas_price: int,
                      gas_limit: int,
                      amount: Union[Money, int, float, Decimal],
                      fee_amount: Union[Money, int, float, Decimal]) -> dict:
        """Build the request headers shared by the property and method endpoints."""
        return {
            'GasPrice': str(gas_price),
            'GasLimit': str(gas_limit),
            'Amount': Money(amount).to_coin_unit(),
            'FeeAmount': Money(fee_amount).to_coin_unit(),
            'WalletName': wallet_name,
            'WalletPassword': wallet_password,
            'Sender': str(sender)
        }

    @endpoint(f'{route}/{{address}}/property/{{property}}')
    def property(self,
                 address: Address,
                 property: str,
                 wallet_name: str,
                 wallet_password: str,
                 sender: Address,
                 gas_price: int = 100,
                 gas_limit: int = 250000,
                 amount: Union[Money, int, float, Decimal] = Money(0),
                 fee_amount: Union[Money, int, float, Decimal] = Money(0.01),
                 **kwargs) -> LocalExecutionResultModel:
        """Query the value of a property on the contract using a local call.

        Args:
            address (Address): The smart contract address.
            property (str): The property to query.
            wallet_name (str): The wallet name.
            wallet_password (str): The wallet password.
            sender (Address): The sending address.
            gas_price (int, optional): The gas price. Default=100
            gas_limit (int, optional): The gas limit. Default=250000
            amount (Money, int, float, Decimal, optional): Amount to send. Default=Money(0)
            fee_amount (Money, int, float, Decimal, optional): Fee amount. Default=Money(0.01)
            **kwargs: Extra keyword arguments.

        Returns:
            LocalExecutionResultModel: The results of a local contract execution.

        Raises:
            APIError: Error thrown by node API. See message for details.
        """
        kwargs['endpoint'] = kwargs['endpoint'].replace('{address}', f'{address}')
        kwargs['endpoint'] = kwargs['endpoint'].replace('{property}', f'{property}')
        kwargs['headers'] = self._call_headers(wallet_name, wallet_password, sender,
                                               gas_price, gas_limit, amount, fee_amount)
        data = self.get(**kwargs)
        # Re-hydrate plain address strings into Address objects on this network.
        for transfer in data['internalTransfers']:
            transfer['from'] = Address(address=transfer['from'], network=self._network)
            transfer['to'] = Address(address=transfer['to'], network=self._network)
        for log in data['logs']:
            log['address'] = Address(address=log['address'], network=self._network)
        if data['errorMessage'] is not None:
            # The node returns the message as a serialized {'value': ...} literal.
            data['errorMessage'] = ast.literal_eval(data['errorMessage'])['value']
        if data['gasConsumed'] is not None:
            data['gasConsumed'] = data['gasConsumed']['value']
        # (The original also contained a no-op `data['return'] = data['return']`.)
        return LocalExecutionResultModel(**data)

    @endpoint(f'{route}/{{address}}/method/{{method}}')
    def method(self,
               address: Address,
               method: str,
               data: dict,
               wallet_name: str,
               wallet_password: str,
               sender: Address,
               gas_price: int = 100,
               gas_limit: int = 250000,
               amount: Union[Money, int, float, Decimal] = Money(0),
               fee_amount: Union[Money, int, float, Decimal] = Money(0.01),
               **kwargs) -> BuildContractTransactionModel:
        """Call a method on the contract by broadcasting a call transaction to the network.

        Args:
            address (Address): The smart contract address.
            method (str): The method to call.
            data (dict): The data for the request body.
            wallet_name (str): The wallet name.
            wallet_password (str): The wallet password.
            sender (Address): The sending address.
            gas_price (int, optional): The gas price. Default=100
            gas_limit (int, optional): The gas limit. Default=250000
            amount (Money, int, float, Decimal, optional): Amount to send. Default=Money(0)
            fee_amount (Money, int, float, Decimal, optional): Fee amount. Default=Money(0.01)
            **kwargs: Extra keyword arguments.

        Returns:
            BuildContractTransactionModel: A built smart contract transaction.

        Raises:
            APIError: Error thrown by node API. See message for details.
        """
        kwargs['endpoint'] = kwargs['endpoint'].replace('{address}', f'{address}')
        kwargs['endpoint'] = kwargs['endpoint'].replace('{method}', f'{method}')
        kwargs['headers'] = self._call_headers(wallet_name, wallet_password, sender,
                                               gas_price, gas_limit, amount, fee_amount)
        data = self.post(request_model=data, **kwargs)
        # Fee comes back in satoshi units; normalize to Money.
        data['fee'] = Money.from_satoshi_units(data['fee'])
        return BuildContractTransactionModel(**data)
| 45.548387 | 127 | 0.589412 |
f11c2ccd841a6b251e9476a1841dfadb2cec6039 | 4,630 | py | Python | src/data_processing/text_processing.py | TamPhilip/asl-app | 8f616ccde7a17570b567869554601c37cf78fbfc | [
"MIT"
] | null | null | null | src/data_processing/text_processing.py | TamPhilip/asl-app | 8f616ccde7a17570b567869554601c37cf78fbfc | [
"MIT"
] | null | null | null | src/data_processing/text_processing.py | TamPhilip/asl-app | 8f616ccde7a17570b567869554601c37cf78fbfc | [
"MIT"
] | null | null | null | #%% Run Imports
import pandas as pd
import re
from nltk.corpus import stopwords
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import accuracy_score
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.linear_model.logistic import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import confusion_matrix
import os
import seaborn
import matplotlib.pyplot as plt
from sklearn.externals import joblib
#%% Run preprocessing
# TODO: CREATE A PREPROCESSING/INITIAL MODEL PREDICTION AGAIN
# Resolve the CSV path relative to the current working directory.
path = os.path.abspath(os.curdir)
# Keep only the stop words that contain an apostrophe, with the apostrophe
# stripped (e.g. "don't" -> "dont") — presumably to match the vectorizer's
# tokenisation; TODO confirm this is intended rather than the full list.
stop_words = set(stopwords.words('english'))
stop_words = {x.replace("'","") for x in stop_words if re.search("[']", x.lower())}
movie_data = pd.read_csv('{}/../csv-data/final_data.csv'.format(path))
print(len(movie_data))
# Random ~80/20 train/test split mask (not seeded, so not reproducible).
msk = np.random.rand(len(movie_data)) < 0.8
# Target genres; one binary classifier is trained per genre.
genres = ['Action',
          'Comedy',
          # 'Drama',
          'Thriller',
          'Family',
          'Adventure',
          'Mystery',
          'Romance',
          'Crime'
          ]
# Split columns at index 2: the first two columns hold the index/plot text,
# the remaining columns are the per-genre label indicators.
train = movie_data[msk]
train = np.split(train, [2], axis=1)
train[0].drop(columns='Unnamed: 0', inplace=True)
train_features = train[0]['plot'].values.astype('U')
print(train[1])
train_labels = train[1]
test = movie_data[~msk]
test = np.split(test, [2], axis=1)
test[0].drop(columns='Unnamed: 0', inplace=True)
test_features = test[0]['plot'].values.astype('U')
test_labels = test[1]
#%% Run Predict
def predict_model(type, model, vectorizer, train_labels, train_features, test_labels, test_features):
    """Fit one binary classifier per genre and persist each fitted pipeline.

    For every genre in the module-level `genres` list, the vectorizer+model
    pipeline is refit on that genre's labels, evaluated on the test split,
    dumped to '<type>_<genre>.pkl', and finally the last-fit pipeline is
    dumped once more to '<type>.pkl'.

    Returns:
        dict mapping genre -> (test accuracy, confusion matrix).
    """
    print("\n {} \n".format(type))
    clf_pipeline = Pipeline([
        ('vec', vectorizer),
        ('clf', model),
    ])
    results = {}
    for genre in genres:
        print('... Processing {}'.format(genre))
        y_train = np.array(train_labels[genre]).astype('int')
        y_test = np.array(test_labels[genre]).astype('int')
        # train the model using X_dtm & y
        clf_pipeline.fit(train_features, y_train)
        # compute the testing accuracy
        predicted = clf_pipeline.predict(test_features)
        acc = accuracy_score(y_test, predicted)
        print('Test accuracy is {}'.format(acc))
        results[genre] = (acc, confusion_matrix(y_test, predicted))
        joblib.dump(clf_pipeline, '{}_{}.pkl'.format(type, genre))
    joblib.dump(clf_pipeline, '{}.pkl'.format(type))
    return results
#TFIdVectorizer = CountVectorizer + TfidTransformer (Normalizing)
#%% Run Predictions
# Train/evaluate three one-vs-rest model families over the same TF-IDF features.
type = "Logistic_Regression_TfidVectorizer"
logistic_results = predict_model(type, OneVsRestClassifier(LogisticRegression(solver='sag'), n_jobs=1),
                                 TfidfVectorizer(stop_words=stop_words),
                                 train_labels,
                                 train_features,
                                 test_labels,
                                 test_features)
type = "Support_Vector_Machine_TfidVectorizer"
svm_results = predict_model(type, OneVsRestClassifier(LinearSVC(), n_jobs=1),
                            TfidfVectorizer(stop_words=stop_words),
                            train_labels,
                            train_features,
                            test_labels,
                            test_features)
type = "Naive_Bayes_TfidVectorizer"
nv_results = predict_model(type, OneVsRestClassifier(MultinomialNB(fit_prior=True, class_prior=None), n_jobs=1),
                           TfidfVectorizer(stop_words=stop_words),
                           train_labels,
                           train_features,
                           test_labels,
                           test_features)
#%% Run Data analysis
# Heatmaps of confusion matrices ([1] element of each (accuracy, cm) tuple).
# NOTE(review): the plotted data is nv_results (Naive Bayes) but the x-axis
# label says 'Logistic Regression' — one of the two looks wrong; confirm.
plt.title('Actual')
seaborn.set(font_scale=1.1)  # for label size
df_action = pd.DataFrame(nv_results['Action'][1], index = ['Action', 'N Action'],
                         columns=['Action', 'N Action'])
sb = seaborn.heatmap(df_action, annot=True, fmt='g').xaxis.set_ticks_position('top')
plt.ylabel('Predicted')
plt.xlabel('Logistic Regression')
plt.show()
plt.title('Actual')
df_comedy = pd.DataFrame(nv_results['Comedy'][1], index = ['Comedy', 'Not Com'],
                         columns=['Comedy', 'Not Com'])
sb = seaborn.heatmap(df_comedy, annot=True, fmt='g').xaxis.set_ticks_position('top')
plt.ylabel('Predicted')
plt.xlabel('Logistic Regression')
plt.show()
80dd0fe730592749ef9cd75c008f964776b68613 | 4,027 | py | Python | zerver/lib/markdown/help_relative_links.py | Fingel/zulip | 07c12e8a6caeef8c0f73919052241c5ec05e2b85 | [
"Apache-2.0"
] | 3 | 2021-09-12T05:05:28.000Z | 2021-12-30T09:45:20.000Z | zerver/lib/markdown/help_relative_links.py | Fingel/zulip | 07c12e8a6caeef8c0f73919052241c5ec05e2b85 | [
"Apache-2.0"
] | null | null | null | zerver/lib/markdown/help_relative_links.py | Fingel/zulip | 07c12e8a6caeef8c0f73919052241c5ec05e2b85 | [
"Apache-2.0"
] | null | null | null | import re
from typing import Any, List, Match, Optional
from markdown import Markdown
from markdown.extensions import Extension
from markdown.preprocessors import Preprocessor
from zerver.lib.markdown.preprocessor_priorities import PREPROCESSOR_PRIORITES
# There is a lot of duplicated code between this file and
# help_settings_links.py. So if you're making a change here consider making
# it there as well.
REGEXP = re.compile(r"\{relative\|(?P<link_type>.*?)\|(?P<key>.*?)\}")
gear_info = {
# The pattern is key: [name, link]
# key is from REGEXP: `{relative|gear|key}`
# name is what the item is called in the gear menu: `Select **name**.`
# link is used for relative links: `Select [name](link).`
"manage-streams": ["Manage streams", "/#streams/subscribed"],
"settings": ["Personal Settings", "/#settings/profile"],
"manage-organization": ["Manage organization", "/#organization/organization-profile"],
"integrations": ["Integrations", "/integrations"],
"stats": ["Usage statistics", "/stats"],
"plans": ["Plans and pricing", "/plans"],
"billing": ["Billing", "/billing"],
"invite": ["Invite users", "/#invite"],
}
gear_instructions = """
1. Click on the **gear** (<i class="fa fa-cog"></i>) icon in the upper
right corner of the web or desktop app.
1. Select {item}.
"""
def gear_handle_match(key: str) -> str:
    """Render gear-menu navigation steps for *key*.

    Uses a Markdown link when relative_help_links is enabled, otherwise the
    bold menu-item name, substituted into the shared gear_instructions text.
    """
    name, link = gear_info[key]
    item = f"[{name}]({link})" if relative_help_links else f"**{name}**"
    return gear_instructions.format(item=item)
stream_info = {
"all": ["All streams", "/#streams/all"],
"subscribed": ["Subscribed streams", "/#streams/subscribed"],
}
stream_instructions_no_link = """
1. Click on the **gear** (<i class="fa fa-cog"></i>) icon in the upper
right corner of the web or desktop app.
1. Click **Manage streams**.
"""
def stream_handle_match(key: str) -> str:
    """Render stream-settings navigation steps for *key* ('all' or 'subscribed').

    Emits a direct relative link when relative_help_links is enabled;
    otherwise step-by-step gear-menu instructions, with an extra step for
    the "All streams" tab.
    """
    if relative_help_links:
        name, link = stream_info[key]
        return f"1. Go to [{name}]({link})."
    instructions = stream_instructions_no_link
    if key == "all":
        instructions += "\n\n1. Click **All streams** in the upper left."
    return instructions
LINK_TYPE_HANDLERS = {
"gear": gear_handle_match,
"stream": stream_handle_match,
}
class RelativeLinksHelpExtension(Extension):
    """Markdown extension that registers the RelativeLinks preprocessor."""

    def extendMarkdown(self, md: Markdown) -> None:
        """Add RelativeLinksHelpExtension to the Markdown instance."""
        md.registerExtension(self)
        # Priority comes from the shared PREPROCESSOR_PRIORITES table so all
        # zerver preprocessors keep a consistent ordering.
        md.preprocessors.register(
            RelativeLinks(), "help_relative_links", PREPROCESSOR_PRIORITES["help_relative_links"]
        )
relative_help_links: Optional[bool] = None
def set_relative_help_links(value: bool) -> None:
    """Toggle whether handlers emit relative links (True) or plain menu names."""
    global relative_help_links
    relative_help_links = value
class RelativeLinks(Preprocessor):
    """Preprocessor expanding {relative|link_type|key} macros into help text."""

    def run(self, lines: List[str]) -> List[str]:
        """Repeatedly expand the first remaining macro until none are left."""
        done = False
        while not done:
            # Bug fix: the original located the match with lines.index(line),
            # which returns the FIRST occurrence of the line's text. With
            # duplicate lines the replacement was spliced at the wrong
            # position and the macro line survived, looping forever.
            # enumerate() gives the true position.
            for loc, line in enumerate(lines):
                match = REGEXP.search(line)
                if match:
                    text = [self.handleMatch(match)]
                    # The line that contains the directive to include the macro
                    # may be preceded or followed by text or tags, in that case
                    # we need to make sure that any preceding or following text
                    # stays the same.
                    line_split = REGEXP.split(line, maxsplit=0)
                    preceding = line_split[0]
                    following = line_split[-1]
                    text = [preceding, *text, following]
                    lines = lines[:loc] + text + lines[loc + 1 :]
                    break
            else:
                done = True
        return lines

    def handleMatch(self, match: Match[str]) -> str:
        # Dispatch on the link type ('gear' or 'stream') to the handler table.
        return LINK_TYPE_HANDLERS[match.group("link_type")](match.group("key"))
def makeExtension(*args: Any, **kwargs: Any) -> RelativeLinksHelpExtension:
    # Entry point used by python-markdown to instantiate this extension.
    return RelativeLinksHelpExtension(*args, **kwargs)
| 33.280992 | 97 | 0.630246 |
46b9984200cc9b8583b7bbb9fb0b7963293e8855 | 3,304 | py | Python | test/test_cli.py | rob-smallshire/eseries | bfdeecf404e0e8226fb2a8fce97cc5f426420199 | [
"MIT"
] | 6 | 2019-01-28T13:36:39.000Z | 2021-12-03T08:45:47.000Z | test/test_cli.py | rob-smallshire/eseries | bfdeecf404e0e8226fb2a8fce97cc5f426420199 | [
"MIT"
] | 2 | 2020-04-22T21:50:00.000Z | 2021-05-13T07:56:22.000Z | test/test_cli.py | rob-smallshire/eseries | bfdeecf404e0e8226fb2a8fce97cc5f426420199 | [
"MIT"
] | 3 | 2019-01-28T13:36:44.000Z | 2021-07-21T01:56:17.000Z | import os
from eseries.cli import main
def _run_cli(capfd, command):
    """Run the eseries CLI with *command* (split on whitespace).

    Returns (exit_code, captured_stdout). stderr is consumed and discarded so
    that stdout assertions are not polluted. Extracted to remove the
    copy-pasted main()/readouterr() boilerplate from every test.
    """
    code = main(command.split())
    out, _ = capfd.readouterr()
    return code, out


def test_nearest(capfd):
    code, out = _run_cli(capfd, "nearest E12 21")
    assert code == os.EX_OK
    assert out == "22\n"


def test_nearest_with_symbol(capfd):
    code, out = _run_cli(capfd, "nearest E12 21000 -s")
    assert code == os.EX_OK
    assert out == "22 k\n"


def test_nearby(capfd):
    code, out = _run_cli(capfd, "nearby E24 21")
    assert code == os.EX_OK
    assert out == "18\n20\n22\n"


def test_gt(capfd):
    code, out = _run_cli(capfd, "gt E24 21")
    assert code == os.EX_OK
    assert out == "22\n"


def test_lt(capfd):
    code, out = _run_cli(capfd, "lt E24 21")
    assert code == os.EX_OK
    assert out == "20\n"


def test_ge(capfd):
    code, out = _run_cli(capfd, "ge E24 22")
    assert code == os.EX_OK
    assert out == "22\n"


def test_le(capfd):
    code, out = _run_cli(capfd, "le E24 22")
    assert code == os.EX_OK
    assert out == "22\n"


def test_tolerance(capfd):
    code, out = _run_cli(capfd, "tolerance E12")
    assert code == os.EX_OK
    assert out == "0.1\n"


def test_tolerance_e12_percent(capfd):
    code, out = _run_cli(capfd, "tolerance E12 --symbol")
    assert code == os.EX_OK
    assert out == "10%\n"


def test_tolerance_e192_percent(capfd):
    code, out = _run_cli(capfd, "tolerance E192 --symbol")
    assert code == os.EX_OK
    assert out == "0.5%\n"


def test_series_e3(capfd):
    code, out = _run_cli(capfd, "series E3")
    assert code == os.EX_OK
    assert out == "10\n22\n47\n"


def test_range_e12(capfd):
    code, out = _run_cli(capfd, "range E12 1700 3400")
    assert code == os.EX_OK
    assert out == "1.8e3\n2.2e3\n2.7e3\n3.3e3\n"


def test_range_e12_symbol(capfd):
    code, out = _run_cli(capfd, "range E12 1700 3400 -s")
    assert code == os.EX_OK
    assert out == "1.8 k\n2.2 k\n2.7 k\n3.3 k\n"


def test_lower_tolerance(capfd):
    code, out = _run_cli(capfd, "lower-tolerance-limit E48 316")
    assert code == os.EX_OK
    assert out == "310\n"


def test_lower_tolerance_zero(capfd):
    code, out = _run_cli(capfd, "lower-tolerance-limit E48 0")
    assert code == os.EX_OK
    assert out == "0\n"


def test_upper_tolerance(capfd):
    code, out = _run_cli(capfd, "upper-tolerance-limit E48 316")
    assert code == os.EX_OK
    assert out == "322\n"


def test_tolerance_limits(capfd):
    code, out = _run_cli(capfd, "tolerance-limits E48 316")
    assert code == os.EX_OK
    assert out == "310\n322\n"


def test_bogus_e_series_gives_exit_code_ex_dataerr():
    code = main("tolerance-limits E13 316".split())
    assert code == os.EX_DATAERR


def test_bogus_value_gives_exit_code_ex_dataerr():
    code = main("tolerance-limits E12 FOO".split())
    assert code == os.EX_DATAERR


def test_malformed_command_gives_code_ex_usage():
    code = main("foo E13 316".split())
    assert code == os.EX_USAGE
76604303c40d4962c59b4dd83c285e28ec3e8b5b | 18,403 | bzl | Python | deps.bzl | AffineCOP26/daml | 6388396bc47ddfe8743f6e076a1929e2d659624c | [
"Apache-2.0"
] | 1 | 2021-04-22T00:23:14.000Z | 2021-04-22T00:23:14.000Z | deps.bzl | AffineCOP26/daml | 6388396bc47ddfe8743f6e076a1929e2d659624c | [
"Apache-2.0"
] | 5 | 2021-06-15T21:44:19.000Z | 2022-02-27T13:59:46.000Z | deps.bzl | Darkstar1t/daml | 802362b59879455fff5187a55b49e341408be0bc | [
"Apache-2.0"
] | 1 | 2021-03-25T09:14:22.000Z | 2021-03-25T09:14:22.000Z | # Copyright (c) 2021 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# The dependencies of the daml workspace.
# This allows using the daml workspace externally
# from another bazel workspace.
#
# For example, another Bazel project can depend on
# targets in the daml repository by doing:
# ---
# local_repository(
# name = "com_github_digital_asset_daml",
# path = "/path/to/daml"
# )
# load("@com_github_digital_asset_daml//:deps.bzl", "daml_deps")
# daml_deps()
# ---
#
# A 3rd-party consumer would also need to register relevant
# toolchains and repositories in order to build targets.
# That is, copy some setup calls from WORKSPACE into the
# other WORKSPACE.
#
# Make sure to reference repository local files with the full
# prefix: @com_github_digital_asset_daml//..., as these won't
# be resolvable from external workspaces otherwise.
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive", "http_file")
load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")
load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe")
# Pinned revisions/versions and sha256 checksums of the external Bazel
# repositories fetched by daml_deps() below. When bumping a *_version,
# update the matching *_sha256 (checksum of the downloaded archive).
rules_scala_version = "67a7ac178a73d1d5ff4c2b0663a8eda6dfcbbc56"
rules_scala_sha256 = "95054009fd938ac7ef53a20619f94a5408d8ae74eb5b318cd150a3ecb1a6086f"

rules_haskell_version = "60ed30aab00e9ffa2e2fe19e59f7de885f029556"
rules_haskell_sha256 = "a9c94b1fb61e1e341b7544305e9b0a359594779f797fddfcfcd447709c7c9820"
rules_nixpkgs_version = "0dd4c8a085b108592b0193ad1e237e2e07f715ac"
rules_nixpkgs_sha256 = "f2073135db911ee94b70da1e2288dd2445976a1b20a1edfe67773b29751f50a9"
buildifier_version = "4.0.0"
buildifier_sha256 = "0d3ca4ed434958dda241fb129f77bd5ef0ce246250feed2d5a5470c6f29a77fa"
zlib_version = "1.2.11"
zlib_sha256 = "629380c90a77b964d896ed37163f5c3a34f6e6d897311f1df2a7016355c45eff"
rules_nodejs_version = "2.3.1"
rules_nodejs_sha256 = "121f17d8b421ce72f3376431c3461cd66bfe14de49059edc7bb008d5aebd16be"
rules_jvm_external_version = "3.3"
rules_jvm_external_sha256 = "d85951a92c0908c80bd8551002d66cb23c3434409c814179c0ff026b53544dab"
rules_go_version = "0.23.6"
rules_go_sha256 = "8663604808d2738dc615a2c3eb70eba54a9a982089dd09f6ffe5d0e75771bc4f"
rules_bazel_common_version = "9e3880428c1837db9fb13335ed390b7e33e346a7"
rules_bazel_common_sha256 = "48a209fed9575c9d108eaf11fb77f7fe6178a90135e4d60cac6f70c2603aa53a"

# Recent davl.
davl_version = "f2d7480d118f32626533d6a150a8ee7552cc0222"  # 2020-03-23, "Deploy upgrade to SDK 0.13.56-snapshot.20200318",https://github.com/digital-asset/davl/pull/233/commits.
davl_sha256 = "3e8ae2a05724093e33b7f0363381e81a7e8e9655ccb3aa47ad540ea87e814321"

# Pinned davl relied on by damlc packaging tests.
davl_v3_version = "51d3977be2ab22f7f4434fd4692ca2e17a7cce23"
davl_v3_sha256 = "e8e76e21b50fb3adab36df26045b1e8c3ee12814abc60f137d39b864d2eae166"

# daml cheat sheet
daml_cheat_sheet_version = "5ae141096d7fc0031392206e80f71f7dc3b23e1c"  # 2021-03-11
daml_cheat_sheet_sha256 = "e51651b34bc67704c8f6995207982f9e87758460246c2132dd8fe524277f612b"

platforms_version = "0.0.3"
platforms_sha256 = "15b66b5219c03f9e8db34c1ac89c458bb94bfe055186e5505d5c6f09cb38307f"
def daml_deps():
if "platforms" not in native.existing_rules():
http_archive(
name = "platforms",
sha256 = platforms_sha256,
strip_prefix = "platforms-{}".format(platforms_version),
urls = ["https://github.com/bazelbuild/platforms/archive/{version}.tar.gz".format(version = platforms_version)],
)
if "rules_haskell" not in native.existing_rules():
http_archive(
name = "rules_haskell",
strip_prefix = "rules_haskell-%s" % rules_haskell_version,
urls = ["https://github.com/tweag/rules_haskell/archive/%s.tar.gz" % rules_haskell_version],
patches = [
# Update and remove this patch once this is upstreamed.
# See https://github.com/tweag/rules_haskell/pull/1281
"@com_github_digital_asset_daml//bazel_tools:haskell-strict-source-names.patch",
# The fake libs issue should be fixed in upstream rules_haskell
# or GHC. Remove this patch once that's available.
"@com_github_digital_asset_daml//bazel_tools:haskell-windows-remove-fake-libs.patch",
# This is a daml specific patch and not upstreamable.
"@com_github_digital_asset_daml//bazel_tools:haskell-windows-extra-libraries.patch",
# This should be made configurable in rules_haskell.
# Remove this patch once that's available.
"@com_github_digital_asset_daml//bazel_tools:haskell-opt.patch",
# Can be removed once https://github.com/tweag/rules_haskell/pull/1464 is merged.
"@com_github_digital_asset_daml//bazel_tools:haskell-cc-wrapper-windows.patch",
],
patch_args = ["-p1"],
sha256 = rules_haskell_sha256,
)
if "io_tweag_rules_nixpkgs" not in native.existing_rules():
http_archive(
name = "io_tweag_rules_nixpkgs",
strip_prefix = "rules_nixpkgs-%s" % rules_nixpkgs_version,
urls = ["https://github.com/tweag/rules_nixpkgs/archive/%s.tar.gz" % rules_nixpkgs_version],
sha256 = rules_nixpkgs_sha256,
patches = [
# On CI and locally we observe occasional segmantation faults
# of nix. A known issue since Nix 2.2.2 is that HTTP2 support
# can cause such segmentation faults. Since Nix 2.3.2 it is
# possible to disable HTTP2 via a command-line flag, which
# reportedly solves the issue. See
# https://github.com/NixOS/nix/issues/2733#issuecomment-518324335
"@com_github_digital_asset_daml//bazel_tools:nixpkgs-disable-http2.patch",
# Already upstreamed to rules-nixpkgs. Remove on next upgrade.
"@com_github_digital_asset_daml//bazel_tools:rules-nixpkgs-llvm-cov.patch",
],
patch_args = ["-p1"],
)
if "com_github_madler_zlib" not in native.existing_rules():
http_archive(
name = "com_github_madler_zlib",
build_file = "@com_github_digital_asset_daml//3rdparty/c:zlib.BUILD",
strip_prefix = "zlib-{}".format(zlib_version),
urls = ["https://github.com/madler/zlib/archive/v{}.tar.gz".format(zlib_version)],
sha256 = zlib_sha256,
)
if "io_bazel_rules_go" not in native.existing_rules():
http_archive(
name = "io_bazel_rules_go",
urls = [
"https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v{version}/rules_go-v{version}.tar.gz".format(version = rules_go_version),
"https://github.com/bazelbuild/rules_go/releases/download/v{version}/rules_go-v{version}.tar.gz".format(version = rules_go_version),
],
sha256 = rules_go_sha256,
)
if "rules_jvm_external" not in native.existing_rules():
http_archive(
name = "rules_jvm_external",
strip_prefix = "rules_jvm_external-{}".format(rules_jvm_external_version),
sha256 = rules_jvm_external_sha256,
url = "https://github.com/bazelbuild/rules_jvm_external/archive/{}.zip".format(rules_jvm_external_version),
)
if "io_bazel_rules_scala" not in native.existing_rules():
http_archive(
name = "io_bazel_rules_scala",
url = "https://github.com/bazelbuild/rules_scala/archive/%s.zip" % rules_scala_version,
type = "zip",
strip_prefix = "rules_scala-%s" % rules_scala_version,
sha256 = rules_scala_sha256,
patches = [
"@com_github_digital_asset_daml//bazel_tools:scala-escape-jvmflags.patch",
],
patch_args = ["-p1"],
)
if "com_google_protobuf" not in native.existing_rules():
http_archive(
name = "com_google_protobuf",
sha256 = "bf0e5070b4b99240183b29df78155eee335885e53a8af8683964579c214ad301",
# changing this version needs to be in sync with protobuf-java and grpc dependencies in bazel-java-bdeps.bzl
strip_prefix = "protobuf-3.14.0",
urls = ["https://github.com/google/protobuf/archive/v3.14.0.zip"],
patches = [
"@com_github_digital_asset_daml//bazel_tools:protobuf-win32.patch",
],
patch_args = ["-p1"],
)
if "bazel_gazelle" not in native.existing_rules():
http_archive(
name = "bazel_gazelle",
urls = [
"https://storage.googleapis.com/bazel-mirror/github.com/bazelbuild/bazel-gazelle/releases/download/v0.19.1/bazel-gazelle-v0.19.1.tar.gz",
"https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.19.1/bazel-gazelle-v0.19.1.tar.gz",
],
sha256 = "86c6d481b3f7aedc1d60c1c211c6f76da282ae197c3b3160f54bd3a8f847896f",
)
if "io_bazel_rules_sass" not in native.existing_rules():
http_archive(
name = "io_bazel_rules_sass",
sha256 = "7b9c9a88099d00dbb16be359c3b1946309d99673220c6b39c7e8bda8ecc692f8",
strip_prefix = "rules_sass-1.24.4",
urls = [
"https://github.com/bazelbuild/rules_sass/archive/1.24.4.zip",
"https://mirror.bazel.build/github.com/bazelbuild/rules_sass/archive/1.24.4.zip",
],
)
# Fetch rules_nodejs so we can install our npm dependencies
if "build_bazel_rules_nodejs" not in native.existing_rules():
http_archive(
name = "build_bazel_rules_nodejs",
urls = ["https://github.com/bazelbuild/rules_nodejs/releases/download/{}/rules_nodejs-{}.tar.gz".format(rules_nodejs_version, rules_nodejs_version)],
sha256 = rules_nodejs_sha256,
patches = [
# Work around for https://github.com/bazelbuild/rules_nodejs/issues/1565
"@com_github_digital_asset_daml//bazel_tools:rules_nodejs_npm_cli_path.patch",
"@com_github_digital_asset_daml//bazel_tools:rules_nodejs_node_dependency.patch",
],
patch_args = ["-p1"],
)
if "upb" not in native.existing_rules():
# upb is a dependency of com_github_grpc_grpc.
# It is usually pulled in automatically by grpc_deps(), but depend on it explicitly to patch it.
# This http_archive can be removed when we no longer need to patch upb.
http_archive(
name = "upb",
sha256 = "c0b97bf91dfea7e8d7579c24e2ecdd02d10b00f3c5defc3dce23d95100d0e664",
strip_prefix = "upb-60607da72e89ba0c84c84054d2e562d8b6b61177",
urls = [
"https://storage.googleapis.com/grpc-bazel-mirror/github.com/protocolbuffers/upb/archive/60607da72e89ba0c84c84054d2e562d8b6b61177.tar.gz",
"https://github.com/protocolbuffers/upb/archive/60607da72e89ba0c84c84054d2e562d8b6b61177.tar.gz",
],
)
if "com_github_grpc_grpc" not in native.existing_rules():
http_archive(
name = "com_github_grpc_grpc",
strip_prefix = "grpc-1.36.0",
urls = ["https://github.com/grpc/grpc/archive/v1.36.0.tar.gz"],
sha256 = "1a5127c81487f4e3e57973bb332f04b9159f94d860c207e096d8a587d371edbd",
patches = [
"@com_github_digital_asset_daml//bazel_tools:grpc-bazel-mingw.patch",
],
patch_args = ["-p1"],
)
if "com_google_absl" not in native.existing_rules():
http_archive(
name = "com_google_absl",
sha256 = "3d74cdc98b42fd4257d91f652575206de195e2c824fcd8d6e6d227f85cb143ef",
strip_prefix = "abseil-cpp-0f3bb466b868b523cf1dc9b2aaaed65c77b28862",
urls = [
"https://storage.googleapis.com/grpc-bazel-mirror/github.com/abseil/abseil-cpp/archive/0f3bb466b868b523cf1dc9b2aaaed65c77b28862.tar.gz",
"https://github.com/abseil/abseil-cpp/archive/0f3bb466b868b523cf1dc9b2aaaed65c77b28862.tar.gz",
],
patches = [
"@com_github_digital_asset_daml//bazel_tools:absl-mingw.patch",
],
patch_args = ["-p1"],
)
if "io_grpc_grpc_java" not in native.existing_rules():
http_archive(
name = "io_grpc_grpc_java",
strip_prefix = "grpc-java-1.35.0",
urls = ["https://github.com/grpc/grpc-java/archive/v1.35.0.tar.gz"],
sha256 = "537d01bdc5ae2bdb267853a75578d671db3075b33e3a00a93f5a572191d3a7b3",
patch_args = ["-p1"],
)
if "com_github_johnynek_bazel_jar_jar" not in native.existing_rules():
http_archive(
name = "com_github_johnynek_bazel_jar_jar",
sha256 = "841ae424eec3f322d411eb49d949622cc84787cb4189a30698fa9adadb98deac",
strip_prefix = "bazel_jar_jar-20dbf71f09b1c1c2a8575a42005a968b38805519",
urls = ["https://github.com/johnynek/bazel_jar_jar/archive/20dbf71f09b1c1c2a8575a42005a968b38805519.zip"], # Latest commit SHA as at 2019/02/13
)
if "com_github_googleapis_googleapis" not in native.existing_rules():
http_archive(
name = "com_github_googleapis_googleapis",
strip_prefix = "googleapis-6c48ab5aef47dc14e02e2dc718d232a28067129d",
urls = ["https://github.com/googleapis/googleapis/archive/6c48ab5aef47dc14e02e2dc718d232a28067129d.tar.gz"],
sha256 = "70d7be6ad49b4424313aad118c8622aab1c5fdd5a529d4215d3884ff89264a71",
)
if "com_github_bazelbuild_remote_apis" not in native.existing_rules():
http_archive(
name = "com_github_bazelbuild_remote_apis",
strip_prefix = "remote-apis-2.0.0",
urls = ["https://github.com/bazelbuild/remote-apis/archive/v2.0.0.tar.gz"],
sha256 = "79204ed1fa385c03b5235f65b25ced6ac51cf4b00e45e1157beca6a28bdb8043",
patches = ["@com_github_digital_asset_daml//:bazel_tools/remote_apis_no_services.patch"],
patch_args = ["-p1"],
)
# Buildifier.
# It is written in Go and hence needs rules_go to be available.
if "com_github_bazelbuild_buildtools" not in native.existing_rules():
http_archive(
name = "com_github_bazelbuild_buildtools",
sha256 = buildifier_sha256,
strip_prefix = "buildtools-{}".format(buildifier_version),
url = "https://github.com/bazelbuild/buildtools/archive/{}.tar.gz".format(buildifier_version),
)
native.bind(
name = "guava",
actual = "@com_google_guava_guava//jar",
)
native.bind(
name = "gson",
actual = "@com_google_code_gson_gson//jar",
)
if "com_github_google_bazel_common" not in native.existing_rules():
http_archive(
name = "com_github_google_bazel_common",
sha256 = rules_bazel_common_sha256,
strip_prefix = "bazel-common-{}".format(rules_bazel_common_version),
urls = ["https://github.com/google/bazel-common/archive/{}.zip".format(rules_bazel_common_version)],
)
maybe(
http_archive,
name = "rules_pkg",
urls = [
"https://github.com/bazelbuild/rules_pkg/releases/download/0.2.6-1/rules_pkg-0.2.6.tar.gz",
"https://mirror.bazel.build/github.com/bazelbuild/rules_pkg/releases/download/0.2.6/rules_pkg-0.2.6.tar.gz",
],
sha256 = "aeca78988341a2ee1ba097641056d168320ecc51372ef7ff8e64b139516a4937",
)
if "com_github_grpc_ecosystem_grpc_health_probe_binary" not in native.existing_rules():
http_file(
name = "com_github_grpc_ecosystem_grpc_health_probe_binary",
sha256 = "bfbe82e34645e91cdf3bacbb0d2dc7786f3c3cc4da6b64a446e5fdfb7bb0429f",
downloaded_file_path = "grpc-health-probe",
urls = [
"https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/v0.3.1/grpc_health_probe-linux-amd64",
],
executable = True,
)
if "davl-v3" not in native.existing_rules():
http_archive(
name = "davl-v3",
strip_prefix = "davl-{}".format(davl_v3_version),
urls = ["https://github.com/digital-asset/davl/archive/{}.tar.gz".format(davl_v3_version)],
sha256 = davl_v3_sha256,
build_file_content = """
package(default_visibility = ["//visibility:public"])
exports_files(["released/davl-v3.dar"])
""",
)
if "davl" not in native.existing_rules():
http_archive(
name = "davl",
strip_prefix = "davl-{}".format(davl_version),
urls = ["https://github.com/digital-asset/davl/archive/{}.tar.gz".format(davl_version)],
sha256 = davl_sha256,
build_file_content = """
package(default_visibility = ["//visibility:public"])
exports_files(["released/davl-v4.dar", "released/davl-v5.dar", "released/davl-upgrade-v3-v4.dar", "released/davl-upgrade-v4-v5.dar"])
""",
)
if "daml-cheat-sheet" not in native.existing_rules():
http_archive(
name = "daml-cheat-sheet",
strip_prefix = "daml-cheat-sheet-{}".format(daml_cheat_sheet_version),
urls = ["https://github.com/digital-asset/daml-cheat-sheet/archive/{}.tar.gz".format(daml_cheat_sheet_version)],
sha256 = daml_cheat_sheet_sha256,
build_file_content = """
package(default_visibility = ["//visibility:public"])
genrule(
name = "site",
srcs = ["_config.yml"] + glob(["**/*"],
exclude = ["_config.yml", "LICENSE", "WORKSPACE", "BUILD.bazel", "README.md"]),
outs = ["cheat-sheet.tar.gz"],
tools = ["@jekyll_nix//:bin/jekyll"],
cmd = '''
DIR=$$(dirname $(execpath _config.yml))
$(execpath @jekyll_nix//:bin/jekyll) build -s $$DIR
tar hc _site \\\\
--owner=1000 \\\\
--group=1000 \\\\
--mtime=2000-01-01\\\\ 00:00Z \\\\
--no-acls \\\\
--no-xattrs \\\\
--no-selinux \\\\
--sort=name \\\\
| gzip -n > $(OUTS)
''',
)
""",
)
| 47.552972 | 178 | 0.662229 |
63bdfee8d51b739e428cf17293d4511602c2874d | 1,403 | py | Python | gamestonk_terminal/discovery/alpha_vantage_view.py | alexgallego1997/GamestonkTerminal | 1c6ce5c99111aa7195c51f6930fcdbb9dadd2f00 | [
"MIT"
] | 3 | 2021-04-13T06:26:46.000Z | 2022-01-26T05:11:22.000Z | gamestonk_terminal/discovery/alpha_vantage_view.py | lolrenx/GamestonkTerminal | eb2b0d766bf1b6bb8656d6733083962efb152fe2 | [
"MIT"
] | 2 | 2021-04-17T10:24:06.000Z | 2021-04-17T10:44:59.000Z | gamestonk_terminal/discovery/alpha_vantage_view.py | lolrenx/GamestonkTerminal | eb2b0d766bf1b6bb8656d6733083962efb152fe2 | [
"MIT"
] | null | null | null | """ Alpha Vantage View """
__docformat__ = "numpy"
import argparse
from typing import List
import matplotlib.pyplot as plt
from alpha_vantage.sectorperformance import SectorPerformances
from gamestonk_terminal import config_terminal as cfg
from gamestonk_terminal.helper_funcs import parse_known_args_and_warn
from gamestonk_terminal import feature_flags as gtff
def sectors_view(other_args: List[str]):
"""Opens a bar chart with sector performance
Parameters
----------
other_args : List[str]
argparse other args
"""
parser = argparse.ArgumentParser(
add_help=False,
prog="sectors",
description="""
Real-time and historical sector performances calculated from
S&P500 incumbents. Pops plot in terminal. [Source: Alpha Vantage]
""",
)
ns_parser = parse_known_args_and_warn(parser, other_args)
if not ns_parser:
return
sector_perf = SectorPerformances(
key=cfg.API_KEY_ALPHAVANTAGE, output_format="pandas"
)
# pylint: disable=unbalanced-tuple-unpacking
df_sectors, _ = sector_perf.get_sector()
# pylint: disable=invalid-sequence-index
df_sectors["Rank A: Real-Time Performance"].plot(kind="bar")
plt.title("Real Time Performance (%) per Sector")
plt.tight_layout()
plt.grid()
if gtff.USE_ION:
plt.ion()
plt.show()
print("")
| 27.509804 | 77 | 0.692088 |
5a352ec6003f8c55cb994f8ce69fc844174fe0dc | 3,587 | py | Python | app/controllers/admin/board.py | meongbego/IOT_ADRINI | 0923b86a9d1da5d6859b70726ad1e041aecc97b2 | [
"MIT"
] | 1 | 2019-07-27T12:17:23.000Z | 2019-07-27T12:17:23.000Z | app/controllers/admin/board.py | meongbego/ADRINI_IOT_PLATFORM | 0923b86a9d1da5d6859b70726ad1e041aecc97b2 | [
"MIT"
] | 4 | 2021-04-18T11:41:31.000Z | 2021-06-01T23:12:19.000Z | app/controllers/admin/board.py | sofyan48/ADRINI_IOT_PLATFORM | 0923b86a9d1da5d6859b70726ad1e041aecc97b2 | [
"MIT"
] | null | null | null | from flask_restful import Resource, reqparse, fields
from app.helpers.rest import *
from app.helpers.memcache import *
from app.models import model as db
from app.middlewares.auth import admin_required
class BoardResource(Resource):
@admin_required
def get(self):
obj_userdata = list()
try:
results = db.get_all("tb_board")
except Exception:
return response(200, message="Data Not Found")
else:
for i in results :
data = {
"id_board": str(i['id_board']),
"serial" : i['serial_board'],
"nm_board" : i['nm_board']
}
obj_userdata.append(data)
return response(200, data=obj_userdata)
class BoardResourceById(Resource):
@admin_required
def get(self, id_board):
obj_userdata = []
results = db.get_by_id(
table="tb_board",
field="id_board",
value=id_board
)
for i in results :
data = {
"id_board": str(i['id_board']),
"serial" : i['serial_board'],
"nm_board" : i['nm_board']
}
obj_userdata.append(data)
return response(200, data=obj_userdata)
class BoardInsert(Resource):
@admin_required
def post(self):
parser = reqparse.RequestParser()
parser.add_argument('nm_board', type=str, required=True)
parser.add_argument('serial_board', type=str, required=True)
args = parser.parse_args()
data_insert = {
"nm_board" : args['nm_board'],
"serial_board" : args['serial_board']
}
try:
result = db.insert(table="tb_board", data=data_insert)
except Exception as e:
message = {
"status": False,
"error": str(e)
}
return response(200, message=message)
else:
respon = {
"data": data_insert,
"id" : result
}
return response(200, data=respon)
class BoardRemove(Resource):
@admin_required
def delete(self, id_board):
try:
db.delete(
table="tb_board",
field='id_board',
value=id_board
)
except Exception as e:
message = {
"status": False,
"error": str(e)
}
else:
message = "removing"
finally:
return response(200, message=message)
class BoardUpdate(Resource):
@admin_required
def put(self, id_board):
parser = reqparse.RequestParser()
parser.add_argument('nm_board', type=str, required=True)
parser.add_argument('serial_board', type=str, required=True)
args = parser.parse_args()
data = {
"where":{
"id_board": id_board
},
"data":{
"nm_board" : args['nm_board'],
"serial_board" : args['serial_board']
}
}
try:
db.update("tb_board", data=data)
except Exception as e:
message = {
"status": False,
"error": str(e)
}
else:
message = {
"status": True,
"data": data
}
finally:
return response(200, message=message)
| 28.023438 | 68 | 0.489546 |
aa8c79eea86455da6bc4193edf55bf458cdf84ee | 4,256 | py | Python | ns/shaper/token_bucket.py | wsyCUHK/ns.py | 44f9c627a9d3b9b31a0799b9a9cea50560eda8a1 | [
"Apache-2.0"
] | 3 | 2021-06-17T01:57:43.000Z | 2021-12-16T11:53:31.000Z | ns/shaper/token_bucket.py | wsyCUHK/ns.py | 44f9c627a9d3b9b31a0799b9a9cea50560eda8a1 | [
"Apache-2.0"
] | null | null | null | ns/shaper/token_bucket.py | wsyCUHK/ns.py | 44f9c627a9d3b9b31a0799b9a9cea50560eda8a1 | [
"Apache-2.0"
] | null | null | null | """
Models an ideal token bucket shaper.
"""
import simpy
class TokenBucketShaper:
""" The token bucket size should be greater than the size of the largest packet that
can occur on input. If this is not the case we always accumulate enough tokens to let
the current packet pass based on the average rate. This may not be the behavior you desire.
Parameters
----------
env : simpy.Environment
the simulation environment
rate : float
the token arrival rate in bits
b_size : Number
a token bucket size in bytes
peak : Number or None for infinite peak
the peak sending rate of the buffer (quickest time two packets could be sent)
"""
def __init__(self,
env,
rate,
b_size,
peak=None,
zero_buffer=False,
zero_downstream_buffer=False,
debug=False):
self.store = simpy.Store(env)
self.rate = rate
self.env = env
self.out = None
self.packets_received = 0
self.packets_sent = 0
self.b_size = b_size
self.peak = peak
self.upstream_updates = {}
self.upstream_stores = {}
self.zero_buffer = zero_buffer
self.zero_downstream_buffer = zero_downstream_buffer
if self.zero_downstream_buffer:
self.downstream_stores = simpy.Store(env)
self.current_bucket = b_size # Current size of the bucket in bytes
self.update_time = 0.0 # Last time the bucket was updated
self.debug = debug
self.busy = 0 # Used to track if a packet is currently being sent ?
self.action = env.process(
self.run()) # starts the run() method as a SimPy process
def update(self, packet):
if self.zero_buffer:
self.upstream_stores[packet].get()
del self.upstream_stores[packet]
self.upstream_updates[packet](packet)
del self.upstream_updates[packet]
if self.debug:
print(
f"Sent packet {packet.id} from flow {packet.flow_id} with color {packet.color}"
)
def run(self):
while True:
if self.zero_downstream_buffer:
packet = yield self.downstream_stores.get()
else:
packet = yield self.store.get()
self.update(packet)
now = self.env.now
self.current_bucket = min(
self.b_size, self.current_bucket + self.rate *
(now - self.update_time) / 8.0)
self.update_time = now
if packet.size > self.current_bucket:
yield self.env.timeout(
(packet.size - self.current_bucket) * 8.0 / self.rate)
self.current_bucket = 0.0
self.update_time = self.env.now
else:
self.current_bucket -= packet.size
self.update_time = self.env.now
if not self.peak:
if self.zero_downstream_buffer:
self.out.put(packet,
upstream_update=self.update,
upstream_store=self.store)
else:
self.out.put(packet)
else:
yield self.env.timeout(packet.size * 8.0 / self.rate)
if self.zero_downstream_buffer:
self.out.put(packet,
upstream_update=self.update,
upstream_store=self.store)
else:
self.out.put(packet)
self.packets_sent += 1
if self.debug:
print(packet)
def put(self, packet, upstream_update=None, upstream_store=None):
self.packets_received += 1
if self.zero_buffer and upstream_update is not None and upstream_store is not None:
self.upstream_stores[packet] = upstream_store
self.upstream_updates[packet] = upstream_update
if self.zero_downstream_buffer:
self.downstream_stores.put(packet)
return self.store.put(packet)
# todo: two rate token bucket | 35.173554 | 95 | 0.56109 |
9d9969b7aab1ac38d2346f26ad69bde4c77e0878 | 5,281 | py | Python | App/wizard/prefs/chat_archives.py | Wizard-collab/wizard | c2ec623fe011626716493c232b895fb0513f68ff | [
"MIT"
] | null | null | null | App/wizard/prefs/chat_archives.py | Wizard-collab/wizard | c2ec623fe011626716493c232b895fb0513f68ff | [
"MIT"
] | null | null | null | App/wizard/prefs/chat_archives.py | Wizard-collab/wizard | c2ec623fe011626716493c232b895fb0513f68ff | [
"MIT"
] | null | null | null | from wizard.prefs.main import prefs
from wizard.tools import log
from wizard.vars import defaults
from wizard.tools import utility as utils
import importlib
importlib.reload(defaults)
import yaml
import os
import shutil
import sys
prefs = prefs()
logger = log.pipe_log(__name__)
class chat_archives():
def __init__(self):
self.database = utils.database()
self.init_shared_folder()
self.users = prefs.project_users
if self.is_file():
self.read()
else:
self.init_settings()
def init_settings(self):
self.archives_dic = dict()
self.archives_dic[defaults._chat_messages_] = dict()
self.archives_dic[defaults._chat_rooms_] = dict()
self.archives_dic[defaults._chat_rooms_messages_] = dict()
self.write()
def add_message(self, message_key, msg_dic):
self.read()
self.archives_dic[defaults._chat_messages_][message_key] = msg_dic
self.add_msg_id_to_room(message_key, msg_dic)
self.write()
def remove_message(self, message_key):
self.read()
if message_key in self.archives_dic[defaults._chat_messages_].keys():
del self.archives_dic[defaults._chat_messages_][message_key]
self.remove_msg_id_from_room(message_key)
self.write()
def add_msg_id_to_room(self, msg_id, msg_dic):
if msg_dic[defaults._chat_destination_] in self.users:
key = self.get_user_couple_key(msg_dic[defaults._chat_destination_])
else:
key = msg_dic[defaults._chat_destination_]
if key not in self.archives_dic[defaults._chat_rooms_messages_].keys():
self.archives_dic[defaults._chat_rooms_messages_][key] = list()
self.archives_dic[defaults._chat_rooms_messages_][key].append(msg_id)
def remove_msg_id_from_room(self, message_key):
for room in self.archives_dic[defaults._chat_rooms_messages_].keys():
if message_key in self.archives_dic[defaults._chat_rooms_messages_][room]:
self.archives_dic[defaults._chat_rooms_messages_][room].remove(message_key)
def get_user_couple_key(self, user):
user_couple = [user, prefs.user]
user_couple.sort()
user_couple_key = '|'.join(user_couple)
return user_couple_key
def create_room(self, room_name):
self.read()
if room_name not in self.archives_dic[defaults._chat_rooms_]:
self.archives_dic[defaults._chat_rooms_][room_name] = dict()
self.archives_dic[defaults._chat_rooms_][room_name][defaults._users_list_key_] = []
self.write()
return 1
else:
logger.warning("Room already exists")
return None
def init_shared_folder(self):
self.shared_folder = os.path.join(prefs.project_path, defaults._shared_folder_)
if not os.path.isdir(self.shared_folder):
os.makedirs(self.shared_folder)
def add_file_to_shared(self, file):
if os.path.isfile(file):
filename = os.path.basename(file)
#filename, extension = os.path.splitext(filename)
#new_filename = "{}{}".format(utils.random_string(), extension)
new_file = utils.get_filename_without_override(os.path.join(self.shared_folder, filename))
print(new_file)
shutil.copyfile(file, new_file)
return new_file
else:
logger.warning("The file doesn't exists")
return None
def get_messages(self):
if self.read():
return self.archives_dic[defaults._chat_messages_]
else:
return None
def get_room_last_ids(self, room, number=10):
if room not in self.users:
key = room
else:
key = self.get_user_couple_key(room)
if key in self.archives_dic[defaults._chat_rooms_messages_].keys():
return self.archives_dic[defaults._chat_rooms_messages_][key][-number:]
else:
return []
def get_rooms(self):
if self.read():
return list(self.archives_dic[defaults._chat_rooms_].keys())
else:
return None
def get_current_project_chat_archives_file(self):
# Get the pref file from the current project
# (from user prefs)
project_path = prefs.project_path
if project_path:
file = os.path.join(project_path, defaults._chat_archives_)
# Write the pref_path in console (debug level)
logger.debug(file)
return file
else:
return 0
def write(self):
self.database.write(0, self.file, self.archives_dic)
def read(self):
if self.is_file():
self.archives_dic = self.database.read(0, self.file)
return 1
else:
logger.warning("Prefs file doesn't exist : {}".format(os.path.abspath(self.file)))
return None
def is_file(self):
self.file = self.get_current_project_chat_archives_file()
# Check the pref_file presence
if self.database.isfile(2, self.file):
return 1
else:
logger.warning("Can't find chat archives")
return 0
| 33.852564 | 102 | 0.641356 |
fd019416b6983cbb3b56fe36d6ec6d4e82954782 | 1,039 | py | Python | Dependencies/pop_info_generator.py | laneatmore/model_admix | dc1a894700fea38f21eb17df269d7a87a7f272ea | [
"MIT"
] | null | null | null | Dependencies/pop_info_generator.py | laneatmore/model_admix | dc1a894700fea38f21eb17df269d7a87a7f272ea | [
"MIT"
] | null | null | null | Dependencies/pop_info_generator.py | laneatmore/model_admix | dc1a894700fea38f21eb17df269d7a87a7f272ea | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
#generates pop id's
import subprocess
import sys
import os
import fileinput
os.chdir("../Admixture/")
N1 = int(sys.argv[1])
N2 = int(sys.argv[2])
N3 = int(sys.argv[3])
def new_file(N1, N2, N3):
if (len(sys.argv) < 4):
print("not enough arguments")
exit()
elif (len(sys.argv) > 5):
print("too many arguments")
exit()
else:
pass
with open("population_information.txt", "w") as file:
file.write(str("ID col2 col3 POP REGION\n"))
for i in range(N1):
my_id = "msp_" + str(i)
file.write(my_id + str(" 0 PAR1 0 PAR1\n"))
for i in range(N2):
my_id2 = "msp_" + str(N1 + i)
file.write(my_id2 + str(" 1 PAR2 1 PAR2\n"))
for i in range(N3):
my_id3 = "msp_" + str(N1 + N2 + i)
file.write(my_id3 + str(" 2 ADM 2 ADM\n"))
with fileinput.FileInput("population_information.txt", inplace = True,
backup=".bak") as file:
for line in file:
print(line.replace("msp_0", "msp_00"), end='')
new_file(N1, N2, N3)
| 22.586957 | 71 | 0.588065 |
be8d4d06e5dc1740ab9480f626411f823bfbba69 | 14,287 | py | Python | sympy/combinatorics/tests/test_permutations.py | eriknw/sympy | b7544e2bb74c011f6098a7e886fd77f41776c2c4 | [
"BSD-3-Clause"
] | 7 | 2015-01-14T06:55:33.000Z | 2018-08-11T14:43:52.000Z | sympy/combinatorics/tests/test_permutations.py | pbeltran/sympy-1 | 94f92b36731c2bebe6de1037c063c2a258a8a399 | [
"BSD-3-Clause"
] | 1 | 2018-02-19T04:56:04.000Z | 2018-02-19T04:56:04.000Z | sympy/combinatorics/tests/test_permutations.py | pbeltran/sympy-1 | 94f92b36731c2bebe6de1037c063c2a258a8a399 | [
"BSD-3-Clause"
] | 1 | 2016-04-24T14:39:22.000Z | 2016-04-24T14:39:22.000Z | from sympy.combinatorics.permutations import (Permutation, _af_parity,
_af_rmul, _af_rmuln, Cycle)
from sympy.core.compatibility import permutations
from sympy.utilities.pytest import raises
rmul = Permutation.rmul
def test_Permutation():
# don't auto fill 0
raises(ValueError, lambda: Permutation([1]))
p = Permutation([0, 1, 2, 3])
# call as bijective
assert [p(i) for i in range(p.size)] == list(p)
# call as operator
assert p(range(p.size)) == list(p)
# call as function
assert list(p(1, 2)) == [0, 2, 1, 3]
# conversion to list
assert list(p) == range(4)
assert Permutation(size=4) == Permutation(3)
assert Permutation(Permutation(3), size=5) == Permutation(4)
# cycle form with size
assert Permutation([[1, 2]], size=4) == Permutation([[1, 2], [0], [3]])
# random generation
assert Permutation.random(2) in (Permutation([1, 0]), Permutation([0, 1]))
p = Permutation([2, 5, 1, 6, 3, 0, 4])
q = Permutation([[1], [0, 3, 5, 6, 2, 4]])
assert len(set([p, p])) == 1
r = Permutation([1, 3, 2, 0, 4, 6, 5])
ans = Permutation(_af_rmuln(*[w.array_form for w in (p, q, r)])).array_form
assert rmul(p, q, r).array_form == ans
# make sure no other permutation of p, q, r could have given
# that answer
for a, b, c in permutations((p, q, r)):
if (a, b, c) == (p, q, r):
continue
assert rmul(a, b, c).array_form != ans
assert p.support() == range(7)
assert q.support() == [0, 2, 3, 4, 5, 6]
assert Permutation(p.cyclic_form).array_form == p.array_form
assert p.cardinality == 5040
assert q.cardinality == 5040
assert q.cycles == 2
assert rmul(q, p) == Permutation([4, 6, 1, 2, 5, 3, 0])
assert rmul(p, q) == Permutation([6, 5, 3, 0, 2, 4, 1])
assert _af_rmul(p.array_form, q.array_form) == \
[6, 5, 3, 0, 2, 4, 1]
assert rmul(Permutation([[1, 2, 3], [0, 4]]),
Permutation([[1, 2, 4], [0], [3]])).cyclic_form == \
[[0, 4, 2], [1, 3]]
assert q.array_form == [3, 1, 4, 5, 0, 6, 2]
assert q.cyclic_form == [[0, 3, 5, 6, 2, 4]]
assert q.full_cyclic_form == [[0, 3, 5, 6, 2, 4], [1]]
assert p.cyclic_form == [[0, 2, 1, 5], [3, 6, 4]]
t = p.transpositions()
assert t == [(0, 5), (0, 1), (0, 2), (3, 4), (3, 6)]
assert Permutation.rmul(*[Permutation(Cycle(*ti)) for ti in (t)])
assert Permutation([1, 0]).transpositions() == [(0, 1)]
assert p**13 == p
assert q**0 == Permutation(range(q.size))
assert q**-2 == ~q**2
assert q**2 == Permutation([5, 1, 0, 6, 3, 2, 4])
assert q**3 == q**2*q
assert q**4 == q**2*q**2
a = Permutation(1, 3)
b = Permutation(2, 0, 3)
I = Permutation(3)
assert ~a == a**-1
assert a*~a == I
assert a*b**-1 == a*~b
ans = Permutation(0, 5, 3, 1, 6)(2, 4)
assert (p + q.rank()).rank() == ans.rank()
assert (p + q.rank())._rank == ans.rank()
assert (q + p.rank()).rank() == ans.rank()
raises(TypeError, lambda: p + Permutation(range(10)))
assert (p - q.rank()).rank() == Permutation(0, 6, 3, 1, 2, 5, 4).rank()
assert p.rank() - q.rank() < 0 # for coverage: make sure mod is used
assert (q - p.rank()).rank() == Permutation(1, 4, 6, 2)(3, 5).rank()
assert p*q == Permutation(_af_rmuln(*[list(w) for w in (q, p)]))
assert p*Permutation([]) == p
assert Permutation([])*p == p
assert p*Permutation([[0, 1]]) == Permutation([2, 5, 0, 6, 3, 1, 4])
assert Permutation([[0, 1]])*p == Permutation([5, 2, 1, 6, 3, 0, 4])
pq = p ^ q
assert pq == Permutation([5, 6, 0, 4, 1, 2, 3])
assert pq == rmul(q, p, ~q)
qp = q ^ p
assert qp == Permutation([4, 3, 6, 2, 1, 5, 0])
assert qp == rmul(p, q, ~p)
raises(ValueError, lambda: p ^ Permutation([]))
assert p.commutator(q) == Permutation(0, 1, 3, 4, 6, 5, 2)
assert q.commutator(p) == Permutation(0, 2, 5, 6, 4, 3, 1)
assert p.commutator(q) == ~q.commutator(p)
raises(ValueError, lambda: p.commutator(Permutation([])))
assert len(p.atoms()) == 7
assert q.atoms() == set([0, 1, 2, 3, 4, 5, 6])
assert p.inversion_vector() == [2, 4, 1, 3, 1, 0]
assert q.inversion_vector() == [3, 1, 2, 2, 0, 1]
assert Permutation.from_inversion_vector(p.inversion_vector()) == p
assert Permutation.from_inversion_vector(q.inversion_vector()).array_form\
== q.array_form
raises(ValueError, lambda: Permutation.from_inversion_vector([0, 2]))
assert Permutation([i for i in range(500, -1, -1)]).inversions() == 125250
s = Permutation([0, 4, 1, 3, 2])
assert s.parity() == 0
_ = s.cyclic_form # needed to create a value for _cyclic_form
assert len(s._cyclic_form) != s.size and s.parity() == 0
assert not s.is_odd
assert s.is_even
assert Permutation([0, 1, 4, 3, 2]).parity() == 1
assert _af_parity([0, 4, 1, 3, 2]) == 0
assert _af_parity([0, 1, 4, 3, 2]) == 1
s = Permutation([0])
assert s.is_Singleton
assert Permutation([]).is_Empty
r = Permutation([3, 2, 1, 0])
assert (r**2).is_Identity
assert rmul(~p, p).is_Identity
assert (~p)**13 == Permutation([5, 2, 0, 4, 6, 1, 3])
assert ~(r**2).is_Identity
assert p.max() == 6
assert p.min() == 0
q = Permutation([[6], [5], [0, 1, 2, 3, 4]])
assert q.max() == 4
assert q.min() == 0
p = Permutation([1, 5, 2, 0, 3, 6, 4])
q = Permutation([[1, 2, 3, 5, 6], [0, 4]])
assert p.ascents() == [0, 3, 4]
assert q.ascents() == [1, 2, 4]
assert r.ascents() == []
assert p.descents() == [1, 2, 5]
assert q.descents() == [0, 3, 5]
assert Permutation(r.descents()).is_Identity
assert p.inversions() == 7
# test the merge-sort with a longer permutation
big = list(p) + list(range(p.max() + 1, p.max() + 130))
assert Permutation(big).inversions() == 7
assert p.signature() == -1
assert q.inversions() == 11
assert q.signature() == -1
assert rmul(p, ~p).inversions() == 0
assert rmul(p, ~p).signature() == 1
assert p.order() == 6
assert q.order() == 10
assert (p**(p.order())).is_Identity
assert p.length() == 6
assert q.length() == 7
assert r.length() == 4
assert p.runs() == [[1, 5], [2], [0, 3, 6], [4]]
assert q.runs() == [[4], [2, 3, 5], [0, 6], [1]]
assert r.runs() == [[3], [2], [1], [0]]
assert p.index() == 8
assert q.index() == 8
assert r.index() == 3
assert p.get_precedence_distance(q) == q.get_precedence_distance(p)
assert p.get_adjacency_distance(q) == p.get_adjacency_distance(q)
assert p.get_positional_distance(q) == p.get_positional_distance(q)
p = Permutation([0, 1, 2, 3])
q = Permutation([3, 2, 1, 0])
assert p.get_precedence_distance(q) == 6
assert p.get_adjacency_distance(q) == 3
assert p.get_positional_distance(q) == 8
p = Permutation([0, 3, 1, 2, 4])
q = Permutation.josephus(4, 5, 2)
assert p.get_adjacency_distance(q) == 3
raises(ValueError, lambda: p.get_adjacency_distance(Permutation([])))
raises(ValueError, lambda: p.get_positional_distance(Permutation([])))
raises(ValueError, lambda: p.get_precedence_distance(Permutation([])))
a = [Permutation.unrank_nonlex(4, i) for i in range(5)]
iden = Permutation([0, 1, 2, 3])
for i in range(5):
for j in range(i + 1, 5):
assert a[i].commutes_with(a[j]) == \
(rmul(a[i], a[j]) == rmul(a[j], a[i]))
if a[i].commutes_with(a[j]):
assert a[i].commutator(a[j]) == iden
assert a[j].commutator(a[i]) == iden
a = Permutation(3)
b = Permutation(0, 6, 3)(1, 2)
assert a.cycle_structure == {1: 4}
assert b.cycle_structure == {2: 1, 3: 1, 1: 2}
def test_josephus():
assert Permutation.josephus(4, 6, 1) == Permutation([3, 1, 0, 2, 5, 4])
assert Permutation.josephus(1, 5, 1).is_Identity
def test_ranking():
    """Exercise lexicographic, Trotter-Johnson and non-lex rank/unrank."""
    # --- lexicographic rank/unrank round-trips ---
    assert Permutation.unrank_lex(5, 10).rank() == 10
    p = Permutation.unrank_lex(15, 225)
    assert p.rank() == 225
    p1 = p.next_lex()
    assert p1.rank() == 226
    assert Permutation.unrank_lex(15, 225).rank() == 225
    assert Permutation.unrank_lex(10, 0).is_Identity
    p = Permutation.unrank_lex(4, 23)
    assert p.rank() == 23
    assert p.array_form == [3, 2, 1, 0]
    # rank 23 is the last size-4 permutation, so there is no successor
    assert p.next_lex() is None
    # --- Trotter-Johnson ranking ---
    p = Permutation([1, 5, 2, 0, 3, 6, 4])
    q = Permutation([[1, 2, 3, 5, 6], [0, 4]])
    a = [Permutation.unrank_trotterjohnson(4, i).array_form for i in range(5)]
    assert a == [[0, 1, 2, 3], [0, 1, 3, 2], [0, 3, 1, 2], [3, 0, 1,
        2], [3, 0, 2, 1] ]
    # NOTE(review): list == range(...) only holds on Python 2; on Python 3
    # this would need list(range(5)).
    assert [Permutation(pa).rank_trotterjohnson() for pa in a] == range(5)
    assert Permutation([0, 1, 2, 3]).next_trotterjohnson() == \
        Permutation([0, 1, 3, 2])
    assert q.rank_trotterjohnson() == 2283
    assert p.rank_trotterjohnson() == 3389
    assert Permutation([1, 0]).rank_trotterjohnson() == 1
    # next_lex and next_trotterjohnson must enumerate the same set
    a = Permutation(range(3))
    b = a
    l = []
    tj = []
    for i in range(6):
        l.append(a)
        tj.append(b)
        a = a.next_lex()
        b = b.next_trotterjohnson()
    # both enumerations are exhausted after all 3! = 6 permutations
    assert a == b is None
    assert set([tuple(a) for a in l]) == set([tuple(a) for a in tj])
    # --- non-lexicographic ranking ---
    p = Permutation([2, 5, 1, 6, 3, 0, 4])
    q = Permutation([[6], [5], [0, 1, 2, 3, 4]])
    assert p.rank() == 1964
    assert q.rank() == 870
    assert Permutation([]).rank_nonlex() == 0
    prank = p.rank_nonlex()
    assert prank == 1600
    assert Permutation.unrank_nonlex(7, 1600) == p
    qrank = q.rank_nonlex()
    assert qrank == 41
    assert Permutation.unrank_nonlex(7, 41) == Permutation(q.array_form)
    a = [Permutation.unrank_nonlex(4, i).array_form for i in range(24)]
    assert a == [
        [1, 2, 3, 0], [3, 2, 0, 1], [1, 3, 0, 2], [1, 2, 0, 3], [2, 3, 1, 0],
        [2, 0, 3, 1], [3, 0, 1, 2], [2, 0, 1, 3], [1, 3, 2, 0], [3, 0, 2, 1],
        [1, 0, 3, 2], [1, 0, 2, 3], [2, 1, 3, 0], [2, 3, 0, 1], [3, 1, 0, 2],
        [2, 1, 0, 3], [3, 2, 1, 0], [0, 2, 3, 1], [0, 3, 1, 2], [0, 2, 1, 3],
        [3, 1, 2, 0], [0, 3, 2, 1], [0, 1, 3, 2], [0, 1, 2, 3]]
    # folding products one at a time must agree with rmul_with_af
    N = 10
    p1 = Permutation(a[0])
    for i in range(1, N+1):
        p1 = p1*Permutation(a[i])
    p2 = Permutation.rmul_with_af(*[Permutation(h) for h in a[N::-1]])
    assert p1 == p2
    # next_nonlex walks to the identity and then returns None
    ok = []
    p = Permutation([1, 0])
    for i in range(3):
        ok.append(p.array_form)
        p = p.next_nonlex()
        if p is None:
            ok.append(None)
            break
    assert ok == [[1, 0], [0, 1], None]
    assert Permutation([3, 2, 0, 1]).next_nonlex() == Permutation([1, 3, 0, 2])
    # NOTE(review): same Python-2-only list == range comparison as above.
    assert [Permutation(pa).rank_nonlex() for pa in a] == range(24)
def test_mul():
    """Test permutation products: _af_rmul/_af_rmuln on array forms and
    Permutation.rmul on Permutation/cycle arguments."""
    a, b = [0, 2, 1, 3], [0, 1, 3, 2]
    assert _af_rmul(a, b) == [0, 2, 3, 1]
    assert _af_rmuln(a, b, range(4)) == [0, 2, 3, 1]
    assert rmul(Permutation(a), Permutation(b)).array_form == [0, 2, 3, 1]
    a = Permutation([0, 2, 1, 3])
    b = (0, 1, 3, 2)
    c = (3, 1, 2, 0)
    assert Permutation.rmul(a, b, c) == Permutation([1, 2, 3, 0])
    assert Permutation.rmul(a, c) == Permutation([3, 2, 1, 0])
    # the first argument must be a Permutation, not a raw tuple
    raises(TypeError, lambda: Permutation.rmul(b, c))
    # incremental products must agree with _af_rmuln over a prefix
    n = 6
    m = 8
    a = [Permutation.unrank_nonlex(n, i).array_form for i in range(m)]
    h = range(n)
    for i in range(m):
        h = _af_rmul(h, a[i])
        h2 = _af_rmuln(*a[:i + 1])
        assert h == h2
def test_args():
    """Test the accepted constructor argument forms for Permutation and the
    lazy population of _cyclic_form / _array_form."""
    p = Permutation([(0, 3, 1, 2), (4, 5)])
    # cyclic form is computed lazily; only array form is built eagerly
    assert p._cyclic_form is None
    assert Permutation(p) == p
    assert p.cyclic_form == [[0, 3, 1, 2], [4, 5]]
    assert p._array_form == [3, 2, 0, 1, 5, 4]
    p = Permutation((0, 3, 1, 2))
    assert p._cyclic_form is None
    assert p._array_form == [0, 3, 1, 2]
    # lists and tuples are interchangeable in both array and cyclic notation
    assert Permutation([0]) == Permutation((0, ))
    assert Permutation([[0], [1]]) == Permutation(((0, ), (1, ))) == \
        Permutation(((0, ), [1]))
    assert Permutation([[1, 2]]) == Permutation([0, 2, 1])
    assert Permutation([[1], [4, 2]]) == Permutation([0, 1, 4, 3, 2])
    # a size smaller than the largest moved element is ignored
    assert Permutation([[1], [4, 2]], size=1) == Permutation([0, 1, 4, 3, 2])
    assert Permutation(
        [[1], [4, 2]], size=6) == Permutation([0, 1, 4, 3, 2, 5])
    assert Permutation([], size=3) == Permutation([0, 1, 2])
    assert Permutation(3).list(5) == [0, 1, 2, 3, 4]
    # list(-1) trims trailing fixed points
    assert Permutation(3).list(-1) == []
    assert Permutation(5)(1, 2).list(-1) == [0, 2, 1]
    assert Permutation(5)(1, 2).list() == [0, 2, 1, 3, 4, 5]
    raises(TypeError, lambda: Permutation([1, 2], [0]))
    # enclosing brackets needed
    raises(ValueError, lambda: Permutation([[1, 2], 0]))
    # enclosing brackets needed on 0
    raises(ValueError, lambda: Permutation([1, 1, 0]))
    raises(ValueError, lambda: Permutation([[1], [1, 2]]))
    raises(ValueError, lambda: Permutation([4, 5], size=10)) # where are 0-3?
    # but this is ok because cycles imply that only those listed moved
    assert Permutation(4, 5) == Permutation([0, 1, 2, 3, 5, 4])
def test_Cycle():
    """Test construction, composition, str() and list() of Cycle objects."""
    assert str(Cycle()) == 'Cycle()'
    assert Cycle(Cycle(1,2)) == Cycle(1, 2)
    assert Cycle(1,2).copy() == Cycle(1,2)
    assert list(Cycle(1, 3, 2)) == [0, 3, 1, 2]
    # calling a Cycle composes it with another cycle
    assert Cycle(1, 2)(2, 3) == Cycle(1, 3, 2)
    assert Cycle(1, 2)(2, 3)(4, 5) == Cycle(1, 3, 2)(4, 5)
    # NOTE(review): the comma makes this ``assert expr, msg`` -- it only
    # checks that cyclic_form is truthy and uses Cycle(0, 2, 1) as the
    # failure message; an ``==`` comparison was most likely intended.
    assert Permutation(Cycle(1, 2)(2, 1, 0, 3)).cyclic_form, Cycle(0, 2, 1)
    # an empty Cycle has no implied size, so list() needs an argument
    raises(ValueError, lambda: Cycle().list())
    assert Cycle(1, 2).list() == [0, 2, 1]
    assert Cycle(1, 2).list(4) == [0, 2, 1, 3]
    assert Permutation(Cycle(1, 2), size=4) == \
        Permutation([0, 2, 1, 3])
    assert str(Cycle(1, 2)(4, 5)) == 'Cycle(1, 2)(4, 5)'
    assert str(Cycle(1, 2)) == 'Cycle(1, 2)'
    assert Cycle(Permutation(range(3))) == Cycle()
    assert Cycle(1, 2).list() == [0, 2, 1]
    assert Cycle(1, 2).list(4) == [0, 2, 1, 3]
    # cycle elements are given as separate args, not as a tuple
    raises(TypeError, lambda: Cycle((1, 2)))
    raises(ValueError, lambda: Cycle(1, 2, 1))
    raises(TypeError, lambda: Cycle(1, 2)*{})
    # check round-trip
    p = Permutation([[1, 2], [4, 3]], size=5)
    assert Permutation(Cycle(p)) == p
def test_from_sequence():
    # from_sequence returns the permutation that sorts the given sequence,
    # optionally under a key function.
    assert Permutation.from_sequence('SymPy') == Permutation(4)(0, 1, 3)
    assert Permutation.from_sequence('SymPy', key=lambda x: x.lower()) == \
        Permutation(4)(0, 2)(1, 3)
| 37.012953 | 79 | 0.55407 |
207d6089194d03444b180ce00eccc57512d2163b | 471 | py | Python | celery/tests/test_messaging.py | gthb/celery | 13057dc69a6ecda6aabfae7b7640d971176251fb | [
"BSD-3-Clause"
] | 2 | 2017-05-24T13:03:30.000Z | 2017-09-04T08:24:19.000Z | celery/tests/test_messaging.py | winhamwr/celery | 249a270301ddb9b025cf8d00400bb442df9cae62 | [
"BSD-3-Clause"
] | null | null | null | celery/tests/test_messaging.py | winhamwr/celery | 249a270301ddb9b025cf8d00400bb442df9cae62 | [
"BSD-3-Clause"
] | null | null | null | import unittest2 as unittest
from celery.messaging import MSG_OPTIONS, extract_msg_options
class TestMsgOptions(unittest.TestCase):
def test_MSG_OPTIONS(self):
self.assertTrue(MSG_OPTIONS)
def test_extract_msg_options(self):
testing = {"mandatory": True, "routing_key": "foo.xuzzy"}
result = extract_msg_options(testing)
self.assertEqual(result["mandatory"], True)
self.assertEqual(result["routing_key"], "foo.xuzzy")
| 29.4375 | 65 | 0.719745 |
0d3d42ea4b8a2194ff75ec28f237ef8282644653 | 4,109 | py | Python | gossamer/integration.py | ijl/gossamer | 1b9608827b1d651f9748010a8c771e956340eb6e | [
"Apache-2.0"
] | 3 | 2016-02-10T13:33:55.000Z | 2019-01-04T11:37:47.000Z | gossamer/integration.py | ijl/gossamer | 1b9608827b1d651f9748010a8c771e956340eb6e | [
"Apache-2.0"
] | null | null | null | gossamer/integration.py | ijl/gossamer | 1b9608827b1d651f9748010a8c771e956340eb6e | [
"Apache-2.0"
] | null | null | null | """
Integrate with unittest.
"""
# Copyright (c) 2013 contributors; see AUTHORS.
# Licensed under the Apache License, Version 2.0
# https://www.apache.org/licenses/LICENSE-2.0
import unittest
from gossamer.main import dispatch
from gossamer.constant import modes, states, DEFAULT_WEBDRIVER
from gossamer import util, exc
def run_gossamerfile(
        client_locals, gossamerfile, data_dir,
        selenium=None, skip_allowed=True, rewrite_url=None
        ): # pylint: disable=R0913
    """
    Call this to read one or more Gossamerfiles and run all of their tests.
    It will mutate the locals() passed to it by the inclusion of a complete
    `unittest.TestCase` for every test in the given Gossamerfiles. Test
    runners will then automatically add your tests.

    Parameters:
        client_locals, locals() dictionary:
            `locals()` of the module from which the func is being called.
        gossamerfile, {str, list, tuple, dict}:
            Location to one or more Gossamerfiles.
        data_dir:
            The data directory containing the recorded tests and screenshots.
        selenium (optional), str:
            If provided, the Selenium Server URL to use instead of that in
            the recorded tests. Use this to change servers with environments.
        skip_allowed (optional), bool:
            If true, if Selenium Server is not running,
            unittest will skip this test; if false, it will error. Default
            true.
        rewrite_url (optional), callable:
            If given, test URLs will be rewritten according to the provided
            callable. This callable should accept a single parameter, a string,
            which is the URL in the recorded test. Use this to change the
            environment used. E.g., lambda x: x.replace('http://dev.', 'http://ci.').
    """
    # normalize to a list of Gossamerfile paths (note: `unicode` makes
    # this Python-2-only code)
    if isinstance(gossamerfile, (str, unicode)):
        gossamerfile = [gossamerfile]
    elif isinstance(gossamerfile, dict):
        gossamerfile = [val for _, val in gossamerfile.items()]
    selenium = selenium or DEFAULT_WEBDRIVER
    # probe the Selenium Server once, up front, for all generated cases
    driver_ok = util.check_driver(selenium)
    tests = util.make_tests(
        gossamerfile, modes.PLAYBACK, data_dir, rewrite_url=rewrite_url
    )
    for key, test in tests.items():
        # clone GossamerTestCase so every test gets its own class object
        # with its own class-level configuration
        case = type(
            'GossamerTestCase',
            GossamerTestCase.__bases__,
            dict(GossamerTestCase.__dict__)
        )
        case._skip_allowed = skip_allowed # pylint: disable=W0212
        case._driver_ok = driver_ok # pylint: disable=W0212
        case._args = (test, test.settings.browser, selenium)
        case._gossamer_test = test # pylint: disable=W0212
        case.runTest.__func__.__doc__ = test.settings.desc or test.settings.name # pylint: disable=E1101,C0301
        client_locals['GossamerTest_%s' % key] = case
    return True
class GossamerTestCase(unittest.TestCase): # pylint: disable=R0904
    """
    unittest case that replays one recorded Gossamer test.

    The class-level attributes below are injected by `run_gossamerfile`
    onto a per-test clone of this class before the runner instantiates it.
    """

    _skip_allowed = True   # skip (rather than error) when Selenium is down
    _driver_ok = True      # result of the one-time Selenium availability probe
    _gossamer_test = None  # the recorded test this case replays
    _args = ()             # (test, browser, selenium_url)

    def setUp(self):
        # Bug fix: the original called
        # super(GossamerTestCase, GossamerTestCase()).setUp(), which ran
        # setUp on a brand-new throwaway instance instead of this one.
        super(GossamerTestCase, self).setUp()
        if not self._driver_ok:
            if self._skip_allowed:
                self.skipTest("Selenium Server is not available")
            else:
                raise exc.WebDriverConnectionFailed(
                    "Selenium Server is not available"
                )

    def runTest(self):
        """
        Gossamer test
        """
        test, browser, selenium = self._args
        # Bug fix: acquire the driver *before* the try block. Previously
        # get_driver was inside the try, so a failure there made the
        # finally clause reference an unbound local `driver` and masked
        # the real error with a NameError.
        driver = util.get_driver(browser, selenium)
        try:
            result, err = dispatch(
                driver, modes.PLAYBACK, test, output=util.null_writer
            )
            if result == states.FAIL:
                if err is not None:
                    self.fail(str(err))
                else:
                    self.fail('Screenshots were different.')
            elif result == states.ERROR:
                if err is not None:
                    raise err
                raise exc.TestError()  # todo
        finally:
            util.close_driver(driver)
| 33.958678 | 110 | 0.621562 |
0335201a4dda7c58be55ec92314276b86ab8b0f3 | 4,122 | py | Python | contrib/opencensus-ext-pyramid/opencensus/ext/pyramid/pyramid_middleware.py | samn/opencensus-python | d8709f141b67f7f5ba011c440b8ba8fb9cbc419a | [
"Apache-2.0"
] | null | null | null | contrib/opencensus-ext-pyramid/opencensus/ext/pyramid/pyramid_middleware.py | samn/opencensus-python | d8709f141b67f7f5ba011c440b8ba8fb9cbc419a | [
"Apache-2.0"
] | null | null | null | contrib/opencensus-ext-pyramid/opencensus/ext/pyramid/pyramid_middleware.py | samn/opencensus-python | d8709f141b67f7f5ba011c440b8ba8fb9cbc419a | [
"Apache-2.0"
] | null | null | null | # Copyright 2017, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from opencensus.ext.pyramid.config import PyramidTraceSettings
from opencensus.trace import attributes_helper
from opencensus.trace import execution_context
from opencensus.trace import span as span_module
from opencensus.trace import tracer as tracer_module
from opencensus.trace import utils
HTTP_METHOD = attributes_helper.COMMON_ATTRIBUTES['HTTP_METHOD']
HTTP_URL = attributes_helper.COMMON_ATTRIBUTES['HTTP_URL']
HTTP_STATUS_CODE = attributes_helper.COMMON_ATTRIBUTES['HTTP_STATUS_CODE']
BLACKLIST_PATHS = 'BLACKLIST_PATHS'
log = logging.getLogger(__name__)
class OpenCensusTweenFactory(object):
    """Pyramid tweens are like wsgi middleware, but have access to things
    like the request, response, and application registry.

    The tween factory is a globally importable callable whose
    constructor takes a request handler and application registry. It
    will be called with a pyramid request object.

    For details on pyramid tweens, see
    https://docs.pylonsproject.org/projects/pyramid/en/latest/narr/hooks.html#creating-a-tween
    """

    def __init__(self, handler, registry):
        """Constructor for the pyramid tween

        :param handler: Either the main Pyramid request handling
            function or another tween
        :type handler: function
        :param registry: The pyramid application registry
        :type registry: :class:`pyramid.registry.Registry`
        """
        self.handler = handler
        self.registry = registry

        settings = PyramidTraceSettings(registry)

        self.sampler = settings.SAMPLER
        self.exporter = settings.EXPORTER
        self.propagator = settings.PROPAGATOR

        # requests whose path matches one of these are never traced
        self._blacklist_paths = settings.params.get(BLACKLIST_PATHS)

    def __call__(self, request):
        # bracket the downstream handler with span start/finish
        self._before_request(request)
        response = self.handler(request)
        self._after_request(request, response)
        return response

    def _before_request(self, request):
        """Start a SERVER span for *request* unless its path is blacklisted.

        Tracing failures are logged, never raised, so the request itself
        is unaffected.
        """
        if utils.disable_tracing_url(request.path, self._blacklist_paths):
            return
        try:
            # continue the trace propagated via incoming headers, if any
            span_context = self.propagator.from_headers(request.headers)
            tracer = tracer_module.Tracer(
                span_context=span_context,
                sampler=self.sampler,
                exporter=self.exporter,
                propagator=self.propagator)
            span = tracer.start_span()
            # Set the span name as the name of the current module name
            span.name = '[{}]{}'.format(
                request.method,
                request.path)
            span.span_kind = span_module.SpanKind.SERVER
            tracer.add_attribute_to_current_span(
                attribute_key=HTTP_METHOD,
                attribute_value=request.method)
            tracer.add_attribute_to_current_span(
                attribute_key=HTTP_URL,
                attribute_value=request.path)
        except Exception: # pragma: NO COVER
            log.error('Failed to trace request', exc_info=True)

    def _after_request(self, request, response):
        """Record the response status code and close the current span."""
        if utils.disable_tracing_url(request.path, self._blacklist_paths):
            return
        try:
            tracer = execution_context.get_opencensus_tracer()
            tracer.add_attribute_to_current_span(
                HTTP_STATUS_CODE,
                str(response.status_code))
            tracer.end_span()
            tracer.finish()
        except Exception: # pragma: NO COVER
            log.error('Failed to trace request', exc_info=True)
59c2c6f59f2a33b256f02f3bc70e9937f422834f | 8,592 | py | Python | nailgun/nailgun/statistics/oswl/helpers.py | dnikishov/fuel-web | 152c2072cf585fc61d7e157ccf9a7ea1d0377daa | [
"Apache-2.0"
] | null | null | null | nailgun/nailgun/statistics/oswl/helpers.py | dnikishov/fuel-web | 152c2072cf585fc61d7e157ccf9a7ea1d0377daa | [
"Apache-2.0"
] | null | null | null | nailgun/nailgun/statistics/oswl/helpers.py | dnikishov/fuel-web | 152c2072cf585fc61d7e157ccf9a7ea1d0377daa | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import six
from cinderclient import client as cinder_client
from keystoneclient import discover as keystone_discover
from keystoneclient.v2_0 import client as keystone_client_v2
from keystoneclient.v3 import client as keystone_client_v3
from novaclient import client as nova_client
from nailgun import consts
from nailgun.db import db
from nailgun import objects
from nailgun.settings import settings
from nailgun.statistics.oswl.resources_description \
import resources_description
from nailgun.statistics import utils
logger = logging.getLogger('statistics')
class ClientProvider(object):
    """Initialize clients for OpenStack component and expose them as attrs"""

    # attribute path, inside each client object, at which the client's
    # API version can be found (consumed via utils.get_nested_attr)
    clients_version_attr_path = {
        "nova": ["client", "version"],
        "cinder": ["client", "version"],
        "keystone": ["version"]
    }

    def __init__(self, cluster):
        self.cluster = cluster
        # clients and credentials are created lazily on first access
        self._nova = None
        self._cinder = None
        self._keystone = None
        self._credentials = None

    @property
    def nova(self):
        # lazily constructed novaclient for this cluster
        if self._nova is None:
            self._nova = nova_client.Client(
                settings.OPENSTACK_API_VERSION["nova"],
                *self.credentials,
                service_type=consts.NOVA_SERVICE_TYPE.compute,
                insecure=True
            )
        return self._nova

    @property
    def cinder(self):
        # lazily constructed cinderclient for this cluster
        if self._cinder is None:
            self._cinder = cinder_client.Client(
                settings.OPENSTACK_API_VERSION["cinder"],
                *self.credentials,
                insecure=True
            )
        return self._cinder

    @property
    def keystone(self):
        # lazily constructed keystone client, version chosen by discovery
        if self._keystone is None:
            # kwargs are universal for v2 and v3 versions of
            # keystone client that are different only in accepting
            # of tenant/project keyword name
            auth_kwargs = {
                "username": self.credentials[0],
                "password": self.credentials[1],
                "tenant_name": self.credentials[2],
                "project_name": self.credentials[2],
                "auth_url": self.credentials[3]
            }
            self._keystone = self._get_keystone_client(auth_kwargs)
        return self._keystone

    def _get_keystone_client(self, auth_creds):
        """Create client based on returned from keystone server version data.

        :param auth_creds: credentials for authentication which also are
                           parameters for client's instance initialization
        :returns: instance of keystone client of appropriate version
        :raises: exception if response from server contains version other than
                 2.x and 3.x
        """
        discover = keystone_discover.Discover(**auth_creds)
        for version_data in discover.version_data():
            version = version_data["version"][0]
            if version <= 2:
                return keystone_client_v2.Client(insecure=True, **auth_creds)
            elif version == 3:
                return keystone_client_v3.Client(insecure=True, **auth_creds)
        raise Exception("Failed to discover keystone version "
                        "for auth_url {0}".format(
                            auth_creds.get("auth_url"))
                        )

    @property
    def credentials(self):
        # (user, password, tenant, auth_url) tuple, computed once per provider
        if self._credentials is None:
            cluster_attrs_editable = \
                objects.Cluster.get_editable_attributes(self.cluster)
            access_data = cluster_attrs_editable.get("workloads_collector")
            if not access_data:
                # in case there is no section for workloads_collector
                # in cluster attributes we try to fallback here to
                # default credential for the cluster. It is not 100%
                # foolproof as user might have changed them at this time
                access_data = cluster_attrs_editable["access"]
            os_user = access_data["user"]["value"]
            os_password = access_data["password"]["value"]
            os_tenant = access_data["tenant"]["value"]
            auth_host = utils.get_mgmt_ip_of_cluster_controller(self.cluster)
            auth_url = "http://{0}:{1}/{2}/".format(
                auth_host, settings.AUTH_PORT,
                settings.OPENSTACK_API_VERSION["keystone"])
            self._credentials = (os_user, os_password, os_tenant, auth_url)
        return self._credentials
def get_info_from_os_resource_manager(client_provider, resource_name):
    """Use OpenStack resource manager to retrieve information about resource

    Utilize clients provided by client_provider instance to retrieve
    data for resource_name, description of which is stored in
    resources_description data structure.

    :param client_provider: objects that provides instances of openstack
                            clients as its attributes
    :param resource_name: string that contains name of resource for which
                          info should be collected from installation
    :returns: data that store collected info
    """
    resource_description = resources_description[resource_name]
    client_name = resource_description["retrieved_from_component"]
    client_inst = getattr(client_provider, client_name)
    client_api_version = utils.get_nested_attr(
        client_inst,
        client_provider.clients_version_attr_path[client_name]
    )
    # pick the per-version description matching the client's actual API
    matched_api = \
        resource_description["supported_api_versions"][client_api_version]
    resource_manager_name = matched_api["resource_manager_name"]
    resource_manager = getattr(client_inst, resource_manager_name)
    attributes_white_list = matched_api["attributes_white_list"]
    additional_display_options = \
        matched_api.get("additional_display_options", {})
    resource_info = _get_data_from_resource_manager(
        resource_manager,
        attributes_white_list,
        additional_display_options
    )
    return resource_info
def _get_data_from_resource_manager(resource_manager, attrs_white_list_rules,
                                    additional_display_options):
    """List all instances via *resource_manager* and collect, per instance,
    only the attributes named by the whitelist rules.

    :returns: list of dicts, one per instance
    """
    data = []
    display_options = {}
    display_options.update(additional_display_options)
    instances_list = resource_manager.list(**display_options)
    for inst in instances_list:
        inst_details = {}
        obj_dict = \
            inst.to_dict() if hasattr(inst, "to_dict") else inst.__dict__
        for rule in attrs_white_list_rules:
            try:
                inst_details[rule.map_to_name] = utils.get_attr_value(
                    rule.path, rule.transform_func, obj_dict
                )
            except KeyError:
                # in case retrieved attribute is highlevel key
                # and is not present in obj_dict KeyError occurs which
                # cannot be handled by get_attr_value function due to
                # its features so we must do it here in order
                # to prevent from situation when whole set data is not
                # collected for particular resource
                logger.info("{0} cannot be collected for the statistic "
                            "as attribute with path {1} is not present in the "
                            "resource manager's data".format(rule.map_to_name,
                                                             rule.path))
        data.append(inst_details)
    return data
def delete_expired_oswl_entries():
    """Remove expired OpenStack workload stats entries from the db.

    Best-effort maintenance task: any failure is logged (never raised)
    and the db session is always disposed of.
    """
    try:
        deleted_rows_count = \
            objects.OpenStackWorkloadStatsCollection.clean_expired_entries()
        db().commit()
        if deleted_rows_count == 0:
            logger.info("There are no expired OSWL entries in db.")
        else:
            # Fix: previously this success message was logged even when
            # nothing was deleted, directly after (and contradicting) the
            # "no expired entries" message above.
            logger.info("Expired OSWL entries are "
                        "successfully cleaned from db")
    except Exception as e:
        logger.exception("Exception while cleaning oswls entries from "
                         "db. Details: {0}".format(six.text_type(e)))
    finally:
        db.remove()
| 35.949791 | 79 | 0.645717 |
dacdb61f1dfa2efdd5d8d6279fb894ab08d82cc5 | 1,215 | py | Python | scripts/parse_eval.py | jeongjuns/yolactpose | 6faea1707731e4a4bfb019856d3667e70238cfc6 | [
"MIT"
] | 1 | 2022-02-12T17:18:55.000Z | 2022-02-12T17:18:55.000Z | scripts/parse_eval.py | jeongjuns/yolactpose | 6faea1707731e4a4bfb019856d3667e70238cfc6 | [
"MIT"
] | null | null | null | scripts/parse_eval.py | jeongjuns/yolactpose | 6faea1707731e4a4bfb019856d3667e70238cfc6 | [
"MIT"
] | null | null | null | import re, sys, os
import matplotlib.pyplot as plt
from matplotlib._color_data import XKCD_COLORS
with open(sys.argv[1], 'r') as f:
txt = f.read()
txt, overall = txt.split('overall performance')
class_names = []
mAP_overall = []
mAP_small = []
mAP_medium = []
mAP_large = []
for class_result in txt.split('evaluate category: ')[1:]:
lines = class_result.split('\n')
class_names.append(lines[0])
def grabMAP(string):
return float(string.split('] = ')[1]) * 100
mAP_overall.append(grabMAP(lines[ 7]))
mAP_small .append(grabMAP(lines[10]))
mAP_medium .append(grabMAP(lines[11]))
mAP_large .append(grabMAP(lines[12]))
mAP_map = {
'small': mAP_small,
'medium': mAP_medium,
'large': mAP_large,
}
if len(sys.argv) > 2:
bars = plt.bar(class_names, mAP_map[sys.argv[2]])
plt.title(sys.argv[2] + ' mAP per class')
else:
bars = plt.bar(class_names, mAP_overall)
plt.title('overall mAP per class')
colors = list(XKCD_COLORS.values())
for idx, bar in enumerate(bars):
# Mmm pseudorandom colors
char_sum = sum([ord(char) for char in class_names[idx]])
bar.set_color(colors[char_sum % len(colors)])
plt.xticks(rotation='vertical')
plt.show()
| 24.3 | 58 | 0.669136 |
142169cb584bbde55a44645f93364bb8508d4638 | 1,659 | py | Python | sdk/python/pulumi_azure_native/devices/v20180401/__init__.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/devices/v20180401/__init__.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/devices/v20180401/__init__.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Export this package's modules as members:
from ._enums import *
from .certificate import *
from .get_certificate import *
from .get_iot_hub_resource import *
from .get_iot_hub_resource_event_hub_consumer_group import *
from .iot_hub_resource import *
from .iot_hub_resource_event_hub_consumer_group import *
from .list_iot_hub_resource_keys import *
from .list_iot_hub_resource_keys_for_key_name import *
from ._inputs import *
from . import outputs
def _register_module():
    """Register this module's resource types with the Pulumi runtime so
    resources can be rehydrated from engine URNs."""
    import pulumi
    from ... import _utilities

    class Module(pulumi.runtime.ResourceModule):
        _version = _utilities.get_semver_version()

        def version(self):
            return Module._version

        def construct(self, name: str, typ: str, urn: str) -> pulumi.Resource:
            # map the Pulumi type token to the concrete resource class
            if typ == "azure-native:devices/v20180401:Certificate":
                return Certificate(name, pulumi.ResourceOptions(urn=urn))
            elif typ == "azure-native:devices/v20180401:IotHubResource":
                return IotHubResource(name, pulumi.ResourceOptions(urn=urn))
            elif typ == "azure-native:devices/v20180401:IotHubResourceEventHubConsumerGroup":
                return IotHubResourceEventHubConsumerGroup(name, pulumi.ResourceOptions(urn=urn))
            else:
                raise Exception(f"unknown resource type {typ}")

    _module_instance = Module()
    pulumi.runtime.register_resource_module("azure-native", "devices/v20180401", _module_instance)


_register_module()
| 37.704545 | 98 | 0.7173 |
5f387e0067f1f5c7c8d25917c8f8ed7ef80654ad | 11,374 | py | Python | st2common/tests/unit/test_util_sandboxing.py | hnanchahal/st2 | 15116250a5eddfb5421102b31cb00b349ef7c763 | [
"Apache-2.0"
] | null | null | null | st2common/tests/unit/test_util_sandboxing.py | hnanchahal/st2 | 15116250a5eddfb5421102b31cb00b349ef7c763 | [
"Apache-2.0"
] | null | null | null | st2common/tests/unit/test_util_sandboxing.py | hnanchahal/st2 | 15116250a5eddfb5421102b31cb00b349ef7c763 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import sys
import unittest
import mock
from oslo_config import cfg
from st2common.constants.pack import SYSTEM_PACK_NAMES
from st2common.util.sandboxing import get_sandbox_path
from st2common.util.sandboxing import get_sandbox_python_path
from st2common.util.sandboxing import get_sandbox_python_path_for_python_action
from st2common.util.sandboxing import get_sandbox_python_binary_path
from st2common.util.sandboxing import clear_virtualenv_prefix
from st2common.util.sandboxing import get_virtualenv_prefix
from st2common.util.sandboxing import set_virtualenv_prefix
import st2tests.config as tests_config
__all__ = [
'SandboxingUtilsTestCase'
]
class SandboxingUtilsTestCase(unittest.TestCase):
maxDiff = None
def setUp(self):
super(SandboxingUtilsTestCase, self).setUp()
# Restore the virtualenv before each test case
set_virtualenv_prefix(self.old_virtualenv_prefix)
@classmethod
def setUpClass(cls):
tests_config.parse_args()
# Store original values so we can restore them in setUp
cls.old_virtualenv_prefix = get_virtualenv_prefix()
@classmethod
def tearDownClass(cls):
set_virtualenv_prefix(cls.old_virtualenv_prefix)
def assertEndsWith(self, string, ending_substr, msg=None):
msg = msg or "'{string}'' does not end with '{ending_substr}'"
try:
assert string.endswith(ending_substr) is True
except AssertionError as e:
print(dir(e))
print(e.args)
e.args = (msg.format(string=string, ending_substr=ending_substr),)
raise e
def test_get_sandbox_python_binary_path(self):
# Non-system content pack, should use pack specific virtualenv binary
result = get_sandbox_python_binary_path(pack='mapack')
expected = os.path.join(cfg.CONF.system.base_path, 'virtualenvs/mapack/bin/python')
self.assertEqual(result, expected)
# System content pack, should use current process (system) python binary
result = get_sandbox_python_binary_path(pack=SYSTEM_PACK_NAMES[0])
self.assertEqual(result, sys.executable)
def test_get_sandbox_path(self):
virtualenv_path = '/home/venv/test'
# Mock the current PATH value
with mock.patch.dict(os.environ, {'PATH': '/home/path1:/home/path2:/home/path3:'}):
result = get_sandbox_path(virtualenv_path=virtualenv_path)
self.assertEqual(result, f'{virtualenv_path}/bin/:/home/path1:/home/path2:/home/path3')
@mock.patch('st2common.util.sandboxing.get_python_lib')
def test_get_sandbox_python_path(self, mock_get_python_lib):
# No inheritance
python_path = get_sandbox_python_path(inherit_from_parent=False,
inherit_parent_virtualenv=False)
self.assertEqual(python_path, ':')
# Inherit python path from current process
# Mock the current process python path
with mock.patch.dict(os.environ, {'PYTHONPATH': ':/data/test1:/data/test2'}):
python_path = get_sandbox_python_path(inherit_from_parent=True,
inherit_parent_virtualenv=False)
self.assertEqual(python_path, ':/data/test1:/data/test2')
# Inherit from current process and from virtualenv (not running inside virtualenv)
clear_virtualenv_prefix()
with mock.patch.dict(os.environ, {'PYTHONPATH': ':/data/test1:/data/test2'}):
python_path = get_sandbox_python_path(inherit_from_parent=True,
inherit_parent_virtualenv=False)
self.assertEqual(python_path, ':/data/test1:/data/test2')
# Inherit from current process and from virtualenv (running inside virtualenv)
sys.real_prefix = '/usr'
mock_get_python_lib.return_value = f'{sys.prefix}/virtualenvtest'
with mock.patch.dict(os.environ, {'PYTHONPATH': ':/data/test1:/data/test2'}):
python_path = get_sandbox_python_path(inherit_from_parent=True,
inherit_parent_virtualenv=True)
self.assertEqual(python_path, f':/data/test1:/data/test2:{sys.prefix}/virtualenvtest')
@mock.patch('os.path.isdir', mock.Mock(return_value=True))
@mock.patch('os.listdir', mock.Mock(return_value=['python3.6']))
@mock.patch('st2common.util.sandboxing.get_python_lib')
def test_get_sandbox_python_path_for_python_action_no_inheritance(self,
mock_get_python_lib):
# No inheritance
python_path = get_sandbox_python_path_for_python_action(pack='dummy_pack',
inherit_from_parent=False,
inherit_parent_virtualenv=False)
actual_path = python_path.strip(':').split(':')
self.assertEqual(len(actual_path), 3)
# First entry should be lib/python3 dir from venv
self.assertEndsWith(actual_path[0], 'virtualenvs/dummy_pack/lib/python3.6')
# Second entry should be python3 site-packages dir from venv
self.assertEndsWith(actual_path[1], 'virtualenvs/dummy_pack/lib/python3.6/site-packages')
# Third entry should be actions/lib dir from pack root directory
self.assertEndsWith(actual_path[2], 'packs/dummy_pack/actions/lib')
    @mock.patch('os.path.isdir', mock.Mock(return_value=True))
    @mock.patch('os.listdir', mock.Mock(return_value=['python3.6']))
    @mock.patch('st2common.util.sandboxing.get_python_lib')
    def test_get_sandbox_python_path_for_python_action_inherit_from_parent_process_only(self,
                                                                                        mock_get_python_lib):
        """Sandbox PYTHONPATH inherits the parent process PYTHONPATH only.

        The pack venv entries come first, followed by the parent process
        entries (including the empty segment from the leading ':').
        """
        # Inherit python path from current process
        # Mock the current process python path
        with mock.patch.dict(os.environ, {'PYTHONPATH': ':/data/test1:/data/test2'}):
            python_path = get_sandbox_python_path(inherit_from_parent=True,
                                                  inherit_parent_virtualenv=False)
            self.assertEqual(python_path, ':/data/test1:/data/test2')
            python_path = get_sandbox_python_path_for_python_action(pack='dummy_pack',
                                                                    inherit_from_parent=True,
                                                                    inherit_parent_virtualenv=False)
            actual_path = python_path.strip(':').split(':')
            self.assertEqual(len(actual_path), 6)
            # First entry should be lib/python3 dir from venv
            self.assertEndsWith(actual_path[0], 'virtualenvs/dummy_pack/lib/python3.6')
            # Second entry should be python3 site-packages dir from venv
            self.assertEndsWith(actual_path[1], 'virtualenvs/dummy_pack/lib/python3.6/site-packages')
            # Third entry should be actions/lib dir from pack root directory
            self.assertEndsWith(actual_path[2], 'packs/dummy_pack/actions/lib')
            # And the rest of the paths from get_sandbox_python_path
            self.assertEqual(actual_path[3], '')
            self.assertEqual(actual_path[4], '/data/test1')
            self.assertEqual(actual_path[5], '/data/test2')
    @mock.patch('os.path.isdir', mock.Mock(return_value=True))
    @mock.patch('os.listdir', mock.Mock(return_value=['python3.6']))
    @mock.patch('st2common.util.sandboxing.get_python_lib')
    def test_get_sandbox_python_path_for_python_action_inherit_from_parent_process_and_venv(self,
                                                                                            mock_get_python_lib):
        """Sandbox PYTHONPATH inherits both the parent PYTHONPATH and virtualenv.

        Exercised twice: first while not running inside a virtualenv (the
        parent venv contributes nothing), then with sys.real_prefix set to
        simulate running inside one (the parent venv site-packages is
        appended last).
        """
        # Inherit from current process and from virtualenv (not running inside virtualenv)
        clear_virtualenv_prefix()
        # Inherit python path from current process
        # Mock the current process python path
        with mock.patch.dict(os.environ, {'PYTHONPATH': ':/data/test1:/data/test2'}):
            python_path = get_sandbox_python_path(inherit_from_parent=True,
                                                  inherit_parent_virtualenv=False)
            self.assertEqual(python_path, ':/data/test1:/data/test2')
            python_path = get_sandbox_python_path_for_python_action(pack='dummy_pack',
                                                                    inherit_from_parent=True,
                                                                    inherit_parent_virtualenv=True)
            actual_path = python_path.strip(':').split(':')
            self.assertEqual(len(actual_path), 6)
            # First entry should be lib/python3 dir from venv
            self.assertEndsWith(actual_path[0], 'virtualenvs/dummy_pack/lib/python3.6')
            # Second entry should be python3 site-packages dir from venv
            self.assertEndsWith(actual_path[1], 'virtualenvs/dummy_pack/lib/python3.6/site-packages')
            # Third entry should be actions/lib dir from pack root directory
            self.assertEndsWith(actual_path[2], 'packs/dummy_pack/actions/lib')
            # And the rest of the paths from get_sandbox_python_path
            self.assertEqual(actual_path[3], '')
            self.assertEqual(actual_path[4], '/data/test1')
            self.assertEqual(actual_path[5], '/data/test2')
        # Inherit from current process and from virtualenv (running inside virtualenv)
        sys.real_prefix = '/usr'
        mock_get_python_lib.return_value = f'{sys.prefix}/virtualenvtest'
        # Inherit python path from current process
        # Mock the current process python path
        with mock.patch.dict(os.environ, {'PYTHONPATH': ':/data/test1:/data/test2'}):
            python_path = get_sandbox_python_path_for_python_action(pack='dummy_pack',
                                                                    inherit_from_parent=True,
                                                                    inherit_parent_virtualenv=True)
            actual_path = python_path.strip(':').split(':')
            self.assertEqual(len(actual_path), 7)
            # First entry should be lib/python3 dir from venv
            self.assertEndsWith(actual_path[0], 'virtualenvs/dummy_pack/lib/python3.6')
            # Second entry should be python3 site-packages dir from venv
            self.assertEndsWith(actual_path[1], 'virtualenvs/dummy_pack/lib/python3.6/site-packages')
            # Third entry should be actions/lib dir from pack root directory
            self.assertEndsWith(actual_path[2], 'packs/dummy_pack/actions/lib')
            # The paths from get_sandbox_python_path
            self.assertEqual(actual_path[3], '')
            self.assertEqual(actual_path[4], '/data/test1')
            self.assertEqual(actual_path[5], '/data/test2')
            # And the parent virtualenv
            self.assertEqual(actual_path[6], f'{sys.prefix}/virtualenvtest')
| 47.991561 | 100 | 0.667399 |
524b8e49dbabb05c867275c1b77cf6f6caecd30d | 27,701 | py | Python | mac/google-cloud-sdk/lib/googlecloudsdk/calliope/base.py | bopopescu/cndw | ee432efef88a4351b355f3d6d5350defc7f4246b | [
"Apache-2.0"
] | null | null | null | mac/google-cloud-sdk/lib/googlecloudsdk/calliope/base.py | bopopescu/cndw | ee432efef88a4351b355f3d6d5350defc7f4246b | [
"Apache-2.0"
] | null | null | null | mac/google-cloud-sdk/lib/googlecloudsdk/calliope/base.py | bopopescu/cndw | ee432efef88a4351b355f3d6d5350defc7f4246b | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*- #
# Copyright 2013 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base classes for calliope commands and groups.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals

import abc
import collections
import collections.abc
from functools import wraps
import itertools
import re
import sys

from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import display
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core.resource import resource_printer

import six
# Category constants used to bucket commands/groups in help output.
AI_AND_MACHINE_LEARNING_CATEGORY = 'AI and Machine Learning'
API_PLATFORM_AND_ECOSYSTEMS_CATEGORY = 'API Platform and Ecosystems'
COMPUTE_CATEGORY = 'Compute'
DATA_ANALYTICS_CATEGORY = 'Data Analytics'
DATABASES_CATEGORY = 'Databases'
IDENTITY_AND_SECURITY_CATEGORY = 'Identity and Security'
INTERNET_OF_THINGS_CATEGORY = 'Internet of Things'
MANAGEMENT_TOOLS_CATEGORY = 'Management Tools'
MOBILE_CATEGORY = 'Mobile'
NETWORKING_CATEGORY = 'Networking'
SDK_TOOLS_CATEGORY = 'SDK Tools'
DISKS_CATEGORY = 'Disks'
INFO_CATEGORY = 'Info'
INSTANCES_CATEGORY = 'Instances'
LOAD_BALANCING_CATEGORY = 'Load Balancing'
TOOLS_CATEGORY = 'Tools'
STORAGE_CATEGORY = 'Storage'
BILLING_CATEGORY = 'Billing'
SECURITY_CATEGORY = 'Security'
# NOTE: IDENTITY_CATEGORY was previously assigned twice with the same value;
# the redundant second assignment has been removed.
IDENTITY_CATEGORY = 'Identity'
BIG_DATA_CATEGORY = 'Big Data'
CI_CD_CATEGORY = 'CI/CD'
MONITORING_CATEGORY = 'Monitoring'
SOLUTIONS_CATEGORY = 'Solutions'
SERVERLESS_CATEGORY = 'Serverless'
UNCATEGORIZED_CATEGORY = 'Other'

# Common markdown markers used when composing help/notice text below.
MARKDOWN_BOLD = '*'
MARKDOWN_ITALIC = '_'
MARKDOWN_CODE = '`'
class DeprecationException(exceptions.Error):
  """An exception for when a command or group has been deprecated.

  Raised by the Deprecate() decorator when a removed command is executed.
  """
class ReleaseTrack(object):
  """An enum representing the release track of a command or command group.

  The release track controls where a command appears. The default of GA means
  it will show up under gcloud. If you enable a command or group for the alpha,
  beta, or preview tracks, those commands will be duplicated under those groups
  as well.
  """

  class _TRACK(object):
    """An enum representing the release track of a command or command group."""

    # pylint: disable=redefined-builtin
    def __init__(self, id, prefix, help_tag, help_note):
      self.id = id
      self.prefix = prefix
      self.help_tag = help_tag
      self.help_note = help_note

    def __str__(self):
      return self.id

    def __eq__(self, other):
      # Return NotImplemented for foreign types instead of raising
      # AttributeError on other.id, so comparisons against arbitrary
      # objects degrade gracefully.
      if not isinstance(other, ReleaseTrack._TRACK):
        return NotImplemented
      return self.id == other.id

    def __ne__(self, other):
      # Python 2 does not derive != from ==, so define it explicitly.
      result = self.__eq__(other)
      if result is NotImplemented:
        return result
      return not result

    def __hash__(self):
      return hash(self.id)

  GA = _TRACK('GA', None, None, None)
  BETA = _TRACK(
      'BETA', 'beta',
      '{0}(BETA){0} '.format(MARKDOWN_BOLD),
      'This command is currently in BETA and may change without notice.')
  ALPHA = _TRACK(
      'ALPHA', 'alpha',
      '{0}(ALPHA){0} '.format(MARKDOWN_BOLD),
      'This command is currently in ALPHA and may change without notice. '
      'If this command fails with API permission errors despite specifying '
      'the right project, you may be trying to access an API with '
      'an invitation-only early access whitelist.')
  _ALL = [GA, BETA, ALPHA]

  @staticmethod
  def AllValues():
    """Gets all possible enum values.

    Returns:
      list, All the enum values.
    """
    return list(ReleaseTrack._ALL)

  @staticmethod
  def FromPrefix(prefix):
    """Gets a ReleaseTrack from the given release track prefix.

    Args:
      prefix: str, The prefix string that might be a release track name.

    Returns:
      ReleaseTrack, The corresponding object or None if the prefix was not a
      valid release track.
    """
    for track in ReleaseTrack._ALL:
      if track.prefix == prefix:
        return track
    return None

  @staticmethod
  def FromId(id):  # pylint: disable=redefined-builtin
    """Gets a ReleaseTrack from the given release track prefix.

    Args:
      id: str, The id string that must be a release track name.

    Raises:
      ValueError: For unknown release track ids.

    Returns:
      ReleaseTrack, The corresponding object.
    """
    for track in ReleaseTrack._ALL:
      if track.id == id:
        return track
    raise ValueError('Unknown release track id [{}].'.format(id))
class Action(six.with_metaclass(abc.ABCMeta, object)):
  """A class that allows you to save an Action configuration for reuse.

  Captures the args/kwargs destined for parser.add_argument (or add_group)
  so the same definition can be attached to many parsers.
  """
  def __init__(self, *args, **kwargs):
    """Creates the Action.
    Args:
      *args: The positional args to parser.add_argument.
      **kwargs: The keyword args to parser.add_argument.
    """
    self.args = args
    self.kwargs = kwargs
  @property
  def name(self):
    # The first positional arg is the flag/argument name.
    return self.args[0]
  @abc.abstractmethod
  def AddToParser(self, parser):
    """Adds this Action to the given parser.
    Args:
      parser: The argparse parser.
    Returns:
      The result of adding the Action to the parser.
    """
    pass
  def RemoveFromParser(self, parser):
    """Removes this Action from the given parser.
    Args:
      parser: The argparse parser.
    """
    # No-op by default; subclasses that register parser state override this.
    pass
  def SetDefault(self, parser, default):
    """Sets the default value for this Action in the given parser.
    Args:
      parser: The argparse parser.
      default: The default value.
    """
    # No-op by default; subclasses that register parser state override this.
    pass
class ArgumentGroup(Action):
  """Reusable configuration for an argparse argument group.

  Collects child Argument/ArgumentGroup actions and registers all of them
  under a single new group when added to a parser.
  """

  def __init__(self, *args, **kwargs):
    super(ArgumentGroup, self).__init__(*args, **kwargs)
    self.arguments = []

  def AddArgument(self, arg):
    """Records a child action to be added alongside this group."""
    self.arguments.append(arg)

  def AddToParser(self, parser):
    """Adds this argument group to the given parser.

    Args:
      parser: The argparse parser.

    Returns:
      The result of parser.add_argument().
    """
    new_group = self._CreateGroup(parser)
    for child in self.arguments:
      child.AddToParser(new_group)
    return new_group

  def _CreateGroup(self, parser):
    """Creates the underlying argparse group from the saved args/kwargs."""
    return parser.add_group(*self.args, **self.kwargs)
class Argument(Action):
  """A class that allows you to save an argument configuration for reuse."""
  def __GetFlag(self, parser):
    """Returns the flag object in parser."""
    # Search both this parser's own flags and flags inherited from ancestor
    # command groups.
    for flag in itertools.chain(parser.flag_args, parser.ancestor_flag_args):
      if self.name in flag.option_strings:
        return flag
    return None
  def AddToParser(self, parser):
    """Adds this argument to the given parser.
    Args:
      parser: The argparse parser.
    Returns:
      The result of parser.add_argument().
    """
    return parser.add_argument(*self.args, **self.kwargs)
  def RemoveFromParser(self, parser):
    """Removes this flag from the given parser.
    Args:
      parser: The argparse parser.
    """
    flag = self.__GetFlag(parser)
    if flag:
      # Remove the flag and its inverse, if it exists, from its container.
      name = flag.option_strings[0]
      conflicts = [(name, flag)]
      # The inverse boolean form, e.g. --no-foo for --foo.
      no_name = '--no-' + name[2:]
      for no_flag in itertools.chain(parser.flag_args,
                                     parser.ancestor_flag_args):
        if no_name in no_flag.option_strings:
          conflicts.append((no_name, no_flag))
      # pylint: disable=protected-access, argparse, why can't we be friends
      flag.container._handle_conflict_resolve(flag, conflicts)
      # Remove the conflict flags from the calliope argument interceptor.
      for _, flag in conflicts:
        parser.defaults.pop(flag.dest, None)
        if flag.dest in parser.dests:
          parser.dests.remove(flag.dest)
        if flag in parser.flag_args:
          parser.flag_args.remove(flag)
        if flag in parser.arguments:
          parser.arguments.remove(flag)
  def SetDefault(self, parser, default):
    """Sets the default value for this flag in the given parser.
    Args:
      parser: The argparse parser.
      default: The default flag value.
    """
    flag = self.__GetFlag(parser)
    if flag:
      kwargs = {flag.dest: default}
      parser.set_defaults(**kwargs)
      # Update the flag's help text.
      original_help = flag.help
      # If help already documents a default, replace it in place; otherwise
      # append a new "The default is ..." sentence.
      match = re.search(r'(.*The default is ).*?(\.([ \t\n].*))',
                        original_help, re.DOTALL)
      if match:
        new_help = '{}*{}*{}'.format(match.group(1), default, match.group(2))
      else:
        new_help = original_help + ' The default is *{}*.'.format(default)
      flag.help = new_help
# Common flag definitions for consistency.
# Common flag categories.
COMMONLY_USED_FLAGS = 'COMMONLY USED'
FLAGS_FILE_FLAG = Argument(
'--flags-file',
metavar='YAML_FILE',
default=None,
category=COMMONLY_USED_FLAGS,
help="""\
A YAML or JSON file that specifies a *--flag*:*value* dictionary.
Useful for specifying complex flag values with special characters
that work with any command interpreter. Additionally, each
*--flags-file* arg is replaced by its constituent flags. See
$ gcloud topic flags-file for more information.""")
FLATTEN_FLAG = Argument(
'--flatten',
metavar='KEY',
default=None,
type=arg_parsers.ArgList(),
category=COMMONLY_USED_FLAGS,
help="""\
Flatten _name_[] output resource slices in _KEY_ into separate records
for each item in each slice. Multiple keys and slices may be specified.
This also flattens keys for *--format* and *--filter*. For example,
*--flatten=abc.def* flattens *abc.def[].ghi* references to
*abc.def.ghi*. A resource record containing *abc.def[]* with N elements
will expand to N records in the flattened output. This flag interacts
with other flags that are applied in this order: *--flatten*,
*--sort-by*, *--filter*, *--limit*.""")
FORMAT_FLAG = Argument(
'--format',
default=None,
category=COMMONLY_USED_FLAGS,
help="""\
Set the format for printing command output resources. The default is a
command-specific human-friendly output format. The supported formats
are: `{0}`. For more details run $ gcloud topic formats.""".format(
'`, `'.join(resource_printer.SupportedFormats())))
LIST_COMMAND_FLAGS = 'LIST COMMAND'
ASYNC_FLAG = Argument(
'--async',
action='store_true',
dest='async_',
help="""\
Return immediately, without waiting for the operation in progress to
complete.""")
FILTER_FLAG = Argument(
'--filter',
metavar='EXPRESSION',
require_coverage_in_tests=False,
category=LIST_COMMAND_FLAGS,
help="""\
Apply a Boolean filter _EXPRESSION_ to each resource item to be listed.
If the expression evaluates `True`, then that item is listed. For more
details and examples of filter expressions, run $ gcloud topic filters. This
flag interacts with other flags that are applied in this order: *--flatten*,
*--sort-by*, *--filter*, *--limit*.""")
LIMIT_FLAG = Argument(
'--limit',
type=arg_parsers.BoundedInt(1, sys.maxsize, unlimited=True),
require_coverage_in_tests=False,
category=LIST_COMMAND_FLAGS,
help="""\
Maximum number of resources to list. The default is *unlimited*.
This flag interacts with other flags that are applied in this order:
*--flatten*, *--sort-by*, *--filter*, *--limit*.
""")
PAGE_SIZE_FLAG = Argument(
'--page-size',
type=arg_parsers.BoundedInt(1, sys.maxsize, unlimited=True),
require_coverage_in_tests=False,
category=LIST_COMMAND_FLAGS,
help="""\
Some services group resource list output into pages. This flag specifies
the maximum number of resources per page. The default is determined by the
service if it supports paging, otherwise it is *unlimited* (no paging).
Paging may be applied before or after *--filter* and *--limit* depending
on the service.
""")
SORT_BY_FLAG = Argument(
'--sort-by',
metavar='FIELD',
type=arg_parsers.ArgList(),
require_coverage_in_tests=False,
category=LIST_COMMAND_FLAGS,
help="""\
Comma-separated list of resource field key names to sort by. The
default order is ascending. Prefix a field with ``~'' for descending
order on that field. This flag interacts with other flags that are applied
in this order: *--flatten*, *--sort-by*, *--filter*, *--limit*.
""")
URI_FLAG = Argument(
'--uri',
action='store_true',
require_coverage_in_tests=False,
category=LIST_COMMAND_FLAGS,
help='Print a list of resource URIs instead of the default output.')
class _Common(six.with_metaclass(abc.ABCMeta, object)):
  """Base class for Command and Group."""
  # Help category this command/group is listed under (see *_CATEGORY above).
  category = None
  # CLI generator set by the loader; see GetCLIGenerator().
  _cli_generator = None
  # Class-level markers stamped by the Hidden/UnicodeIsSupported/
  # ReleaseTracks decorators below.
  _is_hidden = False
  _is_unicode_supported = False
  _release_track = None
  _valid_release_tracks = None
  # {tag: message} notices added via AddNotice() (e.g. by Deprecate()).
  _notices = None
  def __init__(self, is_group=False):
    self.exit_code = 0
    self.is_group = is_group
  @staticmethod
  def Args(parser):
    """Set up arguments for this command.
    Args:
      parser: An argparse.ArgumentParser.
    """
    pass
  @staticmethod
  def _Flags(parser):
    """Adds subclass flags.
    Args:
      parser: An argparse.ArgumentParser object.
    """
    pass
  @classmethod
  def IsHidden(cls):
    return cls._is_hidden
  @classmethod
  def IsUnicodeSupported(cls):
    if six.PY2:
      return cls._is_unicode_supported
    # We always support unicode on Python 3.
    return True
  @classmethod
  def ReleaseTrack(cls):
    return cls._release_track
  @classmethod
  def ValidReleaseTracks(cls):
    return cls._valid_release_tracks
  @classmethod
  def GetTrackedAttribute(cls, obj, attribute):
    """Gets the attribute value from obj for tracks.
    The values are checked in ReleaseTrack._ALL order.
    Args:
      obj: The object to extract attribute from.
      attribute: The attribute name in object.
    Returns:
      The attribute value from obj for tracks.
    """
    for track in ReleaseTrack._ALL:  # pylint: disable=protected-access
      if track not in cls._valid_release_tracks:  # pylint: disable=unsupported-membership-test
        continue
      # Try track-qualified names first, e.g. attr_BETA then attr_beta.
      names = []
      names.append(attribute + '_' + track.id)
      if track.prefix:
        names.append(attribute + '_' + track.prefix)
      for name in names:
        if hasattr(obj, name):
          return getattr(obj, name)
    # Fall back to the unqualified attribute.
    return getattr(obj, attribute, None)
  @classmethod
  def Notices(cls):
    return cls._notices
  @classmethod
  def AddNotice(cls, tag, msg, preserve_existing=False):
    # Lazily create the notices dict; with preserve_existing, the first
    # notice registered for a tag wins.
    if not cls._notices:
      cls._notices = {}
    if tag in cls._notices and preserve_existing:
      return
    cls._notices[tag] = msg
  @classmethod
  def GetCLIGenerator(cls):
    """Get a generator function that can be used to execute a gcloud command.
    Returns:
      A bound generator function to execute a gcloud command.
    """
    if cls._cli_generator:
      return cls._cli_generator.Generate
    return None
class Group(_Common):
  """Group is a base class for groups to implement."""
  # Class marker; presumably checked by the calliope loader to distinguish
  # groups from commands — not referenced in this file.
  IS_COMMAND_GROUP = True
  def __init__(self):
    super(Group, self).__init__(is_group=True)
  def Filter(self, context, args):
    """Modify the context that will be given to this group's commands when run.
    Args:
      context: {str:object}, A set of key-value pairs that can be used for
          common initialization among commands.
      args: argparse.Namespace: The same namespace given to the corresponding
          .Run() invocation.
    """
    # Default is a no-op; subclasses override to seed the shared context.
    pass
class Command(six.with_metaclass(abc.ABCMeta, _Common)):
  """Command is a base class for commands to implement.
  Attributes:
    _cli_do_not_use_directly: calliope.cli.CLI, The CLI object representing this
      command line tool. This should *only* be accessed via commands that
      absolutely *need* introspection of the entire CLI.
    context: {str:object}, A set of key-value pairs that can be used for
      common initialization among commands.
    _uri_cache_enabled: bool, The URI cache enabled state.
  """
  # Class marker; presumably checked by the calliope loader to distinguish
  # commands from groups — not referenced in this file.
  IS_COMMAND = True
  def __init__(self, cli, context):
    super(Command, self).__init__(is_group=False)
    self._cli_do_not_use_directly = cli
    self.context = context
    # Subclasses opt in via CacheCommand, which flips this to True.
    self._uri_cache_enabled = False
  @property
  def _cli_power_users_only(self):
    return self._cli_do_not_use_directly
  def ExecuteCommandDoNotUse(self, args):
    """Execute a command using the given CLI.
    Do not introduce new invocations of this method unless your command
    *requires* it; any such new invocations must be approved by a team lead.
    Args:
      args: list of str, the args to Execute() via the CLI.
    Returns:
      pass-through of the return value from Execute()
    """
    return self._cli_power_users_only.Execute(args, call_arg_complete=False)
  @staticmethod
  def _Flags(parser):
    """Sets the default output format.
    Args:
      parser: The argparse parser.
    """
    parser.display_info.AddFormat('default')
  @abc.abstractmethod
  def Run(self, args):
    """Runs the command.
    Args:
      args: argparse.Namespace, An object that contains the values for the
        arguments specified in the .Args() method.
    Returns:
      A resource object dispatched by display.Displayer().
    """
    pass
  def Epilog(self, resources_were_displayed):
    """Called after resources are displayed if the default format was used.
    Args:
      resources_were_displayed: True if resources were displayed.
    """
    _ = resources_were_displayed
  def GetReferencedKeyNames(self, args):
    """Returns the key names referenced by the filter and format expressions."""
    return display.Displayer(self, args, None).GetReferencedKeyNames()
  def GetUriFunc(self):
    """Returns a function that transforms a command resource item to a URI.
    Returns:
      func(resource) that transforms resource into a URI.
    """
    # None means the command has no URI representation for its resources.
    return None
class TopicCommand(six.with_metaclass(abc.ABCMeta, Command)):
  """A command that displays its own help on execution."""
  def Run(self, args):
    # Re-invoke this command's path (minus the CLI name) with --document to
    # render the topic-style help text instead of doing any work.
    self.ExecuteCommandDoNotUse(args.command_path[1:] +
                                ['--document=style=topic'])
    return None
class SilentCommand(six.with_metaclass(abc.ABCMeta, Command)):
  """A command that produces no output."""
  @staticmethod
  def _Flags(parser):
    # The 'none' format suppresses all resource output.
    parser.display_info.AddFormat('none')
class DescribeCommand(six.with_metaclass(abc.ABCMeta, Command)):
  """A command that prints one resource in the 'default' format.

  Inherits the 'default' output format from Command._Flags.
  """
class CacheCommand(six.with_metaclass(abc.ABCMeta, Command)):
  """A command that affects the resource URI cache."""
  def __init__(self, *args, **kwargs):
    super(CacheCommand, self).__init__(*args, **kwargs)
    # Opt in to URI cache updates (Command sets this to False by default).
    self._uri_cache_enabled = True
class ListCommand(six.with_metaclass(abc.ABCMeta, CacheCommand)):
  """A command that pretty-prints all resources."""
  @staticmethod
  def _Flags(parser):
    """Adds the default flags for all ListCommand commands.
    Args:
      parser: The argparse parser.
    """
    # Standard list-command flags defined at module level above.
    FILTER_FLAG.AddToParser(parser)
    LIMIT_FLAG.AddToParser(parser)
    PAGE_SIZE_FLAG.AddToParser(parser)
    SORT_BY_FLAG.AddToParser(parser)
    URI_FLAG.AddToParser(parser)
    parser.display_info.AddFormat('default')
  def Epilog(self, resources_were_displayed):
    """Called after resources are displayed if the default format was used.
    Args:
      resources_were_displayed: True if resources were displayed.
    """
    if not resources_were_displayed:
      log.status.Print('Listed 0 items.')
class CreateCommand(CacheCommand, SilentCommand):
  """A command that creates resources.

  Combines URI cache updates (CacheCommand) with no output (SilentCommand).
  """
class DeleteCommand(CacheCommand, SilentCommand):
  """A command that deletes resources.

  Combines URI cache updates (CacheCommand) with no output (SilentCommand).
  """
class RestoreCommand(CacheCommand, SilentCommand):
  """A command that restores resources.

  Combines URI cache updates (CacheCommand) with no output (SilentCommand).
  """
class UpdateCommand(SilentCommand):
  """A command that updates resources, producing no output."""
def Hidden(cmd_class):
  """Decorator for hiding calliope commands and groups.

  Decorate a subclass of base.Command or base.Group with this function, and the
  decorated command or group will not show up in help text.

  Args:
    cmd_class: base._Common, A calliope command or group.

  Returns:
    A modified version of the provided class.
  """
  # Stamp the class-level marker read back by _Common.IsHidden().
  setattr(cmd_class, '_is_hidden', True)  # pylint: disable=protected-access
  return cmd_class
def UnicodeIsSupported(cmd_class):
  """Decorator for calliope commands and groups that support unicode.

  Decorate a subclass of base.Command or base.Group with this function, and the
  decorated command or group will not raise the argparse unicode command line
  argument exception.

  Args:
    cmd_class: base._Common, A calliope command or group.

  Returns:
    A modified version of the provided class.
  """
  # Stamp the marker consulted by _Common.IsUnicodeSupported() on Python 2.
  setattr(cmd_class, '_is_unicode_supported', True)  # pylint: disable=protected-access
  return cmd_class
def ReleaseTracks(*tracks):
  """Mark this class as the command implementation for the given release tracks.

  Args:
    *tracks: [ReleaseTrack], A list of release tracks that this is valid for.

  Returns:
    The decorated function.
  """
  valid_tracks = set(tracks)

  def _StampTracks(cmd_class):
    """Stamps the valid release tracks onto the decorated class."""
    setattr(cmd_class, '_valid_release_tracks', valid_tracks)  # pylint: disable=protected-access
    return cmd_class

  return _StampTracks
def Deprecate(is_removed=True,
              warning='This command is deprecated.',
              error='This command has been removed.'):
  """Decorator that marks a Calliope command as deprecated.
  Decorate a subclass of base.Command with this function and the
  decorated command will be modified as follows:
  - If is_removed is false, a warning will be logged when *command* is run,
  otherwise an *exception* will be thrown containing error message
  - Command help output will be modified to include warning/error message
  depending on value of is_removed
  - Command help text will automatically be hidden from the reference
  documentation (e.g. @base.Hidden) if is_removed is True
  Args:
    is_removed: boolean, True if the command should raise an error
      when executed. If false, a warning is printed
    warning: string, warning message
    error: string, error message
  Returns:
    A modified version of the provided class.
  """
  def DeprecateCommand(cmd_class):
    """Wrapper Function that creates actual decorated class.
    Args:
      cmd_class: base.Command or base.Group subclass to be decorated
    Returns:
      The decorated class.
    """
    # Pick the notice text/tag shown in help output for this class.
    if is_removed:
      msg = error
      deprecation_tag = '{0}(REMOVED){0} '.format(MARKDOWN_BOLD)
    else:
      msg = warning
      deprecation_tag = '{0}(DEPRECATED){0} '.format(MARKDOWN_BOLD)
    cmd_class.AddNotice(deprecation_tag, msg)
    def RunDecorator(run_func):
      # Wraps the class's entry point so execution warns or raises.
      @wraps(run_func)
      def WrappedRun(*args, **kw):
        if is_removed:
          raise DeprecationException(error)
        log.warning(warning)
        return run_func(*args, **kw)
      return WrappedRun
    # Groups execute via Filter(); commands via Run().
    if issubclass(cmd_class, Group):
      cmd_class.Filter = RunDecorator(cmd_class.Filter)
    else:
      cmd_class.Run = RunDecorator(cmd_class.Run)
    # Removed commands are also hidden from reference documentation.
    if is_removed:
      return Hidden(cmd_class)
    return cmd_class
  return DeprecateCommand
def _ChoiceValueType(value):
"""Returns a function that ensures choice flag values match Cloud SDK Style.
Args:
value: string, string representing flag choice value parsed from command
line.
Returns:
A string value entirely in lower case, with words separated by
hyphens.
"""
return value.replace('_', '-').lower()
def ChoiceArgument(name_or_flag, choices, help_str=None, required=False,
                   action=None, metavar=None, dest=None, default=None,
                   hidden=False):
  """Returns Argument with a Cloud SDK style compliant set of choices.

  Args:
    name_or_flag: string, Either a name or a list of option strings,
      e.g. foo or -f, --foo.
    choices: container, A container (e.g. set, dict, list, tuple) of the
      allowable values for the argument. Should consist of strings entirely in
      lower case, with words separated by hyphens.
    help_str: string, A brief description of what the argument does.
    required: boolean, Whether or not the command-line option may be omitted.
    action: string or argparse.Action, The basic type of argparse.action
      to be taken when this argument is encountered at the command line.
    metavar: string, A name for the argument in usage messages.
    dest: string, The name of the attribute to be added to the object returned
      by parse_args().
    default: string, The value produced if the argument is absent from the
      command line.
    hidden: boolean, Whether or not the command-line option is hidden.

  Returns:
    Argument object with choices, that can accept both lowercase and uppercase
    user input with hyphens or underscores.

  Raises:
    TypeError: If choices are not an iterable container of string options.
    ValueError: If provided choices are not Cloud SDK Style compliant.
  """
  if not choices:
    raise ValueError('Choices must not be empty.')
  # The Iterable ABC must come from collections.abc: the collections-level
  # alias was deprecated in Python 3.3 and removed in Python 3.10.
  if (not isinstance(choices, collections.abc.Iterable)
      or isinstance(choices, six.string_types)):
    raise TypeError(
        'Choices must be an iterable container of options: [{}].'.format(
            ', '.join(choices)))
  # Valid choices should be alphanumeric sequences followed by an optional
  # period '.', separated by a single hyphen '-'.
  choice_re = re.compile(r'^([a-z0-9]\.?-?)+[a-z0-9]$')
  invalid_choices = [x for x in choices if not choice_re.match(x)]
  if invalid_choices:
    raise ValueError(
        ('Invalid choices [{}]. Choices must be entirely in lowercase with '
         'words separated by hyphens(-)').format(', '.join(invalid_choices)))
  return Argument(name_or_flag, choices=choices, required=required,
                  type=_ChoiceValueType, help=help_str, action=action,
                  metavar=metavar, dest=dest, default=default, hidden=hidden)
def DisableUserProjectQuota():
  """Disable the quota header if the user hasn't manually specified it."""
  # Only override when the user has not explicitly set a quota project.
  # NOTE(review): LEGACY presumably restores pre-user-project quota billing
  # behavior — confirm in googlecloudsdk.core.properties.
  if not properties.VALUES.billing.quota_project.IsExplicitlySet():
    properties.VALUES.billing.quota_project.Set(
        properties.VALUES.billing.LEGACY)
def EnableUserProjectQuota():
  """Enable the quota header for current project."""
  # Unconditionally overrides quota_project, unlike DisableUserProjectQuota
  # which respects an explicit user setting.
  properties.VALUES.billing.quota_project.Set(
      properties.VALUES.billing.CURRENT_PROJECT)
def LogCommand(prog, args):
  """Log (to debug) the command/arguments being run in a standard format.

  `gcloud feedback` depends on this format.

  Example format is:

    Running [gcloud.example.command] with arguments: [--bar: "baz"]

  Args:
    prog: string, the dotted name of the command being run (ex.
      "gcloud.foos.list")
    args: argparse.namespace, the parsed arguments from the command line
  """
  specified = args.GetSpecifiedArgs()
  rendered = ('{}: "{}"'.format(name, value)
              for name, value in sorted(six.iteritems(specified)))
  log.debug('Running [{}] with arguments: [{}]'.format(prog,
                                                       ', '.join(rendered)))
| 30.208288 | 95 | 0.693513 |
70e5be6866fd0c184941bceb014c23519574e050 | 33,762 | py | Python | test/functional/rpc_fundrawtransaction.py | TriCron/shirecoin | 50ab7e5b7dc32350e4bcbe33ad728b3926212e5a | [
"MIT"
] | null | null | null | test/functional/rpc_fundrawtransaction.py | TriCron/shirecoin | 50ab7e5b7dc32350e4bcbe33ad728b3926212e5a | [
"MIT"
] | null | null | null | test/functional/rpc_fundrawtransaction.py | TriCron/shirecoin | 50ab7e5b7dc32350e4bcbe33ad728b3926212e5a | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the fundrawtransaction RPC."""
from decimal import Decimal
from test_framework.test_framework import ShirecoinTestFramework
from test_framework.util import (
assert_equal,
assert_fee_amount,
assert_greater_than,
assert_greater_than_or_equal,
assert_raises_rpc_error,
connect_nodes,
count_bytes,
find_vout_for_address,
)
def get_unspent(listunspent, amount):
    """Return the first unspent entry whose 'amount' equals amount.

    Raises:
        AssertionError: if no unspent output with that amount exists.
    """
    match = next((utx for utx in listunspent if utx['amount'] == amount), None)
    if match is None:
        raise AssertionError('Could not find unspent with amount={}'.format(amount))
    return match
class RawTransactionsTest(ShirecoinTestFramework):
    def set_test_params(self):
        """Configure a 4-node test starting from a clean chain."""
        self.num_nodes = 4
        self.setup_clean_chain = True
        # This test isn't testing tx relay. Set whitelist on the peers for
        # instant tx relay.
        self.extra_args = [['-whitelist=noban@127.0.0.1']] * self.num_nodes
    def skip_test_if_missing_module(self):
        # fundrawtransaction requires wallet support.
        self.skip_if_no_wallet()
    def setup_network(self):
        """Connect nodes manually: 0-1, 1-2, 0-2 and 0-3."""
        self.setup_nodes()
        connect_nodes(self.nodes[0], 1)
        connect_nodes(self.nodes[1], 2)
        connect_nodes(self.nodes[0], 2)
        connect_nodes(self.nodes[0], 3)
    def run_test(self):
        """Set consistent fees, mine a mature chain, then run all sub-tests."""
        self.log.info("Connect nodes, set fees, generate blocks, and sync")
        self.min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee']
        # This test is not meant to test fee estimation and we'd like
        # to be sure all txs are sent at a consistent desired feerate
        for node in self.nodes:
            node.settxfee(self.min_relay_tx_fee)
        # if the fee's positive delta is higher than this value tests will fail,
        # neg. delta always fail the tests.
        # The size of the signature of every input may be at most 2 bytes larger
        # than a minimum sized signature.
        # = 2 bytes * minRelayTxFeePerByte
        self.fee_tolerance = 2 * self.min_relay_tx_fee / 1000
        # 1 + 121 blocks so node 2's first coinbase matures.
        self.nodes[2].generate(1)
        self.sync_all()
        self.nodes[0].generate(121)
        self.sync_all()
        # Sub-tests run in order; several later tests depend on state set up
        # by earlier ones (e.g. the watchonly funds from test_change_position).
        self.test_change_position()
        self.test_simple()
        self.test_simple_two_coins()
        self.test_simple_two_outputs()
        self.test_change()
        self.test_no_change()
        self.test_invalid_option()
        self.test_invalid_change_address()
        self.test_valid_change_address()
        self.test_change_type()
        self.test_coin_selection()
        self.test_two_vin()
        self.test_two_vin_two_vout()
        self.test_invalid_input()
        self.test_fee_p2pkh()
        self.test_fee_p2pkh_multi_out()
        self.test_fee_p2sh()
        self.test_fee_4of5()
        self.test_spend_2of2()
        self.test_locked_wallet()
        self.test_many_inputs_fee()
        self.test_many_inputs_send()
        self.test_op_return()
        self.test_watchonly()
        self.test_all_watched_funds()
        self.test_option_feerate()
        self.test_address_reuse()
        self.test_option_subtract_fee_from_outputs()
        self.test_subtract_fee_with_presets()
def test_change_position(self):
"""Ensure setting changePosition in fundraw with an exact match is handled properly."""
self.log.info("Test fundrawtxn changePosition option")
rawmatch = self.nodes[2].createrawtransaction([], {self.nodes[2].getnewaddress():50})
rawmatch = self.nodes[2].fundrawtransaction(rawmatch, {"changePosition":1, "subtractFeeFromOutputs":[0]})
assert_equal(rawmatch["changepos"], -1)
watchonly_address = self.nodes[0].getnewaddress()
watchonly_pubkey = self.nodes[0].getaddressinfo(watchonly_address)["pubkey"]
self.watchonly_amount = Decimal(200)
self.nodes[3].importpubkey(watchonly_pubkey, "", True)
self.watchonly_txid = self.nodes[0].sendtoaddress(watchonly_address, self.watchonly_amount)
# Lock UTXO so nodes[0] doesn't accidentally spend it
self.watchonly_vout = find_vout_for_address(self.nodes[0], self.watchonly_txid, watchonly_address)
self.nodes[0].lockunspent(False, [{"txid": self.watchonly_txid, "vout": self.watchonly_vout}])
self.nodes[0].sendtoaddress(self.nodes[3].getnewaddress(), self.watchonly_amount / 10)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.5)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.0)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 5.0)
self.nodes[0].generate(1)
self.sync_all()
def test_simple(self):
self.log.info("Test fundrawtxn")
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert len(dec_tx['vin']) > 0 #test that we have enough inputs
def test_simple_two_coins(self):
self.log.info("Test fundrawtxn with 2 coins")
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.2 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert len(dec_tx['vin']) > 0 #test if we have enough inputs
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
def test_simple_two_outputs(self):
self.log.info("Test fundrawtxn with 2 outputs")
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.6, self.nodes[1].getnewaddress() : 2.5 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert len(dec_tx['vin']) > 0
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
def test_change(self):
self.log.info("Test fundrawtxn with a vin > required amount")
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
self.test_no_change_fee = fee # Use the same fee for the next tx
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
def test_no_change(self):
self.log.info("Test fundrawtxn not having a change output")
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = {self.nodes[0].getnewaddress(): Decimal(5.0) - self.test_no_change_fee - self.fee_tolerance}
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(rawtxfund['changepos'], -1)
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
def test_invalid_option(self):
self.log.info("Test fundrawtxn with an invalid option")
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_raises_rpc_error(-3, "Unexpected key foo", self.nodes[2].fundrawtransaction, rawtx, {'foo':'bar'})
# reserveChangeKey was deprecated and is now removed
assert_raises_rpc_error(-3, "Unexpected key reserveChangeKey", lambda: self.nodes[2].fundrawtransaction(hexstring=rawtx, options={'reserveChangeKey': True}))
def test_invalid_change_address(self):
self.log.info("Test fundrawtxn with an invalid change address")
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_raises_rpc_error(-5, "changeAddress must be a valid shirecoin address", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress':'foobar'})
def test_valid_change_address(self):
self.log.info("Test fundrawtxn with a provided change address")
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
change = self.nodes[2].getnewaddress()
assert_raises_rpc_error(-8, "changePosition out of bounds", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress':change, 'changePosition':2})
rawtxfund = self.nodes[2].fundrawtransaction(rawtx, {'changeAddress': change, 'changePosition': 0})
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
out = dec_tx['vout'][0]
assert_equal(change, out['scriptPubKey']['addresses'][0])
def test_change_type(self):
self.log.info("Test fundrawtxn with a provided change type")
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[2].fundrawtransaction, rawtx, {'change_type': None})
assert_raises_rpc_error(-5, "Unknown change type ''", self.nodes[2].fundrawtransaction, rawtx, {'change_type': ''})
rawtx = self.nodes[2].fundrawtransaction(rawtx, {'change_type': 'bech32'})
dec_tx = self.nodes[2].decoderawtransaction(rawtx['hex'])
assert_equal('witness_v0_keyhash', dec_tx['vout'][rawtx['changepos']]['scriptPubKey']['type'])
def test_coin_selection(self):
self.log.info("Test fundrawtxn with a vin < required amount")
utx = get_unspent(self.nodes[2].listunspent(), 1)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
# 4-byte version + 1-byte vin count + 36-byte prevout then script_len
rawtx = rawtx[:82] + "0100" + rawtx[84:]
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for i, out in enumerate(dec_tx['vout']):
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
else:
assert_equal(i, rawtxfund['changepos'])
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
def test_two_vin(self):
self.log.info("Test fundrawtxn with 2 vins")
utx = get_unspent(self.nodes[2].listunspent(), 1)
utx2 = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 6.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
matchingIns = 0
for vinOut in dec_tx['vin']:
for vinIn in inputs:
if vinIn['txid'] == vinOut['txid']:
matchingIns+=1
assert_equal(matchingIns, 2) #we now must see two vins identical to vins given as params
def test_two_vin_two_vout(self):
self.log.info("Test fundrawtxn with 2 vins and 2 vouts")
utx = get_unspent(self.nodes[2].listunspent(), 1)
utx2 = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 6.0, self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 2)
assert_equal(len(dec_tx['vout']), 3)
def test_invalid_input(self):
self.log.info("Test fundrawtxn with an invalid vin")
inputs = [ {'txid' : "1c7f966dab21119bac53213a2bc7532bff1fa844c124fd750a7d0b1332440bd1", 'vout' : 0} ] #invalid vin!
outputs = { self.nodes[0].getnewaddress() : 1.0}
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
assert_raises_rpc_error(-4, "Insufficient funds", self.nodes[2].fundrawtransaction, rawtx)
def test_fee_p2pkh(self):
"""Compare fee of a standard pubkeyhash transaction."""
self.log.info("Test fundrawtxn p2pkh fee")
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
# Create same transaction over sendtoaddress.
txId = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
# Compare fee.
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert feeDelta >= 0 and feeDelta <= self.fee_tolerance
def test_fee_p2pkh_multi_out(self):
"""Compare fee of a standard pubkeyhash transaction with multiple outputs."""
self.log.info("Test fundrawtxn p2pkh fee with multiple outputs")
inputs = []
outputs = {
self.nodes[1].getnewaddress():1.1,
self.nodes[1].getnewaddress():1.2,
self.nodes[1].getnewaddress():0.1,
self.nodes[1].getnewaddress():1.3,
self.nodes[1].getnewaddress():0.2,
self.nodes[1].getnewaddress():0.3,
}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
# Create same transaction over sendtoaddress.
txId = self.nodes[0].sendmany("", outputs)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
# Compare fee.
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert feeDelta >= 0 and feeDelta <= self.fee_tolerance
def test_fee_p2sh(self):
"""Compare fee of a 2-of-2 multisig p2sh transaction."""
# Create 2-of-2 addr.
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].getaddressinfo(addr1)
addr2Obj = self.nodes[1].getaddressinfo(addr2)
mSigObj = self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
inputs = []
outputs = {mSigObj:1.1}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
# Create same transaction over sendtoaddress.
txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
# Compare fee.
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert feeDelta >= 0 and feeDelta <= self.fee_tolerance
def test_fee_4of5(self):
"""Compare fee of a standard pubkeyhash transaction."""
self.log.info("Test fundrawtxn fee with 4-of-5 addresses")
# Create 4-of-5 addr.
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr3 = self.nodes[1].getnewaddress()
addr4 = self.nodes[1].getnewaddress()
addr5 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].getaddressinfo(addr1)
addr2Obj = self.nodes[1].getaddressinfo(addr2)
addr3Obj = self.nodes[1].getaddressinfo(addr3)
addr4Obj = self.nodes[1].getaddressinfo(addr4)
addr5Obj = self.nodes[1].getaddressinfo(addr5)
mSigObj = self.nodes[1].addmultisigaddress(
4,
[
addr1Obj['pubkey'],
addr2Obj['pubkey'],
addr3Obj['pubkey'],
addr4Obj['pubkey'],
addr5Obj['pubkey'],
]
)['address']
inputs = []
outputs = {mSigObj:1.1}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
# Create same transaction over sendtoaddress.
txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
# Compare fee.
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert feeDelta >= 0 and feeDelta <= self.fee_tolerance
def test_spend_2of2(self):
"""Spend a 2-of-2 multisig transaction over fundraw."""
self.log.info("Test fundrawtxn spending 2-of-2 multisig")
# Create 2-of-2 addr.
addr1 = self.nodes[2].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[2].getaddressinfo(addr1)
addr2Obj = self.nodes[2].getaddressinfo(addr2)
mSigObj = self.nodes[2].addmultisigaddress(
2,
[
addr1Obj['pubkey'],
addr2Obj['pubkey'],
]
)['address']
# Send 1.2 SHIRE to msig addr.
self.nodes[0].sendtoaddress(mSigObj, 1.2)
self.nodes[0].generate(1)
self.sync_all()
oldBalance = self.nodes[1].getbalance()
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1}
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[2].fundrawtransaction(rawtx)
signedTx = self.nodes[2].signrawtransactionwithwallet(fundedTx['hex'])
self.nodes[2].sendrawtransaction(signedTx['hex'])
self.nodes[2].generate(1)
self.sync_all()
# Make sure funds are received at node1.
assert_equal(oldBalance+Decimal('1.10000000'), self.nodes[1].getbalance())
def test_locked_wallet(self):
self.log.info("Test fundrawtxn with locked wallet")
self.nodes[1].encryptwallet("test")
# Drain the keypool.
self.nodes[1].getnewaddress()
self.nodes[1].getrawchangeaddress()
inputs = []
outputs = {self.nodes[0].getnewaddress():1.1}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
# fund a transaction that requires a new key for the change output
# creating the key must be impossible because the wallet is locked
assert_raises_rpc_error(-4, "Keypool ran out, please call keypoolrefill first", self.nodes[1].fundrawtransaction, rawtx)
# Refill the keypool.
self.nodes[1].walletpassphrase("test", 100)
self.nodes[1].keypoolrefill(8) #need to refill the keypool to get an internal change address
self.nodes[1].walletlock()
assert_raises_rpc_error(-13, "walletpassphrase", self.nodes[1].sendtoaddress, self.nodes[0].getnewaddress(), 1.2)
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():1.1}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawtx)
# Now we need to unlock.
self.nodes[1].walletpassphrase("test", 600)
signedTx = self.nodes[1].signrawtransactionwithwallet(fundedTx['hex'])
self.nodes[1].sendrawtransaction(signedTx['hex'])
self.nodes[1].generate(1)
self.sync_all()
# Make sure funds are received at node1.
assert_equal(oldBalance+Decimal('51.10000000'), self.nodes[0].getbalance())
def test_many_inputs_fee(self):
"""Multiple (~19) inputs tx test | Compare fee."""
self.log.info("Test fundrawtxn fee with many inputs")
# Empty node1, send some small coins from node0 to node1.
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.nodes[1].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.nodes[0].generate(1)
self.sync_all()
# Fund a tx with ~20 small inputs.
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawtx)
# Create same transaction over sendtoaddress.
txId = self.nodes[1].sendmany("", outputs)
signedFee = self.nodes[1].getrawmempool(True)[txId]['fee']
# Compare fee.
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert feeDelta >= 0 and feeDelta <= self.fee_tolerance * 19 #~19 inputs
def test_many_inputs_send(self):
"""Multiple (~19) inputs tx test | sign/send."""
self.log.info("Test fundrawtxn sign+send with many inputs")
# Again, empty node1, send some small coins from node0 to node1.
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.nodes[1].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.nodes[0].generate(1)
self.sync_all()
# Fund a tx with ~20 small inputs.
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawtx)
fundedAndSignedTx = self.nodes[1].signrawtransactionwithwallet(fundedTx['hex'])
self.nodes[1].sendrawtransaction(fundedAndSignedTx['hex'])
self.nodes[1].generate(1)
self.sync_all()
assert_equal(oldBalance+Decimal('50.19000000'), self.nodes[0].getbalance()) #0.19+block reward
def test_op_return(self):
self.log.info("Test fundrawtxn with OP_RETURN and no vin")
rawtx = "0100000000010000000000000000066a047465737400000000"
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(len(dec_tx['vin']), 0)
assert_equal(len(dec_tx['vout']), 1)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert_greater_than(len(dec_tx['vin']), 0) # at least one vin
assert_equal(len(dec_tx['vout']), 2) # one change output added
def test_watchonly(self):
self.log.info("Test fundrawtxn using only watchonly")
inputs = []
outputs = {self.nodes[2].getnewaddress(): self.watchonly_amount / 2}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx, {'includeWatching': True })
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 1)
assert_equal(res_dec["vin"][0]["txid"], self.watchonly_txid)
assert "fee" in result.keys()
assert_greater_than(result["changepos"], -1)
def test_all_watched_funds(self):
self.log.info("Test fundrawtxn using entirety of watched funds")
inputs = []
outputs = {self.nodes[2].getnewaddress(): self.watchonly_amount}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
# Backward compatibility test (2nd param is includeWatching).
result = self.nodes[3].fundrawtransaction(rawtx, True)
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 2)
assert res_dec["vin"][0]["txid"] == self.watchonly_txid or res_dec["vin"][1]["txid"] == self.watchonly_txid
assert_greater_than(result["fee"], 0)
assert_greater_than(result["changepos"], -1)
assert_equal(result["fee"] + res_dec["vout"][result["changepos"]]["value"], self.watchonly_amount / 10)
signedtx = self.nodes[3].signrawtransactionwithwallet(result["hex"])
assert not signedtx["complete"]
signedtx = self.nodes[0].signrawtransactionwithwallet(signedtx["hex"])
assert signedtx["complete"]
self.nodes[0].sendrawtransaction(signedtx["hex"])
self.nodes[0].generate(1)
self.sync_all()
def test_option_feerate(self):
self.log.info("Test fundrawtxn feeRate option")
# Make sure there is exactly one input so coin selection can't skew the result.
assert_equal(len(self.nodes[3].listunspent(1)), 1)
inputs = []
outputs = {self.nodes[3].getnewaddress() : 1}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx) # uses self.min_relay_tx_fee (set by settxfee)
result2 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2 * self.min_relay_tx_fee})
result3 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 10 * self.min_relay_tx_fee})
assert_raises_rpc_error(-4, "Fee exceeds maximum configured by -maxtxfee", self.nodes[3].fundrawtransaction, rawtx, {"feeRate": 1})
result_fee_rate = result['fee'] * 1000 / count_bytes(result['hex'])
assert_fee_amount(result2['fee'], count_bytes(result2['hex']), 2 * result_fee_rate)
assert_fee_amount(result3['fee'], count_bytes(result3['hex']), 10 * result_fee_rate)
def test_address_reuse(self):
"""Test no address reuse occurs."""
self.log.info("Test fundrawtxn does not reuse addresses")
rawtx = self.nodes[3].createrawtransaction(inputs=[], outputs={self.nodes[3].getnewaddress(): 1})
result3 = self.nodes[3].fundrawtransaction(rawtx)
res_dec = self.nodes[0].decoderawtransaction(result3["hex"])
changeaddress = ""
for out in res_dec['vout']:
if out['value'] > 1.0:
changeaddress += out['scriptPubKey']['addresses'][0]
assert changeaddress != ""
nextaddr = self.nodes[3].getnewaddress()
# Now the change address key should be removed from the keypool.
assert changeaddress != nextaddr
def test_option_subtract_fee_from_outputs(self):
self.log.info("Test fundrawtxn subtractFeeFromOutputs option")
# Make sure there is exactly one input so coin selection can't skew the result.
assert_equal(len(self.nodes[3].listunspent(1)), 1)
inputs = []
outputs = {self.nodes[2].getnewaddress(): 1}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = [self.nodes[3].fundrawtransaction(rawtx), # uses self.min_relay_tx_fee (set by settxfee)
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": []}), # empty subtraction list
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0]}), # uses self.min_relay_tx_fee (set by settxfee)
self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2 * self.min_relay_tx_fee}),
self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2 * self.min_relay_tx_fee, "subtractFeeFromOutputs": [0]}),]
dec_tx = [self.nodes[3].decoderawtransaction(tx_['hex']) for tx_ in result]
output = [d['vout'][1 - r['changepos']]['value'] for d, r in zip(dec_tx, result)]
change = [d['vout'][r['changepos']]['value'] for d, r in zip(dec_tx, result)]
assert_equal(result[0]['fee'], result[1]['fee'], result[2]['fee'])
assert_equal(result[3]['fee'], result[4]['fee'])
assert_equal(change[0], change[1])
assert_equal(output[0], output[1])
assert_equal(output[0], output[2] + result[2]['fee'])
assert_equal(change[0] + result[0]['fee'], change[2])
assert_equal(output[3], output[4] + result[4]['fee'])
assert_equal(change[3] + result[3]['fee'], change[4])
inputs = []
outputs = {self.nodes[2].getnewaddress(): value for value in (1.0, 1.1, 1.2, 1.3)}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = [self.nodes[3].fundrawtransaction(rawtx),
# Split the fee between outputs 0, 2, and 3, but not output 1.
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0, 2, 3]})]
dec_tx = [self.nodes[3].decoderawtransaction(result[0]['hex']),
self.nodes[3].decoderawtransaction(result[1]['hex'])]
# Nested list of non-change output amounts for each transaction.
output = [[out['value'] for i, out in enumerate(d['vout']) if i != r['changepos']]
for d, r in zip(dec_tx, result)]
# List of differences in output amounts between normal and subtractFee transactions.
share = [o0 - o1 for o0, o1 in zip(output[0], output[1])]
# Output 1 is the same in both transactions.
assert_equal(share[1], 0)
# The other 3 outputs are smaller as a result of subtractFeeFromOutputs.
assert_greater_than(share[0], 0)
assert_greater_than(share[2], 0)
assert_greater_than(share[3], 0)
# Outputs 2 and 3 take the same share of the fee.
assert_equal(share[2], share[3])
# Output 0 takes at least as much share of the fee, and no more than 2
# satoshis more, than outputs 2 and 3.
assert_greater_than_or_equal(share[0], share[2])
assert_greater_than_or_equal(share[2] + Decimal(2e-8), share[0])
# The fee is the same in both transactions.
assert_equal(result[0]['fee'], result[1]['fee'])
# The total subtracted from the outputs is equal to the fee.
assert_equal(share[0] + share[2] + share[3], result[0]['fee'])
def test_subtract_fee_with_presets(self):
self.log.info("Test fundrawtxn subtract fee from outputs with preset inputs that are sufficient")
addr = self.nodes[0].getnewaddress()
txid = self.nodes[0].sendtoaddress(addr, 10)
vout = find_vout_for_address(self.nodes[0], txid, addr)
rawtx = self.nodes[0].createrawtransaction([{'txid': txid, 'vout': vout}], [{self.nodes[0].getnewaddress(): 5}])
fundedtx = self.nodes[0].fundrawtransaction(rawtx, {'subtractFeeFromOutputs': [0]})
signedtx = self.nodes[0].signrawtransactionwithwallet(fundedtx['hex'])
self.nodes[0].sendrawtransaction(signedtx['hex'])
# Script entry point: run the functional test through the framework.
if __name__ == '__main__':
    RawTransactionsTest().main()
| 44.482213 | 165 | 0.635478 |
deffeefbe8e5dd13a925fcf61b60536c7482a3dc | 35 | py | Python | octodns-custom-providers/__init__.py | ganawaj/octodns-custom-provider | d695a9f805f77e2f4083ad9b10433a260f012e86 | [
"MIT"
] | 2 | 2021-06-07T16:11:20.000Z | 2021-08-10T22:35:16.000Z | octodns-custom-providers/__init__.py | ganawaj/octodns-custom-provider | d695a9f805f77e2f4083ad9b10433a260f012e86 | [
"MIT"
] | 1 | 2021-06-07T20:12:14.000Z | 2021-06-07T20:12:14.000Z | octodns-custom-providers/__init__.py | ganawaj/octodns-custom-provider | d695a9f805f77e2f4083ad9b10433a260f012e86 | [
"MIT"
] | 2 | 2021-06-07T17:20:32.000Z | 2022-03-19T00:33:05.000Z | 'Custom octodns providers/sources'
| 17.5 | 34 | 0.828571 |
7bb334bcb8f7af79fab8325875ce1964f424cd69 | 1,340 | py | Python | make_thumb.py | igoradmtg/simple_video_thumbnails | d13118dff05a6ea28c8c003ad642e7e7283981a4 | [
"MIT"
] | 1 | 2021-08-13T17:37:29.000Z | 2021-08-13T17:37:29.000Z | make_thumb.py | igoradmtg/simple_video_thumbnails | d13118dff05a6ea28c8c003ad642e7e7283981a4 | [
"MIT"
] | null | null | null | make_thumb.py | igoradmtg/simple_video_thumbnails | d13118dff05a6ea28c8c003ad642e7e7283981a4 | [
"MIT"
] | null | null | null | #! /usr/bin/env python
# -*- coding: utf-8 -*-import os
import os
import sys
from moviepy.editor import VideoFileClip
def main():
    """Create a PNG thumbnail for a video file at half its duration.

    Usage: make_thumb.py dir_name file_name_video [thumb_dir]

    The thumbnail is written next to the video (or into ``thumb_dir`` if
    given) as ``<file_name_video>.png``. Returns False if the thumbnail
    already exists; exits with a non-zero status on usage or lookup errors.
    """
    if len(sys.argv) < 3:
        print("Error argument " + str(len(sys.argv)))
        print("Use:")
        print("make_thumb.py dir_name file_name_video [thumb_dir]")
        print("  dir_name         directory containing the video")
        print("  file_name_video  video file name inside dir_name")
        print("  thumb_dir        optional subdirectory for the thumbnail")
        sys.exit(2)  # was: fell through with implicit success
    video_path = os.path.join(sys.argv[1], sys.argv[2])
    if not os.path.isfile(video_path):
        print("Not found file")
        sys.exit(1)  # was: sys.exit(0) — error must not report success
    if len(sys.argv) >= 4:
        thumb_dir = os.path.join(sys.argv[1], sys.argv[3])
        # Create the thumbnail subdirectory on first use.
        if not os.path.isdir(thumb_dir):
            os.mkdir(thumb_dir)
        image_path = os.path.join(thumb_dir, sys.argv[2] + ".png")
    else:
        image_path = os.path.join(sys.argv[1], sys.argv[2] + ".png")
    if os.path.isfile(image_path):
        print("File exists:", image_path)
        return False
    print("Video: " + video_path)
    print("Image: " + image_path)
    clip = VideoFileClip(video_path)
    try:
        # Grab the frame at the midpoint of the clip.
        clip.save_frame(image_path, t=clip.duration / 2)
    finally:
        # Release the ffmpeg reader process (was leaked before).
        clip.close()
# Script entry point.
if __name__ == "__main__":
    main()
1e8f59ca6d9471ee3fa6e845170c1b231b02e869 | 7,955 | py | Python | datalad/customremotes/ria_utils.py | DisasterMo/datalad | dfa956984f6e737e11518f4a9b230b6a6eadc97a | [
"MIT"
] | 298 | 2015-01-25T17:36:29.000Z | 2022-03-20T03:38:47.000Z | datalad/customremotes/ria_utils.py | adswa/datalad | c86643fe2e974da8d7403e9799997efcfee97384 | [
"MIT"
] | 6,387 | 2015-01-02T18:15:01.000Z | 2022-03-31T20:58:58.000Z | datalad/customremotes/ria_utils.py | adswa/datalad | c86643fe2e974da8d7403e9799997efcfee97384 | [
"MIT"
] | 109 | 2015-01-25T17:49:40.000Z | 2022-03-06T06:54:54.000Z | # emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Helper for RIA stores
"""
import logging
from pathlib import Path
lgr = logging.getLogger('datalad.customremotes.ria_utils')
class UnknownLayoutVersion(Exception):
    """Raised when a requested RIA layout version is not a known version."""
    pass
# TODO: Make versions a tuple of (label, description)?
# Object tree versions we introduced so far. This is about the layout within a
# dataset in a RIA store
known_versions_objt = ['1', '2']
# Dataset tree versions we introduced so far. This is about the layout of
# datasets in a RIA store
known_versions_dst = ['1']
# TODO: This is wrong and should consider both versions (store+dataset)
def get_layout_locations(version, base_path, dsid):
    """Compute the standard locations of a dataset inside a RIA store.

    Parameters
    ----------
    version : int
        Layout version of the store (only version 1 is known).
    base_path : Path
        Root path of the store.
    dsid : str
        Dataset ID; its first three characters form the top-level
        directory, the remainder the second level.

    Returns
    -------
    Path, Path, Path
        Bare repository directory, archive directory, and annex object
        directory, in that order.

    Raises
    ------
    ValueError
        If `version` is not a known layout version.
    """
    if version != 1:
        raise ValueError("Unknown layout version: {}. Supported: {}"
                         "".format(version, known_versions_dst))
    repo_dir = base_path / dsid[:3] / dsid[3:]
    return repo_dir, repo_dir / 'archives', repo_dir / 'annex' / 'objects'
def verify_ria_url(url, cfg):
    """Validate a ria+ URL and split it into its components.

    The URL is first passed through configured URL rewrites, then checked
    for the ``ria+`` prefix, an empty fragment, and a supported protocol
    (ssh, file, http, https).

    Parameters
    ----------
    url : str
        URL to verify and decode.
    cfg : dict-like
        Configuration settings for rewrite_url()

    Raises
    ------
    ValueError
        If the URL is empty or structurally invalid.

    Returns
    -------
    tuple
        (host, base-path, rewritten url). `host` is a stub URL that may
        carry username, password, and port when present in the input; it
        is None for the file protocol.
    """
    from datalad.config import rewrite_url
    from datalad.support.network import URL

    if not url:
        raise ValueError("Got no URL")

    rewritten = rewrite_url(cfg, url)
    parsed = URL(rewritten)
    if not parsed.scheme.startswith('ria+'):
        raise ValueError("Missing ria+ prefix in final URL: %s" % rewritten)
    if parsed.fragment:
        raise ValueError(
            "Unexpected fragment in RIA-store URL: %s" % parsed.fragment)
    protocol = parsed.scheme[4:]
    if protocol not in ('ssh', 'file', 'http', 'https'):
        raise ValueError("Unsupported protocol: %s. "
                         "Supported: ssh, file, http(s)" %
                         protocol)

    if protocol == 'file':
        host_spec = None
    else:
        # Assemble a stub URL carrying credentials and port when given.
        pieces = [protocol, '://']
        if parsed.username:
            pieces.append(parsed.username)
        if parsed.password:
            pieces.append(':')
            pieces.append(parsed.password)
        if parsed.username:
            pieces.append('@')
        pieces.append(parsed.hostname or '')
        if parsed.port:
            pieces.append(':')
            pieces.append('%s' % parsed.port)
        host_spec = ''.join(pieces)

    base_path = parsed.path if parsed.path else '/'
    return host_spec, base_path, rewritten
def _ensure_version(io, base_path, version):
    """Check a store or dataset version and make sure it is declared.

    If a ``ria-layout-version`` file already exists under ``base_path``, the
    version it records (ignoring any config flags after a pipe symbol) must
    match ``version``; otherwise the directory and file are created.

    Parameters
    ----------
    io: SSHRemoteIO or LocalIO
    base_path: Path
      root path of a store or dataset
    version: str
      target layout version of the store (dataset tree)
    """
    marker = base_path / 'ria-layout-version'
    if not io.exists(marker):
        # Note: mkdir has parents=True, so this also creates base_path
        # itself when necessary.
        io.mkdir(base_path)
        io.write_file(marker, version)
        return
    recorded = io.read_file(marker).split('|')[0].strip()
    if recorded == version.split('|')[0]:
        # Location exists and the recorded version fits - nothing to do.
        # (A differing config flag after the pipe symbol is fine.)
        return
    # Already existing location with a conflicting version on record.
    raise ValueError("Conflicting version found at target: {}"
                     .format(recorded))
def create_store(io, base_path, version):
    """Initialize a RIA store at ``base_path``.

    Internal helper, part of intermediate RF'ing; should ultimately become
    a dedicated command or option for create-sibling-ria. Validates the
    requested layout version, records it in the store's version file, and
    creates the ``error_logs`` directory.

    Parameters
    ----------
    io: SSHRemoteIO or LocalIO
      Respective execution instance.
      Note: To be replaced by proper command abstraction
    base_path: Path
      root path of the store
    version: str
      layout version of the store (dataset tree)

    Raises
    ------
    UnknownLayoutVersion
      If ``version`` is not a known store layout version.
    """
    # At store level the only version we know as of now is 1.
    if version not in known_versions_dst:
        raise UnknownLayoutVersion("RIA store layout version unknown: {}."
                                   "Supported versions: {}"
                                   .format(version, known_versions_dst))
    # Declare the version (creating base_path itself if needed).
    _ensure_version(io, base_path, version)
    io.mkdir(base_path / 'error_logs')
def create_ds_in_store(io, base_path, dsid, obj_version, store_version, alias=None):
    """Helper to create a dataset in a RIA store
    Note, that this is meant as an internal helper and part of intermediate
    RF'ing. Ultimately should lead to a version option for create-sibling-ria
    in conjunction with a store creation command/option.
    Parameters
    ----------
    io: SSHRemoteIO or LocalIO
      Respective execution instance.
      Note: To be replaced by proper command abstraction
    base_path: Path
      root path of the store
    dsid: str
      dataset id
    store_version: str
      layout version of the store (dataset tree)
    obj_version: str
      layout version of the dataset itself (object tree)
    alias: str, optional
      alias for the dataset in the store
    Raises
    ------
    UnknownLayoutVersion
      If either ``store_version`` or ``obj_version`` is not supported.
    """
    # TODO: Note for RF'ing, that this is about setting up a valid target
    # for the special remote not a replacement for create-sibling-ria.
    # There's currently no git (bare) repo created.
    try:
        # TODO: This is currently store layout version!
        # Too entangled by current get_layout_locations.
        dsgit_dir, archive_dir, dsobj_dir = \
            get_layout_locations(int(store_version), base_path, dsid)
    except ValueError as e:
        raise UnknownLayoutVersion(str(e))
    if obj_version not in known_versions_objt:
        raise UnknownLayoutVersion("Dataset layout version unknown: {}. "
                                   "Supported: {}"
                                   .format(obj_version, known_versions_objt))
    # Record the dataset's own layout version; this also creates dsgit_dir.
    _ensure_version(io, dsgit_dir, obj_version)
    io.mkdir(archive_dir)
    io.mkdir(dsobj_dir)
    if alias:
        alias_dir = base_path / "alias"
        io.mkdir(alias_dir)
        try:
            # go for a relative path to keep the alias links valid
            # when moving a store
            io.symlink(
                Path('..') / dsgit_dir.relative_to(base_path),
                alias_dir / alias)
        except FileExistsError:
            # An alias is a convenience pointer only; a clash is not fatal.
            lgr.warning("Alias %r already exists in the RIA store, not adding an "
                        "alias.", alias)
| 33.284519 | 87 | 0.618102 |
c2609fd1e1bc8278926e8a0a774a450b9a08fd70 | 7,496 | py | Python | test/test_plugins.py | ttkltll/bottle | c8179b28d93b2875a31866c6b84a9b5b59c0c8b4 | [
"MIT"
] | null | null | null | test/test_plugins.py | ttkltll/bottle | c8179b28d93b2875a31866c6b84a9b5b59c0c8b4 | [
"MIT"
] | null | null | null | test/test_plugins.py | ttkltll/bottle | c8179b28d93b2875a31866c6b84a9b5b59c0c8b4 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import unittest
from . import tools
from bottle import HTTPResponse, HTTPError
class MyPlugin(object):
    """Minimal bottle plugin used as a test fixture.

    Records the most recent wrapped call in ``lastcall`` and can be
    configured to inject extra keyword arguments (``add_args``) and to
    append a suffix to each callback's joined output (``add_content``).
    """

    def __init__(self):
        # Set by setup() once the plugin is installed on an application.
        self.app = None
        # Extra keyword arguments merged into every wrapped call.
        self.add_args = {}
        # Suffix appended to the wrapped callback's output.
        self.add_content = ''

    def setup(self, app):
        """Remember the application this plugin was installed on."""
        self.app = app

    def apply(self, func, config):
        """Wrap ``func`` so calls are recorded and output decorated."""
        def wrapper(*call_args, **call_kwargs):
            call_kwargs.update(self.add_args)
            self.lastcall = func, call_args, call_kwargs
            body = ''.join(func(*call_args, **call_kwargs))
            return body + self.add_content
        return wrapper
def my_decorator(func):
    """Plain function decorator used as a test fixture.

    The wrapper materializes the wrapped callback's iterable result and
    returns only its last item.
    """
    def wrapper(*a, **ka):
        return list(func(*a, **ka))[-1]
    # Bug fix: the wrapper must be returned; otherwise decorating a
    # callable silently replaces it with None.
    return wrapper
class TestPluginManagement(tools.ServerTestBase):
    """Tests for installing, uninstalling, ordering, and skipping plugins
    on a bottle application."""
    def verify_installed(self, plugin, otype, **config):
        # Helper: assert the plugin has the expected type/config and is
        # registered on the test app.
        self.assertEqual(type(plugin), otype)
        self.assertEqual(plugin.config, config)
        self.assertEqual(plugin.app, self.app)
        self.assertTrue(plugin in self.app.plugins)
    def test_install_plugin(self):
        plugin = MyPlugin()
        installed = self.app.install(plugin)
        self.assertEqual(plugin, installed)
        self.assertTrue(plugin in self.app.plugins)
    def test_install_decorator(self):
        # Plain callables (decorators) are accepted as plugins, too.
        installed = self.app.install(my_decorator)
        self.assertEqual(my_decorator, installed)
        self.assertTrue(my_decorator in self.app.plugins)
    def test_install_non_plugin(self):
        self.assertRaises(TypeError, self.app.install, 'I am not a plugin')
    def test_uninstall_by_instance(self):
        plugin = self.app.install(MyPlugin())
        plugin2 = self.app.install(MyPlugin())
        self.app.uninstall(plugin)
        self.assertTrue(plugin not in self.app.plugins)
        self.assertTrue(plugin2 in self.app.plugins)
    def test_uninstall_by_type(self):
        # Uninstalling by class removes every instance of that class.
        plugin = self.app.install(MyPlugin())
        plugin2 = self.app.install(MyPlugin())
        self.app.uninstall(MyPlugin)
        self.assertTrue(plugin not in self.app.plugins)
        self.assertTrue(plugin2 not in self.app.plugins)
    def test_uninstall_by_name(self):
        plugin = self.app.install(MyPlugin())
        plugin2 = self.app.install(MyPlugin())
        plugin.name = 'myplugin'
        self.app.uninstall('myplugin')
        self.assertTrue(plugin not in self.app.plugins)
        self.assertTrue(plugin2 in self.app.plugins)
    def test_uninstall_all(self):
        # uninstall(True) removes every installed plugin.
        plugin = self.app.install(MyPlugin())
        plugin2 = self.app.install(MyPlugin())
        self.app.uninstall(True)
        self.assertFalse(self.app.plugins)
    def test_route_plugin(self):
        # Plugins passed via route(apply=...) affect only that route.
        plugin = MyPlugin()
        plugin.add_content = ';foo'
        @self.app.route('/a')
        @self.app.route('/b', apply=[plugin])
        def a(): return 'plugin'
        self.assertBody('plugin', '/a')
        self.assertBody('plugin;foo', '/b')
    def test_plugin_oder(self):
        # (method name keeps its historic typo) Route-level plugins wrap
        # inside app-level ones; later installs wrap closer to the callback.
        self.app.install(MyPlugin()).add_content = ';global-1'
        self.app.install(MyPlugin()).add_content = ';global-2'
        l1 = MyPlugin()
        l1.add_content = ';local-1'
        l2 = MyPlugin()
        l2.add_content = ';local-2'
        @self.app.route('/a')
        @self.app.route('/b', apply=[l1, l2])
        def a(): return 'plugin'
        self.assertBody('plugin;global-2;global-1', '/a')
        self.assertBody('plugin;local-2;local-1;global-2;global-1', '/b')
    def test_skip_by_instance(self):
        g1 = self.app.install(MyPlugin())
        g1.add_content = ';global-1'
        g2 = self.app.install(MyPlugin())
        g2.add_content = ';global-2'
        l1 = MyPlugin()
        l1.add_content = ';local-1'
        l2 = MyPlugin()
        l2.add_content = ';local-2'
        @self.app.route('/a', skip=[g2, l2])
        @self.app.route('/b', apply=[l1, l2], skip=[g2, l2])
        def a(): return 'plugin'
        self.assertBody('plugin;global-1', '/a')
        self.assertBody('plugin;local-1;global-1', '/b')
    def test_skip_by_class(self):
        g1 = self.app.install(MyPlugin())
        g1.add_content = ';global-1'
        @self.app.route('/a')
        @self.app.route('/b', skip=[MyPlugin])
        def a(): return 'plugin'
        self.assertBody('plugin;global-1', '/a')
        self.assertBody('plugin', '/b')
    def test_skip_by_name(self):
        g1 = self.app.install(MyPlugin())
        g1.add_content = ';global-1'
        g1.name = 'test'
        @self.app.route('/a')
        @self.app.route('/b', skip=['test'])
        def a(): return 'plugin'
        self.assertBody('plugin;global-1', '/a')
        self.assertBody('plugin', '/b')
    def test_skip_all(self):
        g1 = self.app.install(MyPlugin())
        g1.add_content = ';global-1'
        @self.app.route('/a')
        @self.app.route('/b', skip=[True])
        def a(): return 'plugin'
        self.assertBody('plugin;global-1', '/a')
        self.assertBody('plugin', '/b')
    def test_skip_nonlist(self):
        # skip= also accepts a single plugin instead of a list.
        g1 = self.app.install(MyPlugin())
        g1.add_content = ';global-1'
        @self.app.route('/a')
        @self.app.route('/b', skip=g1)
        def a(): return 'plugin'
        self.assertBody('plugin;global-1', '/a')
        self.assertBody('plugin', '/b')
    def test_json_plugin_catches_httpresponse(self):
        # The built-in JSON plugin serializes dict payloads of HTTPResponse
        # whether the response is returned or raised.
        @self.app.get('/return')
        def _():
            return HTTPResponse({'test': 'ko'}, 402)
        @self.app.get('/raise')
        def _():
            raise HTTPResponse({'test': 'ko2'}, 402)
        self.assertBody(b'{"test": "ko"}', '/return')
        self.assertBody(b'{"test": "ko2"}', '/raise')
class TestPluginAPI(tools.ServerTestBase):
    """Tests for the plugin protocol itself: plain callables, apply(),
    setup(), and close() hooks."""
    def setUp(self):
        super(TestPluginAPI, self).setUp()
        # Single test route; its config carries a 'test' entry that
        # apply()-style plugins can read.
        @self.app.route('/', test='plugin.cfg')
        def test(**args):
            return ', '.join('%s:%s' % (k,v) for k,v in args.items())
    def test_callable(self):
        # A bare callable works as a plugin: it decorates the callback.
        def plugin(func):
            def wrapper(*a, **ka):
                return func(test='me', *a, **ka) + '; tail'
            return wrapper
        self.app.install(plugin)
        self.assertBody('test:me; tail', '/')
    def test_apply(self):
        # If a plugin implements apply(), that takes precedence and
        # __call__ must never be invoked.
        class Plugin(object):
            def apply(self, func, route):
                def wrapper(*a, **ka):
                    return func(test=route.config['test'], *a, **ka) + '; tail'
                return wrapper
            def __call__(self, func):
                raise AssertionError("Plugins must not be called "\
                                     "if they implement 'apply'")
        self.app.install(Plugin())
        self.assertBody('test:plugin.cfg; tail', '/')
    def test_instance_method_wrapper(self):
        # apply() may return a bound method as the replacement callback.
        class Plugin(object):
            api=2
            def apply(self, callback, route):
                return self.b
            def b(self): return "Hello"
        self.app.install(Plugin())
        self.assertBody('Hello', '/')
    def test_setup(self):
        # setup(app) is called once at install time.
        class Plugin(object):
            def __call__(self, func): return func
            def setup(self, app): self.app = app
        plugin = self.app.install(Plugin())
        self.assertEqual(getattr(plugin, 'app', None), self.app)
    def test_close(self):
        # close() runs when the plugin is uninstalled or the app closes.
        class Plugin(object):
            def __call__(self, func): return func
            def close(self): self.closed = True
        plugin = self.app.install(Plugin())
        plugin2 = self.app.install(Plugin())
        self.app.uninstall(plugin)
        self.assertTrue(getattr(plugin, 'closed', False))
        self.app.close()
        self.assertTrue(getattr(plugin2, 'closed', False))
| 33.765766 | 79 | 0.576708 |
1cc216e969fd8fa20a9a68f99e29f36418f26d38 | 174 | py | Python | pyboi/base.py | tomis007/pyboi | cff297d60593812d6885854f77c11815b181677a | [
"MIT"
] | 2 | 2017-02-26T16:54:37.000Z | 2017-06-17T14:50:45.000Z | pyboi/base.py | tomis007/pyboi | cff297d60593812d6885854f77c11815b181677a | [
"MIT"
] | null | null | null | pyboi/base.py | tomis007/pyboi | cff297d60593812d6885854f77c11815b181677a | [
"MIT"
] | null | null | null | from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
'''this is so all the database classes share the same base/ allow the database creation'''
| 34.8 | 90 | 0.798851 |
71efa12ed925e27c09d41ccc336bbceb79e61789 | 432 | py | Python | biosppy/__init__.py | rickyHong/BioSPPy-repl | 8b7b04c5b48ce434e81ab8a9fe40fb7ffb8f25c1 | [
"BSD-3-Clause"
] | 1 | 2018-04-29T16:27:57.000Z | 2018-04-29T16:27:57.000Z | biosppy/__init__.py | rickyHong/BioSPPy-repl | 8b7b04c5b48ce434e81ab8a9fe40fb7ffb8f25c1 | [
"BSD-3-Clause"
] | null | null | null | biosppy/__init__.py | rickyHong/BioSPPy-repl | 8b7b04c5b48ce434e81ab8a9fe40fb7ffb8f25c1 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
biosppy
-------
A toolbox for biosignal processing written in Python.
:copyright: (c) 2015-2017 by Instituto de Telecomunicacoes
:license: BSD 3-clause, see LICENSE for more details.
"""
# compat
from __future__ import absolute_import, division, print_function
# get version
from .version import version as __version__
# allow lazy loading
from .signals import bvp, ecg, eda, eeg, emg, resp, tools
| 21.6 | 64 | 0.733796 |
699a5c534a3aa5fac43174ebb869d03fa49a82a0 | 43,294 | py | Python | zerver/worker/queue_processors.py | narendrapsgim/zulip | e2df0d171f921d1e2b09d5de72088ffcc6a0f5f4 | [
"Apache-2.0"
] | 17,004 | 2015-09-25T18:27:24.000Z | 2022-03-31T22:02:32.000Z | zerver/worker/queue_processors.py | narendrapsgim/zulip | e2df0d171f921d1e2b09d5de72088ffcc6a0f5f4 | [
"Apache-2.0"
] | 20,344 | 2015-09-25T19:02:42.000Z | 2022-03-31T23:54:40.000Z | zerver/worker/queue_processors.py | narendrapsgim/zulip | e2df0d171f921d1e2b09d5de72088ffcc6a0f5f4 | [
"Apache-2.0"
] | 7,271 | 2015-09-25T18:48:39.000Z | 2022-03-31T21:06:11.000Z | # Documented in https://zulip.readthedocs.io/en/latest/subsystems/queuing.html
import base64
import copy
import datetime
import email
import email.policy
import functools
import logging
import os
import signal
import socket
import tempfile
import time
import urllib
from abc import ABC, abstractmethod
from collections import deque
from email.message import EmailMessage
from functools import wraps
from threading import Lock, Timer
from types import FrameType
from typing import (
Any,
Callable,
Dict,
List,
Mapping,
MutableSequence,
Optional,
Sequence,
Set,
Tuple,
Type,
TypeVar,
)
import orjson
import sentry_sdk
from django.conf import settings
from django.core.mail.backends.smtp import EmailBackend
from django.db import connection, transaction
from django.db.models import F
from django.db.utils import IntegrityError
from django.utils.timezone import now as timezone_now
from django.utils.translation import gettext as _
from django.utils.translation import override as override_language
from sentry_sdk import add_breadcrumb, configure_scope
from zulip_bots.lib import extract_query_without_mention
from zerver.context_processors import common_context
from zerver.lib.actions import (
do_mark_stream_messages_as_read,
do_send_confirmation_email,
do_update_embedded_data,
do_update_user_activity,
do_update_user_activity_interval,
do_update_user_presence,
internal_send_private_message,
notify_realm_export,
render_incoming_message,
)
from zerver.lib.bot_lib import EmbeddedBotHandler, EmbeddedBotQuitException, get_bot_handler
from zerver.lib.context_managers import lockfile
from zerver.lib.db import reset_queries
from zerver.lib.digest import bulk_handle_digest_email
from zerver.lib.email_mirror import decode_stream_email_address, is_missed_message_address
from zerver.lib.email_mirror import process_message as mirror_email
from zerver.lib.email_mirror import rate_limit_mirror_by_realm
from zerver.lib.email_notifications import handle_missedmessage_emails
from zerver.lib.error_notify import do_report_error
from zerver.lib.exceptions import RateLimited
from zerver.lib.export import export_realm_wrapper
from zerver.lib.outgoing_webhook import do_rest_call, get_outgoing_webhook_service_handler
from zerver.lib.push_notifications import (
clear_push_device_tokens,
handle_push_notification,
handle_remove_push_notification,
initialize_push_notifications,
)
from zerver.lib.pysa import mark_sanitized
from zerver.lib.queue import SimpleQueueClient, retry_event
from zerver.lib.remote_server import PushNotificationBouncerRetryLaterError
from zerver.lib.send_email import (
EmailNotDeliveredException,
FromAddress,
handle_send_email_format_changes,
initialize_connection,
send_email,
send_future_email,
)
from zerver.lib.timestamp import timestamp_to_datetime
from zerver.lib.url_preview import preview as url_preview
from zerver.models import (
Message,
PreregistrationUser,
Realm,
RealmAuditLog,
ScheduledMessageNotificationEmail,
UserMessage,
UserProfile,
filter_to_valid_prereg_users,
flush_per_request_caches,
get_bot_services,
get_client,
get_system_bot,
get_user_profile_by_id,
)
logger = logging.getLogger(__name__)
class WorkerTimeoutException(Exception):
    """Raised when a queue worker exceeds its per-batch time budget."""

    def __init__(self, queue_name: str, limit: int, event_count: int) -> None:
        self.queue_name = queue_name
        self.limit = limit
        self.event_count = event_count

    def __str__(self) -> str:
        # The budget is per event, so the total is limit * event_count.
        total_seconds = self.limit * self.event_count
        return f"Timed out in {self.queue_name} after {total_seconds} seconds processing {self.event_count} events"
class InterruptConsumeException(Exception):
    """Abort handling of the current event only.

    A consume function raises this to signal that processing of the
    current event should simply stop, while the worker carries on
    normally with the rest of the queue.
    """
class WorkerDeclarationException(Exception):
    # Raised when a QueueProcessingWorker subclass is mis-declared, e.g.
    # it lacks the required queue_name attribute (see
    # QueueProcessingWorker.__init__).
    pass
# TypeVar so the decorator below returns the same concrete worker subclass
# it was given.
ConcreteQueueWorker = TypeVar("ConcreteQueueWorker", bound="QueueProcessingWorker")


def assign_queue(
    queue_name: str,
    enabled: bool = True,
    is_test_queue: bool = False,
) -> Callable[[Type[ConcreteQueueWorker]], Type[ConcreteQueueWorker]]:
    """Class decorator binding a worker class to the named queue.

    The class gets its ``queue_name`` attribute set and, unless ``enabled``
    is False, is recorded in the global worker registry.
    """

    def decorate(worker_class: Type[ConcreteQueueWorker]) -> Type[ConcreteQueueWorker]:
        worker_class.queue_name = queue_name
        if not enabled:
            return worker_class
        register_worker(queue_name, worker_class, is_test_queue)
        return worker_class

    return decorate
# Global registry mapping queue names to their worker classes, plus the
# subset of queues that exist only for the test suite.
worker_classes: Dict[str, Type["QueueProcessingWorker"]] = {}
test_queues: Set[str] = set()


def register_worker(
    queue_name: str, clazz: Type["QueueProcessingWorker"], is_test_queue: bool = False
) -> None:
    """Record a worker class for the given queue name."""
    worker_classes[queue_name] = clazz
    if is_test_queue:
        test_queues.add(queue_name)


def get_worker(queue_name: str) -> "QueueProcessingWorker":
    """Instantiate the worker registered for the given queue."""
    return worker_classes[queue_name]()


def get_active_worker_queues(only_test_queues: bool = False) -> List[str]:
    """Returns all (either test, or real) worker queues."""
    return [
        name
        for name in worker_classes
        if (name in test_queues) == only_test_queues
    ]
def check_and_send_restart_signal() -> None:
    """Best-effort self-restart when the database connection has gone bad.

    If the Django database connection is no longer usable, send SIGUSR1 to
    our own process (which the process supervisor treats as a restart
    request). Any error while probing is deliberately swallowed.
    """
    try:
        if connection.is_usable():
            return
        logging.warning("*** Sending self SIGUSR1 to trigger a restart.")
        os.kill(os.getpid(), signal.SIGUSR1)
    except Exception:
        pass
# If you change the function on which this decorator is used be careful that the new
# function doesn't delete the "failed_tries" attribute of "data" which is needed for
# "retry_event" to work correctly; see EmailSendingWorker for an example with deepcopy.
def retry_send_email_failures(
    func: Callable[[ConcreteQueueWorker, Dict[str, Any]], None],
) -> Callable[[ConcreteQueueWorker, Dict[str, Any]], None]:
    """Decorator: requeue the event on transient email-delivery failures.

    DNS errors, socket timeouts, and delivery failures from the wrapped
    consume function are logged and handed to ``retry_event`` rather than
    being allowed to propagate.
    """

    @wraps(func)
    def wrapper(worker: ConcreteQueueWorker, data: Dict[str, Any]) -> None:
        try:
            func(worker, data)
        except (
            socket.gaierror,
            socket.timeout,
            EmailNotDeliveredException,
        ) as err:
            # Capture the name now; the logging callback runs later.
            failed_class_name = type(err).__name__

            def on_failure(event: Dict[str, Any]) -> None:
                logging.exception(
                    "Event %r failed due to exception %s", event, failed_class_name, stack_info=True
                )

            retry_event(worker.queue_name, data, on_failure)

    return wrapper
class QueueProcessingWorker(ABC):
    """Base class for queue workers: consumes events one at a time from a
    RabbitMQ queue, with optional per-event timeouts, statistics files,
    and error logging for failed events."""
    # Name of the RabbitMQ queue; set by the @assign_queue decorator.
    queue_name: str
    # Per-event time budget (seconds) enforced via SIGALRM when
    # ENABLE_TIMEOUTS is set; None disables the timeout entirely.
    MAX_CONSUME_SECONDS: Optional[int] = 30
    ENABLE_TIMEOUTS = False
    # How often (in consume iterations / in seconds) to refresh the
    # on-disk statistics file while the queue is non-empty.
    CONSUME_ITERATIONS_BEFORE_UPDATE_STATS_NUM = 50
    MAX_SECONDS_BEFORE_UPDATE_STATS = 30
    def __init__(self) -> None:
        self.q: Optional[SimpleQueueClient] = None
        if not hasattr(self, "queue_name"):
            raise WorkerDeclarationException("Queue worker declared without queue_name")
        self.initialize_statistics()
    def initialize_statistics(self) -> None:
        """Reset all bookkeeping behind update_statistics() and write a
        fresh stats file."""
        self.queue_last_emptied_timestamp = time.time()
        self.consumed_since_last_emptied = 0
        self.recent_consume_times: MutableSequence[Tuple[int, float]] = deque(maxlen=50)
        self.consume_iteration_counter = 0
        self.idle = True
        self.last_statistics_update_time = 0.0
        self.update_statistics(0)
    def update_statistics(self, remaining_local_queue_size: int) -> None:
        """Atomically (write-to-tmp + rename, under a lockfile) publish this
        worker's consumption statistics to QUEUE_STATS_DIR."""
        total_seconds = sum(seconds for _, seconds in self.recent_consume_times)
        total_events = sum(events_number for events_number, _ in self.recent_consume_times)
        if total_events == 0:
            recent_average_consume_time = None
        else:
            recent_average_consume_time = total_seconds / total_events
        stats_dict = dict(
            update_time=time.time(),
            recent_average_consume_time=recent_average_consume_time,
            current_queue_size=remaining_local_queue_size,
            queue_last_emptied_timestamp=self.queue_last_emptied_timestamp,
            consumed_since_last_emptied=self.consumed_since_last_emptied,
        )
        os.makedirs(settings.QUEUE_STATS_DIR, exist_ok=True)
        fname = f"{self.queue_name}.stats"
        fn = os.path.join(settings.QUEUE_STATS_DIR, fname)
        with lockfile(fn + ".lock"):
            tmp_fn = fn + ".tmp"
            with open(tmp_fn, "wb") as f:
                f.write(
                    orjson.dumps(stats_dict, option=orjson.OPT_APPEND_NEWLINE | orjson.OPT_INDENT_2)
                )
            os.rename(tmp_fn, fn)
        self.last_statistics_update_time = time.time()
    def get_remaining_local_queue_size(self) -> int:
        if self.q is not None:
            return self.q.local_queue_size()
        else:
            # This is a special case that will happen if we're operating without
            # using RabbitMQ (e.g. in tests). In that case there's no queuing to speak of
            # and the only reasonable size to return is 0.
            return 0
    @abstractmethod
    def consume(self, data: Dict[str, Any]) -> None:
        # Subclasses implement the actual per-event processing here.
        pass
    def do_consume(
        self, consume_func: Callable[[List[Dict[str, Any]]], None], events: List[Dict[str, Any]]
    ) -> None:
        """Run consume_func(events) with timeout enforcement, Sentry
        breadcrumbs, error handling, and statistics updates."""
        consume_time_seconds: Optional[float] = None
        with configure_scope() as scope:
            scope.clear_breadcrumbs()
            add_breadcrumb(
                type="debug",
                category="queue_processor",
                message=f"Consuming {self.queue_name}",
                data={"events": events, "local_queue_size": self.get_remaining_local_queue_size()},
            )
        try:
            if self.idle:
                # We're reactivating after having gone idle due to emptying the queue.
                # We should update the stats file to keep it fresh and to make it clear
                # that the queue started processing, in case the event we're about to process
                # makes us freeze.
                self.idle = False
                self.update_statistics(self.get_remaining_local_queue_size())
            time_start = time.time()
            if self.MAX_CONSUME_SECONDS and self.ENABLE_TIMEOUTS:
                try:
                    signal.signal(
                        signal.SIGALRM,
                        functools.partial(self.timer_expired, self.MAX_CONSUME_SECONDS, events),
                    )
                    try:
                        signal.alarm(self.MAX_CONSUME_SECONDS * len(events))
                        consume_func(events)
                    finally:
                        # Always cancel the pending alarm, consumed or not.
                        signal.alarm(0)
                finally:
                    # Restore the default SIGALRM handler.
                    signal.signal(signal.SIGALRM, signal.SIG_DFL)
            else:
                consume_func(events)
            consume_time_seconds = time.time() - time_start
            self.consumed_since_last_emptied += len(events)
        except Exception as e:
            self._handle_consume_exception(events, e)
        finally:
            flush_per_request_caches()
            reset_queries()
            if consume_time_seconds is not None:
                self.recent_consume_times.append((len(events), consume_time_seconds))
            remaining_local_queue_size = self.get_remaining_local_queue_size()
            if remaining_local_queue_size == 0:
                self.queue_last_emptied_timestamp = time.time()
                self.consumed_since_last_emptied = 0
                # We've cleared all the events from the queue, so we don't
                # need to worry about the small overhead of doing a disk write.
                # We take advantage of this to update the stats file to keep it fresh,
                # especially since the queue might go idle until new events come in.
                self.update_statistics(0)
                self.idle = True
                return
            self.consume_iteration_counter += 1
            if (
                self.consume_iteration_counter >= self.CONSUME_ITERATIONS_BEFORE_UPDATE_STATS_NUM
                or time.time() - self.last_statistics_update_time
                >= self.MAX_SECONDS_BEFORE_UPDATE_STATS
            ):
                self.consume_iteration_counter = 0
                self.update_statistics(remaining_local_queue_size)
    def consume_single_event(self, event: Dict[str, Any]) -> None:
        # Wrap the single-event consume() in the batch-shaped do_consume().
        consume_func = lambda events: self.consume(events[0])
        self.do_consume(consume_func, [event])
    def timer_expired(
        self, limit: int, events: List[Dict[str, Any]], signal: int, frame: FrameType
    ) -> None:
        # SIGALRM handler installed by do_consume when timeouts are enabled.
        raise WorkerTimeoutException(self.queue_name, limit, len(events))
    def _handle_consume_exception(self, events: List[Dict[str, Any]], exception: Exception) -> None:
        """Log the failure (to Sentry and the process log), append the
        failed events to the queue's on-disk error file, and possibly
        trigger a restart if the database connection has gone bad."""
        if isinstance(exception, InterruptConsumeException):
            # The exception signals that no further error handling
            # is needed and the worker can proceed.
            return
        with configure_scope() as scope:
            scope.set_context(
                "events",
                {
                    "data": events,
                    "queue_name": self.queue_name,
                },
            )
            if isinstance(exception, WorkerTimeoutException):
                with sentry_sdk.push_scope() as scope:
                    scope.fingerprint = ["worker-timeout", self.queue_name]
                    logging.exception(exception, stack_info=True)
            else:
                logging.exception(
                    "Problem handling data on queue %s", self.queue_name, stack_info=True
                )
        if not os.path.exists(settings.QUEUE_ERROR_DIR):
            os.mkdir(settings.QUEUE_ERROR_DIR) # nocoverage
        # Use 'mark_sanitized' to prevent Pysa from detecting this false positive
        # flow. 'queue_name' is always a constant string.
        fname = mark_sanitized(f"{self.queue_name}.errors")
        fn = os.path.join(settings.QUEUE_ERROR_DIR, fname)
        line = f"{time.asctime()}\t{orjson.dumps(events).decode()}\n"
        lock_fn = fn + ".lock"
        with lockfile(lock_fn):
            with open(fn, "a") as f:
                f.write(line)
        check_and_send_restart_signal()
    def setup(self) -> None:
        # Connect to RabbitMQ; kept separate from __init__ for testability.
        self.q = SimpleQueueClient()
    def start(self) -> None:
        """Begin consuming events from RabbitMQ, one at a time."""
        assert self.q is not None
        self.initialize_statistics()
        self.q.start_json_consumer(
            self.queue_name,
            lambda events: self.consume_single_event(events[0]),
        )
    def stop(self) -> None: # nocoverage
        assert self.q is not None
        self.q.stop_consuming()
class LoopQueueProcessingWorker(QueueProcessingWorker):
    """Variant of QueueProcessingWorker that consumes events in batches
    (up to batch_size at a time, waiting at most sleep_delay seconds)
    rather than one at a time."""
    # Maximum seconds to wait for a batch to fill before processing.
    sleep_delay = 1
    # Maximum number of events handed to consume_batch at once.
    batch_size = 100
    def start(self) -> None:  # nocoverage
        assert self.q is not None
        self.initialize_statistics()
        self.q.start_json_consumer(
            self.queue_name,
            lambda events: self.do_consume(self.consume_batch, events),
            batch_size=self.batch_size,
            timeout=self.sleep_delay,
        )
    @abstractmethod
    def consume_batch(self, events: List[Dict[str, Any]]) -> None:
        # Subclasses implement batch processing here.
        pass
    def consume(self, event: Dict[str, Any]) -> None:
        """In LoopQueueProcessingWorker, consume is used just for automated tests"""
        self.consume_batch([event])
@assign_queue("invites")
class ConfirmationEmailWorker(QueueProcessingWorker):
    """Sends invitation confirmation emails, and schedules a reminder
    email for invitations that live long enough for one to be useful."""
    def consume(self, data: Mapping[str, Any]) -> None:
        invite_expires_in_days = data["invite_expires_in_days"]
        invitee = filter_to_valid_prereg_users(
            PreregistrationUser.objects.filter(id=data["prereg_id"]), invite_expires_in_days
        ).first()
        if invitee is None:
            # The invitation could have been revoked
            return
        referrer = get_user_profile_by_id(data["referrer_id"])
        logger.info(
            "Sending invitation for realm %s to %s", referrer.realm.string_id, invitee.email
        )
        # Prefer a language explicitly attached to the event; fall back to
        # the realm default.
        if "email_language" in data:
            email_language = data["email_language"]
        else:
            email_language = referrer.realm.default_language
        activate_url = do_send_confirmation_email(
            invitee, referrer, email_language, invite_expires_in_days
        )
        # queue invitation reminder
        if invite_expires_in_days >= 4:
            context = common_context(referrer)
            context.update(
                activate_url=activate_url,
                referrer_name=referrer.full_name,
                referrer_email=referrer.delivery_email,
                referrer_realm_name=referrer.realm.name,
            )
            # Reminder is delivered two days before the invite expires.
            send_future_email(
                "zerver/emails/invitation_reminder",
                referrer.realm,
                to_emails=[invitee.email],
                from_address=FromAddress.tokenized_no_reply_placeholder,
                language=email_language,
                context=context,
                delay=datetime.timedelta(days=invite_expires_in_days - 2),
            )
@assign_queue("user_activity")
class UserActivityWorker(LoopQueueProcessingWorker):
    """The UserActivity queue is perhaps our highest-traffic queue, and
    requires some care to ensure it performs adequately.
    We use a LoopQueueProcessingWorker as a performance optimization
    for managing the queue. The structure of UserActivity records is
    such that they are easily deduplicated before being sent to the
    database; we take advantage of that to make this queue highly
    effective at dealing with a backlog containing many similar
    events. Such a backlog happen in a few ways:
    * In abuse/DoS situations, if a client is sending huge numbers of
      similar requests to the server.
    * If the queue ends up with several minutes of backlog e.g. due to
      downtime of the queue processor, many clients will have several
      common events from doing an action multiple times.
    """
    # Cache mapping legacy client-name strings to Client ids; see the
    # compatibility branch in consume_batch.
    client_id_map: Dict[str, int] = {}
    def start(self) -> None:
        # For our unit tests to make sense, we need to clear this on startup.
        self.client_id_map = {}
        super().start()
    def consume_batch(self, user_activity_events: List[Dict[str, Any]]) -> None:
        """Deduplicate a batch of events by (user, client, query) and
        record one aggregated update per key."""
        # Maps (user_profile_id, client_id, query) -> (count, last_time).
        uncommitted_events: Dict[Tuple[int, int, str], Tuple[int, float]] = {}
        # First, we drain the queue of all user_activity events and
        # deduplicate them for insertion into the database.
        for event in user_activity_events:
            user_profile_id = event["user_profile_id"]
            if "client_id" not in event:
                # This is for compatibility with older events still stuck in the queue,
                # that used the client name in event["client"] instead of having
                # event["client_id"] directly.
                #
                # TODO/compatibility: We can delete this once it is no
                # longer possible to directly upgrade from 2.1 to main.
                if event["client"] not in self.client_id_map:
                    client = get_client(event["client"])
                    self.client_id_map[event["client"]] = client.id
                client_id = self.client_id_map[event["client"]]
            else:
                client_id = event["client_id"]
            key_tuple = (user_profile_id, client_id, event["query"])
            if key_tuple not in uncommitted_events:
                uncommitted_events[key_tuple] = (1, event["time"])
            else:
                count, time = uncommitted_events[key_tuple]
                uncommitted_events[key_tuple] = (count + 1, max(time, event["time"]))
        # Then we insert the updates into the database.
        #
        # TODO: Doing these updates in sequence individually is likely
        # inefficient; the idealized version would do some sort of
        # bulk insert_or_update query.
        for key_tuple in uncommitted_events:
            (user_profile_id, client_id, query) = key_tuple
            count, time = uncommitted_events[key_tuple]
            log_time = timestamp_to_datetime(time)
            do_update_user_activity(user_profile_id, client_id, query, count, log_time)
@assign_queue("user_activity_interval")
class UserActivityIntervalWorker(QueueProcessingWorker):
    """Applies a user-activity-interval update for each queued event."""

    def consume(self, event: Mapping[str, Any]) -> None:
        # Resolve the payload and hand it straight to the action layer.
        do_update_user_activity_interval(
            get_user_profile_by_id(event["user_profile_id"]),
            timestamp_to_datetime(event["time"]),
        )
@assign_queue("user_presence")
class UserPresenceWorker(QueueProcessingWorker):
    """Applies a presence update for each queued event."""

    def consume(self, event: Mapping[str, Any]) -> None:
        logging.debug("Received presence event: %s", event)
        profile = get_user_profile_by_id(event["user_profile_id"])
        do_update_user_presence(
            profile,
            get_client(event["client"]),
            timestamp_to_datetime(event["time"]),
            event["status"],
        )
@assign_queue("missedmessage_emails")
class MissedMessageWorker(QueueProcessingWorker):
# Aggregate all messages received over the last BATCH_DURATION
# seconds to let someone finish sending a batch of messages and/or
# editing them before they are sent out as emails to recipients.
#
# The timer is running whenever; we poll at most every TIMER_FREQUENCY
# seconds, to avoid excessive activity.
TIMER_FREQUENCY = 5
timer_event: Optional[Timer] = None
# This lock protects access to all of the data structures declared
# above. A lock is required because maybe_send_batched_emails, as
# the argument to Timer, runs in a separate thread from the rest
# of the consumer.
lock = Lock()
# Because the background `maybe_send_batched_email` thread can
# hold the lock for an indeterminate amount of time, the `consume`
# can block on that for longer than 30s, the default worker
# timeout. Allow arbitrarily-long worker `consume` calls.
MAX_CONSUME_SECONDS = None
def consume(self, event: Dict[str, Any]) -> None:
with self.lock:
logging.debug("Received missedmessage_emails event: %s", event)
# When we consume an event, check if there are existing pending emails
# for that user, and if so use the same scheduled timestamp.
user_profile_id: int = event["user_profile_id"]
user_profile = get_user_profile_by_id(user_profile_id)
batch_duration_seconds = user_profile.email_notifications_batching_period_seconds
batch_duration = datetime.timedelta(seconds=batch_duration_seconds)
try:
pending_email = ScheduledMessageNotificationEmail.objects.filter(
user_profile_id=user_profile_id
)[0]
scheduled_timestamp = pending_email.scheduled_timestamp
except IndexError:
scheduled_timestamp = timezone_now() + batch_duration
try:
ScheduledMessageNotificationEmail.objects.create(
user_profile_id=user_profile_id,
message_id=event["message_id"],
trigger=event["trigger"],
scheduled_timestamp=scheduled_timestamp,
mentioned_user_group_id=event.get("mentioned_user_group_id"),
)
self.ensure_timer()
except IntegrityError:
logging.debug(
"ScheduledMessageNotificationEmail row could not be created. The message may have been deleted. Skipping event."
)
def ensure_timer(self) -> None:
# The caller is responsible for ensuring self.lock is held when it calls this.
if self.timer_event is not None:
return
self.timer_event = Timer(
self.TIMER_FREQUENCY, MissedMessageWorker.maybe_send_batched_emails, [self]
)
self.timer_event.start()
    def maybe_send_batched_emails(self) -> None:
        """Timer callback: send all due batched emails, then delete their rows.

        Runs in the Timer's thread; takes `lock` for its whole duration,
        so `consume` blocks while a flush is in progress.
        """
        with self.lock:
            # self.timer_event just triggered execution of this
            # function in a thread, so now that we hold the lock, we
            # clear the timer_event attribute to record that no Timer
            # is active.
            self.timer_event = None
            current_time = timezone_now()
            with transaction.atomic():
                # Rows whose scheduled_timestamp has passed are due now.
                events_to_process = ScheduledMessageNotificationEmail.objects.filter(
                    scheduled_timestamp__lte=current_time
                ).select_related()
                # Batch the entries by user
                events_by_recipient: Dict[int, List[Dict[str, Any]]] = {}
                for event in events_to_process:
                    entry = dict(
                        user_profile_id=event.user_profile_id,
                        message_id=event.message_id,
                        trigger=event.trigger,
                        mentioned_user_group_id=event.mentioned_user_group_id,
                    )
                    if event.user_profile_id in events_by_recipient:
                        events_by_recipient[event.user_profile_id].append(entry)
                    else:
                        events_by_recipient[event.user_profile_id] = [entry]
                for user_profile_id in events_by_recipient.keys():
                    events: List[Dict[str, Any]] = events_by_recipient[user_profile_id]
                    logging.info(
                        "Batch-processing %s missedmessage_emails events for user %s",
                        len(events),
                        user_profile_id,
                    )
                    try:
                        # Because we process events in batches, an
                        # escaped exception here would lead to
                        # duplicate messages being sent for other
                        # users in the same events_to_process batch,
                        # and no guarantee of forward progress.
                        handle_missedmessage_emails(user_profile_id, events)
                    except Exception:
                        logging.exception(
                            "Failed to process %d missedmessage_emails for user %s",
                            len(events),
                            user_profile_id,
                            stack_info=True,
                        )
                # NOTE(review): .delete() re-evaluates the queryset's filter;
                # rows that became due between iteration and here would also
                # be deleted without being sent — confirm this window is
                # acceptable (we are inside one atomic transaction).
                events_to_process.delete()
            # By only restarting the timer if there are actually events in
            # the queue, we ensure this queue processor is idle when there
            # are no missed-message emails to process. This avoids
            # constant CPU usage when there is no work to do.
            if ScheduledMessageNotificationEmail.objects.exists():
                self.ensure_timer()
@assign_queue("email_senders")
class EmailSendingWorker(LoopQueueProcessingWorker):
def __init__(self) -> None:
super().__init__()
self.connection: EmailBackend = initialize_connection(None)
@retry_send_email_failures
def send_email(self, event: Dict[str, Any]) -> None:
# Copy the event, so that we don't pass the `failed_tries'
# data to send_email (which neither takes that
# argument nor needs that data).
copied_event = copy.deepcopy(event)
if "failed_tries" in copied_event:
del copied_event["failed_tries"]
handle_send_email_format_changes(copied_event)
self.connection = initialize_connection(self.connection)
send_email(**copied_event, connection=self.connection)
def consume_batch(self, events: List[Dict[str, Any]]) -> None:
for event in events:
self.send_email(event)
def stop(self) -> None:
try:
self.connection.close()
finally:
super().stop()
@assign_queue("missedmessage_mobile_notifications")
class PushNotificationsWorker(QueueProcessingWorker):
def start(self) -> None:
# initialize_push_notifications doesn't strictly do anything
# beyond printing some logging warnings if push notifications
# are not available in the current configuration.
initialize_push_notifications()
super().start()
def consume(self, event: Dict[str, Any]) -> None:
try:
if event.get("type", "add") == "remove":
message_ids = event.get("message_ids")
if message_ids is None:
# TODO/compatibility: Previously, we sent only one `message_id` in
# a payload for notification remove events. This was later changed
# to send a list of `message_ids` (with that field name), but we need
# compatibility code for events present in the queue during upgrade.
# Remove this when one can no longer upgrade from 1.9.2 (or earlier)
# to any version after 2.0.0
message_ids = [event["message_id"]]
handle_remove_push_notification(event["user_profile_id"], message_ids)
else:
handle_push_notification(event["user_profile_id"], event)
except PushNotificationBouncerRetryLaterError:
def failure_processor(event: Dict[str, Any]) -> None:
logger.warning(
"Maximum retries exceeded for trigger:%s event:push_notification",
event["user_profile_id"],
)
retry_event(self.queue_name, event, failure_processor)
@assign_queue("error_reports")
class ErrorReporter(QueueProcessingWorker):
def consume(self, event: Mapping[str, Any]) -> None:
logging.info(
"Processing traceback with type %s for %s", event["type"], event.get("user_email")
)
if settings.ERROR_REPORTING:
do_report_error(event["type"], event["report"])
@assign_queue("digest_emails")
class DigestWorker(QueueProcessingWorker): # nocoverage
# Who gets a digest is entirely determined by the enqueue_digest_emails
# management command, not here.
def consume(self, event: Mapping[str, Any]) -> None:
if "user_ids" in event:
user_ids = event["user_ids"]
else:
# legacy code may have enqueued a single id
user_ids = [event["user_profile_id"]]
bulk_handle_digest_email(user_ids, event["cutoff"])
@assign_queue("email_mirror")
class MirrorWorker(QueueProcessingWorker):
def consume(self, event: Mapping[str, Any]) -> None:
rcpt_to = event["rcpt_to"]
msg = email.message_from_bytes(
base64.b64decode(event["msg_base64"]),
policy=email.policy.default,
)
assert isinstance(msg, EmailMessage) # https://github.com/python/typeshed/issues/2417
if not is_missed_message_address(rcpt_to):
# Missed message addresses are one-time use, so we don't need
# to worry about emails to them resulting in message spam.
recipient_realm = decode_stream_email_address(rcpt_to)[0].realm
try:
rate_limit_mirror_by_realm(recipient_realm)
except RateLimited:
logger.warning(
"MirrorWorker: Rejecting an email from: %s to realm: %s - rate limited.",
msg["From"],
recipient_realm.name,
)
return
mirror_email(msg, rcpt_to=rcpt_to)
@assign_queue("embed_links")
class FetchLinksEmbedData(QueueProcessingWorker):
# This is a slow queue with network requests, so a disk write is negligible.
# Update stats file after every consume call.
CONSUME_ITERATIONS_BEFORE_UPDATE_STATS_NUM = 1
def consume(self, event: Mapping[str, Any]) -> None:
for url in event["urls"]:
start_time = time.time()
url_preview.get_link_embed_data(url)
logging.info(
"Time spent on get_link_embed_data for %s: %s", url, time.time() - start_time
)
message = Message.objects.get(id=event["message_id"])
# If the message changed, we will run this task after updating the message
# in zerver.lib.actions.check_update_message
if message.content != event["message_content"]:
return
if message.content is not None:
query = UserMessage.objects.filter(
message=message.id,
)
message_user_ids = set(query.values_list("user_profile_id", flat=True))
# Fetch the realm whose settings we're using for rendering
realm = Realm.objects.get(id=event["message_realm_id"])
# If rendering fails, the called code will raise a JsonableError.
rendering_result = render_incoming_message(
message, message.content, message_user_ids, realm
)
do_update_embedded_data(message.sender, message, message.content, rendering_result)
def timer_expired(
self, limit: int, events: List[Dict[str, Any]], signal: int, frame: FrameType
) -> None:
assert len(events) == 1
event = events[0]
logging.warning(
"Timed out in %s after %s seconds while fetching URLs for message %s: %s",
self.queue_name,
limit,
event["message_id"],
event["urls"],
)
raise InterruptConsumeException
@assign_queue("outgoing_webhooks")
class OutgoingWebhookWorker(QueueProcessingWorker):
def consume(self, event: Dict[str, Any]) -> None:
message = event["message"]
event["command"] = message["content"]
services = get_bot_services(event["user_profile_id"])
for service in services:
event["service_name"] = str(service.name)
service_handler = get_outgoing_webhook_service_handler(service)
do_rest_call(service.base_url, event, service_handler)
@assign_queue("embedded_bots")
class EmbeddedBotWorker(QueueProcessingWorker):
def get_bot_api_client(self, user_profile: UserProfile) -> EmbeddedBotHandler:
return EmbeddedBotHandler(user_profile)
def consume(self, event: Mapping[str, Any]) -> None:
user_profile_id = event["user_profile_id"]
user_profile = get_user_profile_by_id(user_profile_id)
message: Dict[str, Any] = event["message"]
# TODO: Do we actually want to allow multiple Services per bot user?
services = get_bot_services(user_profile_id)
for service in services:
bot_handler = get_bot_handler(str(service.name))
if bot_handler is None:
logging.error(
"Error: User %s has bot with invalid embedded bot service %s",
user_profile_id,
service.name,
)
continue
try:
if hasattr(bot_handler, "initialize"):
bot_handler.initialize(self.get_bot_api_client(user_profile))
if event["trigger"] == "mention":
message["content"] = extract_query_without_mention(
message=message,
client=self.get_bot_api_client(user_profile),
)
assert message["content"] is not None
bot_handler.handle_message(
message=message,
bot_handler=self.get_bot_api_client(user_profile),
)
except EmbeddedBotQuitException as e:
logging.warning(str(e))
@assign_queue("deferred_work")
class DeferredWorker(QueueProcessingWorker):
"""This queue processor is intended for cases where we want to trigger a
potentially expensive, not urgent, job to be run on a separate
thread from the Django worker that initiated it (E.g. so we that
can provide a low-latency HTTP response or avoid risk of request
timeouts for an operation that could in rare cases take minutes).
"""
# Because these operations have no SLO, and can take minutes,
# remove any processing timeouts
MAX_CONSUME_SECONDS = None
def consume(self, event: Dict[str, Any]) -> None:
start = time.time()
if event["type"] == "mark_stream_messages_as_read":
user_profile = get_user_profile_by_id(event["user_profile_id"])
for recipient_id in event["stream_recipient_ids"]:
count = do_mark_stream_messages_as_read(user_profile, recipient_id)
logger.info(
"Marked %s messages as read for user %s, stream_recipient_id %s",
count,
user_profile.id,
recipient_id,
)
elif event["type"] == "mark_stream_messages_as_read_for_everyone":
# This event is generated by the stream deactivation code path.
batch_size = 100
offset = 0
while True:
messages = Message.objects.filter(
recipient_id=event["stream_recipient_id"]
).order_by("id")[offset : offset + batch_size]
UserMessage.objects.filter(message__in=messages).extra(
where=[UserMessage.where_unread()]
).update(flags=F("flags").bitor(UserMessage.flags.read))
offset += len(messages)
if len(messages) < batch_size:
break
logger.info(
"Marked %s messages as read for all users, stream_recipient_id %s",
offset,
event["stream_recipient_id"],
)
elif event["type"] == "clear_push_device_tokens":
try:
clear_push_device_tokens(event["user_profile_id"])
except PushNotificationBouncerRetryLaterError:
def failure_processor(event: Dict[str, Any]) -> None:
logger.warning(
"Maximum retries exceeded for trigger:%s event:clear_push_device_tokens",
event["user_profile_id"],
)
retry_event(self.queue_name, event, failure_processor)
elif event["type"] == "realm_export":
realm = Realm.objects.get(id=event["realm_id"])
output_dir = tempfile.mkdtemp(prefix="zulip-export-")
export_event = RealmAuditLog.objects.get(id=event["id"])
user_profile = get_user_profile_by_id(event["user_profile_id"])
try:
public_url = export_realm_wrapper(
realm=realm,
output_dir=output_dir,
threads=6,
upload=True,
public_only=True,
delete_after_upload=True,
)
except Exception:
export_event.extra_data = orjson.dumps(
dict(
failed_timestamp=timezone_now().timestamp(),
)
).decode()
export_event.save(update_fields=["extra_data"])
logging.error(
"Data export for %s failed after %s",
user_profile.realm.string_id,
time.time() - start,
)
notify_realm_export(user_profile)
return
assert public_url is not None
# Update the extra_data field now that the export is complete.
export_event.extra_data = orjson.dumps(
dict(
export_path=urllib.parse.urlparse(public_url).path,
)
).decode()
export_event.save(update_fields=["extra_data"])
# Send a private message notification letting the user who
# triggered the export know the export finished.
with override_language(user_profile.default_language):
content = _(
"Your data export is complete and has been uploaded here:\n\n{public_url}"
).format(public_url=public_url)
internal_send_private_message(
sender=get_system_bot(settings.NOTIFICATION_BOT, realm.id),
recipient_user=user_profile,
content=content,
)
# For future frontend use, also notify administrator
# clients that the export happened.
notify_realm_export(user_profile)
logging.info(
"Completed data export for %s in %s",
user_profile.realm.string_id,
time.time() - start,
)
end = time.time()
logger.info("deferred_work processed %s event (%dms)", event["type"], (end - start) * 1000)
@assign_queue("test", is_test_queue=True)
class TestWorker(QueueProcessingWorker):
# This worker allows you to test the queue worker infrastructure without
# creating significant side effects. It can be useful in development or
# for troubleshooting prod/staging. It pulls a message off the test queue
# and appends it to a file in /tmp.
def consume(self, event: Mapping[str, Any]) -> None: # nocoverage
fn = settings.ZULIP_WORKER_TEST_FILE
message = orjson.dumps(event)
logging.info("TestWorker should append this message to %s: %s", fn, message.decode())
with open(fn, "ab") as f:
f.write(message + b"\n")
@assign_queue("noop", is_test_queue=True)
class NoopWorker(QueueProcessingWorker):
"""Used to profile the queue processing framework, in zilencer's queue_rate."""
def __init__(self, max_consume: int = 1000, slow_queries: Sequence[int] = []) -> None:
self.consumed = 0
self.max_consume = max_consume
self.slow_queries: Set[int] = set(slow_queries)
def consume(self, event: Mapping[str, Any]) -> None:
self.consumed += 1
if self.consumed in self.slow_queries:
logging.info("Slow request...")
time.sleep(60)
logging.info("Done!")
if self.consumed >= self.max_consume:
self.stop()
@assign_queue("noop_batch", is_test_queue=True)
class BatchNoopWorker(LoopQueueProcessingWorker):
"""Used to profile the queue processing framework, in zilencer's queue_rate."""
batch_size = 500
def __init__(self, max_consume: int = 1000, slow_queries: Sequence[int] = []) -> None:
self.consumed = 0
self.max_consume = max_consume
self.slow_queries: Set[int] = set(slow_queries)
def consume_batch(self, events: List[Dict[str, Any]]) -> None:
event_numbers = set(range(self.consumed + 1, self.consumed + 1 + len(events)))
found_slow = self.slow_queries & event_numbers
if found_slow:
logging.info("%d slow requests...", len(found_slow))
time.sleep(60 * len(found_slow))
logging.info("Done!")
self.consumed += len(events)
if self.consumed >= self.max_consume:
self.stop()
| 40.049954 | 132 | 0.630711 |
a2233e9eef62998aedd73303d137a9dff2d2ee27 | 6,141 | py | Python | examples/viewport1.py | yang69can/pyngl | 78a7040ce9de4b7a442b0c3b5faecccab2f01426 | [
"Apache-2.0"
] | 125 | 2016-11-24T09:04:28.000Z | 2022-01-22T14:06:56.000Z | examples/viewport1.py | yang69can/pyngl | 78a7040ce9de4b7a442b0c3b5faecccab2f01426 | [
"Apache-2.0"
] | 52 | 2017-11-08T23:23:02.000Z | 2022-03-20T03:17:39.000Z | examples/viewport1.py | yang69can/pyngl | 78a7040ce9de4b7a442b0c3b5faecccab2f01426 | [
"Apache-2.0"
] | 25 | 2017-08-27T10:50:43.000Z | 2022-01-29T14:56:05.000Z | #
# File:
# viewport1.py
#
# Synopsis:
# Illustrates the difference between the viewport and bounding box.
#
# Categories:
# viewport
# polylines
# polymarkers
# text
#
# Author:
# Mary Haley
#
# Date of initial publication:
# August 2010
#
# Description:
# This example shows how to raw primitives and text using
# NDC coordinates. The draw_ndc_grid function is used as
# a tool to help determine which coordinates to use.
# Effects illustrated:
# o Drawing a simple filled contour plot
# o Drawing a box around a contour plot viewport
# o Drawing the bounding box
# o Changing the color and thickness of polylines
# o Drawing polylines, polymarkers, and text in NDC space
# o Using "getvalues" to retrieve resource values
# o Generating dummy data
#
# Output:
# A single visualization with the viewport and bounding box
# information included.
#
# Notes:
#
from __future__ import print_function
import numpy, Ngl
#********************************************************************
# Draw a box around the viewport of the given object..
#********************************************************************
def draw_vp_box(wks,plot):
    """Outline the viewport of `plot` on workstation `wks`, mark its
    upper-left (vpXF,vpYF) corner, and label all four sides."""
    # Query the drawable object's viewport (NDC coordinates).
    x_left = Ngl.get_float(plot,"vpXF")
    y_top = Ngl.get_float(plot,"vpYF")
    width = Ngl.get_float(plot,"vpWidthF")
    height = Ngl.get_float(plot,"vpHeightF")
    print("Viewport x,y,width,height =",x_left,y_top,width,height)
    # Closed polyline tracing the viewport rectangle.
    box_x = [x_left,x_left+width,x_left+width,x_left,x_left]
    box_y = [y_top,y_top,y_top-height,y_top-height,y_top]
    # Marker resources for the corner dot.
    mkres = Ngl.Resources()
    mkres.gsMarkerIndex = 16      # filled dot
    mkres.gsMarkerSizeF = 0.02    # larger than default
    mkres.gsMarkerColor = "ForestGreen"
    # Mark the (vpXF,vpYF) corner.
    Ngl.polymarker_ndc(wks,x_left,y_top,mkres)
    # Line resources for the viewport outline.
    lnres = Ngl.Resources()
    lnres.gsLineColor = "NavyBlue"
    lnres.gsLineThicknessF = 3.5    # 3.5 times as thick
    Ngl.polyline_ndc(wks,box_x,box_y,lnres)
    # Text resources for the labels.
    txres = Ngl.Resources()
    txres.txJust = "CenterLeft"
    txres.txFontHeightF = 0.015
    txres.txFontColor = "ForestGreen"
    txres.txBackgroundFillColor = "white"
    # Label the corner marker.
    Ngl.text_ndc(wks,"(vpXF,vpYF)",x_left+0.03,y_top,txres)
    # Label the four sides of the viewport box in black.
    txres.txFontColor = "black"
    txres.txJust = "CenterLeft"
    Ngl.text_ndc(wks,"viewport",x_left+width/2.,y_top-height,txres)
    Ngl.text_ndc(wks,"viewport",x_left+width/2.,y_top,txres)
    txres.txAngleF = 90.
    Ngl.text_ndc(wks,"viewport",x_left,y_top-height/2.,txres)
    Ngl.text_ndc(wks,"viewport",x_left+width,y_top-height/2.,txres)
    return
#********************************************************************
# Draw a box around the bounding box of the given object..
#********************************************************************
def draw_bb_box(wks,plot):
    """Outline the bounding box of `plot` on workstation `wks` and label
    all four sides."""
    # get_bounding_box returns [top, bottom, left, right] in NDC.
    bbox = Ngl.get_bounding_box(plot)
    top, bot, lft, rgt = bbox[0], bbox[1], bbox[2], bbox[3]
    print("Bounding box top,bottom,left,right =",top,bot,lft,rgt)
    # Closed polyline tracing the bounding-box rectangle.
    xpts = [rgt,lft,lft,rgt,rgt]
    ypts = [top,top,bot,bot,top]
    # Line resources for the outline.
    lnres = Ngl.Resources()
    lnres.gsLineColor = "Brown"
    lnres.gsLineThicknessF = 2.5
    # Text resources for the labels.
    txres = Ngl.Resources()
    txres.txFontHeightF = 0.015
    txres.txBackgroundFillColor = "white"
    txres.txJust = "CenterLeft"
    Ngl.polyline_ndc(wks,xpts,ypts,lnres)
    # Label the four sides of the bounding box.
    Ngl.text_ndc(wks,"bounding box",lft+0.05,bot,txres)
    txres.txJust = "CenterRight"
    Ngl.text_ndc(wks,"bounding box",rgt-0.05,top,txres)
    txres.txAngleF = 90.
    txres.txJust = "CenterRight"
    Ngl.text_ndc(wks,"bounding box",lft,top-0.05,txres)
    txres.txJust = "CenterLeft"
    Ngl.text_ndc(wks,"bounding box",rgt,bot+0.05,txres)
    return
#********************************************************************
# Main code
#********************************************************************
wks_type = "png"
wks = Ngl.open_wks(wks_type,"viewport1")
# Add some named colors. This is no longer needed in PyNGL 1.5.0
#forest_green = numpy.array([ 34, 139, 34])/255.
#navy_blue = numpy.array([ 0, 0, 128])/255.
#brown = numpy.array([165, 42, 42])/255.
#ig = Ngl.new_color(wks,forest_green[0],forest_green[1],forest_green[2])
#ib = Ngl.new_color(wks,navy_blue[0], navy_blue[1], navy_blue[2])
#ir = Ngl.new_color(wks,brown[0], brown[1], brown[2])
# Generate some dummy data.
cmin = -19.23
cmax = 16.81
data = Ngl.generate_2d_array([100,100], 10, 10, cmin, cmax)
nice_min,nice_max,nice_spc = Ngl.nice_cntr_levels(cmin,cmax,cint=3)
# Read in color map so we can subset it
cmap = Ngl.read_colormap_file("nrl_sirkes")
# Set up resources for a contour plot.
cnres = Ngl.Resources()
cnres.nglMaximize = True # Maximize plot in frame
cnres.nglDraw = False # Don't draw plot
cnres.nglFrame = False # Don't advance the frame
cnres.cnFillOn = True # Turn on contour fill
cnres.cnFillPalette = cmap[:-3,:]
cnres.cnLevelSelectionMode = "ManualLevels"
cnres.cnLevelSpacingF = nice_spc
cnres.cnMinLevelValF = nice_min
cnres.cnMaxLevelValF = nice_max
cnres.lbOrientation = "Vertical" # Default is horizontal
cnres.tiMainString = "This is a title"
cnres.tiXAxisString = "X axis"
cnres.tiYAxisString = "Y axis"
contourplot = Ngl.contour(wks,data,cnres)
# Draw plot with viewport and bounding boxes.
Ngl.draw(contourplot)
draw_bb_box(wks,contourplot)
draw_vp_box(wks,contourplot)
# Advance frame.
Ngl.frame(wks)
Ngl.end()
| 29.81068 | 72 | 0.616675 |
43e4f494d2742ee25a9e783041ca3a1f7e64c889 | 5,569 | py | Python | kedro/io/memory_data_set.py | louisdecharson/kedro | 5816b6c0bcbb663b25cf9e40a237ebe812a91ef5 | [
"Apache-2.0"
] | 4,923 | 2019-05-16T17:29:23.000Z | 2022-01-10T06:01:02.000Z | kedro/io/memory_data_set.py | ManishS6/kedro | 65e1c10050ba1b3c45227db8e28a275172ea1ea8 | [
"Apache-2.0"
] | 841 | 2019-05-20T16:58:11.000Z | 2022-01-09T09:36:26.000Z | kedro/io/memory_data_set.py | ManishS6/kedro | 65e1c10050ba1b3c45227db8e28a275172ea1ea8 | [
"Apache-2.0"
] | 653 | 2019-05-19T10:05:22.000Z | 2022-01-06T13:48:23.000Z | # Copyright 2021 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
"""``MemoryDataSet`` is a data set implementation which handles in-memory data.
"""
import copy
from typing import Any, Dict
from kedro.io.core import AbstractDataSet, DataSetError
# Sentinel marking "no data stored"; lets None itself be valid user data.
_EMPTY = object()


class MemoryDataSet(AbstractDataSet):
    """``MemoryDataSet`` loads and saves data from/to an in-memory
    Python object.

    Example:
    ::

        >>> from kedro.io import MemoryDataSet
        >>> import pandas as pd
        >>>
        >>> data = pd.DataFrame({'col1': [1, 2], 'col2': [4, 5],
        >>>                      'col3': [5, 6]})
        >>> data_set = MemoryDataSet(data=data)
        >>>
        >>> loaded_data = data_set.load()
        >>> assert loaded_data.equals(data)
        >>>
        >>> new_data = pd.DataFrame({'col1': [1, 2], 'col2': [4, 5]})
        >>> data_set.save(new_data)
        >>> reloaded_data = data_set.load()
        >>> assert reloaded_data.equals(new_data)
    """

    def __init__(self, data: Any = _EMPTY, copy_mode: str = None):
        """Creates a new instance of ``MemoryDataSet`` pointing to the
        provided Python object.

        Args:
            data: Python object containing the data.
            copy_mode: The copy mode used to copy the data. Possible
                values are: "deepcopy", "copy" and "assign". If not
                provided, it is inferred based on the data type.
        """
        self._data = _EMPTY
        self._copy_mode = copy_mode
        if data is not _EMPTY:
            self._save(data)

    def _load(self) -> Any:
        if self._data is _EMPTY:
            raise DataSetError("Data for MemoryDataSet has not been saved yet.")
        # Copy on the way out so callers cannot mutate the stored object.
        mode = self._copy_mode or _infer_copy_mode(self._data)
        return _copy_with_mode(self._data, copy_mode=mode)

    def _save(self, data: Any):
        # Copy on the way in so later mutation of `data` cannot leak in.
        mode = self._copy_mode or _infer_copy_mode(data)
        self._data = _copy_with_mode(data, copy_mode=mode)

    def _exists(self) -> bool:
        return self._data is not _EMPTY

    def _release(self) -> None:
        self._data = _EMPTY

    def _describe(self) -> Dict[str, Any]:
        if self._data is _EMPTY:
            # the string representation of datasets leaves out __init__
            # arguments that are empty/None, equivalent here is _EMPTY
            return dict(data=None)  # pragma: no cover
        return dict(data=f"<{type(self._data).__name__}>")
def _infer_copy_mode(data: Any) -> str:
"""Infers the copy mode to use given the data type.
Args:
data: The data whose type will be used to infer the copy mode.
Returns:
One of "copy", "assign" or "deepcopy" as the copy mode to use.
"""
# pylint: disable=import-outside-toplevel
try:
import pandas as pd
except ImportError: # pragma: no cover
pd = None # pragma: no cover
try:
import numpy as np
except ImportError: # pragma: no cover
np = None # pragma: no cover
if pd and isinstance(data, pd.DataFrame) or np and isinstance(data, np.ndarray):
copy_mode = "copy"
elif type(data).__name__ == "DataFrame":
copy_mode = "assign"
else:
copy_mode = "deepcopy"
return copy_mode
def _copy_with_mode(data: Any, copy_mode: str) -> Any:
"""Returns the copied data using the copy mode specified.
If no copy mode is provided, then it is inferred based on the type of the data.
Args:
data: The data to copy.
copy_mode: The copy mode to use, one of "deepcopy", "copy" and "assign".
Raises:
DataSetError: If copy_mode is specified, but isn't valid
(i.e: not one of deepcopy, copy, assign)
Returns:
The data copied according to the specified copy mode.
"""
if copy_mode == "deepcopy":
copied_data = copy.deepcopy(data)
elif copy_mode == "copy":
copied_data = data.copy()
elif copy_mode == "assign":
copied_data = data
else:
raise DataSetError(
f"Invalid copy mode: {copy_mode}. "
f"Possible values are: deepcopy, copy, assign."
)
return copied_data
| 34.590062 | 84 | 0.649309 |
2b14ca4d7ce0532f7b7b8f256c4af41ac864cd60 | 3,616 | py | Python | lib/nms/nms.py | StanfordHCI/human-pose-estimation.pytorch | 431a078d0c46854e567f83a9d019170811d0dc48 | [
"MIT"
] | null | null | null | lib/nms/nms.py | StanfordHCI/human-pose-estimation.pytorch | 431a078d0c46854e567f83a9d019170811d0dc48 | [
"MIT"
] | null | null | null | lib/nms/nms.py | StanfordHCI/human-pose-estimation.pytorch | 431a078d0c46854e567f83a9d019170811d0dc48 | [
"MIT"
] | null | null | null | # ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Modified from py-faster-rcnn (https://github.com/rbgirshick/py-faster-rcnn)
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from .cpu_nms import cpu_nms
from .gpu_nms import gpu_nms
def py_nms_wrapper(thresh):
    """Return a callable applying pure-Python NMS at the given threshold."""
    return lambda dets: nms(dets, thresh)
def cpu_nms_wrapper(thresh):
    """Return a callable applying the Cython CPU NMS at the given threshold."""
    return lambda dets: cpu_nms(dets, thresh)
def gpu_nms_wrapper(thresh, device_id):
    """Return a callable applying GPU NMS at the given threshold on `device_id`."""
    return lambda dets: gpu_nms(dets, thresh, device_id)
def nms(dets, thresh):
    """Greedy non-maximum suppression.

    Repeatedly keeps the highest-scoring box and discards every remaining
    box whose IoU with it exceeds `thresh`.

    :param dets: [[x1, y1, x2, y2, score]] array
    :param thresh: retain overlap < thresh
    :return: list of indexes to keep
    """
    if dets.shape[0] == 0:
        return []
    left, top = dets[:, 0], dets[:, 1]
    right, bottom = dets[:, 2], dets[:, 3]
    confidences = dets[:, 4]
    # +1 convention: pixel-inclusive box widths/heights.
    box_areas = (right - left + 1) * (bottom - top + 1)
    # Candidate indexes, highest score first.
    remaining = confidences.argsort()[::-1]
    selected = []
    while remaining.size > 0:
        best = remaining[0]
        selected.append(best)
        others = remaining[1:]
        # Intersection rectangle of `best` with every other candidate.
        ix1 = np.maximum(left[best], left[others])
        iy1 = np.maximum(top[best], top[others])
        ix2 = np.minimum(right[best], right[others])
        iy2 = np.minimum(bottom[best], bottom[others])
        iw = np.maximum(0.0, ix2 - ix1 + 1)
        ih = np.maximum(0.0, iy2 - iy1 + 1)
        intersection = iw * ih
        iou = intersection / (box_areas[best] + box_areas[others] - intersection)
        # Keep only candidates with small enough overlap.
        remaining = others[np.where(iou <= thresh)[0]]
    return selected
def oks_iou(g, d, a_g, a_d, sigmas=None, in_vis_thre=None):
    """Object Keypoint Similarity between one ground truth and N detections.

    :param g: flat [x, y, v] keypoint array for the ground truth
    :param d: (N, 3*K) array of detection keypoints
    :param a_g: ground-truth area
    :param a_d: per-detection areas (indexable, length N)
    :param sigmas: optional per-keypoint falloff; defaults to COCO values
    :param in_vis_thre: if given, only keypoints visible (> thre) in BOTH
        ground truth and detection contribute
    :return: (N,) array of OKS values
    """
    if not isinstance(sigmas, np.ndarray):
        sigmas = np.array([.26, .25, .25, .35, .35, .79, .79, .72, .72, .62, .62, 1.07, 1.07, .87, .87, .89, .89, .89, .89, .89, .89]) / 10.0
        sigmas = sigmas[:len(g) // 3]
    # Renamed from `vars`, which shadowed the builtin.
    variances = (sigmas * 2) ** 2
    xg = g[0::3]
    yg = g[1::3]
    vg = g[2::3]
    ious = np.zeros((d.shape[0]))
    for n_d in range(0, d.shape[0]):
        xd = d[n_d, 0::3]
        yd = d[n_d, 1::3]
        vd = d[n_d, 2::3]
        dx = xd - xg
        dy = yd - yg
        # Normalized squared distance per keypoint (np.spacing avoids /0).
        e = (dx ** 2 + dy ** 2) / variances / ((a_g + a_d[n_d]) / 2 + np.spacing(1)) / 2
        if in_vis_thre is not None:
            # BUG FIX: this previously read
            #   ind = list(vg > in_vis_thre) and list(vd > in_vis_thre)
            # where `and` on two lists is Python's boolean operator, not an
            # elementwise AND: it simply yields the second list whenever the
            # first is non-empty, ignoring ground-truth visibility entirely.
            ind = np.logical_and(vg > in_vis_thre, vd > in_vis_thre)
            e = e[ind]
        ious[n_d] = np.sum(np.exp(-e)) / e.shape[0] if e.shape[0] != 0 else 0.0
    return ious
def oks_nms(kpts_db, thresh, sigmas=None, in_vis_thre=None):
    """Greedy non-maximum suppression over keypoint detections, using
    Object Keypoint Similarity (OKS) as the overlap measure.

    :param kpts_db: list of dicts with 'score', 'keypoints' and 'area'
    :param thresh: retain overlap < thresh
    :return: list of indexes to keep
    """
    if len(kpts_db) == 0:
        return []
    scores = np.array([entry['score'] for entry in kpts_db])
    kpts = np.array([entry['keypoints'].flatten() for entry in kpts_db])
    areas = np.array([entry['area'] for entry in kpts_db])
    # Candidate indexes, highest score first.
    candidates = scores.argsort()[::-1]
    keep = []
    while candidates.size > 0:
        best = candidates[0]
        keep.append(best)
        rest = candidates[1:]
        overlaps = oks_iou(kpts[best], kpts[rest], areas[best], areas[rest], sigmas, in_vis_thre)
        # Drop every candidate too similar to the one just kept.
        candidates = rest[np.where(overlaps <= thresh)[0]]
    return keep
ebef44363174a9d690d287c89c0c4bbfb6a3f93c | 333 | py | Python | Assignments/Functions/Exercise/03. Characters in Range.py | KaloyankerR/python-fundamentals-repository | b8e69523ea7e6aa352e8398f0202e283374a0f7c | [
"MIT"
] | null | null | null | Assignments/Functions/Exercise/03. Characters in Range.py | KaloyankerR/python-fundamentals-repository | b8e69523ea7e6aa352e8398f0202e283374a0f7c | [
"MIT"
] | null | null | null | Assignments/Functions/Exercise/03. Characters in Range.py | KaloyankerR/python-fundamentals-repository | b8e69523ea7e6aa352e8398f0202e283374a0f7c | [
"MIT"
] | null | null | null | def characters_between(start: str, end: str):
start = ord(start)
end = ord(end)
if start < end:
for letter in range(start + 1, end):
print(chr(letter), end=' ')
else:
for i in range(end + 1, start, -1):
print(chr(i), end=' ')
a = input()
b = input()
characters_between(a, b)
| 22.2 | 45 | 0.534535 |
4a5c9bc4ff3587abe4f0ff1c44a720dabae09493 | 2,947 | py | Python | test/test_tcp.py | ydy1234/vpp | 9f9e969f149e40044fee9d2e47b7dd96f3ae4dfa | [
"Apache-2.0"
] | 1 | 2020-05-21T16:26:02.000Z | 2020-05-21T16:26:02.000Z | test/test_tcp.py | ydy1234/vpp | 9f9e969f149e40044fee9d2e47b7dd96f3ae4dfa | [
"Apache-2.0"
] | 2 | 2018-09-10T21:43:09.000Z | 2021-06-01T22:36:51.000Z | test/test_tcp.py | ydy1234/vpp | 9f9e969f149e40044fee9d2e47b7dd96f3ae4dfa | [
"Apache-2.0"
] | 1 | 2018-09-04T09:01:23.000Z | 2018-09-04T09:01:23.000Z | #!/usr/bin/env python
import unittest
from framework import VppTestCase, VppTestRunner
from vpp_ip_route import VppIpTable, VppIpRoute, VppRoutePath
class TestTCP(VppTestCase):
    """ TCP Test Case """

    @classmethod
    def setUpClass(cls):
        super(TestTCP, cls).setUpClass()

    def setUp(self):
        """Enable the session layer and create two loopbacks, each in its own
        VRF/app namespace, so client and server traffic must be routed."""
        super(TestTCP, self).setUp()
        self.vapi.session_enable_disable(is_enabled=1)
        self.create_loopback_interfaces(2)

        table_id = 0
        for i in self.lo_interfaces:
            i.admin_up()

            # Table 0 always exists; create the additional FIB tables only.
            if table_id != 0:
                tbl = VppIpTable(self, table_id)
                tbl.add_vpp_config()

            i.set_table_ip4(table_id)
            i.config_ip4()
            table_id += 1

        # Configure namespaces: one application namespace per loopback.
        self.vapi.app_namespace_add(namespace_id="0",
                                    sw_if_index=self.loop0.sw_if_index)
        self.vapi.app_namespace_add(namespace_id="1",
                                    sw_if_index=self.loop1.sw_if_index)

    def tearDown(self):
        """Undo setUp: deconfigure loopbacks and disable the session layer."""
        for i in self.lo_interfaces:
            i.unconfig_ip4()
            i.set_table_ip4(0)
            i.admin_down()
        self.vapi.session_enable_disable(is_enabled=0)
        super(TestTCP, self).tearDown()

    def test_tcp_unittest(self):
        """ TCP Unit Tests """
        # Run VPP's built-in TCP unit tests; the CLI reply mentions "failed"
        # on any failure.
        error = self.vapi.cli("test tcp all")
        if error:
            self.logger.critical(error)
            self.assertEqual(error.find("failed"), -1)

    def test_tcp_transfer(self):
        """ TCP echo client/server transfer """

        # Add inter-table routes so the two VRFs can reach each other.
        ip_t01 = VppIpRoute(self, self.loop1.local_ip4, 32,
                            [VppRoutePath("0.0.0.0",
                                          0xffffffff,
                                          nh_table_id=1)])
        ip_t10 = VppIpRoute(self, self.loop0.local_ip4, 32,
                            [VppRoutePath("0.0.0.0",
                                          0xffffffff,
                                          nh_table_id=0)], table_id=1)
        ip_t01.add_vpp_config()
        ip_t10.add_vpp_config()

        # Start builtin server and client; the client transfers 10 MB and
        # verifies the received bytes (test-bytes).
        uri = "tcp://" + self.loop0.local_ip4 + "/1234"
        error = self.vapi.cli("test echo server appns 0 fifo-size 4 uri " +
                              uri)
        if error:
            self.logger.critical(error)
            self.assertEqual(error.find("failed"), -1)
        error = self.vapi.cli("test echo client mbytes 10 appns 1 " +
                              "fifo-size 4 no-output test-bytes " +
                              "syn-timeout 2 uri " + uri)
        if error:
            self.logger.critical(error)
            self.assertEqual(error.find("failed"), -1)

        # Delete inter-table routes
        ip_t01.remove_vpp_config()
        ip_t10.remove_vpp_config()
# Allow running this module directly through the VPP test runner.
if __name__ == '__main__':
    unittest.main(testRunner=VppTestRunner)
| 32.032609 | 75 | 0.538174 |
3ddc177ab2f284445c372d797bdcd0d5c7ec3e99 | 1,524 | py | Python | extract-windows.py | maxsam4/ethereum-debugging-helper | 3c51468b540ee94311ecc6873c856c0895a6e740 | [
"MIT"
] | 5 | 2018-09-01T19:38:51.000Z | 2022-02-01T23:38:49.000Z | extract-windows.py | maxsam4/ethereum-debugging-helper | 3c51468b540ee94311ecc6873c856c0895a6e740 | [
"MIT"
] | null | null | null | extract-windows.py | maxsam4/ethereum-debugging-helper | 3c51468b540ee94311ecc6873c856c0895a6e740 | [
"MIT"
] | null | null | null | import json
import glob
import json
import os

from sha3 import keccak_256

# For every Truffle build artifact in build/contracts, dump its ABI and
# bytecode under extract/ and append a JavaScript lookup line
#   functionHashes.set('<4-byte selector>', '<signature>');
# for each public function to extract/functionHashes.txt.
#
# Fixes over the original version: files are now actually closed (the old
# code used `f.close` / `functionHashes.close` without parentheses, which is
# a no-op attribute access), the Python-2-only `print >> fh` statement is
# replaced with a Python 3 call, and paths use os.path.join instead of
# hard-coded backslashes (the old glob pattern 'build\contracts\*.json' also
# contained invalid escape sequences).
with open(os.path.join('extract', 'functionHashes.txt'), 'w+') as function_hashes:
    for path in glob.glob(os.path.join('build', 'contracts', '*.json')):
        with open(path) as reader:
            artifact = json.load(reader)
        # Contract name = file name without directory or .json extension.
        contract_name = os.path.splitext(os.path.basename(path))[0]

        with open(os.path.join('extract', contract_name + '.abi.txt'), 'w+') as f:
            f.write(json.dumps(artifact['abi']))
        with open(os.path.join('extract', contract_name + '.bytecode.txt'), 'w+') as f:
            f.write(artifact['bytecode'])

        for fields in artifact['abi']:
            if fields['type'] != 'function':
                continue
            # Canonical signature, e.g. transfer(address,uint256)
            signature = '{0}({1})'.format(
                fields['name'],
                ','.join(inp['type'] for inp in fields['inputs']))
            # The 4-byte selector is the first 8 hex chars of keccak256(sig).
            selector = keccak_256(signature.encode('utf-8')).hexdigest()[:8]
            print("functionHashes.set('" + selector + "', '" + signature + "');",
                  file=function_hashes)
| 31.75 | 81 | 0.582021 |
737b763a59a48f695e847d9cbbbb350a8881bc3f | 4,556 | py | Python | qiskit/opflow/list_ops/tensored_op.py | artmenlope/qiskit-terra | 7e02ac9ed29cfed359ae295bcf19f4a6e8b912fd | [
"Apache-2.0"
] | null | null | null | qiskit/opflow/list_ops/tensored_op.py | artmenlope/qiskit-terra | 7e02ac9ed29cfed359ae295bcf19f4a6e8b912fd | [
"Apache-2.0"
] | null | null | null | qiskit/opflow/list_ops/tensored_op.py | artmenlope/qiskit-terra | 7e02ac9ed29cfed359ae295bcf19f4a6e8b912fd | [
"Apache-2.0"
] | null | null | null | # This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" TensoredOp Class """
from functools import partial, reduce
from typing import List, Union, cast
import numpy as np
from qiskit.circuit import ParameterExpression, QuantumCircuit
from qiskit.opflow.exceptions import OpflowError
from qiskit.opflow.list_ops.list_op import ListOp
from qiskit.opflow.operator_base import OperatorBase
from qiskit.quantum_info import Statevector
class TensoredOp(ListOp):
    """A class for lazily representing tensor products of Operators. Often Operators cannot be
    efficiently tensored to one another, but may be manipulated further so that they can be
    later. This class holds logic to indicate that the Operators in ``oplist`` are meant to
    be tensored together, and therefore if they reach a point in which they can be, such as after
    conversion to QuantumCircuits, they can be reduced by tensor product."""

    def __init__(
        self,
        oplist: List[OperatorBase],
        coeff: Union[complex, ParameterExpression] = 1.0,
        abelian: bool = False,
    ) -> None:
        """
        Args:
            oplist: The Operators being tensored.
            coeff: A coefficient multiplying the operator
            abelian: Indicates whether the Operators in ``oplist`` are known to mutually commute.
        """
        # combo_fn folds the factors' matrix representations with np.kron,
        # i.e. the matrix of the tensor product.
        super().__init__(oplist, combo_fn=partial(reduce, np.kron), coeff=coeff, abelian=abelian)

    @property
    def num_qubits(self) -> int:
        # A tensor product acts on the union of all factors' qubits.
        return sum(op.num_qubits for op in self.oplist)

    @property
    def distributive(self) -> bool:
        # Tensor products do not distribute over the list members.
        return False

    def _expand_dim(self, num_qubits: int) -> "TensoredOp":
        """Appends I ^ num_qubits to ``oplist``. Choice of PauliOp as
        identity is arbitrary and can be substituted for other PrimitiveOp identity.

        Returns:
            TensoredOp expanded with identity operator.
        """
        # pylint: disable=cyclic-import
        from ..operator_globals import I

        return TensoredOp(self.oplist + [I ^ num_qubits], coeff=self.coeff)

    def tensor(self, other: OperatorBase) -> OperatorBase:
        # Tensoring two TensoredOps flattens them into a single factor list.
        if isinstance(other, TensoredOp):
            return TensoredOp(self.oplist + other.oplist, coeff=self.coeff * other.coeff)
        return TensoredOp(self.oplist + [other], coeff=self.coeff)

    # TODO eval should partial trace the input into smaller StateFns each of size
    # op.num_qubits for each op in oplist. Right now just works through matmul.
    def eval(
        self, front: Union[str, dict, np.ndarray, OperatorBase, Statevector] = None
    ) -> Union[OperatorBase, complex]:
        if self._is_empty():
            return 0.0
        return cast(Union[OperatorBase, complex], self.to_matrix_op().eval(front=front))

    # Try collapsing list or trees of tensor products.
    # TODO do this smarter
    def reduce(self) -> OperatorBase:
        reduced_ops = [op.reduce() for op in self.oplist]
        if self._is_empty():
            return self.__class__([], coeff=self.coeff, abelian=self.abelian)
        # Fold all factors together via pairwise tensor, then re-apply coeff.
        reduced_ops = reduce(lambda x, y: x.tensor(y), reduced_ops) * self.coeff
        if isinstance(reduced_ops, ListOp) and len(reduced_ops.oplist) == 1:
            return reduced_ops.oplist[0]
        else:
            return cast(OperatorBase, reduced_ops)

    def to_circuit(self) -> QuantumCircuit:
        """Returns the quantum circuit, representing the tensored operator.

        Returns:
            The circuit representation of the tensored operator.

        Raises:
            OpflowError: for operators where a single underlying circuit can not be produced.
        """
        circuit_op = self.to_circuit_op()
        # pylint: disable=cyclic-import
        from ..state_fns.circuit_state_fn import CircuitStateFn
        from ..primitive_ops.primitive_op import PrimitiveOp

        if isinstance(circuit_op, (PrimitiveOp, CircuitStateFn)):
            return circuit_op.to_circuit()
        raise OpflowError(
            "Conversion to_circuit supported only for operators, where a single "
            "underlying circuit can be produced."
        )
| 39.964912 | 97 | 0.684372 |
27af017319268734dfb0be284fd7f9670301ad0a | 4,275 | py | Python | tests/integration-tests/tests/multiple_nics/test_multiple_nics.py | enrico-usai/aws-parallelcluster | caed724204c7db00424fabd803aa07d9fac2d962 | [
"Apache-2.0"
] | 1 | 2021-07-10T13:59:46.000Z | 2021-07-10T13:59:46.000Z | tests/integration-tests/tests/multiple_nics/test_multiple_nics.py | QPC-database/aws-parallelcluster | 8c2e9595ca171340df21695c27d85dc00f19d3e4 | [
"Apache-2.0"
] | 14 | 2022-03-11T10:26:58.000Z | 2022-03-28T10:40:43.000Z | tests/integration-tests/tests/multiple_nics/test_multiple_nics.py | QPC-database/aws-parallelcluster | 8c2e9595ca171340df21695c27d85dc00f19d3e4 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file.
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import pytest
from assertpy import assert_that
from remote_command_executor import RemoteCommandExecutor
from utils import get_compute_nodes_instance_ids
from tests.common.schedulers_common import get_scheduler_commands
@pytest.mark.regions(["us-east-1"])
@pytest.mark.instances(["p4d.24xlarge"])
@pytest.mark.schedulers(["slurm"])
@pytest.mark.usefixtures("os", "instance", "scheduler")
def test_multiple_nics(scheduler, region, pcluster_config_reader, clusters_factory):
    """Create a cluster on a multi-NIC instance type and verify that every
    private IP is reachable on both the head node and a compute node."""
    cluster_config = pcluster_config_reader()
    cluster = clusters_factory(cluster_config)
    remote_command_executor = RemoteCommandExecutor(cluster)
    scheduler_commands = get_scheduler_commands(scheduler, remote_command_executor)

    _test_head_node_nics(remote_command_executor, region)
    _test_compute_node_nics(cluster, region, remote_command_executor, scheduler_commands)
def _get_private_ip_addresses(instance_id, region, remote_command_executor):
result = remote_command_executor.run_remote_command(
"aws ec2 describe-instances --instance-id {0} --region {1} "
'--query "Reservations[0].Instances[0].NetworkInterfaces[*].PrivateIpAddresses[*].PrivateIpAddress" '
"--output text".format(instance_id, region)
)
return result.stdout.strip().split("\n")
def _test_head_node_nics(remote_command_executor, region):
    # On the head node we just check that all the private IPs have been assigned to NICs
    # Fetch an IMDSv2 session token, then use it to read this instance's id.
    token = remote_command_executor.run_remote_command(
        "curl --retry 3 --retry-delay 0 --fail -s -X PUT 'http://169.254.169.254/latest/api/token' "
        "-H 'X-aws-ec2-metadata-token-ttl-seconds: 300'"
    ).stdout
    head_node_instance_id = remote_command_executor.run_remote_command(
        f'curl --retry 3 --retry-delay 0 --fail -s -H "X-aws-ec2-metadata-token: {token}" '
        "http://169.254.169.254/latest/meta-data/instance-id"
    ).stdout
    head_node_ip_addresses = _get_private_ip_addresses(head_node_instance_id, region, remote_command_executor)
    # Each EC2-assigned private IP must appear as an `inet` entry in `ip a`.
    ip_a_result = remote_command_executor.run_remote_command("ip a").stdout
    for ip_address in head_node_ip_addresses:
        assert_that(ip_a_result).matches(".* inet {0}.*".format(ip_address))
def _test_compute_node_nics(cluster, region, remote_command_executor, scheduler_commands):
    """Run the per-NIC connectivity checks against the first compute node."""
    compute_instance_id = get_compute_nodes_instance_ids(cluster.cfn_name, region)[0]
    # Get compute node's IP addresses
    compute_ip_addresses = _get_private_ip_addresses(compute_instance_id, region, remote_command_executor)
    for ip_address in compute_ip_addresses:
        _test_compute_node_nic(ip_address, remote_command_executor, scheduler_commands)
def _test_compute_node_nic(ip_address, remote_command_executor, scheduler_commands):
    """Check one compute-node private IP: ping and ssh to it from the head
    node, then ping out from the compute node bound to that interface."""
    # ping test from head node
    result = remote_command_executor.run_remote_command("ping -c 5 {0}".format(ip_address))
    assert_that(result.stdout).matches(".*5 packets transmitted, 5 received, 0% packet loss,.*")
    # ssh test from head node
    result = remote_command_executor.run_remote_command(
        "ssh -o StrictHostKeyChecking=no -q {0} echo Hello".format(ip_address)
    )
    assert_that(result.stdout).matches("Hello")
    # ping test from compute node: submit through the scheduler with -I to
    # force the source interface, writing output to the shared filesystem.
    result = scheduler_commands.submit_command("ping -I {0} -c 5 amazon.com > /shared/ping_{0}.out".format(ip_address))
    job_id = scheduler_commands.assert_job_submitted(result.stdout)
    scheduler_commands.wait_job_completed(job_id)
    scheduler_commands.assert_job_succeeded(job_id)
    result = remote_command_executor.run_remote_command("cat /shared/ping_{0}.out".format(ip_address))
    assert_that(result.stdout).matches(".*5 packets transmitted, 5 received, 0% packet loss,.*")
| 49.709302 | 119 | 0.765614 |
419f6e2a681e795bd5de024d2a907dafed549f17 | 811 | py | Python | Speedo/plugins/cricket.py | aviskumar/speedo | 758e8ac1fdeeb0b72c3a57742032ca5c79f0b2fa | [
"BSD-3-Clause"
] | null | null | null | Speedo/plugins/cricket.py | aviskumar/speedo | 758e8ac1fdeeb0b72c3a57742032ca5c79f0b2fa | [
"BSD-3-Clause"
] | null | null | null | Speedo/plugins/cricket.py | aviskumar/speedo | 758e8ac1fdeeb0b72c3a57742032ca5c79f0b2fa | [
"BSD-3-Clause"
] | 3 | 2021-10-12T08:17:01.000Z | 2021-12-21T01:17:54.000Z | import urllib.request
from bs4 import BeautifulSoup
from . import *
@speedo.on(Speedo_cmd(pattern="cs$"))
@speedo.on(sudo_cmd(pattern="cs$", allow_sudo=True))
async def _(event):
    """Handle the ``cs`` command: fetch Cricinfo's live-scores RSS feed and
    edit the triggering message with every match's summary line."""
    if event.fwd_from:
        # Ignore forwarded copies of the command.
        return
    score_page = "http://static.cricinfo.com/rss/livescores.xml"
    page = urllib.request.urlopen(score_page)
    soup = BeautifulSoup(page, "html.parser")
    # Each <description> element in the feed holds one match summary.
    result = soup.find_all("description")
    Sed = ""
    for match in result:
        Sed += match.get_text() + "\n\n"
    await event.edit(
        f"<b><u>Match information gathered successful</b></u>\n\n\n<code>{Sed}</code>",
        parse_mode="HTML",
    )
# Register the module and its single command in the bot's help system.
CmdHelp("cricket").add_command(
    "cs", None, "Collects all the live cricket scores."
).add_info(
    "Cricket Kheloge Vro?"
).add_warning(
    "✅ Harmless Module."
).add()
| 26.16129 | 87 | 0.653514 |
8e4e86dd2eda354c384df42b7f8679ad9f0344b5 | 10,589 | py | Python | ml/rl/caffe_utils.py | joshrose/Horizon | a2eb407b31a16560ae78aa6751eb83672a122a7e | [
"BSD-3-Clause"
] | 2 | 2021-05-23T22:11:21.000Z | 2021-06-17T13:08:53.000Z | ml/rl/caffe_utils.py | joshrose/Horizon | a2eb407b31a16560ae78aa6751eb83672a122a7e | [
"BSD-3-Clause"
] | null | null | null | ml/rl/caffe_utils.py | joshrose/Horizon | a2eb407b31a16560ae78aa6751eb83672a122a7e | [
"BSD-3-Clause"
] | 2 | 2021-01-06T01:06:50.000Z | 2021-06-24T01:12:52.000Z | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import itertools
import logging
import os
import traceback
from io import BytesIO
from typing import Any, Dict, List, Optional
import caffe2.python.onnx.backend
import numpy as np
import onnx
import torch
from caffe2.python import core, workspace
from caffe2.python.core import BlobReference
logger = logging.getLogger(__name__)
class C2Meta(type):
    """Metaclass that turns any attribute access on :class:`C2` (e.g.
    ``C2.FC``) into a Caffe2 operator call on the currently bound net,
    auto-naming output blobs and promoting raw Python/numpy inputs to blobs."""

    def __getattr__(cls, method_name):
        if method_name.startswith("__"):
            # Leave dunder lookups to the normal type machinery.
            return super().__getattr__(method_name)

        def method(*inputs, **kwargs):
            # Prefix blob names with the caller's file:line for debuggability.
            tb = traceback.extract_stack(limit=2)
            blob_prefix = "{}:{}:{}".format(
                os.path.basename(tb[0].filename), tb[0].lineno, method_name
            )
            OpSchema = workspace.C.OpSchema
            schema = OpSchema.get(method_name)
            num_outputs = schema.CalculateOutput(len(inputs))
            outputs = []
            if num_outputs < 0:
                # Schema couldn't derive a count; fall back to its maximum.
                num_outputs = schema.max_output
            for x in range(num_outputs):
                outputs.append(C2._net.NextBlob(blob_prefix + "_output" + str(x)))
            promoted_inputs = []
            for i in inputs:
                if type(i) != str and type(i) != BlobReference:
                    # Promote input by stuffing into a blob
                    # NOTE(review): str(x) reuses the index left over from the
                    # outputs loop above — looks like it should be a per-input
                    # counter; NextBlob presumably deduplicates. Confirm.
                    input_name = C2._net.NextBlob(blob_prefix + "_input" + str(x))
                    if type(i) == np.ndarray:
                        workspace.FeedBlob(input_name, i)
                    else:
                        # Scalars are wrapped as 1-element float32 arrays.
                        workspace.FeedBlob(input_name, np.array([i], dtype=np.float32))
                    promoted_inputs.append(input_name)
                else:
                    promoted_inputs.append(i)
            return C2._net.__getattr__(method_name)(promoted_inputs, outputs, **kwargs)

        return method
class C2(metaclass=C2Meta):
    """Global registry holding the Caffe2 net / init net / model that the
    C2Meta-generated operator calls are applied to. All state is class-level;
    exactly one of (net, net+init_net, model) is bound at a time."""

    _net: Optional[Any] = None
    _init_net: Optional[Any] = None
    _model: Optional[Any] = None

    @staticmethod
    def set_net(net):
        # Binding a bare net clears any previously bound model/init net.
        C2._model = None
        C2._net = net
        C2._init_net = None

    @staticmethod
    def set_net_and_init_net(net, init_net):
        C2._model = None
        C2._net = net
        C2._init_net = init_net

    @staticmethod
    def net():
        return C2._net

    @staticmethod
    def init_net():
        return C2._init_net

    @staticmethod
    def set_model(model):
        # Binding a model also binds its net; None unbinds everything.
        C2._model = model
        C2._init_net = None
        if model is None:
            C2._net = None
        else:
            C2._net = model.net

    @staticmethod
    def model():
        return C2._model

    @staticmethod
    def NextBlob(prefix: str) -> str:
        """Return a fresh blob name on the bound net, prefixed with the
        net name and the caller's file:line for traceability."""
        assert C2._net is not None
        tb = traceback.extract_stack(limit=2)
        prefix = "{}:{}:{}:{}".format(
            C2._net.Name(), os.path.basename(tb[0].filename), tb[0].lineno, prefix
        )
        retval: str = C2._net.NextBlob(prefix)
        return retval
class StackedArray(object):
    """Ragged list-of-lists encoded as two Caffe2 blob names: per-row
    ``lengths`` and the concatenated ``values``."""

    def __init__(self, lengths, values):
        # Both attributes are blob *names* (strings), not the data itself.
        self.lengths = lengths
        self.values = values

    @classmethod
    def from_list_list(cls, d: List[List[float]], blob_prefix: str):
        """Feed a list of float lists into the workspace and wrap the
        resulting lengths/values blobs."""
        lengths_blob = blob_prefix + "_lengths"
        values_blob = blob_prefix + "_values"
        workspace.FeedBlob(lengths_blob, np.array([len(x) for x in d], dtype=np.int32))
        workspace.FeedBlob(
            values_blob, np.array(list(itertools.chain(*d)), dtype=np.float32)
        )
        return cls(lengths_blob, values_blob)
class StackedAssociativeArray(object):
    """List of dicts encoded as three Caffe2 blob names: per-row ``lengths``
    plus the concatenated ``keys`` and ``values``."""

    def __init__(self, lengths, keys, values):
        # All attributes are blob *names* (strings).
        self.lengths = lengths
        self.keys = keys
        self.values = values

    def to_python(self) -> List[Dict[Any, Any]]:
        """Fetch the blobs from the workspace and rebuild the list of dicts."""
        keys = workspace.FetchBlob(self.keys)
        lengths = workspace.FetchBlob(self.lengths)
        values = workspace.FetchBlob(self.values)
        retval: List[Dict[Any, Any]] = []
        cursor = 0
        # Walk the flat key/value arrays, consuming `length` pairs per row.
        for length in lengths:
            d = {}
            for _ in range(length):
                key = keys[cursor]
                value = values[cursor]
                d[key] = value
                cursor += 1
            retval.append(d)
        return retval

    @classmethod
    def from_dict_list(cls, d: List[Dict[int, float]], blob_prefix: str):
        """Feed a list of int->float dicts into the workspace and wrap the
        resulting lengths/keys/values blobs."""
        lengths_blob = blob_prefix + "_lengths"
        keys_blob = blob_prefix + "_keys"
        values_blob = blob_prefix + "_values"
        workspace.FeedBlob(lengths_blob, np.array([len(x) for x in d], dtype=np.int32))
        key_list_2d = [list(x.keys()) for x in d]
        workspace.FeedBlob(
            keys_blob, np.array(list(itertools.chain(*key_list_2d)), dtype=np.int32)
        )
        value_list_2d = [list(x.values()) for x in d]
        workspace.FeedBlob(
            values_blob,
            np.array(list(itertools.chain(*value_list_2d)), dtype=np.float32),
        )
        return cls(lengths_blob, keys_blob, values_blob)
class StackedTwoLevelAssociativeArray(object):
    """List of dict-of-dicts encoded as five Caffe2 blob names: outer
    lengths/keys plus, for each outer key, inner lengths/keys/values."""

    def __init__(
        self,
        outer_lengths: str,
        outer_keys: str,
        inner_lengths: str,
        inner_keys: str,
        inner_values: str,
    ) -> None:
        # All attributes are blob *names* (strings).
        self.outer_lengths = outer_lengths
        self.outer_keys = outer_keys
        self.inner_lengths = inner_lengths
        self.inner_keys = inner_keys
        self.inner_values = inner_values

    def to_python(self) -> List[Dict[Any, Dict[Any, Any]]]:
        """Fetch all five blobs and rebuild the nested dict structure.

        `inner_lengths` is aligned with `outer_keys`: the i-th outer key owns
        the next inner_lengths[i] key/value pairs of the inner arrays.
        """
        outer_keys = workspace.FetchBlob(self.outer_keys)
        outer_lengths = workspace.FetchBlob(self.outer_lengths)
        inner_keys = workspace.FetchBlob(self.inner_keys)
        inner_lengths = workspace.FetchBlob(self.inner_lengths)
        inner_values = workspace.FetchBlob(self.inner_values)
        retval: List[Dict[Any, Dict[Any, Any]]] = []
        # Two cursors advance independently over the outer and inner arrays.
        outer_cursor = 0
        inner_cursor = 0
        for length in outer_lengths:
            outer_dict = {}
            for _ in range(length):
                outer_key = outer_keys[outer_cursor]
                inner_length = inner_lengths[outer_cursor]
                outer_cursor += 1
                inner_dict = {}
                for _ in range(inner_length):
                    inner_key = inner_keys[inner_cursor]
                    inner_value = inner_values[inner_cursor]
                    inner_cursor += 1
                    inner_dict[inner_key] = inner_value
                outer_dict[outer_key] = inner_dict
            retval.append(outer_dict)
        return retval
class PytorchCaffe2Converter(object):
    """Converts a trained PyTorch net to a Caffe2 NetDef via ONNX export,
    and remaps the resulting blobs into a namespaced Caffe2 workspace."""

    @staticmethod
    def pytorch_net_to_caffe2_netdef(*args, **kwargs):
        """Convenience wrapper: trace to an ONNX buffer, then build the
        Caffe2 backend representation. See the two methods below."""
        buffer = PytorchCaffe2Converter.pytorch_net_to_buffer(*args, **kwargs)
        return PytorchCaffe2Converter.buffer_to_caffe2_netdef(buffer)

    @staticmethod
    def pytorch_net_to_buffer(pytorch_net, input_dim, model_on_gpu, float_input=True):
        """Traces a pytorch net and outputs a python buffer object
        holding net."""
        # Export in eval mode; the original mode is restored afterwards.
        training = pytorch_net.training
        pytorch_net.train(False)

        # Refuse to export nets with inf/nan parameters.
        for name, p in pytorch_net.named_parameters():
            inf_count = torch.isinf(p).sum().item()
            nan_count = torch.isnan(p).sum().item()
            assert inf_count + nan_count == 0, "{} has {} inf and {} nan".format(
                name, inf_count, nan_count
            )

        # Build a (1, input_dim) dummy input matching device and dtype.
        if float_input:
            dtype = torch.cuda.FloatTensor if model_on_gpu else torch.FloatTensor
            dummy_input = torch.randn(1, input_dim).type(dtype)
        else:
            dtype = torch.cuda.LongTensor if model_on_gpu else torch.LongTensor
            dummy_input = torch.randint(low=0, high=1, size=(1, input_dim)).type(dtype)
        write_buffer = BytesIO()
        try:
            torch.onnx.export(pytorch_net, dummy_input, write_buffer)
        finally:
            # Restore the original train/eval mode even if export fails.
            pytorch_net.train(training)
        return write_buffer

    @staticmethod
    def buffer_to_caffe2_netdef(buffer):
        """Creates caffe2 NetDef from buffer object and returns pointer to
        input and output blobs and the NetDef."""
        protobuf_model = onnx.load(BytesIO(buffer.getvalue()))
        input_blob_name = protobuf_model.graph.input[0].name
        output_blob_name = protobuf_model.graph.output[0].name
        logger.info(
            "INPUT BLOB: " + input_blob_name + ". OUTPUT BLOB:" + output_blob_name
        )
        return (
            input_blob_name,
            output_blob_name,
            caffe2.python.onnx.backend.prepare(protobuf_model),
        )

    @staticmethod
    def remap_blobs(input_blob, output_blob, netdef, prefix):
        """Clone the init/predict nets with every external blob renamed to
        ``<prefix>/<blob>`` and copy parameter blobs into the global
        workspace under the new names. Returns the remapped input/output
        blob names, parameter names, and the two cloned nets."""
        init_net = core.Net(netdef.init_net)
        predict_net = core.Net(netdef.predict_net)
        blob_remap = {
            str(b): "{}/{}".format(prefix, str(b))
            for n in [init_net, predict_net]
            for b in n.external_inputs + n.external_outputs
        }
        remapped_input_blob = blob_remap[input_blob]
        remapped_output_blob = blob_remap[output_blob]
        remapped_init_net, _blob_remap = core.clone_and_bind_net(
            init_net, "{}_init".format(prefix), "{}_init/".format(prefix), blob_remap
        )
        remapped_predict_net, predict_blob_remap = core.clone_and_bind_net(
            predict_net,
            "{}_predict".format(prefix),
            "{}_predict/".format(prefix),
            blob_remap,
        )

        # Copy parameters from the ONNX backend's private workspace into the
        # global one, under the remapped names.
        torch_workspace = netdef.workspace
        parameters = torch_workspace.Blobs()
        for blob_str in parameters:
            workspace.FeedBlob(
                blob_remap[blob_str], torch_workspace.FetchBlob(blob_str)
            )
        remapped_parameters = [predict_blob_remap[b] for b in parameters]
        return (
            remapped_input_blob,
            remapped_output_blob,
            remapped_parameters,
            remapped_init_net,
            remapped_predict_net,
        )
def softmax(x, temperature):
    """Temperature-scaled softmax over dim 1 of a 2-D score tensor."""
    scaled_scores = x / temperature
    return torch.nn.functional.softmax(scaled_scores, dim=1)
def masked_softmax(x, mask, temperature):
    """Temperature-scaled softmax over dim 1, restricted to positions where
    ``mask`` is 1. Fully-masked rows come out as all zeros."""
    scaled = x / temperature
    # Drive masked-out entries toward -inf so exp() makes them vanish.
    shifted = scaled - ((1.0 - mask) * 1e20)
    shifted = shifted - torch.max(shifted, dim=1, keepdim=True)[0]
    weights = torch.exp(shifted) * mask
    out = weights / weights.sum(dim=1, keepdim=True)
    # A fully-masked row produces 0/0 = NaN; map those entries back to 0.
    out[torch.isnan(out)] = 0
    return out
| 32.885093 | 87 | 0.603456 |
7c8618e59a4c56df0db014403e6cde9306472519 | 7,078 | py | Python | image-generation/pggan/sliced_wasserstein.py | AaratiAkkapeddi/nnabla-examples | db9e5ad850303c158773aeb275e5c3821b4a3935 | [
"Apache-2.0"
] | 228 | 2017-11-20T06:05:56.000Z | 2022-03-23T12:40:05.000Z | image-generation/pggan/sliced_wasserstein.py | AaratiAkkapeddi/nnabla-examples | db9e5ad850303c158773aeb275e5c3821b4a3935 | [
"Apache-2.0"
] | 36 | 2018-01-11T23:26:20.000Z | 2022-03-12T00:53:38.000Z | image-generation/pggan/sliced_wasserstein.py | AaratiAkkapeddi/nnabla-examples | db9e5ad850303c158773aeb275e5c3821b4a3935 | [
"Apache-2.0"
] | 76 | 2017-11-22T22:00:00.000Z | 2022-03-28T05:58:57.000Z | # Copyright 2018,2019,2020,2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
import argparse
from nnabla import logger
import scipy
import time
from datasets import data_iterator
import matplotlib.pylab as plt
from networks import Generator
from functions import pixel_wise_feature_vector_normalization
import nnabla as nn
import nnabla.functions as F
import nnabla.parametric_functions as PF
import nnabla.solvers as S
import numpy as np
def get_descriptors_for_minibatch(minibatch, nhood_size, nhoods_per_image):
    """Sample ``nhoods_per_image`` random square pixel neighborhoods of side
    ``nhood_size`` from every image, returning an array of shape
    (batch * nhoods_per_image, 3, nhood_size, nhood_size)."""
    shape = minibatch.shape  # (minibatch, channel, height, width)
    assert len(shape) == 4 and shape[1] == 3
    total = nhoods_per_image * shape[0]
    half = nhood_size // 2
    # Open grids broadcast to (total, 3, nhood_size, nhood_size); axis 2 is
    # the column offset and axis 3 the row offset, as in the original layout.
    nb_axis, ch_axis, col_off, row_off = np.ogrid[0:total, 0:3, -half:half + 1, -half:half + 1]
    image_index = nb_axis // nhoods_per_image
    # Random centers are drawn so every window stays inside the image.
    cols = col_off + np.random.randint(half, shape[3] - half, size=(total, 1, 1, 1))
    rows = row_off + np.random.randint(half, shape[2] - half, size=(total, 1, 1, 1))
    flat_idx = ((image_index * shape[1] + ch_axis) * shape[2] + rows) * shape[3] + cols
    return minibatch.flat[flat_idx]
# ----------------------------------------------------------------------------
def finalize_descriptors(desc):
    """Standardize descriptors per channel (zero mean, unit std, in place)
    and flatten each neighborhood to a row vector."""
    if isinstance(desc, list):
        desc = np.concatenate(desc, axis=0)
    assert desc.ndim == 4  # (neighborhood, channel, height, width)
    channel_axes = (0, 2, 3)
    desc -= np.mean(desc, axis=channel_axes, keepdims=True)
    desc /= np.std(desc, axis=channel_axes, keepdims=True)
    return desc.reshape(desc.shape[0], -1)
# ----------------------------------------------------------------------------
def _sliced_wasserstein(A, B, dirs_per_repeat):
# (neighborhood, descriptor_component)
assert A.ndim == 2 and A.shape == B.shape
results = []
# (descriptor_component, direction)
dirs = np.random.randn(A.shape[1], dirs_per_repeat)
# normalize descriptor components for each direction
dirs /= np.sqrt(np.sum(np.square(dirs), axis=0, keepdims=True))
dirs = dirs.astype(np.float32)
# (neighborhood, direction)
projA = np.matmul(A, dirs)
projB = np.matmul(B, dirs)
# sort neighborhood projections for each direction
projA = np.sort(projA, axis=0)
projB = np.sort(projB, axis=0)
# pointwise wasserstein distances
dists = np.abs(projA - projB)
# average over neighborhoods and directions
results.append(np.mean(dists))
# average over repeats
return results
# ----------------------------------------------------------------------------
def downscale_minibatch(minibatch, lod):
    """Downscale a uint8 image batch by 2**lod using repeated 2x2 box
    averaging; ``lod == 0`` returns the input unchanged."""
    if lod == 0:
        return minibatch
    acc = minibatch.astype(np.float32)
    for _ in range(lod):
        top_l = acc[:, :, 0::2, 0::2]
        top_r = acc[:, :, 0::2, 1::2]
        bot_l = acc[:, :, 1::2, 0::2]
        bot_r = acc[:, :, 1::2, 1::2]
        acc = (top_l + top_r + bot_l + bot_r) * 0.25
    return np.round(acc).clip(0, 255).astype(np.uint8)
# ----------------------------------------------------------------------------
# 5x5 binomial smoothing kernel (outer product of [1, 4, 6, 4, 1]),
# normalized so its entries sum to 1.
gaussian_filter = np.float32([
    [1, 4, 6, 4, 1],
    [4, 16, 24, 16, 4],
    [6, 24, 36, 24, 6],
    [4, 16, 24, 16, 4],
    [1, 4, 6, 4, 1]]) / 256.0
def pyr_down(minibatch):  # matches cv2.pyrDown()
    """Blur with the 5x5 Gaussian kernel, then decimate by 2 in both
    spatial dimensions."""
    assert minibatch.ndim == 4
    # NOTE(review): the file only does `import scipy`; `scipy.ndimage` is not
    # guaranteed to be exposed by that alone — confirm `import scipy.ndimage`.
    return scipy.ndimage.convolve(minibatch, gaussian_filter[np.newaxis, np.newaxis, :, :], mode='mirror')[:, :, ::2, ::2]
def pyr_up(minibatch):  # matches cv2.pyrUp()
    """Zero-stuff by 2 in both spatial dimensions, then blur with 4x the
    Gaussian kernel (the factor 4 preserves average brightness)."""
    assert minibatch.ndim == 4
    S = minibatch.shape
    res = np.zeros((S[0], S[1], S[2] * 2, S[3] * 2), minibatch.dtype)
    res[:, :, ::2, ::2] = minibatch
    return scipy.ndimage.convolve(res, gaussian_filter[np.newaxis, np.newaxis, :, :] * 4.0, mode='mirror')
def generate_laplacian_pyramid(minibatch, num_levels):
    """Build a ``num_levels``-level Laplacian pyramid: each level holds the
    detail lost by downscaling, and the last level holds the residual."""
    levels = [np.float32(minibatch)]
    for _ in range(num_levels - 1):
        levels.append(pyr_down(levels[-1]))
        # Subtract the upsampled coarse level to leave only the detail band.
        levels[-2] -= pyr_up(levels[-1])
    return levels
def reconstruct_laplacian_pyramid(pyramid):
    """Invert generate_laplacian_pyramid: start from the coarsest residual
    and repeatedly upsample and add back each detail band."""
    image = pyramid[-1]
    for detail in reversed(pyramid[:-1]):
        image = pyr_up(image) + detail
    return image
# ----------------------------------------------------------------------------
def sliced_wasserstein(A, B, dir_repeats, dirs_per_repeat):
    """Average the single-repeat sliced Wasserstein distance over
    ``dir_repeats`` independent sets of random directions, fanning the
    repeats out over a thread pool."""
    # (neighborhood, descriptor_component)
    assert A.ndim == 2 and A.shape == B.shape
    import multiprocessing as mp
    from multiprocessing.pool import ThreadPool

    pool = ThreadPool(processes=mp.cpu_count())
    jobs = [pool.apply_async(_sliced_wasserstein, (A, B, dirs_per_repeat))
            for _ in range(dir_repeats)]
    pool.close()
    pool.join()
    # Each job yields a 1-element list; np.mean flattens them all.
    per_repeat = [job.get() for job in jobs]
    return np.mean(per_repeat)
def compute_metric(di, gen, latent, num_minibatch, nhoods_per_image,
                   nhood_size, level_list, dir_repeats,
                   dirs_per_repeat, hyper_sphere=True):
    """Compute per-level sliced Wasserstein scores between real images from
    data iterator ``di`` and images produced by generator ``gen``.

    For each minibatch, real and generated images are converted to uint8,
    decomposed into Laplacian pyramids (one level per entry in
    ``level_list``), and random neighborhood descriptors are collected per
    level. Returns the list of per-level distances.
    """
    logger.info("Generate images")
    st = time.time()
    real_descriptor = [[] for _ in level_list]
    fake_descriptor = [[] for _ in level_list]
    for k in range(num_minibatch):
        logger.info("iter={} / {}".format(k, num_minibatch))
        real, _ = di.next()
        # Map [-1, 1] images to uint8 [0, 255].
        real = np.uint8((real + 1.) / 2. * 255)
        B = len(real)
        z_data = np.random.randn(B, latent, 1, 1)
        z = nn.Variable.from_numpy_array(z_data)
        # Optionally project latents onto the unit hypersphere.
        z = pixel_wise_feature_vector_normalization(z) if hyper_sphere else z
        y = gen(z)
        fake = y.d
        fake = np.uint8((y.d + 1.) / 2. * 255)
        for i, desc in enumerate(generate_laplacian_pyramid(real, len(level_list))):
            real_descriptor[i].append(get_descriptors_for_minibatch(
                desc, nhood_size, nhoods_per_image))
        for i, desc in enumerate(generate_laplacian_pyramid(fake, len(level_list))):
            fake_descriptor[i].append(get_descriptors_for_minibatch(
                desc, nhood_size, nhoods_per_image))
    logger.info(
        "Elapsed time for generating images: {} [s]".format(time.time() - st))

    logger.info("Compute Sliced Wasserstein Distance")
    scores = []
    for i, level in enumerate(level_list):
        st = time.time()
        # Normalize and flatten the accumulated descriptors per level.
        real = finalize_descriptors(real_descriptor[i])
        fake = finalize_descriptors(fake_descriptor[i])
        scores.append(sliced_wasserstein(
            real, fake, dir_repeats, dirs_per_repeat))
        logger.info("Level: {}, dist: {}".format(level, scores[-1]))
        logger.info(
            "Elapsed time: {} [s] at {}-th level".format(time.time() - st, i))
    return scores
| 35.747475 | 122 | 0.611331 |
d95561a02f314ab64425184b6204f48607c75048 | 30,011 | py | Python | tests/integration/test_lambda.py | mesillo/localstack | 5bc6cff903a8d11f821667bd217796596e94ef7d | [
"Apache-2.0"
] | null | null | null | tests/integration/test_lambda.py | mesillo/localstack | 5bc6cff903a8d11f821667bd217796596e94ef7d | [
"Apache-2.0"
] | null | null | null | tests/integration/test_lambda.py | mesillo/localstack | 5bc6cff903a8d11f821667bd217796596e94ef7d | [
"Apache-2.0"
] | null | null | null | import re
import os
import json
import time
import unittest
import six
from io import BytesIO
from localstack import config
from localstack.constants import LOCALSTACK_ROOT_FOLDER, LOCALSTACK_MAVEN_VERSION
from localstack.utils import testutil
from localstack.utils.aws import aws_stack
from localstack.utils.common import (
short_uid, load_file, to_str, mkdir, download, run_safe, get_free_tcp_port, get_service_protocol)
from localstack.services.infra import start_proxy
from localstack.services.awslambda import lambda_api, lambda_executors
from localstack.services.generic_proxy import ProxyListener
from localstack.services.awslambda.lambda_api import (
LAMBDA_RUNTIME_DOTNETCORE2, LAMBDA_RUNTIME_RUBY25, LAMBDA_RUNTIME_PYTHON27,
use_docker, LAMBDA_RUNTIME_PYTHON36, LAMBDA_RUNTIME_JAVA8,
LAMBDA_RUNTIME_NODEJS810, LAMBDA_RUNTIME_CUSTOM_RUNTIME
)
THIS_FOLDER = os.path.dirname(os.path.realpath(__file__))
TEST_LAMBDA_PYTHON = os.path.join(THIS_FOLDER, 'lambdas', 'lambda_integration.py')
TEST_LAMBDA_PYTHON3 = os.path.join(THIS_FOLDER, 'lambdas', 'lambda_python3.py')
TEST_LAMBDA_NODEJS = os.path.join(THIS_FOLDER, 'lambdas', 'lambda_integration.js')
TEST_LAMBDA_RUBY = os.path.join(THIS_FOLDER, 'lambdas', 'lambda_integration.rb')
TEST_LAMBDA_DOTNETCORE2 = os.path.join(THIS_FOLDER, 'lambdas', 'dotnetcore2', 'dotnetcore2.zip')
TEST_LAMBDA_CUSTOM_RUNTIME = os.path.join(THIS_FOLDER, 'lambdas', 'custom-runtime')
TEST_LAMBDA_JAVA = os.path.join(LOCALSTACK_ROOT_FOLDER, 'localstack', 'infra', 'localstack-utils-tests.jar')
TEST_LAMBDA_JAVA_WITH_LIB = os.path.join(THIS_FOLDER, 'lambdas', 'java', 'lambda-function-with-lib-0.0.1.jar')
TEST_LAMBDA_ENV = os.path.join(THIS_FOLDER, 'lambdas', 'lambda_environment.py')
TEST_LAMBDA_NAME_PY = 'test_lambda_py'
TEST_LAMBDA_NAME_PY3 = 'test_lambda_py3'
TEST_LAMBDA_NAME_JS = 'test_lambda_js'
TEST_LAMBDA_NAME_RUBY = 'test_lambda_ruby'
TEST_LAMBDA_NAME_DOTNETCORE2 = 'test_lambda_dotnetcore2'
TEST_LAMBDA_NAME_CUSTOM_RUNTIME = 'test_lambda_custom_runtime'
TEST_LAMBDA_NAME_JAVA = 'test_lambda_java'
TEST_LAMBDA_NAME_JAVA_STREAM = 'test_lambda_java_stream'
TEST_LAMBDA_NAME_JAVA_SERIALIZABLE = 'test_lambda_java_serializable'
TEST_LAMBDA_NAME_JAVA_WITH_LIB = 'test_lambda_java_with_lib'
TEST_LAMBDA_NAME_ENV = 'test_lambda_env'
MAVEN_BASE_URL = 'https://repo.maven.apache.org/maven2'
TEST_LAMBDA_JAR_URL = ('{url}/cloud/localstack/{name}/{version}/{name}-{version}-tests.jar').format(
version=LOCALSTACK_MAVEN_VERSION, url=MAVEN_BASE_URL, name='localstack-utils')
TEST_LAMBDA_LIBS = ['localstack', 'localstack_client', 'requests',
'psutil', 'urllib3', 'chardet', 'certifi', 'idna', 'pip', 'dns']
class LambdaTestBase(unittest.TestCase):
    """Base class with shared helpers for the Lambda integration tests."""

    def check_lambda_logs(self, func_name, expected_lines=None):
        """Assert that the most recent CloudWatch log stream of Lambda
        function ``func_name`` contains every entry of ``expected_lines``.

        Entries containing ``'.*'`` are treated as regular expressions and
        matched against each log message with ``re.match``; all other
        entries must appear verbatim in the log messages.
        """
        # Fix: was `expected_lines=[]` - a mutable default argument shared
        # across calls; use None as the sentinel instead.
        expected_lines = expected_lines or []
        logs_client = aws_stack.connect_to_service('logs')
        log_group_name = '/aws/lambda/%s' % func_name
        streams = logs_client.describe_log_streams(logGroupName=log_group_name)['logStreams']
        # newest stream first, so streams[0] belongs to the latest invocation
        streams = sorted(streams, key=lambda x: x['creationTime'], reverse=True)
        log_events = logs_client.get_log_events(
            logGroupName=log_group_name, logStreamName=streams[0]['logStreamName'])['events']
        log_messages = [e['message'] for e in log_events]
        for line in expected_lines:
            if '.*' in line:
                # regex entry: satisfied if it matches any single message
                found = [re.match(line, m) for m in log_messages]
                if any(found):
                    continue
            self.assertIn(line, log_messages)
class TestLambdaBaseFeatures(unittest.TestCase):
    """Tests for Lambda features independent of a specific runtime:
    the fallback-URL mechanism and resource permissions."""

    def test_forward_to_fallback_url_dynamodb(self):
        """Invocations of missing functions are recorded in a DynamoDB table
        when LAMBDA_FALLBACK_URL uses the dynamodb:// scheme."""
        db_table = 'lambda-records'
        ddb_client = aws_stack.connect_to_service('dynamodb')
        def num_items():
            # run_safe returns None if the table does not exist yet
            return len((run_safe(ddb_client.scan, TableName=db_table) or {'Items': []})['Items'])
        items_before = num_items()
        self._run_forward_to_fallback_url('dynamodb://%s' % db_table)
        items_after = num_items()
        # one record per forwarded invocation (3 by default)
        self.assertEqual(items_after, items_before + 3)

    def test_forward_to_fallback_url_http(self):
        """Invocations of missing functions are POSTed to an HTTP fallback URL."""
        class MyUpdateListener(ProxyListener):
            # captures every request body forwarded to the fallback endpoint
            def forward_request(self, method, path, data, headers):
                records.append(data)
                return 200
        records = []
        local_port = get_free_tcp_port()
        proxy = start_proxy(local_port, backend_url=None, update_listener=MyUpdateListener())
        items_before = len(records)
        self._run_forward_to_fallback_url('%s://localhost:%s' % (get_service_protocol(), local_port))
        items_after = len(records)
        self.assertEqual(items_after, items_before + 3)
        proxy.stop()

    def _run_forward_to_fallback_url(self, url, num_requests=3):
        """Invoke `num_requests` non-existing functions with the fallback
        URL temporarily set to `url`; restores the config afterwards."""
        lambda_client = aws_stack.connect_to_service('lambda')
        config.LAMBDA_FALLBACK_URL = url
        try:
            for i in range(num_requests):
                lambda_client.invoke(FunctionName='non-existing-lambda-%s' % i,
                    Payload=b'{}', InvocationType='RequestResponse')
        finally:
            # always reset, even if an invocation raises
            config.LAMBDA_FALLBACK_URL = ''

    def test_add_lambda_permission(self):
        """add_permission/get_policy/remove_permission round-trip, including
        the mirrored IAM policy created by LocalStack."""
        iam_client = aws_stack.connect_to_service('iam')
        lambda_client = aws_stack.connect_to_service('lambda')
        # create lambda permission
        action = 'lambda:InvokeFunction'
        resp = lambda_client.add_permission(FunctionName=TEST_LAMBDA_NAME_PY, Action=action,
            StatementId='s3', Principal='s3.amazonaws.com', SourceArn=aws_stack.s3_bucket_arn('test-bucket'))
        self.assertIn('Statement', resp)
        # fetch lambda policy
        policy = lambda_client.get_policy(FunctionName=TEST_LAMBDA_NAME_PY)['Policy']
        self.assertIsInstance(policy, six.string_types)
        policy = json.loads(to_str(policy))
        self.assertEqual(policy['Statement'][0]['Action'], action)
        self.assertEqual(policy['Statement'][0]['Resource'], lambda_api.func_arn(TEST_LAMBDA_NAME_PY))
        # fetch IAM policy
        policies = iam_client.list_policies(Scope='Local', MaxItems=500)['Policies']
        matching = [p for p in policies if p['PolicyName'] == 'lambda_policy_%s' % TEST_LAMBDA_NAME_PY]
        self.assertEqual(len(matching), 1)
        self.assertIn(':policy/', matching[0]['Arn'])
        # remove permission that we just added
        resp = lambda_client.remove_permission(FunctionName=TEST_LAMBDA_NAME_PY,
            StatementId=resp['Statement'], Qualifier='qual1', RevisionId='r1')
        self.assertEqual(resp['ResponseMetadata']['HTTPStatusCode'], 200)
class TestPythonRuntimes(LambdaTestBase):
    """Integration tests for Python 2.7 / 3.6 Lambda runtimes."""

    @classmethod
    def setUpClass(cls):
        cls.lambda_client = aws_stack.connect_to_service('lambda')
        cls.s3_client = aws_stack.connect_to_service('s3')
        # deploy a shared Python 2.7 function reused by most tests below
        zip_file = testutil.create_lambda_archive(
            load_file(TEST_LAMBDA_PYTHON),
            get_content=True,
            libs=TEST_LAMBDA_LIBS,
            runtime=LAMBDA_RUNTIME_PYTHON27
        )
        testutil.create_lambda_function(
            func_name=TEST_LAMBDA_NAME_PY,
            zip_file=zip_file,
            runtime=LAMBDA_RUNTIME_PYTHON27
        )

    @classmethod
    def tearDownClass(cls):
        testutil.delete_lambda_function(TEST_LAMBDA_NAME_PY)

    def test_invocation_type_not_set(self):
        """Default invocation (no InvocationType) returns the echoed event."""
        result = self.lambda_client.invoke(
            FunctionName=TEST_LAMBDA_NAME_PY, Payload=b'{}')
        result_data = json.loads(result['Payload'].read())
        self.assertEqual(result['StatusCode'], 200)
        self.assertEqual(result_data['event'], json.loads('{}'))

    def test_invocation_type_request_response(self):
        """Synchronous (RequestResponse) invocation returns 200 and a JSON dict."""
        result = self.lambda_client.invoke(
            FunctionName=TEST_LAMBDA_NAME_PY,
            Payload=b'{}', InvocationType='RequestResponse')
        result_data = result['Payload'].read()
        result_data = json.loads(to_str(result_data))
        self.assertEqual(result['StatusCode'], 200)
        self.assertIsInstance(result_data, dict)

    def test_invocation_type_event(self):
        """Asynchronous (Event) invocation is acknowledged with 202."""
        result = self.lambda_client.invoke(
            FunctionName=TEST_LAMBDA_NAME_PY,
            Payload=b'{}', InvocationType='Event')
        self.assertEqual(result['StatusCode'], 202)

    def test_invocation_type_dry_run(self):
        """DryRun invocation validates without executing and returns 204."""
        result = self.lambda_client.invoke(
            FunctionName=TEST_LAMBDA_NAME_PY, Payload=b'{}',
            InvocationType='DryRun')
        self.assertEqual(result['StatusCode'], 204)

    def test_lambda_environment(self):
        """Environment variables are passed to the function and reported by
        get_function_configuration."""
        # NOTE(review): 'vars' shadows the builtin of the same name here;
        # consider renaming (e.g. env_vars).
        vars = {'Hello': 'World'}
        zip_file = testutil.create_lambda_archive(
            load_file(TEST_LAMBDA_ENV), get_content=True,
            libs=TEST_LAMBDA_LIBS, runtime=LAMBDA_RUNTIME_PYTHON27)
        testutil.create_lambda_function(
            func_name=TEST_LAMBDA_NAME_ENV, zip_file=zip_file,
            runtime=LAMBDA_RUNTIME_PYTHON27, envvars=vars)
        # invoke function and assert result contains env vars
        result = self.lambda_client.invoke(
            FunctionName=TEST_LAMBDA_NAME_ENV, Payload=b'{}')
        result_data = result['Payload']
        self.assertEqual(result['StatusCode'], 200)
        self.assertDictEqual(json.load(result_data), vars)
        # get function config and assert result contains env vars
        result = self.lambda_client.get_function_configuration(
            FunctionName=TEST_LAMBDA_NAME_ENV)
        self.assertEqual(result['Environment'], {'Variables': vars})
        # clean up
        testutil.delete_lambda_function(TEST_LAMBDA_NAME_ENV)

    def test_invocation_with_qualifier(self):
        """A published version can be invoked via the Qualifier parameter;
        log output of the invocation is also verified."""
        lambda_name = 'test_lambda_%s' % short_uid()
        bucket_name = 'test_bucket_lambda2'
        bucket_key = 'test_lambda.zip'
        # upload zip file to S3
        zip_file = testutil.create_lambda_archive(
            load_file(TEST_LAMBDA_PYTHON),
            get_content=True,
            libs=TEST_LAMBDA_LIBS,
            runtime=LAMBDA_RUNTIME_PYTHON27
        )
        self.s3_client.create_bucket(Bucket=bucket_name)
        self.s3_client.upload_fileobj(
            BytesIO(zip_file), bucket_name, bucket_key)
        # create lambda function
        response = self.lambda_client.create_function(
            FunctionName=lambda_name, Handler='handler.handler',
            Runtime=lambda_api.LAMBDA_RUNTIME_PYTHON27, Role='r1',
            Code={
                'S3Bucket': bucket_name,
                'S3Key': bucket_key
            },
            Publish=True
        )
        self.assertIn('Version', response)
        # invoke lambda function
        data_before = b'{"foo": "bar with \'quotes\\""}'
        result = self.lambda_client.invoke(
            FunctionName=lambda_name,
            Payload=data_before,
            Qualifier=response['Version']
        )
        data_after = json.loads(result['Payload'].read())
        self.assertEqual(json.loads(to_str(data_before)), data_after['event'])
        context = data_after['context']
        self.assertEqual(response['Version'], context['function_version'])
        self.assertEqual(lambda_name, context['function_name'])
        # assert that logs are present
        expected = ['Lambda log message - print function']
        if use_docker():
            # Note that during regular test execution, nosetests captures the output from
            # the logging module - hence we can only expect this when running in Docker
            expected.append('.*Lambda log message - logging module')
        self.check_lambda_logs(lambda_name, expected_lines=expected)
        # clean up
        testutil.delete_lambda_function(lambda_name)

    def test_upload_lambda_from_s3(self):
        """A function whose code is referenced via an S3 bucket/key can be
        created and invoked ($LATEST version)."""
        lambda_name = 'test_lambda_%s' % short_uid()
        bucket_name = 'test_bucket_lambda'
        bucket_key = 'test_lambda.zip'
        # upload zip file to S3
        zip_file = testutil.create_lambda_archive(
            load_file(TEST_LAMBDA_PYTHON),
            get_content=True,
            libs=TEST_LAMBDA_LIBS,
            runtime=LAMBDA_RUNTIME_PYTHON27
        )
        self.s3_client.create_bucket(Bucket=bucket_name)
        self.s3_client.upload_fileobj(
            BytesIO(zip_file), bucket_name, bucket_key)
        # create lambda function
        self.lambda_client.create_function(
            FunctionName=lambda_name, Handler='handler.handler',
            Runtime=lambda_api.LAMBDA_RUNTIME_PYTHON27, Role='r1',
            Code={
                'S3Bucket': bucket_name,
                'S3Key': bucket_key
            }
        )
        # invoke lambda function
        data_before = b'{"foo": "bar with \'quotes\\""}'
        result = self.lambda_client.invoke(
            FunctionName=lambda_name, Payload=data_before)
        data_after = json.loads(result['Payload'].read())
        self.assertEqual(json.loads(to_str(data_before)), data_after['event'])
        context = data_after['context']
        self.assertEqual('$LATEST', context['function_version'])
        self.assertEqual(lambda_name, context['function_name'])
        # clean up
        testutil.delete_lambda_function(lambda_name)

    def test_python_lambda_running_in_docker(self):
        """Python 3.6 function executes correctly (Docker executor only)."""
        if not use_docker():
            return
        zip_file = testutil.create_lambda_archive(
            load_file(TEST_LAMBDA_PYTHON3),
            get_content=True,
            libs=TEST_LAMBDA_LIBS,
            runtime=LAMBDA_RUNTIME_PYTHON36
        )
        testutil.create_lambda_function(
            func_name=TEST_LAMBDA_NAME_PY3,
            zip_file=zip_file,
            runtime=LAMBDA_RUNTIME_PYTHON36
        )
        result = self.lambda_client.invoke(
            FunctionName=TEST_LAMBDA_NAME_PY3, Payload=b'{}')
        result_data = result['Payload'].read()
        self.assertEqual(result['StatusCode'], 200)
        self.assertEqual(to_str(result_data).strip(), '{}')
        # clean up
        testutil.delete_lambda_function(TEST_LAMBDA_NAME_PY3)

    def test_handler_in_submodule(self):
        """Handler modules nested in subpackages (dotted handler path) work."""
        func_name = 'lambda-%s' % short_uid()
        zip_file = testutil.create_lambda_archive(
            load_file(TEST_LAMBDA_PYTHON), get_content=True,
            libs=TEST_LAMBDA_LIBS, runtime=LAMBDA_RUNTIME_PYTHON36,
            file_name='abc/def/main.py')
        testutil.create_lambda_function(func_name=func_name, zip_file=zip_file,
            handler='abc.def.main.handler', runtime=LAMBDA_RUNTIME_PYTHON36)
        # invoke function and assert result
        result = self.lambda_client.invoke(FunctionName=func_name, Payload=b'{}')
        result_data = json.loads(result['Payload'].read())
        self.assertEqual(result['StatusCode'], 200)
        self.assertEqual(result_data['event'], json.loads('{}'))
class TestNodeJSRuntimes(LambdaTestBase):
    """Integration test for the Node.js 8.10 Lambda runtime."""

    @classmethod
    def setUpClass(cls):
        cls.lambda_client = aws_stack.connect_to_service('lambda')

    def test_nodejs_lambda_running_in_docker(self):
        """Deploy, invoke, log-check and remove a Node.js Lambda (Docker only)."""
        if not use_docker():
            return
        archive = testutil.create_zip_file(TEST_LAMBDA_NODEJS, get_content=True)
        testutil.create_lambda_function(
            func_name=TEST_LAMBDA_NAME_JS,
            zip_file=archive,
            handler='lambda_integration.handler',
            runtime=LAMBDA_RUNTIME_NODEJS810
        )
        response = self.lambda_client.invoke(
            FunctionName=TEST_LAMBDA_NAME_JS, Payload=b'{}')
        payload = response['Payload'].read()
        self.assertEqual(response['StatusCode'], 200)
        self.assertEqual(to_str(payload).strip(), '{}')
        # verify that the handler's log line made it into CloudWatch
        expected_logs = ['.*Node.js Lambda handler executing.']
        self.check_lambda_logs(TEST_LAMBDA_NAME_JS, expected_lines=expected_logs)
        # clean up
        testutil.delete_lambda_function(TEST_LAMBDA_NAME_JS)
class TestCustomRuntimes(LambdaTestBase):
    """Integration test for the custom ("provided") Lambda runtime."""

    @classmethod
    def setUpClass(cls):
        cls.lambda_client = aws_stack.connect_to_service('lambda')

    def test_custom_runtime_lambda_running_in_docker(self):
        """Deploy, invoke, log-check and remove a custom-runtime Lambda
        (Docker executor only).

        Renamed from ``test_nodejs_lambda_running_in_docker`` - the old name
        was a copy/paste leftover from TestNodeJSRuntimes; this test
        exercises the custom runtime, not Node.js.
        """
        if not use_docker():
            return
        zip_file = testutil.create_zip_file(
            TEST_LAMBDA_CUSTOM_RUNTIME, get_content=True)
        testutil.create_lambda_function(
            func_name=TEST_LAMBDA_NAME_CUSTOM_RUNTIME,
            zip_file=zip_file,
            handler='function.handler',
            runtime=LAMBDA_RUNTIME_CUSTOM_RUNTIME
        )
        result = self.lambda_client.invoke(
            FunctionName=TEST_LAMBDA_NAME_CUSTOM_RUNTIME,
            Payload=b'{"text":"bar with \'quotes\\""}')
        result_data = result['Payload'].read()
        self.assertEqual(result['StatusCode'], 200)
        # the custom runtime echoes the (re-serialized) request body
        self.assertEqual(
            to_str(result_data).strip(),
            """Echoing request: '{"text": "bar with \'quotes\\""}'""")
        # assert that logs are present
        expected = ['.*Custom Runtime Lambda handler executing.']
        self.check_lambda_logs(
            TEST_LAMBDA_NAME_CUSTOM_RUNTIME, expected_lines=expected)
        # clean up
        testutil.delete_lambda_function(TEST_LAMBDA_NAME_CUSTOM_RUNTIME)
class TestDotNetCoreRuntimes(LambdaTestBase):
    """Integration test for the .NET Core 2.0 Lambda runtime."""

    @classmethod
    def setUpClass(cls):
        cls.lambda_client = aws_stack.connect_to_service('lambda')
        # lambda .NET Core 2.0 is already a zip
        zip_file = TEST_LAMBDA_DOTNETCORE2
        cls.zip_file_content = None
        with open(zip_file, 'rb') as file_obj:
            cls.zip_file_content = file_obj.read()

    def test_dotnet_lambda_running_in_docker(self):
        """Deploy, invoke, log-check and remove the .NET Core 2 function
        (only exercised when Lambdas run in Docker)."""
        if not use_docker():
            return
        testutil.create_lambda_function(
            func_name=TEST_LAMBDA_NAME_DOTNETCORE2,
            zip_file=self.zip_file_content,
            handler='DotNetCore2::DotNetCore2.Lambda.Function::SimpleFunctionHandler',
            runtime=LAMBDA_RUNTIME_DOTNETCORE2
        )
        result = self.lambda_client.invoke(
            FunctionName=TEST_LAMBDA_NAME_DOTNETCORE2, Payload=b'{}')
        result_data = result['Payload'].read()
        self.assertEqual(result['StatusCode'], 200)
        self.assertEqual(to_str(result_data).strip(), '{}')
        # assert that logs are present
        expected = ['Running .NET Core 2.0 Lambda']
        self.check_lambda_logs(TEST_LAMBDA_NAME_DOTNETCORE2, expected_lines=expected)
        # clean up
        testutil.delete_lambda_function(TEST_LAMBDA_NAME_DOTNETCORE2)
class TestRubyRuntimes(LambdaTestBase):
    """Integration test for the Ruby 2.5 Lambda runtime."""

    @classmethod
    def setUpClass(cls):
        cls.lambda_client = aws_stack.connect_to_service('lambda')

    def test_ruby_lambda_running_in_docker(self):
        """Deploy, invoke and remove a Ruby Lambda (Docker executor only)."""
        if not use_docker():
            return
        archive = testutil.create_zip_file(TEST_LAMBDA_RUBY, get_content=True)
        testutil.create_lambda_function(
            func_name=TEST_LAMBDA_NAME_RUBY,
            zip_file=archive,
            handler='lambda_integration.handler',
            runtime=LAMBDA_RUNTIME_RUBY25
        )
        response = self.lambda_client.invoke(
            FunctionName=TEST_LAMBDA_NAME_RUBY, Payload=b'{}')
        payload = response['Payload'].read()
        self.assertEqual(response['StatusCode'], 200)
        self.assertEqual(to_str(payload).strip(), '{}')
        # clean up
        testutil.delete_lambda_function(TEST_LAMBDA_NAME_RUBY)
class TestJavaRuntimes(LambdaTestBase):
    """Integration tests for the Java 8 Lambda runtime, covering plain
    handlers, stream handlers, serializable POJOs and event payloads."""

    @classmethod
    def setUpClass(cls):
        cls.lambda_client = aws_stack.connect_to_service('lambda')
        # deploy lambda - Java
        if not os.path.exists(TEST_LAMBDA_JAVA):
            mkdir(os.path.dirname(TEST_LAMBDA_JAVA))
            # fetch the pre-built test JAR from Maven Central on first run
            download(TEST_LAMBDA_JAR_URL, TEST_LAMBDA_JAVA)
        # Lambda supports single JAR deployments without the zip,
        # so we upload the JAR directly.
        cls.test_java_jar = load_file(TEST_LAMBDA_JAVA, mode='rb')
        cls.test_java_zip = testutil.create_zip_file(TEST_LAMBDA_JAVA, get_content=True)
        testutil.create_lambda_function(
            func_name=TEST_LAMBDA_NAME_JAVA,
            zip_file=cls.test_java_jar,
            runtime=LAMBDA_RUNTIME_JAVA8,
            handler='cloud.localstack.sample.LambdaHandler'
        )
        # deploy lambda - Java with stream handler
        testutil.create_lambda_function(
            func_name=TEST_LAMBDA_NAME_JAVA_STREAM,
            zip_file=cls.test_java_jar,
            runtime=LAMBDA_RUNTIME_JAVA8,
            handler='cloud.localstack.sample.LambdaStreamHandler'
        )
        # deploy lambda - Java with serializable input object
        testutil.create_lambda_function(
            func_name=TEST_LAMBDA_NAME_JAVA_SERIALIZABLE,
            zip_file=cls.test_java_zip,
            runtime=LAMBDA_RUNTIME_JAVA8,
            handler='cloud.localstack.sample.SerializedInputLambdaHandler'
        )
        # upload the JAR directly
        cls.test_java_jar_with_lib = load_file(TEST_LAMBDA_JAVA_WITH_LIB, mode='rb')
        testutil.create_lambda_function(
            func_name=TEST_LAMBDA_NAME_JAVA_WITH_LIB,
            zip_file=cls.test_java_jar_with_lib,
            runtime=LAMBDA_RUNTIME_JAVA8,
            handler='cloud.localstack.sample.LambdaHandlerWithLib'
        )

    @classmethod
    def tearDownClass(cls):
        # clean up
        testutil.delete_lambda_function(TEST_LAMBDA_NAME_JAVA)
        testutil.delete_lambda_function(TEST_LAMBDA_NAME_JAVA_STREAM)
        testutil.delete_lambda_function(TEST_LAMBDA_NAME_JAVA_SERIALIZABLE)
        testutil.delete_lambda_function(TEST_LAMBDA_NAME_JAVA_WITH_LIB)

    def test_java_runtime(self):
        """Basic synchronous invocation of the Java handler."""
        self.assertIsNotNone(self.test_java_jar)
        result = self.lambda_client.invoke(
            FunctionName=TEST_LAMBDA_NAME_JAVA, Payload=b'{}')
        result_data = result['Payload'].read()
        self.assertEqual(result['StatusCode'], 200)
        self.assertIn('LinkedHashMap', to_str(result_data))

    def test_java_runtime_with_lib(self):
        """Invocation of a Java handler packaged with a bundled library."""
        self.assertIsNotNone(self.test_java_jar_with_lib)
        result = self.lambda_client.invoke(
            FunctionName=TEST_LAMBDA_NAME_JAVA_WITH_LIB, Payload=b'{"echo":"echo"}')
        result_data = result['Payload'].read()
        self.assertEqual(result['StatusCode'], 200)
        self.assertIn('echo', to_str(result_data))

    def test_sns_event(self):
        """Asynchronous invocation with an SNS-shaped event payload."""
        result = self.lambda_client.invoke(
            FunctionName=TEST_LAMBDA_NAME_JAVA, InvocationType='Event',
            Payload=b'{"Records": [{"Sns": {"Message": "{}"}}]}')
        self.assertEqual(result['StatusCode'], 202)

    def test_ddb_event(self):
        """Asynchronous invocation with a DynamoDB-shaped event payload."""
        result = self.lambda_client.invoke(
            FunctionName=TEST_LAMBDA_NAME_JAVA, InvocationType='Event',
            Payload=b'{"Records": [{"dynamodb": {"Message": "{}"}}]}')
        self.assertEqual(result['StatusCode'], 202)

    def test_kinesis_invocation(self):
        """Synchronous invocation with a Kinesis-shaped event payload."""
        result = self.lambda_client.invoke(
            FunctionName=TEST_LAMBDA_NAME_JAVA,
            Payload=b'{"Records": [{"Kinesis": {"Data": "data", "PartitionKey": "partition"}}]}')
        result_data = result['Payload'].read()
        self.assertEqual(result['StatusCode'], 200)
        self.assertIn('KinesisEvent', to_str(result_data))

    def test_kinesis_event(self):
        """Asynchronous Kinesis event: acknowledged with 202, empty payload."""
        result = self.lambda_client.invoke(
            FunctionName=TEST_LAMBDA_NAME_JAVA, InvocationType='Event',
            Payload=b'{"Records": [{"Kinesis": {"Data": "data", "PartitionKey": "partition"}}]}')
        result_data = result['Payload'].read()
        self.assertEqual(result['StatusCode'], 202)
        self.assertEqual(to_str(result_data).strip(), '')

    def test_stream_handler(self):
        """Invocation of a RequestStreamHandler-based Java function."""
        result = self.lambda_client.invoke(
            FunctionName=TEST_LAMBDA_NAME_JAVA_STREAM, Payload=b'{}')
        result_data = result['Payload'].read()
        self.assertEqual(result['StatusCode'], 200)
        self.assertEqual(to_str(result_data).strip(), '{}')

    def test_serializable_input_object(self):
        """JSON input is deserialized into a Java POJO and echoed back."""
        result = self.lambda_client.invoke(
            FunctionName=TEST_LAMBDA_NAME_JAVA_SERIALIZABLE,
            Payload=b'{"bucket": "test_bucket", "key": "test_key"}')
        result_data = result['Payload'].read()
        self.assertEqual(result['StatusCode'], 200)
        self.assertDictEqual(
            json.loads(to_str(result_data)),
            {'validated': True, 'bucket': 'test_bucket', 'key': 'test_key'}
        )
class TestDockerBehaviour(LambdaTestBase):
    """Tests for the behaviour of the Docker-based Lambda executors
    (container reuse, idle cleanup, and docker command construction)."""

    @classmethod
    def setUpClass(cls):
        cls.lambda_client = aws_stack.connect_to_service('lambda')

    def test_prime_and_destroy_containers(self):
        """Containers are created lazily on first invoke, reused afterwards
        (faster subsequent calls), and removed by cleanup()."""
        # run these tests only for the "reuse containers" Lambda executor
        if not isinstance(lambda_api.LAMBDA_EXECUTOR,
                          lambda_executors.LambdaExecutorReuseContainers):
            return
        executor = lambda_api.LAMBDA_EXECUTOR
        func_name = 'test_prime_and_destroy_containers'
        func_arn = lambda_api.func_arn(func_name)
        # make sure existing containers are gone
        executor.cleanup()
        self.assertEqual(len(executor.get_all_container_names()), 0)
        # deploy and invoke lambda without Docker
        zip_file = testutil.create_lambda_archive(
            load_file(TEST_LAMBDA_ENV),
            get_content=True,
            libs=TEST_LAMBDA_LIBS,
            runtime=LAMBDA_RUNTIME_PYTHON27
        )
        testutil.create_lambda_function(
            func_name=func_name,
            zip_file=zip_file,
            runtime=LAMBDA_RUNTIME_PYTHON27,
            envvars={'Hello': 'World'}
        )
        # creating the function alone must not spin up a container
        self.assertEqual(len(executor.get_all_container_names()), 0)
        self.assertDictEqual(executor.function_invoke_times, {})
        # invoke a few times.
        durations = []
        num_iterations = 3
        for i in range(0, num_iterations + 1):
            prev_invoke_time = None
            if i > 0:
                prev_invoke_time = executor.function_invoke_times[func_arn]
            start_time = time.time()
            self.lambda_client.invoke(FunctionName=func_name, Payload=b'{}')
            duration = time.time() - start_time
            self.assertEqual(len(executor.get_all_container_names()), 1)
            # ensure the last invoke time is being updated properly.
            if i > 0:
                self.assertGreater(executor.function_invoke_times[func_arn], prev_invoke_time)
            else:
                self.assertGreater(executor.function_invoke_times[func_arn], 0)
            durations.append(duration)
        # the first call would have created the container. subsequent calls would reuse and be faster.
        for i in range(1, num_iterations + 1):
            self.assertLess(durations[i], durations[0])
        # status 1 indicates a running container (per the executor's API)
        status = executor.get_docker_container_status(func_arn)
        self.assertEqual(status, 1)
        container_network = executor.get_docker_container_network(func_arn)
        self.assertEqual(container_network, 'default')
        executor.cleanup()
        status = executor.get_docker_container_status(func_arn)
        self.assertEqual(status, 0)
        self.assertEqual(len(executor.get_all_container_names()), 0)
        # clean up
        testutil.delete_lambda_function(func_name)

    def test_docker_command_for_separate_container_lambda_executor(self):
        """LAMBDA_DOCKER_NETWORK is propagated into the generated
        `docker run` command of the separate-containers executor."""
        # run these tests only for the "separate containers" Lambda executor
        if not isinstance(lambda_api.LAMBDA_EXECUTOR,
                          lambda_executors.LambdaExecutorSeparateContainers):
            return
        executor = lambda_api.LAMBDA_EXECUTOR
        func_name = 'test_docker_command_for_separate_container_lambda_executor'
        func_arn = lambda_api.func_arn(func_name)
        handler = 'handler'
        lambda_cwd = '/app/lambda'
        network = 'compose_network'
        config.LAMBDA_DOCKER_NETWORK = network
        cmd = executor.prepare_execution(func_arn, {}, LAMBDA_RUNTIME_NODEJS810, '', handler, lambda_cwd)
        expected = 'docker run -v "%s":/var/task --network="%s" --rm "lambci/lambda:%s" "%s"' % (
            lambda_cwd, network, LAMBDA_RUNTIME_NODEJS810, handler)
        self.assertIn(('--network="%s"' % network), cmd, 'cmd=%s expected=%s' % (cmd, expected))
        # restore global config so other tests are unaffected
        config.LAMBDA_DOCKER_NETWORK = ''

    def test_destroy_idle_containers(self):
        """Containers idle for longer than MAX_CONTAINER_IDLE_TIME_MS are
        removed by idle_container_destroyer(); active ones are kept."""
        # run these tests only for the "reuse containers" Lambda executor
        if not isinstance(lambda_api.LAMBDA_EXECUTOR,
                          lambda_executors.LambdaExecutorReuseContainers):
            return
        executor = lambda_api.LAMBDA_EXECUTOR
        func_name = 'test_destroy_idle_containers'
        func_arn = lambda_api.func_arn(func_name)
        # make sure existing containers are gone
        executor.destroy_existing_docker_containers()
        self.assertEqual(len(executor.get_all_container_names()), 0)
        # deploy and invoke lambda without Docker
        zip_file = testutil.create_lambda_archive(
            load_file(TEST_LAMBDA_ENV),
            get_content=True,
            libs=TEST_LAMBDA_LIBS,
            runtime=LAMBDA_RUNTIME_PYTHON27
        )
        testutil.create_lambda_function(
            func_name=func_name,
            zip_file=zip_file,
            runtime=LAMBDA_RUNTIME_PYTHON27,
            envvars={'Hello': 'World'}
        )
        self.assertEqual(len(executor.get_all_container_names()), 0)
        self.lambda_client.invoke(FunctionName=func_name, Payload=b'{}')
        self.assertEqual(len(executor.get_all_container_names()), 1)
        # try to destroy idle containers.
        executor.idle_container_destroyer()
        self.assertEqual(len(executor.get_all_container_names()), 1)
        # simulate an idle container
        executor.function_invoke_times[func_arn] = time.time() - lambda_executors.MAX_CONTAINER_IDLE_TIME_MS
        executor.idle_container_destroyer()
        self.assertEqual(len(executor.get_all_container_names()), 0)
        # clean up
        testutil.delete_lambda_function(func_name)
| 39.540184 | 110 | 0.671054 |
993035b9e7e9418efe01cea99bbbfd19b48bf2ec | 601 | py | Python | old_code/setup2.py | aitchslash/lahman-updater | e3c92780a27b7c0fe63f67d2dcf6a95478f920cc | [
"MIT"
] | 3 | 2016-03-20T18:03:58.000Z | 2018-05-07T15:50:07.000Z | setup.py | aitchslash/lahman-updater | e3c92780a27b7c0fe63f67d2dcf6a95478f920cc | [
"MIT"
] | null | null | null | setup.py | aitchslash/lahman-updater | e3c92780a27b7c0fe63f67d2dcf6a95478f920cc | [
"MIT"
] | null | null | null | """Setup for lahman_update."""
from setuptools import setup, find_packages

# Use the README as the long package description.
with open('README.rst') as f:
    readme = f.read()

# Read the license text; avoid binding it to the name `license`,
# which would shadow the `license` builtin added by the site module.
with open('LICENSE') as f:
    license_text = f.read()

setup(
    name='lahman_updater',
    version='1.0.0',
    description='Update lahman database',
    long_description=readme,
    author='Benjamin Field',
    author_email='benjamin.field@gmail.com',
    url='https://github.com/aitchslash',
    license=license_text,
    install_requires=['spynner==2.19',
                      'PyMySQL==0.6.7',
                      'beautifulsoup4==4.4.1'],
    packages=find_packages()
)
| 24.04 | 47 | 0.620632 |
7e324849e9d1c9a05bc9a81fba5322ec4cf9282a | 7,620 | py | Python | lantz/lantz/drivers/rigol/dg1022.py | zhong-lab/optics | 9de1942d9a128183ecb3d360b160b27126e7b8f0 | [
"BSD-2-Clause"
] | 6 | 2016-04-13T12:59:18.000Z | 2020-06-24T17:43:04.000Z | lantz/lantz/drivers/rigol/dg1022.py | zhong-lab/optics | 9de1942d9a128183ecb3d360b160b27126e7b8f0 | [
"BSD-2-Clause"
] | null | null | null | lantz/lantz/drivers/rigol/dg1022.py | zhong-lab/optics | 9de1942d9a128183ecb3d360b160b27126e7b8f0 | [
"BSD-2-Clause"
] | 6 | 2015-12-14T19:30:36.000Z | 2020-06-29T21:16:01.000Z | """
lantz.drivers.rigol.dg1022
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Implementation of the Rigol DG1022 two-channel function generator driver,
loosely based on the Ag33522a driver by Berk Diler.
Manual available from: https://www.rigolna.com/products/waveform-generators/dg1000/
Author: Peter Mintun
Date: 12/11/2017
"""
import numpy as np
import lantz
from lantz import Action, Feat, DictFeat, ureg
from collections import OrderedDict
from lantz.messagebased import MessageBasedDriver
class DG1022(MessageBasedDriver):
    """Lantz driver for the Rigol DG1022 two-channel function generator.

    Communicates via SCPI commands over a VISA message-based resource.
    """

    DEFAULTS = {
        'COMMON': {
            'write_termination': '\n',
            'read_termination': '\n',
        }
    }

    # the two physical output channels (key == SCPI channel index)
    CHANNELS = OrderedDict([(1, 1),
                            (2, 2)])

    # friendly on/off values mapped to SCPI mnemonics
    TOGGLE = OrderedDict([('on', 'ON'),
                          ('off', 'OFF')])

    # friendly waveform names mapped to SCPI mnemonics
    WAVEFORMS = OrderedDict([('arbitrary', 'ARB'),
                             ('dc', 'DC'),
                             ('harmonic', 'HARM'),
                             ('noise', 'NOIS'),
                             ('pulse', 'PULS'),
                             ('ramp', 'RAMP'),
                             ('sine', 'SIN'),
                             ('square', 'SQU'),
                             ('triangle', 'TRI'),
                             ('user', 'USER')])

    @Feat(read_once=True)
    def idn(self):
        """Instrument identification string (*IDN?)."""
        return self.query('*IDN?')

    @Action()
    def reset(self):
        """Reset the instrument to its factory default state (*RST)."""
        return self.write('*RST')

    @Feat()
    def error(self):
        """Pop the next entry from the instrument's error queue."""
        msg = self.query("SYST:ERR?")
        return msg.split(',')  # error code, error message

    @DictFeat(keys=CHANNELS, units="Hz", limits=(1e-6, 25e6))
    def frequency(self, channel):
        """
        Returns the frequency of the specified channel, in Hertz.
        """
        return float(self.query('SOUR{}:FREQ?'.format(channel)))

    @frequency.setter
    def frequency(self, channel, value):
        """
        Sets the frequency of the specified channel, to value. Note that this
        is not smart enough to keep track of the different bandwidth constraints
        on different types of waveforms, so see the manual accordingly.
        """
        return self.write('SOUR{}:FREQ {:1.6f}'.format(channel, value))

    @DictFeat(keys=CHANNELS, values=WAVEFORMS)
    def function(self, channel):
        """
        Returns the function of the specified channel from the options
        enumerated in WAVEFORMS.
        """
        # reply is quoted, e.g. "SIN,1.0,..." - strip quotes, keep the mnemonic
        result = self.query('SOUR{}:APPL?'.format(channel))[1:-1]
        return result.split(',')[0]

    @function.setter
    def function(self, channel, value):
        """
        Sets the function of the specified channel to value (specified in
        WAVEFORMS).
        """
        return self.write('SOUR{}:APPL:{}'.format(channel, value))

    @DictFeat(keys=CHANNELS, values=TOGGLE)
    def output(self, channel):
        """
        Reads the output state of the specified channel.
        """
        return self.query('OUTP{}?'.format(channel))

    @output.setter
    def output(self, channel, val):
        """
        Sets the output state of the specified channel to val.
        """
        return self.write('OUTP{} {}'.format(channel, val))

    @DictFeat(keys=CHANNELS, units="V", limits=(-10., 10.))
    def voltage_low(self, channel):
        """
        Queries the low voltage level for the specified channel.
        """
        return float(self.query("SOUR{}:VOLT:LOW?".format(channel)))

    @voltage_low.setter
    def voltage_low(self, channel, value):
        """
        Sets the low voltage level for the specified channel.
        """
        return self.write("SOUR{}:VOLT:LOW {:1.6f}".format(channel, value))

    @DictFeat(keys=CHANNELS, units="V", limits=(-10., 10.))
    def voltage_high(self, channel):
        """
        Queries the high voltage level for the specified channel.
        """
        return float(self.query("SOUR{}:VOLT:HIGH?".format(channel)))

    @voltage_high.setter
    def voltage_high(self, channel, value):
        """
        Sets the high voltage level for the specified channel.
        """
        return self.write("SOUR{}:VOLT:HIGH {:1.6f}".format(channel, value))

    @DictFeat(keys=CHANNELS, units="V", limits=(0., 20.))
    def voltage_amplitude(self, channel):
        """
        Queries the peak-to-peak voltage amplitude of the specified output
        channel.
        """
        return float(self.query("SOUR{}:VOLT?".format(channel)))

    @voltage_amplitude.setter
    def voltage_amplitude(self, channel, value):
        """
        Sets the peak-to-peak voltage amplitude of the specified output channel.
        """
        return self.write("SOUR{}:VOLT {:1.6f}".format(channel, value))

    @DictFeat(keys=CHANNELS, units="V", limits=(-10., 10.))
    def voltage_offset(self, channel):
        """
        Queries the offset voltage of the specified output channel.
        """
        return float(self.query("SOUR{}:VOLT:OFFS?".format(channel)))

    @voltage_offset.setter
    def voltage_offset(self, channel, value):
        """
        Sets the offset voltage of the specified output channel.
        """
        self.write("SOUR{}:VOLT:OFFS {:1.6f}".format(channel, value))
if __name__ == '__main__':
    # Smoke test: connect to the instrument and exercise the channel-level
    # Feats on both output channels, printing the results.
    # note: if you don't see your device, it may not work over USB 3.0?
    addr = 'USB0::0x1AB1::0x0642::DG1ZA192902819::INSTR'
    try:
        inst = DG1022(addr)
        inst.initialize()
        inst.reset()
        print('Identification:{}'.format(inst.idn))
        print('Error:{}'.format(inst.error))
    except Exception:
        # Was a bare `except:` (also swallowed KeyboardInterrupt/SystemExit)
        # that then fell through to the loop below with `inst` unbound,
        # causing a confusing NameError. Exit explicitly instead.
        print('Could not find instrument, check connection/address!')
        raise SystemExit(1)
    # code to check various parameters from supported channels
    for channel in inst.CHANNELS.keys():
        inst.frequency[channel] = 1e-6
        print('Channel {} frequency: {}'.format(channel, inst.frequency[channel]))
        inst.frequency[channel] = 20e6
        print('Channel {} frequency: {}'.format(channel, inst.frequency[channel]))
        print('Channel {} function: {}'.format(channel, inst.function[channel]))
        inst.function[channel] = 'square'
        print('Channel {} function: {}'.format(channel, inst.function[channel]))
        inst.output[channel] = 'off'
        print('Channel {} output:{}'.format(channel, inst.output[channel]))
        inst.output[channel] = 'on'
        print('Channel {} output:{}'.format(channel, inst.output[channel]))
        inst.output[channel] = 'off'
        print('Channel {} output:{}'.format(channel, inst.output[channel]))
        print('Channel {} low voltage:{}'.format(channel, inst.voltage_low[channel]))
        inst.voltage_low[channel] = -1.0
        print('Channel {} low voltage:{}'.format(channel, inst.voltage_low[channel]))
        print('Channel {} high voltage:{}'.format(channel, inst.voltage_high[channel]))
        inst.voltage_high[channel] = 1.0
        print('Channel {} high voltage:{}'.format(channel, inst.voltage_high[channel]))
        print('Channel {} voltage amplitude:{}'.format(channel, inst.voltage_amplitude[channel]))
        print('Channel {} voltage offset:{}'.format(channel, inst.voltage_offset[channel]))
        inst.voltage_amplitude[channel] = 5.0
        inst.voltage_offset[channel] = 0.0
        print('Channel {} voltage amplitude:{}'.format(channel, inst.voltage_amplitude[channel]))
        print('Channel {} voltage offset:{}'.format(channel, inst.voltage_offset[channel]))
        print('Channel {} low voltage:{}'.format(channel, inst.voltage_low[channel]))
        print('Channel {} high voltage:{}'.format(channel, inst.voltage_high[channel]))
c4386a0e2aacabd8703e84e211f68e4d589d889e | 5,331 | py | Python | lib/constitution.py | dmatthewsbnd251/Inventory_Manager | 148ff7734141a80c960fb02b1171363e5c82169b | [
"Unlicense",
"MIT"
] | null | null | null | lib/constitution.py | dmatthewsbnd251/Inventory_Manager | 148ff7734141a80c960fb02b1171363e5c82169b | [
"Unlicense",
"MIT"
] | null | null | null | lib/constitution.py | dmatthewsbnd251/Inventory_Manager | 148ff7734141a80c960fb02b1171363e5c82169b | [
"Unlicense",
"MIT"
] | null | null | null | """This class contains all of the rules,
criteria, and actions."""
from lib.criteria import Criteria
from lib.rule import Rule
class Constitution:
    """Container for the rules, criteria, and state-template options
    loaded from the configuration."""

    # NOTE(review): these are *class-level* mutable attributes, so every
    # Constitution instance shares the same sets/dict/list. Presumably only
    # a single instance is ever created - verify before creating more.
    criteria = set()
    rules = set()
    # state_template_options[config_name] = {
    #     options: value,
    #     options2: value2,
    #     ...
    # }
    state_template_options = {}
    required_datasources = []
def __init__(self):
pass
def __str__(self):
return_str = "Rules:\n"
for r in self.get_next_rule_by_priority():
return_str += str(r)
return return_str
def add_criteria(self, c):
if not isinstance(c, Criteria):
raise ValueError("Only criteria instances can be added to the critieria of the constitution.")
else:
self.criteria.add(c)
def add_rule(self, r):
if not isinstance(r, Rule):
raise ValueError("Only rule instances can be added to the rules of the constitution.")
else:
self.rules.add(r)
def load_legislation(self, config):
#Load required data sources
dsources = config.get("Monitoring", "required_datasources")
for d in dsources.split(','):
d = d.strip().lower()
if d:
self.required_datasources.append(d)
"""Input: ConfigParser object"""
for section in config.sections():
if section.startswith('Criteria ') and len(section) > 9:
criteria_name = ''.join(section[9:]).lower()
if 'reverse' in config.options(section):
reverse = config.getboolean(section, 'reverse')
else:
reverse = False
c = Criteria(criteria_name=criteria_name, reverse=reverse)
hascriteria = False
for key, val in config.items(section):
if key.startswith('criteria_name_'):
index = key[14:]
c.add_criteria(val, config.get(section, 'criteria_criteria_' + index))
hascriteria = True
if hascriteria:
self.criteria.add(c)
elif section.startswith("Config ") and len(section) > 7:
config_name = ''.join(section[7:]).lower()
self.state_template_options[config_name] = {}
for key, val in config.items(section):
self.state_template_options[config_name][key] = val
for section in config.sections():
if section.startswith('Rule ') and len(section) > 5:
rule_name = ''.join(section[5:]).lower()
priority = config.getint(section, 'priority')
criteria = set()
if 'state_template' in config.options(section):
state_template = config.get(section, 'state_template')
else:
state_template = None
kwargs = None
cfg = None
if 'options_config' in config.options(section):
cfg = config.get(section, 'options_config').lower().strip()
kwargs = self.state_template_options[cfg]
new_rule = Rule(rule_name=rule_name, priority=priority, state_template=state_template, kwargs=kwargs,
config_name=cfg)
self.add_rule(new_rule)
for key, val in config.items(section):
if key.startswith('rule_type_') and val.lower() == 'criteria':
index = ''.join(key[10:]).lower()
criteria.add(
self.find_criteria_by_name(
config.get(section, 'rule_name_' + index).lower()
)
)
for c in criteria:
new_rule.add_criteria(c)
for section in config.sections():
if section.startswith('Rule ') and len(section) > 5:
rule_target_name = ''.join(section[5:]).lower()
for key, val in config.items(section):
if key.startswith('rule_type_') and val.lower() == 'rule':
index = ''.join(key[10:]).lower()
rule_source_name = config.get(section, 'rule_name_' + index).lower()
r_source = self.find_rule_by_name(rule_source_name)
r_target = self.find_rule_by_name(rule_target_name)
r_target.add_rule(r_source)
def get_next_rule_by_priority(self):
rules_priorities = {}
for r in self.rules:
rules_priorities[int(r.priority)] = r
for p in sorted(rules_priorities.keys(), reverse=True):
yield rules_priorities[p]
def get_rules_with_templates_by_priority(self):
for r in self.get_next_rule_by_priority():
if r.state_template is not None:
yield r
def find_rule_by_name(self, name):
for r in self.rules:
if r.rule_name == name:
return r
return None
def find_criteria_by_name(self, name):
for c in self.criteria:
if c.criteria_name == name:
return c
return None
| 38.352518 | 117 | 0.536672 |
03536df3c245a1d8480429a380045a56e1f3a049 | 24 | py | Python | moviepy/version.py | bobatsar/moviepy | 17028410205a56f2937011e08ae0e91971e49318 | [
"MIT"
] | 1 | 2021-06-13T02:53:26.000Z | 2021-06-13T02:53:26.000Z | moviepy/version.py | bobatsar/moviepy | 17028410205a56f2937011e08ae0e91971e49318 | [
"MIT"
] | 6 | 2019-12-17T13:32:08.000Z | 2021-06-02T00:49:29.000Z | moviepy/version.py | bobatsar/moviepy | 17028410205a56f2937011e08ae0e91971e49318 | [
"MIT"
] | 2 | 2021-03-21T10:37:02.000Z | 2021-03-21T10:52:23.000Z | __version__ = "0.2.3.2"
| 12 | 23 | 0.625 |
f496f0635fb036f725165bcab1deb5260d2ad770 | 3,541 | py | Python | garage/config.py | XavierJingfeng/starter | 274566e491d5c7157f3c8deff136c56838022349 | [
"MIT"
] | null | null | null | garage/config.py | XavierJingfeng/starter | 274566e491d5c7157f3c8deff136c56838022349 | [
"MIT"
] | null | null | null | garage/config.py | XavierJingfeng/starter | 274566e491d5c7157f3c8deff136c56838022349 | [
"MIT"
] | null | null | null | import os
import os.path as osp
# General config
GARAGE_PROJECT_PATH = os.environ.get(
'GARAGE_PROJECT_PATH', osp.abspath(osp.join(osp.dirname(__file__), '..')))
GARAGE_LOG_DIR = os.environ.get('GARAGE_LOG_DIR',
osp.join(GARAGE_PROJECT_PATH, 'data'))
GARAGE_LOG_TENSORBOARD = bool(os.environ.get('GARAGE_LOG_TENSORBOARD', True))
GARAGE_USE_TF = bool(os.environ.get('GARAGE_USE_TF', False))
GARAGE_USE_GPU = bool(os.environ.get('GARAGE_USE_GPU', False))
GARAGE_MUJOCO_KEY_PATH = os.environ.get('GARAGE_MUJOCO_KEY_PATH',
osp.expanduser('~/.mujoco'))
GARAGE_ENV = eval(os.environ.get('GARAGE_ENV', '{}'))
GARAGE_LABEL = os.environ.get('GARAGE_LABEL', 'default')
# Code copying rules (for Docker/AWS/Kubernetes)
GARAGE_CODE_SYNC_IGNORES = eval(
os.environ.get(
'GARAGE_CODE_SYNC_IGNORES', '''[
"*.git/*",
"*data/*",
"*src/*",
"*.pods/*",
"*tests/*",
"*examples/*",
"docs/*"
]'''))
GARAGE_FAST_CODE_SYNC = bool(os.environ.get('GARAGE_FAST_CODE_SYNC', True))
GARAGE_FAST_CODE_SYNC_IGNORES = eval(
os.environ.get('GARAGE_FAST_CODE_SYNC_IGNORES',
'[".git", "data", ".pods"]'))
GARAGE_DOCKER_IMAGE = os.environ.get('GARAGE_DOCKER_IMAGE',
'rlworkgroup/garage-headless')
GARAGE_DOCKER_LOG_DIR = os.environ.get('GARAGE_DOCKER_LOG_DIR', '/tmp/expt')
GARAGE_DOCKER_CODE_DIR = os.environ.get('GARAGE_DOCKER_CODE_DIR',
'/root/code/garage')
# AWS
GARAGE_AWS_S3_PATH = os.environ.get('GARAGE_AWS_S3_PATH', 'INVALID_S3_PATH')
GARAGE_AWS_IMAGE_ID = os.environ.get('GARAGE_AWS_IMAGE_ID', None)
GARAGE_AWS_INSTANCE_TYPE = os.environ.get('GARAGE_AWS_INSTANCE_TYPE',
'm4.xlarge')
GARAGE_AWS_KEY_NAME = os.environ.get('GARAGE_AWS_KEY_NAME', None)
GARAGE_AWS_SPOT = bool(os.environ.get('GARAGE_AWS_SPOT', True))
GARAGE_AWS_SPOT_PRICE = os.environ.get('GARAGE_AWS_SPOT_PRICE', '1.0')
GARAGE_AWS_ACCESS_KEY = os.environ.get("GARAGE_AWS_ACCESS_KEY", None)
GARAGE_AWS_ACCESS_SECRET = os.environ.get("GARAGE_AWS_ACCESS_SECRET", None)
GARAGE_AWS_IAM_INSTANCE_PROFILE_NAME = os.environ.get(
'GARAGE_AWS_IAM_INSTANCE_PROFILE_NAME', 'garage')
GARAGE_AWS_SECURITY_GROUPS = eval(
os.environ.get('GARAGE_AWS_SECURITY_GROUPS', '["garage"]'))
GARAGE_AWS_SECURITY_GROUP_IDS = eval(
os.environ.get('GARAGE_AWS_SECURITY_GROUP_IDS', '[]'))
GARAGE_AWS_NETWORK_INTERFACES = eval(
os.environ.get('GARAGE_AWS_NETWORK_INTERFACES', '[]'))
GARAGE_AWS_EXTRA_CONFIGS = eval(
os.environ.get('GARAGE_AWS_EXTRA_CONFIGS', '{}'))
GARAGE_AWS_REGION_NAME = os.environ.get('GARAGE_AWS_REGION_NAME', 'us-east-1')
GARAGE_AWS_CODE_SYNC_S3_PATH = os.environ.get('GARAGE_AWS_CODE_SYNC_S3_PATH',
's3://to/be/overridden')
GARAGE_AWS_EBS_OPTIMIZED = bool(
os.environ.get('GARAGE_AWS_EBS_OPTIMIZED', True))
# Kubernetes
GARAGE_KUBE_DEFAULT_RESOURCES = eval(
os.environ.get('GARAGE_KUBE_DEFAULT_RESOURCES',
'{"requests": {"cpu": 0.8}}'))
GARAGE_KUBE_DEFAULT_NODE_SELECTOR = eval(
os.environ.get('GARAGE_KUBE_DEFAULT_NODE_SELECTOR',
'{"aws/type": "m4.xlarge"}'))
GARAGE_KUBE_PREFIX = os.environ.get('GARAGE_KUBE_PREFIX', 'garage_')
GARAGE_KUBE_POD_DIR = os.environ.get('GARAGE_KUBE_POD_DIR',
osp.join(GARAGE_PROJECT_PATH, '/.pods'))
| 46.592105 | 78 | 0.672974 |
587d3bfb8fd687c9578655e718eb8e9fb857bcd7 | 852 | py | Python | 100dayspython/day008/caesarcipher01.py | mrqssjeff/project-python | b3b08f2acfe825640a5ee92cf9d6fa45ab580384 | [
"MIT"
] | null | null | null | 100dayspython/day008/caesarcipher01.py | mrqssjeff/project-python | b3b08f2acfe825640a5ee92cf9d6fa45ab580384 | [
"MIT"
] | null | null | null | 100dayspython/day008/caesarcipher01.py | mrqssjeff/project-python | b3b08f2acfe825640a5ee92cf9d6fa45ab580384 | [
"MIT"
] | null | null | null | alphabet = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'y', 'x', 'z',
'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i','j', 'k', 'l', 'm',
'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'y', 'x', 'z']
# Gather the cipher parameters interactively.
# NOTE(review): 'direction' is collected but never used in this chunk --
# only the encode path is implemented; confirm decode handling elsewhere.
direction = str(input("Type 'encode' to encrypt, type 'decode' to decrypt: "))
# Normalise the message: trim and drop spaces (the alphabet holds letters only).
text = str(input("Type your message: ")).strip().replace(' ', '')
shift = int(input("Type the shift number: "))
def encrypt(plain_text, shift_amount):
    """Caesar-encrypt *plain_text* (lowercase letters only) by *shift_amount*.

    Fixes two defects in the original:
    * the hand-written doubled alphabet had 'y' and 'x' transposed;
    * shifts above 26 (or negative) fell outside the doubled list --
      handled here with modular arithmetic.

    Prints the encoded text (as before) and also returns it so callers
    and tests can use the value.
    """
    letters = "abcdefghijklmnopqrstuvwxyz"
    cipher_text = ""
    for letter in plain_text:
        position = letters.index(letter)
        # Wrap around the alphabet for any shift, positive or negative.
        cipher_text += letters[(position + shift_amount) % 26]
    print(f"The encoded text is {cipher_text}")
    return cipher_text
# Run the cipher on the interactively supplied message and shift.
encrypt(plain_text=text, shift_amount=shift)
| 40.571429 | 78 | 0.504695 |
b891607b051c6792cf51e928c72665591e411a1c | 8,145 | py | Python | Fancy_aggregations/binary_parser.py | Fuminides/Fancy_aggregations | 9d7dbfa9b615ab6f582d8a36681ab24ccc94f759 | [
"MIT"
] | 10 | 2019-11-13T15:38:06.000Z | 2022-01-29T09:27:38.000Z | Fancy_aggregations/binary_parser.py | Fuminides/Fancy_aggregations | 9d7dbfa9b615ab6f582d8a36681ab24ccc94f759 | [
"MIT"
] | null | null | null | Fancy_aggregations/binary_parser.py | Fuminides/Fancy_aggregations | 9d7dbfa9b615ab6f582d8a36681ab24ccc94f759 | [
"MIT"
] | 4 | 2020-05-18T11:24:12.000Z | 2022-03-07T08:34:13.000Z | # -*- coding: utf-8 -*-
"""
Created on 04/12/2019
@author: Javier Fumanal Idocin
"""
import numpy as np
from . import integrals
from . import moderate_deviations
from . import tnorms
from . import owas
from . import overlaps
from . import dissimilarities as dis
# Registry of aggregation names accepted by parse(), plus family groupings.
# NOTE(review): supported_functions is never consulted by parse() itself
# (parse raises KeyError on unknown names) -- presumably used by callers.
supported_functions = ['mean', 'median', 'min', 'max', 'sugeno', 'shamacher', 'choquet', 'cfminmin', 'cf12', 'cf', 'owa1', 'owa2', 'owa3', 'geomean', 'sinoverlap', 'hmean',
                       'hamacher', 'luka', 'drastic', 'nilpotent', 'probabilistic_sum', 'bounded_sum', 'drastic_tcnorm', 'nilpotent_maximum', 'einstein_sum']
classic_aggs = ['mean', 'median', 'min', 'max']
owa = ['owa1', 'owa2', 'owa3']
fuzzy_integral = ['sugeno', 'choquet']
choquet_family = ['choquet', 'cf', 'cf12', 'cfminmin']
sugeno_family = ['sugeno', 'shamacher', 'fhamacher']
# NOTE(review): ' luka' below has a leading space, so it will never match a
# lowered aggregation name -- looks like a typo; confirm before fixing.
tnorm = ['min', 'prod', 'hamacher', ' luka', 'drastic', 'nilpotent']
tcnorm = ['probabilistic_sum', 'bounded_sum', 'drastic_tcnorm', 'nilpotent_maximum', 'einstein_sum']
overlap = ['geomean', 'sinoverlap', 'hmean']
def parse(agg_name, axis_f = 0, keepdims_f=True):
    """Resolve an aggregation name to a numpy-style reduction callable.

    Parameters
    ----------
    agg_name : str
        Name of the aggregation, case-insensitive (see ``supported_functions``).
    axis_f : int
        Default axis the returned callable reduces over.
    keepdims_f : bool
        Default value of the returned callable's ``keepdims`` argument.

    Returns
    -------
    A callable with signature ``f(a, axis=axis_f, keepdims=keepdims_f)``.

    Raises
    ------
    KeyError
        If ``agg_name`` is not recognised.
    """
    agg_minuscula = agg_name.lower()
    # --- classic numpy aggregations -------------------------------------
    if agg_minuscula == 'mean':
        return lambda a, axis=axis_f, keepdims=keepdims_f: np.mean(a, axis=axis, keepdims=keepdims)
    elif agg_minuscula == 'median':
        return lambda a, axis=axis_f, keepdims=keepdims_f: np.median(a, axis=axis, keepdims=keepdims)
    elif agg_minuscula == 'min':
        return lambda a, axis=axis_f, keepdims=keepdims_f: np.min(a, axis=axis, keepdims=keepdims)
    elif agg_minuscula == 'max':
        return lambda a, axis=axis_f, keepdims=keepdims_f: np.max(a, axis=axis, keepdims=keepdims)
    # --- moderate deviations --------------------------------------------
    elif agg_minuscula == 'md':
        return lambda a, axis=axis_f, keepdims=keepdims_f: moderate_deviations.md_aggregation(a, axis=axis, keepdims=keepdims)
    # --- Sugeno-integral family -----------------------------------------
    elif agg_minuscula == 'sugeno':
        return lambda a, axis=axis_f, keepdims=keepdims_f: integrals.sugeno_fuzzy_integral(a, integrals.generate_cardinality_matrix(a.shape, axis), axis=axis, keepdims=keepdims)
    elif agg_minuscula == 'shamacher':
        return lambda a, axis=axis_f, keepdims=keepdims_f: integrals.sugeno_fuzzy_integral_generalized(a, integrals.generate_cardinality_matrix(a.shape, axis), axis=axis, keepdims=keepdims, f1 = tnorms.hamacher_tnorm, f2 = np.amax)
    elif agg_minuscula == 'pre_hamacher':
        my_pre = lambda x, y: x * np.abs(2*y - 1)
        return lambda a, axis=axis_f, keepdims=keepdims_f: integrals.sugeno_fuzzy_integral_generalized(a, integrals.generate_cardinality_matrix(a.shape, axis), axis=axis, keepdims=keepdims, f1 = my_pre, f2 = np.amax)
    # NOTE(review): 'fhamacher' and 'fsugeno' have identical bodies
    # (f1=prod, f2=sum) -- looks like a copy-paste; confirm intent.
    elif agg_minuscula == 'fhamacher':
        return lambda a, axis=axis_f, keepdims=keepdims_f: integrals.sugeno_fuzzy_integral_generalized(a, integrals.generate_cardinality_matrix(a.shape, axis), axis=axis, keepdims=keepdims, f1 = tnorms.prod, f2 = np.sum)
    elif agg_minuscula == 'fsugeno':
        return lambda a, axis=axis_f, keepdims=keepdims_f: integrals.sugeno_fuzzy_integral_generalized(a, integrals.generate_cardinality_matrix(a.shape, axis), axis=axis, keepdims=keepdims, f1 = tnorms.prod, f2 = np.sum)
    # --- Choquet-integral family ----------------------------------------
    elif agg_minuscula == 'choquet':
        return lambda a, axis=axis_f, keepdims=keepdims_f: integrals.choquet_integral_symmetric(a, integrals.generate_cardinality(a.shape[axis]), axis=axis, keepdims=keepdims)
    elif agg_minuscula == 'choquetdx0':
        return lambda a, axis=axis_f, keepdims=keepdims_f: integrals.general_choquet_dx(a, integrals.generate_cardinality(a.shape[axis]), axis=axis, keepdims=keepdims, rdf=dis.abs_v)
    elif agg_minuscula == 'choquetdx1':
        return lambda a, axis=axis_f, keepdims=keepdims_f: integrals.general_choquet_dx(a, integrals.generate_cardinality(a.shape[axis]), axis=axis, keepdims=keepdims, rdf=dis.quadratic)
    elif agg_minuscula == 'choquetdx2':
        return lambda a, axis=axis_f, keepdims=keepdims_f: integrals.general_choquet_dx(a, integrals.generate_cardinality(a.shape[axis]), axis=axis, keepdims=keepdims, rdf=dis.square)
    elif agg_minuscula == 'choquetdx3':
        return lambda a, axis=axis_f, keepdims=keepdims_f: integrals.general_choquet_dx(a, integrals.generate_cardinality(a.shape[axis]), axis=axis, keepdims=keepdims, rdf=dis.squarewise)
    elif agg_minuscula == 'choquetdx4':
        return lambda a, axis=axis_f, keepdims=keepdims_f: integrals.general_choquet_dx(a, integrals.generate_cardinality(a.shape[axis]), axis=axis, keepdims=keepdims, rdf=dis.abs_square)
    elif agg_minuscula == 'choquetdx5':
        return lambda a, axis=axis_f, keepdims=keepdims_f: integrals.general_choquet_dx(a, integrals.generate_cardinality(a.shape[axis]), axis=axis, keepdims=keepdims, rdf=dis.root_square)
    elif agg_minuscula == 'cfminmin':
        return lambda a, axis=axis_f, keepdims=keepdims_f: integrals.choquet_integral_symmetric_cf12(a, integrals.generate_cardinality(a.shape[axis]), axis=axis, keepdims=keepdims, f1=np.minimum, f2=np.minimum)
    elif agg_minuscula == 'cf12':
        return lambda a, axis=axis_f, keepdims=keepdims_f: integrals.choquet_integral_symmetric_cf12(a, integrals.generate_cardinality(a.shape[axis]), axis=axis, keepdims=keepdims, f1=lambda a,b, axis=axis, keepdims=keepdims: np.sqrt(a* b), f2=tnorms.lukasiewicz_tnorm)
    # --- OWA operators ---------------------------------------------------
    elif agg_minuscula == 'owa1':
        return lambda a, axis=axis_f, keepdims=keepdims_f: owas.OWA1(a, axis=axis, keepdims=keepdims)
    elif agg_minuscula == 'owa2':
        return lambda a, axis=axis_f, keepdims=keepdims_f: owas.OWA2(a, axis=axis, keepdims=keepdims)
    elif agg_minuscula == 'owa3':
        return lambda a, axis=axis_f, keepdims=keepdims_f: owas.OWA3(a, axis=axis, keepdims=keepdims)
    elif agg_minuscula == 'cf':
        return lambda a, axis=axis_f, keepdims=keepdims_f: integrals.choquet_integral_CF(a, integrals.generate_cardinality(a.shape[axis]), axis=axis, keepdims=keepdims)
    # --- overlap functions ------------------------------------------------
    elif agg_minuscula == 'geomean':
        return lambda a, axis=axis_f, keepdims=keepdims_f: overlaps.geo_mean(a, axis=axis, keepdims=keepdims)
    elif agg_minuscula == 'sinoverlap':
        return lambda a, axis=axis_f, keepdims=keepdims_f: overlaps.sin_overlap(a, axis=axis, keepdims=keepdims)
    elif agg_minuscula == 'hmean':
        return lambda a, axis=axis_f, keepdims=keepdims_f: overlaps.harmonic_mean(a, axis=axis, keepdims=keepdims)
    # --- t-norms ----------------------------------------------------------
    elif agg_minuscula == 'hamacher':
        return lambda a, axis=axis_f, keepdims=keepdims_f: tnorms.hamacher_tnorm(a, axis=axis, keepdims=keepdims)
    elif agg_minuscula == 'luka':
        return lambda a, axis=axis_f, keepdims=keepdims_f: tnorms.lukasiewicz_tnorm(a, axis=axis, keepdims=keepdims)
    elif agg_minuscula == 'drastic':
        return lambda a, axis=axis_f, keepdims=keepdims_f: tnorms.drastic_tnorm(a, axis=axis, keepdims=keepdims)
    elif agg_minuscula == 'nilpotent':
        return lambda a, axis=axis_f, keepdims=keepdims_f: tnorms.nilpotent_tnorm(a, axis=axis, keepdims=keepdims)
    # --- t-conorms --------------------------------------------------------
    # NOTE(review): 'probabilistc_sum' below matches the (misspelled) name
    # exposed by the tnorms module -- do not "fix" without renaming there.
    elif agg_minuscula == 'probabilistic_sum':
        return lambda a, axis=axis_f, keepdims=keepdims_f: tnorms.probabilistc_sum(a, axis=axis, keepdims=keepdims)
    elif agg_minuscula == 'bounded_sum':
        return lambda a, axis=axis_f, keepdims=keepdims_f: tnorms.bounded_sum(a, axis=axis, keepdims=keepdims)
    elif agg_minuscula == 'drastic_tcnorm':
        return lambda a, axis=axis_f, keepdims=keepdims_f: tnorms.drastic_tcnorm(a, axis=axis, keepdims=keepdims)
    elif agg_minuscula == 'nilpotent_maximum':
        return lambda a, axis=axis_f, keepdims=keepdims_f: tnorms.nilpotent_maximum(a, axis=axis, keepdims=keepdims)
    elif agg_minuscula == 'einstein_sum':
        return lambda a, axis=axis_f, keepdims=keepdims_f: tnorms.einstein_sum(a, axis=axis, keepdims=keepdims)
    elif agg_minuscula == 'prod':
        return lambda a, axis=axis_f, keepdims=keepdims_f: tnorms.prod(a, axis=axis, keepdims=keepdims)
    elif agg_minuscula == 'mode':
        from scipy.stats import mode
        if keepdims_f:
            print('Warning: mode does not keep dimension')
        return lambda a, axis=axis_f, keepdims=keepdims_f: mode(a, axis=axis)
    else:
        raise KeyError(agg_name)
| 71.447368 | 269 | 0.726949 |
ba454601304614a6004c1fff3a9dc03fe3e6b314 | 589 | py | Python | admin_tools_stats/migrations/0004_dashboardstats_y_tick_format.py | alexey74/django-admin-charts | 40d9ec98e1f5ba30261311fa6b172e3bf9f957fe | [
"MIT"
] | 76 | 2019-10-08T00:26:19.000Z | 2022-03-19T07:07:46.000Z | admin_tools_stats/migrations/0004_dashboardstats_y_tick_format.py | BrentGuttmann/django-admin-charts | c8e441e963df46041958598cd877112089298cf1 | [
"MIT"
] | 36 | 2019-10-04T17:33:32.000Z | 2022-03-27T17:47:14.000Z | admin_tools_stats/migrations/0004_dashboardstats_y_tick_format.py | BrentGuttmann/django-admin-charts | c8e441e963df46041958598cd877112089298cf1 | [
"MIT"
] | 18 | 2020-03-07T02:47:07.000Z | 2022-03-19T07:07:44.000Z | # Generated by Django 2.2.9 on 2020-01-21 15:46
from django.db import migrations, models
# Auto-generated Django schema migration -- logic intentionally left
# untouched; it adds the nullable 'y_axis_format' field to DashboardStats.
class Migration(migrations.Migration):
    # Must be applied after the previous admin_tools_stats migration.
    dependencies = [
        ('admin_tools_stats', '0003_auto_20191007_0950'),
    ]
    operations = [
        migrations.AddField(
            model_name='dashboardstats',
            name='y_axis_format',
            field=models.CharField(blank=True, default=None, help_text="Format of Y axis. <a href='https://github.com/d3/d3-format'>See description of possible values</a>.", max_length=90, null=True, verbose_name='Y axis format'),
        ),
    ]
| 31 | 230 | 0.657046 |
f52cdb061b915c410a9a555ef4b2a44a8babf9d1 | 8,473 | py | Python | train_util.py | shvetsiya/carvana | acc594cba53c44d577c9e3e326e0163eea8b4862 | [
"MIT"
] | 11 | 2018-01-28T04:22:57.000Z | 2018-12-20T10:09:40.000Z | train_util.py | shvetsiya/carvana | acc594cba53c44d577c9e3e326e0163eea8b4862 | [
"MIT"
] | null | null | null | train_util.py | shvetsiya/carvana | acc594cba53c44d577c9e3e326e0163eea8b4862 | [
"MIT"
] | 2 | 2017-10-04T00:58:10.000Z | 2019-02-14T17:47:25.000Z | from collections import OrderedDict
from tqdm import tqdm
import cv2
import torch
from torch import nn
from torch import optim
from torch.utils.data import DataLoader
from torch.autograd import Variable
from torch.optim.lr_scheduler import ReduceLROnPlateau
from model.losses import Loss
import tools
from config import *
class CarvanaSegmenationTrain:
    """Training harness for the Carvana segmentation model.

    NOTE(review): uses pre-0.4 PyTorch idioms (``Variable``,
    ``volatile=True``, ``loss.data[0]``) and module-level config globals
    (GPU_IDS, SAVED_MODEL, BEST_MODEL, THRESHOLD) -- confirm the pinned
    torch version before modernising.
    """
    def __init__(self, net:nn.Module, num_epochs, learning_rate, load_model=False):
        """
        The classifier for carvana used for training and launching predictions

        Args:
            net (nn.Module): The neural net module containing the definition of your model
            num_epochs (int): The maximum number of epochs which is used to train the model
            learning_rate (float): Initial learning rate for RMSprop.
            load_model (bool): If True, restore weights from the best-model checkpoint.
        """
        self.net = nn.DataParallel(net, device_ids=GPU_IDS).cuda()
        self.model_path_last = str(SAVED_MODEL)
        self.model_path_best = str(BEST_MODEL)
        self.epoch_old = 0  # epoch offset restored from a checkpoint, if any
        if load_model:
            self.load_best_model(self.model_path_best)
        self.num_epochs = num_epochs
        self.criterion = Loss()
        self.optimizer = optim.RMSprop(self.net.parameters(), lr=learning_rate)
        # Shrink the learning rate when validation loss plateaus.
        self.lr_scheduler = ReduceLROnPlateau(self.optimizer,
                                              mode='min',
                                              factor=0.3,
                                              patience=2,
                                              verbose=True,
                                              min_lr=1e-7)
        #self.optimizer = optim.Adam(model.parameters(), weight_decay=regularization)
    def load_best_model(self, model_path) -> None:
        """
        Restore a model parameters from the one given in argument

        Args:
            model_path (str): The path to the model to restore
        """
        state = torch.load(model_path)
        self.net.load_state_dict(state['state_dict'])
        self.epoch_old = state['epoch']
        print('Loaded model from epoch {}'.format(self.epoch_old)) # '{epoch}'.format(**state)
    def dice_loss(self, pred, target):
        """Batch-mean Dice coefficient between binary masks.

        NOTE(review): despite the name, this returns the Dice *coefficient*
        (higher is better), not a loss -- callers treat it as a metric.
        """
        # Smoothing avoids 0/0 for empty masks.
        smooth = 1e-10
        num = target.size(0)# batch size
        m1 = pred.view(num, -1)
        m2 = target.view(num, -1)
        #thanks to broadcasting
        intersection = (m1*m2).sum(1) + smooth
        tsquares = m1.sum(1) + m2.sum(1) + smooth
        return 2*(intersection/tsquares).mean()
    def train_epoch_step(self, epoch:int, train_loader: DataLoader):
        """Run one training epoch; return (avg_loss, avg_dice)."""
        losses = tools.AverageMeter()
        dice_c = tools.AverageMeter()
        # Set train mode.
        self.net.train()
        it_count = len(train_loader)
        batch_size = train_loader.batch_size
        with tqdm(total=it_count,
                  desc="Epochs {}/{}".format(epoch+1, self.num_epochs),
                  bar_format='{l_bar}{bar}| {n_fmt}/{total_fmt} [{remaining}{postfix}]'
                  ) as pbar:
            for images, targets in train_loader:
                images = Variable(images.cuda())
                targets = Variable(targets.cuda())
                # Compute output:
                output = self.net(images)
                # Binarise probabilities for the dice metric only.
                preds = output.ge(THRESHOLD).float()
                #metrics
                loss = self.criterion(output, targets)
                dice = self.dice_loss(preds, targets)
                # Update weights.
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()
                losses.update(loss.data[0], batch_size)
                dice_c.update(dice.data[0], batch_size)
                # Update pbar
                pbar.set_postfix(OrderedDict(loss='{0:1.5f}'.format(loss.data[0]),
                                             dice_coeff='{0:1.5f}'.format(dice.data[0])))
                pbar.update(1)
        return losses.avg, dice_c.avg
    def validate(self, valid_loader: DataLoader):
        """Evaluate on the validation set.

        Returns (avg_loss, avg_dice, last_images, last_targets, last_preds);
        the last batch is kept so callbacks can visualise predictions.
        """
        losses = tools.AverageMeter()
        dice_c = tools.AverageMeter()
        # Set evaluation mode.
        self.net.eval()
        it_count = len(valid_loader)
        batch_size = valid_loader.batch_size
        images = None # To save the last image batch
        targets = None # To save the last target batch
        preds = None # To save the last prediction batch
        with tqdm(total=it_count, desc="Validating", leave=False) as pbar:
            for images, targets in valid_loader:
                # Volatile is used for pure inference mode
                images = Variable(images.cuda(), volatile=True)
                targets = Variable(targets.cuda(), volatile=True)
                # Compute output:
                output = self.net(images)
                preds = output.ge(THRESHOLD).float()
                #metrics
                loss = self.criterion(output, targets)
                dice = self.dice_loss(preds, targets)
                losses.update(loss.data[0], batch_size)
                dice_c.update(dice.data[0], batch_size)
                pbar.update(1)
        return losses.avg, dice_c.avg, images, targets, preds
    def train(self, train_loader: DataLoader, valid_loader: DataLoader, callbacks):
        """Full training loop: train, validate, step LR, fire callbacks.

        Args:
            train_loader / valid_loader: the data loaders.
            callbacks: iterable of callables invoked after every epoch with
                keyword metrics (used for checkpointing/visualisation).
        """
        for epoch in range(self.num_epochs):
            train_loss, train_dice = self.train_epoch_step(epoch, train_loader)
            valid_loss, valid_dice, last_images, last_targets, last_preds = self.validate(valid_loader)
            # Reduce learning rate if needed (plateau on validation loss).
            self.lr_scheduler.step(valid_loss, epoch)
            # save last and the best models
            if callbacks:
                for cb in callbacks:
                    cb(net=self.net,
                       last_valid_batch=(last_images, last_targets, last_preds),
                       epoch=self.epoch_old+epoch,
                       train_loss=train_loss, train_dice=train_dice,
                       valid_loss=valid_loss, valid_dice=valid_dice
                       )
            print("train_loss = {:03f}, train_dice = {:03f}\n"
                  "valid_loss = {:03f}, valid_dice = {:03f}"
                  .format(train_loss, train_dice, valid_loss, valid_dice))
class CarvanaSegmenationTest:
    """Inference harness: loads the best checkpoint and writes mask PNGs."""
    def __init__(self, net:nn.Module, pred_folder):
        """
        The classifier for carvana used predictions

        Args:
            net (nn.Module): The neural net module containing the definition of your model
            pred_folder (str): Directory where predicted mask images are written
                (created if missing).
        """
        #net = UNet()
        self.net = nn.DataParallel(net, device_ids=GPU_IDS).cuda()
        self.model_path_best = str(BEST_MODEL)
        self.load_best_model(self.model_path_best)
        self.pred_folder = pred_folder
        if not os.path.exists(self.pred_folder):
            os.makedirs(self.pred_folder)
    def load_best_model(self, model_path) -> None:
        """
        Restore a model parameters from the one given in argument

        Args:
            model_path (str): The path to the model to restore
        """
        state = torch.load(model_path)
        self.net.load_state_dict(state['state_dict'])
        print('Loaded model from epoch {epoch}'.format(**state))
    def predict(self, test_loader):
        """
        Launch the prediction on the given loader and write each batch
        of predicted masks to ``self.pred_folder``.

        Args:
            test_loader (DataLoader): The loader containing the test dataset;
                yields (images, file_names) pairs.
        """
        # Switch to evaluation mode
        self.net.eval()
        it_count = len(test_loader)
        with tqdm(total=it_count, desc="Predict") as pbar:
            for images, file_names in test_loader:
                images = Variable(images.cuda(), volatile=True)
                # forward
                # Scale probabilities [0, 1] to uint8 grayscale [0, 255].
                batch_probs = (255*(self.net(images).data)).cpu().numpy().squeeze().astype(np.uint8)
                self.write_batch(batch_probs, file_names)
                pbar.update(1)
    def write_batch(self, batch_probs, ids):
        """Resize each mask to the original image size and save it as PNG."""
        for (pred, name) in zip(batch_probs, ids):
            filename = os.path.join(self.pred_folder, name + '_pred_mask.png')
            pred = cv2.resize(pred, (ORIGINAL_WIDTH, ORIGINAL_HEIGHT))
            cv2.imwrite(filename, pred)
| 38.689498 | 103 | 0.570872 |
3676beb1e61042db375cf46b24b0cb9d0a28acdd | 1,608 | py | Python | rl/agent/freeze_dqn.py | AsimKhan2019/OpenAI-Lab | d0669d89268f2dc01c1cf878e4879775c7b6eb3c | [
"MIT"
] | 340 | 2017-02-21T02:32:39.000Z | 2021-12-20T00:47:18.000Z | rl/agent/freeze_dqn.py | AsimKhan2019/OpenAI-Lab | d0669d89268f2dc01c1cf878e4879775c7b6eb3c | [
"MIT"
] | 14 | 2017-02-15T19:46:36.000Z | 2018-12-15T23:42:21.000Z | rl/agent/freeze_dqn.py | AsimKhan2019/OpenAI-Lab | d0669d89268f2dc01c1cf878e4879775c7b6eb3c | [
"MIT"
] | 72 | 2017-02-21T05:16:00.000Z | 2021-12-06T03:34:57.000Z | import numpy as np
from rl.agent.double_dqn import DoubleDQN
from rl.agent.dqn import DQN
from rl.util import logger, clone_model
class FreezeDQN(DoubleDQN):
    '''
    Extends DQN agent to freeze target Q network
    and periodically update them to the weights of the
    exploration model
    Avoids oscillations and breaks correlation
    between Q-network and target
    http://www0.cs.ucl.ac.uk/staff/d.silver/web/Resources_files/deep_rl.pdf
    Exploration model periodically cloned into target Q network
    '''
    def compute_Q_states(self, minibatch):
        """Compute clipped Q-values: current states from the online model,
        next states (and their per-row max) from the frozen target model."""
        # Clipping keeps Q targets inside [-clip_val, clip_val].
        Q_states = np.clip(self.model.predict(minibatch['states']),
                           -self.clip_val, self.clip_val)
        Q_next_states = np.clip(self.model_2.predict(minibatch['next_states']),
                                -self.clip_val, self.clip_val)
        Q_next_states_max = np.amax(Q_next_states, axis=1)
        return (Q_states, Q_next_states, Q_next_states_max)
    def train_an_epoch(self):
        # Should call DQN to train an epoch, not DoubleDQN
        # (bypasses DoubleDQN's override on purpose).
        return DQN.train_an_epoch(self)
    def update_target_model(self):
        """Clone the exploration model's weights into the frozen target."""
        # Also, loading logic seems off
        self.model_2 = clone_model(self.model)
        logger.debug("Updated target model weights")
    def update(self, sys_vars):
        '''
        Agent update apart from training the Q function.
        Refreshes the frozen target network at episode end (or on the
        last timestep), then defers to the parent update.
        '''
        done = sys_vars['done']
        timestep_check = sys_vars['t'] == (self.env_spec['timestep_limit'] - 1)
        if done or timestep_check:
            self.update_target_model()
        super(FreezeDQN, self).update(sys_vars)
| 35.733333 | 79 | 0.666045 |
3b4488ef6275f766dd6bb7aaab02d28e34c0d25f | 2,004 | py | Python | test/_data_base.py | Khan-Xu/Pyrod | 3ee62e3d6037328a010d9340bf1e8ff991f48414 | [
"MIT"
] | null | null | null | test/_data_base.py | Khan-Xu/Pyrod | 3ee62e3d6037328a010d9340bf1e8ff991f48414 | [
"MIT"
] | null | null | null | test/_data_base.py | Khan-Xu/Pyrod | 3ee62e3d6037328a010d9340bf1e8ff991f48414 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#%%
from __future__ import absolute_import
#----------LIBRARYS-----------
# exceptions library
from _exception import (The_Parameter_Exception, The_Data_Load_Exception, The_Data_Base_Exception)
# python stdlib library
import os
import pickle
# python processing library
import numpy as np
import pandas as pd
#----------CONSTANTS----------
# Load the atomic databases shipped under ./base/ into module-level tables.
# NOTE(review): os.path.dirname('atomic_form_facotr.xlsx') is '' (bare
# filename), so abspath('') resolves to the *current working directory*,
# not this module's directory -- confirm this is intended.
# NOTE(review): the except clause only catches the project's
# The_Data_Load_Exception; FileNotFoundError/IOError raised by pandas or
# pickle will propagate, and on failure the names below stay undefined.
try:
    # atomic_form_factor.xlsx atomic_mass.xlsx and atomic_scattering_facotr.pickle
    ATOMIC_FORM_FACTOR = os.path.abspath(os.path.dirname('atomic_form_facotr.xlsx')) + '/base/atomic_form_factor.xlsx'
    ATOMIC_MASS = os.path.abspath(os.path.dirname('atomic_mass.xlsx')) + '/base/atomic_mass.xlsx'
    ATOMIC_SCATTERING_FACTOR = os.path.abspath(os.path.dirname('atomic_scattering_factor.pickle')) + '\\base\\atomic_scattering_factor.pickle'
    # loading to python vars
    IONS_TABLE = pd.read_excel(ATOMIC_FORM_FACTOR, sheet_name = 'atomic_form_factor', index_col = 0)
    ATOM_TABLE = pd.read_excel(ATOMIC_MASS, sheet_name = 'Sheet1', index_col = 0)
    ASF_file = open(ATOMIC_SCATTERING_FACTOR, 'rb')
    ASF_TABLE = pickle.load(ASF_file)
    ASF_file.close()
    IONS_LIST = IONS_TABLE.index.values.tolist()
    ATOM_LIST = ATOM_TABLE.index.values.tolist()
except The_Data_Load_Exception:
    print('Data base loading fail. Used library: pandas; pickle')
#----------FUNCTIONS----------
def _check_ions(ion):
    """Validate that *ion* is a string present in the ion database."""
    # Guard-clause form: reject non-strings first, then unknown ions.
    if not isinstance(ion, str):
        raise The_Parameter_Exception("The input parameter 'ion' should be string")
    if ion not in IONS_LIST:
        raise The_Data_Base_Exception("The ion is not included in database 'atomic_form_factor.xlsx'")
def _check_atom(atom):
    """Validate that *atom* is a string present in the atom database.

    Fix: the error message previously said "This ion ..." (copy-paste
    from _check_ions) even though this function checks atoms.
    """
    # Guard-clause form: reject non-strings first, then unknown atoms.
    if not isinstance(atom, str):
        raise The_Parameter_Exception("The input parameter 'atom' should be string")
    if atom not in ATOM_LIST:
        raise The_Data_Base_Exception("This atom is not included in database 'atomic_mass.xlsx'")
#%%
| 36.436364 | 143 | 0.697605 |
66795790e52244e0f4bd2320b2f920e7af7a9346 | 899 | py | Python | 1Week_3-Python/hw/2016/lyw/THUNews_website/THUNews_website/urls.py | wmhst7/- | 1f33084c58a1aaf7120a6182396bbabb7a836ce5 | [
"MIT"
] | 2 | 2019-08-25T05:07:32.000Z | 2019-09-04T07:37:25.000Z | 1Week_3-Python/hw/2016/lyw/THUNews_website/THUNews_website/urls.py | wmhst7/- | 1f33084c58a1aaf7120a6182396bbabb7a836ce5 | [
"MIT"
] | 1 | 2022-03-05T10:11:21.000Z | 2022-03-05T10:11:21.000Z | 1Week_3-Python/hw/2016/lyw/THUNews_website/THUNews_website/urls.py | wmhst7/- | 1f33084c58a1aaf7120a6182396bbabb7a836ce5 | [
"MIT"
] | 1 | 2022-03-05T10:15:16.000Z | 2022-03-05T10:15:16.000Z | """THUNews_website URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from thunews.views import index, query, ApiQuery
# URL routing table: homepage, HTML search view, and JSON search API.
urlpatterns = [
    url(r'^$', index, name='index'),
    url(r'^query', query, name='query'),
    url(r'^api/query', ApiQuery.as_view(), name='api_query')
]
| 35.96 | 79 | 0.694105 |
78db7dbcc310871b46a05809978eb3210d096e9d | 1,893 | py | Python | setup.py | generic-ci-org/birdy | 63c2d0aacad67569d8d8fc25c9a702d80c69fcd0 | [
"Apache-2.0"
] | null | null | null | setup.py | generic-ci-org/birdy | 63c2d0aacad67569d8d8fc25c9a702d80c69fcd0 | [
"Apache-2.0"
] | null | null | null | setup.py | generic-ci-org/birdy | 63c2d0aacad67569d8d8fc25c9a702d80c69fcd0 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from setuptools import find_packages
from setuptools import setup
from pathlib import Path
import re
def parse_reqs(file):
    """Parse a pip requirements file into a list of requirement strings.

    VCS links of the form ``...#egg=<name>`` are reduced to ``<name>``.

    Fixes in this revision:
    * the file handle is now closed (the original leaked it via a bare
      ``open`` in the ``for`` statement);
    * blank lines and ``#`` comment lines are skipped instead of being
      appended as empty/garbage requirements.
    """
    egg_regex = re.compile(r"#egg=(\w+)")
    reqs = []
    with open(file) as handle:
        for raw in handle:
            req = raw.strip()
            # Skip empty lines and full-line comments.
            if not req or req.startswith('#'):
                continue
            git_url_match = egg_regex.search(req)
            if git_url_match:
                req = git_url_match.group(1)
            reqs.append(req)
    return reqs
# Read the package version from birdy/__init__.py without importing it.
# NOTE(review): .group(1) raises AttributeError if the __version__ line is
# missing -- acceptable fail-fast behaviour for a build script.
with open(Path(__file__).parent / 'birdy' / '__init__.py', 'r') as f:
    version = re.search(r'__version__ = [\'"](.+?)[\'"]', f.read()).group(1)
description = 'Birdy provides a command-line tool to work with Web Processing Services.'
# NOTE(review): these three open() calls are never closed explicitly;
# harmless in a short-lived setup.py but a context manager would be tidier.
long_description = (
    open('README.rst').read() + '\n' + open('AUTHORS.rst').read() + '\n' + open('CHANGES.rst').read()
)
requirements = parse_reqs("requirements.txt")
dev_requirements = parse_reqs('requirements_dev.txt')
# Trove classifiers advertised on PyPI.
classifiers = [
    'Development Status :: 4 - Beta',
    'Intended Audience :: Science/Research',
    'Operating System :: MacOS :: MacOS X',
    'Operating System :: Microsoft :: Windows',
    'Operating System :: POSIX',
    'Programming Language :: Python',
    'Topic :: Scientific/Engineering :: Atmospheric Science',
]
setup(name='birdhouse-birdy',
      version=version,
      description=description,
      long_description=long_description,
      classifiers=classifiers,
      keywords='wps pywps owslib geopython birdy birdhouse',
      author='Carsten Ehbrecht',
      author_email="ehbrecht@dkrz.de",
      url='https://github.com/bird-house/birdy',
      license="Apache License v2.0",
      packages=find_packages(),
      include_package_data=True,
      install_requires=requirements,
      extras_require={
          "dev": dev_requirements,  # pip install ".[dev]"
      },
      entry_points={
          'console_scripts': [
              'birdy=birdy.cli.run:cli']},
      )
| 30.532258 | 101 | 0.636027 |
2babc1fd5a4df6a50ae4426d5e105bbbf5679b05 | 4,540 | py | Python | examples/host_extension.py | aarondadler/pySBOL | 97cf161eda1c8e1ccaf0f6cb5f0f53b24f6030b5 | [
"Apache-2.0"
] | 27 | 2015-04-14T20:34:48.000Z | 2021-03-23T22:45:57.000Z | examples/host_extension.py | aarondadler/pySBOL | 97cf161eda1c8e1ccaf0f6cb5f0f53b24f6030b5 | [
"Apache-2.0"
] | 104 | 2015-03-27T22:05:15.000Z | 2022-01-24T16:36:56.000Z | examples/host_extension.py | aarondadler/pySBOL | 97cf161eda1c8e1ccaf0f6cb5f0f53b24f6030b5 | [
"Apache-2.0"
] | 17 | 2015-05-08T14:53:54.000Z | 2020-04-02T19:27:56.000Z | # -*- coding: utf-8 -*-
"""
The following example script demonstrates pySBOL extension classes. PySBOL extensions allow users to create their own classes of data and serialize it into SBOL files as RDF/XML.
A Host is a ComponentDefinition that represents a cell. To indicate that the ComponentDefinition is a cell, as opposed to DNA, RNA, or other type of ComponentDefinition, the Gene Ontology term for cell is used as the value for the types field.
The Genome, Plasmid, and Mutation classes are all ComponentDefinitions representing DNA molecules. They are initialized with the corresponding SequenceOntology term in the roles field.
A Host may be defined using a taxonomic term that indicates its strain or species. Its genotype may also be specified in terms of genetic nomenclature. The extract_marker method then converts each of these into a Marker object.
"""
from sbol import *
import re
# Define XML namespace for custom extension data. Namespaces should end in a delimiter such as / or #
EXTENSION_NAMESPACE = 'http://sys-bio.org#'
# Ontology terms
GENE_ONTOLOGY_CELL = 'http://purl.obolibrary.org/obo/GO_0005623'
NCBI_TAXONOMY = 'https://www.ncbi.nlm.nih.gov/Taxonomy/'
class Host(ComponentDefinition):
    """A ComponentDefinition extension class representing a host cell.

    The Gene Ontology term for "cell" is used as the value of the types
    field to distinguish a Host from DNA/RNA ComponentDefinitions.  A Host
    owns a base Genome, an Origin of replication and any number of
    Plasmids, all registered in a single SBOL Document for file I/O.
    """

    def __init__(self, id='my_strain', plasmids=None):
        # `plasmids=None` avoids the shared mutable-default-argument
        # pitfall of the original `plasmids=[]`.
        ComponentDefinition.__init__(self, SBOL_COMPONENT_DEFINITION, id, GENE_ONTOLOGY_CELL, '1.0.0')
        if plasmids is None:
            plasmids = []
        # Validate plasmid arguments
        for p in plasmids:
            if p.type != SBOL_COMPONENT_DEFINITION:
                raise SBOLError('Failed to instantiate host. An invalid plasmid component was specified.')
        # Initialize object properties
        self.taxonomy = URIProperty(self, EXTENSION_NAMESPACE + 'taxonomy', '0', '1')
        self.genotype = TextProperty(self, EXTENSION_NAMESPACE + 'genotype', '0', '1')
        self.genome = Genome(id + '_genome')
        self.origin = Origin(id + '_origin_of_replication')
        # Associate these objects with an SBOL Document for file I/O
        self.document = Document()
        self.document.addComponentDefinition(self)
        self.document.addComponentDefinition(self.genome)
        self.document.addComponentDefinition(self.origin)
        self.document.addComponentDefinition(plasmids)
        # Create an abstraction hierarchy.
        # BUGFIX: the original passed the *classes* Genome and Origin here
        # ([Genome] + [Origin]) instead of the instances created above.
        self.assemble([self.genome, self.origin] + plasmids)

    def set_genotype(self, genotype):
        """Store *genotype*, replacing every character other than ASCII
        letters, digits and spaces with an underscore."""
        self.genotype = re.sub(r'[^a-zA-Z0-9 ]', '_', genotype)

    def parse_genotype(self):
        """Split the stored genotype into a de-duplicated list of marker
        tokens (order is not preserved -- `set` is used for dedup)."""
        markers = self.genotype.split(' ')
        markers = [m for m in markers if m != ""]
        markers = list(set(markers))  # Remove duplicates
        # Convert to alphanumeric and underscores
        markers = [re.sub(r'[^a-zA-Z0-9 ]', '_', m) for m in markers]
        return markers

    def extract_markers(self):
        """Create a Marker ComponentDefinition per genotype token, add
        each to the Document, and attach them all to the base genome."""
        markers = self.parse_genotype()
        marker_components = []
        for m in markers:
            marker_component = Marker(m)
            self.document.addComponentDefinition(marker_component)
            marker_components.append(marker_component)
        # Add markers to the base genome
        self.genome.assemble(marker_components)

    def write(self, file_name):
        """Serialize this host's Document to an SBOL (RDF/XML) file."""
        self.document.write(file_name)
class Genome(ComponentDefinition):
    """DNA ComponentDefinition representing a host's base genome."""

    def __init__(self, id='genome'):
        ComponentDefinition.__init__(self, SBOL_COMPONENT_DEFINITION, id,
                                     BIOPAX_DNA, '1.0.0')
        # SO:0001026 -- Sequence Ontology term for "genome".
        self.roles = SO + '0001026'
class Origin(ComponentDefinition):
    """DNA ComponentDefinition representing an origin of replication."""

    def __init__(self, id='oriR'):
        ComponentDefinition.__init__(self, SBOL_COMPONENT_DEFINITION, id,
                                     BIOPAX_DNA, '1.0.0')
        # SO:0000296 -- Sequence Ontology term for "origin_of_replication".
        self.roles = SO + '0000296'
class Plasmid(ComponentDefinition):
    """DNA ComponentDefinition representing a plasmid."""

    def __init__(self, id='plasmid'):
        ComponentDefinition.__init__(self, SBOL_COMPONENT_DEFINITION, id,
                                     BIOPAX_DNA, '1.0.0')
        # SO:0000155 -- Sequence Ontology term for "plasmid".
        self.roles = SO + '0000155'
class Marker(ComponentDefinition):
    """DNA ComponentDefinition for a single genotype marker, with an
    extension `notes` text property for free-form annotation."""

    def __init__(self, id='test'):
        ComponentDefinition.__init__(self, SBOL_COMPONENT_DEFINITION, id,
                                     BIOPAX_DNA, '1.0.0')
        # Multi-valued ('0'..'*') free-text annotation slot.
        self.notes = TextProperty(self, EXTENSION_NAMESPACE + "note", '0', '*')
# --- Demo: build a DH5alpha host, annotate it, and serialize to SBOL ---
circuit = Plasmid('circuit')
host = Host('DH5alpha', [circuit])
host.taxonomy = NCBI_TAXONOMY + '668369' # Taxonomic ID for DH5alpha strain
print host.taxonomy
# Standard DH5alpha genotype string; non-alphanumeric characters (Greek
# letters, en dashes) get replaced with underscores by set_genotype().
host.set_genotype('F– endA1 glnV44 thi-1 recA1 relA1 gyrA96 deoR nupG purB20 φ80dlacZΔM15 Δ(lacZYA-argF)U169, hsdR17(rK–mK+), λ–')
host.extract_markers()
# Disable SBOL validation on write -- extension classes are not part of
# the core SBOL spec and would otherwise fail validation (presumed; confirm).
Config.setOption('validate', False)
host.write('DH5alpha.xml')
d8de787b5991aa08a8a348cb4fd3632db0d320b8 | 3,848 | py | Python | test.py | anatolykopyl/jumpcutter | 2e79a752621d4c59c453be2cdab20b52e9cfdccf | [
"MIT"
] | null | null | null | test.py | anatolykopyl/jumpcutter | 2e79a752621d4c59c453be2cdab20b52e9cfdccf | [
"MIT"
] | null | null | null | test.py | anatolykopyl/jumpcutter | 2e79a752621d4c59c453be2cdab20b52e9cfdccf | [
"MIT"
] | null | null | null | from pytube import YouTube
import os
import subprocess
# Local fixture videos; regenerated by downloadTestdata() when missing.
testfiles = ["30fps.mp4", "60 fps.mp4",
             "15fps.mp4", "soundless.mp4", "music.mp4"]
def downloadFile(url):
    """Download the first available stream of *url* via pytube and rename
    the resulting file so its basename contains no spaces.

    Returns the (possibly renamed) file path.
    """
    original_path = YouTube(url).streams.first().download()
    directory, basename = os.path.split(original_path)
    sanitized = os.path.join(directory, basename.replace(' ', '_'))
    os.rename(original_path, sanitized)
    return sanitized
def downloadTestdata():
    """Create the local test fixtures from a freshly downloaded video.

    Downloads the source clip, derives the 15/60/30 fps one-minute
    variants from it, then splits the 30 fps variant into a soundless
    (-an) and an audio-only (-vn) fixture, and finally deletes the
    downloaded source file.
    """
    source = downloadFile("https://www.youtube.com/watch?v=aqz-KE-bpKQ")
    # (ffmpeg argument list, output file) pairs, in the original order.
    conversions = [
        (["-i", source, "-r", "15", "-t", "00:01:00"], testfiles[2]),
        (["-i", source, "-r", "60", "-t", "00:01:00"], testfiles[1]),
        (["-i", source, "-t", "00:01:00"], testfiles[0]),
        (["-i", testfiles[0], "-an"], testfiles[3]),
        (["-i", testfiles[0], "-vn"], testfiles[4]),
    ]
    for args, target in conversions:
        subprocess.run(["ffmpeg"] + args + [target])
    os.remove(source)
# prepare testdata if missing
# NOTE(review): downloadTestdata() regenerates *all* fixtures, so several
# missing files trigger several full regenerations.
for src in testfiles:
    if(not os.path.isfile(src)):
        print("missing "+src)
        downloadTestdata()
# Each check below runs jumpcutter.py on a fixture and asserts the exact
# byte size of the produced file, pinning the output deterministically.
print("15fps autodetection test")
command = ["python3", "jumpcutter.py", "--input_file",
           testfiles[2], "--output_file", "t.mp4"]
subprocess.run(command)
assert(os.path.getsize("t.mp4") == 8443196)
os.remove("t.mp4")
print("30fps autodetection test")
command = ["python3", "jumpcutter.py", "--input_file",
           testfiles[0], "--output_file", "t.mp4"]
subprocess.run(command)
assert(os.path.getsize("t.mp4") == 8571040)
os.remove("t.mp4")
print("60fps autodetection test + space test")
command = ["python3", "jumpcutter.py", "--input_file",
           testfiles[1], "--output_file", "t t.mp4"]
subprocess.run(command)
assert(os.path.getsize("t t.mp4") == 8113359)
os.remove("t t.mp4")
print("soundless test")
command = ["python3", "jumpcutter.py", "--input_file",
           testfiles[3], "--output_file", "t.mp4"]
subprocess.run(command)
print("music test")
command = ["python3", "jumpcutter.py", "--input_file",
           testfiles[4], "--output_file", "t.mp4"]
subprocess.run(command)
print("audio_only music test")
command = ["python3", "jumpcutter.py", "--input_file",
           testfiles[4], "--output_file", "t.mp4", "--audio_only"]
subprocess.run(command)
assert(os.path.getsize("t.mp4") == 565547)
os.remove("t.mp4")
print("audio_only video test")
command = ["python3", "jumpcutter.py", "--input_file",
           testfiles[2], "--output_file", "t.mp4", "--audio_only"]
subprocess.run(command)
assert(os.path.getsize("t.mp4") == 408510)
# t.mp4 is deliberately left on disk here: the next test exercises the
# --force overwrite flag against an existing output file.
print("slowdown test + force test")
command = ["python3", "jumpcutter.py", "--input_file",
           testfiles[2], "--output_file", "t.mp4", "--force", "--sounded_speed", "0.5", "--silent_speed", "0.9"]
subprocess.run(command)
assert(os.path.getsize("t.mp4") == 22962113)
os.remove("t.mp4")
print("low quality test")
command = ["python3", "jumpcutter.py", "--input_file",
           testfiles[2], "--output_file", "t.mp4", "--frame_quality", "31", "--crf", "50", "--preset", "ultrafast"]
subprocess.run(command)
assert(os.path.getsize("t.mp4") == 796732)
os.remove("t.mp4")
print("phasevocoder test")
command = ["python3", "jumpcutter.py", "--input_file",
           testfiles[2], "--output_file", "t.mp4", "--stretch_algorithm", "phasevocoder", "--sounded_speed", "0.5"]
subprocess.run(command)
assert(os.path.getsize("t.mp4") == 19991295)
os.remove("t.mp4")
print("edl test")
command = ["python3", "jumpcutter.py", "--input_file",
           testfiles[2], "--output_file", "t.edl", "--edl"]
subprocess.run(command)
assert(os.path.getsize("t.edl") == 1464)
os.remove("t.edl")
4ccaae347f91200918bc098d3757a691e3a3795a | 9,084 | py | Python | testproj/new/bin_parser1.py | musicwarez/testproj | 88a6f95f58849cca455865358cfd0bec8a4437a0 | [
"BSD-3-Clause"
] | null | null | null | testproj/new/bin_parser1.py | musicwarez/testproj | 88a6f95f58849cca455865358cfd0bec8a4437a0 | [
"BSD-3-Clause"
] | null | null | null | testproj/new/bin_parser1.py | musicwarez/testproj | 88a6f95f58849cca455865358cfd0bec8a4437a0 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/python2.7
import struct
import sys
from datetime import datetime
import time
# Message catalogue for the feed being parsed, keyed by the hex value of
# the message-type byte.  'len' is the total message length in bytes and
# 'bin' is the struct format used to unpack it (empty string for message
# types that are skipped, not decoded).
# NOTE(review): resembles an LSE MITCH-style feed -- confirm against the
# venue's protocol specification.
protocol = {
    "54": {"name": "Time", 'len': 6, 'bin': "<BBI"},
    "45": {"name": "Order Executed", 'len': 26, 'bin': "<BBIQIQ"},
    "43": {"name": "Order Executed With Price/Sized", 'len': 39, 'bin': "<BBIQIIQBQ"},
    "50": {"name": "Trade", 'len': 33,'bin': "<BBIIIBBQQB"},
    "51": {"name": "Auction Trade", 'len': 33,'bin': "<BBIIIBBQQB"},
    "78": {"name": "Off-Book Trade", 'len': 70, 'bin': "<BBIIIBBQQ4sQQ4sQ5sB"},
    "42": {"name": "Trade Break", 'len': 19, 'bin': "<BBIQBI"},
    "49": {"name": "Auction Info", 'len': 30, 'bin': ""},
    "77": {"name": "Statistics", 'len': 23, 'bin': ""},
    "01": {"name": "Login Request", 'len': 18, 'bin': ""},
    "02": {"name": "Login Response", 'len': 3, 'bin': ""},
    "05": {"name": "Logout Request", 'len': 2, 'bin': ""},
    "03": {"name": "Replay Request", 'len': 9, 'bin': ""},
    "04": {"name": "Replay Response", 'len': 10, 'bin': ""},
    "81": {"name": "Snapshot Request", 'len': 16, 'bin': ""},
    "82": {"name": "Snapshot Response", 'len': 11, 'bin': ""},
    "83": {"name": "Snapshot Complete", 'len': 17, 'bin': ""},
    "41": {"name": "Add Order", 'len': 34, 'bin': ""},
    "48": {"name": "Add Order", 'len': 28, 'bin': ""},
    "53": {"name": "System Event", 'len': 7, 'bin': ""},
    "46": {"name": "Add Attributed Order", 'len': 45, 'bin': ""},
    "44": {"name": "Order Deleted", 'len': 19, 'bin': ""},
    "55": {"name": "Order Modified", 'len': 27, 'bin': ""},
    "79": {"name": "Order Book Clear", 'len': 13, 'bin': ""},
    "52": {"name": "Symbol Directory", 'len': 65, 'bin': "" },
}
# Message types (hex) that are actually decoded and reported.
need = ("45", "50", "51", "78")
# Frame sequence numbers used only by the commented-out debugging code
# inside filehandle.find_nex_pay()/process().
look = ['10444957', '7528158', '10439722', '10444976', '10444975', '10438800', '10444958',
        '9957331', '10445098', '10445299', '7440239', '10445151', '10444925', '10445295',
        '10445294', '10445297', '10445149', '8163507', '10438909', '10445100', '10445103',
        '10445102', '10445866', '9267735', '10445162', '10445161', '10439914', '10438799',
        '10439823', '10441119', '10438910', '8652045', '10439424', '10444965', '10444966',
        '10439718', '10444960', '10444961', '10439915', '10439416', '10439822', '10446537',
        '3452601', '10444915', '10444980', '1278043', '10444078', '10444079', '3462787',
        '9356961', '9356735']
look1 = ['2778', '2735', '2769', '2737', '2781', '2731', '2733', '2763', '2772', '2760', '2766',
         '2729', '2775', '2741', '2752', '2744', '2750', '2746', '2739', '2748']
class filehandle(object):
    # Parser for a raw market-data capture log.  read_log() scans the file
    # byte-by-byte for frame markers, process() decodes the framing
    # headers, and find_nex_pay() walks the message payloads, printing one
    # colon-separated record per frame that contains trade messages.
    # Python 2 only: relies on `print` statements and str-as-bytes slicing.
    def __init__(self, filename):
        # File object; opened lazily in read_log().
        self.data = None
        # Number of bytes consumed so far by the outer scan loop.
        self.iter = 0
        self.end_read = 0
        # Accumulated colon-separated output record for the current frame.
        self.data_str = None
        #receive time seconds
        self.rtmc = 0
        self.filename = filename
        # Midnight of the capture day in epoch seconds (set per frame).
        self.year = None
        # Sequence number of the frame currently being processed.
        self.sequence = 0
    def find_nex_pay(self, payload_str):
        """Walk the concatenated messages in *payload_str*, appending the
        fields of trade-related messages (types in `need`) to
        self.data_str and printing the record if any were found."""
        # Hex-encode a byte string, e.g. '\x54' -> '54'.
        toHex = lambda x: "".join([hex(ord(c))[2:].zfill(2) for c in x])
        label = False
        while payload_str:
            # Byte [1] carries the message type; byte [0] is presumably
            # the length field -- confirm against the protocol spec.
            m_type = toHex(payload_str[1:2])
            le = protocol[m_type]['len']
            bin = protocol[m_type]['bin']
            real_len = len(payload_str)
            #if self.sequence == 2748:
            #    print m_type
            #    exit()
            #first positioh has message type payload_str[0:1]
            if m_type in need:
                label = True
                #try:
                tmp = struct.unpack(bin, payload_str[0:le])
                #except struct.error as e:
                #    print >>sys.stderr, "Error. unpack payload"
                # + message type [1]
                self.data_str = self.data_str + ":" +str(m_type)
                ################################
                #if str(self.sequence) in look1:
                #    print tmp
                #    #exit()
                ################################
                if m_type == "54":
                    #Time!!! convert from sec to msec
                    self.rtmc = tmp[2] * 1000000
                # Here tmp[2] in Nanosecond
                # NOTE(review): for non-"54" messages tmp[2] is reused as a
                # sub-second offset -- verify the field layout per type.
                ttt = self.rtmc + tmp[2] / 1000 + self.year * 1000000
                exch_time = str(ttt)
                if m_type == "45":
                    #0x45 Order Executed (Trade ID position 5) 1447413802 + nanosec / 1000000000
                    self.data_str = self.data_str + ":" + str(tmp[5]) + ":" + exch_time + ":" + str(tmp[3])
                if m_type == "43":
                    #0x43 Order Executed With Price/ Size (Trade ID position 6)
                    self.data_str = self.data_str + ":" + str(tmp[6]) + ":" + exch_time + ":" + str(tmp[3])
                if m_type == "50":
                    #0x50 Trade (Trade ID position 8)
                    self.data_str = self.data_str + ":" + str(tmp[8]) + ":" + exch_time
                if m_type == "51":
                    #0x51 Auction Trade (Trade ID position 8)
                    self.data_str = self.data_str + ":" + str(tmp[8]) + ":" + exch_time
                if m_type == "78":
                    ###############################
                    #if str(self.sequence) in look:
                    #    print tmp
                    #exit()
                    #if str(tmp[14]) in ("XOFF "):
                    #    print tmp
                    ###############################
                    # Only report off-book trades whose venue field is not
                    # the primary market (XLON).
                    if str(tmp[14]) not in ("XLON "):
                        #print tmp
                        #print "OKZ"
                        #exit()
                        #if m_type == "78":
                        #0x51 Auction Trade (Trade ID [8] Trade type [9] )
                        self.data_str = self.data_str + ":" + str(tmp[8]) + ":" + exch_time + ":" +str(tmp[9]) + ":"\
                        +str(tmp[14])
                    else:
                        #print str(tmp[14])
                        label = False
                #delete used message
                payload_str = payload_str[le:real_len]
                if len(payload_str) == le or len(payload_str) == 0:
                    payload_str = False
            else:
                # Uninteresting message type: just skip over it.
                payload_str = payload_str[le:real_len]
                if real_len == le or real_len == 0:
                    payload_str = False
        if label:
            print(self.data_str)
        pass
    def data_type(self, payload, mc):
        """Dispatch a frame payload for decoding (mc = message count,
        currently unused here)."""
        #needs only
        #0x54 Time,
        #0x45 Order Executed
        #0x43 Order Executed With Price/ Size,
        #0x50 Trade,
        #0x51 Auction Trade.
        self.find_nex_pay(payload)
    def read_log(self):
        """Scan self.filename byte-by-byte, handing each byte to
        process() until EOF."""
        #Read RAW data from file
        self.data = open(self.filename, "rb")
        #Read RAW data from stdin
        #self.data = sys.stdin
        while True:
            char = self.data.read(1)
            if not char:
                print("Data END. Bye!")
                break
            self.process(char)
            self.iter += 1
        self.data.close()
    def process(self, char):
        """If *char* is the frame marker ('_'), read and decode the 20-byte
        capture header and the 8-byte unit header, then read the payload
        and pass it to data_type()."""
        if char == "_":
            #begin need accamulate firs 20 bytes BL. Header
            #length, receive time seconds, receive time microseconds, write time seconds, write time microseconds
            #here we need receive time seconds. This is first element in data string
            head_bl = self.data.read(20)
            try:
                var0 = struct.unpack("!IIIII", head_bl)
            except struct.error as e:
                print >>sys.stderr, "Error. unpack Header \t"
            #firs 8 bytes Unit Header
            head_u = self.data.read(8)
            try:
                var1 = struct.unpack("HBBI", head_u)
            except struct.error as e:
                print >>sys.stderr, "Error. unpack Unit Header \t"
            self.end_read = self.iter + 20
            #Length var1[0]
            #Message Count var1[1]))
            #Market Data Group var1[2]
            #Sequence Number var1[3]
            self.sequence = var1[3]
            #if var1[3] == 10444078:
            #    print var0
            #    print var1
            # Receive timestamp in microseconds: seconds * 1e6 + usec.
            rtmc = str(var0[1] * 1000000 + var0[2])
            # Midnight of the write-time day, as epoch seconds.
            y = datetime.date(datetime.fromtimestamp(var0[3]))
            y = y.timetuple()
            self.year = int(time.mktime(y))
            self.data_str = str(var0[1]) + ":" + rtmc + ":" + str(var1[3])
            if (var1[0] == 8):
                # Unit length of 8 means header only -- no payload.
                pass
            else:
                #Find netx position for firs payload
                if var1[1] >= 1:
                    if var1[1] > 1:
                        pass
                    n = var1[0] - 8
                    payload = self.data.read(n)
                    #if var1[3] == 10444078:
                    #    print payload
                    self.data_type(payload, var1[1])
if __name__ == "__main__":
    # The parser depends on Python 2.7 semantics (print statements,
    # str-as-bytes slicing), so refuse to run on any other version.
    if sys.version_info[0:2] != (2, 7):
        sys.exit("This script requires Python 2.7!")
    log_parser = filehandle("/work/testproj/testproj/new/lse_araw.log")
    log_parser.read_log()
46be087e37e5e8ecfbc5092cdf2dbb72633c1738 | 5,910 | py | Python | gui/kivy/uix/dialogs/__init__.py | stratisproject/electrum | c60fa543418c31ce7f5dcf5aa717d82a5c47e216 | [
"MIT"
] | 26 | 2017-06-09T04:13:13.000Z | 2021-11-15T11:35:30.000Z | gui/kivy/uix/dialogs/__init__.py | stratisproject/electrum | c60fa543418c31ce7f5dcf5aa717d82a5c47e216 | [
"MIT"
] | 29 | 2017-05-07T05:08:06.000Z | 2021-02-19T13:15:03.000Z | gui/kivy/uix/dialogs/__init__.py | stratisproject/electrum | c60fa543418c31ce7f5dcf5aa717d82a5c47e216 | [
"MIT"
] | 21 | 2017-05-31T14:24:20.000Z | 2021-01-30T17:35:43.000Z | from kivy.app import App
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.properties import NumericProperty, StringProperty, BooleanProperty
from kivy.core.window import Window
from electrum_stratis_gui.kivy.i18n import _
class AnimatedPopup(Factory.Popup):
    '''Popup that fades in when opened and fades out when dismissed,
    dispatching `on_activate`/`on_deactivate` once the respective
    animation completes.
    '''

    anim_duration = NumericProperty(.36)
    '''Length, in seconds, of the fade-in animation.
    '''

    __events__ = ['on_activate', 'on_deactivate']

    def on_activate(self):
        '''Default handler; override in subclasses.  Fired once the
        fade-in animation has finished.
        '''
        pass

    def on_deactivate(self):
        '''Default handler; override in subclasses.  Fired once the
        fade-out animation has finished.
        '''
        pass

    def open(self):
        '''Show the popup, animating its opacity from 0 to 1.'''
        def _finished(*args):
            self.dispatch('on_activate')

        self.opacity = 0
        super(AnimatedPopup, self).open()
        fade_in = Factory.Animation(opacity=1, d=self.anim_duration)
        fade_in.bind(on_complete=_finished)
        fade_in.start(self)

    def dismiss(self):
        '''Hide the popup, animating its opacity down to 0 before the
        actual dismissal happens.'''
        def _finished(*args):
            super(AnimatedPopup, self).dismiss()
            self.dispatch('on_deactivate')

        fade_out = Factory.Animation(opacity=0, d=.25)
        fade_out.bind(on_complete=_finished)
        fade_out.start(self)
class EventsDialog(Factory.Popup):
    '''Abstract Popup that registers two dispatchable events,
    `on_press` and `on_release`, and provides a `close()` alias for
    `dismiss()`.
    '''

    __events__ = ('on_release', 'on_press')

    def __init__(self, **kwargs):
        super(EventsDialog, self).__init__(**kwargs)

    def on_release(self, instance):
        '''Default no-op handler for the `on_release` event.'''
        pass

    def on_press(self, instance):
        '''Default no-op handler for the `on_press` event.'''
        pass

    def close(self):
        self.dismiss()
class SelectionDialog(EventsDialog):
    '''Dialog that routes added widgets into its content widget once
    that content widget exists.'''

    def add_widget(self, widget, index=0):
        # Until the popup's content widget is set, defer to Popup's own
        # add_widget (which installs the content widget itself).
        if not self.content:
            super(SelectionDialog, self).add_widget(widget)
            return
        self.content.add_widget(widget, index)
class InfoBubble(Factory.Bubble):
    '''Bubble used to display a short help/information message, animated
    in via show() and out via hide().'''

    message = StringProperty(_('Nothing set !'))
    '''Message to be displayed; defaults to "Nothing set !".'''

    icon = StringProperty('')
    '''Icon to be displayed along with the message.

    :attr:`icon` is a `StringProperty`, defaults to `''`.
    '''

    fs = BooleanProperty(False)
    '''Show the bubble in half-screen mode.

    :attr:`fs` is a `BooleanProperty`, defaults to `False`.
    '''

    modal = BooleanProperty(False)
    '''When True the bubble cannot be hidden by touching it.

    :attr:`modal` is a `BooleanProperty`, defaults to `False`.
    '''

    exit = BooleanProperty(False)
    '''Indicates whether to exit the app after the bubble is closed.

    :attr:`exit` is a `BooleanProperty`, defaults to `False`.
    '''

    dim_background = BooleanProperty(False)
    '''Indicates whether to draw a background on the window behind the bubble.

    :attr:`dim_background` is a `BooleanProperty`, defaults to `False`.
    '''

    def on_touch_down(self, touch):
        # Modal bubbles swallow all touches; otherwise any touch hides
        # the bubble (and touches on the bubble itself are consumed).
        if self.modal:
            return True
        self.hide()
        if self.collide_point(*touch.pos):
            return True

    def show(self, pos, duration, width=None, modal=False, exit=False):
        '''Animate the bubble into position'''
        self.modal, self.exit = modal, exit
        if width:
            self.width = width
        if self.modal:
            # Wrap the bubble in a dimmed ModalView so touches outside
            # it are blocked.
            from kivy.uix.modalview import ModalView
            self._modal_view = m = ModalView(background_color=[.5, .5, .5, .2])
            Window.add_widget(m)
            m.add_widget(self)
        else:
            Window.add_widget(self)
        # wait for the bubble to adjust its size according to text then animate
        Clock.schedule_once(lambda dt: self._show(pos, duration))

    def _show(self, pos, duration):
        '''Position the bubble relative to *pos* (offset depends on the
        arrow side) and fade it in; schedule auto-hide if *duration* is
        non-zero.'''
        def on_stop(*l):
            if duration:
                Clock.schedule_once(self.hide, duration + .5)
        self.opacity = 0
        arrow_pos = self.arrow_pos
        if arrow_pos[0] in ('l', 'r'):
            pos = pos[0], pos[1] - (self.height/2)
        else:
            pos = pos[0] - (self.width/2), pos[1]
        self.limit_to = Window
        anim = Factory.Animation(opacity=1, pos=pos, d=.32)
        anim.bind(on_complete=on_stop)
        anim.cancel_all(self)
        anim.start(self)

    def hide(self, now=False):
        '''Fade out and remove the bubble (immediately when *now* is
        True).  If self.exit is set, stop the running app and exit the
        process afterwards.
        '''
        def on_stop(*l):
            if self.modal:
                m = self._modal_view
                m.remove_widget(self)
                Window.remove_widget(m)
            Window.remove_widget(self)
            if self.exit:
                App.get_running_app().stop()
                import sys
                sys.exit()
            else:
                App.get_running_app().is_exit = False
        if now:
            return on_stop()
        anim = Factory.Animation(opacity=0, d=.25)
        anim.bind(on_complete=on_stop)
        anim.cancel_all(self)
        anim.start(self)
class OutputItem(Factory.BoxLayout):
    # Row widget for a single transaction output; its layout is
    # presumably defined in the accompanying kv rules -- confirm.
    pass
class OutputList(Factory.GridLayout):
    '''Grid of OutputItem rows, one per transaction output.'''

    def __init__(self, **kwargs):
        # BUGFIX: the original called super(Factory.GridLayout, self),
        # which skips GridLayout's own __init__ in the MRO.
        super(OutputList, self).__init__(**kwargs)
        self.app = App.get_running_app()

    def update(self, outputs):
        '''Rebuild the rows from `outputs`, an iterable of
        (type, address, amount) triples (the type field is unused).'''
        self.clear_widgets()
        for (_type, address, amount) in outputs:
            self.add_output(address, amount)

    def add_output(self, address, amount):
        '''Append one row showing `address` and the formatted `amount`.'''
        item = Factory.OutputItem()
        item.address = address
        item.value = self.app.format_amount_and_units(amount)
        self.add_widget(item)
62b83a1b46805a7c242b2b895a198733bfd26eb9 | 93,899 | py | Python | support/markdown2.py | mobihunterz/ImageFactoryModuleBuild | f449b60dadd6a771ece2e368c19f9c7b246e8efa | [
"Apache-2.0"
] | null | null | null | support/markdown2.py | mobihunterz/ImageFactoryModuleBuild | f449b60dadd6a771ece2e368c19f9c7b246e8efa | [
"Apache-2.0"
] | null | null | null | support/markdown2.py | mobihunterz/ImageFactoryModuleBuild | f449b60dadd6a771ece2e368c19f9c7b246e8efa | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright (c) 2012 Trent Mick.
# Copyright (c) 2007-2008 ActiveState Corp.
# License: MIT (http://www.opensource.org/licenses/mit-license.php)
from __future__ import generators
r"""A fast and complete Python implementation of Markdown.
[from http://daringfireball.net/projects/markdown/]
> Markdown is a text-to-HTML filter; it translates an easy-to-read /
> easy-to-write structured text format into HTML. Markdown's text
> format is most similar to that of plain text email, and supports
> features such as headers, *emphasis*, code blocks, blockquotes, and
> links.
>
> Markdown's syntax is designed not as a generic markup language, but
> specifically to serve as a front-end to (X)HTML. You can use span-level
> HTML tags anywhere in a Markdown document, and you can use block level
> HTML tags (like <div> and <table> as well).
Module usage:
>>> import markdown2
>>> markdown2.markdown("*boo!*") # or use `html = markdown_path(PATH)`
u'<p><em>boo!</em></p>\n'
>>> markdowner = Markdown()
>>> markdowner.convert("*boo!*")
u'<p><em>boo!</em></p>\n'
>>> markdowner.convert("**boom!**")
u'<p><strong>boom!</strong></p>\n'
This implementation of Markdown implements the full "core" syntax plus a
number of extras (e.g., code syntax coloring, footnotes) as described on
<https://github.com/trentm/python-markdown2/wiki/Extras>.
"""
cmdln_desc = """A fast and complete Python implementation of Markdown, a
text-to-HTML conversion tool for web writers.
Supported extra syntax options (see -x|--extras option below and
see <https://github.com/trentm/python-markdown2/wiki/Extras> for details):
* code-friendly: Disable _ and __ for em and strong.
* cuddled-lists: Allow lists to be cuddled to the preceding paragraph.
* fenced-code-blocks: Allows a code block to not have to be indented
by fencing it with '```' on a line before and after. Based on
<http://github.github.com/github-flavored-markdown/> with support for
syntax highlighting.
* footnotes: Support footnotes as in use on daringfireball.net and
implemented in other Markdown processors (tho not in Markdown.pl v1.0.1).
* header-ids: Adds "id" attributes to headers. The id value is a slug of
the header text.
* html-classes: Takes a dict mapping html tag names (lowercase) to a
string to use for a "class" tag attribute. Currently only supports
"pre" and "code" tags. Add an issue if you require this for other tags.
* markdown-in-html: Allow the use of `markdown="1"` in a block HTML tag to
have markdown processing be done on its contents. Similar to
<http://michelf.com/projects/php-markdown/extra/#markdown-attr> but with
some limitations.
* metadata: Extract metadata from a leading '---'-fenced block.
See <https://github.com/trentm/python-markdown2/issues/77> for details.
* nofollow: Add `rel="nofollow"` to add `<a>` tags with an href. See
<http://en.wikipedia.org/wiki/Nofollow>.
* pyshell: Treats unindented Python interactive shell sessions as <code>
blocks.
* link-patterns: Auto-link given regex patterns in text (e.g. bug number
references, revision number references).
* smarty-pants: Replaces ' and " with curly quotation marks or curly
apostrophes. Replaces --, ---, ..., and . . . with en dashes, em dashes,
and ellipses.
* toc: The returned HTML string gets a new "toc_html" attribute which is
a Table of Contents for the document. (experimental)
* xml: Passes one-liner processing instructions and namespaced XML tags.
* wiki-tables: Google Code Wiki-style tables. See
<http://code.google.com/p/support/wiki/WikiSyntax#Tables>.
"""
# Dev Notes:
# - Python's regex syntax doesn't have '\z', so I'm using '\Z'. I'm
# not yet sure if there implications with this. Compare 'pydoc sre'
# and 'perldoc perlre'.
__version_info__ = (2, 2, 2)
__version__ = '.'.join(map(str, __version_info__))
__author__ = "Trent Mick"
import os
import sys
from pprint import pprint
import re
import logging
try:
from hashlib import md5
except ImportError:
from md5 import md5
import optparse
from random import random, randint
import codecs
#---- Python version compat
try:
from urllib.parse import quote # python3
except ImportError:
from urllib import quote # python2
# Backfill builtins missing before Python 2.4: `set` and `reversed`.
if sys.version_info[:2] < (2,4):
    from sets import Set as set
    def reversed(sequence):
        # Generator equivalent of the builtin `reversed` for sequences.
        for i in sequence[::-1]:
            yield i
# Use `bytes` for byte strings and `unicode` for unicode strings (str in Py3).
# `py3` and `base_string_type` are used throughout the module for
# version-specific behavior and isinstance checks.
if sys.version_info[0] <= 2:
    py3 = False
    try:
        bytes
    except NameError:
        bytes = str
    base_string_type = basestring
elif sys.version_info[0] >= 3:
    py3 = True
    unicode = str
    base_string_type = str
#---- globals
DEBUG = False
log = logging.getLogger("markdown")

DEFAULT_TAB_WIDTH = 4


# Per-process random salt mixed into the md5 placeholder hashes so their
# values are not predictable across runs.
# BUGFIX: the original `bytes(randint(0, 1000000))` creates N *zero*
# bytes on Python 3 (up to ~1 MB of NULs) instead of the digit string;
# encoding the decimal digits keeps Python 2 behavior and fixes Python 3.
SECRET_SALT = str(randint(0, 1000000)).encode('utf-8')
def _hash_text(s):
    """Return a salted md5 placeholder token for *s*, prefixed 'md5-'."""
    digest = md5(SECRET_SALT + s.encode("utf-8")).hexdigest()
    return 'md5-' + digest
# Hashed placeholder for each backslash-escapable Markdown character.
# (dict-of-list-comp form kept for pre-2.4 Python compatibility.)
g_escape_table = dict([(c, _hash_text(c)) for c in '\\`*_{}[]()>#+-.!'])
#---- exceptions
class MarkdownError(Exception):
    """Raised for errors encountered while processing Markdown."""
    pass
#---- public api
def markdown_path(path, encoding="utf-8",
                  html4tags=False, tab_width=DEFAULT_TAB_WIDTH,
                  safe_mode=None, extras=None, link_patterns=None,
                  use_file_vars=False):
    """Read the file at `path` (decoded with `encoding`) and convert its
    Markdown content to HTML.  All other keyword arguments are forwarded
    to the `Markdown` constructor; see its docs for their meaning.
    """
    fp = codecs.open(path, 'r', encoding)
    try:
        text = fp.read()
    finally:
        # Close the handle even if reading/decoding raises (the original
        # leaked it on error).  try/finally rather than `with` keeps the
        # module's pre-2.5 Python compatibility.
        fp.close()
    return Markdown(html4tags=html4tags, tab_width=tab_width,
                    safe_mode=safe_mode, extras=extras,
                    link_patterns=link_patterns,
                    use_file_vars=use_file_vars).convert(text)
def markdown(text, html4tags=False, tab_width=DEFAULT_TAB_WIDTH,
             safe_mode=None, extras=None, link_patterns=None,
             use_file_vars=False):
    """One-shot convenience wrapper: build a `Markdown` converter with
    the given options and convert `text` with it."""
    converter = Markdown(html4tags=html4tags, tab_width=tab_width,
                         safe_mode=safe_mode, extras=extras,
                         link_patterns=link_patterns,
                         use_file_vars=use_file_vars)
    return converter.convert(text)
class Markdown(object):
# The dict of "extras" to enable in processing -- a mapping of
# extra name to argument for the extra. Most extras do not have an
# argument, in which case the value is None.
#
# This can be set via (a) subclassing and (b) the constructor
# "extras" argument.
extras = None
urls = None
titles = None
html_blocks = None
html_spans = None
html_removed_text = "[HTML_REMOVED]" # for compat with markdown.py
# Used to track when we're inside an ordered or unordered list
# (see _ProcessListItems() for details):
list_level = 0
_ws_only_line_re = re.compile(r"^[ \t]+$", re.M)
    def __init__(self, html4tags=False, tab_width=4, safe_mode=None,
                 extras=None, link_patterns=None, use_file_vars=False):
        """Create a reusable Markdown converter.

        html4tags -- emit ">" instead of " />" for empty elements
        tab_width -- number of spaces a tab expands to
        safe_mode -- True (treated as "replace") or a mode string
        extras -- dict or sequence of extra names to enable
        link_patterns -- patterns for the "link-patterns" extra
        use_file_vars -- honor emacs-style file variables in the input
        """
        if html4tags:
            self.empty_element_suffix = ">"
        else:
            self.empty_element_suffix = " />"
        self.tab_width = tab_width
        # For compatibility with earlier markdown2.py and with
        # markdown.py's safe_mode being a boolean,
        #   safe_mode == True -> "replace"
        if safe_mode is True:
            self.safe_mode = "replace"
        else:
            self.safe_mode = safe_mode
        # Massaging and building the "extras" info: merge class-level
        # extras (possibly set by a subclass) with the constructor arg,
        # normalizing sequences to {name: None} dicts.
        if self.extras is None:
            self.extras = {}
        elif not isinstance(self.extras, dict):
            self.extras = dict([(e, None) for e in self.extras])
        if extras:
            if not isinstance(extras, dict):
                extras = dict([(e, None) for e in extras])
            self.extras.update(extras)
        assert isinstance(self.extras, dict)
        if "toc" in self.extras and not "header-ids" in self.extras:
            self.extras["header-ids"] = None # "toc" implies "header-ids"
        # Snapshot used by reset() to restore per-instance extras for
        # each new document (convert() may add file-var extras).
        self._instance_extras = self.extras.copy()
        self.link_patterns = link_patterns
        self.use_file_vars = use_file_vars
        self._outdent_re = re.compile(r'^(\t|[ ]{1,%d})' % tab_width, re.M)
        self._escape_table = g_escape_table.copy()
        if "smarty-pants" in self.extras:
            # Quotes must also be hashed so smart-quote substitution
            # doesn't touch escaped ones.
            self._escape_table['"'] = _hash_text('"')
            self._escape_table["'"] = _hash_text("'")
def reset(self):
self.urls = {}
self.titles = {}
self.html_blocks = {}
self.html_spans = {}
self.list_level = 0
self.extras = self._instance_extras.copy()
if "footnotes" in self.extras:
self.footnotes = {}
self.footnote_ids = []
if "header-ids" in self.extras:
self._count_from_header_id = {} # no `defaultdict` in Python 2.4
if "metadata" in self.extras:
self.metadata = {}
# Per <https://developer.mozilla.org/en-US/docs/HTML/Element/a> "rel"
# should only be used in <a> tags with an "href" attribute.
_a_nofollow = re.compile(r"<(a)([^>]*href=)", re.IGNORECASE)
    def convert(self, text):
        """Convert the given Markdown text to HTML.

        Returns a `UnicodeWithAttrs` (a unicode subclass); when the
        "toc" extra is enabled a `_toc` attribute is attached, and when
        the "metadata" extra is enabled a `metadata` dict is attached.
        """
        # Main function. The order in which other subs are called here is
        # essential. Link and image substitutions need to happen before
        # _EscapeSpecialChars(), so that any *'s or _'s in the <a>
        # and <img> tags get encoded.
        # Clear the global hashes. If we don't clear these, you get conflicts
        # from other articles when generating a page which contains more than
        # one article (e.g. an index page that shows the N most recent
        # articles):
        self.reset()
        if not isinstance(text, unicode):
            #TODO: perhaps shouldn't presume UTF-8 for string input?
            text = unicode(text, 'utf-8')
        if self.use_file_vars:
            # Look for emacs-style file variable hints.
            emacs_vars = self._get_emacs_vars(text)
            if "markdown-extras" in emacs_vars:
                splitter = re.compile("[ ,]+")
                for e in splitter.split(emacs_vars["markdown-extras"]):
                    if '=' in e:
                        ename, earg = e.split('=', 1)
                        try:
                            earg = int(earg)
                        except ValueError:
                            pass
                    else:
                        ename, earg = e, None
                    self.extras[ename] = earg
        # Standardize line endings:
        text = re.sub("\r\n|\r", "\n", text)
        # Make sure $text ends with a couple of newlines:
        text += "\n\n"
        # Convert all tabs to spaces.
        text = self._detab(text)
        # Strip any lines consisting only of spaces and tabs.
        # This makes subsequent regexen easier to write, because we can
        # match consecutive blank lines with /\n+/ instead of something
        # contorted like /[ \t]*\n+/ .
        text = self._ws_only_line_re.sub("", text)
        # strip metadata from head and extract
        if "metadata" in self.extras:
            text = self._extract_metadata(text)
        text = self.preprocess(text)
        if "fenced-code-blocks" in self.extras:
            text = self._do_fenced_code_blocks(text)
        if self.safe_mode:
            text = self._hash_html_spans(text)
        # Turn block-level HTML blocks into hash entries
        text = self._hash_html_blocks(text, raw=True)
        # Strip link definitions, store in hashes.
        if "footnotes" in self.extras:
            # Must do footnotes first because an unlucky footnote defn
            # looks like a link defn:
            #   [^4]: this "looks like a link defn"
            text = self._strip_footnote_definitions(text)
        text = self._strip_link_definitions(text)
        # Core block-level transformations (headers, lists, code, ...).
        text = self._run_block_gamut(text)
        if "footnotes" in self.extras:
            text = self._add_footnotes(text)
        text = self.postprocess(text)
        # Swap the hashed escape placeholders back to literal characters.
        text = self._unescape_special_chars(text)
        if self.safe_mode:
            text = self._unhash_html_spans(text)
        if "nofollow" in self.extras:
            text = self._a_nofollow.sub(r'<\1 rel="nofollow"\2', text)
        text += "\n"
        rv = UnicodeWithAttrs(text)
        if "toc" in self.extras:
            rv._toc = self._toc
        if "metadata" in self.extras:
            rv.metadata = self.metadata
        return rv
def postprocess(self, text):
"""A hook for subclasses to do some postprocessing of the html, if
desired. This is called before unescaping of special chars and
unhashing of raw HTML spans.
"""
return text
def preprocess(self, text):
"""A hook for subclasses to do some preprocessing of the Markdown, if
desired. This is called after basic formatting of the text, but prior
to any extras, safe mode, etc. processing.
"""
return text
# Is metadata if the content starts with '---'-fenced `key: value`
# pairs. E.g. (indented for presentation):
# ---
# foo: bar
# another-var: blah blah
# ---
_metadata_pat = re.compile("""^---[ \t]*\n((?:[ \t]*[^ \t:]+[ \t]*:[^\n]*\n)+)---[ \t]*\n""")
def _extract_metadata(self, text):
# fast test
if not text.startswith("---"):
return text
match = self._metadata_pat.match(text)
if not match:
return text
tail = text[len(match.group(0)):]
metadata_str = match.group(1).strip()
for line in metadata_str.split('\n'):
key, value = line.split(':', 1)
self.metadata[key.strip()] = value.strip()
return tail
_emacs_oneliner_vars_pat = re.compile(r"-\*-\s*([^\r\n]*?)\s*-\*-", re.UNICODE)
# This regular expression is intended to match blocks like this:
# PREFIX Local Variables: SUFFIX
# PREFIX mode: Tcl SUFFIX
# PREFIX End: SUFFIX
# Some notes:
# - "[ \t]" is used instead of "\s" to specifically exclude newlines
# - "(\r\n|\n|\r)" is used instead of "$" because the sre engine does
# not like anything other than Unix-style line terminators.
_emacs_local_vars_pat = re.compile(r"""^
(?P<prefix>(?:[^\r\n|\n|\r])*?)
[\ \t]*Local\ Variables:[\ \t]*
(?P<suffix>.*?)(?:\r\n|\n|\r)
(?P<content>.*?\1End:)
""", re.IGNORECASE | re.MULTILINE | re.DOTALL | re.VERBOSE)
def _get_emacs_vars(self, text):
"""Return a dictionary of emacs-style local variables.
Parsing is done loosely according to this spec (and according to
some in-practice deviations from this):
http://www.gnu.org/software/emacs/manual/html_node/emacs/Specifying-File-Variables.html#Specifying-File-Variables
"""
emacs_vars = {}
SIZE = pow(2, 13) # 8kB
# Search near the start for a '-*-'-style one-liner of variables.
head = text[:SIZE]
if "-*-" in head:
match = self._emacs_oneliner_vars_pat.search(head)
if match:
emacs_vars_str = match.group(1)
assert '\n' not in emacs_vars_str
emacs_var_strs = [s.strip() for s in emacs_vars_str.split(';')
if s.strip()]
if len(emacs_var_strs) == 1 and ':' not in emacs_var_strs[0]:
# While not in the spec, this form is allowed by emacs:
# -*- Tcl -*-
# where the implied "variable" is "mode". This form
# is only allowed if there are no other variables.
emacs_vars["mode"] = emacs_var_strs[0].strip()
else:
for emacs_var_str in emacs_var_strs:
try:
variable, value = emacs_var_str.strip().split(':', 1)
except ValueError:
log.debug("emacs variables error: malformed -*- "
"line: %r", emacs_var_str)
continue
# Lowercase the variable name because Emacs allows "Mode"
# or "mode" or "MoDe", etc.
emacs_vars[variable.lower()] = value.strip()
tail = text[-SIZE:]
if "Local Variables" in tail:
match = self._emacs_local_vars_pat.search(tail)
if match:
prefix = match.group("prefix")
suffix = match.group("suffix")
lines = match.group("content").splitlines(0)
#print "prefix=%r, suffix=%r, content=%r, lines: %s"\
# % (prefix, suffix, match.group("content"), lines)
# Validate the Local Variables block: proper prefix and suffix
# usage.
for i, line in enumerate(lines):
if not line.startswith(prefix):
log.debug("emacs variables error: line '%s' "
"does not use proper prefix '%s'"
% (line, prefix))
return {}
# Don't validate suffix on last line. Emacs doesn't care,
# neither should we.
if i != len(lines)-1 and not line.endswith(suffix):
log.debug("emacs variables error: line '%s' "
"does not use proper suffix '%s'"
% (line, suffix))
return {}
# Parse out one emacs var per line.
continued_for = None
for line in lines[:-1]: # no var on the last line ("PREFIX End:")
if prefix: line = line[len(prefix):] # strip prefix
if suffix: line = line[:-len(suffix)] # strip suffix
line = line.strip()
if continued_for:
variable = continued_for
if line.endswith('\\'):
line = line[:-1].rstrip()
else:
continued_for = None
emacs_vars[variable] += ' ' + line
else:
try:
variable, value = line.split(':', 1)
except ValueError:
log.debug("local variables error: missing colon "
"in local variables entry: '%s'" % line)
continue
# Do NOT lowercase the variable name, because Emacs only
# allows "mode" (and not "Mode", "MoDe", etc.) in this block.
value = value.strip()
if value.endswith('\\'):
value = value[:-1].rstrip()
continued_for = variable
else:
continued_for = None
emacs_vars[variable] = value
# Unquote values.
for var, val in list(emacs_vars.items()):
if len(val) > 1 and (val.startswith('"') and val.endswith('"')
or val.startswith('"') and val.endswith('"')):
emacs_vars[var] = val[1:-1]
return emacs_vars
# Cribbed from a post by Bart Lateur:
# <http://www.nntp.perl.org/group/perl.macperl.anyperl/154>
_detab_re = re.compile(r'(.*?)\t', re.M)
def _detab_sub(self, match):
g1 = match.group(1)
return g1 + (' ' * (self.tab_width - len(g1) % self.tab_width))
def _detab(self, text):
r"""Remove (leading?) tabs from a file.
>>> m = Markdown()
>>> m._detab("\tfoo")
' foo'
>>> m._detab(" \tfoo")
' foo'
>>> m._detab("\t foo")
' foo'
>>> m._detab(" foo")
' foo'
>>> m._detab(" foo\n\tbar\tblam")
' foo\n bar blam'
"""
if '\t' not in text:
return text
return self._detab_re.subn(self._detab_sub, text)[0]
# I broke out the html5 tags here and add them to _block_tags_a and
# _block_tags_b. This way html5 tags are easy to keep track of.
_html5tags = '|article|aside|header|hgroup|footer|nav|section|figure|figcaption'
_block_tags_a = 'p|div|h[1-6]|blockquote|pre|table|dl|ol|ul|script|noscript|form|fieldset|iframe|math|ins|del'
_block_tags_a += _html5tags
_strict_tag_block_re = re.compile(r"""
( # save in \1
^ # start of line (with re.M)
<(%s) # start tag = \2
\b # word break
(.*\n)*? # any number of lines, minimally matching
</\2> # the matching end tag
[ \t]* # trailing spaces/tabs
(?=\n+|\Z) # followed by a newline or end of document
)
""" % _block_tags_a,
re.X | re.M)
_block_tags_b = 'p|div|h[1-6]|blockquote|pre|table|dl|ol|ul|script|noscript|form|fieldset|iframe|math'
_block_tags_b += _html5tags
_liberal_tag_block_re = re.compile(r"""
( # save in \1
^ # start of line (with re.M)
<(%s) # start tag = \2
\b # word break
(.*\n)*? # any number of lines, minimally matching
.*</\2> # the matching end tag
[ \t]* # trailing spaces/tabs
(?=\n+|\Z) # followed by a newline or end of document
)
""" % _block_tags_b,
re.X | re.M)
_html_markdown_attr_re = re.compile(
r'''\s+markdown=("1"|'1')''')
def _hash_html_block_sub(self, match, raw=False):
html = match.group(1)
if raw and self.safe_mode:
html = self._sanitize_html(html)
elif 'markdown-in-html' in self.extras and 'markdown=' in html:
first_line = html.split('\n', 1)[0]
m = self._html_markdown_attr_re.search(first_line)
if m:
lines = html.split('\n')
middle = '\n'.join(lines[1:-1])
last_line = lines[-1]
first_line = first_line[:m.start()] + first_line[m.end():]
f_key = _hash_text(first_line)
self.html_blocks[f_key] = first_line
l_key = _hash_text(last_line)
self.html_blocks[l_key] = last_line
return ''.join(["\n\n", f_key,
"\n\n", middle, "\n\n",
l_key, "\n\n"])
key = _hash_text(html)
self.html_blocks[key] = html
return "\n\n" + key + "\n\n"
    def _hash_html_blocks(self, text, raw=False):
        """Hashify HTML blocks

        We only want to do this for block-level HTML tags, such as headers,
        lists, and tables. That's because we still want to wrap <p>s around
        "paragraphs" that are wrapped in non-block-level tags, such as anchors,
        phrase emphasis, and spans. The list of tags we're looking for is
        hard-coded.

        @param raw {boolean} indicates if these are raw HTML blocks in
            the original source. It makes a difference in "safe" mode.
        """
        # Fast path: no '<' means no HTML at all.
        if '<' not in text:
            return text

        # Pass `raw` value into our calls to self._hash_html_block_sub.
        hash_html_block_sub = _curry(self._hash_html_block_sub, raw=raw)

        # First, look for nested blocks, e.g.:
        #   <div>
        #       <div>
        #       tags for inner block must be indented.
        #       </div>
        #   </div>
        #
        # The outermost tags must start at the left margin for this to match, and
        # the inner nested divs must be indented.
        # We need to do this before the next, more liberal match, because the next
        # match will start at the first `<div>` and stop at the first `</div>`.
        text = self._strict_tag_block_re.sub(hash_html_block_sub, text)

        # Now match more liberally, simply from `\n<tag>` to `</tag>\n`
        text = self._liberal_tag_block_re.sub(hash_html_block_sub, text)

        # Special case just for <hr />. It was easier to make a special
        # case than to make the other regex more complicated.
        if "<hr" in text:
            _hr_tag_re = _hr_tag_re_from_tab_width(self.tab_width)
            text = _hr_tag_re.sub(hash_html_block_sub, text)

        # Special case for standalone HTML comments. These are scanned with
        # an explicit index loop (not a regex) so the surrounding whitespace
        # rules from Markdown.pl can be reproduced exactly.
        if "<!--" in text:
            start = 0
            while True:
                # Delimiters for next comment block.
                try:
                    start_idx = text.index("<!--", start)
                except ValueError:
                    break
                try:
                    end_idx = text.index("-->", start_idx) + 3
                except ValueError:
                    break

                # Start position for next comment block search.
                start = end_idx

                # Validate whitespace before comment.
                if start_idx:
                    # - Up to `tab_width - 1` spaces before start_idx.
                    for i in range(self.tab_width - 1):
                        if text[start_idx - 1] != ' ':
                            break
                        start_idx -= 1
                        if start_idx == 0:
                            break

                    # - Must be preceded by 2 newlines or hit the start of
                    #   the document.
                    if start_idx == 0:
                        pass
                    elif start_idx == 1 and text[0] == '\n':
                        start_idx = 0  # to match minute detail of Markdown.pl regex
                    elif text[start_idx-2:start_idx] == '\n\n':
                        pass
                    else:
                        # Not standalone: stop scanning entirely (matches the
                        # original control flow — `break`, not `continue`).
                        break

                # Validate whitespace after comment.
                # - Any number of spaces and tabs.
                while end_idx < len(text):
                    if text[end_idx] not in ' \t':
                        break
                    end_idx += 1
                # - Must be following by 2 newlines or hit end of text.
                if text[end_idx:end_idx+2] not in ('', '\n', '\n\n'):
                    continue

                # Escape and hash (must match `_hash_html_block_sub`).
                html = text[start_idx:end_idx]
                if raw and self.safe_mode:
                    html = self._sanitize_html(html)
                key = _hash_text(html)
                self.html_blocks[key] = html
                text = text[:start_idx] + "\n\n" + key + "\n\n" + text[end_idx:]

        if "xml" in self.extras:
            # Treat XML processing instructions and namespaced one-liner
            # tags as if they were block HTML tags. E.g., if standalone
            # (i.e. are their own paragraph), the following do not get
            # wrapped in a <p> tag:
            #    <?foo bar?>
            #
            #    <xi:include xmlns:xi="http://www.w3.org/2001/XInclude" href="chapter_1.md"/>
            _xml_oneliner_re = _xml_oneliner_re_from_tab_width(self.tab_width)
            text = _xml_oneliner_re.sub(hash_html_block_sub, text)

        return text
    def _strip_link_definitions(self, text):
        """Strip link definitions from `text`, storing the URLs and titles
        in `self.urls` / `self.titles` (via _extract_link_def_sub).

        Link defs are in the form:
            [id]: url "optional title"
        """
        less_than_tab = self.tab_width - 1
        _link_def_re = re.compile(r"""
            ^[ ]{0,%d}\[(.+)\]: # id = \1
              [ \t]*
              \n?               # maybe *one* newline
              [ \t]*
            <?(.+?)>?           # url = \2
              [ \t]*
            (?:
                \n?             # maybe one newline
                [ \t]*
                (?<=\s)         # lookbehind for whitespace
                ['"(]
                ([^\n]*)        # title = \3
                ['")]
                [ \t]*
            )?  # title is optional
            (?:\n+|\Z)
            """ % less_than_tab, re.X | re.M | re.U)
        return _link_def_re.sub(self._extract_link_def_sub, text)
def _extract_link_def_sub(self, match):
id, url, title = match.groups()
key = id.lower() # Link IDs are case-insensitive
self.urls[key] = self._encode_amps_and_angles(url)
if title:
self.titles[key] = title
return ""
def _extract_footnote_def_sub(self, match):
id, text = match.groups()
text = _dedent(text, skip_first_line=not text.startswith('\n')).strip()
normed_id = re.sub(r'\W', '-', id)
# Ensure footnote text ends with a couple newlines (for some
# block gamut matches).
self.footnotes[normed_id] = text + "\n\n"
return ""
    def _strip_footnote_definitions(self, text):
        """Strip footnote definitions, storing them in `self.footnotes`
        keyed by normalized id (via _extract_footnote_def_sub).

        A footnote definition looks like this:

            [^note-id]: Text of the note.

                May include one or more indented paragraphs.

        Where,
        - The 'note-id' can be pretty much anything, though typically it
          is the number of the footnote.
        - The first paragraph may start on the next line, like so:

            [^note-id]:
                Text of the note.
        """
        less_than_tab = self.tab_width - 1
        footnote_def_re = re.compile(r'''
            ^[ ]{0,%d}\[\^(.+)\]:   # id = \1
            [ \t]*
            (                       # footnote text = \2
              # First line need not start with the spaces.
              (?:\s*.*\n+)
              (?:
                (?:[ ]{%d} | \t)  # Subsequent lines must be indented.
                .*\n+
              )*
            )
            # Lookahead for non-space at line-start, or end of doc.
            (?:(?=^[ ]{0,%d}\S)|\Z)
            ''' % (less_than_tab, self.tab_width, self.tab_width),
            re.X | re.M)
        return footnote_def_re.sub(self._extract_footnote_def_sub, text)
_hr_re = re.compile(r'^[ ]{0,3}([-_*][ ]{0,2}){3,}$', re.M)
    def _run_block_gamut(self, text):
        """Apply all block-level transformations, in order.

        These are all the transformations that form block-level tags like
        paragraphs, headers, and list items. The order of the steps is
        significant (e.g. fenced code blocks before headers, paragraph
        forming last).
        """
        if "fenced-code-blocks" in self.extras:
            text = self._do_fenced_code_blocks(text)

        text = self._do_headers(text)

        # Do Horizontal Rules:
        # On the number of spaces in horizontal rules: The spec is fuzzy: "If
        # you wish, you may use spaces between the hyphens or asterisks."
        # Markdown.pl 1.0.1's hr regexes limit the number of spaces between the
        # hr chars to one or two. We'll reproduce that limit here.
        hr = "\n<hr"+self.empty_element_suffix+"\n"
        text = re.sub(self._hr_re, hr, text)

        text = self._do_lists(text)

        if "pyshell" in self.extras:
            text = self._prepare_pyshell_blocks(text)
        if "wiki-tables" in self.extras:
            text = self._do_wiki_tables(text)

        text = self._do_code_blocks(text)

        text = self._do_block_quotes(text)

        # We already ran _HashHTMLBlocks() before, in Markdown(), but that
        # was to escape raw HTML in the original Markdown source. This time,
        # we're escaping the markup we've just created, so that we don't wrap
        # <p> tags around block-level tags.
        text = self._hash_html_blocks(text)

        text = self._form_paragraphs(text)

        return text
def _pyshell_block_sub(self, match):
lines = match.group(0).splitlines(0)
_dedentlines(lines)
indent = ' ' * self.tab_width
s = ('\n' # separate from possible cuddled paragraph
+ indent + ('\n'+indent).join(lines)
+ '\n\n')
return s
    def _prepare_pyshell_blocks(self, text):
        """Ensure that Python interactive shell sessions are put in
        code blocks -- even if not properly indented.

        A session is a ">>>"-prefixed line plus subsequent non-blank lines
        with the same (or deeper) indent, terminated by a blank line.
        """
        # Fast path: no prompt marker anywhere.
        if ">>>" not in text:
            return text

        less_than_tab = self.tab_width - 1
        _pyshell_block_re = re.compile(r"""
            ^([ ]{0,%d})>>>[ ].*\n  # first line
            ^(\1.*\S+.*\n)*         # any number of subsequent lines
            ^\n                     # ends with a blank line
            """ % less_than_tab, re.M | re.X)

        return _pyshell_block_re.sub(self._pyshell_block_sub, text)
def _wiki_table_sub(self, match):
ttext = match.group(0).strip()
#print 'wiki table: %r' % match.group(0)
rows = []
for line in ttext.splitlines(0):
line = line.strip()[2:-2].strip()
row = [c.strip() for c in re.split(r'(?<!\\)\|\|', line)]
rows.append(row)
#pprint(rows)
hlines = ['<table>', '<tbody>']
for row in rows:
hrow = ['<tr>']
for cell in row:
hrow.append('<td>')
hrow.append(self._run_span_gamut(cell))
hrow.append('</td>')
hrow.append('</tr>')
hlines.append(''.join(hrow))
hlines += ['</tbody>', '</table>']
return '\n'.join(hlines) + '\n'
    def _do_wiki_tables(self, text):
        """Convert ||-delimited tables ("wiki-tables" extra) to HTML tables."""
        # Optimization.
        if "||" not in text:
            return text

        less_than_tab = self.tab_width - 1
        wiki_table_re = re.compile(r'''
            (?:(?<=\n\n)|\A\n?)            # leading blank line
            ^([ ]{0,%d})\|\|.+?\|\|[ ]*\n  # first line
            (^\1\|\|.+?\|\|\n)*            # any number of subsequent lines
            ''' % less_than_tab, re.M | re.X)
        return wiki_table_re.sub(self._wiki_table_sub, text)
    def _run_span_gamut(self, text):
        """Apply all span-level transformations, in order.

        These are all the transformations that occur *within* block-level
        tags like paragraphs, headers, and list items. Order matters
        throughout (e.g. code spans first so their contents are protected,
        links before auto-links).
        """
        text = self._do_code_spans(text)

        text = self._escape_special_chars(text)

        # Process anchor and image tags.
        text = self._do_links(text)

        # Make links out of things like `<http://example.com/>`
        # Must come after _do_links(), because you can use < and >
        # delimiters in inline links like [this](<url>).
        text = self._do_auto_links(text)

        if "link-patterns" in self.extras:
            text = self._do_link_patterns(text)

        text = self._encode_amps_and_angles(text)

        text = self._do_italics_and_bold(text)

        if "smarty-pants" in self.extras:
            text = self._do_smart_punctuation(text)

        # Do hard breaks:
        if "break-on-newline" in self.extras:
            text = re.sub(r" *\n", "<br%s\n" % self.empty_element_suffix, text)
        else:
            text = re.sub(r" {2,}\n", " <br%s\n" % self.empty_element_suffix, text)

        return text
# "Sorta" because auto-links are identified as "tag" tokens.
_sorta_html_tokenize_re = re.compile(r"""
(
# tag
</?
(?:\w+) # tag name
(?:\s+(?:[\w-]+:)?[\w-]+=(?:".*?"|'.*?'))* # attributes
\s*/?>
|
# auto-link (e.g., <http://www.activestate.com/>)
<\w+[^>]*>
|
<!--.*?--> # comment
|
<\?.*?\?> # processing instruction
)
""", re.X)
def _escape_special_chars(self, text):
# Python markdown note: the HTML tokenization here differs from
# that in Markdown.pl, hence the behaviour for subtle cases can
# differ (I believe the tokenizer here does a better job because
# it isn't susceptible to unmatched '<' and '>' in HTML tags).
# Note, however, that '>' is not allowed in an auto-link URL
# here.
escaped = []
is_html_markup = False
for token in self._sorta_html_tokenize_re.split(text):
if is_html_markup:
# Within tags/HTML-comments/auto-links, encode * and _
# so they don't conflict with their use in Markdown for
# italics and strong. We're replacing each such
# character with its corresponding MD5 checksum value;
# this is likely overkill, but it should prevent us from
# colliding with the escape values by accident.
escaped.append(token.replace('*', self._escape_table['*'])
.replace('_', self._escape_table['_']))
else:
escaped.append(self._encode_backslash_escapes(token))
is_html_markup = not is_html_markup
return ''.join(escaped)
def _hash_html_spans(self, text):
# Used for safe_mode.
def _is_auto_link(s):
if ':' in s and self._auto_link_re.match(s):
return True
elif '@' in s and self._auto_email_link_re.match(s):
return True
return False
tokens = []
is_html_markup = False
for token in self._sorta_html_tokenize_re.split(text):
if is_html_markup and not _is_auto_link(token):
sanitized = self._sanitize_html(token)
key = _hash_text(sanitized)
self.html_spans[key] = sanitized
tokens.append(key)
else:
tokens.append(token)
is_html_markup = not is_html_markup
return ''.join(tokens)
def _unhash_html_spans(self, text):
for key, sanitized in list(self.html_spans.items()):
text = text.replace(key, sanitized)
return text
def _sanitize_html(self, s):
if self.safe_mode == "replace":
return self.html_removed_text
elif self.safe_mode == "escape":
replacements = [
('&', '&'),
('<', '<'),
('>', '>'),
]
for before, after in replacements:
s = s.replace(before, after)
return s
else:
raise MarkdownError("invalid value for 'safe_mode': %r (must be "
"'escape' or 'replace')" % self.safe_mode)
_inline_link_title = re.compile(r'''
( # \1
[ \t]+
(['"]) # quote char = \2
(?P<title>.*?)
\2
)? # title is optional
\)$
''', re.X | re.S)
_tail_of_reference_link_re = re.compile(r'''
# Match tail of: [text][id]
[ ]? # one optional space
(?:\n[ ]*)? # one optional newline followed by spaces
\[
(?P<id>.*?)
\]
''', re.X | re.S)
_whitespace = re.compile(r'\s*')
_strip_anglebrackets = re.compile(r'<(.*)>.*')
def _find_non_whitespace(self, text, start):
"""Returns the index of the first non-whitespace character in text
after (and including) start
"""
match = self._whitespace.match(text, start)
return match.end()
def _find_balanced(self, text, start, open_c, close_c):
"""Returns the index where the open_c and close_c characters balance
out - the same number of open_c and close_c are encountered - or the
end of string if it's reached before the balance point is found.
"""
i = start
l = len(text)
count = 1
while count > 0 and i < l:
if text[i] == open_c:
count += 1
elif text[i] == close_c:
count -= 1
i += 1
return i
def _extract_url_and_title(self, text, start):
"""Extracts the url and (optional) title from the tail of a link"""
# text[start] equals the opening parenthesis
idx = self._find_non_whitespace(text, start+1)
if idx == len(text):
return None, None, None
end_idx = idx
has_anglebrackets = text[idx] == "<"
if has_anglebrackets:
end_idx = self._find_balanced(text, end_idx+1, "<", ">")
end_idx = self._find_balanced(text, end_idx, "(", ")")
match = self._inline_link_title.search(text, idx, end_idx)
if not match:
return None, None, None
url, title = text[idx:match.start()], match.group("title")
if has_anglebrackets:
url = self._strip_anglebrackets.sub(r'\1', url)
return url, title, end_idx
def _do_links(self, text):
"""Turn Markdown link shortcuts into XHTML <a> and <img> tags.
This is a combination of Markdown.pl's _DoAnchors() and
_DoImages(). They are done together because that simplified the
approach. It was necessary to use a different approach than
Markdown.pl because of the lack of atomic matching support in
Python's regex engine used in $g_nested_brackets.
"""
MAX_LINK_TEXT_SENTINEL = 3000 # markdown2 issue 24
# `anchor_allowed_pos` is used to support img links inside
# anchors, but not anchors inside anchors. An anchor's start
# pos must be `>= anchor_allowed_pos`.
anchor_allowed_pos = 0
curr_pos = 0
while True: # Handle the next link.
# The next '[' is the start of:
# - an inline anchor: [text](url "title")
# - a reference anchor: [text][id]
# - an inline img: 
# - a reference img: ![text][id]
# - a footnote ref: [^id]
# (Only if 'footnotes' extra enabled)
# - a footnote defn: [^id]: ...
# (Only if 'footnotes' extra enabled) These have already
# been stripped in _strip_footnote_definitions() so no
# need to watch for them.
# - a link definition: [id]: url "title"
# These have already been stripped in
# _strip_link_definitions() so no need to watch for them.
# - not markup: [...anything else...
try:
start_idx = text.index('[', curr_pos)
except ValueError:
break
text_length = len(text)
# Find the matching closing ']'.
# Markdown.pl allows *matching* brackets in link text so we
# will here too. Markdown.pl *doesn't* currently allow
# matching brackets in img alt text -- we'll differ in that
# regard.
bracket_depth = 0
for p in range(start_idx+1, min(start_idx+MAX_LINK_TEXT_SENTINEL,
text_length)):
ch = text[p]
if ch == ']':
bracket_depth -= 1
if bracket_depth < 0:
break
elif ch == '[':
bracket_depth += 1
else:
# Closing bracket not found within sentinel length.
# This isn't markup.
curr_pos = start_idx + 1
continue
link_text = text[start_idx+1:p]
# Possibly a footnote ref?
if "footnotes" in self.extras and link_text.startswith("^"):
normed_id = re.sub(r'\W', '-', link_text[1:])
if normed_id in self.footnotes:
self.footnote_ids.append(normed_id)
result = '<sup class="footnote-ref" id="fnref-%s">' \
'<a href="#fn-%s">%s</a></sup>' \
% (normed_id, normed_id, len(self.footnote_ids))
text = text[:start_idx] + result + text[p+1:]
else:
# This id isn't defined, leave the markup alone.
curr_pos = p+1
continue
# Now determine what this is by the remainder.
p += 1
if p == text_length:
return text
# Inline anchor or img?
if text[p] == '(': # attempt at perf improvement
url, title, url_end_idx = self._extract_url_and_title(text, p)
if url is not None:
# Handle an inline anchor or img.
is_img = start_idx > 0 and text[start_idx-1] == "!"
if is_img:
start_idx -= 1
# We've got to encode these to avoid conflicting
# with italics/bold.
url = url.replace('*', self._escape_table['*']) \
.replace('_', self._escape_table['_'])
if title:
title_str = ' title="%s"' % (
_xml_escape_attr(title)
.replace('*', self._escape_table['*'])
.replace('_', self._escape_table['_']))
else:
title_str = ''
if is_img:
img_class_str = self._html_class_str_from_tag("img")
result = '<img src="%s" alt="%s"%s%s%s' \
% (url.replace('"', '"'),
_xml_escape_attr(link_text),
title_str, img_class_str, self.empty_element_suffix)
if "smarty-pants" in self.extras:
result = result.replace('"', self._escape_table['"'])
curr_pos = start_idx + len(result)
text = text[:start_idx] + result + text[url_end_idx:]
elif start_idx >= anchor_allowed_pos:
result_head = '<a href="%s"%s>' % (url, title_str)
result = '%s%s</a>' % (result_head, link_text)
if "smarty-pants" in self.extras:
result = result.replace('"', self._escape_table['"'])
# <img> allowed from curr_pos on, <a> from
# anchor_allowed_pos on.
curr_pos = start_idx + len(result_head)
anchor_allowed_pos = start_idx + len(result)
text = text[:start_idx] + result + text[url_end_idx:]
else:
# Anchor not allowed here.
curr_pos = start_idx + 1
continue
# Reference anchor or img?
else:
match = self._tail_of_reference_link_re.match(text, p)
if match:
# Handle a reference-style anchor or img.
is_img = start_idx > 0 and text[start_idx-1] == "!"
if is_img:
start_idx -= 1
link_id = match.group("id").lower()
if not link_id:
link_id = link_text.lower() # for links like [this][]
if link_id in self.urls:
url = self.urls[link_id]
# We've got to encode these to avoid conflicting
# with italics/bold.
url = url.replace('*', self._escape_table['*']) \
.replace('_', self._escape_table['_'])
title = self.titles.get(link_id)
if title:
before = title
title = _xml_escape_attr(title) \
.replace('*', self._escape_table['*']) \
.replace('_', self._escape_table['_'])
title_str = ' title="%s"' % title
else:
title_str = ''
if is_img:
img_class_str = self._html_class_str_from_tag("img")
result = '<img src="%s" alt="%s"%s%s%s' \
% (url.replace('"', '"'),
link_text.replace('"', '"'),
title_str, img_class_str, self.empty_element_suffix)
if "smarty-pants" in self.extras:
result = result.replace('"', self._escape_table['"'])
curr_pos = start_idx + len(result)
text = text[:start_idx] + result + text[match.end():]
elif start_idx >= anchor_allowed_pos:
result = '<a href="%s"%s>%s</a>' \
% (url, title_str, link_text)
result_head = '<a href="%s"%s>' % (url, title_str)
result = '%s%s</a>' % (result_head, link_text)
if "smarty-pants" in self.extras:
result = result.replace('"', self._escape_table['"'])
# <img> allowed from curr_pos on, <a> from
# anchor_allowed_pos on.
curr_pos = start_idx + len(result_head)
anchor_allowed_pos = start_idx + len(result)
text = text[:start_idx] + result + text[match.end():]
else:
# Anchor not allowed here.
curr_pos = start_idx + 1
else:
# This id isn't defined, leave the markup alone.
curr_pos = match.end()
continue
# Otherwise, it isn't markup.
curr_pos = start_idx + 1
return text
def header_id_from_text(self, text, prefix, n):
"""Generate a header id attribute value from the given header
HTML content.
This is only called if the "header-ids" extra is enabled.
Subclasses may override this for different header ids.
@param text {str} The text of the header tag
@param prefix {str} The requested prefix for header ids. This is the
value of the "header-ids" extra key, if any. Otherwise, None.
@param n {int} The <hN> tag number, i.e. `1` for an <h1> tag.
@returns {str} The value for the header tag's "id" attribute. Return
None to not have an id attribute and to exclude this header from
the TOC (if the "toc" extra is specified).
"""
header_id = _slugify(text)
if prefix and isinstance(prefix, base_string_type):
header_id = prefix + '-' + header_id
if header_id in self._count_from_header_id:
self._count_from_header_id[header_id] += 1
header_id += '-%s' % self._count_from_header_id[header_id]
else:
self._count_from_header_id[header_id] = 1
return header_id
_toc = None
def _toc_add_entry(self, level, id, name):
if self._toc is None:
self._toc = []
self._toc.append((level, id, self._unescape_special_chars(name)))
_h_re_base = r'''
(^(.+)[ \t]*\n(=+|-+)[ \t]*\n+)
|
(^(\#{1,6}) # \1 = string of #'s
[ \t]%s
(.+?) # \2 = Header text
[ \t]*
(?<!\\) # ensure not an escaped trailing '#'
\#* # optional closing #'s (not counted)
\n+
)
'''
_h_re = re.compile(_h_re_base % '*', re.X | re.M)
_h_re_tag_friendly = re.compile(_h_re_base % '+', re.X | re.M)
def _h_sub(self, match):
if match.group(1) is not None:
# Setext header
n = {"=": 1, "-": 2}[match.group(3)[0]]
header_group = match.group(2)
else:
# atx header
n = len(match.group(5))
header_group = match.group(6)
demote_headers = self.extras.get("demote-headers")
if demote_headers:
n = min(n + demote_headers, 6)
header_id_attr = ""
if "header-ids" in self.extras:
header_id = self.header_id_from_text(header_group,
self.extras["header-ids"], n)
if header_id:
header_id_attr = ' id="%s"' % header_id
html = self._run_span_gamut(header_group)
if "toc" in self.extras and header_id:
self._toc_add_entry(n, header_id, html)
return "<h%d%s>%s</h%d>\n\n" % (n, header_id_attr, html, n)
def _do_headers(self, text):
# Setext-style headers:
# Header 1
# ========
#
# Header 2
# --------
# atx-style headers:
# # Header 1
# ## Header 2
# ## Header 2 with closing hashes ##
# ...
# ###### Header 6
if 'tag-friendly' in self.extras:
return self._h_re_tag_friendly.sub(self._h_sub, text)
return self._h_re.sub(self._h_sub, text)
_marker_ul_chars = '*+-'
_marker_any = r'(?:[%s]|\d+\.)' % _marker_ul_chars
_marker_ul = '(?:[%s])' % _marker_ul_chars
_marker_ol = r'(?:\d+\.)'
def _list_sub(self, match):
lst = match.group(1)
lst_type = match.group(3) in self._marker_ul_chars and "ul" or "ol"
result = self._process_list_items(lst)
if self.list_level:
return "<%s>\n%s</%s>\n" % (lst_type, result, lst_type)
else:
return "<%s>\n%s</%s>\n\n" % (lst_type, result, lst_type)
    def _do_lists(self, text):
        """Form HTML ordered (numbered) and unordered (bulleted) lists."""
        # Iterate over each *non-overlapping* list match.
        pos = 0
        while True:
            # Find the *first* hit for either list style (ul or ol). We
            # match ul and ol separately to avoid adjacent lists of different
            # types running into each other (see issue #16).
            hits = []
            for marker_pat in (self._marker_ul, self._marker_ol):
                less_than_tab = self.tab_width - 1
                whole_list = r'''
                    (                   # \1 = whole list
                      (                 # \2
                        [ ]{0,%d}
                        (%s)            # \3 = first list item marker
                        [ \t]+
                        (?!\ *\3\ )     # '- - - ...' isn't a list. See 'not_quite_a_list' test case.
                      )
                      (?:.+?)
                      (                 # \4
                        \Z
                        |
                        \n{2,}
                        (?=\S)
                        (?!             # Negative lookahead for another list item marker
                          [ \t]*
                          %s[ \t]+
                        )
                      )
                    )
                ''' % (less_than_tab, marker_pat, marker_pat)
                # At the top level a list must follow a blank line (or start
                # the document); inside a list any line may start a sub-list.
                if self.list_level:  # sub-list
                    list_re = re.compile("^"+whole_list, re.X | re.M | re.S)
                else:
                    list_re = re.compile(r"(?:(?<=\n\n)|\A\n?)"+whole_list,
                                         re.X | re.M | re.S)
                match = list_re.search(text, pos)
                if match:
                    hits.append((match.start(), match))
            if not hits:
                break
            # Substitute the earliest hit, then resume scanning after it.
            hits.sort()
            match = hits[0][1]
            start, end = match.span()
            text = text[:start] + self._list_sub(match) + text[end:]
            pos = end
        return text
_list_item_re = re.compile(r'''
(\n)? # leading line = \1
(^[ \t]*) # leading whitespace = \2
(?P<marker>%s) [ \t]+ # list marker = \3
((?:.+?) # list item text = \4
(\n{1,2})) # eols = \5
(?= \n* (\Z | \2 (?P<next_marker>%s) [ \t]+))
''' % (_marker_any, _marker_any),
re.M | re.X | re.S)
_last_li_endswith_two_eols = False
def _list_item_sub(self, match):
item = match.group(4)
leading_line = match.group(1)
leading_space = match.group(2)
if leading_line or "\n\n" in item or self._last_li_endswith_two_eols:
item = self._run_block_gamut(self._outdent(item))
else:
# Recursion for sub-lists:
item = self._do_lists(self._outdent(item))
if item.endswith('\n'):
item = item[:-1]
item = self._run_span_gamut(item)
self._last_li_endswith_two_eols = (len(match.group(5)) == 2)
return "<li>%s</li>\n" % item
def _process_list_items(self, list_str):
# Process the contents of a single ordered or unordered list,
# splitting it into individual list items.
# The $g_list_level global keeps track of when we're inside a list.
# Each time we enter a list, we increment it; when we leave a list,
# we decrement. If it's zero, we're not in a list anymore.
#
# We do this because when we're not inside a list, we want to treat
# something like this:
#
# I recommend upgrading to version
# 8. Oops, now this line is treated
# as a sub-list.
#
# As a single paragraph, despite the fact that the second line starts
# with a digit-period-space sequence.
#
# Whereas when we're inside a list (or sub-list), that line will be
# treated as the start of a sub-list. What a kludge, huh? This is
# an aspect of Markdown's syntax that's hard to parse perfectly
# without resorting to mind-reading. Perhaps the solution is to
# change the syntax rules such that sub-lists must start with a
# starting cardinal number; e.g. "1." or "a.".
self.list_level += 1
self._last_li_endswith_two_eols = False
list_str = list_str.rstrip('\n') + '\n'
list_str = self._list_item_re.sub(self._list_item_sub, list_str)
self.list_level -= 1
return list_str
def _get_pygments_lexer(self, lexer_name):
try:
from pygments import lexers, util
except ImportError:
return None
try:
return lexers.get_lexer_by_name(lexer_name)
except util.ClassNotFound:
return None
def _color_with_pygments(self, codeblock, lexer, **formatter_opts):
    """Syntax-highlight *codeblock* with Pygments using *lexer*.

    Returns highlighted HTML wrapped in <div><pre><code>; the custom
    formatter below adds the <code> layer that Pygments' stock
    HtmlFormatter does not emit by itself.
    """
    import pygments
    import pygments.formatters

    class HtmlCodeFormatter(pygments.formatters.HtmlFormatter):
        def _wrap_code(self, inner):
            """A function for use in a Pygments Formatter which
            wraps in <code> tags.
            """
            yield 0, "<code>"
            for tup in inner:
                yield tup
            yield 0, "</code>"

        def wrap(self, source, outfile):
            """Return the source with a code, pre, and div."""
            return self._wrap_div(self._wrap_pre(self._wrap_code(source)))

    formatter_opts.setdefault("cssclass", "codehilite")
    formatter = HtmlCodeFormatter(**formatter_opts)
    return pygments.highlight(codeblock, lexer, formatter)

def _code_block_sub(self, match, is_fenced_code_block=False):
    """re.sub callback rendering one matched code block as HTML.

    Handles both tab/space-indented blocks and ```-fenced blocks (the
    latter pass is_fenced_code_block=True and may carry an optional
    language name in group 1, which selects a Pygments lexer).
    """
    lexer_name = None
    if is_fenced_code_block:
        lexer_name = match.group(1)
        if lexer_name:
            formatter_opts = self.extras['fenced-code-blocks'] or {}
        codeblock = match.group(2)
        codeblock = codeblock[:-1]  # drop one trailing newline
    else:
        codeblock = match.group(1)
        codeblock = self._outdent(codeblock)
        codeblock = self._detab(codeblock)
        codeblock = codeblock.lstrip('\n')  # trim leading newlines
        codeblock = codeblock.rstrip()  # trim trailing whitespace
    # Note: "code-color" extra is DEPRECATED.
    if "code-color" in self.extras and codeblock.startswith(":::"):
        # A ":::lang" first line selects the lexer (legacy syntax).
        lexer_name, rest = codeblock.split('\n', 1)
        lexer_name = lexer_name[3:].strip()
        codeblock = rest.lstrip("\n")  # Remove lexer declaration line.
        formatter_opts = self.extras['code-color'] or {}
    if lexer_name:
        lexer = self._get_pygments_lexer(lexer_name)
        if lexer:
            colored = self._color_with_pygments(codeblock, lexer,
                                                **formatter_opts)
            return "\n\n%s\n\n" % colored
    # No highlighting: escape and wrap in plain <pre><code>.
    codeblock = self._encode_code(codeblock)
    pre_class_str = self._html_class_str_from_tag("pre")
    code_class_str = self._html_class_str_from_tag("code")
    return "\n\n<pre%s><code%s>%s\n</code></pre>\n\n" % (
        pre_class_str, code_class_str, codeblock)
def _html_class_str_from_tag(self, tag):
    """Get the appropriate ' class="..."' string (note the leading
    space), if any, for the given tag.
    """
    if "html-classes" not in self.extras:
        return ""
    try:
        html_classes_from_tag = self.extras["html-classes"]
    except TypeError:
        # Non-subscriptable extras container: treat as "no classes".
        # NOTE(review): this guard only covers the subscript itself; a
        # non-container value for extras["html-classes"] (e.g. None)
        # would still raise on the `in` test below — confirm callers
        # always supply a dict for this extra.
        return ""
    else:
        if tag in html_classes_from_tag:
            return ' class="%s"' % html_classes_from_tag[tag]
    return ""
def _do_code_blocks(self, text):
    """Process Markdown `<pre><code>` blocks (tab/space-indented)."""
    code_block_re = re.compile(r'''
        (?:\n\n|\A\n?)
        (               # $1 = the code block -- one or more lines, starting with a space/tab
          (?:
            (?:[ ]{%d} | \t)  # Lines must start with a tab or a tab-width of spaces
            .*\n+
          )+
        )
        ((?=^[ ]{0,%d}\S)|\Z)   # Lookahead for non-space at line-start, or end of doc
        # Lookahead to make sure this block isn't already in a code block.
        # Needed when syntax highlighting is being used.
        (?![^<]*\</code\>)
        ''' % (self.tab_width, self.tab_width),
        re.M | re.X)
    return code_block_re.sub(self._code_block_sub, text)

_fenced_code_block_re = re.compile(r'''
    (?:\n\n|\A\n?)
    ^```([\w+-]+)?[ \t]*\n      # opening fence, $1 = optional lang
    (.*?)                       # $2 = code block content
    ^```[ \t]*\n                # closing fence
    ''', re.M | re.X | re.S)

def _fenced_code_block_sub(self, match):
    """re.sub callback for fenced blocks: delegate to the common handler."""
    # (stray trailing ';' removed)
    return self._code_block_sub(match, is_fenced_code_block=True)

def _do_fenced_code_blocks(self, text):
    """Process ```-fenced unindented code blocks ('fenced-code-blocks' extra)."""
    return self._fenced_code_block_re.sub(self._fenced_code_block_sub, text)
# Rules for a code span:
# - backslash escapes are not interpreted in a code span
# - to include one or or a run of more backticks the delimiters must
#   be a longer run of backticks
# - cannot start or end a code span with a backtick; pad with a
#   space and that space will be removed in the emitted HTML
# See `test/tm-cases/escapes.text` for a number of edge-case
# examples.
_code_span_re = re.compile(r'''
        (?<!\\)
        (`+)        # \1 = Opening run of `
        (?!`)       # See Note A test/tm-cases/escapes.text
        (.+?)       # \2 = The code block
        (?<!`)
        \1          # Matching closer
        (?!`)
    ''', re.X | re.S)

def _code_span_sub(self, match):
    """re.sub callback: emit one matched code span as <code>...</code>."""
    c = match.group(2).strip(" \t")  # pad spaces around content are dropped
    c = self._encode_code(c)
    return "<code>%s</code>" % c

def _do_code_spans(self, text):
    # * Backtick quotes are used for <code></code> spans.
    #
    # * You can use multiple backticks as the delimiters if you want to
    #   include literal backticks in the code span. So, this input:
    #
    #     Just type ``foo `bar` baz`` at the prompt.
    #
    #   Will translate to:
    #
    #     <p>Just type <code>foo `bar` baz</code> at the prompt.</p>
    #
    #   There's no arbitrary limit to the number of backticks you
    #   can use as delimters. If you need three consecutive backticks
    #   in your code, use four for delimiters, etc.
    #
    # * You can use spaces to get literal backticks at the edges:
    #
    #     ... type `` `bar` `` ...
    #
    #   Turns to:
    #
    #     ... type <code>`bar`</code> ...
    return self._code_span_re.sub(self._code_span_sub, text)
def _encode_code(self, text):
    """Encode/escape certain characters inside Markdown code runs.

    The point is that in code, these characters are literals,
    and lose their special Markdown meanings.  The escaped text is
    hidden behind a hash so later span-level processing (emphasis,
    links, ...) cannot touch it; `_unescape_special_chars` swaps the
    literal text back in at the end.
    """
    replacements = [
        # Encode all ampersands; HTML entities are not
        # entities within a Markdown code span.
        # (The entity strings had degraded to identity no-ops, e.g.
        # ('&', '&'); restored to the proper HTML entities.)
        ('&', '&amp;'),
        # Do the angle bracket song and dance:
        ('<', '&lt;'),
        ('>', '&gt;'),
    ]
    for before, after in replacements:
        text = text.replace(before, after)
    hashed = _hash_text(text)
    self._escape_table[text] = hashed
    return hashed
_strong_re = re.compile(r"(\*\*|__)(?=\S)(.+?[*_]*)(?<=\S)\1", re.S)
_em_re = re.compile(r"(\*|_)(?=\S)(.+?)(?<=\S)\1", re.S)
# "code-friendly" variants: only '*' (not '_') delimits emphasis, so
# identifiers like some_var_name are left alone.
_code_friendly_strong_re = re.compile(r"\*\*(?=\S)(.+?[*_]*)(?<=\S)\*\*", re.S)
_code_friendly_em_re = re.compile(r"\*(?=\S)(.+?)(?<=\S)\*", re.S)

def _do_italics_and_bold(self, text):
    """Convert emphasis markup (*em*, **strong**, _em_, __strong__)."""
    # <strong> must go first:
    if "code-friendly" in self.extras:
        text = self._code_friendly_strong_re.sub(r"<strong>\1</strong>", text)
        text = self._code_friendly_em_re.sub(r"<em>\1</em>", text)
    else:
        text = self._strong_re.sub(r"<strong>\2</strong>", text)
        text = self._em_re.sub(r"<em>\2</em>", text)
    return text
# "smarty-pants" extra: Very liberal in interpreting a single prime as an
# apostrophe; e.g. ignores the fact that "round", "bout", "twer", and
# "twixt" can be written without an initial apostrophe. This is fine because
# using scare quotes (single quotation marks) is rare.
_apostrophe_year_re = re.compile(r"'(\d\d)(?=(\s|,|;|\.|\?|!|$))")
_contractions = ["tis", "twas", "twer", "neath", "o", "n",
"round", "bout", "twixt", "nuff", "fraid", "sup"]
def _do_smart_contractions(self, text):
text = self._apostrophe_year_re.sub(r"’\1", text)
for c in self._contractions:
text = text.replace("'%s" % c, "’%s" % c)
text = text.replace("'%s" % c.capitalize(),
"’%s" % c.capitalize())
return text
# Substitute double-quotes before single-quotes.
_opening_single_quote_re = re.compile(r"(?<!\S)'(?=\S)")
_opening_double_quote_re = re.compile(r'(?<!\S)"(?=\S)')
_closing_single_quote_re = re.compile(r"(?<=\S)'")
_closing_double_quote_re = re.compile(r'(?<=\S)"(?=(\s|,|;|\.|\?|!|$))')
def _do_smart_punctuation(self, text):
"""Fancifies 'single quotes', "double quotes", and apostrophes.
Converts --, ---, and ... into en dashes, em dashes, and ellipses.
Inspiration is: <http://daringfireball.net/projects/smartypants/>
See "test/tm-cases/smarty_pants.text" for a full discussion of the
support here and
<http://code.google.com/p/python-markdown2/issues/detail?id=42> for a
discussion of some diversion from the original SmartyPants.
"""
if "'" in text: # guard for perf
text = self._do_smart_contractions(text)
text = self._opening_single_quote_re.sub("‘", text)
text = self._closing_single_quote_re.sub("’", text)
if '"' in text: # guard for perf
text = self._opening_double_quote_re.sub("“", text)
text = self._closing_double_quote_re.sub("”", text)
text = text.replace("---", "—")
text = text.replace("--", "–")
text = text.replace("...", "…")
text = text.replace(" . . . ", "…")
text = text.replace(". . .", "…")
return text
_block_quote_re = re.compile(r'''
( # Wrap whole match in \1
(
^[ \t]*>[ \t]? # '>' at the start of a line
.+\n # rest of the first line
(.+\n)* # subsequent consecutive lines
\n* # blanks
)+
)
''', re.M | re.X)
_bq_one_level_re = re.compile('^[ \t]*>[ \t]?', re.M);
_html_pre_block_re = re.compile(r'(\s*<pre>.+?</pre>)', re.S)
def _dedent_two_spaces_sub(self, match):
return re.sub(r'(?m)^ ', '', match.group(1))
def _block_quote_sub(self, match):
bq = match.group(1)
bq = self._bq_one_level_re.sub('', bq) # trim one level of quoting
bq = self._ws_only_line_re.sub('', bq) # trim whitespace-only lines
bq = self._run_block_gamut(bq) # recurse
bq = re.sub('(?m)^', ' ', bq)
# These leading spaces screw with <pre> content, so we need to fix that:
bq = self._html_pre_block_re.sub(self._dedent_two_spaces_sub, bq)
return "<blockquote>\n%s\n</blockquote>\n\n" % bq
def _do_block_quotes(self, text):
if '>' not in text:
return text
return self._block_quote_re.sub(self._block_quote_sub, text)
def _form_paragraphs(self, text):
    """Wrap the remaining text chunks in <p> tags and unhash HTML blocks."""
    # Strip leading and trailing lines:
    text = text.strip('\n')

    # Wrap <p> tags.
    grafs = []
    for i, graf in enumerate(re.split(r"\n{2,}", text)):
        if graf in self.html_blocks:
            # Unhashify HTML blocks
            grafs.append(self.html_blocks[graf])
        else:
            cuddled_list = None
            if "cuddled-lists" in self.extras:
                # Need to put back trailing '\n' for `_list_item_re`
                # match at the end of the paragraph.
                li = self._list_item_re.search(graf + '\n')
                # Two of the same list marker in this paragraph: a likely
                # candidate for a list cuddled to preceding paragraph
                # text (issue 33). Note the `[-1]` is a quick way to
                # consider numeric bullets (e.g. "1." and "2.") to be
                # equal.
                if (li and len(li.group(2)) <= 3 and li.group("next_marker")
                        and li.group("marker")[-1] == li.group("next_marker")[-1]):
                    start = li.start()
                    cuddled_list = self._do_lists(graf[start:]).rstrip("\n")
                    assert cuddled_list.startswith("<ul>") or cuddled_list.startswith("<ol>")
                    graf = graf[:start]

            # Wrap <p> tags.
            graf = self._run_span_gamut(graf)
            grafs.append("<p>" + graf.lstrip(" \t") + "</p>")

            if cuddled_list:
                grafs.append(cuddled_list)

    return "\n\n".join(grafs)

def _add_footnotes(self, text):
    """Append the rendered footnotes <div> ('footnotes' extra) to *text*."""
    if self.footnotes:
        footer = [
            '<div class="footnotes">',
            '<hr' + self.empty_element_suffix,
            '<ol>',
        ]
        for i, id in enumerate(self.footnote_ids):
            if i != 0:
                footer.append('')
            footer.append('<li id="fn-%s">' % id)
            footer.append(self._run_block_gamut(self.footnotes[id]))
            backlink = ('<a href="#fnref-%s" '
                        'class="footnoteBackLink" '
                        'title="Jump back to footnote %d in the text.">'
                        '↩</a>' % (id, i+1))
            if footer[-1].endswith("</p>"):
                # Tuck the backlink inside the footnote's final paragraph.
                footer[-1] = footer[-1][:-len("</p>")] \
                    + ' ' + backlink + "</p>"
            else:
                footer.append("\n<p>%s</p>" % backlink)
            footer.append('</li>')
        footer.append('</ol>')
        footer.append('</div>')
        return text + '\n\n' + '\n'.join(footer)
    else:
        return text
# Ampersand-encoding based entirely on Nat Irons's Amputator MT plugin:
# http://bumppo.net/projects/amputator/
_ampersand_re = re.compile(r'&(?!#?[xX]?(?:[0-9a-fA-F]+|\w+);)')
_naked_lt_re = re.compile(r'<(?![a-z/?\$!])', re.I)
_naked_gt_re = re.compile(r'''(?<![a-z0-9?!/'"-])>''', re.I)
def _encode_amps_and_angles(self, text):
# Smart processing for ampersands and angle brackets that need
# to be encoded.
text = self._ampersand_re.sub('&', text)
# Encode naked <'s
text = self._naked_lt_re.sub('<', text)
# Encode naked >'s
# Note: Other markdown implementations (e.g. Markdown.pl, PHP
# Markdown) don't do this.
text = self._naked_gt_re.sub('>', text)
return text
def _encode_backslash_escapes(self, text):
for ch, escape in list(self._escape_table.items()):
text = text.replace("\\"+ch, escape)
return text
_auto_link_re = re.compile(r'<((https?|ftp):[^\'">\s]+)>', re.I)
def _auto_link_sub(self, match):
g1 = match.group(1)
return '<a href="%s">%s</a>' % (g1, g1)
_auto_email_link_re = re.compile(r"""
      <
       (?:mailto:)?
      (
          [-.\w]+
          \@
          [-\w]+(\.[-\w]+)*\.[a-z]+
      )
      >
    """, re.I | re.X | re.U)

def _auto_email_link_sub(self, match):
    """re.sub callback: obfuscate and link a matched <addr@host> email."""
    return self._encode_email_address(
        self._unescape_special_chars(match.group(1)))

def _do_auto_links(self, text):
    """Convert <http://...> and <addr@host> auto-links to <a> elements."""
    text = self._auto_link_re.sub(self._auto_link_sub, text)
    text = self._auto_email_link_re.sub(self._auto_email_link_sub, text)
    return text
def _encode_email_address(self, addr):
    # Input: an email address, e.g. "foo@example.com"
    #
    # Output: the email address as a mailto link, with each character
    #   of the address encoded as either a decimal or hex entity, in
    #   the hopes of foiling most address harvesting spam bots. E.g.:
    #
    #   <a href="mailto:foo@e
    #       xample.com">foo
    #       @example.com</a>
    #
    # Based on a filter by Matthew Wickline, posted to the BBEdit-Talk
    # mailing list: <http://tinyurl.com/yu7ue>
    #
    # Note: the per-character encoding choice is random, so the output
    # is nondeterministic by design.
    chars = [_xml_encode_email_char_at_random(ch)
             for ch in "mailto:" + addr]
    # Strip the mailto: from the visible part.
    # (chars[7:] skips the 7 encoded characters of "mailto:".)
    addr = '<a href="%s">%s</a>' \
        % (''.join(chars), ''.join(chars[7:]))
    return addr
def _do_link_patterns(self, text):
    """Auto-link occurrences of the user-supplied `link_patterns`.

    Caveat emptor: there isn't much guarding against link
    patterns being formed inside other standard Markdown links, e.g.
    inside a [link def][like this].

    Dev Notes: *Could* consider prefixing regexes with a negative
    lookbehind assertion to attempt to guard against this.
    """
    link_from_hash = {}
    for regex, repl in self.link_patterns:
        replacements = []
        for match in regex.finditer(text):
            if hasattr(repl, "__call__"):
                href = repl(match)
            else:
                href = match.expand(repl)
            replacements.append((match.span(), href))
        # Substitute from the end so earlier spans remain valid.
        for (start, end), href in reversed(replacements):
            escaped_href = (
                # Fixed: the quote replacement had degraded to an
                # identity no-op; a literal '"' must become &quot;
                # because the href sits inside a double-quoted attr.
                href.replace('"', '&quot;')
                    # To avoid markdown <em> and <strong>:
                    .replace('*', self._escape_table['*'])
                    .replace('_', self._escape_table['_']))
            link = '<a href="%s">%s</a>' % (escaped_href, text[start:end])
            hash = _hash_text(link)
            link_from_hash[hash] = link
            text = text[:start] + hash + text[end:]
    # Swap the finished links back in for their hashes.
    for hash, link in list(link_from_hash.items()):
        text = text.replace(hash, link)
    return text
def _unescape_special_chars(self, text):
    """Swap the literal special characters back in for their hashes."""
    # Swap back in all the special characters we've hidden.
    for ch, hash in list(self._escape_table.items()):
        text = text.replace(hash, ch)
    return text

def _outdent(self, text):
    """Remove one level (a tab width) of line-leading indentation."""
    # Remove one level of line-leading tabs or spaces
    return self._outdent_re.sub('', text)
class MarkdownWithExtras(Markdown):
    """A markdowner class that enables most extras:

    - footnotes
    - code-color (only has effect if 'pygments' Python module on path)

    These are not included:
    - pyshell (specific to Python-related documenting)
    - code-friendly (because it *disables* part of the syntax)
    - link-patterns (because you need to specify some actual
      link-patterns anyway)
    """
    extras = ["footnotes", "code-color"]


#---- internal support functions

# NOTE(review): `unicode` is presumably aliased to `str` for Python 3
# earlier in this file — confirm.
class UnicodeWithAttrs(unicode):
    """A subclass of unicode used for the return value of conversion to
    possibly attach some attributes. E.g. the "toc_html" attribute when
    the "toc" extra is used.
    """
    metadata = None
    _toc = None
    def toc_html(self):
        """Return the HTML for the current TOC.
        This expects the `_toc` attribute to have been set on this instance.
        """
        if self._toc is None:
            return None

        def indent():
            # Indentation string for the current nesting depth.
            return ' ' * (len(h_stack) - 1)
        lines = []
        h_stack = [0]  # stack of header-level numbers
        for level, id, name in self._toc:
            if level > h_stack[-1]:
                # Deeper header: open a nested list.
                lines.append("%s<ul>" % indent())
                h_stack.append(level)
            elif level == h_stack[-1]:
                lines[-1] += "</li>"
            else:
                # Shallower header: close lists back up to its level.
                while level < h_stack[-1]:
                    h_stack.pop()
                    if not lines[-1].endswith("</li>"):
                        lines[-1] += "</li>"
                    lines.append("%s</ul></li>" % indent())
            lines.append('%s<li><a href="#%s">%s</a>' % (
                indent(), id, name))
        # Close any lists still open at the end.
        while len(h_stack) > 1:
            h_stack.pop()
            if not lines[-1].endswith("</li>"):
                lines[-1] += "</li>"
            lines.append("%s</ul>" % indent())
        return '\n'.join(lines) + '\n'
    toc_html = property(toc_html)
## {{{ http://code.activestate.com/recipes/577257/ (r1)
_slugify_strip_re = re.compile(r'[^\w\s-]')
_slugify_hyphenate_re = re.compile(r'[-\s]+')
def _slugify(value):
"""
Normalizes string, converts to lowercase, removes non-alpha characters,
and converts spaces to hyphens.
From Django's "django/template/defaultfilters.py".
"""
import unicodedata
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode()
value = _slugify_strip_re.sub('', value).strip().lower()
return _slugify_hyphenate_re.sub('-', value)
## end of http://code.activestate.com/recipes/577257/ }}}
# From http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52549
def _curry(*args, **kwargs):
function, args = args[0], args[1:]
def result(*rest, **kwrest):
combined = kwargs.copy()
combined.update(kwrest)
return function(*args + rest, **combined)
return result
# Recipe: regex_from_encoded_pattern (1.0)
def _regex_from_encoded_pattern(s):
"""'foo' -> re.compile(re.escape('foo'))
'/foo/' -> re.compile('foo')
'/foo/i' -> re.compile('foo', re.I)
"""
if s.startswith('/') and s.rfind('/') != 0:
# Parse it: /PATTERN/FLAGS
idx = s.rfind('/')
pattern, flags_str = s[1:idx], s[idx+1:]
flag_from_char = {
"i": re.IGNORECASE,
"l": re.LOCALE,
"s": re.DOTALL,
"m": re.MULTILINE,
"u": re.UNICODE,
}
flags = 0
for char in flags_str:
try:
flags |= flag_from_char[char]
except KeyError:
raise ValueError("unsupported regex flag: '%s' in '%s' "
"(must be one of '%s')"
% (char, s, ''.join(list(flag_from_char.keys()))))
return re.compile(s[1:idx], flags)
else: # not an encoded regex
return re.compile(re.escape(s))
# Recipe: dedent (0.1.2)
def _dedentlines(lines, tabsize=8, skip_first_line=False):
    """_dedentlines(lines, tabsize=8, skip_first_line=False) -> dedented lines

    "lines" is a list of lines to dedent.
    "tabsize" is the tab width to use for indent width calculations.
    "skip_first_line" is a boolean indicating if the first line should
        be skipped for calculating the indent width and for dedenting.
        This is sometimes useful for docstrings and similar.

    Same as dedent() except operates on a sequence of lines. Note: the
    lines list is modified **in-place**.
    """
    DEBUG = False
    if DEBUG:
        print("dedent: dedent(..., tabsize=%d, skip_first_line=%r)"\
              % (tabsize, skip_first_line))
    indents = []  # (unused; kept from the original recipe)
    margin = None
    # Pass 1: find the common margin (smallest indent over non-blank lines).
    for i, line in enumerate(lines):
        if i == 0 and skip_first_line: continue
        indent = 0
        for ch in line:
            if ch == ' ':
                indent += 1
            elif ch == '\t':
                # Advance to the next tab stop.
                indent += tabsize - (indent % tabsize)
            elif ch in '\r\n':
                continue  # skip all-whitespace lines
            else:
                break
        else:
            continue  # skip all-whitespace lines
        if DEBUG: print("dedent: indent=%d: %r" % (indent, line))
        if margin is None:
            margin = indent
        else:
            margin = min(margin, indent)
    if DEBUG: print("dedent: margin=%r" % margin)

    # Pass 2: strip `margin` columns of leading whitespace from each line.
    if margin is not None and margin > 0:
        for i, line in enumerate(lines):
            if i == 0 and skip_first_line: continue
            removed = 0
            for j, ch in enumerate(line):
                if ch == ' ':
                    removed += 1
                elif ch == '\t':
                    removed += tabsize - (removed % tabsize)
                elif ch in '\r\n':
                    if DEBUG: print("dedent: %r: EOL -> strip up to EOL" % line)
                    lines[i] = lines[i][j:]
                    break
                else:
                    raise ValueError("unexpected non-whitespace char %r in "
                                     "line %r while removing %d-space margin"
                                     % (ch, line, margin))
                if DEBUG:
                    print("dedent: %r: %r -> removed %d/%d"\
                          % (line, ch, removed, margin))
                if removed == margin:
                    lines[i] = lines[i][j+1:]
                    break
                elif removed > margin:
                    # A tab carried us past the margin: pad the overshoot
                    # back with spaces.
                    lines[i] = ' '*(removed-margin) + lines[i][j+1:]
                    break
            else:
                if removed:
                    lines[i] = lines[i][removed:]
    return lines
def _dedent(text, tabsize=8, skip_first_line=False):
    """_dedent(text, tabsize=8, skip_first_line=False) -> dedented text

    "text" is the text to dedent.
    "tabsize" is the tab width to use for indent width calculations.
    "skip_first_line" is a boolean indicating if the first line should
        be skipped for calculating the indent width and for dedenting.
        This is sometimes useful for docstrings and similar.

    textwrap.dedent(s), but don't expand tabs to spaces
    """
    lines = text.splitlines(1)  # keep line endings
    _dedentlines(lines, tabsize=tabsize, skip_first_line=skip_first_line)
    return ''.join(lines)
class _memoized(object):
"""Decorator that caches a function's return value each time it is called.
If called later with the same arguments, the cached value is returned, and
not re-evaluated.
http://wiki.python.org/moin/PythonDecoratorLibrary
"""
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
try:
return self.cache[args]
except KeyError:
self.cache[args] = value = self.func(*args)
return value
except TypeError:
# uncachable -- for instance, passing a list as an argument.
# Better to not cache than to blow up entirely.
return self.func(*args)
def __repr__(self):
"""Return the function's docstring."""
return self.func.__doc__
def _xml_oneliner_re_from_tab_width(tab_width):
    """Standalone XML processing instruction regex."""
    return re.compile(r"""
        (?:
            (?<=\n\n)       # Starting after a blank line
            |               # or
            \A\n?           # the beginning of the doc
        )
        (                           # save in $1
            [ ]{0,%d}
            (?:
                <\?\w+\b\s+.*?\?>   # XML processing instruction
                |
                <\w+:\w+\b\s+.*?/>  # namespaced single tag
            )
            [ \t]*
            (?=\n{2,}|\Z)           # followed by a blank line or end of document
        )
    """ % (tab_width - 1), re.X)
# Memoized: the compiled regex only depends on tab_width.
_xml_oneliner_re_from_tab_width = _memoized(_xml_oneliner_re_from_tab_width)

def _hr_tag_re_from_tab_width(tab_width):
    """Regex matching a standalone <hr> tag line indented less than tab_width."""
    return re.compile(r"""
        (?:
            (?<=\n\n)       # Starting after a blank line
            |               # or
            \A\n?           # the beginning of the doc
        )
        (                       # save in \1
            [ ]{0,%d}
            <(hr)               # start tag = \2
            \b                  # word break
            ([^<>])*?           #
            /?>                 # the matching end tag
            [ \t]*
            (?=\n{2,}|\Z)       # followed by a blank line or end of document
        )
    """ % (tab_width - 1), re.X)
# Memoized: the compiled regex only depends on tab_width.
_hr_tag_re_from_tab_width = _memoized(_hr_tag_re_from_tab_width)
def _xml_escape_attr(attr, skip_single_quote=True):
"""Escape the given string for use in an HTML/XML tag attribute.
By default this doesn't bother with escaping `'` to `'`, presuming that
the tag attribute is surrounded by double quotes.
"""
escaped = (attr
.replace('&', '&')
.replace('"', '"')
.replace('<', '<')
.replace('>', '>'))
if not skip_single_quote:
escaped = escaped.replace("'", "'")
return escaped
def _xml_encode_email_char_at_random(ch):
r = random()
# Roughly 10% raw, 45% hex, 45% dec.
# '@' *must* be encoded. I [John Gruber] insist.
# Issue 26: '_' must be encoded.
if r > 0.9 and ch not in "@_":
return ch
elif r < 0.45:
# The [1:] is to drop leading '0': 0x63 -> x63
return '&#%s;' % hex(ord(ch))[1:]
else:
return '&#%s;' % ord(ch)
#---- mainline

class _NoReflowFormatter(optparse.IndentedHelpFormatter):
    """An optparse formatter that does NOT reflow the description."""
    def format_description(self, description):
        # Return the description verbatim (optparse would re-wrap it).
        return description or ""

def _test():
    """Run this module's doctests (used by the --self-test CLI option)."""
    import doctest
    doctest.testmod()
def main(argv=None):
    """CLI entry point: parse options and convert each PATH (or stdin) to HTML."""
    if argv is None:
        argv = sys.argv
    if not logging.root.handlers:
        logging.basicConfig()

    usage = "usage: %prog [PATHS...]"
    version = "%prog "+__version__
    parser = optparse.OptionParser(prog="markdown2", usage=usage,
        version=version, description=cmdln_desc,
        formatter=_NoReflowFormatter())
    parser.add_option("-v", "--verbose", dest="log_level",
                      action="store_const", const=logging.DEBUG,
                      help="more verbose output")
    parser.add_option("--encoding",
                      help="specify encoding of text content")
    parser.add_option("--html4tags", action="store_true", default=False,
                      help="use HTML 4 style for empty element tags")
    parser.add_option("-s", "--safe", metavar="MODE", dest="safe_mode",
                      help="sanitize literal HTML: 'escape' escapes "
                           "HTML meta chars, 'replace' replaces with an "
                           "[HTML_REMOVED] note")
    parser.add_option("-x", "--extras", action="append",
                      help="Turn on specific extra features (not part of "
                           "the core Markdown spec). See above.")
    parser.add_option("--use-file-vars",
                      help="Look for and use Emacs-style 'markdown-extras' "
                           "file var to turn on extras. See "
                           "<https://github.com/trentm/python-markdown2/wiki/Extras>")
    parser.add_option("--link-patterns-file",
                      help="path to a link pattern file")
    parser.add_option("--self-test", action="store_true",
                      help="run internal self-tests (some doctests)")
    parser.add_option("--compare", action="store_true",
                      help="run against Markdown.pl as well (for testing)")
    parser.set_defaults(log_level=logging.INFO, compare=False,
                        encoding="utf-8", safe_mode=None, use_file_vars=False)
    opts, paths = parser.parse_args()
    log.setLevel(opts.log_level)

    if opts.self_test:
        return _test()

    if opts.extras:
        # Parse "-x name[=arg]" options (comma/semicolon/colon/space
        # separated) into the {name: arg-or-None} dict markdown() expects.
        extras = {}
        for s in opts.extras:
            splitter = re.compile("[,;: ]+")
            for e in splitter.split(s):
                if '=' in e:
                    ename, earg = e.split('=', 1)
                    try:
                        earg = int(earg)
                    except ValueError:
                        pass
                else:
                    ename, earg = e, None
                extras[ename] = earg
    else:
        extras = None

    if opts.link_patterns_file:
        # Each non-blank, non-'#' line of the file is "PATTERN HREF".
        link_patterns = []
        f = open(opts.link_patterns_file)
        try:
            for i, line in enumerate(f.readlines()):
                if not line.strip(): continue
                if line.lstrip().startswith("#"): continue
                try:
                    pat, href = line.rstrip().rsplit(None, 1)
                except ValueError:
                    raise MarkdownError("%s:%d: invalid link pattern line: %r"
                                        % (opts.link_patterns_file, i+1, line))
                link_patterns.append(
                    (_regex_from_encoded_pattern(pat), href))
        finally:
            f.close()
    else:
        link_patterns = None

    from os.path import join, dirname, abspath, exists
    markdown_pl = join(dirname(dirname(abspath(__file__))), "test",
                       "Markdown.pl")
    if not paths:
        paths = ['-']  # default: read from stdin
    for path in paths:
        if path == '-':
            text = sys.stdin.read()
        else:
            fp = codecs.open(path, 'r', opts.encoding)
            text = fp.read()
            fp.close()
        if opts.compare:
            # Also run the reference Perl implementation for comparison.
            from subprocess import Popen, PIPE
            print("==== Markdown.pl ====")
            p = Popen('perl %s' % markdown_pl, shell=True, stdin=PIPE, stdout=PIPE, close_fds=True)
            p.stdin.write(text.encode('utf-8'))
            p.stdin.close()
            perl_html = p.stdout.read().decode('utf-8')
            if py3:
                sys.stdout.write(perl_html)
            else:
                sys.stdout.write(perl_html.encode(
                    sys.stdout.encoding or "utf-8", 'xmlcharrefreplace'))
            print("==== markdown2.py ====")
        html = markdown(text,
            html4tags=opts.html4tags,
            safe_mode=opts.safe_mode,
            extras=extras, link_patterns=link_patterns,
            use_file_vars=opts.use_file_vars)
        if py3:
            sys.stdout.write(html)
        else:
            sys.stdout.write(html.encode(
                sys.stdout.encoding or "utf-8", 'xmlcharrefreplace'))
        if extras and "toc" in extras:
            log.debug("toc_html: " +
                html.toc_html.encode(sys.stdout.encoding or "utf-8", 'xmlcharrefreplace'))
        if opts.compare:
            # Normalize both outputs (when the test helpers are available)
            # before comparing markdown2's HTML against Markdown.pl's.
            test_dir = join(dirname(dirname(abspath(__file__))), "test")
            if exists(join(test_dir, "test_markdown2.py")):
                sys.path.insert(0, test_dir)
                from test_markdown2 import norm_html_from_html
                norm_html = norm_html_from_html(html)
                norm_perl_html = norm_html_from_html(perl_html)
            else:
                norm_html = html
                norm_perl_html = perl_html
            print("==== match? %r ====" % (norm_perl_html == norm_html))


if __name__ == "__main__":
    sys.exit( main(sys.argv) )
| 39.957021 | 121 | 0.523456 |
b45c9fe585bf109bac40d39eb3e8b55a4f519ebd | 3,307 | py | Python | Insert Delete GetRandom O(1) - Duplicates allowed.py | sugia/leetcode | 6facec2a54d1d9f133f420c9bce1d1043f57ebc6 | [
"Apache-2.0"
] | null | null | null | Insert Delete GetRandom O(1) - Duplicates allowed.py | sugia/leetcode | 6facec2a54d1d9f133f420c9bce1d1043f57ebc6 | [
"Apache-2.0"
] | null | null | null | Insert Delete GetRandom O(1) - Duplicates allowed.py | sugia/leetcode | 6facec2a54d1d9f133f420c9bce1d1043f57ebc6 | [
"Apache-2.0"
] | null | null | null | '''
Design a data structure that supports all following operations in average O(1) time.
Note: Duplicate elements are allowed.
insert(val): Inserts an item val to the collection.
remove(val): Removes an item val from the collection if present.
getRandom: Returns a random element from current collection of elements. The probability of each element being returned is linearly related to the number of same value the collection contains.
Example:
// Init an empty collection.
RandomizedCollection collection = new RandomizedCollection();
// Inserts 1 to the collection. Returns true as the collection did not contain 1.
collection.insert(1);
// Inserts another 1 to the collection. Returns false as the collection contained 1. Collection now contains [1,1].
collection.insert(1);
// Inserts 2 to the collection, returns true. Collection now contains [1,1,2].
collection.insert(2);
// getRandom should return 1 with the probability 2/3, and returns 2 with the probability 1/3.
collection.getRandom();
// Removes 1 from the collection, returns true. Collection now contains [1,2].
collection.remove(1);
// getRandom should return 1 and 2 both equally likely.
collection.getRandom();
'''
class RandomizedCollection(object):
    """Multiset supporting average-O(1) insert, remove and getRandom.

    `self.val` holds every element (duplicates included); `self.idx`
    maps a value to the set of its current positions in `self.val`.
    Removal swaps the removed occurrence with the final element so the
    list pop is O(1).
    """

    def __init__(self):
        """Initialize your data structure here."""
        self.val = []  # all elements, duplicates included
        self.idx = {}  # value -> set of indices into self.val

    def insert(self, val):
        """
        Inserts a value to the collection. Returns true if the collection did not already contain the specified element.
        :type val: int
        :rtype: bool
        """
        is_new = val not in self.idx
        self.val.append(val)
        if is_new:
            self.idx[val] = {len(self.val) - 1}
        else:
            self.idx[val].add(len(self.val) - 1)
        return is_new

    def remove(self, val):
        """
        Removes a value from the collection. Returns true if the collection contained the specified element.
        :type val: int
        :rtype: bool
        """
        if val not in self.idx:
            return False
        last_idx = len(self.val) - 1
        last_val = self.val[last_idx]
        if val == last_val:
            # Removing (an occurrence at) the tail: just pop.
            self.val.pop()
            self.idx[val].remove(last_idx)
        else:
            # Overwrite one occurrence of val with the tail element,
            # then pop the tail and fix both index sets.
            val_idx = max(self.idx[val])
            self.val[val_idx] = last_val
            self.val.pop()
            self.idx[val].remove(val_idx)
            self.idx[last_val].remove(last_idx)
            self.idx[last_val].add(val_idx)
        if not self.idx[val]:
            del self.idx[val]
        return True

    def getRandom(self):
        """
        Get a random element from the collection.
        :rtype: int
        """
        # Fix: `random` was used but never imported in this file.
        import random
        return random.choice(self.val)
# Your RandomizedCollection object will be instantiated and called as such:
# obj = RandomizedCollection()
# param_1 = obj.insert(val)
# param_2 = obj.remove(val)
# param_3 = obj.getRandom()
| 31.495238 | 196 | 0.596008 |
086fcb811d328ee7ba131ded15882342992845e3 | 1,535 | py | Python | scRFE/scRFEimplot.py | czbiohub/scRFE | 716b0f59b4b949e6842af3080276c7ea835618a9 | [
"MIT"
] | 11 | 2020-03-24T17:10:50.000Z | 2021-09-08T22:56:16.000Z | scRFE/scRFEimplot.py | czbiohub/scRFE | 716b0f59b4b949e6842af3080276c7ea835618a9 | [
"MIT"
] | null | null | null | scRFE/scRFEimplot.py | czbiohub/scRFE | 716b0f59b4b949e6842af3080276c7ea835618a9 | [
"MIT"
] | 1 | 2020-03-26T23:42:00.000Z | 2020-03-26T23:42:00.000Z | #!/usr/bin/env python
# coding: utf-8
# # scRFEimplot
# In[92]:
# import dependencies
import numpy as np
import pandas as pd
import scanpy as sc
import random
import logging as logg
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.feature_selection import SelectFromModel
from sklearn.model_selection import StratifiedKFold
from sklearn.feature_selection import RFECV
from sklearn.metrics import accuracy_score
from sklearn.inspection import permutation_importance
import matplotlib.pyplot as plt
# In[65]:
def scRFEimplot(X_new,y):
    """
    Plots permutation importance of each feature selected by scRFE.

    Parameters
    ----------
    X_new : sparse matrix
        Transformed array of selected features.
    y : pandas series
        Target labels.

    Returns
    -------
    plt : module matplotlib.pyplot
        Can be pickled, then saved.
    """
    # Fit a default random forest on the selected features.
    rf = RandomForestClassifier(random_state=0).fit(X_new, y)
    # Permutation importance is computed on a densified copy of the input;
    # n_repeats=10 with a fixed random_state keeps the result reproducible.
    result = permutation_importance(rf, X_new.todense(), y, n_repeats=10, random_state=0,
                                n_jobs=-1)
    fig, ax = plt.subplots()
    sorted_idx = result.importances_mean.argsort()
    # Horizontal boxplots in ascending mean-importance order, scaled to percent.
    # NOTE(review): labels are the positional indices 0..n_features-1, not
    # sorted_idx — confirm the label/box pairing is intended.
    ax.boxplot(result.importances[sorted_idx].T*100,
               vert=False, labels=range(X_new.shape[1]))
    ax.set_title("Permutation Importance of each feature")
    ax.set_ylabel("Features")
    fig.tight_layout()
    plt.show()
    return plt
# In[66]:
# test3 = scRFEimplot(X_new = test1[3], y = test1[4])
# In[48]:
# type(test3)
# In[ ]:
| 20.743243 | 89 | 0.711401 |
5c1093967379cad2ea57d4248b8bd972fe8be132 | 1,578 | py | Python | 10_Days_of_Statistics/Day_1_Interquartile_Range.py | nfabiop/HackerRank | 646007dd816f356b49ce05886984457403e9535c | [
"MIT"
] | null | null | null | 10_Days_of_Statistics/Day_1_Interquartile_Range.py | nfabiop/HackerRank | 646007dd816f356b49ce05886984457403e9535c | [
"MIT"
] | null | null | null | 10_Days_of_Statistics/Day_1_Interquartile_Range.py | nfabiop/HackerRank | 646007dd816f356b49ce05886984457403e9535c | [
"MIT"
] | null | null | null | #!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'interQuartile' function below.
#
# The function accepts following parameters:
# 1. INTEGER_ARRAY values
# 2. INTEGER_ARRAY freqs
#
def a_sorted(X):
    """Sort the list *X* in ascending order, in place, and return it.

    Replaces the original hand-rolled O(n^2) selection sort with the
    built-in Timsort, preserving the in-place-mutation + return-value
    contract the callers rely on.
    """
    X.sort()
    return X
def a_mediana(X):
    """Return the median of the already-sorted, non-empty sequence *X*.

    Odd length: the middle element.  Even length: the mean of the two
    middle elements (a float).  Dead locals (`a`, `b`) from the original
    were removed; the computation is unchanged.
    """
    l = len(X)
    if l % 2 != 0:
        return X[l // 2]
    return (X[l // 2 - 1] + X[l // 2]) / 2
def interQuartile(values, freqs):
    """Print the interquartile range (Q3 - Q1) of the frequency-expanded
    data set, formatted to one decimal place."""
    # Expand each value according to its frequency (indexing into freqs so
    # a too-short freqs list still raises IndexError, as before).
    expanded = []
    for idx, value in enumerate(values):
        expanded.extend([value] * freqs[idx])
    expanded = a_sorted(expanded)
    total = len(expanded)
    half = total // 2
    # Lower half always excludes the upper part; for an odd count the
    # middle element is excluded from the upper half as well.
    q1 = a_mediana(expanded[0:half])
    if total % 2 != 0:
        q3 = a_mediana(expanded[half + 1:total])
    else:
        q3 = a_mediana(expanded[half:total])
    print('{:.1f}'.format(q3 - q1))
if __name__ == '__main__':
    # HackerRank-style stdin: first line is the element count (read but not
    # otherwise used), then the values and their frequencies.
    n = int(input().strip())
    val = list(map(int, input().rstrip().split()))
    freq = list(map(int, input().rstrip().split()))
    interQuartile(val, freq)
| 19.481481 | 63 | 0.478454 |
3c0561115c0cd189225512b9ac0aa6b71780d450 | 1,046 | py | Python | python/akg/ops/poly_gpu/reduce_min.py | xqdan/akg | e28501611d73d3957a1f3c58eeb6b028f2f2765d | [
"Apache-2.0"
] | null | null | null | python/akg/ops/poly_gpu/reduce_min.py | xqdan/akg | e28501611d73d3957a1f3c58eeb6b028f2f2765d | [
"Apache-2.0"
] | null | null | null | python/akg/ops/poly_gpu/reduce_min.py | xqdan/akg | e28501611d73d3957a1f3c58eeb6b028f2f2765d | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""reduce_min"""
import akg
from akg.ops.math_gpu import reduce_min
import akg.topi as topi
@akg.schedule(topi.cuda.reduce_opt.schedule_reduce)
def reduce_min_manual(data, axis, keepdims):
    """Reduce min with manual schedule."""
    # Thin wrapper: delegates to the shared reduce_min op; the decorator
    # attaches TOPI's CUDA reduction schedule to it.
    return reduce_min.reduce_min(data, axis=axis, keepdims=keepdims)
def reduce_min_auto(data, axis, keepdims):
    """Reduce min with auto schedule."""
    # Same op as reduce_min_manual but left undecorated so the framework's
    # automatic scheduler can handle it (module lives under poly_gpu).
    return reduce_min.reduce_min(data, axis=axis, keepdims=keepdims)
| 36.068966 | 74 | 0.762906 |
ea61e58871fe0505e52fb28a576425a1b97e7dd2 | 17,282 | py | Python | api_view_set/viewset.py | Pragnya-Rout/Task-Management | 16dcb2fbdef57b2f289d9d6681aba6443c066dc3 | [
"Apache-2.0"
] | null | null | null | api_view_set/viewset.py | Pragnya-Rout/Task-Management | 16dcb2fbdef57b2f289d9d6681aba6443c066dc3 | [
"Apache-2.0"
] | null | null | null | api_view_set/viewset.py | Pragnya-Rout/Task-Management | 16dcb2fbdef57b2f289d9d6681aba6443c066dc3 | [
"Apache-2.0"
] | null | null | null |
# import required modules
from flask import request,Response,jsonify,make_response,json
from flask_jwt_extended import (
create_access_token,
create_refresh_token,
jwt_refresh_token_required,
get_jwt_identity,
jwt_required,
get_raw_jwt,
)
from datetime import datetime
from flask_restful import Resource, reqparse
from werkzeug.security import generate_password_hash, check_password_hash
from blacklist import BLACKLIST
from models.db_model import DbModel
USER_LOGGED_OUT = "User <{}> successfully logged out."
class Operation:
    """Static DB lookups that check whether a client/employee/manager id exists.

    Each validator returns a ``(True, row_dict)`` tuple on success and a bare
    ``False`` on a miss or on any DB error (callers only truth-test the result).
    """

    @staticmethod
    def validate_client_auth(client_id):
        """ validate authorized_client """
        try:
            # NOTE(review): WHERE clause built by string concatenation --
            # SQL-injection risk; DbModel should support parameterized queries.
            res=DbModel.retrieve_data("clients","*","client_id='"+client_id+"'","")
            if len(res) != 0:
                return True, res[0]
            return False
        except Exception as err:
            # DB errors are swallowed and treated as "not authorized".
            print(err, '-- occurred during fetching client details')
            return False

    @staticmethod
    def validate_employee_auth(employee_id):
        """ validate authorized_employee"""
        try:
            # NOTE(review): same string-concatenated WHERE clause as above.
            res=DbModel.retrieve_data("employees","*","employee_id='"+employee_id+"'","")
            if len(res) != 0:
                return True, res[0]
            return False
        except Exception as err:
            print(err, '-- occurred during fetching employee details')
            return False

    @staticmethod
    def validate_manager_auth(manager_id):
        """ validate authorized_manager"""
        try:
            # NOTE(review): same string-concatenated WHERE clause as above.
            res=DbModel.retrieve_data("managers","*","manager_id='"+manager_id+"'","")
            if len(res) != 0:
                return True, res[0]
            return False
        except Exception as err:
            print(err, '-- occurred during fetching manager details')
            return False
class Login(Resource):
    """Login endpoint: exchanges (id, password, usertype) headers for JWT tokens."""

    @staticmethod
    def post():
        """
        Log in as a client or an employee or a manager;
        """
        try:
            # All three credentials arrive as request headers.
            if not ('id' in request.headers and 'password' in request.headers and 'usertype' in request.headers):
                return {'authenticated': '0', 'message': 'Missing required header.'}
            _id,password,user_type = request.headers['id'],request.headers['password'],request.headers['usertype']
            # Reject blank / whitespace-only header values.
            if (_id.isspace()==True or _id == '')or(password.isspace()==True or password=='')or(user_type.isspace()==True or user_type==''):
                return {'message': 'Headers are missing'}, 401
            if user_type=="employee":
                # NOTE(review): WHERE clause built by string concatenation --
                # SQL-injection risk (same pattern for the other user types).
                emp_data=DbModel.retrieve_data("employees","*","employee_id='"+_id+"'","")
                if len(emp_data)==1:
                    emp_pass=emp_data[0]['employee_password']
                    # Compare against the stored salted hash.
                    if check_password_hash(emp_pass,password):
                        access_token = create_access_token(identity=_id, fresh=True)
                        refresh_token = create_refresh_token(_id)
                        return ({"emp_access_token": access_token, "emp_refresh_token": refresh_token},200,)
                    return {"message": 'password does not match'}, 401
                return {"message": 'invalid_empid'}, 401
            elif user_type=="client":
                client_data=DbModel.retrieve_data("clients","*","client_id='"+_id+"'","")
                if len(client_data)==1:
                    client_pass=client_data[0]['client_password']
                    if check_password_hash(client_pass,password):
                        access_token = create_access_token(identity=_id, fresh=True)
                        refresh_token = create_refresh_token(_id)
                        return ({"client_access_token": access_token, "client_refresh_token": refresh_token},200,)
                    return {"message": 'password does not match'}, 401
                return {"message": 'invalid_client'}, 401
            elif user_type=="manager":
                mngr_data=DbModel.retrieve_data("managers","*","manager_id='"+_id+"'","")
                if len(mngr_data)==1:
                    manager_pass=mngr_data[0]['manager_password']
                    if check_password_hash(manager_pass,password):
                        access_token = create_access_token(identity=_id, fresh=True)
                        refresh_token = create_refresh_token(_id)
                        return ({"manager_access_token": access_token, "manager_refresh_token": refresh_token},200,)
                    return {"message": 'password does not match'}, 401
                return {"message": 'invalid_manager'}, 401
            else:
                # NOTE(review): this Response object is created but never
                # returned -- dead code; the dict below is the actual reply.
                Response(status=401)
                return {"message": 'invalid_user_type'}, 401
        except Exception as err:
            print(err, '-- occurred while trying to login')
            return jsonify({"status": 0, "message": 'Something went wrong!'})
class Create_Clients(Resource):
    """Registration endpoint for client accounts."""

    @staticmethod
    def post():
        """
        Creates a new Client to create task;
        """
        parser = reqparse.RequestParser()
        parser.add_argument("client_id", type=str, required=True,help="client_id is required")
        parser.add_argument("password", type=str, required=True,help="password is required")
        arg_data = parser.parse_args()
        try:
            # NOTE(review): WHERE clause built by string concatenation --
            # SQL-injection risk; should be a parameterized query.
            client_data=DbModel.retrieve_data("clients","*","client_id='"+arg_data['client_id']+"'","")
            if len(client_data)>0:
                return {"message": 'client_id already exists!'}, 401
            # Store only the salted hash, never the plain password.
            password_hash = generate_password_hash(arg_data['password'])
            values={"client_password":password_hash,"client_id":arg_data['client_id']}
            flag=DbModel.insert_Data("clients",values)
            if flag:
                return ({"message": f"Client{arg_data['client_id']} created successfully"},200,)
            return {"message": 'Client could not be created!'}, 401
        except Exception as err:
            print(err, '-- occurred while trying to create client')
            return jsonify({"status": 0, "message": 'Something went wrong!'})
class Create_Managers(Resource):
    """Registration endpoint for manager accounts (mirrors Create_Clients)."""

    @staticmethod
    def post():
        """
        Creates a new Manager to assign task;
        """
        parser = reqparse.RequestParser()
        parser.add_argument("manager_id", type=str, required=True,help="manager_id is required")
        parser.add_argument("password", type=str, required=True,help="password is required")
        arg_data = parser.parse_args()
        try:
            # NOTE(review): string-concatenated WHERE clause -- SQL-injection risk.
            mngr_data=DbModel.retrieve_data("managers","*","manager_id='"+arg_data['manager_id']+"'","")
            if len(mngr_data)>0:
                return {"message": 'manager_id already exists!'}, 401
            # Store only the salted hash of the password.
            password_hash = generate_password_hash(arg_data['password'])
            values={"manager_password":password_hash,"manager_id":arg_data['manager_id']}
            flag=DbModel.insert_Data("managers",values)
            if flag:
                return ({"message": f"Manager_{arg_data['manager_id']} created successfully"},200,)
            return {"message": 'Manager could not be created!'}, 401
        except Exception as err:
            print(err, '-- occurred while trying to create manager')
            return jsonify({"status": 0, "message": 'Something went wrong!'})
class TokenRefresh(Resource):
    """Issue a new access token from a valid refresh token."""

    @classmethod
    @jwt_refresh_token_required
    def post(cls):
        current_user = get_jwt_identity()
        # fresh=False: the refreshed token cannot be used on fresh-only endpoints.
        new_token = create_access_token(identity=current_user, fresh=False)
        return {"access_token": new_token}, 200
class Create_Edit_Task(Resource):
    """Task-creation endpoint for authenticated clients.

    Despite the name, this resource only creates; editing lives in Edit_Task.
    """

    @staticmethod
    @jwt_required
    def post():
        """
        Creates a new task;
        """
        parser = reqparse.RequestParser()
        parser.add_argument("title", type=str, required=True,help="title is required")
        parser.add_argument("description", type=str, required=True,help="description is required")
        arg_data = parser.parse_args()
        try:
            # check for missing headers
            if not ('client_id' in request.headers):
                return {'authenticated': '0', 'message': 'Missing required header.'}
            client_id = request.headers['client_id']
            # Only ids present in the clients table may create tasks.
            res=Operation.validate_client_auth(client_id)
            if res:
                values={"title":arg_data['title'],"description":arg_data['description'],"client_id":client_id}
                flag=DbModel.insert_Data("tasks",values)
                if flag:
                    return {"message": f"Task_{arg_data['title']} has been created successfully!"}, 201
                return jsonify({"status": 0, "message": "Task couldn't be created!"})
            return {'authenticated': '0', 'message': 'Invalid client.'}
        except Exception as err:
            print(err, '-- occurred while trying to create task')
            return {"status": 0, "message": "Something went wrong!"}
class Task_List(Resource):
    """List all tasks belonging to the authenticated client."""

    @staticmethod
    @jwt_required
    def get():
        """
        List of tasks ;
        """
        try:
            # check for missing headers
            if not ('client_id' in request.headers):
                return {'authenticated': '0', 'message': 'Missing required header.'}
            client_id = request.headers['client_id']
            res=Operation.validate_client_auth(client_id)
            if res:
                # NOTE(review): string-concatenated WHERE clause -- SQL-injection risk.
                flag=DbModel.retrieve_data("tasks","*","client_id='"+client_id+"'","")
                if flag:
                    response = make_response(jsonify({'message': f"Task List of client_{client_id}", 'list': flag},200))
                    return response
                return jsonify({"status": 0, "message": "No task to show!"})
            return {'authenticated': '0', 'message': 'Invalid client.'}
        except Exception as err:
            print(err, '-- occurred while trying to show task details')
            return {"status": 0, "message": "Something went wrong!"}
class Edit_Task(Resource):
    """Update a task's title/description for an authenticated client."""

    @staticmethod
    @jwt_required
    def put():
        """
        Edit task ;
        """
        parser = reqparse.RequestParser()
        parser.add_argument("title", type=str, required=True,help="title is required")
        parser.add_argument("description", type=str, required=True,help="description is required")
        parser.add_argument("task_id", type=str, required=True,help="task_id is required")
        arg_data = parser.parse_args()
        try:
            # check for missing headers
            if not ('client_id' in request.headers):
                return {'authenticated': '0', 'message': 'Missing required header.'}
            client_id = request.headers['client_id']
            res=Operation.validate_client_auth(client_id)
            if res:
                # NOTE(review): no ownership check -- any valid client can edit
                # any task id; presumably tasks should be filtered by client_id.
                values={"title":arg_data['title'],"description":arg_data['description']}
                # NOTE(review): leftover debug print; remove in production.
                print(f"id={arg_data['task_id']}")
                flag=DbModel.update_data("tasks",values,f"id={arg_data['task_id']}")
                if flag:
                    return {"message": f"Task_{arg_data['title']} has been edited successfully!"}, 201
                # NOTE(review): message says "created" on the edit path.
                return jsonify({"status": 0, "message": "Task couldn't be created!"})
            return {'authenticated': '0', 'message': 'Invalid client.'}
        except Exception as err:
            print(err, '-- occurred while trying to edit task')
            return {"status": 0, "message": "Something went wrong!"}
class Assign_Task(Resource):
    """Let an authenticated manager assign an existing task to an employee."""

    @staticmethod
    @jwt_required
    def put():
        """
        Manager assigns a task to employee;
        """
        parser = reqparse.RequestParser()
        parser.add_argument("employee_id", type=str, required=True,help="employee_id is required")
        parser.add_argument("task_id", type=str, required=True,help="task_id is required")
        arg_data = parser.parse_args()
        try:
            # check for missing headers
            if not ('manager_id' in request.headers):
                return {'authenticated': '0', 'message': 'Missing required header.'}
            manager_id = request.headers['manager_id']
            res=Operation.validate_manager_auth(manager_id)
            if res:
                # Assignment date recorded as local date (no timezone handling).
                now = datetime.now()
                current_date = now.strftime('%Y-%m-%d')
                # The assignment is stored on the employee row: task, assigning
                # manager, status "0" (pending), and date.
                values={
                    "task_id":arg_data['task_id'],
                    "manager_id":manager_id,
                    "status":"0",
                    "task_date":current_date
                }
                # NOTE(review): string-concatenated WHERE clause -- SQL-injection risk.
                flag=DbModel.update_data("employees",values,"employee_id='"+arg_data['employee_id']+"'")
                if flag:
                    return {"message": f"Task_{arg_data['task_id']} has been assigned to Employee_{arg_data['employee_id']} successfully!"}, 201
                return jsonify({"status": 0, "message": "Task couldn't be assigned!"})
            return {'authenticated': '0', 'message': 'Invalid manager.'}
        except Exception as err:
            print(err, '-- occurred while trying to assign task')
            return {"status": 0, "message": "Something went wrong!"}
class Delete_Task(Resource):
    """Let an authenticated manager delete a task by id (URL parameter)."""

    @staticmethod
    @jwt_required
    def delete(task_id):
        """
        Manager delete a task ;
        """
        try:
            # check for missing headers
            if not ('manager_id' in request.headers):
                return {'authenticated': '0', 'message': 'Missing required header.'}
            manager_id = request.headers['manager_id']
            res=Operation.validate_manager_auth(manager_id)
            if res:
                # NOTE(review): string-concatenated WHERE clause -- SQL-injection risk.
                flag=DbModel.delete("tasks","id='"+task_id+"'")
                if flag:
                    return {"message": f"Task_{task_id} has been deleted successfully!"}, 201
                return jsonify({"status": 0, "message": "Task couldn't be deleted!"})
            return {'authenticated': '0', 'message': 'Invalid manager.'}
        except Exception as err:
            print(err, '-- occurred while trying to delete task')
            return {"status": 0, "message": "Something went wrong!"}
class Create_Employee(Resource):
    """Registration endpoint for employee accounts (mirrors Create_Clients)."""

    @staticmethod
    def post():
        """
        Creates a new Employee to perform task;
        """
        parser = reqparse.RequestParser()
        parser.add_argument("employee_id", type=str, required=True,help="employee_id is required")
        parser.add_argument("password", type=str, required=True,help="password is required")
        arg_data = parser.parse_args()
        try:
            # NOTE(review): string-concatenated WHERE clause -- SQL-injection risk.
            emp_data=DbModel.retrieve_data("employees","*","employee_id='"+arg_data['employee_id']+"'","")
            if len(emp_data)>0:
                return {"message": 'employee_id already exists!'}, 401
            # Store only the salted hash of the password.
            password_hash = generate_password_hash(arg_data['password'])
            values={"employee_password":password_hash,"employee_id":arg_data['employee_id']}
            flag=DbModel.insert_Data("employees",values)
            if flag:
                return ({"message": f"Employee_{arg_data['employee_id']} created successfully"},200,)
            return {"message": 'Employee could not be created!'}, 401
        except Exception as err:
            print(err, '-- occurred while trying to create employee')
            return jsonify({"status": 0, "message": 'Something went wrong!'})
class Complete_Task(Resource):
    """Let an authenticated employee mark a task as completed (status "1")."""

    @staticmethod
    @jwt_required
    def put(task_id):
        """
        Complete task;
        """
        try:
            # check for missing headers
            if not ('employee_id' in request.headers):
                return {'authenticated': '0', 'message': 'Missing required header.'}
            employee_id = request.headers['employee_id']
            res=Operation.validate_employee_auth(employee_id)
            if res:
                # Status "1" marks the task as done.
                values={
                    "status":"1"
                }
                # NOTE(review): string-concatenated WHERE clause -- SQL-injection
                # risk; also no check that this task was assigned to this employee.
                flag=DbModel.update_data("tasks",values,"id='"+task_id+"'")
                if flag:
                    return {"message": f"Task_{task_id} has been completed by Employee_{employee_id} successfully!"}, 201
                return jsonify({"status": 0, "message": "Task couldn't be completed!"})
            return {'authenticated': '0', 'message': 'Invalid employee.'}
        except Exception as err:
            print(err, '-- occurred while trying to complete task')
            return {"status": 0, "message": "Something went wrong!"}
class Logout(Resource):
    """Revoke the caller's access token by blacklisting its JWT id."""

    @classmethod
    @jwt_required
    def post(self):
        # NOTE(review): declared @classmethod but the parameter is named
        # "self"; it receives the class object here.
        jti = get_raw_jwt()["jti"]  # jti is "JWT ID", a unique identifier for a JWT.
        user_id = get_jwt_identity()
        # Adding the jti to BLACKLIST makes this token unusable from now on.
        BLACKLIST.add(jti)
        return {"message": USER_LOGGED_OUT.format(user_id)}, 200
4e10c78b2fa74176d8f1f391cdac3a1e610c8a02 | 1,193 | py | Python | rabbitmq/routing/receive_logs_direct.py | calebgregory/scraps | cfc0ef608db4520c1a1e22fccdbcae73dfb00e39 | [
"MIT"
] | null | null | null | rabbitmq/routing/receive_logs_direct.py | calebgregory/scraps | cfc0ef608db4520c1a1e22fccdbcae73dfb00e39 | [
"MIT"
] | null | null | null | rabbitmq/routing/receive_logs_direct.py | calebgregory/scraps | cfc0ef608db4520c1a1e22fccdbcae73dfb00e39 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import pika
import sys
# Python 2 script (print statements): RabbitMQ consumer bound to a direct
# exchange, receiving only the log severities given on the command line.
connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
channel = connection.channel()
channel.exchange_declare(exchange='direct_logs',
                         type='direct')
# Exclusive, auto-named queue: deleted when this consumer disconnects.
result = channel.queue_declare(exclusive=True)
queue_name = result.method.queue
# if you only want to save 'warning' and 'error' (and not 'info') log messages to a file,
# open a console and type:
# $ python receive_logs_direct.py warning error >> logs_from_rabbit.log
# if you want to see all of the messages on your screen, open a new terminal and
# $ python receive_logs_direct.py warning error info
severities = sys.argv[1:]
if not severities:
    print >> sys.stderr, "Usage: %s [info] [warning] [error]" % \
        (sys.argv[0],)
    sys.exit(1)
# One binding per requested severity: routing_key selects which messages arrive.
for severity in severities:
    channel.queue_bind(exchange='direct_logs',
                       queue=queue_name,
                       routing_key=severity)
print ' [*] Waiting for messages. To exit press CTRL+C'
def callback(ch, method, properties, body):
    # Invoked for every delivered message; prints routing key and payload.
    print " [x] %r:%r" % (method.routing_key, body,)
# no_ack=True: messages are not acknowledged, so they are lost if we crash.
channel.basic_consume(callback,
                      queue=queue_name,
                      no_ack=True)
channel.start_consuming()
| 29.097561 | 89 | 0.702431 |
707c7ca55a77e4d9fa775b85f3e692e351af2331 | 1,788 | py | Python | test/functional/feature_filelock.py | estxcoin/estcore | 4398b1d944373fe25668469966fa2660da454279 | [
"MIT"
] | 1 | 2019-09-17T07:53:52.000Z | 2019-09-17T07:53:52.000Z | test/functional/feature_filelock.py | estxcoin/estcore | 4398b1d944373fe25668469966fa2660da454279 | [
"MIT"
] | null | null | null | test/functional/feature_filelock.py | estxcoin/estcore | 4398b1d944373fe25668469966fa2660da454279 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Check that it's not possible to start a second bitcoind instance using the same datadir or wallet."""
import os
from test_framework.test_framework import BitcoinTestFramework
from test_framework.test_node import ErrorMatch
class FilelockTest(BitcoinTestFramework):
    """Functional test: a second node must refuse to start on an already-locked
    datadir or wallet directory."""

    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 2

    def setup_network(self):
        # Only node 0 is started here; node 1 is started manually in run_test
        # so it can be pointed at node 0's datadir/walletdir.
        self.add_nodes(self.num_nodes, extra_args=None)
        self.nodes[0].start([])
        self.nodes[0].wait_for_rpc_connection()

    def run_test(self):
        datadir = os.path.join(self.nodes[0].datadir, 'regtest')
        self.log.info("Using datadir {}".format(datadir))
        self.log.info("Check that we can't start a second bitcoind instance using the same datadir")
        expected_msg = "Error: Cannot obtain a lock on data directory {}. Estx Core is probably already running.".format(datadir)
        self.nodes[1].assert_start_raises_init_error(extra_args=['-datadir={}'.format(self.nodes[0].datadir), '-noserver'], expected_msg=expected_msg)
        if self.is_wallet_compiled():
            wallet_dir = os.path.join(datadir, 'wallets')
            self.log.info("Check that we can't start a second bitcoind instance using the same wallet")
            # Only the message prefix is matched (PARTIAL_REGEX): the full
            # error contains environment-specific detail.
            expected_msg = "Error: Error initializing wallet database environment"
            self.nodes[1].assert_start_raises_init_error(extra_args=['-walletdir={}'.format(wallet_dir), '-noserver'], expected_msg=expected_msg, match=ErrorMatch.PARTIAL_REGEX)
FilelockTest().main()
| 48.324324 | 177 | 0.719799 |
a5f1e844b5769301d05d011ca77f34a4062a7eb4 | 899 | py | Python | iexfinance/stocks/movers.py | jto-d/iexfinance | 8bf958f269638b6f8d2dbdd857c0ef2ba324cdd4 | [
"Apache-2.0"
] | 653 | 2018-01-02T21:03:49.000Z | 2022-03-24T06:37:10.000Z | iexfinance/stocks/movers.py | jto-d/iexfinance | 8bf958f269638b6f8d2dbdd857c0ef2ba324cdd4 | [
"Apache-2.0"
] | 219 | 2017-12-09T21:44:43.000Z | 2022-03-23T20:21:46.000Z | iexfinance/stocks/movers.py | jto-d/iexfinance | 8bf958f269638b6f8d2dbdd857c0ef2ba324cdd4 | [
"Apache-2.0"
] | 155 | 2018-02-07T17:08:18.000Z | 2022-03-13T23:36:57.000Z | import pandas as pd
from iexfinance.base import _IEXBase
class MoversReader(_IEXBase):
    """
    Base class for retrieving market movers from the Stocks List endpoint

    Parameters
    ----------
    mover: str
        Desired mover
    """

    _AVAILABLE_MOVERS = [
        "mostactive",
        "gainers",
        "losers",
        "iexvolume",
        "iexpercent",
        "infocus",
    ]

    def __init__(self, mover=None, **kwargs):
        super(MoversReader, self).__init__(**kwargs)
        # Reject anything outside the supported mover list up front.
        if mover not in self._AVAILABLE_MOVERS:
            raise ValueError("Please input a valid market mover.")
        self.mover = mover

    @property
    def url(self):
        # Endpoint path for the selected mover.
        return "stock/market/list/" + self.mover

    def _convert_output(self, out):
        # Empty payload -> empty frame; otherwise index the rows by symbol.
        if not out:
            return pd.DataFrame()
        return pd.DataFrame(out).set_index("symbol")
| 21.926829 | 73 | 0.576196 |
588895082be9dfbc580142498024060350f37cbb | 3,427 | py | Python | src/primaires/combat/commandes/viser/__init__.py | stormi/tsunami | bdc853229834b52b2ee8ed54a3161a1a3133d926 | [
"BSD-3-Clause"
] | null | null | null | src/primaires/combat/commandes/viser/__init__.py | stormi/tsunami | bdc853229834b52b2ee8ed54a3161a1a3133d926 | [
"BSD-3-Clause"
] | null | null | null | src/primaires/combat/commandes/viser/__init__.py | stormi/tsunami | bdc853229834b52b2ee8ed54a3161a1a3133d926 | [
"BSD-3-Clause"
] | null | null | null | # -*-coding:Utf-8 -*
# Copyright (c) 2010 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Package contenant la commande 'viser'.
"""
from random import random
from primaires.interpreteur.commande.commande import Commande
class CmdViser(Commande):
    """Command 'viser' (aim).

    Lets a player aim at a character previously spotted with the 'scruter'
    (scan) command, or clear the current target when called without argument.
    """

    def __init__(self):
        """Command constructor."""
        Commande.__init__(self, "viser", "target")
        self.nom_categorie = "combat"
        self.schema = "(<nombre>)"
        self.aide_courte = "vise un personnage"
        self.aide_longue = \
            "Cette commande vous permet de viser un personnage que " \
            "vous avez auparavant aperçu avec la commande %scruter%. " \
            "Vous devez simplement préciser en paramètre un nombre " \
            "(celui donné par %scruter% pour la cible particulière). " \
            "Si vous souhaitez arrêter de viser qui que ce soit, " \
            "entrez cette commande sans paramètre."

    def interpreter(self, personnage, dic_masques):
        """Interpret the command."""
        if dic_masques["nombre"]:
            # A number was given: look it up in the targets spotted earlier.
            nombre = dic_masques["nombre"].nombre
            cibles = importeur.combat.cibles.get(personnage)
            if cibles is None:
                personnage << "|err|Vous ne voyez aucune cible " \
                        "pour l'heure.|ff|"
                return
            try:
                # Targets are 1-indexed for the player.
                chemin, cible = cibles[nombre - 1]
            except IndexError:
                personnage << "|err|Ce nombre est invalide.|ff|"
                return
            importeur.combat.cible[personnage] = cible
            personnage.envoyer("Vous commencez à viser {}.", cible)
        else:
            # No argument: clear the current target, if any.
            if personnage in importeur.combat.cible:
                del importeur.combat.cible[personnage]
                personnage << "Vous ne visez plus personne."
            else:
                personnage << "Vous ne visez personne actuellement."
| 41.289157 | 79 | 0.670266 |
d119c62656e6e1724347d7700b1e745b335a2dee | 1,960 | py | Python | matches/views.py | asyler/betleague | 2ae43ae26d6a6c8582a831bc56c2144ed3134202 | [
"MIT"
] | null | null | null | matches/views.py | asyler/betleague | 2ae43ae26d6a6c8582a831bc56c2144ed3134202 | [
"MIT"
] | 1 | 2017-12-14T07:42:02.000Z | 2017-12-14T10:22:19.000Z | matches/views.py | asyler/betleague | 2ae43ae26d6a6c8582a831bc56c2144ed3134202 | [
"MIT"
] | null | null | null | import re
from collections import defaultdict
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.exceptions import ValidationError
from django.db.models import Q, F
from django.shortcuts import render, redirect
# Create your views here.
from matches.models import Match, Bet, EmptyBet
@login_required
def user_bets(request):
    """Show the authenticated user's bets and save edits submitted via POST."""
    if request.method == 'POST':
        # Collect posted values keyed by match id; field names look like
        # "match_<id>" (result) and "match_<id>_shootout_winner".
        data = defaultdict(dict)
        for key, value in request.POST.items():
            _match = re.match(r'^match_(\d+)$', key)
            _match_shootout = re.match(r'^match_(\d+)_shootout_winner$', key)
            if _match:
                match_id = _match.group(1)
                data[match_id]['result'] = value
            if _match_shootout:
                match_id = _match_shootout.group(1)
                data[match_id]['shootout_winner'] = value
        for match_id, match_result in data.items():
            result = match_result['result']
            shootout_result = match_result.get('shootout_winner', None)
            # Update the existing bet or create a fresh one for this match.
            try:
                bet = Bet.objects.get(user=request.user, match__id=match_id)
            except Bet.DoesNotExist:
                bet = Bet(user=request.user, match_id=match_id)
            try:
                bet.set_bet(result, shootout_result)
                bet.save()
            except ValidationError as e:
                # Surface per-match validation errors, tagged by match id.
                messages.error(request, e.message, extra_tags=match_id)
            except EmptyBet:
                # Intentionally blank bets are simply skipped.
                pass
        messages.success(request, 'Saved', extra_tags='saved')
        return redirect('user_bets')
    matches = Match.objects.order_by('datetime').all()
    for match in matches:
        # Attach this user's bet (saved or unsaved placeholder) to each match
        # so the template can render current values.
        try:
            match.bet = Bet.objects.get(match=match, user=request.user)
        except Bet.DoesNotExist:
            match.bet = Bet(user=request.user, match=match)
    return render(request, 'user_bets.html', {
        'matches': matches
    })
| 33.793103 | 77 | 0.616837 |
5964889650fc64ddca00a835c9311e6857b708b3 | 373 | py | Python | Long Pressed Name.py | ngdeva99/Fulcrum | 3a5c69005bbaf2a5aebe13d1907f13790210fb32 | [
"MIT"
] | null | null | null | Long Pressed Name.py | ngdeva99/Fulcrum | 3a5c69005bbaf2a5aebe13d1907f13790210fb32 | [
"MIT"
] | null | null | null | Long Pressed Name.py | ngdeva99/Fulcrum | 3a5c69005bbaf2a5aebe13d1907f13790210fb32 | [
"MIT"
] | null | null | null | class Solution:
def isLongPressedName(self, name: str, typed: str) -> bool:
i=0
j=0
while i<len(name) and j<len(typed):
if name[i]==typed[j]:
i+=1
j+=1
else:
j+=1
if i==len(name):
return True
return False
| 20.722222 | 63 | 0.372654 |
9399c35d02603a8a8f992c028032d50779c5bdb1 | 1,617 | py | Python | reproductions/offline/plas.py | Mohan-Zhang-u/d3rlpy | 3ab3c0bbd6f86e73c171a6084f3130d60be85b5f | [
"MIT"
] | null | null | null | reproductions/offline/plas.py | Mohan-Zhang-u/d3rlpy | 3ab3c0bbd6f86e73c171a6084f3130d60be85b5f | [
"MIT"
] | null | null | null | reproductions/offline/plas.py | Mohan-Zhang-u/d3rlpy | 3ab3c0bbd6f86e73c171a6084f3130d60be85b5f | [
"MIT"
] | 2 | 2020-12-29T19:20:38.000Z | 2022-02-26T20:14:30.000Z | import argparse
import d3rlpy
from sklearn.model_selection import train_test_split
def main():
    """Train PLAS on a d4rl-style dataset with the paper's hyperparameters."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset', type=str, default='hopper-medium-v0')
    parser.add_argument('--seed', type=int, default=1)
    parser.add_argument('--gpu', type=int)
    args = parser.parse_args()

    dataset, env = d3rlpy.datasets.get_dataset(args.dataset)

    # fix seed
    d3rlpy.seed(args.seed)
    env.seed(args.seed)

    # Hold out 20% of episodes for evaluation scorers.
    _, test_episodes = train_test_split(dataset, test_size=0.2)

    # Smaller VAE for the medium-replay datasets, per the dataset variant.
    if 'medium-replay' in env.unwrapped.spec.id.lower():
        vae_encoder = d3rlpy.models.encoders.VectorEncoderFactory([128, 128])
    else:
        vae_encoder = d3rlpy.models.encoders.VectorEncoderFactory([750, 750])

    encoder = d3rlpy.models.encoders.VectorEncoderFactory([400, 300])

    plas = d3rlpy.algos.PLAS(actor_encoder_factory=encoder,
                             critic_encoder_factory=encoder,
                             imitator_encoder_factory=vae_encoder,
                             lam=1.0,
                             warmup_steps=500000,
                             use_gpu=args.gpu)

    plas.fit(
        dataset.episodes,
        eval_episodes=test_episodes,
        n_steps=1000000,  # RL starts at 500000 step
        n_steps_per_epoch=1000,
        save_interval=10,
        scorers={
            'environment': d3rlpy.metrics.evaluate_on_environment(env),
            'value_scale': d3rlpy.metrics.average_value_estimation_scorer,
        },
        experiment_name=f"PLAS_{args.dataset}_{args.seed}")
if __name__ == '__main__':
    # Script entry point.
    main()
| 33 | 77 | 0.638219 |
92d6e7d97e4cf34dde15ec664b8d41d73d3471b7 | 158 | py | Python | python/code/dns/dnsa.py | hgfgood/note | f89febca3ce925cba4cd4c8068a4fa124f23c810 | [
"Apache-2.0"
] | null | null | null | python/code/dns/dnsa.py | hgfgood/note | f89febca3ce925cba4cd4c8068a4fa124f23c810 | [
"Apache-2.0"
] | null | null | null | python/code/dns/dnsa.py | hgfgood/note | f89febca3ce925cba4cd4c8068a4fa124f23c810 | [
"Apache-2.0"
] | null | null | null | #! /usr/bin/python
import dns.resolver
# Resolve the A records for a fixed domain and print each answer address.
domain="www.google.com"
A=dns.resolver.query(domain,'A')
# Walk every answer RRset in the response and print each record's address.
for x in A.response.answer:
    for i in x:
        print (i.address)
| 17.555556 | 32 | 0.708861 |
5d605a03410ace25a6b39e44508ef8d30db722ce | 2,703 | py | Python | flappy/game.py | skielred/FlappyTriangle | 5615ce66658bebfffe08db6949eb7743a822e330 | [
"MIT"
] | 1 | 2020-12-14T22:17:28.000Z | 2020-12-14T22:17:28.000Z | flappy/game.py | skielred/FlappyTriangle | 5615ce66658bebfffe08db6949eb7743a822e330 | [
"MIT"
] | null | null | null | flappy/game.py | skielred/FlappyTriangle | 5615ce66658bebfffe08db6949eb7743a822e330 | [
"MIT"
] | null | null | null | import logging
from pyglet.graphics import Batch
from pyglet.shapes import Rectangle
from pyglet.window import Window
from pyglet import app, clock, media
from flappy import Grid, Pipe, Player
class Game():
    """Flappy-style game: a player, scrolling pipes, score and speed state,
    all drawn into one pyglet batch inside a single window."""

    def __init__(self):
        self.batch = Batch()
        self.background = Rectangle(0, 0, 0, 0)
        self.window = Window()
        # Route window events (keys, mouse, draw, resize) to this object.
        self.window.push_handlers(self)
        self.on_resize(self.window.width, self.window.height)
        # Physics at ~60 Hz, diagnostic logging once per second.
        clock.schedule_interval(self.update, 1 / 60)
        clock.schedule_interval(self.log, 1)
        self.sounds = {
            'fail': media.load('resources/failure.mp3', streaming=False),
            'jump': media.load('resources/jump.wav', streaming=False),
            'score': media.load('resources/score.wav', streaming=False),
        }
        self.reset()

    def log(self, delta):
        # Periodic state dump (scheduled once per second).
        logging.info('Current speed: {}'.format(self.speed))
        logging.info('Current score: {}'.format(self.points))

    def reset(self):
        """Restore the initial game state (fresh player, two pipes)."""
        self.pipes = []
        self.points = 0
        self.speed = 2
        self.player = Player(batch=self.batch)
        self.create_pipe()
        # Second pipe starts half a screen (plus pipe width) further right.
        self.create_pipe(x=(Grid.columns / 2) + (Pipe.WIDTH / 2))

    def fail(self):
        # Play the failure sound and restart.
        self.sounds['fail'].play()
        self.reset()

    def create_pipe(self, *args, **kwargs):
        # Pipes default to the shared batch unless one is supplied.
        kwargs['batch'] = kwargs.get('batch', self.batch)
        self.pipes.append(Pipe(*args, **kwargs))

    def update(self, delta):
        """Per-frame step: move player/pipes, detect scoring and collisions."""
        self.player.update(delta)
        # Scroll speed ramps up slowly over time.
        self.speed += delta * 0.1
        delta_x = delta * Grid.x() * self.speed
        if self.player.is_offscreen():
            self.fail()
            return
        # NOTE(review): self.pipes is mutated (remove) while being iterated;
        # works here only because at most one pipe scrolls off per frame.
        for pipe in self.pipes:
            pipe.scroll(delta_x)
            if pipe.is_offscreen():
                self.pipes.remove(pipe)
                self.create_pipe()
            if pipe.collides(self.player.center):
                self.fail()
            if self.player.cleared(pipe):
                self.score()

    def score(self):
        # One point per cleared pipe.
        self.sounds['score'].play()
        self.points += 1

    def jump(self):
        logging.info('jumping')
        self.sounds['jump'].play()
        self.player.jump()

    def on_key_press(self, symbol, modifiers):
        # Any key triggers a jump.
        self.jump()

    def on_mouse_press(self, x, y, button, modifiers):
        # Any mouse button triggers a jump.
        self.jump()

    def on_draw(self):
        self.window.clear()
        self.background.draw()
        self.batch.draw()

    def on_resize(self, width, height):
        Grid.update_factor(width, height)
        self.background.width, self.background.height = width, height
        # Player may not exist yet: on_resize runs once during __init__.
        if getattr(self, 'player', None):
            self.player.resize()
def run():
    """Create the game and enter pyglet's main event loop (blocks)."""
    flappy_game = Game()  # keep a reference alive for the lifetime of the loop
    app.run()
| 29.380435 | 73 | 0.579726 |
6f09ec972a6b7d3ae21c70a1c31b94e410bb29e4 | 756 | py | Python | server/migrations/0064_clean_friendly_names.py | lfaraone/sal | d0dff90cebcbc87f18c2c6957264f21566d52000 | [
"Apache-2.0"
] | 1 | 2019-11-01T20:54:47.000Z | 2019-11-01T20:54:47.000Z | server/migrations/0064_clean_friendly_names.py | grahamgilbert/sal | d247ec1ea8855e65e5855b0dd63eae93b40f86ca | [
"Apache-2.0"
] | null | null | null | server/migrations/0064_clean_friendly_names.py | grahamgilbert/sal | d247ec1ea8855e65e5855b0dd63eae93b40f86ca | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from server.models import *
from django.db import migrations, models
def clean_model_names(apps, schema_editor):
    """Blank out the friendly model name on every non-macOS machine.

    Non-Darwin devices were previously given an incorrect friendly name,
    so this data migration clears the field on each of them.
    """
    machine_model = apps.get_model("server", "Machine")
    for machine in machine_model.objects.exclude(os_family='Darwin'):
        machine.machine_model_friendly = ''
        machine.save()
class Migration(migrations.Migration):
    # Must run after the migration that removed MachineDetailPlugin.type.
    dependencies = [
        ('server', '0063_remove_machinedetailplugin_type'),
    ]

    # Forward-only data migration; no reverse function is supplied, so this
    # migration cannot be unapplied.
    operations = [
        migrations.RunPython(clean_model_names),
    ]
| 25.2 | 67 | 0.703704 |
988968595d106e8aea7cc502ae7ce9d6cd3cded6 | 12,544 | py | Python | Cued_Deep_Breathing.py | RayStick/BreathingTasks_PsychoPy | 207231cc6740eb11fbc32a8bb811f1fe86f30fd5 | [
"MIT"
] | 1 | 2021-07-01T12:26:17.000Z | 2021-07-01T12:26:17.000Z | Cued_Deep_Breathing.py | RayStick/BreathingTasks_PsychoPy | 207231cc6740eb11fbc32a8bb811f1fe86f30fd5 | [
"MIT"
] | 1 | 2022-02-22T20:21:17.000Z | 2022-02-22T23:42:48.000Z | Cued_Deep_Breathing.py | RayStick/BreathingTasks_PsychoPy | 207231cc6740eb11fbc32a8bb811f1fe86f30fd5 | [
"MIT"
] | 1 | 2021-07-01T12:26:22.000Z | 2021-07-01T12:26:22.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Cued Deep Breathing (CDB)
Instructions are displayed to the participant in this order: Get Ready, Deep Breaths, Breathe Normally
The script can be started by an MRI pulse trigger.
"""
# import necessary libraries & functions
from __future__ import absolute_import, division
from psychopy import gui, visual, core, data, event, logging
from psychopy.constants import (NOT_STARTED, STARTED, FINISHED)
import os
from psychopy.hardware.emulator import launchScan # this is to read in TTL pulse from MRI scanner as a trigger
######################
# CHANGE PARAMETERS #
######################
scan_trigger = 5 # value the MRI pulse trigger is read in as
doRest = 3 # 0 = no rest; 1 = rest before CDB task; 2 = rest after CDB task; 3= rest before AND after CDB task
tResting_start = 28 # duration of resting fixation at the start in seconds
tResting_end = 30 # duration of resting fixation at the end in seconds
# Task components : Rest, Get Ready, IN-OUT, Breathe Normally
trialnum = 2 # number of CDB repeats
tGetReady = 2 # duration of get ready warning before CDB section
tCDB = 8 # duration of CDB
tCDBPace = 4 # duration of each breath in/out in seconds e.g. 6.0 s would be 3s IN and 3s OUT (tCDB / tCDBPace needs to be integer )
tFree = 43 # duration of free breathing in between each CDB section
CDB_instructions = 'DEEP BREATHING task \n \nTake deep breaths IN and OUT when cued \n \nBreathe through your nose'
end_exp_key = 'escape' # key to press to end the experiment as it is running
#######
# RUN #
#######
# Define Timings
tLength = tGetReady + tCDB + tFree  # total length of one trial
rCDBPace = tCDB/tCDBPace  # number of repeats (breath in and out) in the CDB part - has to be an integer else script will terminate
if rCDBPace != int(rCDBPace):
    print('** WARNING:' + str(rCDBPace) + ' is not an integer, please change tCDBPace')
    core.quit()
rCDBPace = int(rCDBPace)
# Define Paths & Data Saving
_thisDir = os.path.dirname(os.path.abspath(__file__))  # full path this python script is saved to
os.chdir(_thisDir)  # change cwd so relative paths start from the same directory
expName = os.path.basename(__file__)  # name of this file
expInfo = {'Participant': '', 'Session': '001'}
# Modal dialog for participant/session entry; cancel aborts the experiment.
dlg = gui.DlgFromDict(dictionary=expInfo, title=expName)
if dlg.OK is False:
    core.quit()  # user pressed cancel
expInfo['date'] = data.getDateStr()  # add a simple timestamp
expInfo['expName'] = expName
# Data file name stem = absolute path + name; later add .psyexp, .csv, .log, etc
filename = _thisDir + os.sep + u'data/%s_%s_%s' % (expInfo['Participant'], expName, expInfo['date'])
# An ExperimentHandler isn't essential but helps with data saving
thisExp = data.ExperimentHandler(name=expName, version='',
                                 extraInfo=expInfo, runtimeInfo=None,
                                 originPath=None,
                                 savePickle=True, saveWideText=True,
                                 dataFileName=filename)
# Record the task parameters alongside the data for reproducibility.
expInfo['doRest'] = doRest
expInfo['tResting_start'] = tResting_start
expInfo['tResting_end'] = tResting_end
expInfo['trialnum'] = trialnum
expInfo['tFree'] = tFree
expInfo['tCDB'] = tCDB
expInfo['tCDBPace'] = tCDBPace
# save a log file for detailed info
logFile = logging.LogFile(filename+'.log', level=logging.EXP)
logging.console.setLevel(logging.WARNING)  # this outputs to the screen, not a file
# Set-up the Window
win = visual.Window(size=(1440, 900), fullscr=True, screen=0,
                    allowGUI=False, allowStencil=False,
                    monitor='testMonitor', color=[0, 0, 0], colorSpace='rgb',
                    blendMode='avg', useFBO=True)
win.recordFrameIntervals = True
# store frame rate of monitor if we can measure it
expInfo['frameRate'] = win.getActualFrameRate()
if expInfo['frameRate'] is not None:
    frameDur = 1.0 / round(expInfo['frameRate'])
else:
    frameDur = 1.0 / 60.0  # could not measure, so guess
    print('**WARNING: could not retrieve frame rate, had to assume 60fps. Experiment timings may be wrong**')
# Define information for each routine in the experiment
# instructions
Instruct = visual.TextStim(win=win, name='Instruct',
                           text=CDB_instructions,
                           font=u'Arial',
                           pos=(0, 0), height=0.15, wrapWidth=1, ori=0,
                           color=u'white', colorSpace='rgb', opacity=1,
                           alignHoriz='center', depth=0.0)
# trial
trialClock = core.Clock()
# "Breathe Normally" cue shown during the free-breathing period.
free = visual.TextStim(win=win, name='free',
                       text=u'Breathe \nNormally',
                       font=u'Arial',
                       pos=(0, 0), height=0.3, wrapWidth=3, ori=0,
                       color=u'white', colorSpace='rgb', opacity=1,
                       alignHoriz='center', depth=0.0)
# Placeholder text; set to 'IN' / 'OUT' during the cued deep-breathing loop.
CDB = visual.TextStim(win=win, name='CDB',
                      text=u'CDB text - IN or OUT',
                      font=u'Arial',
                      pos=(0, 0), height=0.5, wrapWidth=None, ori=0,
                      color=u'white', colorSpace='rgb', opacity=1,
                      alignHoriz='center', depth=0.0)
getready = visual.TextStim(win=win, name='getready',
                           text='Get Ready',
                           font=u'Arial',
                           pos=(0, 0), height=0.3, wrapWidth=3, ori=0,
                           color=u'yellow', colorSpace='rgb', opacity=1,
                           alignHoriz='center', depth=0.0)
# NOTE(review): timedisplay is created but never drawn anywhere in this
# script -- appears to be unused.
timedisplay = visual.TextStim(win=win, name='timedisplay',
                              text='',
                              font=u'Arial',
                              pos=(0, -0.3), height=0.2, wrapWidth=None, ori=0,
                              color=u'yellow', colorSpace='rgb', opacity=1,
                              alignHoriz='center', depth=0.0)
# Central fixation cross shown during the optional rest blocks.
fixation = visual.TextStim(win=win, name='fixation',
                           text=u'+',
                           font=u'Arial',
                           pos=(0, 0), height=0.5, wrapWidth=None, ori=0,
                           color=u'white', colorSpace='rgb', opacity=1,
                           alignHoriz='center', depth=0.0)
# DISPLAY INSTRUCTIONS TO PARTICIPANT
Instruct.draw()
win.flip()
event.waitKeys(maxWait=300, keyList=['space'], timeStamped=False)  # space key to start or after a long wait
# TRIGGER THE START OF THE TASK WITH MRI
MRinfo = {'sync': scan_trigger, 'TR': 3, 'volumes': 300}  # TR and vols can be changed here if needed
globalClock = core.Clock()  # to track the time since experiment started
# launchScan blocks until the scanner trigger (key = scan_trigger) arrives.
launchScan(win, MRinfo, mode='scan', globalClock=globalClock)
# Reset the clock again so t=0 is the scanner trigger, not script start.
globalClock = core.Clock()  # to track the time since experiment started
# REST BLOCK TO START?
if doRest == 1 or doRest == 3:
    fixation.draw()
    win.flip()
    # Fixation for tResting_start seconds; end_exp_key aborts the wait early.
    event.waitKeys(maxWait=tResting_start, keyList=[end_exp_key], timeStamped=False)
# START CDB TASK
routineTimer = core.CountdownTimer()  # to track time remaining of each routine
thisExp.nextEntry()
# set up handler to look after randomisation of conditions etc
trials = data.TrialHandler(nReps=trialnum, method='sequential',
                           originPath=-1,
                           trialList=[None],
                           seed=None, name='trials')
thisTrial = trials.trialList[0]  # so we can initialise stimuli with some values
if thisTrial is not None:
    for paramName in thisTrial.keys():
        # PsychoPy-builder idiom: bind each condition column as a variable.
        exec(paramName + '= thisTrial.' + paramName)
# Each trial is: Get Ready (tGetReady s) -> cued IN/OUT breathing (tCDB s)
# -> free breathing (tFree s).  Pressing end_exp_key at any cue aborts.
for thisTrial in trials:
    currentLoop = trials
    if thisTrial is not None:
        for paramName in thisTrial.keys():
            exec(paramName + '= thisTrial.' + paramName)

    # ------Prepare to start Routine "trial"-------
    t = 0
    trialClock.reset()  # clock
    frameN = -1
    continueRoutine = True
    routineTimer.add(tLength)
    # update component parameters for each repeat
    # keep track of which components have finished
    trialComponents = [getready, CDB, free]
    for thisComponent in trialComponents:
        if hasattr(thisComponent, 'status'):
            thisComponent.status = NOT_STARTED

    # -------Start Routine "trial"-------
    while continueRoutine and routineTimer.getTime() > 0:
        # get current time
        t = trialClock.getTime()
        frameN = frameN + 1  # number of completed frames (so 0 is the first frame)
        # update/draw components on each frame

        # *getready* updates
        if t >= 0.0 and t <= tGetReady and getready.status == NOT_STARTED:
            # keep track of start time/frame for later
            getready.tStart = t
            getready.frameNStart = frameN  # exact frame index
            getready.setAutoDraw(True)
            # Inner busy-wait loop: redraw the cue until its countdown ends.
            getreadytimer = core.CountdownTimer(tGetReady)
            while getreadytimer.getTime() > 0:
                if event.getKeys(keyList=[end_exp_key]):
                    core.quit()
                getready.draw()
                win.flip()
        frameRemains = 0.0 + tGetReady - win.monitorFramePeriod * 0.75  # most of one frame period left
        if getready.status == STARTED and t >= frameRemains:
            getready.setAutoDraw(False)

        # *CDB* updates
        if t >= tGetReady and CDB.status == NOT_STARTED:
            # keep track of start time/frame for later
            CDB.tStart = t
            CDB.frameNStart = frameN  # exact frame index
            CDB.setAutoDraw(True)
            CDBtimer = core.CountdownTimer(tCDB)
            # rCDBPace breath cycles; each cycle is half 'IN', half 'OUT'.
            for x in range(0, rCDBPace):
                timer = core.CountdownTimer(tCDBPace)
                half_timer = core.CountdownTimer(tCDBPace/2)
                while timer.getTime() > tCDBPace/2:
                    if event.getKeys(keyList=[end_exp_key]):
                        core.quit()
                    # do stuff
                    CDB.text = 'IN'
                    CDB.draw()
                    win.flip()
                while (timer.getTime() > 0) and (timer.getTime() < tCDBPace/2):
                    if event.getKeys(keyList=[end_exp_key]):
                        core.quit()
                    # do stuff
                    CDB.text = 'OUT'
                    CDB.draw()
                    win.flip()
        frameRemains = tGetReady + tCDB - win.monitorFramePeriod * 0.75  # most of one frame period left
        if CDB.status == STARTED and t >= frameRemains:
            CDB.setAutoDraw(False)

        # *free* updates
        if t >= tGetReady + tCDB and free.status == NOT_STARTED:
            # keep track of start time/frame for later
            free.tStart = t
            free.frameNStart = frameN  # exact frame index
            free.setAutoDraw(True)
            freetimer = core.CountdownTimer(tFree)
            while freetimer.getTime() > 0:
                if event.getKeys(keyList=[end_exp_key]):
                    core.quit()
                free.draw()
                win.flip()
        frameRemains = 0.0 + tLength - win.monitorFramePeriod * 0.75  # most of one frame period left
        if free.status == STARTED and t >= frameRemains:
            free.setAutoDraw(False)
            getready.setAutoDraw(False)

        # check if all components have finished
        if not continueRoutine:  # a component has requested a forced-end of Routine
            break
        continueRoutine = False  # will revert to True if at least one component still running
        for thisComponent in trialComponents:
            if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
                continueRoutine = True
                break  # at least one component has not yet finished
        # refresh the screen
        if continueRoutine:  # don't flip if this routine is over or we'll get a blank screen
            win.flip()

    # -------Ending Routine "trial"-------
    for thisComponent in trialComponents:
        if hasattr(thisComponent, "setAutoDraw"):
            thisComponent.setAutoDraw(False)
# REST BLOCK TO FINISH?
if doRest == 2 or doRest == 3:
    free.setAutoDraw(False)
    fixation.draw()
    win.flip()
    # Fixation for tResting_end seconds; end_exp_key aborts the wait early.
    event.waitKeys(maxWait=tResting_end, keyList=[end_exp_key], timeStamped=False)
# These should auto-save but just in case:
thisExp.saveAsWideText(filename+'.csv')
thisExp.saveAsPickle(filename)
logging.flush()
# Close everything
thisExp.abort()  # or data files will save again on exit
win.close()
core.quit()
| 39.696203 | 133 | 0.603077 |
26394a4fc76d07a0efb7ad4b441c211246f1b3b9 | 1,629 | py | Python | src/main/resources/autobahntestsuite/case/case7_1_2.py | olamy/autobahntestsuite-maven-plugin | 1d26f21cf7828d80d0e80bb783999d9283f023be | [
"Apache-2.0"
] | 3 | 2016-02-01T02:29:51.000Z | 2020-09-04T17:19:24.000Z | autobahntestsuite/autobahntestsuite/case/case7_1_2.py | MichalMazurek/AutobahnTestSuite | f9f39e0dca69ad66568fe6f608ec9e8b9fd2b60d | [
"Apache-2.0"
] | 4 | 2017-02-19T23:58:13.000Z | 2019-11-01T15:31:22.000Z | autobahntestsuite/autobahntestsuite/case/case7_1_2.py | MichalMazurek/AutobahnTestSuite | f9f39e0dca69ad66568fe6f608ec9e8b9fd2b60d | [
"Apache-2.0"
] | 6 | 2017-02-13T09:11:02.000Z | 2021-06-29T11:22:18.000Z | ###############################################################################
##
## Copyright 2011 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from case import Case
class Case7_1_2(Case):
    """Autobahn test case 7.1.2: peer must tolerate two close frames."""

    DESCRIPTION = """Send two close frames"""

    EXPECTATION = """Clean close with normal code. Second close frame ignored."""

    def init(self):
        # We drive the close handshake ourselves, so suppress the
        # framework's automatic close handling.
        self.suppressClose = True

    def onConnectionLost(self, failedByMe):
        Case.onConnectionLost(self, failedByMe)
        # A close with an unexpected status code fails the case.
        if self.behaviorClose == Case.WRONG_CODE:
            self.behavior = Case.FAILED
            self.passed = False
            self.result = self.resultClose

    def onOpen(self):
        # No data frames are expected back; only a clean normal close.
        # (Removed dead local `payload = "Hello World!"` -- it was never used.)
        self.expected[Case.OK] = []
        self.expectedClose = {"closedByMe":True,"closeCode":[self.p.CLOSE_STATUS_CODE_NORMAL],"requireClean":True}
        # First close frame: normal close handshake ...
        self.p.sendClose(self.p.CLOSE_STATUS_CODE_NORMAL)
        # ... immediately followed by a second bare close frame (opcode 8),
        # which a conforming peer must ignore.
        self.p.sendFrame(opcode = 8)
        self.p.killAfter(1)
| 34.659574 | 113 | 0.594843 |
c37d3f53113b76bcc8e1bbbbab62228bd8f5068d | 14,527 | py | Python | attackgraph/deepgraph_runner.py | wyz2368/deepRL | b92c7dc9c6dbec5ff217162c4fcce35695eabcbb | [
"MIT"
] | null | null | null | attackgraph/deepgraph_runner.py | wyz2368/deepRL | b92c7dc9c6dbec5ff217162c4fcce35695eabcbb | [
"MIT"
] | null | null | null | attackgraph/deepgraph_runner.py | wyz2368/deepRL | b92c7dc9c6dbec5ff217162c4fcce35695eabcbb | [
"MIT"
] | null | null | null | # Packages import
import numpy as np
import os
import datetime
import sys
# sys.path.append('/home/wangyzh/exp')
import psutil
import warnings
# Modules import
from attackgraph import DagGenerator as dag
from attackgraph import file_op as fp
from attackgraph import json_op as jp
from attackgraph import sim_Series
from attackgraph import training
from attackgraph import util
from attackgraph import game_data
from attackgraph import gambit_analysis as ga
from attackgraph.simulation import series_sim
# from attackgraph.sim_MPI import do_MPI_sim
from attackgraph.sim_retrain import sim_retrain
# load_env: the name of env to be loaded.
# env_name: the name of env to be generated.
# MPI_flag: if running simulation in parallel or not.
# def initialize(load_env=None, env_name=None, MPI_flag = False):
def initialize(load_env=None, env_name=None):
    """Set up the attack-graph environment and run the first EGTA epoch.

    load_env: name of a pickled env under ./env_data to load; if it is not
        a string, a new random env named `env_name` is generated and saved.
    Returns the initialized Game_data object; a copy is also pickled to
    ./game_data/game.pkl.  Epoch 1 uses pre-generated uniform strategies.
    """
    print("=======================================================")
    print("=======Begin Initialization and first epoch============")
    print("=======================================================")
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # silence TensorFlow logging
    # Create Environment
    if isinstance(load_env,str):
        path = os.getcwd() + '/env_data/' + load_env + '.pkl'
        if not fp.isExist(path):
            raise ValueError("The env being loaded does not exist.")
        env = fp.load_pkl(path)
    else:
        # env is created and saved.
        env = dag.env_rand_gen_and_save(env_name)
    # save graph copy
    env.save_graph_copy()
    env.save_mask_copy() #TODO: change transfer
    # create players and point to their env
    env.create_players()
    env.create_action_space()
    #print root node
    roots = env.get_Roots()
    print("Root Nodes:", roots)
    ed = env.get_ORedges()
    print('Or edges:', ed)
    # load param
    param_path = os.getcwd() + '/network_parameters/param.json'
    param = jp.load_json_data(param_path)
    # initialize game data
    game = game_data.Game_data(env, num_episodes=param['num_episodes'], threshold=param['threshold'])
    game.set_hado_param(param=param['hado_param'])
    game.set_hado_time_step(param['retrain_timesteps'])
    game.env.defender.set_env_belong_to(game.env)
    game.env.attacker.set_env_belong_to(game.env)
    # make no sense
    # NOTE(review): the two calls below re-link the original env's players;
    # the original author already flagged them as "make no sense" -- confirm
    # before removing.
    env.defender.set_env_belong_to(env)
    env.attacker.set_env_belong_to(env)
    # uniform strategy has been produced ahead of time
    print("epoch 1:", datetime.datetime.now())
    epoch = 1
    act_att = 'att_str_epoch1.pkl'
    act_def = 'def_str_epoch1.pkl'
    game.add_att_str(act_att)
    game.add_def_str(act_def)
    print('Begin simulation for uniform strategy.')
    # simulate using random strategies and initialize payoff matrix
    # if MPI_flag:
    #     aReward, dReward = do_MPI_sim(act_att, act_def)
    # else:
    aReward, dReward = series_sim(game.env, game, act_att, act_def, game.num_episodes)
    print('Done simulation for uniform strategy.')
    game.init_payoffmatrix(dReward, aReward)
    # Trivial Nash equilibrium for the 1x1 game: both play their only strategy.
    ne = {}
    ne[0] = np.array([1], dtype=np.float32)
    ne[1] = np.array([1], dtype=np.float32)
    game.add_nasheq(epoch, ne)
    # save a copy of game data
    game_path = os.getcwd() + '/game_data/game.pkl'
    fp.save_pkl(game, game_path)
    sys.stdout.flush()
    return game
# def EGTA(env, game, start_hado = 2, retrain=False, epoch = 1, game_path = os.getcwd() + '/game_data/game.pkl', MPI_flag = False):
def EGTA(env, game, start_hado=2, retrain=False, epoch=1, game_path=os.getcwd() + '/game_data/game.pkl'):
    """Run the double-oracle EGTA loop (HADO-EGTA when retrain=True).

    Each iteration trains an attacker and defender best response against a
    fixed opponent strategy, simulates to extend the payoff matrices, and
    recomputes the Nash equilibrium with Gambit.  The game object is pickled
    to game_path after every iteration.  Runs a fixed 50 iterations.
    """
    if retrain:
        print("=======================================================")
        print("==============Begin Running HADO-EGTA==================")
        print("=======================================================")
    else:
        print("=======================================================")
        print("===============Begin Running DO-EGTA===================")
        print("=======================================================")

    retrain_start = False
    proc = psutil.Process(os.getpid())  # for memory-growth diagnostics

    count = 50
    while count != 0:
    # while True:
        mem0 = proc.memory_info().rss
        # fix opponent strategy
        # mix_str_def = game.nasheq[epoch][0]
        # mix_str_att = game.nasheq[epoch][1]

        # Test mixed strategy
        # rand = np.random.rand(len(game.nasheq[epoch][0]))
        # mix_str_def = rand/np.sum(rand)
        # mix_str_att = rand/np.sum(rand)

        # Test first str
        # NOTE(review): this pure-strategy override ("Test first str")
        # replaces the Nash mixture from game.nasheq -- looks like debug
        # code left enabled; confirm before using for real experiments.
        mix_str_def = np.zeros(len(game.nasheq[epoch][0]))
        mix_str_def[0] = 1
        mix_str_att = np.zeros(len(game.nasheq[epoch][1]))
        mix_str_att[0] = 1

        aPayoff, dPayoff = util.payoff_mixed_NE(game, epoch)

        game.att_payoff.append(aPayoff)
        game.def_payoff.append(dPayoff)

        # increase epoch
        epoch += 1
        print("Current epoch is " + str(epoch))
        print("epoch " + str(epoch) +':', datetime.datetime.now())

        # train and save RL agents
        if retrain and epoch > start_hado:
            retrain_start = True

        print("Begin training attacker......")
        a_BD = training.training_att(game, mix_str_def, epoch, retrain=retrain_start)
        print("Attacker training done......")

        print("Begin training defender......")
        d_BD = training.training_def(game, mix_str_att, epoch, retrain=retrain_start)
        print("Defender training done......")

        mem1 = proc.memory_info().rss

        # HADO phase: retrain both agents, then simulate to pick the best
        # retrained strategy as this epoch's best response.
        if retrain and epoch > start_hado:
            print("Begin retraining attacker......")
            training.training_hado_att(game)
            print("Attacker retraining done......")

            print("Begin retraining defender......")
            training.training_hado_def(game)
            print("Defender retraining done......")

            # Simulation for retrained strategies and choose the best one as player's strategy.
            print('Begin retrained sim......')
            a_BD, d_BD = sim_retrain(env, game, mix_str_att, mix_str_def, epoch)
            print('Done retrained sim......')

            game.att_BD_list.append(a_BD)
            game.def_BD_list.append(d_BD)

        # else:
        #
        #     # Judge beneficial deviation
        #     # one plays nn and another plays ne strategy
        #     print("Simulating attacker payoff. New strategy vs. mixed opponent strategy.")
        #     nn_att = "att_str_epoch" + str(epoch) + ".pkl"
        #     nn_def = mix_str_def
        #     # if MPI_flag:
        #     #     a_BD, _ = do_MPI_sim(nn_att, nn_def)
        #     # else:
        #     a_BD, _ = series_sim(env, game, nn_att, nn_def, game.num_episodes)
        #     print("Simulation done for a_BD.")
        #
        #     print("Simulating defender's payoff. New strategy vs. mixed opponent strategy.")
        #     nn_att = mix_str_att
        #     nn_def = "def_str_epoch" + str(epoch) + ".pkl"
        #     # if MPI_flag:
        #     #     _, d_BD = do_MPI_sim(nn_att, nn_def)
        #     # else:
        #     _, d_BD = series_sim(env, game, nn_att, nn_def, game.num_episodes)
        #     print("Simulation done for d_BD.")

        mem2 = proc.memory_info().rss

        # #TODO: This may lead to early stop.
        # if a_BD - aPayoff < game.threshold and d_BD - dPayoff < game.threshold:
        #     print("*************************")
        #     print("aPayoff=", aPayoff, " ", "dPayoff=", dPayoff)
        #     print("a_BD=", a_BD, " ", "d_BD=", d_BD)
        #     print("*************************")
        #     break
        #
        game.add_att_str("att_str_epoch" + str(epoch) + ".pkl")
        game.add_def_str("def_str_epoch" + str(epoch) + ".pkl")

        # simulate and extend the payoff matrix.
        # game = sim_Series.sim_and_modifiy_Series_with_game(game, MPI_flag=MPI_flag)
        game = sim_Series.sim_and_modifiy_Series_with_game(game)
        mem3 = proc.memory_info().rss
        #
        # find nash equilibrium using gambit analysis
        payoffmatrix_def = game.payoffmatrix_def
        payoffmatrix_att = game.payoffmatrix_att
        print("Begin Gambit analysis.")
        nash_att, nash_def = ga.do_gambit_analysis(payoffmatrix_def, payoffmatrix_att)
        ga.add_new_NE(game, nash_att, nash_def, epoch)

        # Drop live network handles before pickling the game object.
        game.env.attacker.nn_att = None
        game.env.defender.nn_def = None
        fp.save_pkl(game, game_path)

        print('a_BD_list', game.att_BD_list)
        print('aPayoff', game.att_payoff)
        print('d_BD_list', game.def_BD_list)
        print('dPayoff', game.def_payoff)

        print("Round_" + str(epoch) + " has done and game was saved.")
        print("=======================================================")
        # break
        print("MEM:",(mem1 - mem0) / mem0, (mem2 - mem0) / mem0, (mem3 - mem0) / mem0)
        count -= 1
        sys.stdout.flush() #TODO: make sure this is correct.

    print("END: " + str(epoch))
    # NOTE(review): os._exit skips interpreter cleanup (atexit, buffers);
    # presumably intentional to kill lingering worker state -- confirm.
    os._exit(os.EX_OK)
def EGTA_restart(restart_epoch, start_hado = 2, retrain=False, game_path = os.getcwd() + '/game_data/game.pkl'):
    """Resume the EGTA loop from a saved game at epoch `restart_epoch`.

    Loads the pickled game from game_path and continues the same loop as
    EGTA(); runs (8 - restart_epoch) iterations.
    """
    if retrain:
        print("=======================================================")
        print("============Continue Running HADO-EGTA=================")
        print("=======================================================")
    else:
        print("=======================================================")
        print("=============Continue Running DO-EGTA==================")
        print("=======================================================")

    epoch = restart_epoch - 1

    game = fp.load_pkl(game_path)
    env = game.env

    retrain_start = False

    count = 8 - restart_epoch
    while count != 0:
    # while True:
        # fix opponent strategy
        # mix_str_def = game.nasheq[epoch][0]
        # mix_str_att = game.nasheq[epoch][1]
        # NOTE(review): pure-strategy override, same debug code as in EGTA();
        # confirm before using for real experiments.
        mix_str_def = np.zeros(len(game.nasheq[epoch][0]))
        mix_str_def[0] = 1
        mix_str_att = np.zeros(len(game.nasheq[epoch][1]))
        mix_str_att[0] = 1
        aPayoff, dPayoff = util.payoff_mixed_NE(game, epoch)

        game.att_payoff.append(aPayoff)
        game.def_payoff.append(dPayoff)

        # increase epoch
        epoch += 1
        print("Current epoch is " + str(epoch))
        print("epoch " + str(epoch) +':', datetime.datetime.now())

        # train and save RL agents
        if retrain and epoch > start_hado:
            retrain_start = True

        print("Begin training attacker......")
        a_BD = training.training_att(game, mix_str_def, epoch, retrain=retrain_start)
        print("Attacker training done......")

        print("Begin training defender......")
        d_BD = training.training_def(game, mix_str_att, epoch, retrain=retrain_start)
        print("Defender training done......")

        # HADO phase: retrain both agents, then pick the best retrained
        # strategy by simulation.
        if retrain and epoch > start_hado:
            print("Begin retraining attacker......")
            training.training_hado_att(game)
            print("Attacker retraining done......")

            print("Begin retraining defender......")
            training.training_hado_def(game)
            print("Defender retraining done......")

            # Simulation for retrained strategies and choose the best one as player's strategy.
            print('Begin retrained sim......')
            a_BD, d_BD = sim_retrain(env, game, mix_str_att, mix_str_def, epoch)
            print('Done retrained sim......')

            game.att_BD_list.append(a_BD)
            game.def_BD_list.append(d_BD)

        # else:
        #
        #     # Judge beneficial deviation
        #     # one plays nn and another plays ne strategy
        #     print("Simulating attacker payoff. New strategy vs. mixed opponent strategy.")
        #     nn_att = "att_str_epoch" + str(epoch) + ".pkl"
        #     nn_def = mix_str_def
        #     # if MPI_flag:
        #     #     a_BD, _ = do_MPI_sim(nn_att, nn_def)
        #     # else:
        #     a_BD, _ = series_sim(env, game, nn_att, nn_def, game.num_episodes)
        #     print('a_BD is ', a_BD)
        #     print("Simulation done for a_BD.")
        #
        #     print("Simulating defender's payoff. New strategy vs. mixed opponent strategy.")
        #     nn_att = mix_str_att
        #     nn_def = "def_str_epoch" + str(epoch) + ".pkl"
        #     # if MPI_flag:
        #     #     _, d_BD = do_MPI_sim(nn_att, nn_def)
        #     # else:
        #     _, d_BD = series_sim(env, game, nn_att, nn_def, game.num_episodes)
        #     print('d_BD is ', d_BD)
        #     print("Simulation done for d_BD.")

        # #TODO: This may lead to early stop.
        # if a_BD - aPayoff < game.threshold and d_BD - dPayoff < game.threshold:
        #     print("*************************")
        #     print("aPayoff=", aPayoff, " ", "dPayoff=", dPayoff)
        #     print("a_BD=", a_BD, " ", "d_BD=", d_BD)
        #     print("*************************")
        #     break
        #
        game.add_att_str("att_str_epoch" + str(epoch) + ".pkl")
        game.add_def_str("def_str_epoch" + str(epoch) + ".pkl")

        # simulate and extend the payoff matrix.
        game = sim_Series.sim_and_modifiy_Series_with_game(game)
        #
        # find nash equilibrium using gambit analysis
        payoffmatrix_def = game.payoffmatrix_def
        payoffmatrix_att = game.payoffmatrix_att
        print("Begin Gambit analysis.")
        nash_att, nash_def = ga.do_gambit_analysis(payoffmatrix_def, payoffmatrix_att)
        ga.add_new_NE(game, nash_att, nash_def, epoch)

        # Drop live network handles before pickling the game object.
        game.env.attacker.nn_att = None
        game.env.defender.nn_def = None
        fp.save_pkl(game, game_path)

        print('a_BD_list', game.att_BD_list)
        print('aPayoff', game.att_payoff)
        print('d_BD_list', game.def_BD_list)
        print('dPayoff', game.def_payoff)

        print("Round_" + str(epoch) + " has done and game was saved.")
        print("=======================================================")
        # break
        count -= 1
        sys.stdout.flush() #TODO: make sure this is correct.

    print("END EPOCH: " + str(epoch))
    print(datetime.datetime.now())
    # os._exit(os.EX_OK)
if __name__ == '__main__':
    warnings.filterwarnings("ignore")
    # Load the pre-built environment, run the first epoch, then iterate
    # plain DO-EGTA (retrain=False; retrain=True would run HADO-EGTA).
    game = initialize(load_env='run_env_B', env_name=None)
    # game = initialize(load_env='run_env_sep_AND', env_name=None)
    # game = initialize(load_env='run_env_modular', env_name=None)
    # EGTA(env, game, retrain=True)
    EGTA(game.env, game, retrain=False)
    # EGTA_restart(restart_epoch=4)
| 36.777215 | 131 | 0.572589 |
c1d54b47ce039b23baad7aec7e8f9e0ddb40052c | 3,389 | py | Python | tests/test_360_heurist.py | vbsoftpl/libdyson | 70d890259fe7baaa106f8c3302cd42077697ddd7 | [
"Apache-2.0"
] | 28 | 2021-02-11T22:44:01.000Z | 2022-03-28T22:29:35.000Z | tests/test_360_heurist.py | vbsoftpl/libdyson | 70d890259fe7baaa106f8c3302cd42077697ddd7 | [
"Apache-2.0"
] | 20 | 2021-03-07T15:18:58.000Z | 2022-03-07T13:47:30.000Z | tests/test_360_heurist.py | vbsoftpl/libdyson | 70d890259fe7baaa106f8c3302cd42077697ddd7 | [
"Apache-2.0"
] | 25 | 2021-02-13T14:55:43.000Z | 2022-03-29T22:27:55.000Z | """Tests for Dyson 360 Heurist vacuum."""
from unittest.mock import patch
import pytest
from libdyson import (
DEVICE_TYPE_360_HEURIST,
CleaningMode,
Dyson360Heurist,
VacuumHeuristPowerMode,
)
from . import CREDENTIAL, HOST, SERIAL
from .mocked_mqtt import MockedMQTT
# Initial device state the mocked MQTT broker reports; keys mirror the
# Dyson status payload consumed by Dyson360Heurist.
STATUS = {
    "currentVacuumPowerMode": "1",
    "defaultVacuumPowerMode": "2",
    "currentCleaningMode": "zoneConfigured",
    "defaultCleaningMode": "global",
}
@pytest.fixture(autouse=True)
def mqtt_client() -> MockedMQTT:
    """Return mocked mqtt client."""
    mocked_mqtt = MockedMQTT(
        HOST,
        SERIAL,
        CREDENTIAL,
        f"{DEVICE_TYPE_360_HEURIST}/{SERIAL}/command",
        f"{DEVICE_TYPE_360_HEURIST}/{SERIAL}/status",
        STATUS,
    )
    # Patch paho-mqtt's Client so Dyson360Heurist talks to the mock instead
    # of a real broker; yielding keeps the patch active for the whole test.
    with patch("libdyson.dyson_device.mqtt.Client", mocked_mqtt.refersh):
        yield mocked_mqtt
def test_properties(mqtt_client: MockedMQTT):
    """Test properties of 360 Heurist."""
    device = Dyson360Heurist(SERIAL, CREDENTIAL)
    device.connect(HOST)

    # Properties derived from the initial STATUS payload.
    assert device.current_power_mode == VacuumHeuristPowerMode.QUIET
    assert device.default_power_mode == VacuumHeuristPowerMode.HIGH
    assert device.current_cleaning_mode == CleaningMode.ZONE_CONFIGURED
    assert device.default_cleaning_mode == CleaningMode.GLOBAL
    assert device.is_bin_full is False

    # Push a new state (including an AIRWAYS fault, which the device reports
    # as a full bin) and check every property tracks the update.
    new_status = {
        "currentVacuumPowerMode": "2",
        "defaultVacuumPowerMode": "3",
        "currentCleaningMode": "global",
        "defaultCleaningMode": "zoneConfigured",
        "faults": {
            "AIRWAYS": {"active": True, "description": "1.0.-1"},
        },
    }
    mqtt_client.state_change(new_status)
    assert device.current_power_mode == VacuumHeuristPowerMode.HIGH
    assert device.default_power_mode == VacuumHeuristPowerMode.MAX
    assert device.current_cleaning_mode == CleaningMode.GLOBAL
    assert device.default_cleaning_mode == CleaningMode.ZONE_CONFIGURED
    assert device.is_bin_full is True
@pytest.mark.parametrize(
    "command,command_args,msg,msg_data",
    [
        (
            "start_all_zones",
            [],
            "START",
            {"cleaningMode": "global", "fullCleanType": "immediate"},
        ),
        ("pause", [], "PAUSE", {}),
        ("resume", [], "RESUME", {}),
        ("abort", [], "ABORT", {}),
        (
            "set_default_power_mode",
            [VacuumHeuristPowerMode.QUIET],
            "STATE-SET",
            {"defaults": {"defaultVacuumPowerMode": "1"}},
        ),
        (
            "set_default_power_mode",
            [VacuumHeuristPowerMode.HIGH],
            "STATE-SET",
            {"defaults": {"defaultVacuumPowerMode": "2"}},
        ),
        (
            "set_default_power_mode",
            [VacuumHeuristPowerMode.MAX],
            "STATE-SET",
            {"defaults": {"defaultVacuumPowerMode": "3"}},
        ),
    ],
)
def test_command(
    mqtt_client: MockedMQTT, command: str, command_args: list, msg: str, msg_data: dict
):
    """Each device command must publish exactly one correct MQTT payload."""
    device = Dyson360Heurist(SERIAL, CREDENTIAL)
    device.connect(HOST)
    func = getattr(device, command)
    func(*command_args)
    # BUG FIX: this was a bare comparison (`len(...) == 1`) whose result was
    # discarded, so the "exactly one command" check never actually ran.
    assert len(mqtt_client.commands) == 1
    payload = mqtt_client.commands[0]
    assert payload.pop("msg") == msg
    assert payload.pop("mode-reason") == "LAPP"
    payload.pop("time")  # timestamp varies; drop before comparing the rest
    assert payload == msg_data
| 29.99115 | 87 | 0.63116 |
5382c244a7aac54a38a76d805231dd0cca37d454 | 35 | py | Python | src/lib/versionedLib/versions/ver_1_001/__init__.py | kvelon/neural-ode | d12ed53ca3366d2b5a898639e0cac04714c5285d | [
"MIT"
] | 23 | 2021-02-25T13:59:13.000Z | 2022-03-18T17:41:57.000Z | src/lib/versionedLib/versions/ver_1_001/__init__.py | kvelon/neural-ode | d12ed53ca3366d2b5a898639e0cac04714c5285d | [
"MIT"
] | 18 | 2020-03-24T17:41:36.000Z | 2022-03-12T00:13:39.000Z | src/lib/versionedLib/versions/ver_1_001/__init__.py | kvelon/neural-ode | d12ed53ca3366d2b5a898639e0cac04714c5285d | [
"MIT"
] | 8 | 2021-07-19T12:23:00.000Z | 2022-03-18T17:42:05.000Z | '''version 1.001 of the library
''' | 17.5 | 31 | 0.657143 |
47fd77b03a3dacc7ec57ca69bba03a90d37c7929 | 1,580 | py | Python | pytest_localstack/hookspecs.py | krigar1184/pytest-localstack | 47a69973c3efb8b469017f7bae9e72405aaeef98 | [
"MIT"
] | null | null | null | pytest_localstack/hookspecs.py | krigar1184/pytest-localstack | 47a69973c3efb8b469017f7bae9e72405aaeef98 | [
"MIT"
] | null | null | null | pytest_localstack/hookspecs.py | krigar1184/pytest-localstack | 47a69973c3efb8b469017f7bae9e72405aaeef98 | [
"MIT"
] | null | null | null | """
Much like `pytest <https://pytest.readthedocs.io/en/latest/writing_plugins.html>`_,
itself, pytest-localstack uses `pluggy <https://github.com/pytest-dev/pluggy>`_
to implement a plugin system. These plugins can be used to add additional
functionality to pytest-localstack and to trigger callbacks when the
Localstack container is started and stopped.
"""
import pluggy
pytest_localstack_hookspec = pluggy.HookspecMarker("pytest-localstack")
pytest_localstack_hookimpl = pluggy.HookimplMarker("pytest-localstack")
@pytest_localstack_hookspec(historic=True)
def contribute_to_module(pytest_localstack):
    """
    Hook to add additional functionality to the :mod:`pytest_localstack`
    module.
    Primarily used to add importable fixture factories at a top level.
    Marked ``historic=True`` so implementations registered later are still
    replayed for earlier invocations of this hook.

    Args:
        pytest_localstack: the imported :mod:`pytest_localstack` module.
    """
@pytest_localstack_hookspec
def contribute_to_session(session):
    """Hook to add additional functionality to :class:`LocalstackSession`.
    Primarily used to add test resource factories to sessions.
    See :mod:`pytest_localstack.contrib.botocore` for an example of that.

    Args:
        session: the :class:`LocalstackSession` being extended.
    """
@pytest_localstack_hookspec
def session_starting(session):
    """Hook fired when :class:`LocalstackSession` is starting.

    Args:
        session: the :class:`LocalstackSession` that is starting.
    """
@pytest_localstack_hookspec
def session_started(session):
    """Hook fired when :class:`LocalstackSession` has started.

    Args:
        session: the :class:`LocalstackSession` that has started.
    """
@pytest_localstack_hookspec
def session_stopping(session):
    """Hook fired when :class:`LocalstackSession` is stopping.

    Args:
        session: the :class:`LocalstackSession` that is stopping.
    """
@pytest_localstack_hookspec
def session_stopped(session):
    """Hook fired when :class:`LocalstackSession` has stopped.

    Args:
        session: the :class:`LocalstackSession` that has stopped.
    """
| 30.384615 | 83 | 0.775316 |
7ed26311a76a012545d11b65ec2f61dca60adea7 | 14,402 | py | Python | venv/lib/python3.5/site-packages/yapftests/reformatter_facebook_test.py | prashant0598/CoffeeApp | 4fa006aebf06e12ed34766450ddcfa548ee63307 | [
"MIT"
] | null | null | null | venv/lib/python3.5/site-packages/yapftests/reformatter_facebook_test.py | prashant0598/CoffeeApp | 4fa006aebf06e12ed34766450ddcfa548ee63307 | [
"MIT"
] | null | null | null | venv/lib/python3.5/site-packages/yapftests/reformatter_facebook_test.py | prashant0598/CoffeeApp | 4fa006aebf06e12ed34766450ddcfa548ee63307 | [
"MIT"
] | null | null | null | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Facebook tests for yapf.reformatter."""
import textwrap
import unittest
from yapf.yapflib import reformatter
from yapf.yapflib import style
from yapftests import yapf_test_helper
class TestsForFacebookStyle(yapf_test_helper.YAPFTest):
  """Reformatter tests exercising the Facebook style preset.

  Each test parses a code snippet with yapf_test_helper.ParseAndUnwrap and
  asserts that reformatter.Reformat produces the expected Facebook-style
  output (characterised by dedented closing brackets). The embedded
  textwrap.dedent strings are whitespace-exact fixtures — do not reflow them.
  """
  @classmethod
  def setUpClass(cls):
    # Facebook style is installed globally once; every test in this class
    # therefore runs against the same style configuration.
    style.SetGlobalStyle(style.CreateFacebookStyle())
  def testNoNeedForLineBreaks(self):
    unformatted_code = textwrap.dedent("""\
        def overly_long_function_name(
            just_one_arg, **kwargs):
          pass
        """)
    expected_formatted_code = textwrap.dedent("""\
        def overly_long_function_name(just_one_arg, **kwargs):
          pass
        """)
    uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
    self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
  def testDedentClosingBracket(self):
    unformatted_code = textwrap.dedent("""\
        def overly_long_function_name(
            first_argument_on_the_same_line,
            second_argument_makes_the_line_too_long):
          pass
        """)
    expected_formatted_code = textwrap.dedent("""\
        def overly_long_function_name(
            first_argument_on_the_same_line, second_argument_makes_the_line_too_long
        ):
          pass
        """)
    uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
    self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
  def testBreakAfterOpeningBracketIfContentsTooBig(self):
    unformatted_code = textwrap.dedent("""\
        def overly_long_function_name(a, b, c, d, e, f, g, h, i, j, k, l, m,
          n, o, p, q, r, s, t, u, v, w, x, y, z):
          pass
        """)
    expected_formatted_code = textwrap.dedent("""\
        def overly_long_function_name(
            a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, \
v, w, x, y, z
        ):
          pass
        """)
    uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
    self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
  def testDedentClosingBracketWithComments(self):
    unformatted_code = textwrap.dedent("""\
        def overly_long_function_name(
            # comment about the first argument
            first_argument_with_a_very_long_name_or_so,
            # comment about the second argument
            second_argument_makes_the_line_too_long):
          pass
        """)
    expected_formatted_code = textwrap.dedent("""\
        def overly_long_function_name(
            # comment about the first argument
            first_argument_with_a_very_long_name_or_so,
            # comment about the second argument
            second_argument_makes_the_line_too_long
        ):
          pass
        """)
    uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
    self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
  def testDedentImportAsNames(self):
    code = textwrap.dedent("""\
        from module import (
            internal_function as function,
            SOME_CONSTANT_NUMBER1,
            SOME_CONSTANT_NUMBER2,
            SOME_CONSTANT_NUMBER3,
        )
        """)
    uwlines = yapf_test_helper.ParseAndUnwrap(code)
    self.assertCodeEqual(code, reformatter.Reformat(uwlines))
  def testDedentTestListGexp(self):
    # TODO(ambv): Arguably _DetermineMustSplitAnnotation shouldn't enforce
    # breaks only on the basis of a trailing comma if the entire thing fits
    # in a single line.
    code = textwrap.dedent("""\
        try:
            pass
        except (
            IOError, OSError, LookupError, RuntimeError, OverflowError
        ) as exception:
            pass
        try:
            pass
        except (
            IOError,
            OSError,
            LookupError,
            RuntimeError,
            OverflowError,
        ) as exception:
            pass
        """)
    uwlines = yapf_test_helper.ParseAndUnwrap(code)
    self.assertCodeEqual(code, reformatter.Reformat(uwlines))
  def testBrokenIdempotency(self):
    # TODO(ambv): The following behaviour should be fixed.
    pass0_code = textwrap.dedent("""\
        try:
            pass
        except (IOError, OSError, LookupError, RuntimeError, OverflowError) as exception:
            pass
        """)
    pass1_code = textwrap.dedent("""\
        try:
            pass
        except (
            IOError, OSError, LookupError, RuntimeError, OverflowError
        ) as exception:
            pass
        """)
    uwlines = yapf_test_helper.ParseAndUnwrap(pass0_code)
    self.assertCodeEqual(pass1_code, reformatter.Reformat(uwlines))
    pass2_code = textwrap.dedent("""\
        try:
            pass
        except (
            IOError, OSError, LookupError, RuntimeError, OverflowError
        ) as exception:
            pass
        """)
    uwlines = yapf_test_helper.ParseAndUnwrap(pass1_code)
    self.assertCodeEqual(pass2_code, reformatter.Reformat(uwlines))
  def testIfExprHangingIndent(self):
    unformatted_code = textwrap.dedent("""\
        if True:
            if True:
                if True:
                    if not self.frobbies and (
                       self.foobars.counters['db.cheeses'] != 1 or
                       self.foobars.counters['db.marshmellow_skins'] != 1):
                       pass
        """)
    expected_formatted_code = textwrap.dedent("""\
        if True:
            if True:
                if True:
                    if not self.frobbies and (
                        self.foobars.counters['db.cheeses'] != 1 or
                        self.foobars.counters['db.marshmellow_skins'] != 1
                    ):
                        pass
        """)
    uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
    self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
  def testSimpleDedenting(self):
    unformatted_code = textwrap.dedent("""\
        if True:
            self.assertEqual(result.reason_not_added, "current preflight is still running")
        """)
    expected_formatted_code = textwrap.dedent("""\
        if True:
            self.assertEqual(
                result.reason_not_added, "current preflight is still running"
            )
        """)
    uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
    self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
  def testDedentingWithSubscripts(self):
    unformatted_code = textwrap.dedent("""\
        class Foo:
            class Bar:
                @classmethod
                def baz(cls, clues_list, effect, constraints, constraint_manager):
                    if clues_lists:
                       return cls.single_constraint_not(clues_lists, effect, constraints[0], constraint_manager)
        """)
    expected_formatted_code = textwrap.dedent("""\
        class Foo:
            class Bar:
                @classmethod
                def baz(cls, clues_list, effect, constraints, constraint_manager):
                    if clues_lists:
                        return cls.single_constraint_not(
                            clues_lists, effect, constraints[0], constraint_manager
                        )
        """)
    uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
    self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
  def testDedentingCallsWithInnerLists(self):
    code = textwrap.dedent("""\
        class _():
            def _():
                cls.effect_clues = {
                    'effect': Clue((cls.effect_time, 'apache_host'), effect_line, 40)
                }
        """)
    uwlines = yapf_test_helper.ParseAndUnwrap(code)
    self.assertCodeEqual(code, reformatter.Reformat(uwlines))
  def testDedentingListComprehension(self):
    unformatted_code = textwrap.dedent("""\
        class Foo():
            def _pack_results_for_constraint_or():
                self.param_groups = dict(
                    (
                        key + 1, ParamGroup(groups[key], default_converter)
                    ) for key in six.moves.range(len(groups))
                )
                for combination in cls._clues_combinations(clues_lists):
                    if all(
                        cls._verify_constraint(combination, effect, constraint)
                        for constraint in constraints
                    ):
                        pass
                guessed_dict = dict(
                    (
                        key, guessed_pattern_matches[key]
                    ) for key in six.moves.range(len(guessed_pattern_matches))
                )
                content = "".join(
                    itertools.chain(
                        (first_line_fragment, ), lines_between, (last_line_fragment, )
                    )
                )
                rule = Rule(
                    [self.cause1, self.cause2, self.cause1, self.cause2], self.effect, constraints1,
                    Rule.LINKAGE_AND
                )
                assert sorted(log_type.files_to_parse) == [
                    ('localhost', os.path.join(path, 'node_1.log'), super_parser),
                    ('localhost', os.path.join(path, 'node_2.log'), super_parser)
                ]
        """)
    expected_formatted_code = textwrap.dedent("""\
        class Foo():
            def _pack_results_for_constraint_or():
                self.param_groups = dict(
                    (key + 1, ParamGroup(groups[key], default_converter))
                    for key in six.moves.range(len(groups))
                )
                for combination in cls._clues_combinations(clues_lists):
                    if all(
                        cls._verify_constraint(combination, effect, constraint)
                        for constraint in constraints
                    ):
                        pass
                guessed_dict = dict(
                    (key, guessed_pattern_matches[key])
                    for key in six.moves.range(len(guessed_pattern_matches))
                )
                content = "".join(
                    itertools.chain(
                        (first_line_fragment, ), lines_between, (last_line_fragment, )
                    )
                )
                rule = Rule(
                    [self.cause1, self.cause2, self.cause1, self.cause2], self.effect,
                    constraints1, Rule.LINKAGE_AND
                )
                assert sorted(log_type.files_to_parse) == [
                    ('localhost', os.path.join(path, 'node_1.log'), super_parser),
                    ('localhost', os.path.join(path, 'node_2.log'), super_parser)
                ]
        """)
    uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
    self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
  def testMustSplitDedenting(self):
    code = textwrap.dedent("""\
        class _():
            def _():
                effect_line = FrontInput(
                    effect_line_offset, line_content,
                    LineSource('localhost', xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx)
                )
        """)
    uwlines = yapf_test_helper.ParseAndUnwrap(code)
    self.assertCodeEqual(code, reformatter.Reformat(uwlines))
  def testDedentIfConditional(self):
    code = textwrap.dedent("""\
        class _():
            def _():
                if True:
                    if not self.frobbies and (
                        self.foobars.counters['db.cheeses'] != 1 or
                        self.foobars.counters['db.marshmellow_skins'] != 1
                    ):
                        pass
        """)
    uwlines = yapf_test_helper.ParseAndUnwrap(code)
    self.assertCodeEqual(code, reformatter.Reformat(uwlines))
  def testDedentSet(self):
    code = textwrap.dedent("""\
        class _():
            def _():
                assert set(self.constraint_links.get_links()) == set(
                    [
                        (2, 10, 100),
                        (2, 10, 200),
                        (2, 20, 100),
                        (2, 20, 200),
                    ]
                )
        """)
    uwlines = yapf_test_helper.ParseAndUnwrap(code)
    self.assertCodeEqual(code, reformatter.Reformat(uwlines))
  def testDedentingInnerScope(self):
    code = textwrap.dedent("""\
        class Foo():
            @classmethod
            def _pack_results_for_constraint_or(cls, combination, constraints):
                return cls._create_investigation_result(
                    (clue for clue in combination if not clue == Verifier.UNMATCHED),
                    constraints, InvestigationResult.OR
                )
        """)
    # Reformat twice to verify the output is idempotent (stable fixed point).
    uwlines = yapf_test_helper.ParseAndUnwrap(code)
    reformatted_code = reformatter.Reformat(uwlines)
    self.assertCodeEqual(code, reformatted_code)
    uwlines = yapf_test_helper.ParseAndUnwrap(reformatted_code)
    reformatted_code = reformatter.Reformat(uwlines)
    self.assertCodeEqual(code, reformatted_code)
  def testCommentWithNewlinesInPrefix(self):
    unformatted_code = textwrap.dedent("""\
        def foo():
          if 0:
            return False
          #a deadly comment
          elif 1:
            return True
        print(foo())
        """)
    expected_formatted_code = textwrap.dedent("""\
        def foo():
            if 0:
                return False
            #a deadly comment
            elif 1:
                return True
        print(foo())
        """)
    uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
    self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
# Allow running this test module directly: python reformatter_facebook_test.py
if __name__ == '__main__':
  unittest.main()
| 35.472906 | 112 | 0.576864 |
45c08ac3f82f4121983d8e8773fbeedf3f501cdc | 6,489 | py | Python | spytest/tests/security/test_security_save_reboot.py | mykolaf/sonic-mgmt | de77268526173c5e3a345f3f3703b56eb40c5eed | [
"Apache-2.0"
] | 1 | 2021-09-15T17:09:13.000Z | 2021-09-15T17:09:13.000Z | spytest/tests/security/test_security_save_reboot.py | mykolaf/sonic-mgmt | de77268526173c5e3a345f3f3703b56eb40c5eed | [
"Apache-2.0"
] | 1 | 2020-02-05T16:51:53.000Z | 2020-02-05T16:51:53.000Z | spytest/tests/security/test_security_save_reboot.py | mykolaf/sonic-mgmt | de77268526173c5e3a345f3f3703b56eb40c5eed | [
"Apache-2.0"
] | null | null | null | import pytest
from spytest import st
from spytest.dicts import SpyTestDict
import apis.security.radius as radius
import apis.security.tacacs as security
from utilities.utils import ensure_service_params
import apis.system.reboot as reboot
import apis.security.tacacs as tacacs
import apis.system.switch_configuration as switchconf
security_data = SpyTestDict()
@pytest.fixture(scope="module", autouse=True)
def security_module_hooks(request):
    """Module fixture: resolve topology/params, configure TACACS+ and RADIUS,
    and revert the configuration after all tests in the module have run."""
    global vars
    vars = st.ensure_min_topology("D1")
    security_variables()
    security_module_prolog()
    yield
    security_module_epilog()
@pytest.fixture(scope="function", autouse=True)
def security_func_hooks(request):
    """Per-test fixture placeholder; no per-test setup or teardown is required."""
    yield
def security_variables():
    """Populate security_data with RADIUS/TACACS+ service parameters for DUT D1.

    All values come from ensure_service_params(); note that the global passkey
    deliberately uses index 1 ("globals", 1, "passkey") while the remaining
    globals use index 0.
    """
    security_data.clear()
    security_data.hosts = ensure_service_params(vars.D1, "radius", "hosts")
    security_data.radius_host_ip = ensure_service_params(vars.D1, "radius", "hosts", 0, "ip")
    security_data.radius_host_passkey = ensure_service_params(vars.D1, "radius", "hosts", 0, "passkey")
    security_data.radius_host_priority = ensure_service_params(vars.D1, "radius", "hosts", 0, "priority")
    security_data.global_diff_passkey = ensure_service_params(vars.D1, "radius", "globals", 1, "passkey")
    security_data.global_auth_type = ensure_service_params(vars.D1, "radius", "globals", 0, "auth_type")
    security_data.global_timeout = ensure_service_params(vars.D1, "radius", "globals", 0, "timeout")
    security_data.global_retransmit = ensure_service_params(vars.D1, "radius", "globals", 0, "retransmit")
    security_data.tacacs_host_ip = ensure_service_params(vars.D1, "tacacs", "hosts", 0, "ip")
    security_data.tacacs_tcp_port = ensure_service_params(vars.D1, "tacacs", "hosts", 0, "tcp_port")
    security_data.tacacs_passkey = ensure_service_params(vars.D1, "tacacs", "hosts", 0, "passkey")
    security_data.tacacs_timeout = ensure_service_params(vars.D1, "tacacs", "hosts", 0, "timeout")
    security_data.tacacs_priority = ensure_service_params(vars.D1, "tacacs", "hosts", 0, "priority")
    security_data.tacacs_auth_type = ensure_service_params(vars.D1, "tacacs", "hosts", 0, "auth_type")
def security_module_prolog():
    """Apply the module-wide TACACS+/RADIUS configuration and verify it took effect."""
    tacacs_config()
    tacacs_config_verify()
    config_global_radius()
    radius_config()
    st.log("Verifying radius server details before save-reboot")
    checking_radius_config(security_data.radius_host_ip)
def security_module_epilog():
    """Revert the configuration applied by the prolog: delete the RADIUS host,
    unset (no_form) the global RADIUS params, and delete the TACACS+ server."""
    radius.config_server(vars.D1, ip_address=security_data.radius_host_ip, action="delete", cli_type="click")
    radius.config_global_server_params(vars.D1, skip_error_check=False,
                                       params={"key": {"value": security_data.global_diff_passkey, "no_form": True},
                                               "timeout": {"value": security_data.global_timeout, "no_form": True},
                                               "auth_type": {"value": security_data.global_auth_type, "no_form": True},
                                               "retransmit": {"value": security_data.global_retransmit,
                                                              "no_form": True}},
                                       cli_type="click")
    tacacs.set_tacacs_server(vars.D1, 'delete', security_data.tacacs_host_ip)
def verify_security_default_config(dut):
    """Report failure unless AAA shows its default values on *dut*.

    NOTE(review): the expected strings 'local (default)'/'False (default)' are
    passed positionally to security.verify_aaa — confirm their meaning there.
    """
    if not security.verify_aaa(dut, 'local (default)', 'False (default)'):
        st.report_fail("authentication_default_configs_fail")
def config_global_radius():
    """Configure global RADIUS parameters (key, auth type, timeout, retransmit);
    abort the run with report_fail if the configuration is rejected."""
    if not radius.config_global_server_params(vars.D1, skip_error_check=False,
                                              params={"key": {"value": security_data.global_diff_passkey},
                                                      "auth_type": {"value": security_data.global_auth_type},
                                                      "timeout": {"value": security_data.global_timeout},
                                                      "retransmit": {"value": security_data.global_retransmit}},
                                              cli_type="click"):
        st.report_fail("security_global_params_config_failed")
def radius_config():
    """Add the RADIUS server host entry with its passkey and priority."""
    radius.config_server(vars.D1, ip_address=security_data.radius_host_ip, key=security_data.radius_host_passkey,
                         priority=security_data.radius_host_priority, action="add", cli_type="click")
def checking_radius_config(ip):
    """Verify that the global RADIUS parameters and the host entry for *ip*
    are present in the device configuration; report failure otherwise."""
    st.log("Checking Radius server config after save and reboot")
    expected = {
        "globals": {
            "global_retransmit": security_data.global_retransmit,
            "global_timeout": security_data.global_timeout,
            "global_passkey": security_data.global_diff_passkey,
        },
        "servers": [{
            'priority': security_data.radius_host_priority,
            'address': ip,
            'passkey': security_data.radius_host_passkey,
        }],
    }
    if radius.verify_config(vars.D1, params=expected, cli_type="click"):
        st.log("Radius configuration successful")
    else:
        st.report_fail("radius_server_config_not_successful")
def tacacs_config():
    """Add the TACACS+ server entry using the parameters resolved in security_variables()."""
    tacacs.set_tacacs_server(vars.D1, 'add', security_data.tacacs_host_ip, security_data.tacacs_tcp_port,
                             security_data.tacacs_timeout, security_data.tacacs_passkey,
                             security_data.tacacs_auth_type, security_data.tacacs_priority)
def tacacs_config_verify():
    """Check that the TACACS+ server entry is present in the running config."""
    st.log("Checking whether config is loaded to running config from config_db after save-reboot")
    table, key, attr, value = "TACPLUS_SERVER", security_data.tacacs_host_ip, "priority", "1"
    if switchconf.verify_running_config(vars.D1, table, key, attr, value):
        st.log("tacacs server configuration is successful")
    else:
        st.report_fail("running_config_failed", vars.D1, table, key, attr, value)
@pytest.mark.savereboot
def test_ft_security_config_mgmt_verifying_config_with_save_reboot():
    '''
    Author: Sai Durga <pchvsai.durga@broadcom.com>
    FtOpSoScRaFn006: Verify that radius config retained after config save and reboot
    '''
    # Save the running config and reload; the module prolog has already
    # applied the TACACS+/RADIUS configuration being tested.
    st.log("performing Config save and reloading the device")
    reboot.config_save_reload(vars.D1)
    # Both checks call st.report_fail on mismatch, so reaching report_pass
    # means the configuration survived the reboot.
    tacacs_config_verify()
    checking_radius_config(security_data.radius_host_ip)
    st.report_pass("security_config_retained_after_save_reboot")
7d63d591f92d42a87bcb20b0ec1ac426a5cdcc34 | 14,598 | py | Python | scripts/gisaid_to_ena.py | enasequence/ena-content-dataflow | 4024f618bf7917e3c130563629a7d425fb076be3 | [
"Apache-2.0"
] | 1 | 2021-07-26T12:44:18.000Z | 2021-07-26T12:44:18.000Z | scripts/gisaid_to_ena.py | enasequence/ena-content-dataflow | 4024f618bf7917e3c130563629a7d425fb076be3 | [
"Apache-2.0"
] | 6 | 2021-05-21T10:15:58.000Z | 2022-01-12T18:36:24.000Z | scripts/gisaid_to_ena.py | enasequence/ena-content-dataflow | 4024f618bf7917e3c130563629a7d425fb076be3 | [
"Apache-2.0"
] | 4 | 2020-09-08T15:05:25.000Z | 2021-05-25T11:11:33.000Z | # Copyright [2020-2021] EMBL-European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, sys, re, argparse, requests
import pandas as pd
import pycountry
import csv
from yattag import Doc, indent
# Module-level yattag document used by xml_generator() to build the SAMPLE_SET XML.
doc, tag, text = Doc().tagtext()
xml_header = '<?xml version="1.0" encoding="UTF-8"?>'
# Canonical column order for the ENA submission output (ERC000033 checklist).
# NOTE: fix_missing_values() may remove 'host age' from this list at runtime.
ena_fields = [
    'sample_alias', 'tax_id', 'scientific_name', 'common_name',
    'sample_title', 'sample_description', 'collection date',
    'geographic location (country and/or sea)', 'geographic location (region and locality)',
    'sample capture status', 'host common name', 'host subject id', 'host age',
    'host health state', 'host sex', 'host scientific name', 'virus identifier',
    'collector name', 'collecting institution', 'isolate', 'isolation source host-associated',
    'gisaid_accession_id'
]
'''
Mapping the GISAID metadata headers to ENA metadata headers form an external file
'''
def gisaid_spreadsheet_mapping(opts):
    """Load the GISAID->ENA metadata header mapping from a tab-separated file.

    The file must have 'GISAID' and ENA header columns; GISAID entries must be
    unique because they become the index. Only the first mapping record is
    used (matching the original behaviour of returning inside the loop).

    Args:
        opts: parsed options; ``opts.map`` is the mapping file path/buffer.

    Returns:
        dict mapping GISAID header -> ENA header, or None if the file has no
        mapping records.
    """
    map_df = pd.read_csv(opts.map, sep="\t", header=0)
    records = map_df.set_index('GISAID').T.to_dict('records')
    # Original code returned from inside a loop on the first iteration and
    # wrapped the record in a redundant dict comprehension; this is equivalent.
    return dict(records[0]) if records else None
default_sheet = 'ENA Submission'
"""
Handle command line arguments & helptext
"""
def parse_args(args):
    """Parse command-line options for the GISAID->ENA converter.

    Args:
        args: list of argument strings, e.g. ``sys.argv[1:]``.

    Returns:
        argparse.Namespace with csv, xls, sheet, outfile, taxon, map and
        outformat attributes.
    """
    from argparse import RawTextHelpFormatter
    parser = argparse.ArgumentParser(
        description="""
description:
    Script to convert GISAID metadata spreadsheets into ENA formatted ones.
    Can handle XLS/CSV input, or any format handled by the pandas.read_excel method
    (https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_excel.html)
    Output format can be in Microsoft Excel format (.xlsx) for interactive submissions,
    or in XML format for programmatic submissions.
examples:
    # convert GISAID spreadsheet in CSV format to ENA in excel format
    gisaid_to_ena.py --csv gisaid.csv --outfile ena.xlsx --outformat excel
    # convert GISAID metadata from sheet called 'Samples' to ENA spreadsheet
    gisaid_to_ena.py --xls gisaid.xlsx --sheet Samples --outfile ena.xml --outformat xml
    # convert using a custom metadata mapping file
    gisaid_to_ena.py --xls gisaid.xlsx --outfile ena.xml --outformat xml --map path/to/mapping.tsv
        """,
        formatter_class=RawTextHelpFormatter
    )
    parser.add_argument('--csv', help="path to GISAID formatted CSV file")
    parser.add_argument('--xls', help="path to GISAID formatted XLS file")
    parser.add_argument('--sheet', help=f"(optional) name of excel sheet (default: 'Submissions')")
    parser.add_argument('--outfile', help="output file name")
    parser.add_argument('--taxon', help="taxon name or id of samples")
    parser.add_argument('--map', help='the path for custom mapping file between GISAID and ENA headers (default: metadata_mapping.tsv)', type=str, default='{}/metadata_mapping.tsv'.format(os.path.abspath(os.path.dirname(__file__))))
    parser.add_argument('--outformat', help='specify between xml or excel', type=str, required=True)
    # BUG FIX: the original ignored the `args` parameter and always parsed
    # sys.argv[1:]; parse the arguments the caller actually passed in.
    opts = parser.parse_args(args)
    return opts
"""
Read GISAID spreadsheet into pandas dataframe
"""
def parse_gisaid_metadata(opts):
    """Read the GISAID metadata spreadsheet into a pandas DataFrame.

    Exactly one of ``opts.csv`` / ``opts.xls`` must be provided; otherwise the
    script exits with status 1. The first data row (GISAID's second header
    row) is dropped before returning.
    """
    if opts.csv:
        frame = pd.read_csv(opts.csv)
    elif opts.xls:
        # 'Submissions' is the name of the sheet in the GISAID template
        frame = pd.read_excel(opts.xls, sheet_name=opts.sheet or 'Submissions')
    else:
        sys.stderr.write("Must provide either --csv or --xls\n\n")
        sys.exit(1)
    # gisaid sheets have 2 headers - drop second one
    return frame.drop([0])
"""
Convert the metadata fields using the GISAID->ENA mapping
"""
def convert_gisaid_to_ena(gisaid_df, gisaid_to_ena):
    """Translate GISAID columns into ENA columns using the header mapping.

    GISAID columns without a mapping entry are skipped (KeyError branch).
    'covv_location' is split into country and, when 'covv_add_location' is
    empty, region/locality. Returns a new pandas DataFrame of ENA columns.
    """
    ena_data = {}
    # is additional location info provided or should we infer some from `covv_location`?
    # NOTE: index 1 is the first data row because parse_gisaid_metadata dropped row 0.
    infer_add_loc = True if pd.isnull(gisaid_df.at[1, 'covv_add_location']) else False
    for gisaid_field in gisaid_df:
        if gisaid_field == 'covv_add_location' and infer_add_loc:
            continue
        try:
            ena_field = gisaid_to_ena[gisaid_field]
            if gisaid_field == 'covv_location':
                # Each entry becomes a (country, region-or-None) pair.
                geo_info = [ extract_geographic_info(gl) for gl in list(gisaid_df[gisaid_field]) ]
                ena_data['geographic location (country and/or sea)'] = [g[0] for g in geo_info]
                if infer_add_loc:
                    ena_data['geographic location (region and locality)'] = [g[1] for g in geo_info]
            else:
                ena_data[ena_field] = list(gisaid_df[gisaid_field])
        except KeyError:
            # GISAID field with no ENA counterpart in the mapping: ignore it.
            continue
    # Autofill taxonomy, capture status and missing columns before building the frame.
    ena_data = smart_fill(ena_data, gisaid_df)
    return pd.DataFrame(ena_data)
"""
Autofill as much stuff as possible, replace bad
values, general tidy up of data.
"""
def smart_fill(ena_data,gisaid_df):
    """Autofill as much metadata as possible and tidy up the converted data.

    Adds taxonomy info, a standard 'sample capture status', placeholder
    columns for any missing ena_fields, then fixes unknown/blank values.
    """
    # need num of rows to autofill missing data
    num_rows = len(ena_data['collection date'])
    # add taxon info if given
    ena_data = add_taxonomic_information(ena_data,gisaid_df)
    # add standard capture status
    ena_data['sample capture status'] = ['active surveillance in response to outbreak' for i in range(num_rows)]
    # make sure all other fields are in the spreadsheet
    for field in ena_fields:
        if field not in ena_data:
            ena_data[field] = [' ' for i in range(num_rows)]
    ena_data = fix_missing_values(ena_data, num_rows)
    return ena_data
"""
Fill 'not provided' in place of 'unknown', except for
'host age' ('not provided' is not accepted)
"""
def fix_missing_values(dataframe, num_rows):
    """Fill 'not provided' in place of unknown/blank values.

    'host age' is removed entirely when empty ('not provided' is not accepted
    for it by webin interactive). Mandatory checklist fields replace both
    'unknown' and ' '; optional fields replace only 'unknown'. 'sample_title'
    is never autofilled.

    Args:
        dataframe: dict of ENA field -> list of values (one per sample).
        num_rows: number of samples (kept for interface compatibility; unused).
    """
    # if host age is empty, remove it
    # it only causes issues with webin interactive later
    # NOTE: this mutates the module-level ena_fields list.
    if _check_empty_list(dataframe['host age']):
        del dataframe['host age']
        ena_fields.remove('host age')
    mandatory_fields = [
        'tax_id', 'scientific_name', 'collection date',
        'geographic location (country and/or sea)', 'sample_alias',
        'host common name', 'host sex', 'collector name',
        'collecting institution', 'host health state', 'sample_description',
    ]
    # otherwise, autofill empty/unknowns
    for field in dataframe:
        if 'unknown' in dataframe[field] or ' ' in dataframe[field]:
            # don't autofill sample_title
            if field != 'sample_title':
                # BUG FIX: the original tested `field[-1] in [...]`, comparing
                # the LAST CHARACTER of the field name against full field
                # names — that branch could never be taken, so mandatory
                # fields never had blank (' ') values replaced.
                if field in mandatory_fields:
                    dataframe[field] = ['not provided' if v in ['unknown', ' '] else v for v in dataframe[field]]
                else:
                    dataframe[field] = ['not provided' if v == 'unknown' else v for v in dataframe[field]]
    return dataframe
def _check_empty_list(list):
list_empty = True
list = [' ' if l == 'unknown' else l for l in list]
for elem in list:
if re.search("^\S+$", elem):
list_empty = False
break
return list_empty
"""
Split out geographic info from GISAID location string
"""
def extract_geographic_info(location_str):
    """Split a GISAID '/'-separated location string into (country, locality).

    Scans the parts for the first one pycountry recognises as a country and
    joins everything after it as the region/locality string.

    Returns:
        (country, "region, locality, ...") on success, or
        (location_str, None) when no part matches a known country.
    """
    loc_parts = [l.strip() for l in location_str.split('/')]
    # Idiom fix: enumerate() instead of indexing via range(len(...)).
    for idx, part in enumerate(loc_parts):
        if pycountry.countries.get(name=part):
            return part, ", ".join(loc_parts[idx + 1:])
    # return input string if country can't be found
    return location_str, None
#---------------------------------------#
# taxonomy methods #
#---------------------------------------#
"""
Add extra taxonomic information to a given dataframe
"""
def add_taxonomic_information(dataframe,gisaid_df):
    """Add virus and host taxonomy columns to the converted data.

    Virus taxonomy comes from --taxon (via the Ensembl REST lookups in
    taxon_id/scientific_name) or, when --taxon is absent, from the GISAID
    'covv_type' column (betacoronavirus is hard-mapped to SARS-CoV-2).
    Host scientific names are resolved once per unique 'host common name'.

    NOTE(review): performs network lookups; no error handling here beyond
    what taxonomy() raises.
    """
    num_rows = len(dataframe['collection date'])
    if opts.taxon:
        this_taxon_id = taxon_id(opts.taxon)
        dataframe['tax_id'] = [this_taxon_id for i in range(num_rows)]
        this_sci_name = scientific_name(opts.taxon)
        dataframe['scientific_name'] = [this_sci_name for i in range(num_rows)]
    # in case betacoronavirus is indicated in the spreadsheet and the --taxon is not specified
    elif opts.taxon == None:
        dataframe['tax_id'] =[]
        dataframe['scientific_name']=[]
        for tax in gisaid_df['covv_type']:
            if tax == 'betacoronavirus':
                # Hard-coded SARS-CoV-2 taxonomy (NCBI taxid 2697049).
                this_taxon_id= '2697049'
                this_sci_name = 'Severe acute respiratory syndrome coronavirus 2'
                dataframe['tax_id'].append (this_taxon_id)
                dataframe['scientific_name'].append(this_sci_name)
            else:
                dataframe['tax_id'].append('not provided')
                dataframe['scientific_name'].append('not provided')
    if dataframe['host common name']:
        # Resolve each unique host name once to avoid duplicate REST lookups.
        host_tax = set(dataframe['host common name'])
        host_tax_dict= {}
        for i in host_tax:
            k= i
            v = scientific_name(i)
            host_tax_dict[k]= v
        dataframe['host scientific name'] = []
        for x in dataframe['host common name']:
            if x in host_tax_dict:
                dataframe['host scientific name'].append(host_tax_dict[x])
            else:
                # NOTE(review): host_tax_dict was built from the set of all
                # host names, so this fallback lookup appears unreachable.
                dataframe['host scientific name'].append(scientific_name(x))
    return dataframe
"""
Return id from taxonomy
"""
def taxon_id(taxon_name_or_id):
    """Return the taxonomy id for a taxon name/id via the Ensembl REST lookup."""
    return taxonomy(taxon_name_or_id)['id']
"""
Return scientific name from taxonomy
"""
def scientific_name(taxon_name_or_id):
    """Return the scientific name for a taxon name/id via the Ensembl REST lookup."""
    return taxonomy(taxon_name_or_id)['scientific_name']
"""
Query EnsEMBL taxonomy REST endpoint
"""
def taxonomy(id_or_name):
    """Query the Ensembl taxonomy REST endpoint and return the decoded JSON.

    Raises:
        requests.HTTPError: when the endpoint returns a non-OK status.
    """
    endpoint = f"http://rest.ensembl.org/taxonomy/id/{id_or_name}?"
    r = requests.get(endpoint, headers={ "Content-Type" : "application/json"})
    if not r.ok:
        r.raise_for_status()
        # NOTE(review): raise_for_status() always raises when r.ok is False,
        # so this sys.exit() is unreachable dead code.
        sys.exit()
    decoded = r.json()
    return decoded
#---------------------------------------#
# spreadsheet methods #
#---------------------------------------#
"""
Misc formatting of the spreadsheet
"""
def format_sheet(writer, headers):
    """Apply ENA checklist formatting to the default worksheet.

    Writes the '#Checklist'/'#units' header rows and the column headers,
    highlighting mandatory checklist fields in orange and optional ones in
    black.

    Args:
        writer: pandas ExcelWriter using the xlsxwriter engine.
        headers: ordered list of ENA column header names.

    Returns:
        The same writer, for chaining.
    """
    workbook = writer.book
    worksheet = writer.sheets[default_sheet]
    fmt_orange = workbook.add_format({'bold':True, 'font_color': 'orange'})
    fmt_black = workbook.add_format({'bold':True, 'font_color': 'black'})
    # first, add and format the essential header rows
    worksheet.write(0, 0, '#Checklist', fmt_orange)
    worksheet.write(0, 1, 'ERC000033', fmt_black)
    worksheet.write(0, 2, 'ENA virus pathogen reporting standard checklist', fmt_black)
    worksheet.write(2, 0, '#units', fmt_orange)
    mandatory_headers = [
        'tax_id', 'scientific_name', 'collection date',
        'geographic location (country and/or sea)', 'sample_alias',
        'host common name', 'host sex', 'collector name',
        'collecting institution', 'host health state', 'sample_title',
        'sample_description',
    ]
    # second, add headers and highlight mandatory ones
    for i, header in enumerate(headers):
        # BUG FIX: the original compared `headers[i][-1]` (the last CHARACTER
        # of the header) against full header names, so the mandatory branch
        # was unreachable; it also wrote the truncated `headers[i][:-1]` in
        # that branch. Compare and write the full header instead.
        if header in mandatory_headers:
            worksheet.write(1, i, header, fmt_orange)
        else:
            worksheet.write(1, i, header, fmt_black)
    return writer
"""
Write pandas dataframe object to excel spreadsheet
"""
def write_dataframe(df, outfile):
    """Write the ENA dataframe to *outfile* as a formatted xlsx spreadsheet.

    Only acts when the global opts.outformat is an Excel variant; data rows
    start at row 3 so format_sheet() can write the checklist header rows.
    (Removed the original's unused `out_suffix` local.)
    """
    if opts.outformat.lower() in ['excel', 'xls', 'xlsx']:
        writer = pd.ExcelWriter(outfile, engine='xlsxwriter')
        df.to_excel(writer, index=False, columns=ena_fields, header=False, sheet_name=default_sheet, startrow=3, startcol=0)
        writer = format_sheet(writer, ena_fields)
        writer.save()
"""
Write pandas dataframe object to xml file
"""
def xml_generator (dataframe):
    """Write the ENA dataframe as a SAMPLE_SET XML document to opts.outfile.

    Builds the document with the module-level yattag `doc`/`tag`/`text`
    objects (so calling this twice would append to the same document).
    NaN and placeholder (' ') values are dropped before serialisation.
    """
    doc.asis(xml_header)
    # Replace pandas NaN with None so the placeholder filter below catches it.
    modified_ena_df = dataframe.where(pd.notnull(dataframe), None)
    with tag('SAMPLE_SET'):
        for item in modified_ena_df.to_dict('records'):
            cleaned_item_dict = {k: v for k, v in item.items() if v not in [None, ' ']}
            with tag('SAMPLE', alias=cleaned_item_dict['sample_alias']):
                with tag('TITLE'):
                    text(cleaned_item_dict['sample_title'])
                with tag('SAMPLE_NAME'):
                    with tag("TAXON_ID"):
                        text(cleaned_item_dict['tax_id'])
                    with tag("SCIENTIFIC_NAME"):
                        text(cleaned_item_dict['scientific_name'])
                with tag("DESCRIPTION"):
                    text(cleaned_item_dict['sample_description'])
                with tag('SAMPLE_ATTRIBUTES'):
                    # All remaining fields become TAG/VALUE attribute pairs.
                    # NOTE(review): the loop variable `object` shadows the builtin.
                    for header, object in cleaned_item_dict.items():
                        if header not in ['sample_alias', 'sample_title', 'tax_id', 'scientific_name',
                                          'sample_description']:
                            with tag("SAMPLE_ATTRIBUTE"):
                                with tag("TAG"):
                                    text(header)
                                with tag("VALUE"):
                                    text(object)
                    # Every sample declares the ERC000033 checklist explicitly.
                    with tag("SAMPLE_ATTRIBUTE"):
                        with tag("TAG"):
                            text("ENA-CHECKLIST")
                        with tag("VALUE"):
                            text("ERC000033")
    result = indent(
        doc.getvalue(),
        indent_text=False
    )
    with open(opts.outfile, "w") as f:
        f.write(result)
#------------------------#
# MAIN #
#------------------------#
if __name__ == "__main__":
opts = parse_args(sys.argv[1:])
gisaid_to_ena = gisaid_spreadsheet_mapping(opts)
gisaid_dataframe = parse_gisaid_metadata(opts)
ena_dataframe = convert_gisaid_to_ena(gisaid_dataframe, gisaid_to_ena)
if opts.outformat.lower() == 'xml':
if opts.outfile == None:
opts.outfile = 'ENA_output.xml'
ena_dataframe_rearranged = ena_dataframe[ena_fields]
xml_generator(ena_dataframe_rearranged)
elif opts.outformat.lower() in ['excel','xls','xlsx']:
if opts.outfile == None:
opts.outfile = 'ENA_output.xlsx'
write_dataframe(ena_dataframe, opts.outfile)
else:
sys.stderr.write(f'The file format "{opts.outformat}" is not supported, accepted values : [xml, xls, xlsx, excel]')
| 37.818653 | 272 | 0.638855 |
da2aac1a8792dcdf4ed2e1ab5bfe53fbd1635f3b | 128 | py | Python | manage.py | rohit0110/NOTEify | 60f3e7f20798967ec7394d8a51c35d4c2f5b59f5 | [
"MIT"
] | null | null | null | manage.py | rohit0110/NOTEify | 60f3e7f20798967ec7394d8a51c35d4c2f5b59f5 | [
"MIT"
] | null | null | null | manage.py | rohit0110/NOTEify | 60f3e7f20798967ec7394d8a51c35d4c2f5b59f5 | [
"MIT"
] | null | null | null | from app import create_app
# Application instance created at import time (presumably a Flask-style app
# factory in app.py — confirm) so servers importing this module can find `app`.
app = create_app()
if __name__ == "__main__":
    # Local development entry point only; debug=True must not be used in production.
    app.run(port=5000, debug=True, host="127.0.0.1")
| 18.285714 | 52 | 0.679688 |
1df1c327b9ef5ffa730a7780377b9aa3dd6fba12 | 639 | py | Python | Backend/core/management/commands/populate_data.py | saksham1991999/Yoga-Pose-Analyser | b1e9c4abb4882e20dd3d5f1b2ffca5464a675529 | [
"MIT"
] | null | null | null | Backend/core/management/commands/populate_data.py | saksham1991999/Yoga-Pose-Analyser | b1e9c4abb4882e20dd3d5f1b2ffca5464a675529 | [
"MIT"
] | null | null | null | Backend/core/management/commands/populate_data.py | saksham1991999/Yoga-Pose-Analyser | b1e9c4abb4882e20dd3d5f1b2ffca5464a675529 | [
"MIT"
] | null | null | null | from django.contrib.auth.models import User
from django.core.management.base import BaseCommand
from django.db import transaction
from django.utils.crypto import get_random_string
from faker import Faker
from core.management.commands.populate import (
accounts,
)
# Module-level Faker instance; seeded with a fixed value so the generated
# fake data is reproducible across runs.
fake = Faker()
Faker.seed(999)
class Command(BaseCommand):
    """Management command that populates the database with generated users."""

    help = 'Populate the database'

    def add_arguments(self, parser):
        # Required positional argument: how many users to create.
        parser.add_argument(
            'total',
            type=int,
            help='Indicates the number of users to be created',
        )

    @transaction.atomic  # all-or-nothing: roll back every user on failure
    def handle(self, *args, **kwargs):
        accounts.populate_users(kwargs['total'])
| 24.576923 | 98 | 0.733959 |
a8fee1003104c06224c8cd30ccdbf20f98301e54 | 1,929 | py | Python | src/oplog_watcher.py | anmolsachan/monotif | 46c4672cfff432bd3fafcf6fa3a3162efbd0f4d3 | [
"MIT"
] | null | null | null | src/oplog_watcher.py | anmolsachan/monotif | 46c4672cfff432bd3fafcf6fa3a3162efbd0f4d3 | [
"MIT"
] | null | null | null | src/oplog_watcher.py | anmolsachan/monotif | 46c4672cfff432bd3fafcf6fa3a3162efbd0f4d3 | [
"MIT"
] | null | null | null | from pymongo.cursor import _QUERY_OPTIONS
import pymongo
from oplog_config import host,port,changes_db,changes_collection,sleep
from pymongo import MongoClient
import time
from global_congif import watch_db,watch_collection
def doc_insertor(doc):
    """Persist an oplog entry for the watched namespace into the changes collection.

    Only entries whose namespace (``doc["ns"]``) matches the configured
    ``watch_db.watch_collection`` are stored; everything else is ignored.
    The stored document is the oplog payload (``doc["o"]``) tagged with
    ``status="unnotified"`` and its original ``_id`` moved to ``doc_id`` so
    Mongo can assign a fresh ``_id`` on insert.
    """
    updated_client = doc["ns"]
    watch_client = watch_db + "." + watch_collection
    if updated_client == watch_client:
        client = None
        try:
            client = MongoClient(host, port)
            col = client[changes_db][changes_collection]
            payload = doc["o"]
            payload["status"] = "unnotified"
            # Rename _id so the changes collection gets its own primary key
            # while the source document id is preserved for later lookup.
            payload["doc_id"] = payload.pop("_id")
            col.insert(payload)
        except Exception as e:
            # Best-effort: log and continue watching rather than crash the tailer.
            print("Mongo changed doc Insertion failed")
            print(e)
        finally:
            # Close unconditionally; the original leaked the connection when
            # the insert raised because close() sat on the success path only.
            if client is not None:
                client.close()
def watcher():
    """Monitor the MongoDB oplog and forward changes for notification.

    Tails ``local.oplog.rs`` starting from the newest timestamp, handing each
    new entry to :func:`doc_insertor`. When the tailable cursor dies it is
    re-opened from the last processed timestamp. Requires a replica set
    (otherwise there is no oplog) and a reachable mongod.
    """
    try:
        client = MongoClient(host, port)
        db = client["local"]
        tail_opts = {'tailable': True, 'await_data': True}
        # Start from the newest existing entry so only future changes are seen.
        last_ts = db.oplog.rs.find().sort('$natural', -1)[0]['ts']
        print(last_ts)
        while True:
            # Resume strictly after the last processed entry. The original
            # never advanced last_ts, so every cursor restart replayed (and
            # re-inserted) already-handled oplog entries.
            query = {'ts': {'$gt': last_ts}}
            cursor = db.oplog.rs.find(query, **tail_opts)
            cursor.add_option(_QUERY_OPTIONS['oplog_replay'])
            while cursor.alive:
                try:
                    doc = cursor.next()
                    last_ts = doc['ts']  # checkpoint before forwarding
                    # Stash the change in a separate collection for later
                    # notification processing.
                    doc_insertor(doc)
                except StopIteration:
                    # No new data yet on the tailable cursor; back off briefly.
                    time.sleep(sleep)
    except Exception as e:
        # Most likely mongod is down or no replica set (hence no oplog).
        print("Unable to run oplog_watcher")
        print(e)
if __name__ == '__main__':
    # Entry point: announce startup, then tail the oplog indefinitely.
    print("Starting oplog_watcher")
    watcher()
| 35.722222 | 168 | 0.711249 |
ef3c724e093df4ed308dff33fe557d88e2531b27 | 486 | py | Python | settings.py | erigones/api_ipf | 2f75bce7b15a409b38c9a8ed32a0d7af27c589f6 | [
"BSD-3-Clause"
] | null | null | null | settings.py | erigones/api_ipf | 2f75bce7b15a409b38c9a8ed32a0d7af27c589f6 | [
"BSD-3-Clause"
] | null | null | null | settings.py | erigones/api_ipf | 2f75bce7b15a409b38c9a8ed32a0d7af27c589f6 | [
"BSD-3-Clause"
] | null | null | null | from eszone_ipf.settings import BASE_DIR
# Directory for storing configuration files.
CONF_DIR = ''.join([BASE_DIR, '/conf/'])
# Directory for storing logs.
LOG_DIR = ''.join([BASE_DIR, '/log/'])
# Directory that contains backup configuration files.
BCK_DIR = ''.join([BASE_DIR, '/api_ipf/bck/'])
# Warning in the ippool.conf configuration file
# for a recognition of a user defined ippool and IP blacklist.
CONF_WARNING = '#CONFIGURATION UNDER THIS LINE WILL BE DELETED AT UPDATE'
| 32.4 | 73 | 0.746914 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.