hexsha
stringlengths 40
40
| size
int64 1
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
239
| max_stars_repo_name
stringlengths 5
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
239
| max_issues_repo_name
stringlengths 5
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
239
| max_forks_repo_name
stringlengths 5
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.03M
| avg_line_length
float64 1
958k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a0df5b44100d80eaec45ea0e6a2b4184f13e915
| 934
|
py
|
Python
|
tests/test-runners/subject/package/sub_package1/SubSomething1.py
|
jayvdb/Nuitka
|
0ff702e065b1b53231ba0cae451385a3da0fe766
|
[
"Apache-2.0"
] | 1
|
2019-03-31T09:56:11.000Z
|
2019-03-31T09:56:11.000Z
|
tests/test-runners/subject/package/sub_package1/SubSomething1.py
|
jayvdb/Nuitka
|
0ff702e065b1b53231ba0cae451385a3da0fe766
|
[
"Apache-2.0"
] | 1
|
2019-03-01T11:33:40.000Z
|
2019-03-01T11:33:40.000Z
|
tests/test-runners/subject/package/sub_package1/SubSomething1.py
|
jayvdb/Nuitka
|
0ff702e065b1b53231ba0cae451385a3da0fe766
|
[
"Apache-2.0"
] | 1
|
2019-03-26T16:56:21.000Z
|
2019-03-26T16:56:21.000Z
|
# Copyright 2019, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Python test originally created or extracted from other peoples work. The
# parts from me are licensed as below. It is at least Free Software where
# it's copied from other people. In these cases, that will normally be
# indicated.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def calledByTest1():
    # Fixed pair of values that the test runner expects back.
    expected_pair = (42, 1)
    return expected_pair
| 40.608696
| 78
| 0.711991
|
4a0df671ba56b4dbe7c7b78865292dcbcfc7b455
| 4,642
|
py
|
Python
|
neutron/plugins/brocade/db/models.py
|
ksshanam/neutron-dvr
|
c0854ea0d1023ab42e1ef861f9b6ff480e985ac5
|
[
"Apache-2.0"
] | 3
|
2015-02-02T02:51:39.000Z
|
2015-02-23T10:20:23.000Z
|
neutron/plugins/brocade/db/models.py
|
ksshanam/neutron-dvr
|
c0854ea0d1023ab42e1ef861f9b6ff480e985ac5
|
[
"Apache-2.0"
] | 4
|
2015-02-23T10:21:11.000Z
|
2015-03-04T09:28:20.000Z
|
neutron/plugins/brocade/db/models.py
|
ksshanam/neutron-dvr
|
c0854ea0d1023ab42e1ef861f9b6ff480e985ac5
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2013 Brocade Communications System, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Authors:
# Shiv Haris (sharis@brocade.com)
# Varma Bhupatiraju (vbhupati@brocade.com)
"""Brocade specific database schema/model."""
import sqlalchemy as sa
from neutron.db import model_base
from neutron.db import models_v2
class BrocadeNetwork(model_base.BASEV2, models_v2.HasId):
    """Schema for brocade network.

    Maps a neutron network (via the inherited ``id`` column) to the
    VLAN provisioned for it on the Brocade switch.
    """

    # VLAN id stored as a string, e.g. "100".
    vlan = sa.Column(sa.String(10))
class BrocadePort(model_base.BASEV2):
    """Schema for brocade port.

    Holds per-port policy (VLAN, physical interface) keyed by the
    *truncated* neutron port id (see create_port/get_port below).
    """

    # Primary key is the truncated (11-char) neutron port id.
    port_id = sa.Column(sa.String(36), primary_key=True, default="",
                        server_default='')
    network_id = sa.Column(sa.String(36),
                           sa.ForeignKey("brocadenetworks.id"),
                           nullable=False)
    admin_state_up = sa.Column(sa.Boolean, nullable=False)
    physical_interface = sa.Column(sa.String(36))
    vlan_id = sa.Column(sa.String(36))
    tenant_id = sa.Column(sa.String(36))
def create_network(context, net_id, vlan):
    """Persist a brocade-specific network row with its VLAN assignment."""
    session = context.session
    with session.begin(subtransactions=True):
        network = BrocadeNetwork(id=net_id, vlan=vlan)
        session.add(network)
    return network
def delete_network(context, net_id):
    """Remove the brocade-specific row for a network, if one exists."""
    session = context.session
    with session.begin(subtransactions=True):
        record = session.query(BrocadeNetwork).filter_by(id=net_id).first()
        if record is not None:
            session.delete(record)
def get_network(context, net_id, fields=None):
    """Fetch the brocade-specific network row (carries the vlan extension)."""
    query = context.session.query(BrocadeNetwork)
    return query.filter_by(id=net_id).first()
def get_networks(context, filters=None, fields=None):
    """Return every brocade-specific network, or None on a database error."""
    try:
        return context.session.query(BrocadeNetwork).all()
    except sa.exc.SQLAlchemyError:
        # Preserve the best-effort contract: callers get None on failure.
        return None
def create_port(context, port_id, network_id, physical_interface,
                vlan_id, tenant_id, admin_state_up):
    """Persist a brocade-specific port row (carries vlan policy).

    The stored id is truncated to 11 characters: linux-bridge tap device
    names embed the truncated port id, which makes lookups by tap device
    possible.
    """
    truncated_id = port_id[0:11]
    session = context.session
    with session.begin(subtransactions=True):
        port = BrocadePort(port_id=truncated_id,
                           network_id=network_id,
                           physical_interface=physical_interface,
                           vlan_id=vlan_id,
                           admin_state_up=admin_state_up,
                           tenant_id=tenant_id)
        session.add(port)
    return port
def get_port(context, port_id):
    """Fetch a brocade-specific port by its (truncated) id."""
    truncated_id = port_id[0:11]
    query = context.session.query(BrocadePort)
    return query.filter_by(port_id=truncated_id).first()
def get_ports(context, network_id=None):
    """Return all brocade-specific ports attached to the given network."""
    query = context.session.query(BrocadePort)
    return query.filter_by(network_id=network_id).all()
def delete_port(context, port_id):
    """Delete the brocade-specific row for a port, if one exists."""
    truncated_id = port_id[0:11]
    session = context.session
    with session.begin(subtransactions=True):
        record = session.query(BrocadePort).filter_by(
            port_id=truncated_id).first()
        if record is not None:
            session.delete(record)
def get_port_from_device(session, port_id):
    """Look up a brocade port by tap device name.

    The tap device name is identical to the truncated port id, so no
    further truncation is needed here.
    """
    return session.query(BrocadePort).filter_by(port_id=port_id).first()
def update_port_state(context, port_id, admin_state_up):
    """Set the admin_state_up flag on a brocade-specific port."""
    truncated_id = port_id[0:11]
    session = context.session
    session.query(BrocadePort).filter_by(
        port_id=truncated_id).update({'admin_state_up': admin_state_up})
| 30.741722
| 79
| 0.672555
|
4a0df700b68eade1aa9dcdb363b83705fb8be0be
| 5,081
|
py
|
Python
|
conans/client/build/meson.py
|
datalogics-kam/conan
|
7bf230cd5f8ef68eb804908777ebaad75e951b16
|
[
"MIT"
] | null | null | null |
conans/client/build/meson.py
|
datalogics-kam/conan
|
7bf230cd5f8ef68eb804908777ebaad75e951b16
|
[
"MIT"
] | null | null | null |
conans/client/build/meson.py
|
datalogics-kam/conan
|
7bf230cd5f8ef68eb804908777ebaad75e951b16
|
[
"MIT"
] | null | null | null |
import os
from conans import tools
from conans.client import join_arguments, defs_to_string
from conans.errors import ConanException
from conans.tools import args_to_string
from conans.util.files import mkdir, get_abs_path
class Meson(object):
    """Build helper that drives the Meson build system from a conanfile.

    Wraps ``meson`` (configure step) and ``ninja`` (build step),
    translating conan settings (os, compiler, build_type) into the
    corresponding command-line flags.
    """

    def __init__(self, conanfile, backend=None, build_type=None):
        """
        :param conanfile: Conanfile instance (or settings for retro compatibility)
        :param backend: Generator name to use or none to autodetect.
               Possible values: ninja,vs,vs2010,vs2015,vs2017,xcode
        :param build_type: Overrides default build type coming from settings
        """
        self._conanfile = conanfile
        self._settings = conanfile.settings
        self._os = self._settings.get_safe("os")
        self._compiler = self._settings.get_safe("compiler")
        self._compiler_version = self._settings.get_safe("compiler.version")
        self._build_type = self._settings.get_safe("build_type")
        self.backend = backend or "ninja"  # Other backends are poorly supported, not default other.
        self.build_dir = None
        if build_type and build_type != self._build_type:
            # Call the setter to warn and update the definitions if needed
            self.build_type = build_type

    @property
    def build_type(self):
        return self._build_type

    @build_type.setter
    def build_type(self, build_type):
        # Warn when the explicit build_type diverges from settings, then
        # accept the override.
        settings_build_type = self._settings.get_safe("build_type")
        if build_type != settings_build_type:
            self._conanfile.output.warn(
                'Set build type "%s" is different than the settings build_type "%s"'
                % (build_type, settings_build_type))
        self._build_type = build_type

    @property
    def build_folder(self):
        return self.build_dir

    @build_folder.setter
    def build_folder(self, value):
        self.build_dir = value

    def _get_dirs(self, source_folder, build_folder, source_dir, build_dir, cache_build_folder):
        """Resolve source/build directories from either the new
        (``*_folder``) or deprecated (``*_dir``) arguments.

        :raises ConanException: if both naming styles are mixed.
        """
        if (source_folder or build_folder) and (source_dir or build_dir):
            raise ConanException("Use 'build_folder'/'source_folder'")

        if source_dir or build_dir:  # OLD MODE
            build_ret = build_dir or self.build_dir or self._conanfile.build_folder
            source_ret = source_dir or self._conanfile.source_folder
        else:
            build_ret = get_abs_path(build_folder, self._conanfile.build_folder)
            source_ret = get_abs_path(source_folder, self._conanfile.source_folder)

        # cache_build_folder only applies when building inside the local cache.
        if self._conanfile.in_local_cache and cache_build_folder:
            build_ret = get_abs_path(cache_build_folder, self._conanfile.build_folder)

        return source_ret, build_ret

    def configure(self, args=None, defs=None, source_dir=None, build_dir=None,
                  pkg_config_paths=None, cache_build_folder=None,
                  build_folder=None, source_folder=None):
        """Run ``meson <source> <build> ...`` with flags derived from settings."""
        if not self._conanfile.should_configure:
            return
        args = args or []
        defs = defs or {}

        source_dir, self.build_dir = self._get_dirs(source_folder, build_folder,
                                                    source_dir, build_dir,
                                                    cache_build_folder)

        if pkg_config_paths:
            pc_paths = os.pathsep.join(get_abs_path(f, self._conanfile.install_folder)
                                       for f in pkg_config_paths)
        else:
            pc_paths = self._conanfile.install_folder

        mkdir(self.build_dir)

        # Map conan build types onto meson --buildtype values.
        # NOTE(review): MinSizeRel maps to "release", not meson's "minsize" —
        # presumably intentional for compatibility; confirm before changing.
        bt = {"RelWithDebInfo": "debugoptimized",
              "MinSizeRel": "release",
              "Debug": "debug",
              "Release": "release"}.get(str(self.build_type), "")
        build_type = "--buildtype=%s" % bt
        arg_list = join_arguments([
            "--backend=%s" % self.backend,
            args_to_string(args),
            defs_to_string(defs),
            build_type
        ])
        command = 'meson "%s" "%s" %s' % (source_dir, self.build_dir, arg_list)
        command = self._append_vs_if_needed(command)
        with tools.environment_append({"PKG_CONFIG_PATH": pc_paths}):
            self._conanfile.run(command)

    def _append_vs_if_needed(self, command):
        # ninja needs the MSVC environment (vcvars) set up when compiling
        # with Visual Studio.
        if self._compiler == "Visual Studio" and self.backend == "ninja":
            command = "%s && %s" % (tools.vcvars_command(self._conanfile.settings), command)
        return command

    def build(self, args=None, build_dir=None, targets=None):
        """Run ``ninja`` in the configured build directory.

        :raises ConanException: if a non-ninja backend was selected.
        """
        if not self._conanfile.should_build:
            return
        if self.backend != "ninja":
            raise ConanException("Build only supported with 'ninja' backend")

        args = args or []
        build_dir = build_dir or self.build_dir or self._conanfile.build_folder

        arg_list = join_arguments([
            '-C "%s"' % build_dir,
            args_to_string(args),
            args_to_string(targets)
        ])
        command = "ninja %s" % arg_list
        command = self._append_vs_if_needed(command)
        self._conanfile.run(command)
| 39.387597
| 100
| 0.635111
|
4a0df71ebd9cd51140ea272ede056d5b6ce012c7
| 40,688
|
py
|
Python
|
python/ray/tune/ray_trial_executor.py
|
jianoaix/ray
|
1701b923bc83905f8961c06a6a173e3eba46a936
|
[
"Apache-2.0"
] | null | null | null |
python/ray/tune/ray_trial_executor.py
|
jianoaix/ray
|
1701b923bc83905f8961c06a6a173e3eba46a936
|
[
"Apache-2.0"
] | null | null | null |
python/ray/tune/ray_trial_executor.py
|
jianoaix/ray
|
1701b923bc83905f8961c06a6a173e3eba46a936
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
import copy
import inspect
import random
from collections import deque
from enum import Enum
from functools import partial
import logging
import os
import time
import traceback
from contextlib import contextmanager
from typing import (
Callable,
Dict,
Iterable,
List,
Optional,
Set,
Union,
)
import ray
from ray.exceptions import GetTimeoutError, RayTaskError
from ray.tune.error import (
_AbortTrialExecution,
TuneError,
_TuneStartTrialError,
_TuneNoNextExecutorEventError,
)
from ray.tune.logger import NoopLogger
from ray.tune.result import TRIAL_INFO, STDOUT_FILE, STDERR_FILE
from ray.tune.utils.placement_groups import _PlacementGroupManager, get_tune_pg_prefix
from ray.tune.utils.trainable import TrainableUtil
from ray.tune.trial import Trial, _Location, _TrialInfo
from ray.tune.utils import warn_if_slow
from ray.tune.utils.resource_updater import _ResourceUpdater
from ray.util import log_once
from ray.util.annotations import DeveloperAPI
from ray.util.ml_utils.checkpoint_manager import _TrackedCheckpoint, CheckpointStorage
from ray.util.placement_group import remove_placement_group, PlacementGroup
logger = logging.getLogger(__name__)
DEFAULT_GET_TIMEOUT = 60.0 # seconds
class _ActorClassCache:
    """Memoizes ``ray.remote`` registrations of trainable classes.

    ``ray.remote`` is a registration call: it serializes the class into the
    key-value store, from which an arbitrary worker later fetches it.
    ``cls.remote()`` is what actually instantiates the actor. Registering
    the same class repeatedly would flood the store (which does not spill
    to disk), so this cache guarantees one registration per trainable
    class, while still allowing multiple distinct trainables at once.
    """

    def __init__(self):
        self._cache = {}

    def get(self, trainable_cls):
        """Gets the wrapped trainable_cls, otherwise calls ray.remote."""
        runtime_env = {"env_vars": {"TUNE_ORIG_WORKING_DIR": os.getcwd()}}
        try:
            return self._cache[trainable_cls]
        except KeyError:
            remote_cls = ray.remote(runtime_env=runtime_env)(trainable_cls)
            self._cache[trainable_cls] = remote_cls
            return remote_cls


_class_cache = _ActorClassCache()
class _LocalWrapper:
def __init__(self, result):
self._result = result
def unwrap(self):
"""Returns the wrapped result."""
return self._result
def post_stop_cleanup(future, pg):
    """Finalize a stopped trial: drain its stop future, then free its PG."""
    assert isinstance(pg, PlacementGroup)
    try:
        # We are only triggered once the future has resolved, so a zero
        # timeout should not block.
        ray.get(future, timeout=0)
    except GetTimeoutError:
        if log_once("tune_trial_cleanup_timeout"):
            logger.error(
                "Timed out when trying to stop the Ray actor gracefully. "
                "Consider making `stop` a faster operation."
            )
    except Exception:
        if log_once("tune_trial_cleanup_exception"):
            logger.error(
                f"An exception occurred when trying to stop the Ray actor:"
                f"{traceback.format_exc()}"
            )
    finally:
        # The placement group is released no matter how `stop` ended.
        remove_placement_group(pg)
class _TrialCleanup:
"""Responsible for triggering force cleanup of remote actors,
without waiting for `Trainable.stop()` to finish.
Only instantiated when `TUNE_FORCE_TRIAL_CLEANUP_S` is set up.
"""
def __init__(self, force_cleanup):
assert force_cleanup
self._force_cleanup = force_cleanup
self._future_to_insert_time = deque()
def add(self, future):
self._future_to_insert_time.append((future, time.time()))
def get_next(self):
"""Get the next future that is eligible to be cleaned up forcibly."""
if (
len(self._future_to_insert_time) > 0
and self._future_to_insert_time[0][1] + self._force_cleanup < time.time()
):
return self._future_to_insert_time.popleft()
else:
return None
def is_empty(self):
return len(self._future_to_insert_time) == 0
def noop_logger_creator(config, logdir):
    """Create a NoopLogger, ensuring ``logdir`` exists.

    Also chdirs the remote process into ``logdir`` (except in Ray local
    mode, where the "remote" call runs in the driver process) so user
    file writes land in the trial directory.
    """
    os.makedirs(logdir, exist_ok=True)
    # Idiom fix: `x != y` instead of `not x == y`.
    if ray.worker._mode() != ray.worker.LOCAL_MODE:
        os.chdir(logdir)
    return NoopLogger(config, logdir)
class _ExecutorEventType(Enum):
    """The executor event type.

    Some of the events are internal events to executor while others
    are handled by runner."""

    # No trial produced an event before the timeout elapsed.
    NO_RUNNING_TRIAL_TIMEOUT = 1
    # A staged placement group became ready.
    PG_READY = 2
    TRAINING_RESULT = 3
    SAVING_RESULT = 4
    RESTORING_RESULT = 5
    STOP_RESULT = 6  # Internally to executor only.
    ERROR = 7  # This is to signal to TrialRunner that there is an error.
    YIELD = 8  # Yielding back to TrialRunner's main event loop.
class _ExecutorEvent:
    """Struct describing one event for the TrialRunner to process.

    Attributes:
        result: A dict with keys of "future_result" and "exception".
            "future_result" holds the value when the future resolved
            successfully; "exception" holds the exception caught during
            ``ray.get(future)``.
    """

    KEY_FUTURE_RESULT = "future_result"
    KEY_EXCEPTION = "exception"

    def __init__(
        self,
        event_type: _ExecutorEventType,
        trial: Optional[Trial] = None,
        result: Optional[Dict] = None,
    ):
        self.type = event_type
        self.trial = trial
        self.result = result

    def __repr__(self):
        return "[{}] for {}".format(self.type, self.trial)
@DeveloperAPI
class RayTrialExecutor:
"""An implementation of TrialExecutor based on Ray."""
def __init__(
self,
reuse_actors: bool = False,
result_buffer_length: Optional[int] = None,
refresh_period: Optional[float] = None,
):
self._cached_trial_state = {}
self._trials_to_cache = set()
# future --> (type, trial/pg)
self._futures = {}
force_trial_cleanup = int(os.environ.get("TUNE_FORCE_TRIAL_CLEANUP_S", "0"))
self._get_next_event_wait = int(
os.environ.get("TUNE_GET_EXECUTOR_EVENT_WAIT_S", "5")
)
if force_trial_cleanup:
self._trial_cleanup = _TrialCleanup(force_trial_cleanup)
else:
self._trial_cleanup = None
self._resource_updater = _ResourceUpdater(refresh_period)
self._has_cleaned_up_pgs = False
self._reuse_actors = reuse_actors
# The maxlen will be updated when `set_max_pending_trials()` is called
self._cached_actor_pg = deque(maxlen=1)
self._pg_manager = _PlacementGroupManager(prefix=get_tune_pg_prefix())
self._staged_trials = set()
self._trial_just_finished = False
self._trial_just_finished_before = False
self.last_pg_recon = 0
self.pg_recon_interval = float(
os.environ.get("TUNE_PLACEMENT_GROUP_RECON_INTERVAL", "5")
)
self._buffer_length = result_buffer_length or int(
os.getenv("TUNE_RESULT_BUFFER_LENGTH", 1)
)
self._buffer_min_time_s = float(os.getenv("TUNE_RESULT_BUFFER_MIN_TIME_S", 0.0))
self._buffer_max_time_s = float(
os.getenv("TUNE_RESULT_BUFFER_MAX_TIME_S", 100.0)
)
def set_max_pending_trials(self, max_pending: int) -> None:
if len(self._cached_actor_pg) > 0:
logger.warning(
"Cannot update maximum number of queued actors for reuse "
"during a run."
)
else:
self._cached_actor_pg = deque(maxlen=max_pending)
self._pg_manager.set_max_staging(max_pending)
def set_status(self, trial: Trial, status: str) -> None:
"""Sets status and checkpoints metadata if needed.
Only checkpoints metadata if trial status is a terminal condition.
PENDING, PAUSED, and RUNNING switches have checkpoints taken care of
in the TrialRunner.
Args:
trial: Trial to checkpoint.
status: Status to set trial to.
"""
if trial.status == status:
logger.debug("Trial %s: Status %s unchanged.", trial, trial.status)
else:
logger.debug(
"Trial %s: Changing status from %s to %s.", trial, trial.status, status
)
trial.set_status(status)
if status in [Trial.TERMINATED, Trial.ERROR]:
self._trials_to_cache.add(trial)
def mark_trial_to_checkpoint(self, trial: Trial) -> None:
self._trials_to_cache.add(trial)
def get_checkpoints(self) -> Dict[str, str]:
"""Returns a copy of mapping of the trial ID to pickled metadata."""
for trial in self._trials_to_cache:
self._cached_trial_state[trial.trial_id] = trial.get_json_state()
self._trials_to_cache.clear()
return self._cached_trial_state
def _stage_and_update_status(self, trials: Iterable[Trial]):
"""Check and update statuses of scheduled placement groups.
Stages placement groups of all trials.
"""
if not self._has_cleaned_up_pgs:
# Clean up existing placement groups after trigger the tuning
# run step() method for the first time
self._pg_manager.cleanup_existing_pg()
self._has_cleaned_up_pgs = True
for trial in trials:
if trial.status not in (Trial.PENDING, Trial.PAUSED):
continue
if trial in self._staged_trials:
continue
if self._pg_manager.trial_in_use(trial):
continue
if not self._pg_manager.stage_trial_pg(trial):
# Break if we reached the limit of pending placement groups.
break
self._staged_trials.add(trial)
self._pg_manager.update_status()
def get_staged_trial(self):
"""Get a trial whose placement group was successfully staged.
Can also return None if no trial is available.
Returns:
Trial object or None.
"""
# TODO(xwjiang): This method should consider `self._cached_actor_pg`.
for trial in self._staged_trials:
if self._pg_manager.has_ready(trial):
return trial
return None
    def _setup_remote_runner(self, trial):
        """Create (or reuse) the remote Trainable actor for ``trial``.

        Returns the actor handle, or None when the trial's placement
        group is not ready yet. Raises _AbortTrialExecution when actor
        reuse fails or the trainable cannot be resolved.
        """
        trial.init_logdir()
        # We checkpoint metadata here to try mitigating logdir duplication
        self._trials_to_cache.add(trial)
        logger_creator = partial(noop_logger_creator, logdir=trial.logdir)

        if len(self._cached_actor_pg) > 0:
            # Reuse path: take a cached (actor, pg) pair instead of
            # spawning a new actor.
            assert self._reuse_actors
            existing_runner, pg = self._cached_actor_pg.popleft()
            logger.debug(f"Trial {trial}: Reusing cached runner " f"{existing_runner}")

            trial.set_runner(existing_runner)
            if pg:
                self._pg_manager.assign_cached_pg(pg, trial)

            if not self.reset_trial(
                trial, trial.config, trial.experiment_tag, logger_creator
            ):
                raise _AbortTrialExecution(
                    "Trainable runner reuse requires reset_config() to be "
                    "implemented and return True."
                )
            return existing_runner

        trainable_cls = trial.get_trainable_cls()
        if not trainable_cls:
            raise _AbortTrialExecution(
                f"Invalid trainable: {trial.trainable_name}. If you passed "
                f"a string, make sure the trainable was registered before."
            )
        _actor_cls = _class_cache.get(trainable_cls)

        if not self._pg_manager.has_ready(trial):
            # Placement group not ready yet; caller retries later.
            return None

        full_actor_class = self._pg_manager.get_full_actor_cls(trial, _actor_cls)
        # Clear the Trial's location (to be updated later on result)
        # since we don't know where the remote runner is placed.
        trial.set_location(_Location())
        logger.debug("Trial %s: Setting up new remote runner.", trial)
        # Logging for trials is handled centrally by TrialRunner, so
        # configure the remote runner to use a noop-logger.
        trial_config = copy.deepcopy(trial.config)
        trial_config[TRIAL_INFO] = _TrialInfo(trial)
        stdout_file, stderr_file = trial.log_to_file
        trial_config[STDOUT_FILE] = stdout_file
        trial_config[STDERR_FILE] = stderr_file
        kwargs = {
            "config": trial_config,
            "logger_creator": logger_creator,
        }
        if trial.uses_cloud_checkpointing:
            # We keep these kwargs separate for backwards compatibility
            # with trainables that don't provide these keyword arguments
            kwargs["remote_checkpoint_dir"] = trial.remote_checkpoint_dir
            kwargs["custom_syncer"] = trial.custom_syncer

            # Throw a meaningful error if trainable does not use the
            # new API
            sig = inspect.signature(trial.get_trainable_cls())
            try:
                sig.bind_partial(**kwargs)
            except Exception as e:
                raise RuntimeError(
                    "Your trainable class does not accept a "
                    "`remote_checkpoint_dir` or `custom_syncer` argument "
                    "in its constructor, but you've passed a "
                    "`upload_dir` to your SyncConfig. Without accepting "
                    "these parameters and passing them to the base trainable "
                    "constructor in the init call, cloud checkpointing is "
                    "effectively disabled. To resolve this issue, add the "
                    "parameters to your trainable class constructor or "
                    "disable cloud checkpointing by setting `upload_dir=None`."
                ) from e

        with self._change_working_directory(trial):
            return full_actor_class.remote(**kwargs)
def _train(self, trial):
"""Start one iteration of training and save remote id."""
if self._find_future(trial):
logging.debug(
"Trial {} already has a queued future. Skipping this "
"`train` call. This may occur if a trial has "
"been unpaused within a scheduler callback.".format(str(trial))
)
return
assert trial.status == Trial.RUNNING, trial.status
buffer_time_s = max(
self._buffer_min_time_s,
min(self._buffer_max_time_s, len(self._futures) // 10),
)
with self._change_working_directory(trial):
buffer_length = self._buffer_length
if buffer_length > 1 and trial.checkpoint_at_end:
# If a trial checkpoint can be triggered externally,
# it is not safe to buffer results.
if log_once("trial_executor_buffer_checkpoint"):
logger.warning(
"Disabling buffered training as you passed "
"`checkpoint_at_end` to `tune.run()`."
)
buffer_length = 1
if buffer_length > 1:
if trial.checkpoint_freq > 0:
buffer_length = min(buffer_length, trial.checkpoint_freq)
remote = trial.runner.train_buffered.remote(
buffer_time_s, buffer_length
)
else:
remote = trial.runner.train.remote()
# Local Mode
if isinstance(remote, dict):
remote = _LocalWrapper(remote)
self._futures[remote] = (_ExecutorEventType.TRAINING_RESULT, trial)
trial_item = self._find_future(trial)
assert len(trial_item) < 2, trial_item
    def _start_trial(self, trial: Trial) -> bool:
        """Starts trial and restores last result if trial was paused.

        Args:
            trial: The trial to start.

        Returns:
            True if trial was started successfully, False otherwise
            (e.g. the placement group is not ready yet, so no runner
            could be created).

        See `RayTrialExecutor.restore` for possible errors raised.
        """
        self.set_status(trial, Trial.PENDING)
        runner = self._setup_remote_runner(trial)
        if not runner:
            return False
        trial.set_runner(runner)
        self.restore(trial)
        self.set_status(trial, Trial.RUNNING)
        self._staged_trials.discard(trial)
        # While restoring, training is kicked off once the restore
        # future resolves, not here.
        if not trial.is_restoring:
            self._train(trial)
        return True
    def _stop_trial(
        self,
        trial: Trial,
        error: bool = False,
        exc: Optional[Union[TuneError, RayTaskError]] = None,
    ):
        """Stops this trial.

        Stops this trial, releasing all allocating resources. If stopping the
        trial fails, the run will be marked as terminated in error, but no
        exception will be thrown.

        Args:
            error: Whether to mark this trial as terminated in error.
            exc: Optional exception.
        """
        self.set_status(trial, Trial.ERROR if error or exc else Trial.TERMINATED)
        self._trial_just_finished = True
        trial.set_location(_Location())

        try:
            trial.write_error_log(exc=exc)
            if hasattr(trial, "runner") and trial.runner:
                # Cache the actor for reuse only on clean stops, when
                # reuse is enabled, and while the cache has room.
                if (
                    not error
                    and self._reuse_actors
                    and (
                        len(self._cached_actor_pg)
                        < (self._cached_actor_pg.maxlen or float("inf"))
                    )
                ):
                    logger.debug("Reusing actor for %s", trial.runner)
                    # Move PG into cache (disassociate from trial)
                    pg = self._pg_manager.cache_trial_pg(trial)
                    if pg:
                        # True if a placement group was replaced
                        self._cached_actor_pg.append((trial.runner, pg))
                        should_destroy_actor = False
                    else:
                        # False if no placement group was replaced. This should
                        # only be the case if there are no more trials with
                        # this placement group factory to run
                        logger.debug(
                            f"Could not cache actor of trial {trial} for "
                            "reuse, as there are no pending trials "
                            "requiring its resources."
                        )
                        should_destroy_actor = True
                else:
                    should_destroy_actor = True

                if should_destroy_actor:
                    logger.debug("Trial %s: Destroying actor.", trial)

                    with self._change_working_directory(trial):
                        future = trial.runner.stop.remote()

                    pg = self._pg_manager.remove_from_in_use(trial)
                    self._futures[future] = (_ExecutorEventType.STOP_RESULT, pg)
                    if self._trial_cleanup:  # force trial cleanup within a deadline
                        self._trial_cleanup.add(future)

                self._staged_trials.discard(trial)

        except Exception:
            logger.exception("Trial %s: Error stopping runner.", trial)
            self.set_status(trial, Trial.ERROR)
        finally:
            trial.set_runner(None)
def start_trial(self, trial: Trial) -> bool:
"""Starts the trial.
Will not return resources if trial repeatedly fails on start.
Args:
trial: Trial to be started.
Returns:
True if the remote runner has been started. False if trial was
not started (e.g. because of lacking resources/pending PG).
"""
try:
return self._start_trial(trial)
except _AbortTrialExecution as e:
logger.exception("Trial %s: Error starting runner, aborting!", trial)
time.sleep(2)
self._stop_trial(trial, exc=e)
return False
except Exception as e:
logger.exception("Trial %s: Unexpected error starting runner.", trial)
time.sleep(2)
if isinstance(e, TuneError):
self._stop_trial(trial, exc=e)
else:
self._stop_trial(
trial, exc=_TuneStartTrialError(traceback.format_exc())
)
# Note that we don't return the resources, since they may
# have been lost. TODO(ujvl): is this the right thing to do?
return False
def _find_future(self, trial):
out = [rid for rid, t in self._futures.items() if t[1] is trial]
assert (
len(out) <= 1
), "Expecting one future for any given trial at any given time."
return out
def stop_trial(
self,
trial: Trial,
error: bool = False,
exc: Optional[Union[TuneError, RayTaskError]] = None,
) -> None:
prior_status = trial.status
self._stop_trial(trial, error=error or exc, exc=exc)
if prior_status == Trial.RUNNING:
logger.debug("Trial %s: Returning resources.", trial)
out = self._find_future(trial)
for result_id in out:
self._futures.pop(result_id)
def continue_training(self, trial: Trial) -> None:
"""Continues the training of this trial."""
self._train(trial)
    def pause_trial(self, trial: Trial) -> None:
        """Pauses the trial.

        We want to release resources (specifically GPUs) when pausing an
        experiment. This results in PAUSED state that similar to TERMINATED.

        The in-memory checkpoint taken first allows the trial to be
        resumed later despite its runner being stopped.
        """
        assert trial.status == Trial.RUNNING, trial.status
        try:
            self.save(trial, CheckpointStorage.MEMORY)
            self.stop_trial(trial)
            self.set_status(trial, Trial.PAUSED)
        except Exception:
            logger.exception("Error pausing runner.")
            self.set_status(trial, Trial.ERROR)
def reset_trial(
self,
trial: Trial,
new_config: Dict,
new_experiment_tag: str,
logger_creator: Optional[Callable[[Dict], "ray.tune.Logger"]] = None,
) -> bool:
"""Tries to invoke `Trainable.reset()` to reset trial.
Args:
trial: Trial to be reset.
new_config: New configuration for Trial trainable.
new_experiment_tag: New experiment name for trial.
logger_creator: Function that instantiates a logger on the
actor process.
Returns:
True if `reset_config` is successful else False.
"""
trial.set_experiment_tag(new_experiment_tag)
trial.set_config(new_config)
trainable = trial.runner
# Pass magic variables
extra_config = copy.deepcopy(new_config)
extra_config[TRIAL_INFO] = _TrialInfo(trial)
stdout_file, stderr_file = trial.log_to_file
extra_config[STDOUT_FILE] = stdout_file
extra_config[STDERR_FILE] = stderr_file
with self._change_working_directory(trial):
with warn_if_slow("reset"):
try:
reset_val = ray.get(
trainable.reset.remote(extra_config, logger_creator),
timeout=DEFAULT_GET_TIMEOUT,
)
except GetTimeoutError:
logger.exception("Trial %s: reset timed out.", trial)
return False
return reset_val
def has_resources_for_trial(self, trial: Trial) -> bool:
"""Returns whether there are resources available for this trial.
This will return True as long as we didn't reach the maximum number
of pending trials. It will also return True if the trial placement
group is already staged.
Args:
trial: Trial object which should be scheduled.
Returns:
boolean
"""
return (
trial in self._staged_trials
or (
len(self._cached_actor_pg) > 0
and (self._pg_manager.has_cached_pg(trial.placement_group_factory))
)
or self._pg_manager.can_stage()
or self._pg_manager.has_ready(trial, update=True)
or self._pg_manager.has_staging(trial)
)
def debug_string(self) -> str:
"""Returns a human readable message for printing to the console."""
total_resources = self._pg_manager.occupied_resources()
return self._resource_updater.debug_string(total_resources)
def on_step_begin(self, trials: List[Trial]) -> None:
"""Before step() is called, update the available resources."""
self._resource_updater.update_avail_resources()
self._trial_just_finished_before = self._trial_just_finished
self._trial_just_finished = False
def on_step_end(self, trials: List[Trial]) -> None:
self._do_force_trial_cleanup()
if time.time() > self.last_pg_recon + self.pg_recon_interval:
# Only do this every now and then - usually the placement groups
# should not get out of sync, and calling this often is inefficient
self._pg_manager.reconcile_placement_groups(trials)
self.last_pg_recon = time.time()
self._pg_manager.cleanup()
def _do_force_trial_cleanup(self) -> None:
if self._trial_cleanup:
while True:
next_future_to_clean = self._trial_cleanup.get_next()
if not next_future_to_clean:
break
if next_future_to_clean in self._futures.keys():
_, pg = self._futures.pop(next_future_to_clean)
post_stop_cleanup(next_future_to_clean, pg)
else:
# This just means that before the deadline reaches,
# the future is already cleaned up.
pass
def force_reconcilation_on_next_step_end(self) -> None:
self.last_pg_recon = -float("inf")
    def save(
        self,
        trial: Trial,
        storage: CheckpointStorage = CheckpointStorage.PERSISTENT,
        result: Optional[Dict] = None,
    ) -> _TrackedCheckpoint:
        """Saves the trial's state to a checkpoint asynchronously.

        Args:
            trial: The trial to be saved.
            storage: Where to store the checkpoint. Defaults to
                PERSISTENT.
            result: The state of this trial as a dictionary to be saved.
                If result is None, the trial's last result will be used.

        Returns:
            Checkpoint object, or None if an Exception occurs.
        """
        logger.debug(f"saving trial {trial}")
        result = result or trial.last_result
        with self._change_working_directory(trial):
            if storage == CheckpointStorage.MEMORY:
                # In-memory checkpoints are not tracked as futures:
                # `on_checkpoint` is invoked immediately on the trial.
                value = trial.runner.save_to_object.remote()
                checkpoint = _TrackedCheckpoint(
                    dir_or_data=value, storage_mode=storage, metrics=result
                )
                trial.on_checkpoint(checkpoint)
            else:
                # Persistent checkpoints complete asynchronously: record the
                # in-flight checkpoint on the trial and track the save future
                # so it is later processed as a SAVING_RESULT event.
                value = trial.runner.save.remote()
                checkpoint = _TrackedCheckpoint(
                    dir_or_data=value, storage_mode=storage, metrics=result
                )
                trial.saving_to = checkpoint
                self._futures[value] = (_ExecutorEventType.SAVING_RESULT, trial)
        return checkpoint
def restore(self, trial: Trial) -> None:
"""Restores training state from a given model checkpoint.
Args:
trial: The trial to be restored.
Raises:
RuntimeError: This error is raised if no runner is found.
AbortTrialExecution: This error is raised if the trial is
ineligible for restoration, given the Tune input arguments.
"""
checkpoint = trial.checkpoint
if checkpoint.dir_or_data is None:
return
if trial.runner is None:
raise RuntimeError(
"Trial {}: Unable to restore - no runner found.".format(trial)
)
checkpoint_dir = checkpoint.dir_or_data
node_ip = checkpoint.node_ip
if checkpoint.storage_mode == CheckpointStorage.MEMORY:
logger.debug("Trial %s: Attempting restore from object", trial)
# Note that we don't store the remote since in-memory checkpoints
# don't guarantee fault tolerance and don't need to be waited on.
with self._change_working_directory(trial):
trial.runner.restore_from_object.remote(checkpoint_dir)
else:
logger.debug("Trial %s: Attempting restore from %s", trial, checkpoint_dir)
if (
trial.uses_cloud_checkpointing
or not trial.sync_on_checkpoint
or not os.path.exists(checkpoint_dir)
):
# If using cloud checkpointing, trial will get cp from cloud.
# If not syncing to driver, assume it has access to the cp
# on the local fs.
with self._change_working_directory(trial):
remote = trial.runner.restore.remote(checkpoint_dir, node_ip)
elif trial.sync_on_checkpoint:
# This provides FT backwards compatibility in the
# case where no cloud checkpoints are provided.
logger.debug("Trial %s: Reading checkpoint into memory", trial)
obj = TrainableUtil.checkpoint_to_object(checkpoint_dir)
with self._change_working_directory(trial):
remote = trial.runner.restore_from_object.remote(obj)
else:
raise _AbortTrialExecution(
"Pass in `sync_on_checkpoint=True` for driver-based trial"
"restoration. Pass in an `upload_dir` for remote "
"storage-based restoration"
)
self._futures[remote] = (_ExecutorEventType.RESTORING_RESULT, trial)
trial.restoring_from = checkpoint
def export_trial_if_needed(self, trial: Trial) -> Dict:
"""Exports model of this trial based on trial.export_formats.
Return:
A dict that maps ExportFormats to successfully exported models.
"""
if trial.export_formats and len(trial.export_formats) > 0:
with self._change_working_directory(trial):
return ray.get(
trial.runner.export_model.remote(trial.export_formats),
timeout=DEFAULT_GET_TIMEOUT,
)
return {}
def has_gpus(self) -> bool:
return self._resource_updater.get_num_gpus() > 0
    def cleanup(self, trials: List[Trial]) -> None:
        """Drain all outstanding futures, then tear down placement groups.

        Blocks until either the trial-cleanup queue is empty (when a
        `_trial_cleanup` is configured) or no tracked futures remain.
        Only STOP_RESULT events receive explicit post-stop cleanup here.
        """
        while True:
            # Exit conditions: cleanup queue drained, or (without a cleanup
            # helper) no futures left to wait on.
            if self._trial_cleanup and self._trial_cleanup.is_empty():
                break
            elif not self._trial_cleanup and len(self._futures) == 0:
                break
            self._do_force_trial_cleanup()
            # Non-blocking poll (timeout=0).
            # NOTE(review): this loop spins without sleeping while futures
            # are pending — confirm this is acceptable during shutdown.
            ready, _ = ray.wait(list(self._futures.keys()), timeout=0)
            if not ready:
                continue
            event_type, trial_or_pg = self._futures.pop(ready[0])
            if event_type == _ExecutorEventType.STOP_RESULT:
                post_stop_cleanup(ready[0], trial_or_pg)
        self._pg_manager.reconcile_placement_groups(trials)
        self._pg_manager.cleanup(force=True)
        self._pg_manager.cleanup_existing_pg(block=True)
@contextmanager
def _change_working_directory(self, trial):
"""Context manager changing working directory to trial logdir.
Used in local mode.
For non-local mode it is no-op.
"""
if ray.worker._mode() == ray.worker.LOCAL_MODE:
old_dir = os.getcwd()
try:
os.chdir(trial.logdir)
yield
finally:
os.chdir(old_dir)
else:
yield
    def get_next_executor_event(
        self, live_trials: Set[Trial], next_trial_exists: bool
    ) -> _ExecutorEvent:
        """Get the next executor event to be processed in TrialRunner.

        In case there are multiple events available for handling, the next
        event is determined by the following priority:
        1. if there is `next_trial_exists`, and if there is cached resources
        to use, PG_READY is emitted.
        2. if there is `next_trial_exists` and there is no cached resources
        to use, wait on pg future and randomized other futures. If multiple
        futures are ready, pg future will take priority to be handled first.
        3. if there is no `next_trial_exists`, wait on just randomized other
        futures.

        An example of #3 would be synchronous hyperband. Although there are pgs
        ready, the scheduler is holding back scheduling new trials since the
        whole band of trials is waiting for the slowest trial to finish. In
        this case, we prioritize handling training result to avoid deadlock
        situation.

        This is a blocking wait with a timeout (specified with env var).
        The reason for the timeout is
        we still want to print status info periodically in TrialRunner for
        better user experience.

        The handle of `ExecutorEvent.STOP_RESULT` is purely internal to
        RayTrialExecutor itself. All the other future results are handled by
        TrialRunner.

        In the future we may want to do most of the handle of
        `ExecutorEvent.RESTORE_RESULT` and `SAVING_RESULT` in
        RayTrialExecutor itself and only notify TrialRunner to invoke
        corresponding callbacks. This view is more consistent with our goal
        of TrialRunner responsible for external facing Trial state transition,
        while RayTrialExecutor responsible for internal facing transitions,
        namely, `is_saving`, `is_restoring` etc.

        Also you may notice that the boundary between RayTrialExecutor and
        PlacementGroupManager right now is really blurry. This will be
        improved once we move to an ActorPool abstraction.

        `next_trial_exists` means that there is a trial to run - prioritize
        returning PG_READY in this case.
        """
        # First update status of staged placement groups
        self._stage_and_update_status(live_trials)
        while True:
            ###################################################################
            # when next_trial_exists and there are cached resources
            ###################################################################
            # There could be existing PGs from either `self._cached_actor_pg`
            # or from `self._pg_manager._ready`. If so and if there is indeed
            # a next trial to run, we return `PG_READY` future for trial
            # runner. The next trial can then be scheduled on this PG.
            if next_trial_exists:
                if len(self._cached_actor_pg) > 0:
                    return _ExecutorEvent(_ExecutorEventType.PG_READY)
                # TODO(xwjiang): Expose proper API when we decide to do
                # ActorPool abstraction.
                if any(len(r) > 0 for r in self._pg_manager._ready.values()):
                    return _ExecutorEvent(_ExecutorEventType.PG_READY)
            ###################################################################
            # Prepare for futures to wait
            ###################################################################
            # Shuffle so no single future type can starve the others.
            futures_to_wait = list(self._futures.keys())
            random.shuffle(futures_to_wait)
            if next_trial_exists:
                # Only wait for pg explicitly if there is next trial to run.
                # In which case, handling PG_READY triumphs handling other events.
                # Since we want to place pending trial ASAP.
                futures_to_wait = (
                    self._pg_manager.get_staging_future_list() + futures_to_wait
                )
            logger.debug(
                f"get_next_executor_event before wait with futures "
                f"{futures_to_wait} and "
                f"next_trial_exists={next_trial_exists}"
            )
            ready_futures, _ = ray.wait(
                futures_to_wait, num_returns=1, timeout=self._get_next_event_wait
            )
            ###################################################################
            # Dealing with no future returned case.
            ###################################################################
            if len(ready_futures) == 0:
                if len(self._futures) == 0:
                    # No running trial and timing out with wait, could be we may
                    # have insufficient cluster resources that makes tune run
                    # infeasible.
                    # TODO: Move InsufficientResourceManager's logic
                    # to TrialExecutor. It is not Runner's responsibility!
                    return _ExecutorEvent(_ExecutorEventType.NO_RUNNING_TRIAL_TIMEOUT)
                else:
                    # Training simply takes long time, yield the control back to main
                    # event loop to print progress info etc.
                    return _ExecutorEvent(_ExecutorEventType.YIELD)
            ###################################################################
            # If there is future returned.
            ###################################################################
            assert len(ready_futures) == 1
            ready_future = ready_futures[0]
            ###################################################################
            # If it is a PG_READY event.
            ###################################################################
            # Staging futures are not tracked in `self._futures`, so any
            # unknown ready future must be a placement-group staging future.
            if ready_future not in self._futures.keys():
                self._pg_manager.handle_ready_future(ready_future)
                return _ExecutorEvent(_ExecutorEventType.PG_READY)
            ###################################################################
            # non PG_READY event
            ###################################################################
            result_type, trial_or_pg = self._futures.pop(ready_future)
            if result_type == _ExecutorEventType.STOP_RESULT:
                # Handled internally; loop again for the next event.
                pg = trial_or_pg
                post_stop_cleanup(ready_future, pg)
            else:
                trial = trial_or_pg
                assert isinstance(trial, Trial)
                try:
                    future_result = ray.get(ready_future)
                    # For local mode
                    if isinstance(future_result, _LocalWrapper):
                        future_result = future_result.unwrap()
                    if result_type in (
                        _ExecutorEventType.TRAINING_RESULT,
                        _ExecutorEventType.SAVING_RESULT,
                        _ExecutorEventType.RESTORING_RESULT,
                    ):
                        logger.debug(f"Returning [{result_type}] for trial {trial}")
                        return _ExecutorEvent(
                            result_type,
                            trial,
                            result={_ExecutorEvent.KEY_FUTURE_RESULT: future_result},
                        )
                    else:
                        raise TuneError(f"Unexpected future type - [{result_type}]")
                except RayTaskError as e:
                    # Error raised inside the remote task: surface the cause.
                    return _ExecutorEvent(
                        _ExecutorEventType.ERROR,
                        trial,
                        result={_ExecutorEvent.KEY_EXCEPTION: e.as_instanceof_cause()},
                    )
                except Exception:
                    # Any other failure (e.g. during ray.get) is wrapped with
                    # its traceback for the runner to report.
                    return _ExecutorEvent(
                        _ExecutorEventType.ERROR,
                        trial,
                        result={
                            _ExecutorEvent.KEY_EXCEPTION: _TuneNoNextExecutorEventError(
                                traceback.format_exc()
                            )
                        },
                    )
| 39.69561
| 88
| 0.590346
|
4a0df8b9a2422abaa28a6fc4ad9cc74b54ba98f1
| 4,204
|
py
|
Python
|
tests/datatools/test_dataset.py
|
aya-miyazaki/XenonPy
|
90971cc362402715ba15c63f5d75070f9680fd78
|
[
"BSD-3-Clause"
] | 1
|
2021-01-29T04:43:45.000Z
|
2021-01-29T04:43:45.000Z
|
tests/datatools/test_dataset.py
|
aya-miyazaki/XenonPy
|
90971cc362402715ba15c63f5d75070f9680fd78
|
[
"BSD-3-Clause"
] | null | null | null |
tests/datatools/test_dataset.py
|
aya-miyazaki/XenonPy
|
90971cc362402715ba15c63f5d75070f9680fd78
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2019. TsumiNa. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
from os import remove
from pathlib import Path
import joblib
import numpy as np
import pandas as pd
import pytest
from xenonpy.datatools import Dataset
@pytest.fixture(scope='module')
def test_data():
    """Module-scoped fixture: create pickle/csv/pandas sample files.

    Yields (rename_filename, fetch_url, fixture_dir) and removes every
    artifact the tests may have produced on teardown.
    """
    # ignore numpy warning
    import warnings
    print('ignore NumPy RuntimeWarning\n')
    warnings.filterwarnings("ignore", message="numpy.dtype size changed")
    warnings.filterwarnings("ignore", message="numpy.ndarray size changed")

    file_path = Path(__file__).parent
    file_name = 'rename.txt'
    file_url = 'https://raw.githubusercontent.com/yoshida-lab/XenonPy/master/.github/fetch_test.txt'

    # create data in the three on-disk formats the Dataset tests read
    ary = [[1, 2], [3, 4]]
    df = pd.DataFrame(ary)
    pkl_path = str(file_path / 'test.pkl.z')
    df_path = str(file_path / 'test.pd.xz')
    csv_path = str(file_path / 'test.csv')
    joblib.dump(ary, pkl_path)
    df.to_csv(csv_path)
    df.to_pickle(df_path)

    yield file_name, file_url, file_path

    # Teardown. The original spelled out five identical exists/remove
    # stanzas; loop over the candidate artifacts instead (same order,
    # same behavior).
    for candidate in (file_name, 'fetch_test.txt', 'test.pd', 'test.str', 'test.pkl'):
        tmp = file_path / candidate
        if tmp.exists():
            remove(str(tmp))
    # These three were created above, so remove unconditionally.
    for created in (pkl_path, df_path, csv_path):
        remove(created)
    print('test over')
def test_dataset_1(test_data):
    """Dataset construction: defaults, duplicate/missing paths, prefixes."""
    path = Path(__file__).parents[0]
    # Default construction: pandas backend, current dir, no prefixes.
    ds = Dataset()
    assert ds._backend == 'pandas'
    assert ds._paths == ('.',)
    assert ds._prefix == ()
    # Passing the same path twice should only warn, not fail.
    with pytest.warns(RuntimeWarning):
        Dataset(str(path), str(path))
    # A non-existent directory is a hard error.
    with pytest.raises(RuntimeError):
        Dataset('no_exist_dir')
    # With a prefix, data files appear as `<prefix>_<name>` attributes.
    ds = Dataset(str(path), backend='pickle', prefix=('datatools',))
    assert hasattr(ds, 'datatools_test')
    # The repr should identify the object as a Dataset.
    tmp = '%s' % ds
    assert 'Dataset' in tmp
def test_dataset_2(test_data):
    """Backend accessors (`pickle`, `csv`, `pandas`) load fixture files."""
    path = Path(__file__).parents[0]
    ds = Dataset(str(path), backend='pickle')
    # Default (pickle) backend exposes files as attributes.
    assert hasattr(ds, 'test')
    tmp = ds.test
    assert isinstance(tmp, list)
    assert tmp == [[1, 2], [3, 4]]
    # csv accessor: via attribute and via explicit path. Note the extra
    # index column the fixture wrote via `DataFrame.to_csv`.
    tmp = ds.csv
    assert hasattr(tmp, 'test')
    tmp = tmp.test
    assert isinstance(tmp, pd.DataFrame)
    assert np.all(np.array([[0, 1, 2], [1, 3, 4]]) == tmp.values)
    tmp = ds.csv(str(path / 'test.csv'))
    assert np.all(np.array([[0, 1, 2], [1, 3, 4]]) == tmp.values)
    # pandas accessor reads the pickled DataFrame (no index column).
    tmp = ds.pandas
    assert hasattr(tmp, 'test')
    tmp = tmp.test
    assert isinstance(tmp, pd.DataFrame)
    assert np.all(np.array([[1, 2], [3, 4]]) == tmp.values)
    tmp = ds.pandas(str(path / 'test.pd.xz'))
    assert np.all(np.array([[1, 2], [3, 4]]) == tmp.values)
    # pickle accessor returns the raw dumped object.
    tmp = ds.pickle
    assert hasattr(tmp, 'test')
    tmp = tmp.test
    assert isinstance(tmp, list)
    assert [[1, 2], [3, 4]] == tmp
    tmp = ds.pickle(str(path / 'test.pkl.z'))
    assert [[1, 2], [3, 4]] == tmp
def test_dataset_3(test_data):
    """`Dataset.from_http` downloads, honors `save_to`, and renames."""
    # Saving into a non-existent directory is rejected up front.
    with pytest.raises(RuntimeError, match='is not a legal path'):
        Dataset.from_http(test_data[1], 'not_exist')
    # Default filename is derived from the URL.
    tmp = Dataset.from_http(test_data[1], save_to=test_data[2])
    assert tmp == str(test_data[2] / 'fetch_test.txt')
    assert Path(tmp).exists()
    with open(tmp, 'r') as f:
        assert f.readline() == 'Test xenonpy.utils.Loader._fetch_data'
    # An explicit `filename` overrides the URL-derived name.
    tmp = Dataset.from_http(test_data[1], save_to=test_data[2], filename=test_data[0])
    assert tmp == str(test_data[2] / 'rename.txt')
    assert Path(tmp).exists()
    with open(tmp, 'r') as f:
        assert f.readline() == 'Test xenonpy.utils.Loader._fetch_data'
def test_dataset_4(test_data):
    """`Dataset.to` accepts Path targets, string targets, and raw arrays."""
    file_path = test_data[2]
    frame = pd.DataFrame([[1, 2], [3, 4]])
    # DataFrame to a Path target.
    pd_target = file_path / 'test.pd'
    Dataset.to(frame, pd_target)
    assert pd_target.exists()
    # DataFrame to a string target.
    str_target = file_path / 'test.str'
    Dataset.to(frame, str(str_target))
    assert str_target.exists()
    # Raw ndarray payload.
    pkl_target = file_path / 'test.pkl'
    Dataset.to(frame.values, pkl_target)
    assert pkl_target.exists()
# Allow running this test module directly with `python test_dataset.py`.
if __name__ == "__main__":
    pytest.main()
| 25.634146
| 100
| 0.627022
|
4a0df8d77d95007484b79850a48c9ba37441a170
| 2,423
|
py
|
Python
|
di/_utils/state.py
|
adriangb/anydep
|
6ab38c1d4befe8783431e0b954848488ea21eae2
|
[
"MIT"
] | 5
|
2021-07-30T10:10:16.000Z
|
2021-09-23T11:23:15.000Z
|
di/_utils/state.py
|
adriangb/anydep
|
6ab38c1d4befe8783431e0b954848488ea21eae2
|
[
"MIT"
] | 3
|
2021-07-26T06:22:09.000Z
|
2021-09-24T16:11:08.000Z
|
di/_utils/state.py
|
adriangb/anydep
|
6ab38c1d4befe8783431e0b954848488ea21eae2
|
[
"MIT"
] | 1
|
2021-09-17T07:22:23.000Z
|
2021-09-17T07:22:23.000Z
|
from __future__ import annotations
from contextlib import AsyncExitStack, ExitStack
from types import TracebackType
from typing import Any, Dict, Optional, Type, Union
from di._utils.scope_map import ScopeMap
from di._utils.types import FusedContextManager
from di.api.providers import DependencyProvider
from di.api.scopes import Scope
class ContainerState:
    """Mutable per-container execution state.

    Holds the scoped cache of resolved dependency values and the exit
    stacks that own the resources of each currently open scope.
    """

    __slots__ = ("cached_values", "stacks")

    def __init__(
        self,
        cached_values: ScopeMap[DependencyProvider, Any],
        stacks: Dict[Scope, Union[AsyncExitStack, ExitStack]],
    ) -> None:
        self.cached_values = cached_values
        self.stacks = stacks

    @staticmethod
    def initialize() -> ContainerState:
        """Create a fresh state with no cached values and no open scopes."""
        return ContainerState(cached_values=ScopeMap(), stacks={})

    def copy(self) -> ContainerState:
        """Shallow-copy the state so the copy can diverge independently."""
        cloned_cache = ScopeMap(self.cached_values.copy())
        return ContainerState(cached_values=cloned_cache, stacks=dict(self.stacks))

    def enter_scope(self, scope: Scope) -> FusedContextManager[None]:
        """Enter a scope and get back a new ContainerState object that you can use to execute dependencies."""
        return ScopeContext(self, scope)
class ScopeContext(FusedContextManager[None]):
    """Sync/async context manager that opens one scope on a ContainerState.

    On entry an exit stack for the scope is registered on the state and a
    cache level for the scope is added; on exit the stack unwinds.
    """

    __slots__ = ("state", "scope", "stack")
    stack: Union[AsyncExitStack, ExitStack]

    def __init__(self, state: ContainerState, scope: Scope) -> None:
        self.state = state
        self.scope = scope

    def __enter__(self) -> None:
        stack = ExitStack()
        self.stack = stack
        self.state.stacks[self.scope] = stack
        self.state.cached_values.add_scope(self.scope)

    def __exit__(
        self,
        exc_type: Optional[Type[BaseException]],
        exc_value: Optional[BaseException],
        traceback: Optional[TracebackType],
    ) -> Union[None, bool]:
        # Delegate exception handling to the scope's exit stack.
        return self.stack.__exit__(exc_type, exc_value, traceback)  # type: ignore[union-attr,no-any-return]

    async def __aenter__(self) -> None:
        stack = AsyncExitStack()
        self.stack = stack
        self.state.stacks[self.scope] = stack
        self.state.cached_values.add_scope(self.scope)

    async def __aexit__(
        self,
        exc_type: Optional[Type[BaseException]],
        exc_value: Optional[BaseException],
        traceback: Optional[TracebackType],
    ) -> Union[None, bool]:
        # Delegate exception handling to the scope's async exit stack.
        return await self.stack.__aexit__(exc_type, exc_value, traceback)  # type: ignore[union-attr,no-any-return]
| 33.191781
| 115
| 0.674371
|
4a0df8e6008eeb7517728e4d446bc0cfa35ee25c
| 1,762
|
py
|
Python
|
lib/rucio/db/sqla/__init__.py
|
Pranay144/rucio
|
40bd5e4458342736e5ca37f565f8cb7ae8cb0850
|
[
"Apache-2.0"
] | null | null | null |
lib/rucio/db/sqla/__init__.py
|
Pranay144/rucio
|
40bd5e4458342736e5ca37f565f8cb7ae8cb0850
|
[
"Apache-2.0"
] | null | null | null |
lib/rucio/db/sqla/__init__.py
|
Pranay144/rucio
|
40bd5e4458342736e5ca37f565f8cb7ae8cb0850
|
[
"Apache-2.0"
] | null | null | null |
# Copyright European Organization for Nuclear Research (CERN)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
from sqlalchemy.sql.expression import bindparam, text
def filter_thread_work(session, query, total_threads, thread_id, hash_variable=None):
    """Restrict *query* to the partition of rows owned by one worker thread.

    Rows are assigned to threads by hashing a column (``id`` unless
    *hash_variable* names another one) modulo the thread count, using the
    hash primitive native to the session's SQL dialect (Oracle, MySQL or
    PostgreSQL).

    NOTE(review): no filter is applied when ``thread_id`` is 0 (falsy) or
    when fewer than two threads are configured — confirm thread 0 is meant
    to see the full result set in those cases.
    """
    # Guard clause mirroring the original gating condition.
    if not (thread_id and total_threads and (total_threads - 1) > 0):
        return query

    # With no explicit column, hash the `id` column.
    column = hash_variable if hash_variable else 'id'
    dialect = session.bind.dialect.name
    if dialect == 'oracle':
        bindparams = [
            bindparam('thread_id', thread_id),
            bindparam('total_threads', total_threads - 1),
        ]
        query = query.filter(
            text('ORA_HASH(%s, :total_threads) = :thread_id' % column,
                 bindparams=bindparams))
    elif dialect == 'mysql':
        query = query.filter(
            text('mod(md5(%s), %s) = %s' % (column, total_threads, thread_id)))
    elif dialect == 'postgresql':
        query = query.filter(
            text('mod(abs((\'x\'||md5(%s::text))::bit(32)::int), %s) = %s'
                 % (column, total_threads, thread_id)))
    return query
| 58.733333
| 145
| 0.638479
|
4a0df9bd1911568eb3a4b0ac866564fc33ac6483
| 267
|
py
|
Python
|
src/common/video_recorder.py
|
brianmcera/DRL_ObservationalDropout
|
2d6b7c88700b3de455cc2ca70ccde5fc7b52c490
|
[
"MIT"
] | 1
|
2020-03-19T22:01:01.000Z
|
2020-03-19T22:01:01.000Z
|
src/common/video_recorder.py
|
brianmcera/DRL_ObservationalDropout
|
2d6b7c88700b3de455cc2ca70ccde5fc7b52c490
|
[
"MIT"
] | null | null | null |
src/common/video_recorder.py
|
brianmcera/DRL_ObservationalDropout
|
2d6b7c88700b3de455cc2ca70ccde5fc7b52c490
|
[
"MIT"
] | null | null | null |
import cv2
def record_from_RGB_array(img_array, filepath, fps=60, width=256, height=256):
    """Encode a sequence of frames as an MJPG video file.

    Args:
        img_array: Sequence of frames — assumed to be BGR/RGB arrays of
            shape (height, width, 3) matching `width`/`height`; TODO confirm.
        filepath: Destination video file path (e.g. ending in .avi).
        fps: Frames per second of the output video.
        width: Frame width in pixels.
        height: Frame height in pixels.
    """
    # Bug fix: cv2.VideoWriter_fourcc takes four single-character arguments
    # ('M', 'J', 'P', 'G'), not one 4-char string — the original call raised
    # a TypeError. Unpack the codec string instead.
    out = cv2.VideoWriter(filepath, cv2.VideoWriter_fourcc(*'MJPG'), fps, (width, height))
    try:
        for frame in img_array:
            out.write(frame)
    finally:
        # Always release the writer so the container is finalized on disk.
        out.release()
| 33.375
| 89
| 0.700375
|
4a0dfa3248ac99941c9e8b376999dcdd14d245ae
| 4,464
|
py
|
Python
|
pyrobud/custom_modules/base.py
|
look416/pyrobud
|
0387021963be4a145d812903db7faf048c7b39c2
|
[
"MIT"
] | null | null | null |
pyrobud/custom_modules/base.py
|
look416/pyrobud
|
0387021963be4a145d812903db7faf048c7b39c2
|
[
"MIT"
] | 15
|
2021-11-02T17:39:21.000Z
|
2022-03-28T20:01:04.000Z
|
pyrobud/custom_modules/base.py
|
look416/pyrobud
|
0387021963be4a145d812903db7faf048c7b39c2
|
[
"MIT"
] | null | null | null |
import asyncio
import io
from pathlib import PurePosixPath
from typing import IO
import dataclasses as dc
import copy
import telethon as tg
from telethon.tl.types import PeerUser, PeerChat, PeerChannel
import re
import decimal
from fxparser import ParserHelper
from .. import command, module, util, mt4
class BaseModule(module.Module):
    """Telegram-signal trade relay module.

    Listens on a configured Telegram channel, parses messages into trade
    orders with `ParserHelper`, and forwards them to an MT4 terminal over
    a ZeroMQ bridge (`DWX_ZeroMQ_Connector`). Status messages are mirrored
    to a separate debug channel.
    """

    name = "BaseModel"
    # Host of the MT4 ZeroMQ bridge.
    zmqHost = "192.168.88.204"
    # Telegram channel the trade signals are read from.
    channelId = 1480231253
    # Telegram channel used for status/debug messages.
    debugChannel = 1347732970
    # tt = 1179400979 1480231253
    disabled = True
    # Magic number attached to every order placed by this module.
    magicNumber = 80000000
    parserHelper = ParserHelper()
    # When False, messages carrying media are ignored as signals.
    allowMedia = False
    # Optional broker-specific symbol prefix/suffix (e.g. "X.EURUSD.pro").
    prefix = ""
    suffix = ""
    # Last attempted order; retried as a market order on MT4 error 130.
    tmpOrder = None
    # Base lot size per order.
    orderVolume = 0.3
    _zmq = None
    db: util.db.AsyncDB

    async def on_load(self) -> None:
        # One DB namespace per module, derived from its name.
        self.db = self.bot.get_db(self.name.replace(" ", "").lower())
        # Register self as the pull-data handler, so `onPullData` receives
        # responses coming back from the MT4 side.
        self._zmq = mt4.DWX_ZeroMQ_Connector(
            _host=self.zmqHost, _pulldata_handlers=[self])

    async def on_start(self, time_us: int) -> None:
        # Announce startup on the debug channel.
        await self.bot.client.send_message(PeerChannel(channel_id=self.debugChannel), f"bot - {self.name} started....")

    async def on_message(self, event: tg.events.NewMessage.Event) -> None:
        """Parse a new channel message and place an order if it has symbol+type."""
        if isinstance(event.message.peer_id, PeerChannel) and event.message.peer_id.channel_id == self.channelId:
            self.log.info(f"Received message: {event.message}")
            # NOTE(review): `== None` should be `is None` (PEP 8).
            if self.allowMedia or event.message.media == None:
                order = self.parseMessage(event.message.message)
                await self.bot.client.send_message(PeerChannel(channel_id=self.debugChannel), f"Attempted Order {self.name}:{order}")
                if order.symbol and order.type:
                    await self.order(order)
            await self.db.inc("messages_received")

    async def on_message_edit(self, event: tg.events.NewMessage.Event) -> None:
        """Re-parse an edited message; only orders with a stop-loss are placed."""
        if isinstance(event.message.peer_id, PeerChannel) and event.message.peer_id.channel_id == self.channelId:
            self.log.info(f"Received edited message: {event.message}")
            order = self.parseMessage(event.message.message)
            await self.bot.client.send_message(PeerChannel(channel_id=self.debugChannel), f"Attempted Order {self.name}:{order}")
            if order.symbol and order.sl:
                await self.order(order)
            await self.db.inc("messages_received")

    def parseMessage(self, message):
        # Delegate raw-text parsing to the shared ParserHelper instance.
        return self.parserHelper.parse_text(message)

    def onPullData(self, data):
        """Callback for MT4 responses; retries the last order on error 130.

        MT4 response code '130' triggers a market-order retry of the last
        attempted order (presumably "invalid stops" — verify against MT4
        error codes).
        """
        self.log.info(data)
        if '_response' in data.keys() and data['_response'] == '130' and self.tmpOrder is not None:
            # NOTE(review): asyncio.run() raises if an event loop is already
            # running in this thread — confirm this callback always fires on
            # a loop-free thread.
            asyncio.run(self.order(self.tmpOrder, True))

    async def order(self, order, isMarket=False, halfVol = False):
        """Translate a parsed signal into an MT4 trade request and send it.

        Args:
            order: Parsed signal (symbol, type, price, sl, tpList, market).
            isMarket: Force a market order (used for the error-130 retry).
            halfVol: Use half the configured volume (used for the second
                take-profit leg).
        """
        # Remember the order so onPullData can retry it on error 130.
        self.tmpOrder = copy.copy(order)
        vol = self.orderVolume if not halfVol else self.orderVolume / 2
        # get the base decimal numbers
        exponent = 1000 if 'JPY' in order.symbol else 100000
        if "GOLD" in order.symbol or "XAU" in order.symbol:
            exponent = 100
            order.symbol = "XAUUSD"
        tp = 0
        if len(order.tpList) > 0:
            tp = float(order.tpList[0])
        if len(order.tpList) > 1:
            # Two TP levels: recursively place a half-volume order for the
            # second level, then halve the volume of this one as well.
            order.tpList = [order.tpList[1]]
            await self.order(order, halfVol = True)
            vol = vol/2
        if order.price == 0.0:
            # No entry price in the signal: derive one 400 points away from
            # the stop-loss, in the direction implied by the order type.
            order.price = order.sl + \
                (400 / exponent * (1 if order.type == 1 else -1))
        if self.suffix:
            order.symbol = f"{order.symbol}.{self.suffix}"
        if self.prefix:
            order.symbol = f"{self.prefix}.{order.symbol}"
        _trade = self._zmq._generate_default_order_dict()
        _trade['_type'] = order.type - 1
        # TP/SL are expressed in points; 500 is the fallback distance.
        _trade['_TP'] = int(abs(order.price - tp) *
                            exponent) if tp > 0 else 500
        _trade['_SL'] = int(abs(order.price - order.sl) *
                            exponent) if order.sl > 0 else 500
        _trade['_lots'] = vol
        _trade['_symbol'] = order.symbol
        _trade['_comment'] = self.name
        _trade['_magic'] = self.magicNumber
        if not order.market and not isMarket:
            # Pending order: keep the price and map buy/sell (0/1) to the
            # corresponding limit/stop order type (2/3).
            _trade["_price"] = order.price
            _trade['_type'] = 2 if _trade['_type'] == 0 else 3
        self._zmq._DWX_MTX_NEW_TRADE_(_order=_trade)
        await self.bot.client.send_message(PeerChannel(channel_id=self.debugChannel), f"{_trade}")
        self.log.info(f"Order created: {_trade}")
| 39.157895
| 133
| 0.622312
|
4a0dfb98063d6ae4066c076c37f14fe15e355d66
| 1,792
|
py
|
Python
|
tabnet/download_prepare_covertype.py
|
kiss2u/google-research
|
2cd66234656f9e2f4218ed90a2d8aa9cf3139093
|
[
"Apache-2.0"
] | 1
|
2020-02-10T12:50:17.000Z
|
2020-02-10T12:50:17.000Z
|
tabnet/download_prepare_covertype.py
|
kiss2u/google-research
|
2cd66234656f9e2f4218ed90a2d8aa9cf3139093
|
[
"Apache-2.0"
] | 7
|
2021-08-25T16:15:53.000Z
|
2022-02-10T03:26:55.000Z
|
tabnet/download_prepare_covertype.py
|
kiss2u/google-research
|
2cd66234656f9e2f4218ed90a2d8aa9cf3139093
|
[
"Apache-2.0"
] | 1
|
2021-02-11T06:11:31.000Z
|
2021-02-11T06:11:31.000Z
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Downloads and prepares the Forest Covertype dataset."""
import gzip
import os
import shutil
import pandas as pd
from sklearn.model_selection import train_test_split
import wget

# Source archive for the UCI Forest Covertype dataset.
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/covtype/covtype.data.gz'
# NOTE(review): os.mkdir fails if ./data already exists — rerunning the
# script requires removing the directory first.
os.mkdir('./data')
# wget downloads into the current working directory.
filename = wget.download(url)
# Decompress the gzip archive into data/covtype.csv.
with gzip.open(filename, 'rb') as f_in:
  with open('data/covtype.csv', 'wb') as f_out:
    shutil.copyfileobj(f_in, f_out)
# NOTE(review): covtype.data has no header row, but read_csv defaults to
# header=0 — confirm that dropping the first sample as a header is intended.
df = pd.read_csv('data/covtype.csv')
n_total = len(df)
# Train, val and test split follows
# Rory Mitchell, Andrey Adinets, Thejaswi Rao, and Eibe Frank.
# Xgboost: Scalable GPU accelerated learning. arXiv:1806.11248, 2018.
train_val_indices, test_indices = train_test_split(
    range(n_total), test_size=0.2, random_state=0)
train_indices, val_indices = train_test_split(
    train_val_indices, test_size=0.2 / 0.6, random_state=0)
traindf = df.iloc[train_indices]
valdf = df.iloc[val_indices]
testdf = df.iloc[test_indices]
# Shuffle the training rows; val/test keep their sampled order.
traindf = traindf.sample(frac=1)
# Files are written without header or index — presumably the training
# pipeline expects raw feature rows; verify against the consumer.
traindf.to_csv('data/train.csv', index=False, header=False)
valdf.to_csv('data/val.csv', index=False, header=False)
testdf.to_csv('data/test.csv', index=False, header=False)
| 34.461538
| 89
| 0.760045
|
4a0dfc9c9b6bb4c320d993cbd5d004b0dd026e83
| 444
|
py
|
Python
|
django_react_notes/users/urls.py
|
RommelTJ/django_react_notes
|
670f4d08cadd1b14bd8f2e47002bcf81874dd036
|
[
"MIT"
] | null | null | null |
django_react_notes/users/urls.py
|
RommelTJ/django_react_notes
|
670f4d08cadd1b14bd8f2e47002bcf81874dd036
|
[
"MIT"
] | 2
|
2019-04-06T03:30:03.000Z
|
2019-04-06T23:11:04.000Z
|
django_react_notes/users/urls.py
|
RommelTJ/django_react_notes
|
670f4d08cadd1b14bd8f2e47002bcf81874dd036
|
[
"MIT"
] | null | null | null |
from django.urls import path
from django_react_notes.users.views import (
    user_list_view,
    user_redirect_view,
    user_update_view,
    user_detail_view,
)

# URL namespace, used when reversing, e.g. reverse("users:detail", ...).
app_name = "users"
urlpatterns = [
    path("", view=user_list_view, name="list"),
    path("~redirect/", view=user_redirect_view, name="redirect"),
    path("~update/", view=user_update_view, name="update"),
    # Catch-all username route comes last so the literal routes above
    # take precedence.
    path("<str:username>/", view=user_detail_view, name="detail"),
]
| 26.117647
| 66
| 0.70045
|
4a0e020a36979115a77b1f399b81482adf1b2efd
| 1,857
|
py
|
Python
|
tint/friends.py
|
bmuller/tint
|
e74a3e4c46f71dfcb2574920467ad791d29de6fe
|
[
"MIT"
] | 1
|
2015-02-18T18:33:44.000Z
|
2015-02-18T18:33:44.000Z
|
tint/friends.py
|
8468/tint
|
e74a3e4c46f71dfcb2574920467ad791d29de6fe
|
[
"MIT"
] | null | null | null |
tint/friends.py
|
8468/tint
|
e74a3e4c46f71dfcb2574920467ad791d29de6fe
|
[
"MIT"
] | null | null | null |
from collections import namedtuple
from twisted.internet import defer
from tint.ssl.keymagic import PublicKey
from tint.log import Logger
from tint.resolution import KeyNotResolvedError
from tint.storage.addressing import Path
# Lightweight record for an authorized friend: key id, display name, and
# the full public key string.
Friend = namedtuple('Friend', ['id', 'name', 'key'])
class FriendsList(object):
    """The set of friends (authorized public keys) known to this node."""

    def __init__(self, storage, keyStore, resolver):
        self.storage = storage
        self.keyStore = keyStore
        self.resolver = resolver
        self.log = Logger(system=self)

    def addFriendById(self, name, keyId):
        """
        Lookup a public key with the given keyId and save if found.
        """
        lookup = self.resolver.getPublicKey(keyId)
        return lookup.addCallback(self._addFriendById, name)

    def _addFriendById(self, keyvalue, name):
        # Resolution miss is surfaced as an explicit error.
        if keyvalue is None:
            raise KeyNotResolvedError("Could not find key for %s" % name)
        return self.addFriend(keyvalue, name)

    def addFriend(self, publicKey, name):
        """
        Add a friend with the given public key.
        """
        self.log.debug("Adding key belonging to %s: %s" % (name, publicKey))
        key = PublicKey(publicKey)
        key_id = key.getKeyId()
        # Grant the friend access to their own path and persist the key.
        self.storage.grantAccess(key_id, str(Path(key_id)))
        self.keyStore.setAuthorizedKey(key, name)
        return defer.succeed(Friend(key_id, name, publicKey))

    def removeFriend(self, name):
        """Forget the authorized key stored under `name`."""
        return self.keyStore.removeAuthorizedKey(name)

    def getFriends(self):
        """Return all authorized keys as Friend records."""
        return [
            Friend(key.getKeyId(), key.name, str(key))
            for key in self.keyStore.getAuthorizedKeysList()
        ]

    def __iter__(self):
        return iter(self.getFriends())

    def __len__(self):
        return len(self.getFriends())
| 30.95
| 76
| 0.642434
|
4a0e04d3956060fddd5d24f9cc75c8a1c1f14814
| 3,800
|
py
|
Python
|
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/djangoapps/password_policy/tests/test_forms.py
|
osoco/better-ways-of-thinking-about-software
|
83e70d23c873509e22362a09a10d3510e10f6992
|
[
"MIT"
] | 3
|
2021-12-15T04:58:18.000Z
|
2022-02-06T12:15:37.000Z
|
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/djangoapps/password_policy/tests/test_forms.py
|
osoco/better-ways-of-thinking-about-software
|
83e70d23c873509e22362a09a10d3510e10f6992
|
[
"MIT"
] | null | null | null |
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/djangoapps/password_policy/tests/test_forms.py
|
osoco/better-ways-of-thinking-about-software
|
83e70d23c873509e22362a09a10d3510e10f6992
|
[
"MIT"
] | 1
|
2019-01-02T14:38:50.000Z
|
2019-01-02T14:38:50.000Z
|
"""
Test password policy forms
"""
from unittest import mock
import pytest
from django.forms import ValidationError
from django.test import TestCase
from django.test.utils import override_settings
from openedx.core.djangoapps.password_policy.compliance import (
NonCompliantPasswordException, NonCompliantPasswordWarning
)
from openedx.core.djangoapps.password_policy.forms import PasswordPolicyAwareAdminAuthForm
from common.djangoapps.student.tests.factories import UserFactory
class PasswordPolicyAwareAdminAuthFormTests(TestCase):
"""
Tests the custom form for enforcing password policy rules
"""
def setUp(self):
super().setUp()
self.auth_form = PasswordPolicyAwareAdminAuthForm()
self.user = UserFactory.create(username='test_user', password='test_password', is_staff=True)
self.auth_form.cleaned_data = {
'username': 'test_user',
'password': 'test_password'
}
@override_settings(PASSWORD_POLICY_COMPLIANCE_ROLLOUT_CONFIG={'ENFORCE_COMPLIANCE_ON_LOGIN': False})
def test_auth_form_policy_disabled(self):
"""
Verify that the username and password are returned when compliance is disabled
"""
cleaned_data = self.auth_form.clean()
assert cleaned_data.get('username') == 'test_user'
assert cleaned_data.get('password'), 'test_password'
@override_settings(PASSWORD_POLICY_COMPLIANCE_ROLLOUT_CONFIG={'ENFORCE_COMPLIANCE_ON_LOGIN': True})
def test_auth_form_policy_enabled(self):
"""
Verify that the username and password are returned when compliance is enabled
"""
with mock.patch(
'openedx.core.djangoapps.password_policy.forms.password_policy_compliance.enforce_compliance_on_login'
) as mock_enforce_compliance_on_login:
mock_enforce_compliance_on_login.return_value = True
cleaned_data = self.auth_form.clean()
assert cleaned_data.get('username') == self.user.username
assert cleaned_data.get('password'), self.user.password
@override_settings(PASSWORD_POLICY_COMPLIANCE_ROLLOUT_CONFIG={'ENFORCE_COMPLIANCE_ON_LOGIN': True})
def test_auth_form_policy_enabled_with_warning(self):
"""
Verify that the username and password are returned when compliance is
enabled despite a NonCompliantPasswordWarning being thrown
"""
# Need to mock messages here as it will fail due to a lack of requests on this unit test
with mock.patch('openedx.core.djangoapps.password_policy.forms.messages') as mock_messages:
mock_messages.return_value = True
with mock.patch(
'openedx.core.djangoapps.password_policy.forms.password_policy_compliance.enforce_compliance_on_login'
) as mock_enforce_compliance_on_login:
mock_enforce_compliance_on_login.side_effect = NonCompliantPasswordWarning('Test warning')
cleaned_data = self.auth_form.clean()
assert cleaned_data.get('username') == self.user.username
assert cleaned_data.get('password')
@override_settings(PASSWORD_POLICY_COMPLIANCE_ROLLOUT_CONFIG={'ENFORCE_COMPLIANCE_ON_LOGIN': True})
def test_auth_form_policy_enabled_with_exception(self):
"""
Verify that an exception is raised when enforce_compliance_on_login throws a NonCompliantPasswordException
"""
with mock.patch(
'openedx.core.djangoapps.password_policy.forms.password_policy_compliance.enforce_compliance_on_login'
) as mock_enforce_compliance_on_login:
mock_enforce_compliance_on_login.side_effect = NonCompliantPasswordException('Test exception')
pytest.raises(ValidationError, self.auth_form.clean)
| 47.5
| 118
| 0.731842
|
4a0e0500fd3b17fc3702631bd49cacff063ead2f
| 557
|
py
|
Python
|
read text convert json.py
|
bbbirkan/txt_format_data_create_python_dict-
|
71d5cd1b655c634e05a4597f1085d0b3c252ce45
|
[
"MIT"
] | 1
|
2021-01-25T03:01:34.000Z
|
2021-01-25T03:01:34.000Z
|
read text convert json.py
|
bbbirkan/txt_format_data_create_python_dict-
|
71d5cd1b655c634e05a4597f1085d0b3c252ce45
|
[
"MIT"
] | null | null | null |
read text convert json.py
|
bbbirkan/txt_format_data_create_python_dict-
|
71d5cd1b655c634e05a4597f1085d0b3c252ce45
|
[
"MIT"
] | null | null | null |
import json
import re
f = open("source code.txt", "r")
data = {}
for line in f:
try:
line = (line.replace("\"", "").replace("\n\n", "").replace("\n", "").strip().split(":"))
line[0] = re.sub("[^\w]+", "", line[0])
line[1] = re.sub("[^\w]+", " ", line[1])
data[line[0]] = line[1]
except:
pass
data = {x:v[1:-1]for x, v in data.items()}
print(data)
with open("export file.json", 'w', encoding='utf8') as f3:
json.dump(data, f3, ensure_ascii=False, indent=1)
my_dict = json.dumps(data, ensure_ascii=False)
| 27.85
| 96
| 0.536804
|
4a0e060a47c8c2fe932a24689c08eec1e7af2fc0
| 274
|
py
|
Python
|
emailauth/management/commands/cleanupemailauth.py
|
redvasily/django-emailauth
|
1c3c977f361e63eb6e4bd2fa32f6d8af78f74f31
|
[
"BSD-3-Clause"
] | 2
|
2015-01-25T01:47:03.000Z
|
2016-05-09T14:08:50.000Z
|
emailauth/management/commands/cleanupemailauth.py
|
redvasily/django-emailauth
|
1c3c977f361e63eb6e4bd2fa32f6d8af78f74f31
|
[
"BSD-3-Clause"
] | null | null | null |
emailauth/management/commands/cleanupemailauth.py
|
redvasily/django-emailauth
|
1c3c977f361e63eb6e4bd2fa32f6d8af78f74f31
|
[
"BSD-3-Clause"
] | null | null | null |
from django.core.management.base import NoArgsCommand
from emailauth.models import UserEmail
class Command(NoArgsCommand):
help = "Delete expired UserEmail objects from the database"
def handle_noargs(self, **options):
UserEmail.objects.delete_expired()
| 24.909091
| 63
| 0.770073
|
4a0e0705f38160dd8f66d968ea384d4a618a7dba
| 3,383
|
py
|
Python
|
sphinx/source/docs/user_guide/examples/extensions_example_latex.py
|
kevin1kevin1k/bokeh
|
9f34b5b710e2748ec803c12918ec1706098a3477
|
[
"BSD-3-Clause"
] | 1
|
2015-01-31T14:42:39.000Z
|
2015-01-31T14:42:39.000Z
|
sphinx/source/docs/user_guide/examples/extensions_example_latex.py
|
kevin1kevin1k/bokeh
|
9f34b5b710e2748ec803c12918ec1706098a3477
|
[
"BSD-3-Clause"
] | 2
|
2021-05-08T11:43:21.000Z
|
2021-05-10T19:16:43.000Z
|
sphinx/source/docs/user_guide/examples/extensions_example_latex.py
|
kevin1kevin1k/bokeh
|
9f34b5b710e2748ec803c12918ec1706098a3477
|
[
"BSD-3-Clause"
] | 1
|
2020-01-21T12:03:58.000Z
|
2020-01-21T12:03:58.000Z
|
""" The LaTex example was derived from: http://matplotlib.org/users/usetex.html
"""
import numpy as np
from bokeh.models import Label
from bokeh.plotting import figure, show
from bokeh.util.compiler import TypeScript
TS_CODE = """
import * as p from "core/properties"
import {Label, LabelView} from "models/annotations/label"
declare const katex: any
export class LatexLabelView extends LabelView {
model: LatexLabel
render(): void {
//--- Start of copied section from ``Label.render`` implementation
// Here because AngleSpec does units tranform and label doesn't support specs
let angle: number
switch (this.model.angle_units) {
case "rad": {
angle = -this.model.angle
break
}
case "deg": {
angle = (-this.model.angle * Math.PI) / 180.0
break
}
default:
throw new Error("unreachable code")
}
const panel = this.panel != null ? this.panel : this.plot_view.frame
const xscale = this.plot_view.frame.xscales[this.model.x_range_name]
const yscale = this.plot_view.frame.yscales[this.model.y_range_name]
let sx = this.model.x_units == "data" ? xscale.compute(this.model.x) : panel.xview.compute(this.model.x)
let sy = this.model.y_units == "data" ? yscale.compute(this.model.y) : panel.yview.compute(this.model.y)
sx += this.model.x_offset
sy -= this.model.y_offset
//--- End of copied section from ``Label.render`` implementation
// Must render as superpositioned div (not on canvas) so that KaTex
// css can properly style the text
this._css_text(this.plot_view.canvas_view.ctx, "", sx, sy, angle)
// ``katex`` is loaded into the global window at runtime
// katex.renderToString returns a html ``span`` element
katex.render(this.model.text, this.el, {displayMode: true})
}
}
export namespace LatexLabel {
export type Attrs = p.AttrsOf<Props>
export type Props = Label.Props
}
export interface LatexLabel extends LatexLabel.Attrs {}
export class LatexLabel extends Label {
properties: LatexLabel.Props
constructor(attrs?: Partial<LatexLabel.Attrs>) {
super(attrs)
}
static init_LatexLabel() {
this.prototype.default_view = LatexLabelView
}
}
"""
class LatexLabel(Label):
"""A subclass of the Bokeh built-in `Label` that supports rendering
LaTex using the KaTex typesetting library.
Only the render method of LabelView is overloaded to perform the
text -> latex (via katex) conversion. Note: ``render_mode="canvas``
isn't supported and certain DOM manipulation happens in the Label
superclass implementation that requires explicitly setting
`render_mode='css'`).
"""
__javascript__ = ["https://cdnjs.cloudflare.com/ajax/libs/KaTeX/0.6.0/katex.min.js"]
__css__ = ["https://cdnjs.cloudflare.com/ajax/libs/KaTeX/0.6.0/katex.min.css"]
__implementation__ = TypeScript(TS_CODE)
x = np.arange(0.0, 1.0 + 0.01, 0.01)
y = np.cos(2 * 2 * np.pi * x) + 2
p = figure(title="LaTex Demonstration", plot_width=500, plot_height=500)
p.line(x, y)
# Note: must set ``render_mode="css"``
latex = LatexLabel(text="f = \sum_{n=1}^\infty\\frac{-e^{i\pi}}{2^n}!",
x=40, y=420, x_units='screen', y_units='screen',
render_mode='css', text_font_size='16pt',
background_fill_alpha=0)
p.add_layout(latex)
show(p)
| 30.754545
| 108
| 0.67987
|
4a0e07d8edaf575e1c227948a892907e70574d9c
| 23,688
|
py
|
Python
|
core/migrations/0001_initial.py
|
bruno-zaccariello/sgeheroku
|
c3d1a0292a33ffc3296746838dc8324c1496ff7e
|
[
"Apache-2.0"
] | null | null | null |
core/migrations/0001_initial.py
|
bruno-zaccariello/sgeheroku
|
c3d1a0292a33ffc3296746838dc8324c1496ff7e
|
[
"Apache-2.0"
] | 4
|
2020-02-11T23:12:36.000Z
|
2021-11-15T17:47:44.000Z
|
core/migrations/0001_initial.py
|
bruno-zaccariello/sgeheroku
|
c3d1a0292a33ffc3296746838dc8324c1496ff7e
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 2.1.2 on 2018-10-31 16:13
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Categoriaproduto',
fields=[
('pkid_categoria', models.AutoField(primary_key=True, serialize=False)),
('nomecategoria', models.CharField(max_length=100, unique=True, verbose_name='Nome da Categoria')),
('hide', models.BooleanField(default=0)),
],
options={
'verbose_name': 'Categoria',
'verbose_name_plural': 'Categorias (Produto)',
'managed': True,
},
),
migrations.CreateModel(
name='Cotacaocompra',
fields=[
('pkid_cotacao', models.AutoField(primary_key=True, serialize=False)),
('fornecedor', models.CharField(blank=True, max_length=100, null=True)),
('dt_cotacao', models.DateTimeField(blank=True, null=True)),
('dt_entrega', models.DateTimeField(blank=True, null=True)),
('formapamento', models.CharField(blank=True, max_length=45, null=True)),
('pedidocompra_pkid_compra', models.IntegerField()),
('statuscotacao_pkid_status', models.IntegerField()),
('dt_cadastro', models.DateTimeField(blank=True, null=True)),
('dt_alteracao', models.DateTimeField(blank=True, null=True)),
('hide', models.BooleanField(default=0)),
],
options={
'managed': True,
},
),
migrations.CreateModel(
name='Endereco',
fields=[
('pkid_endereco', models.AutoField(primary_key=True, serialize=False)),
('logradouro', models.CharField(max_length=100, verbose_name='Endereço')),
('endereco_numero', models.CharField(blank=True, max_length=7, null=True, verbose_name='Número')),
('complemento', models.CharField(blank=True, max_length=45, null=True, verbose_name='Complemento')),
('cep', models.CharField(max_length=9, verbose_name='CEP')),
('bairro', models.CharField(max_length=100, verbose_name='Bairro')),
('cidade', models.CharField(max_length=150, verbose_name='Cidade')),
('uf', models.CharField(max_length=2, verbose_name='Estado')),
('hide', models.BooleanField(default=0)),
],
options={
'verbose_name': 'Endereço (Pessoa : CEP)',
'verbose_name_plural': 'Endereços',
'managed': True,
},
),
migrations.CreateModel(
name='Entrega',
fields=[
('pkid_entrega', models.AutoField(primary_key=True, serialize=False)),
('dataentrega', models.DateTimeField(blank=True, null=True)),
('valorfrete', models.TextField(blank=True, null=True)),
('hide', models.BooleanField(default=0)),
],
options={
'verbose_name': 'Forma de Entrega',
'verbose_name_plural': 'Formas de Entrega',
'managed': True,
},
),
migrations.CreateModel(
name='Formapagamento',
fields=[
('pkid_formapag', models.AutoField(primary_key=True, serialize=False)),
('formapagamento', models.CharField(max_length=50)),
('hide', models.BooleanField(default=0)),
],
options={
'verbose_name': 'Forma de Pagamento',
'verbose_name_plural': 'Formas de Pagamento',
'managed': True,
},
),
migrations.CreateModel(
name='Formulamateria',
fields=[
('pkid_formula_materia', models.AutoField(primary_key=True, serialize=False)),
('quantidade', models.FloatField(verbose_name='Quantidade')),
],
options={
'verbose_name': 'Matéria Prima : Fórmula',
'verbose_name_plural': 'Matérias Fórmulas (Relação)',
},
),
migrations.CreateModel(
name='Formulaproduto',
fields=[
('pkid_formula', models.AutoField(primary_key=True, serialize=False)),
('tempomaturacao', models.TimeField(verbose_name='Tempo de Maturação')),
('hide', models.BooleanField(default=0)),
],
options={
'verbose_name': 'Fórmula Produto',
'verbose_name_plural': 'Fórmulas (Produto)',
'managed': True,
},
),
migrations.CreateModel(
name='Itemcompra',
fields=[
('pkid_item', models.AutoField(primary_key=True, serialize=False)),
('produto', models.CharField(blank=True, max_length=45, null=True)),
('descricaoproduto', models.CharField(blank=True, max_length=45, null=True)),
('quantidade', models.IntegerField(blank=True, null=True)),
('precounitario', models.TextField(blank=True, null=True)),
('totalvenda', models.TextField(blank=True, null=True)),
('dt_cadastro', models.DateTimeField(blank=True, null=True)),
('dt_alteracao', models.DateTimeField(blank=True, null=True)),
('hide', models.BooleanField(default=0)),
('pedidocompra_pkid_compra', models.IntegerField()),
('statuscompra_pkid_status', models.IntegerField()),
('unidademedidacompra_pkid_unidademedida', models.IntegerField()),
],
options={
'managed': True,
},
),
migrations.CreateModel(
name='Itemcotacao',
fields=[
('pkid_item', models.AutoField(primary_key=True, serialize=False)),
('produto', models.CharField(blank=True, max_length=45, null=True)),
('descricaoproduto', models.CharField(blank=True, max_length=100, null=True)),
('quantidade', models.IntegerField(blank=True, null=True)),
('precounitario', models.TextField(blank=True, null=True)),
('totalvenda', models.TextField(blank=True, null=True)),
('dt_cadastro', models.DateTimeField(blank=True, null=True)),
('dt_alteracao', models.DateTimeField(blank=True, null=True)),
('hide', models.BooleanField(default=0)),
('cotacaocompra_pkid_cotacao', models.IntegerField()),
],
options={
'managed': True,
},
),
migrations.CreateModel(
name='Itemvenda',
fields=[
('pkid_itemvenda', models.AutoField(primary_key=True, serialize=False)),
('quantidade', models.IntegerField(blank=True, null=True)),
('vl_total', models.DecimalField(decimal_places=2, max_digits=10)),
('vl_unitario', models.DecimalField(decimal_places=2, max_digits=10)),
('hide', models.BooleanField(default=0)),
],
options={
'managed': True,
},
),
migrations.CreateModel(
name='Materiaprima',
fields=[
('pkid_materiaprima', models.AutoField(primary_key=True, serialize=False)),
('materiaprima', models.CharField(max_length=60, verbose_name='Matéria Prima')),
('marca', models.CharField(blank=True, max_length=50, null=True, verbose_name='Marca')),
('totalestoque', models.IntegerField(verbose_name='Qtd. em Estoque')),
('hide', models.BooleanField(default=False)),
],
options={
'verbose_name': 'Matéria Prima',
'verbose_name_plural': 'Matérias Primas',
'managed': True,
},
),
migrations.CreateModel(
name='Movimentacao',
fields=[
('pkid_movimentacao', models.AutoField(primary_key=True, serialize=False)),
('fkid_produto', models.IntegerField()),
('tipomovimentacao', models.TextField()),
('numentradas', models.IntegerField()),
('numsaidas', models.IntegerField()),
('dt_cadastro', models.DateTimeField()),
('dt_alteracao', models.DateTimeField()),
('hide', models.BooleanField(default=0)),
('fkid_linhavenda1', models.IntegerField(blank=True, null=True)),
('fkid_venda', models.IntegerField(blank=True, null=True)),
('fkid_pedidofabri', models.IntegerField(blank=True, null=True)),
('fkid_estoque', models.IntegerField()),
],
options={
'managed': True,
},
),
migrations.CreateModel(
name='Pedidocompra',
fields=[
('pkid_compra', models.AutoField(primary_key=True, serialize=False)),
('fornecedor', models.CharField(blank=True, max_length=100, null=True)),
('dt_pedido', models.DateTimeField(blank=True, null=True)),
('dt_compra', models.DateTimeField(blank=True, null=True)),
('dt_pagamento', models.DateTimeField(blank=True, null=True)),
('dt_recebimento', models.DateTimeField(blank=True, null=True)),
('cotacaocompra_pkid_cotacao', models.IntegerField()),
('dt_cadastro', models.DateTimeField(blank=True, null=True)),
('dt_alteracao', models.DateTimeField(blank=True, null=True)),
('hide', models.BooleanField(default=0)),
('statuscompra_pkid_status', models.IntegerField()),
],
options={
'managed': True,
},
),
migrations.CreateModel(
name='Pedidofabricacao',
fields=[
('pkid_pedidofabricacao', models.AutoField(primary_key=True, serialize=False)),
('lote', models.CharField(blank=True, max_length=8, null=True, unique=True)),
('quantidade', models.IntegerField(verbose_name='Quantidade')),
('dt_fim_maturacao', models.DateTimeField()),
('hide', models.BooleanField(default=0)),
('fkid_formula', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Formulaproduto')),
],
options={
'verbose_name': 'Pedido de Fabricação',
'verbose_name_plural': 'Pedidos de Fabricação',
},
),
migrations.CreateModel(
name='Pedidovenda',
fields=[
('pkid_venda', models.AutoField(primary_key=True, serialize=False)),
('dt_pedido', models.DateTimeField(blank=True, null=True)),
('dt_pagamento', models.DateTimeField(blank=True, null=True)),
('dt_preventrega', models.DateTimeField(blank=True, null=True)),
('pago', models.BooleanField(default=0)),
('hide', models.BooleanField(default=0)),
],
options={
'verbose_name': 'Pedido de Venda',
'verbose_name_plural': 'Pedidos de Venda',
'managed': True,
},
),
migrations.CreateModel(
name='Pessoa',
fields=[
('pkid_pessoa', models.AutoField(primary_key=True, serialize=False)),
('nomecompleto_razaosocial', models.CharField(max_length=100, verbose_name='Nome / Razão Social')),
('apelido_nomefantasia', models.CharField(blank=True, max_length=100, null=True, verbose_name='Apelido / Nome Fantasia')),
('email', models.CharField(blank=True, max_length=100, null=True, unique=True, verbose_name='E-mail')),
('cpf_cnpj', models.CharField(blank=True, max_length=19, null=True, unique=True, verbose_name='CPF / CNPJ')),
('rg_ie', models.CharField(blank=True, max_length=50, null=True, verbose_name='RG / IE')),
('genero', models.CharField(max_length=1, verbose_name='Gênero')),
('dt_nascimento', models.DateField(blank=True, null=True, verbose_name='Data de Nascimento')),
('st_pessoajuridica', models.BooleanField(default=0, max_length=1, verbose_name='Pessoa Jurídica')),
('tipopessoa', models.CharField(max_length=15, verbose_name='Tipo da Pessoa')),
('hide', models.BooleanField(default=0)),
],
options={
'verbose_name': 'Pessoa',
'verbose_name_plural': 'Pessoas',
'managed': True,
},
),
migrations.CreateModel(
name='Produto',
fields=[
('pkid_produto', models.AutoField(primary_key=True, serialize=False)),
('codproduto', models.CharField(max_length=8, unique=True, verbose_name='Código')),
('nomeproduto', models.CharField(max_length=50, verbose_name='Nome do Produto')),
('preco', models.DecimalField(decimal_places=2, max_digits=10, verbose_name='Preço')),
('precocusto', models.DecimalField(blank=True, decimal_places=2, max_digits=10, null=True, verbose_name='Preço de Custo')),
('totalestoque', models.IntegerField(default=0, verbose_name='Qtd. em Estoque')),
('descricao', models.CharField(blank=True, max_length=300, null=True, verbose_name='Descrição')),
('sabor', models.CharField(blank=True, max_length=45, null=True, verbose_name='Sabor')),
('marca', models.CharField(blank=True, max_length=50, null=True, verbose_name='Marca')),
('altura', models.IntegerField(verbose_name='Altura')),
('largura', models.IntegerField(verbose_name='Largura')),
('profundidade', models.IntegerField(verbose_name='Profundidade')),
('peso', models.DecimalField(decimal_places=3, max_digits=10, verbose_name='Peso')),
('fotoproduto', models.FileField(blank=True, max_length=1000, null=True, upload_to='uploads/%Y/%m', verbose_name='Foto do Produto')),
('vendivel', models.BooleanField(default=1)),
('hide', models.BooleanField(default=0)),
('fkid_categoria', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Categoriaproduto', verbose_name='Categoria')),
],
options={
'verbose_name': 'Produto',
'verbose_name_plural': 'Produtos',
'managed': True,
},
),
migrations.CreateModel(
name='Statuscompra',
fields=[
('pkid_status', models.AutoField(primary_key=True, serialize=False)),
('descricaostatus', models.CharField(blank=True, max_length=45, null=True)),
],
options={
'managed': True,
},
),
migrations.CreateModel(
name='Statusfabricacao',
fields=[
('pkid_status', models.AutoField(primary_key=True, serialize=False)),
('order', models.IntegerField(verbose_name='Ordem')),
('status', models.CharField(blank=True, max_length=45, null=True, verbose_name='Estado')),
('hide', models.BooleanField(default=0)),
],
options={
'verbose_name': 'Status de Fabricação',
'verbose_name_plural': 'Status de Fabricação',
'managed': True,
},
),
migrations.CreateModel(
name='Statusvenda',
fields=[
('pkid_status', models.AutoField(primary_key=True, serialize=False)),
('descricao', models.CharField(blank=True, max_length=100, null=True)),
],
options={
'verbose_name': 'Status de Venda',
'verbose_name_plural': 'Status de Venda',
'managed': True,
},
),
migrations.CreateModel(
name='Telefone',
fields=[
('pkid_telefone', models.AutoField(primary_key=True, serialize=False)),
('ddi', models.CharField(blank=True, default=55, max_length=3, null=True, verbose_name='DDI')),
('ddd', models.CharField(blank=True, default=11, max_length=2, null=True, verbose_name='DDD')),
('numero', models.CharField(max_length=15, verbose_name='Número')),
('hide', models.BooleanField(default=0)),
('fkid_pessoa', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Pessoa', verbose_name='Pessoa')),
],
options={
'verbose_name': 'Telefone',
'verbose_name_plural': 'Telefones',
'managed': True,
},
),
migrations.CreateModel(
name='TipoEntrega',
fields=[
('pkid_tipoentrega', models.AutoField(primary_key=True, serialize=False)),
('descricao', models.CharField(max_length=30, verbose_name='TipoEntrega')),
],
),
migrations.CreateModel(
name='Unidademedida',
fields=[
('pkid_unidademedida', models.AutoField(primary_key=True, serialize=False)),
('unidademedida', models.CharField(max_length=50, verbose_name='Unidade')),
('hide', models.BooleanField(default=0)),
],
options={
'verbose_name': 'Unidade de Medida',
'verbose_name_plural': 'Unidades de Medida',
'managed': True,
},
),
migrations.CreateModel(
name='Usuario',
fields=[
('pkid_usuario', models.AutoField(primary_key=True, serialize=False)),
('nomecompleto', models.CharField(max_length=45)),
('login', models.CharField(max_length=45, unique=True)),
('senha', models.CharField(max_length=45)),
('dt_importacao', models.DateTimeField()),
('dt_alteracao', models.DateTimeField()),
('hide', models.BooleanField(default=0)),
],
options={
'managed': True,
},
),
migrations.CreateModel(
name='Usuarioalteracao',
fields=[
('pkid_usuario_alteracao', models.AutoField(primary_key=True, serialize=False)),
('dt_alteracao', models.DateTimeField()),
('tipo_alteracao', models.TextField()),
],
options={
'managed': True,
},
),
migrations.AddField(
model_name='produto',
name='fkid_unidademedida',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Unidademedida', verbose_name='Unidade de Medida'),
),
migrations.AddField(
model_name='pedidovenda',
name='fkid_cliente',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Pessoa'),
),
migrations.AddField(
model_name='pedidovenda',
name='fkid_formapag',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='core.Formapagamento'),
),
migrations.AddField(
model_name='pedidovenda',
name='fkid_status',
field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='core.Statusvenda'),
),
migrations.AddField(
model_name='pedidovenda',
name='fkid_usuario',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='pedidofabricacao',
name='fkid_statusfabricacao',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Statusfabricacao'),
),
migrations.AddField(
model_name='materiaprima',
name='unidade',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Unidademedida', verbose_name='Unidade'),
),
migrations.AddField(
model_name='itemvenda',
name='fkid_pedidovenda',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Pedidovenda'),
),
migrations.AddField(
model_name='itemvenda',
name='fkid_produto',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Produto'),
),
migrations.AlterUniqueTogether(
name='itemcotacao',
unique_together={('pkid_item', 'cotacaocompra_pkid_cotacao')},
),
migrations.AlterUniqueTogether(
name='itemcompra',
unique_together={('pkid_item', 'pedidocompra_pkid_compra')},
),
migrations.AddField(
model_name='formulaproduto',
name='fkid_produto',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='core.Produto', verbose_name='Produto'),
),
migrations.AddField(
model_name='formulamateria',
name='fkid_formulaproduto',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Formulaproduto', verbose_name='Fórmula'),
),
migrations.AddField(
model_name='formulamateria',
name='fkid_materiaprima',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Materiaprima', verbose_name='Matéria Prima'),
),
migrations.AddField(
model_name='formulamateria',
name='unidade',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Unidademedida', verbose_name='Unidade'),
),
migrations.AddField(
model_name='entrega',
name='fkid_tipoentrega',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.TipoEntrega'),
),
migrations.AddField(
model_name='entrega',
name='fkid_venda',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Pedidovenda'),
),
migrations.AddField(
model_name='endereco',
name='fkid_pessoa',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Pessoa', verbose_name='Pessoa'),
),
migrations.AlterUniqueTogether(
name='itemvenda',
unique_together={('pkid_itemvenda', 'fkid_pedidovenda')},
),
]
| 48.048682
| 153
| 0.559313
|
4a0e07ff7440c2667500a1a36c7e3873c0037c6e
| 16,701
|
py
|
Python
|
network.py
|
Anat37/Parser-v1
|
d40ef549f589af95aed2b86740bbfbb6ce7aa68c
|
[
"Apache-2.0"
] | null | null | null |
network.py
|
Anat37/Parser-v1
|
d40ef549f589af95aed2b86740bbfbb6ce7aa68c
|
[
"Apache-2.0"
] | null | null | null |
network.py
|
Anat37/Parser-v1
|
d40ef549f589af95aed2b86740bbfbb6ce7aa68c
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 Timothy Dozat
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import time
import pickle as pkl
import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
from lib import models
from lib import optimizers
from lib import rnn_cells
from configurable import Configurable
from vocab import Vocab
from dataset import Dataset
#***************************************************************
class Network(Configurable):
""""""
#=============================================================
def __init__(self, model, *args, **kwargs):
""""""
if args:
if len(args) > 1:
raise TypeError('Parser takes at most one argument')
kwargs['name'] = kwargs.pop('name', model.__name__)
super(Network, self).__init__(*args, **kwargs)
if not os.path.isdir(self.save_dir):
os.mkdir(self.save_dir)
with open(os.path.join(self.save_dir, 'config.cfg'), 'w') as f:
self._config.write(f)
self._global_step = tf.Variable(0., trainable=False)
self._global_epoch = tf.Variable(0., trainable=False)
self._model = model(self._config, global_step=self.global_step)
self._vocabs = []
vocab_files = [(self.word_file, 1, 'Words'),
(self.tag_file, [3, 4], 'Tags'),
(self.rel_file, 7, 'Rels')]
for i, (vocab_file, index, name) in enumerate(vocab_files):
vocab = Vocab(vocab_file, index, self._config,
name=name,
cased=self.cased if not i else True,
use_pretrained=(not i),
global_step=self.global_step)
self._vocabs.append(vocab)
self._trainset = Dataset(self.train_file, self._vocabs, model, self._config, name='Trainset')
self._validset = Dataset(self.valid_file, self._vocabs, model, self._config, name='Validset')
self._testset = Dataset(self.test_file, self._vocabs, model, self._config, name='Testset')
self._ops = self._gen_ops()
self._save_vars = filter(lambda x: u'Pretrained' not in x.name, tf.all_variables())
self.history = {
'train_loss': [],
'train_accuracy': [],
'valid_loss': [],
'valid_accuracy': [],
'test_acuracy': 0
}
return
#=============================================================
def train_minibatches(self):
""""""
return self._trainset.get_minibatches(self.train_batch_size,
self.model.input_idxs,
self.model.target_idxs)
#=============================================================
def valid_minibatches(self):
""""""
return self._validset.get_minibatches(self.test_batch_size,
self.model.input_idxs,
self.model.target_idxs,
shuffle=False)
#=============================================================
def test_minibatches(self):
""""""
return self._testset.get_minibatches(self.test_batch_size,
self.model.input_idxs,
self.model.target_idxs,
shuffle=False)
#=============================================================
# assumes the sess has already been initialized
  def train(self, sess):
    """Run the main training loop until `train_iters` iterations.

    Periodically validates, prints progress, and checkpoints the model.
    On Ctrl-C the user may choose to save before exiting.  Assumes `sess`
    has already been initialized by the caller.
    """
    save_path = os.path.join(self.save_dir, self.name.lower() + '-pretrained')
    saver = tf.train.Saver(self.save_vars, max_to_keep=1)
    n_bkts = self.n_bkts  # NOTE(review): unused in this method
    train_iters = self.train_iters
    print_every = self.print_every
    validate_every = self.validate_every
    save_every = self.save_every
    try:
      # Running totals accumulated since the last progress report.
      train_time = 0
      train_loss = 0
      n_train_sents = 0
      n_train_correct = 0
      n_train_tokens = 0
      n_train_iters = 0
      # Resume the iteration count from the persisted global step.
      total_train_iters = sess.run(self.global_step)
      valid_time = 0
      valid_loss = 0
      valid_accuracy = 0
      while total_train_iters < train_iters:
        for j, (feed_dict, _) in enumerate(self.train_minibatches()):
          train_inputs = feed_dict[self._trainset.inputs]
          train_targets = feed_dict[self._trainset.targets]
          start_time = time.time()
          # ops['train_op'] = [train_op, loss, n_correct, n_tokens]
          _, loss, n_correct, n_tokens = sess.run(self.ops['train_op'], feed_dict=feed_dict)
          train_time += time.time() - start_time
          train_loss += loss
          n_train_sents += len(train_targets)
          n_train_correct += n_correct
          n_train_tokens += n_tokens
          n_train_iters += 1
          total_train_iters += 1
          self.history['train_loss'].append(loss)
          self.history['train_accuracy'].append(100 * n_correct / n_tokens)
          if total_train_iters == 1 or total_train_iters % validate_every == 0:
            # Full validation pass; also dumps per-sentence sanity output.
            valid_time = 0
            valid_loss = 0
            n_valid_sents = 0
            n_valid_correct = 0
            n_valid_tokens = 0
            with open(os.path.join(self.save_dir, 'sanitycheck.txt'), 'w') as f:
              for k, (feed_dict, _) in enumerate(self.valid_minibatches()):
                inputs = feed_dict[self._validset.inputs]
                targets = feed_dict[self._validset.targets]
                start_time = time.time()
                loss, n_correct, n_tokens, predictions = sess.run(self.ops['valid_op'], feed_dict=feed_dict)
                valid_time += time.time() - start_time
                valid_loss += loss
                n_valid_sents += len(targets)
                n_valid_correct += n_correct
                n_valid_tokens += n_tokens
                self.model.sanity_check(inputs, targets, predictions, self._vocabs, f, feed_dict=feed_dict)
            # NOTE(review): relies on `k` from the loop above; raises
            # NameError if the validation set yields no minibatches.
            valid_loss /= k+1
            valid_accuracy = 100 * n_valid_correct / n_valid_tokens
            valid_time = n_valid_sents / valid_time
            self.history['valid_loss'].append(valid_loss)
            self.history['valid_accuracy'].append(valid_accuracy)
          if print_every and total_train_iters % print_every == 0:
            # Convert accumulators to averages/rates, report, then reset.
            train_loss /= n_train_iters
            train_accuracy = 100 * n_train_correct / n_train_tokens
            train_time = n_train_sents / train_time
            print('%6d) Train loss: %.4f Train acc: %5.2f%% Train rate: %6.1f sents/sec\n\tValid loss: %.4f Valid acc: %5.2f%% Valid rate: %6.1f sents/sec' % (total_train_iters, train_loss, train_accuracy, train_time, valid_loss, valid_accuracy, valid_time))
            train_time = 0
            train_loss = 0
            n_train_sents = 0
            n_train_correct = 0
            n_train_tokens = 0
            n_train_iters = 0
        # One pass over the training data completed.
        sess.run(self._global_epoch.assign_add(1.))
        if save_every and (total_train_iters % save_every == 0):
          # Checkpoint weights, persist training history, and re-score.
          saver.save(sess, os.path.join(self.save_dir, self.name.lower() + '-trained'),
                     latest_filename=self.name.lower(),
                     global_step=self.global_epoch,
                     write_meta_graph=False)
          with open(os.path.join(self.save_dir, 'history.pkl'), 'w') as f:
            pkl.dump(self.history, f)
          self.test(sess, validate=True)
    except KeyboardInterrupt:
      # Let the user decide whether the interrupted run is worth saving.
      try:
        raw_input('\nPress <Enter> to save or <Ctrl-C> to exit.')
      except:
        print('\r', end='')
        sys.exit(0)
    # Final checkpoint + history dump after training ends (or is interrupted).
    saver.save(sess, os.path.join(self.save_dir, self.name.lower() + '-trained'),
               latest_filename=self.name.lower(),
               global_step=self.global_epoch,
               write_meta_graph=False)
    with open(os.path.join(self.save_dir, 'history.pkl'), 'w') as f:
      pkl.dump(self.history, f)
    # Truncate scores.txt so the final evaluation starts fresh.
    with open(os.path.join(self.save_dir, 'scores.txt'), 'w') as f:
      pass
    self.test(sess, validate=True)
    return
#=============================================================
# TODO make this work if lines_per_buff isn't set to 0
  # TODO make this work if lines_per_buff isn't set to 0
  def test(self, sess, validate=False):
    """Run inference over the validation or test set and write results.

    Predictions are written in CoNLL-style tab-separated format next to
    the model, and evaluation scores are appended to `scores.txt`.
    """
    if validate:
      filename = self.valid_file
      minibatches = self.valid_minibatches
      dataset = self._validset
      op = self.ops['test_op'][0]
    else:
      filename = self.test_file
      minibatches = self.test_minibatches
      dataset = self._testset
      op = self.ops['test_op'][1]
    # One sublist of predictions/sentences per length bucket.
    all_predictions = [[]]
    all_sents = [[]]
    bkt_idx = 0
    for (feed_dict, sents) in minibatches():
      mb_inputs = feed_dict[dataset.inputs]
      mb_targets = feed_dict[dataset.targets]
      mb_probs = sess.run(op, feed_dict=feed_dict)
      all_predictions[-1].extend(self.model.validate(mb_inputs, mb_targets, mb_probs))
      all_sents[-1].extend(sents)
      # Advance to the next bucket once the current one is fully decoded.
      if len(all_predictions[-1]) == len(dataset[bkt_idx]):
        bkt_idx += 1
        if bkt_idx < len(dataset._metabucket):
          all_predictions.append([])
          all_sents.append([])
    with open(os.path.join(self.save_dir, os.path.basename(filename)), 'w') as f:
      # Re-emit sentences in the original corpus order via the metabucket map.
      for bkt_idx, idx in dataset._metabucket.data:
        data = dataset._metabucket[bkt_idx].data[idx][1:]
        preds = all_predictions[bkt_idx][idx]
        words = all_sents[bkt_idx][idx]
        for i, (datum, word, pred) in enumerate(zip(data, words, preds)):
          # Fall back to gold values when a prediction slot is -1.
          tup = (
            i+1,
            word,
            self.tags[pred[3]] if pred[3] != -1 else self.tags[datum[2]],
            self.tags[pred[4]] if pred[4] != -1 else self.tags[datum[3]],
            str(pred[5]) if pred[5] != -1 else str(datum[4]),
            self.rels[pred[6]] if pred[6] != -1 else self.rels[datum[5]],
            str(pred[7]) if pred[7] != -1 else '_',
            self.rels[pred[8]] if pred[8] != -1 else '_',
            )
          f.write('%s\t%s\t_\t%s\t%s\t_\t%s\t%s\t%s\t%s\n' % tup)
        f.write('\n')
    with open(os.path.join(self.save_dir, 'scores.txt'), 'a') as f:
      s, _ = self.model.evaluate(os.path.join(self.save_dir, os.path.basename(filename)), punct=self.model.PUNCT)
      f.write(s)
    return
#=============================================================
  def savefigs(self, sess, optimizer=False):
    """Save a heat-map image of every saved variable to save_dir/matrices.

    When `optimizer` is False, optimizer slot variables are skipped.
    """
    import gc
    import matplotlib as mpl
    mpl.use('Agg')  # headless backend: write files, never open a window
    import matplotlib.pyplot as plt
    matdir = os.path.join(self.save_dir, 'matrices')
    if not os.path.isdir(matdir):
      os.mkdir(matdir)
    for var in self.save_vars:
      if optimizer or ('Optimizer' not in var.name):
        print(var.name)
        mat = sess.run(var)
        # Promote vectors to 1-row matrices so pcolor can draw them.
        if len(mat.shape) == 1:
          mat = mat[None,:]
        plt.figure()
        try:
          plt.pcolor(mat, cmap='RdBu')
          plt.gca().invert_yaxis()
          plt.colorbar()
          plt.clim(vmin=-1, vmax=1)
          plt.title(var.name)
          # '/' is illegal in filenames; flatten the TF scope path.
          plt.savefig(os.path.join(matdir, var.name.replace('/', '-')))
        except ValueError:
          # Some shapes can't be plotted; skip rather than abort the sweep.
          pass
        plt.close()
        # Large weight matrices: free eagerly to bound peak memory.
        del mat
        gc.collect()
#=============================================================
  def _gen_ops(self):
    """Build the train/valid/test graph ops and return them as a dict.

    Returns a dict with keys 'train_op', 'valid_op', 'test_op' (lists of
    tensors to fetch via sess.run) and 'optimizer'.
    """
    optimizer = optimizers.RadamOptimizer(self._config, global_step=self.global_step)
    train_output = self._model(self._trainset)
    train_op = optimizer.minimize(train_output['loss'])
    # These have to happen after optimizer.minimize is called
    valid_output = self._model(self._validset, moving_params=optimizer)
    test_output = self._model(self._testset, moving_params=optimizer)
    ops = {}
    ops['train_op'] = [train_op,
                       train_output['loss'],
                       train_output['n_correct'],
                       train_output['n_tokens']]
    ops['valid_op'] = [valid_output['loss'],
                       valid_output['n_correct'],
                       valid_output['n_tokens'],
                       valid_output['predictions']]
    # test_op[0] scores the validation set, test_op[1] the test set.
    ops['test_op'] = [valid_output['probabilities'],
                      test_output['probabilities']]
    ops['optimizer'] = optimizer
    return ops
#=============================================================
  # Read-only accessors over the private training state.
  @property
  def global_step(self):
    # Iteration counter persisted in the TF graph.
    return self._global_step
  @property
  def global_epoch(self):
    # Epoch counter persisted in the TF graph.
    return self._global_epoch
  @property
  def model(self):
    return self._model
  @property
  def words(self):
    # Word vocabulary (first of the three vocabs).
    return self._vocabs[0]
  @property
  def tags(self):
    # POS-tag vocabulary.
    return self._vocabs[1]
  @property
  def rels(self):
    # Dependency-relation vocabulary.
    return self._vocabs[2]
  @property
  def ops(self):
    return self._ops
  @property
  def save_vars(self):
    # Variables checkpointed by tf.train.Saver (pretrained ones excluded).
    return self._save_vars
#***************************************************************
if __name__ == '__main__':
  """"""
  # Command-line entry point: train, test, or dump weight matrices.
  import argparse
  argparser = argparse.ArgumentParser()
  argparser.add_argument('--test', action='store_true')
  argparser.add_argument('--load', action='store_true')
  argparser.add_argument('--model', default='Parser')
  argparser.add_argument('--matrix', action='store_true')
  args, extra_args = argparser.parse_known_args()
  # Remaining flags are parsed by the Configurable parser; drop unset ones.
  cargs = {k: v for (k, v) in vars(Configurable.argparser.parse_args(extra_args)).iteritems() if v is not None}
  print('*** '+args.model+' ***')
  model = getattr(models, args.model)
  if 'save_dir' in cargs and os.path.isdir(cargs['save_dir']) and not (args.test or args.matrix or args.load):
    raw_input('Save directory already exists. Press <Enter> to overwrite or <Ctrl-C> to exit.')
  if (args.test or args.load or args.matrix) and 'save_dir' in cargs:
    # Reuse the config saved alongside the model being restored.
    cargs['config_file'] = os.path.join(cargs['save_dir'], 'config.cfg')
  network = Network(model, **cargs)
  os.system('echo Model: %s > %s/MODEL' % (network.model.__class__.__name__, network.save_dir))
  #print([v.name for v in network.save_vars])
  config_proto = tf.ConfigProto()
  config_proto.gpu_options.per_process_gpu_memory_fraction = network.per_process_gpu_memory_fraction
  with tf.Session(config=config_proto) as sess:
    sess.run(tf.initialize_all_variables())
    if not (args.test or args.matrix):
      if args.load:
        # Resume training from the latest checkpoint (and its history).
        # NOTE(review): the 'Training:'/'Loading:' echo labels look swapped
        # between these two branches — confirm against intended semantics.
        os.system('echo Training: > %s/HEAD' % network.save_dir)
        os.system('git rev-parse HEAD >> %s/HEAD' % network.save_dir)
        saver = tf.train.Saver(var_list=network.save_vars)
        saver.restore(sess, tf.train.latest_checkpoint(network.save_dir, latest_filename=network.name.lower()))
        if os.path.isfile(os.path.join(network.save_dir, 'history.pkl')):
          with open(os.path.join(network.save_dir, 'history.pkl')) as f:
            network.history = pkl.load(f)
      else:
        os.system('echo Loading: >> %s/HEAD' % network.save_dir)
        os.system('git rev-parse HEAD >> %s/HEAD' % network.save_dir)
      network.train(sess)
    elif args.matrix:
      saver = tf.train.Saver(var_list=network.save_vars)
      saver.restore(sess, tf.train.latest_checkpoint(network.save_dir, latest_filename=network.name.lower()))
      # TODO make this save pcolor plots of all matrices to a directory in save_dir
      #with tf.variable_scope('RNN0/BiRNN_FW/LSTMCell/Linear', reuse=True):
      #  pkl.dump(sess.run(tf.get_variable('Weights')), open('mat0.pkl', 'w'))
      #with tf.variable_scope('RNN1/BiRNN_FW/LSTMCell/Linear', reuse=True):
      #  pkl.dump(sess.run(tf.get_variable('Weights')), open('mat1.pkl', 'w'))
      #with tf.variable_scope('RNN2/BiRNN_FW/LSTMCell/Linear', reuse=True):
      #  pkl.dump(sess.run(tf.get_variable('Weights')), open('mat2.pkl', 'w'))
      #with tf.variable_scope('MLP/Linear', reuse=True):
      #  pkl.dump(sess.run(tf.get_variable('Weights')), open('mat3.pkl', 'w'))
      network.savefigs(sess)
    else:
      # Evaluate a restored model on both validation and test sets.
      os.system('echo Testing: >> %s/HEAD' % network.save_dir)
      os.system('git rev-parse HEAD >> %s/HEAD' % network.save_dir)
      saver = tf.train.Saver(var_list=network.save_vars)
      saver.restore(sess, tf.train.latest_checkpoint(network.save_dir, latest_filename=network.name.lower()))
      network.test(sess, validate=True)
      start_time = time.time()
      network.test(sess, validate=False)
      print('Parsing took %f seconds' % (time.time() - start_time))
| 40.243373
| 270
| 0.591222
|
4a0e0892d57a322211c53bbceef0c75d3b690ce5
| 3,477
|
py
|
Python
|
sdk/monitor/azure-mgmt-monitor/azure/mgmt/monitor/v2021_09_01/models/__init__.py
|
xolve/azure-sdk-for-python
|
9f5baa19c392f77f811d936ee43450e4ea524002
|
[
"MIT"
] | 2,728
|
2015-01-09T10:19:32.000Z
|
2022-03-31T14:50:33.000Z
|
sdk/monitor/azure-mgmt-monitor/azure/mgmt/monitor/v2021_09_01/models/__init__.py
|
v-xuto/azure-sdk-for-python
|
9c6296d22094c5ede410bc83749e8df8694ccacc
|
[
"MIT"
] | 17,773
|
2015-01-05T15:57:17.000Z
|
2022-03-31T23:50:25.000Z
|
sdk/monitor/azure-mgmt-monitor/azure/mgmt/monitor/v2021_09_01/models/__init__.py
|
v-xuto/azure-sdk-for-python
|
9c6296d22094c5ede410bc83749e8df8694ccacc
|
[
"MIT"
] | 1,916
|
2015-01-19T05:05:41.000Z
|
2022-03-31T19:36:44.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
try:
from ._models_py3 import ActionDetail
from ._models_py3 import ActionGroupList
from ._models_py3 import ActionGroupPatchBody
from ._models_py3 import ActionGroupResource
from ._models_py3 import ArmRoleReceiver
from ._models_py3 import AutomationRunbookReceiver
from ._models_py3 import AzureAppPushReceiver
from ._models_py3 import AzureFunctionReceiver
from ._models_py3 import AzureResource
from ._models_py3 import Context
from ._models_py3 import EmailReceiver
from ._models_py3 import EnableRequest
from ._models_py3 import ErrorResponse
from ._models_py3 import EventHubReceiver
from ._models_py3 import ItsmReceiver
from ._models_py3 import LogicAppReceiver
from ._models_py3 import NotificationRequestBody
from ._models_py3 import SmsReceiver
from ._models_py3 import TestNotificationDetailsResponse
from ._models_py3 import TestNotificationResponse
from ._models_py3 import VoiceReceiver
from ._models_py3 import WebhookReceiver
except (SyntaxError, ImportError):
from ._models import ActionDetail # type: ignore
from ._models import ActionGroupList # type: ignore
from ._models import ActionGroupPatchBody # type: ignore
from ._models import ActionGroupResource # type: ignore
from ._models import ArmRoleReceiver # type: ignore
from ._models import AutomationRunbookReceiver # type: ignore
from ._models import AzureAppPushReceiver # type: ignore
from ._models import AzureFunctionReceiver # type: ignore
from ._models import AzureResource # type: ignore
from ._models import Context # type: ignore
from ._models import EmailReceiver # type: ignore
from ._models import EnableRequest # type: ignore
from ._models import ErrorResponse # type: ignore
from ._models import EventHubReceiver # type: ignore
from ._models import ItsmReceiver # type: ignore
from ._models import LogicAppReceiver # type: ignore
from ._models import NotificationRequestBody # type: ignore
from ._models import SmsReceiver # type: ignore
from ._models import TestNotificationDetailsResponse # type: ignore
from ._models import TestNotificationResponse # type: ignore
from ._models import VoiceReceiver # type: ignore
from ._models import WebhookReceiver # type: ignore
from ._monitor_management_client_enums import (
ReceiverStatus,
)
__all__ = [
'ActionDetail',
'ActionGroupList',
'ActionGroupPatchBody',
'ActionGroupResource',
'ArmRoleReceiver',
'AutomationRunbookReceiver',
'AzureAppPushReceiver',
'AzureFunctionReceiver',
'AzureResource',
'Context',
'EmailReceiver',
'EnableRequest',
'ErrorResponse',
'EventHubReceiver',
'ItsmReceiver',
'LogicAppReceiver',
'NotificationRequestBody',
'SmsReceiver',
'TestNotificationDetailsResponse',
'TestNotificationResponse',
'VoiceReceiver',
'WebhookReceiver',
'ReceiverStatus',
]
| 40.905882
| 94
| 0.722174
|
4a0e092a5adeea0194770bba6d783b1704a855ab
| 3,375
|
py
|
Python
|
homeassistant/components/weather/zamg.py
|
adolfoeliazat/voidhomecontrol
|
6d733253811c553912e46e24debec818b28b0688
|
[
"Apache-2.0"
] | 2
|
2017-02-25T00:27:06.000Z
|
2017-02-25T03:09:30.000Z
|
homeassistant/components/weather/zamg.py
|
adolfoeliazat/voidhomecontrol
|
6d733253811c553912e46e24debec818b28b0688
|
[
"Apache-2.0"
] | null | null | null |
homeassistant/components/weather/zamg.py
|
adolfoeliazat/voidhomecontrol
|
6d733253811c553912e46e24debec818b28b0688
|
[
"Apache-2.0"
] | 2
|
2018-06-03T11:14:44.000Z
|
2018-11-04T18:18:12.000Z
|
"""
Sensor for data from Austrian "Zentralanstalt für Meteorologie und Geodynamik".
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/weather.zamg/
"""
import logging
import voluptuous as vol
from homeassistant.components.weather import (
WeatherEntity, ATTR_WEATHER_HUMIDITY, ATTR_WEATHER_PRESSURE,
ATTR_WEATHER_TEMPERATURE, ATTR_WEATHER_WIND_BEARING,
ATTR_WEATHER_WIND_SPEED, PLATFORM_SCHEMA)
from homeassistant.const import \
CONF_NAME, TEMP_CELSIUS, CONF_LATITUDE, CONF_LONGITUDE
from homeassistant.helpers import config_validation as cv
# Reuse data and API logic from the sensor implementation
from homeassistant.components.sensor.zamg import (
ATTRIBUTION, closest_station, CONF_STATION_ID, zamg_stations, ZamgData)
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_STATION_ID): cv.string,
})
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the ZAMG sensor platform."""
    config_dir = hass.config.config_dir

    # Fall back to the station nearest the configured coordinates when no
    # explicit station id was given.
    station_id = config.get(CONF_STATION_ID)
    if not station_id:
        station_id = closest_station(
            config.get(CONF_LATITUDE), config.get(CONF_LONGITUDE), config_dir)

    if station_id not in zamg_stations(config_dir):
        _LOGGER.error("Configured ZAMG %s (%s) is not a known station",
                      CONF_STATION_ID, station_id)
        return False

    probe = ZamgData(station_id=station_id, logger=_LOGGER)
    try:
        probe.update()
    except ValueError as err:
        _LOGGER.error("Received error from ZAMG: %s", err)
        return False

    add_devices([ZamgWeather(probe, config.get(CONF_NAME))], True)
class ZamgWeather(WeatherEntity):
    """Representation of a weather condition reported by a ZAMG station."""
    def __init__(self, zamg_data, stationname=None):
        """Initialise the platform with a data instance and station name."""
        self.zamg_data = zamg_data
        self.stationname = stationname
    def update(self):
        """Fetch new state data from the ZAMG API via the shared data object."""
        self.zamg_data.update()
    @property
    def name(self):
        """Return the configured name, or one derived from the station name."""
        return self.stationname or 'ZAMG {}'.format(
            self.zamg_data.data.get('Name') or '(unknown station)')
    @property
    def condition(self):
        """Return the current condition (not provided by this data source)."""
        return None
    @property
    def attribution(self):
        """Return the attribution."""
        return ATTRIBUTION
    @property
    def temperature(self):
        """Return the platform temperature."""
        return self.zamg_data.get_data(ATTR_WEATHER_TEMPERATURE)
    @property
    def temperature_unit(self):
        """Return the unit of measurement."""
        return TEMP_CELSIUS
    @property
    def pressure(self):
        """Return the pressure."""
        return self.zamg_data.get_data(ATTR_WEATHER_PRESSURE)
    @property
    def humidity(self):
        """Return the humidity."""
        return self.zamg_data.get_data(ATTR_WEATHER_HUMIDITY)
    @property
    def wind_speed(self):
        """Return the wind speed."""
        return self.zamg_data.get_data(ATTR_WEATHER_WIND_SPEED)
    @property
    def wind_bearing(self):
        """Return the wind bearing."""
        return self.zamg_data.get_data(ATTR_WEATHER_WIND_BEARING)
| 31.25
| 79
| 0.694519
|
4a0e0a289f888bbc91a59dd4534ff8164b1952d4
| 3,747
|
py
|
Python
|
SemanticSearch/settings.py
|
tkhang1999/Solr-SBERT-semantic-search
|
f805cd121b367f47706a024d67f456290bdfe346
|
[
"MIT"
] | 3
|
2021-12-07T09:49:11.000Z
|
2021-12-22T03:42:18.000Z
|
SemanticSearch/settings.py
|
tkhang1999/Solr-SBERT-semantic-search
|
f805cd121b367f47706a024d67f456290bdfe346
|
[
"MIT"
] | 1
|
2022-01-15T00:59:50.000Z
|
2022-01-15T00:59:50.000Z
|
SemanticSearch/settings.py
|
tkhang1999/Solr-SBERT-semantic-search
|
f805cd121b367f47706a024d67f456290bdfe346
|
[
"MIT"
] | null | null | null |
"""
Django settings for SemanticSearch project.
Generated by 'django-admin startproject' using Django 3.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-wq%8_uze9qr(rx00vgg5q1x^5@qyz^s!n0c3=g3-i+8x^64_*x'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*', '0.0.0.0', 'localhost', '127.0.0.1']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# my apps
'search.apps.SearchConfig',
]
MIDDLEWARE = [
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'SemanticSearch.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join((BASE_DIR), 'templates/')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'SemanticSearch.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.normpath(os.path.join(BASE_DIR, 'staticfiles'))
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static')]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| 27.152174
| 91
| 0.707766
|
4a0e0a2c5da0bafd63cdf7ccf93b4191e6d673bf
| 9,265
|
py
|
Python
|
lektor/build_programs.py
|
yagebu/lektor
|
a31d8e57a5b2f4b090072527269c26a65202c736
|
[
"BSD-3-Clause"
] | 4,104
|
2015-11-16T18:37:51.000Z
|
2022-03-28T14:28:24.000Z
|
lektor/build_programs.py
|
yagebu/lektor
|
a31d8e57a5b2f4b090072527269c26a65202c736
|
[
"BSD-3-Clause"
] | 854
|
2015-12-05T12:19:02.000Z
|
2022-03-31T16:47:27.000Z
|
lektor/build_programs.py
|
yagebu/lektor
|
a31d8e57a5b2f4b090072527269c26a65202c736
|
[
"BSD-3-Clause"
] | 442
|
2015-11-21T10:18:02.000Z
|
2022-03-29T19:55:17.000Z
|
import os
import shutil
from itertools import chain
from lektor.assets import Directory
from lektor.assets import File
from lektor.constants import PRIMARY_ALT
from lektor.db import Attachment
from lektor.db import Page
from lektor.exception import LektorException
class BuildError(LektorException):
    """Raised when an artifact cannot be built (e.g. invalid URL path)."""
    pass
#: Registry of (source class, build program class) pairs shipped built-in.
builtin_build_programs = []


def buildprogram(source_cls):
    """Class decorator that registers a build program for *source_cls*.

    The decorated class is appended to ``builtin_build_programs`` paired
    with the source class it handles, and returned unchanged.
    """

    def register(builder_cls):
        builtin_build_programs.append((source_cls, builder_cls))
        return builder_cls

    return register
class SourceInfo:
    """Holds some information about a source file for indexing into the
    build state.

    ``title_i18n`` maps language codes to titles; only titles that differ
    from the English one are stored per-language, with English always
    present (falling back to the path).
    """

    def __init__(
        self, path, filename, alt=PRIMARY_ALT, type="unknown", title_i18n=None
    ):
        self.path = path
        self.alt = alt
        self.filename = filename
        self.type = type
        self.title_i18n = {}

        # Bug fix: the default of None was previously used directly with
        # `in` and `.items()`, raising TypeError when no titles were given.
        if title_i18n is None:
            title_i18n = {}

        # English title falls back to the source path.
        en_title = self.path
        if "en" in title_i18n:
            en_title = title_i18n["en"]
        # Keep only translations that actually differ from the English title.
        for key, value in title_i18n.items():
            if key == "en":
                continue
            if value != en_title:
                self.title_i18n[key] = value
        self.title_i18n["en"] = en_title
class BuildProgram:
    """Base class for build programs: maps one source object to artifacts."""

    def __init__(self, source, build_state):
        self.source = source
        self.build_state = build_state
        self.artifacts = []
        # Guards against a program instance being executed twice.
        self._built = False

    @property
    def primary_artifact(self):
        """Returns the primary artifact for this build program. By
        default this is the first artifact produced. This needs to be the
        one that corresponds to the URL of the source if it has one.
        """
        try:
            return self.artifacts[0]
        except IndexError:
            return None

    def describe_source_record(self):
        """Can be used to describe the source info by returning a
        :class:`SourceInfo` object. This is indexed by the builder into
        the build state so that the UI can quickly find files without
        having to scan the file system.
        """

    def build(self):
        """Invokes the build program."""
        if self._built:
            raise RuntimeError("This build program was already used.")
        self._built = True

        self.produce_artifacts()

        sub_artifacts = []
        failures = []

        gen = self.build_state.builder

        def _build(artifact, build_func):
            # Build one artifact, collecting either its failure or the
            # sub-artifacts it spawned for later processing.
            ctx = gen.build_artifact(artifact, build_func)
            if ctx is not None:
                if ctx.exc_info is not None:
                    failures.append(ctx.exc_info)
                else:
                    sub_artifacts.extend(ctx.sub_artifacts)

        # Step one is building the artifacts that this build program
        # knows about.
        for artifact in self.artifacts:
            _build(artifact, self.build_artifact)

        # For as long as our ctx keeps producing sub artifacts, we
        # want to process them as well.
        while sub_artifacts and not failures:
            artifact, build_func = sub_artifacts.pop()
            _build(artifact, build_func)

        # If we failed anywhere we want to mark *all* artifacts as dirty.
        # This means that if a sub-artifact fails we also rebuild the
        # parent next time around.
        if failures:
            for artifact in self.artifacts:
                artifact.set_dirty_flag()

    def produce_artifacts(self):
        """This produces the artifacts for building. Usually this only
        produces a single artifact.
        """

    def declare_artifact(self, artifact_name, sources=None, extra=None):
        """This declares an artifact to be built in this program."""
        self.artifacts.append(
            self.build_state.new_artifact(
                artifact_name=artifact_name,
                sources=sources,
                source_obj=self.source,
                extra=extra,
            )
        )

    def build_artifact(self, artifact):
        """This is invoked for each artifact declared."""

    def iter_child_sources(self):
        """This allows a build program to produce children that also need
        building. An individual build never recurses down to this, but
        a `build_all` will use this.
        """
        # pylint: disable=no-self-use
        return iter(())
@buildprogram(Page)
class PageBuildProgram(BuildProgram):
    """Build program that renders a :class:`Page` through its template."""

    def describe_source_record(self):
        """Return a :class:`SourceInfo` for the first existing source file."""
        # When we describe the source record we need to consider that a
        # page has multiple source file names but only one will actually
        # be used. The order of the source iter is in order the files are
        # attempted to be read. So we go with the first that actually
        # exists and then return that.
        for filename in self.source.iter_source_filenames():
            if os.path.isfile(filename):
                return SourceInfo(
                    path=self.source.path,
                    alt=self.source["_source_alt"],
                    filename=filename,
                    type="page",
                    title_i18n=self.source.get_record_label_i18n(),
                )
        return None

    def produce_artifacts(self):
        """Declare the page's HTML artifact unless pagination hides it."""
        pagination_enabled = self.source.datamodel.pagination_config.enabled
        # With pagination enabled, only concrete page numbers get artifacts;
        # the page_num=None "parent" version is virtual.
        if self.source.is_visible and (
            self.source.page_num is not None or not pagination_enabled
        ):
            artifact_name = self.source.url_path
            if artifact_name.endswith("/"):
                artifact_name += "index.html"
            self.declare_artifact(
                artifact_name, sources=list(self.source.iter_source_filenames())
            )

    def build_artifact(self, artifact):
        """Render the page template into the artifact.

        Raises :class:`BuildError` for non-ASCII URL paths.
        """
        try:
            self.source.url_path.encode("ascii")
        except UnicodeError as error:
            raise BuildError(
                "The URL for this record contains non ASCII "
                "characters. This is currently not supported "
                "for portability reasons (%r)." % self.source.url_path
            ) from error
        artifact.render_template_into(self.source["_template"], this=self.source)

    def _iter_paginated_children(self):
        # Yield one Page per pagination page number (1-based).
        total = self.source.datamodel.pagination_config.count_pages(self.source)
        for page_num in range(1, total + 1):
            yield Page(self.source.pad, self.source._data, page_num=page_num)

    def iter_child_sources(self):
        """Yield the child sources that must also be built."""
        p_config = self.source.datamodel.pagination_config
        pagination_enabled = p_config.enabled
        child_sources = []

        # So this requires a bit of explanation:
        #
        # the basic logic is that if we have pagination enabled then we
        # need to consider two cases:
        #
        # 1. our build program has page_num = None which means that we
        #    are not yet pointing to a page. In that case we want to
        #    iter over all children which will yield the pages.
        # 2. we are pointing to a page, then our child sources are the
        #    items that are shown on that page.
        #
        # In addition, attachments and pages excluded from pagination are
        # linked to the page with page_num = None.
        #
        # If pagination is disabled, all children and attachments are linked
        # to this page.
        all_children = self.source.children.include_undiscoverable(True)
        if pagination_enabled:
            if self.source.page_num is None:
                child_sources.append(self._iter_paginated_children())
                pq = p_config.get_pagination_query(self.source)
                child_sources.append(set(all_children) - set(pq))
                child_sources.append(self.source.attachments)
            else:
                child_sources.append(self.source.pagination.items)
        else:
            child_sources.append(all_children)
            child_sources.append(self.source.attachments)

        return chain(*child_sources)
@buildprogram(Attachment)
class AttachmentBuildProgram(BuildProgram):
    """Build program that copies attachments verbatim into the output."""

    def describe_source_record(self):
        """Describe this attachment for the build-state source index."""
        source = self.source
        return SourceInfo(
            path=source.path,
            alt=source.alt,
            filename=source.attachment_filename,
            type="attachment",
            title_i18n={"en": source["_id"]},
        )

    def produce_artifacts(self):
        """Declare one artifact at the attachment's URL, if it is visible."""
        if not self.source.is_visible:
            return
        sources = list(self.source.iter_source_filenames())
        self.declare_artifact(self.source.url_path, sources=sources)

    def build_artifact(self, artifact):
        """Copy the attachment file into the artifact byte-for-byte."""
        with artifact.open("wb") as dst, open(
            self.source.attachment_filename, "rb"
        ) as src:
            shutil.copyfileobj(src, dst)
@buildprogram(File)
class FileAssetBuildProgram(BuildProgram):
    """Build program that copies a single asset file into the output tree."""

    def produce_artifacts(self):
        """Declare the asset's artifact, sourced from its file on disk."""
        name = self.source.artifact_name
        self.declare_artifact(name, sources=[self.source.source_filename])

    def build_artifact(self, artifact):
        """Copy the asset file into the artifact unchanged."""
        with artifact.open("wb") as dst, open(
            self.source.source_filename, "rb"
        ) as src:
            shutil.copyfileobj(src, dst)
@buildprogram(Directory)
class DirectoryAssetBuildProgram(BuildProgram):
    # Directories produce no artifacts of their own; they only expose their
    # children so that `build_all` recurses into them.
    def iter_child_sources(self):
        """Yield the directory's child assets for recursive building."""
        return self.source.children
| 34.0625
| 87
| 0.621371
|
4a0e0a68fcaba4fee271701a349bc8683c3188dd
| 3,579
|
py
|
Python
|
micropython/sensors/adxl345_upy.py
|
kesking82/thingflow-python
|
4c00deafd1bf425ec90ef2159fc5f3ea2553ade8
|
[
"Apache-2.0"
] | 38
|
2017-04-25T12:24:56.000Z
|
2021-01-12T17:01:43.000Z
|
micropython/sensors/adxl345_upy.py
|
kesking82/thingflow-python
|
4c00deafd1bf425ec90ef2159fc5f3ea2553ade8
|
[
"Apache-2.0"
] | 4
|
2017-08-04T22:39:15.000Z
|
2017-08-07T17:36:21.000Z
|
micropython/sensors/adxl345_upy.py
|
kesking82/thingflow-python
|
4c00deafd1bf425ec90ef2159fc5f3ea2553ade8
|
[
"Apache-2.0"
] | 14
|
2017-07-31T21:25:29.000Z
|
2022-02-06T08:09:07.000Z
|
# ADXL345 Python library for Raspberry Pi
#
# author: Jonathan Williamson
# license: BSD, see LICENSE.txt included in this package
#
# This is a Raspberry Pi Python implementation to help you get started with
# the Adafruit Triple Axis ADXL345 breakout board:
# http://shop.pimoroni.com/products/adafruit-triple-axis-accelerometer
#
# Minor edit to print statement for Python 3 and AntEvents API changes (need sensor_id)
# Edits for MicroPython (no smbus module)
from machine import I2C, Pin
from time import sleep
# select the correct i2c bus for this revision of Raspberry Pi
#revision = ([l[12:-1] for l in open('/proc/cpuinfo','r').readlines() if l[:8]=="Revision"]+['0000'])[0]
#bus = smbus.SMBus(1 if int(revision, 16) >= 4 else 0)
# On the MicroPython port the bus is built directly from GPIO pins instead
# (SCL on pin 5, SDA on pin 4, 100 kHz).
bus = I2C(scl = Pin(5), sda = Pin(4), freq = 100000)

# ADXL345 constants
EARTH_GRAVITY_MS2 = 9.80665  # standard gravity, m/s^2
SCALE_MULTIPLIER = 0.004  # g per least-significant bit

# Register addresses
DATA_FORMAT = 0x31
BW_RATE = 0x2C
POWER_CTL = 0x2D

# Output-data-rate values for the BW_RATE register
BW_RATE_1600HZ = [0x0F]
BW_RATE_800HZ = [0x0E]
BW_RATE_400HZ = [0x0D]
BW_RATE_200HZ = [0x0C]
BW_RATE_100HZ = [0x0B]
BW_RATE_50HZ = [0x0A]
BW_RATE_25HZ = [0x09]

# Measurement-range values for the DATA_FORMAT register
RANGE_2G = 0x00
RANGE_4G = 0x01
RANGE_8G = 0x02
RANGE_16G = 0x03

MEASURE = [0x08]  # POWER_CTL measurement-mode bit
AXES_DATA = 0x32  # first of the six DATAX0..DATAZ1 registers
class ADXL345_upy:
    """Minimal ADXL345 accelerometer driver for MicroPython I2C."""

    # Default I2C address; kept as a class attribute for backwards
    # compatibility with code that inspects it before instantiation.
    address = None

    def __init__(self, sensor_id, address = 0x53):
        self.sensor_id = sensor_id
        self.address = address
        self.setBandwidthRate(BW_RATE_100HZ)
        self.setRange(RANGE_2G)
        self.enableMeasurement()

    def enableMeasurement(self):
        """Put the chip into measurement mode via POWER_CTL."""
        bus.writeto_mem(self.address, POWER_CTL, bytearray(MEASURE))

    def setBandwidthRate(self, rate_flag):
        """Set the output data rate (one of the BW_RATE_* constants)."""
        bus.writeto_mem(self.address, BW_RATE, bytearray(rate_flag))

    # set the measurement range for 10-bit readings
    def setRange(self, range_flag):
        """Set the measurement range, preserving the other DATA_FORMAT bits."""
        value = bus.readfrom_mem(self.address, DATA_FORMAT, 1)
        val2 = value[0]
        val2 &= ~0x0F
        val2 |= range_flag
        val2 |= 0x08  # presumably the full-resolution bit -- TODO confirm
        bus.writeto_mem(self.address, DATA_FORMAT, bytearray([val2]))

    @staticmethod
    def _to_signed(raw):
        """Interpret *raw* (0..65535) as a signed 16-bit two's-complement int."""
        return raw - (1 << 16) if raw & (1 << 15) else raw

    # returns the current reading from the sensor for each axis
    #
    # parameter gforce:
    #    False (default): result is returned in m/s^2
    #    True           : result is returned in gs
    def sample(self, gforce = False):
        """Return one reading as a dict {'x': .., 'y': .., 'z': ..}.

        Values are in m/s^2 by default, or in g when `gforce` is True,
        rounded to 4 decimal places.
        """
        # Six bytes: X0, X1, Y0, Y1, Z0, Z1 (little-endian pairs).
        # Renamed from `bytes` to avoid shadowing the builtin.
        data = bus.readfrom_mem(self.address, AXES_DATA, 6)

        x = self._to_signed(data[0] | (data[1] << 8)) * SCALE_MULTIPLIER
        y = self._to_signed(data[2] | (data[3] << 8)) * SCALE_MULTIPLIER
        z = self._to_signed(data[4] | (data[5] << 8)) * SCALE_MULTIPLIER

        if not gforce:
            x = x * EARTH_GRAVITY_MS2
            y = y * EARTH_GRAVITY_MS2
            z = z * EARTH_GRAVITY_MS2

        return {"x": round(x, 4), "y": round(y, 4), "z": round(z, 4)}
if __name__ == "__main__":
    # if run directly we'll just create an instance of the class and output
    # the current readings.
    # BUGFIX: the original instantiated `ADXL345()`, a name that does not
    # exist in this module; the class is ADXL345_upy and requires sensor_id.
    adxl345 = ADXL345_upy("adxl345-main")
    axes = adxl345.sample(True)
    print("ADXL345 on address 0x%x:" % (adxl345.address))
    print("   x = %.3fG" % ( axes['x'] ))
    print("   y = %.3fG" % ( axes['y'] ))
    print("   z = %.3fG" % ( axes['z'] ))
| 29.825
| 104
| 0.581447
|
4a0e0a7e50c8a39cc2afdee7cd7c63510df885ab
| 9,652
|
py
|
Python
|
tests/beem/test_blockchain.py
|
anthonyadavisii/beem
|
2ddb62073e608c78391de4517f24a7858c5d74db
|
[
"MIT"
] | 1
|
2020-04-27T09:18:35.000Z
|
2020-04-27T09:18:35.000Z
|
tests/beem/test_blockchain.py
|
oldas1/beem
|
d0b9642bfe5f5df3004a59d4923ea497e3a944b3
|
[
"MIT"
] | null | null | null |
tests/beem/test_blockchain.py
|
oldas1/beem
|
d0b9642bfe5f5df3004a59d4923ea497e3a944b3
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import super
import unittest
from parameterized import parameterized
from datetime import datetime, timedelta
import pytz
import time
from pprint import pprint
from beem import Steem
from beem.blockchain import Blockchain
from beem.exceptions import BlockWaitTimeExceeded
from beem.block import Block
from beem.instance import set_shared_steem_instance
from beem.nodelist import NodeList
from beembase.signedtransactions import Signed_Transaction
# Well-known Graphene/Steem test private key (WIF). Only used with
# nobroadcast=True below, so nothing signed here ever reaches the chain.
wif = "5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3"
class Testcases(unittest.TestCase):
    """Integration tests for beem.blockchain.Blockchain against live Hive nodes.

    setUpClass captures a small window of recent blocks [start, stop] that the
    streaming tests replay; all network access is read-only (nobroadcast=True).
    """

    @classmethod
    def setUpClass(cls):
        # Build a non-broadcasting Steem instance on current Hive nodes.
        nodelist = NodeList()
        nodes = nodelist.get_nodes(hive=True)
        nodelist.update_nodes(steem_instance=Steem(node=nodes, num_retries=10))
        cls.bts = Steem(
            node=nodelist.get_nodes(hive=True),
            nobroadcast=True,
            keys={"active": wif},
            num_retries=10
        )
        b = Blockchain(steem_instance=cls.bts)
        num = b.get_current_block_num()
        # Remember a 6-block window ending at the current head for the
        # stream/blocks tests below.
        cls.start = num - 5
        cls.stop = num

        # from getpass import getpass
        # self.bts.wallet.unlock(getpass())
        set_shared_steem_instance(cls.bts)

    def test_blockchain(self):
        """Head block number/object and the block-time helpers agree."""
        bts = self.bts
        b = Blockchain(steem_instance=bts)
        num = b.get_current_block_num()
        self.assertTrue(num > 0)
        self.assertTrue(isinstance(num, int))
        block = b.get_current_block()
        self.assertTrue(isinstance(block, Block))
        # head may advance between the two calls; allow a small gap
        self.assertTrue((num - block.identifier) < 3)
        block_time = b.block_time(block.identifier)
        self.assertEqual(block.time(), block_time)
        block_timestamp = b.block_timestamp(block.identifier)
        timestamp = int(time.mktime(block.time().timetuple()))
        self.assertEqual(block_timestamp, timestamp)

    def test_estimate_block_num(self):
        """get_estimated_block_num is close to the true block for a past date."""
        bts = self.bts
        b = Blockchain(steem_instance=bts)
        last_block = b.get_current_block()
        num = last_block.identifier
        old_block = Block(num - 60, steem_instance=bts)
        date = old_block.time()
        est_block_num = b.get_estimated_block_num(date, accurate=False)
        self.assertTrue((est_block_num - (old_block.identifier)) < 10)
        est_block_num = b.get_estimated_block_num(date, accurate=True)
        self.assertTrue((est_block_num - (old_block.identifier)) < 2)
        est_block_num = b.get_estimated_block_num(date, estimateForwards=True, accurate=True)
        self.assertTrue((est_block_num - (old_block.identifier)) < 2)
        est_block_num = b.get_estimated_block_num(date, estimateForwards=True, accurate=False)

    def test_get_all_accounts(self):
        """get_all_accounts yields exactly `limit` distinct account names."""
        bts = self.bts
        b = Blockchain(steem_instance=bts)
        accounts = []
        limit = 200
        for acc in b.get_all_accounts(steps=100, limit=limit):
            accounts.append(acc)
        self.assertEqual(len(accounts), limit)
        self.assertEqual(len(set(accounts)), limit)

    def test_awaitTX(self):
        """awaitTxConfirmation raises for a transaction that never appears."""
        bts = self.bts
        b = Blockchain(steem_instance=bts)
        # long-expired dummy transaction with a bogus signature
        trans = {'ref_block_num': 3855, 'ref_block_prefix': 1730859721,
                 'expiration': '2018-03-09T06:21:06', 'operations': [],
                 'extensions': [], 'signatures':
                 ['2033a872a8ad33c7d5b946871e4c9cc8f08a5809258355fc909058eac83'
                  '20ac2a872517a52b51522930d93dd2c1d5eb9f90b070f75f838c881ff29b11af98d6a1b']}
        with self.assertRaises(
            Exception
        ):
            b.awaitTxConfirmation(trans)

    def test_stream(self):
        """All four stream() variants agree with ops_statistics over [start, stop]."""
        bts = self.bts
        start = self.start
        stop = self.stop
        b = Blockchain(steem_instance=bts)
        # parsed ops
        ops_stream = []
        opNames = ["transfer", "vote"]
        for op in b.stream(opNames=opNames, start=start, stop=stop):
            ops_stream.append(op)
        self.assertTrue(len(ops_stream) >= 0)

        # raw ops
        ops_raw_stream = []
        opNames = ["transfer", "vote"]
        for op in b.stream(opNames=opNames, raw_ops=True, start=start, stop=stop):
            ops_raw_stream.append(op)
        self.assertTrue(len(ops_raw_stream) >= 0)

        # only_ops, parsed
        only_ops_stream = []
        opNames = ["transfer", "vote"]
        for op in b.stream(opNames=opNames, start=start, stop=stop, only_ops=True):
            only_ops_stream.append(op)
        self.assertTrue(len(only_ops_stream) >= 0)

        # only_ops, raw
        only_ops_raw_stream = []
        opNames = ["transfer", "vote"]
        for op in b.stream(opNames=opNames, raw_ops=True, start=start, stop=stop, only_ops=True):
            only_ops_raw_stream.append(op)
        self.assertTrue(len(only_ops_raw_stream) >= 0)

        # reference counts for the same window
        op_stat = b.ops_statistics(start=start, stop=stop)

        op_stat2 = {"transfer": 0, "vote": 0}
        for op in ops_stream:
            self.assertIn(op["type"], opNames)
            op_stat2[op["type"]] += 1
            self.assertTrue(op["block_num"] >= start)
            self.assertTrue(op["block_num"] <= stop)
        self.assertEqual(op_stat["transfer"], op_stat2["transfer"])
        self.assertEqual(op_stat["vote"], op_stat2["vote"])

        op_stat3 = {"transfer": 0, "vote": 0}
        for op in ops_raw_stream:
            self.assertIn(op["op"][0], opNames)
            op_stat3[op["op"][0]] += 1
            self.assertTrue(op["block_num"] >= start)
            self.assertTrue(op["block_num"] <= stop)
        self.assertEqual(op_stat["transfer"], op_stat3["transfer"])
        self.assertEqual(op_stat["vote"], op_stat3["vote"])

        op_stat5 = {"transfer": 0, "vote": 0}
        for op in only_ops_stream:
            self.assertIn(op["type"], opNames)
            op_stat5[op["type"]] += 1
            self.assertTrue(op["block_num"] >= start)
            self.assertTrue(op["block_num"] <= stop)
        self.assertEqual(op_stat["transfer"], op_stat5["transfer"])
        self.assertEqual(op_stat["vote"], op_stat5["vote"])

        op_stat6 = {"transfer": 0, "vote": 0}
        for op in only_ops_raw_stream:
            self.assertIn(op["op"][0], opNames)
            op_stat6[op["op"][0]] += 1
            self.assertTrue(op["block_num"] >= start)
            self.assertTrue(op["block_num"] <= stop)
        self.assertEqual(op_stat["transfer"], op_stat6["transfer"])
        self.assertEqual(op_stat["vote"], op_stat6["vote"])

        # count the same ops by walking whole blocks
        ops_blocks = []
        for op in b.blocks(start=start, stop=stop):
            ops_blocks.append(op)
        op_stat4 = {"transfer": 0, "vote": 0}
        self.assertTrue(len(ops_blocks) > 0)
        for block in ops_blocks:
            for tran in block["transactions"]:
                for op in tran['operations']:
                    # ops appear either as [name, body] lists or typed dicts
                    if isinstance(op, list) and op[0] in opNames:
                        op_stat4[op[0]] += 1
                    elif isinstance(op, dict):
                        op_type = op["type"]
                        if len(op_type) > 10 and op_type[len(op_type) - 10:] == "_operation":
                            op_type = op_type[:-10]
                        if op_type in opNames:
                            op_stat4[op_type] += 1
            self.assertTrue(block.identifier >= start)
            self.assertTrue(block.identifier <= stop)
        self.assertEqual(op_stat["transfer"], op_stat4["transfer"])
        self.assertEqual(op_stat["vote"], op_stat4["vote"])

        # open-ended blocks() starts yielding without a stop bound
        ops_blocks = []
        for op in b.blocks():
            ops_blocks.append(op)
            break
        self.assertTrue(len(ops_blocks) == 1)

    def test_stream2(self):
        """Unfiltered stream over the last 10 blocks yields something."""
        bts = self.bts
        b = Blockchain(steem_instance=bts)
        stop_block = b.get_current_block_num()
        start_block = stop_block - 10
        ops_stream = []
        for op in b.stream(start=start_block, stop=stop_block):
            ops_stream.append(op)
        self.assertTrue(len(ops_stream) > 0)

    def test_wait_for_and_get_block(self):
        """wait_for_and_get_block walks forward; a tiny retry budget raises."""
        bts = self.bts
        b = Blockchain(steem_instance=bts, max_block_wait_repetition=18)
        start_num = b.get_current_block_num()
        blocknum = start_num
        last_fetched_block_num = None
        for i in range(3):
            block = b.wait_for_and_get_block(blocknum)
            last_fetched_block_num = block.block_num
            blocknum = last_fetched_block_num + 1
        self.assertEqual(last_fetched_block_num, start_num + 2)

        # with only one wait repetition, skipping ahead must eventually time out
        b2 = Blockchain(steem_instance=bts, max_block_wait_repetition=1)
        with self.assertRaises(
            BlockWaitTimeExceeded
        ):
            for i in range(300):
                block = b2.wait_for_and_get_block(blocknum)
                last_fetched_block_num = block.block_num
                blocknum = last_fetched_block_num + 2

    def test_hash_op(self):
        """hash_op gives the same digest for dict-form and list-form ops."""
        bts = self.bts
        b = Blockchain(steem_instance=bts)
        op1 = {'type': 'vote_operation', 'value': {'voter': 'ubg', 'author': 'yesslife', 'permlink': 'steemit-sandwich-contest-week-25-2da-entry', 'weight': 100}}
        op2 = ['vote', {'voter': 'ubg', 'author': 'yesslife', 'permlink': 'steemit-sandwich-contest-week-25-2da-entry', 'weight': 100}]
        hash1 = b.hash_op(op1)
        hash2 = b.hash_op(op2)
        self.assertEqual(hash1, hash2)

    def test_signing_appbase(self):
        """Appbase-era transactions round-trip through Signed_Transaction."""
        b = Blockchain(steem_instance=self.bts)
        st = None
        for block in b.blocks(start=25304468, stop=25304468):
            for trx in block.transactions:
                st = Signed_Transaction(trx.copy())
        self.assertTrue(st is not None)
| 40.725738
| 162
| 0.617281
|
4a0e0e3fb77bd4547fce9dad775b4fed7b278137
| 34,310
|
py
|
Python
|
selfdrive/controls/controlsd.py
|
shake777/openpilot
|
df8e300057c9774f67b90a9a7a0172095ef2a0b9
|
[
"MIT"
] | null | null | null |
selfdrive/controls/controlsd.py
|
shake777/openpilot
|
df8e300057c9774f67b90a9a7a0172095ef2a0b9
|
[
"MIT"
] | null | null | null |
selfdrive/controls/controlsd.py
|
shake777/openpilot
|
df8e300057c9774f67b90a9a7a0172095ef2a0b9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import os
import math
from numbers import Number
from cereal import car, log
from common.numpy_fast import clip, interp
from common.realtime import sec_since_boot, config_realtime_process, Priority, Ratekeeper, DT_CTRL
from common.profiler import Profiler
from common.params import Params, put_nonblocking
import cereal.messaging as messaging
from selfdrive.config import Conversions as CV
from selfdrive.swaglog import cloudlog
from selfdrive.boardd.boardd import can_list_to_can_capnp
from selfdrive.car.car_helpers import get_car, get_startup_event, get_one_can
from selfdrive.controls.lib.drive_helpers import update_v_cruise, initialize_v_cruise, get_lag_adjusted_curvature
from selfdrive.controls.lib.longcontrol import LongControl
from selfdrive.controls.lib.latcontrol_pid import LatControlPID
from selfdrive.controls.lib.latcontrol_indi import LatControlINDI
from selfdrive.controls.lib.latcontrol_lqr import LatControlLQR
from selfdrive.controls.lib.latcontrol_angle import LatControlAngle
from selfdrive.controls.lib.events import Events, ET
from selfdrive.controls.lib.alertmanager import AlertManager, set_offroad_alert
from selfdrive.controls.lib.vehicle_model import VehicleModel
from selfdrive.locationd.calibrationd import Calibration
from selfdrive.hardware import HARDWARE, TICI, EON
from selfdrive.manager.process_config import managed_processes
from selfdrive.car.hyundai.scc_smoother import SccSmoother
from selfdrive.ntune import ntune_common_get, ntune_common_enabled, ntune_scc_get
# Minimum speed for lane departure warnings (40 km/h, stored in m/s).
LDW_MIN_SPEED = 40 * CV.KPH_TO_MS
LANE_DEPARTURE_THRESHOLD = 0.1
# Control frames (at the DT_CTRL rate) of sustained angle saturation before alerting.
STEER_ANGLE_SATURATION_TIMEOUT = 1.0 / DT_CTRL
STEER_ANGLE_SATURATION_THRESHOLD = 2.5  # Degrees

# Runtime mode flags taken from the environment.
REPLAY = "REPLAY" in os.environ
SIMULATION = "SIMULATION" in os.environ
NOSENSOR = "NOSENSOR" in os.environ

# Manager processes whose absence should not trigger processNotRunning,
# plus anything disabled in the process config.
IGNORE_PROCESSES = {"rtshield", "uploader", "deleter", "loggerd", "logmessaged", "tombstoned",
                    "logcatd", "proclogd", "clocksd", "updated", "timezoned", "manage_athenad"} | \
                   {k for k, v in managed_processes.items() if not v.enabled}

# All actuator field names; used by the NaN/Inf scrub in state_control.
ACTUATOR_FIELDS = set(car.CarControl.Actuators.schema.fields.keys())

# Short aliases for capnp enum namespaces used throughout this module.
ThermalStatus = log.DeviceState.ThermalStatus
State = log.ControlsState.OpenpilotState
PandaType = log.PandaState.PandaType
Desire = log.LateralPlan.Desire
LaneChangeState = log.LateralPlan.LaneChangeState
LaneChangeDirection = log.LateralPlan.LaneChangeDirection
EventName = car.CarEvent.EventName
ButtonEvent = car.CarState.ButtonEvent
SafetyModel = car.CarParams.SafetyModel

# Safety models allowed to differ from the configured safetyConfigs list.
IGNORED_SAFETY_MODES = [SafetyModel.silent, SafetyModel.noOutput]
class Controls:
  def __init__(self, sm=None, pm=None, can_sock=None):
    """Wire up messaging, detect the car, read params and build the controllers.

    sm/pm/can_sock may be injected for testing/replay; when None, real
    sockets are created. Blocks until one CAN packet has been received.
    """
    config_realtime_process(4 if TICI else 3, Priority.CTRL_HIGH)

    # Setup sockets
    self.pm = pm
    if self.pm is None:
      self.pm = messaging.PubMaster(['sendcan', 'controlsState', 'carState',
                                     'carControl', 'carEvents', 'carParams'])

    self.camera_packets = ["roadCameraState", "driverCameraState"]
    if TICI:
      self.camera_packets.append("wideRoadCameraState")

    params = Params()
    self.joystick_mode = params.get_bool("JoystickDebugMode")
    joystick_packet = ['testJoystick'] if self.joystick_mode else []

    self.sm = sm
    if self.sm is None:
      ignore = ['driverCameraState', 'managerState'] if SIMULATION else None
      self.sm = messaging.SubMaster(['deviceState', 'pandaStates', 'peripheralState', 'modelV2', 'liveCalibration',
                                     'driverMonitoringState', 'longitudinalPlan', 'lateralPlan', 'liveLocationKalman',
                                     'managerState', 'liveParameters', 'radarState'] + self.camera_packets + joystick_packet,
                                    ignore_alive=ignore, ignore_avg_freq=['radarState', 'longitudinalPlan'])

    self.can_sock = can_sock
    if can_sock is None:
      can_timeout = None if os.environ.get('NO_CAN_TIMEOUT', False) else 100
      self.can_sock = messaging.sub_sock('can', timeout=can_timeout)

    if TICI:
      self.log_sock = messaging.sub_sock('androidLog')

    # wait for one pandaState and one CAN packet
    print("Waiting for CAN messages...")
    get_one_can(self.can_sock)

    # fingerprint the car and get its interface + parameters
    self.CI, self.CP = get_car(self.can_sock, self.pm.sock['sendcan'])

    # read params
    self.is_metric = params.get_bool("IsMetric")
    self.is_ldw_enabled = params.get_bool("IsLdwEnabled")
    community_feature_toggle = params.get_bool("CommunityFeaturesToggle")
    openpilot_enabled_toggle = params.get_bool("OpenpilotEnabledToggle")
    passive = params.get_bool("Passive") or not openpilot_enabled_toggle

    # detect sound card presence and ensure successful init
    sounds_available = HARDWARE.get_sound_card_online()

    car_recognized = self.CP.carName != 'mock'

    # read_only (dashcam) mode if the car is unknown, passive, or the
    # community-feature toggle disallows this fingerprint
    controller_available = self.CI.CC is not None and not passive and not self.CP.dashcamOnly
    community_feature = self.CP.communityFeature or \
                        self.CP.fingerprintSource == car.CarParams.FingerprintSource.can
    community_feature_disallowed = community_feature and (not community_feature_toggle)
    self.read_only = not car_recognized or not controller_available or \
                     self.CP.dashcamOnly or community_feature_disallowed
    if self.read_only:
      safety_config = car.CarParams.SafetyConfig.new_message()
      safety_config.safetyModel = car.CarParams.SafetyModel.noOutput
      self.CP.safetyConfigs = [safety_config]

    # Write CarParams for radard
    cp_bytes = self.CP.to_bytes()
    params.put("CarParams", cp_bytes)
    put_nonblocking("CarParamsCache", cp_bytes)

    self.CC = car.CarControl.new_message()
    self.AM = AlertManager()
    self.events = Events()

    self.LoC = LongControl(self.CP)
    self.VM = VehicleModel(self.CP)

    # 0=pid, 1=indi, 2=lqr — published for UI/tuning display
    self.lateral_control_select = 0

    if self.CP.steerControlType == car.CarParams.SteerControlType.angle:
      self.LaC = LatControlAngle(self.CP)
    elif self.CP.lateralTuning.which() == 'pid':
      self.LaC = LatControlPID(self.CP, self.CI)
      self.lateral_control_select = 0
    elif self.CP.lateralTuning.which() == 'indi':
      self.LaC = LatControlINDI(self.CP)
      self.lateral_control_select = 1
    elif self.CP.lateralTuning.which() == 'lqr':
      self.LaC = LatControlLQR(self.CP)
      self.lateral_control_select = 2

    # state-machine and bookkeeping state
    self.initialized = False
    self.state = State.disabled
    self.enabled = False
    self.active = False
    self.can_rcv_error = False
    self.soft_disable_timer = 0
    self.v_cruise_kph = 255
    self.v_cruise_kph_last = 0
    self.mismatch_counter = 0
    self.cruise_mismatch_counter = 0
    self.can_error_counter = 0
    self.last_blinker_frame = 0
    self.saturated_count = 0
    self.distance_traveled = 0
    self.last_functional_fan_frame = 0
    self.events_prev = []
    self.current_alert_types = [ET.PERMANENT]
    self.logged_comm_issue = False
    self.button_timers = {ButtonEvent.Type.decelCruise: 0, ButtonEvent.Type.accelCruise: 0}

    # scc smoother
    self.is_cruise_enabled = False
    self.applyMaxSpeed = 0
    self.apply_accel = 0.
    self.fused_accel = 0.
    self.lead_drel = 0.
    self.aReqValue = 0.
    self.aReqValueMin = 0.
    self.aReqValueMax = 0.
    self.sccStockCamStatus = 0
    self.sccStockCamAct = 0

    self.left_lane_visible = False
    self.right_lane_visible = False

    self.wide_camera = TICI and params.get_bool('EnableWideCamera')

    # TODO: no longer necessary, aside from process replay
    self.sm['liveParameters'].valid = True

    self.startup_event = get_startup_event(car_recognized, controller_available, len(self.CP.carFw) > 0)

    # queue static (permanent) events for degraded modes
    if not sounds_available:
      self.events.add(EventName.soundsUnavailable, static=True)
    if community_feature_disallowed and car_recognized and not self.CP.dashcamOnly:
      self.events.add(EventName.communityFeatureDisallowed, static=True)
    if not car_recognized:
      self.events.add(EventName.carUnrecognized, static=True)
      if len(self.CP.carFw) > 0:
        set_offroad_alert("Offroad_CarUnrecognized", True)
      else:
        set_offroad_alert("Offroad_NoFirmware", True)
    elif self.read_only:
      self.events.add(EventName.dashcamMode, static=True)
    elif self.joystick_mode:
      self.events.add(EventName.joystickDebug, static=True)
      self.startup_event = None

    # controlsd is driven by can recv, expected at 100Hz
    self.rk = Ratekeeper(100, print_delay_threshold=None)
    self.prof = Profiler(False)  # off by default
  def update_events(self, CS):
    """Compute carEvents from carState.

    Rebuilds self.events every frame from carState, driver monitoring,
    device health, calibration, lane-change state, panda safety status,
    comm health and FCW sources.
    """
    self.events.clear()
    self.events.add_from_msg(CS.events)
    self.events.add_from_msg(self.sm['driverMonitoringState'].events)

    # Handle startup event
    if self.startup_event is not None:
      self.events.add(self.startup_event)
      self.startup_event = None

    # Don't add any more events if not initialized
    if not self.initialized:
      self.events.add(EventName.controlsInitializing)
      return

    # Create events for battery, temperature, disk space, and memory
    if EON and (self.sm['peripheralState'].pandaType != PandaType.uno) and \
       self.sm['deviceState'].batteryPercent < 1 and self.sm['deviceState'].chargingError:
      # at zero percent battery, while discharging, OP should not allowed
      self.events.add(EventName.lowBattery)
    if self.sm['deviceState'].thermalStatus >= ThermalStatus.red:
      self.events.add(EventName.overheat)
    if self.sm['deviceState'].freeSpacePercent < 7 and not SIMULATION:
      # under 7% of space free no enable allowed
      self.events.add(EventName.outOfSpace)
    # TODO: make tici threshold the same
    if self.sm['deviceState'].memoryUsagePercent > (90 if TICI else 65) and not SIMULATION:
      self.events.add(EventName.lowMemory)

    # TODO: enable this once loggerd CPU usage is more reasonable
    #cpus = list(self.sm['deviceState'].cpuUsagePercent)[:(-1 if EON else None)]
    #if max(cpus, default=0) > 95 and not SIMULATION:
    #  self.events.add(EventName.highCpuUsage)

    # Alert if fan isn't spinning for 5 seconds
    if self.sm['peripheralState'].pandaType in [PandaType.uno, PandaType.dos]:
      if self.sm['peripheralState'].fanSpeedRpm == 0 and self.sm['deviceState'].fanSpeedPercentDesired > 50:
        if (self.sm.frame - self.last_functional_fan_frame) * DT_CTRL > 5.0:
          self.events.add(EventName.fanMalfunction)
      else:
        self.last_functional_fan_frame = self.sm.frame

    # Handle calibration status
    cal_status = self.sm['liveCalibration'].calStatus
    if cal_status != Calibration.CALIBRATED:
      if cal_status == Calibration.UNCALIBRATED:
        self.events.add(EventName.calibrationIncomplete)
      else:
        self.events.add(EventName.calibrationInvalid)

    # Handle lane change
    if self.sm['lateralPlan'].laneChangeState == LaneChangeState.preLaneChange:
      direction = self.sm['lateralPlan'].laneChangeDirection
      # blocked if there is something in the blind spot on the change side
      if (CS.leftBlindspot and direction == LaneChangeDirection.left) or \
         (CS.rightBlindspot and direction == LaneChangeDirection.right):
        self.events.add(EventName.laneChangeBlocked)
      elif self.sm['lateralPlan'].autoLaneChangeEnabled and self.sm['lateralPlan'].autoLaneChangeTimer > 0:
        self.events.add(EventName.autoLaneChange)
      else:
        if direction == LaneChangeDirection.left:
          self.events.add(EventName.preLaneChangeLeft)
        else:
          self.events.add(EventName.preLaneChangeRight)
    elif self.sm['lateralPlan'].laneChangeState in [LaneChangeState.laneChangeStarting,
                                                    LaneChangeState.laneChangeFinishing]:
      self.events.add(EventName.laneChange)

    if self.can_rcv_error or not CS.canValid:
      self.events.add(EventName.canError)

    for i, pandaState in enumerate(self.sm['pandaStates']):
      # All pandas must match the list of safetyConfigs, and if outside this list, must be silent or noOutput
      if i < len(self.CP.safetyConfigs):
        safety_mismatch = pandaState.safetyModel != self.CP.safetyConfigs[i].safetyModel or pandaState.safetyParam != self.CP.safetyConfigs[i].safetyParam
      else:
        safety_mismatch = pandaState.safetyModel not in IGNORED_SAFETY_MODES

      if safety_mismatch or self.mismatch_counter >= 200:
        self.events.add(EventName.controlsMismatch)

      if log.PandaState.FaultType.relayMalfunction in pandaState.faults:
        self.events.add(EventName.relayMalfunction)

    # Check for HW or system issues
    if len(self.sm['radarState'].radarErrors):
      self.events.add(EventName.radarFault)
    elif not self.sm.valid["pandaStates"]:
      self.events.add(EventName.usbError)
    elif not self.sm.all_alive_and_valid():
      self.events.add(EventName.commIssue)
      if not self.logged_comm_issue:
        # log the offending sockets once per comm-issue episode
        invalid = [s for s, valid in self.sm.valid.items() if not valid]
        not_alive = [s for s, alive in self.sm.alive.items() if not alive]
        cloudlog.event("commIssue", invalid=invalid, not_alive=not_alive)
        self.logged_comm_issue = True
    else:
      self.logged_comm_issue = False

    if not self.sm['liveParameters'].valid:
      self.events.add(EventName.vehicleModelInvalid)
    if not self.sm['lateralPlan'].mpcSolutionValid and not (EventName.turningIndicatorOn in self.events.names):
      self.events.add(EventName.plannerError)
    if not self.sm['liveLocationKalman'].sensorsOK and not NOSENSOR:
      if self.sm.frame > 5 / DT_CTRL:  # Give locationd some time to receive all the inputs
        self.events.add(EventName.sensorDataInvalid)
    if not self.sm['liveLocationKalman'].posenetOK:
      self.events.add(EventName.posenetInvalid)
    if not self.sm['liveLocationKalman'].deviceStable:
      self.events.add(EventName.deviceFalling)
    for pandaState in self.sm['pandaStates']:
      if log.PandaState.FaultType.relayMalfunction in pandaState.faults:
        self.events.add(EventName.relayMalfunction)

    if not REPLAY:
      # Check for mismatch between openpilot and car's PCM
      cruise_mismatch = CS.cruiseState.enabledAcc and (not self.enabled or not self.CP.pcmCruise)
      self.cruise_mismatch_counter = self.cruise_mismatch_counter + 1 if cruise_mismatch else 0
      if self.cruise_mismatch_counter > int(1. / DT_CTRL):
        self.events.add(EventName.cruiseMismatch)

    # Check for FCW
    stock_long_is_braking = self.enabled and not self.CP.openpilotLongitudinalControl and CS.aEgo < -1.5
    model_fcw = self.sm['modelV2'].meta.hardBrakePredicted and not CS.brakePressed and not stock_long_is_braking
    planner_fcw = self.sm['longitudinalPlan'].fcw and self.enabled
    if planner_fcw or model_fcw:
      self.events.add(EventName.fcw)

    if TICI:
      # scan android logs for camera errors and map CSID to the right camera
      logs = messaging.drain_sock(self.log_sock, wait_for_one=False)
      messages = []
      for m in logs:
        try:
          messages.append(m.androidLog.message)
        except UnicodeDecodeError:
          pass

      for err in ["ERROR_CRC", "ERROR_ECC", "ERROR_STREAM_UNDERFLOW", "APPLY FAILED"]:
        for m in messages:
          if err not in m:
            continue
          csid = m.split("CSID:")[-1].split(" ")[0]
          evt = {"0": EventName.roadCameraError, "1": EventName.wideRoadCameraError,
                 "2": EventName.driverCameraError}.get(csid, None)
          if evt is not None:
            self.events.add(evt)

    # TODO: fix simulator
    if not SIMULATION:
      #if not NOSENSOR:
      #  if not self.sm['liveLocationKalman'].gpsOK and (self.distance_traveled > 1000):
      #    # Not show in first 1 km to allow for driving out of garage. This event shows after 5 minutes
      #    self.events.add(EventName.noGps)
      if not self.sm.all_alive(self.camera_packets):
        self.events.add(EventName.cameraMalfunction)
      if self.sm['modelV2'].frameDropPerc > 20:
        self.events.add(EventName.modeldLagging)
      if self.sm['liveLocationKalman'].excessiveResets:
        self.events.add(EventName.localizerMalfunction)

      # Check if all manager processes are running
      not_running = set(p.name for p in self.sm['managerState'].processes if not p.running)
      if self.sm.rcv_frame['managerState'] and (not_running - IGNORE_PROCESSES):
        self.events.add(EventName.processNotRunning)

    # Only allow engagement with brake pressed when stopped behind another stopped car
    speeds = self.sm['longitudinalPlan'].speeds
    if len(speeds) > 1:
      v_future = speeds[-1]
    else:
      v_future = 100.0
    #if CS.brakePressed and v_future >= self.CP.vEgoStarting \
    #  and self.CP.openpilotLongitudinalControl and CS.vEgo < 0.3:
    #  self.events.add(EventName.noTarget)
def data_sample(self):
"""Receive data from sockets and update carState"""
# Update carState from CAN
can_strs = messaging.drain_sock_raw(self.can_sock, wait_for_one=True)
CS = self.CI.update(self.CC, can_strs)
self.sm.update(0)
all_valid = CS.canValid and self.sm.all_alive_and_valid()
if not self.initialized and (all_valid or self.sm.frame * DT_CTRL > 3.5 or SIMULATION):
if not self.read_only:
self.CI.init(self.CP, self.can_sock, self.pm.sock['sendcan'])
self.initialized = True
Params().put_bool("ControlsReady", True)
# Check for CAN timeout
if not can_strs:
self.can_error_counter += 1
self.can_rcv_error = True
else:
self.can_rcv_error = False
# When the panda and controlsd do not agree on controls_allowed
# we want to disengage openpilot. However the status from the panda goes through
# another socket other than the CAN messages and one can arrive earlier than the other.
# Therefore we allow a mismatch for two samples, then we trigger the disengagement.
if not self.enabled:
self.mismatch_counter = 0
# All pandas not in silent mode must have controlsAllowed when openpilot is enabled
if any(not ps.controlsAllowed and self.enabled for ps in self.sm['pandaStates']
if ps.safetyModel not in IGNORED_SAFETY_MODES):
self.mismatch_counter += 1
self.distance_traveled += CS.vEgo * DT_CTRL
return CS
  def state_transition(self, CS):
    """Compute conditional state transitions and execute actions on state transitions"""
    self.v_cruise_kph_last = self.v_cruise_kph

    self.CP.pcmCruise = self.CI.CP.pcmCruise

    # if stock cruise is completely disabled, then we can use our own set speed logic
    #if not self.CP.pcmCruise:
    #  self.v_cruise_kph = update_v_cruise(self.v_cruise_kph, CS.buttonEvents, self.button_timers, self.enabled, self.is_metric)
    #elif self.CP.pcmCruise and CS.cruiseState.enabled:
    #  self.v_cruise_kph = CS.cruiseState.speed * CV.MS_TO_KPH

    # fork-specific: cruise-button handling is delegated to the SCC smoother
    SccSmoother.update_cruise_buttons(self, CS, self.CP.openpilotLongitudinalControl)

    # decrement the soft disable timer at every step, as it's reset on
    # entrance in SOFT_DISABLING state
    self.soft_disable_timer = max(0, self.soft_disable_timer - 1)

    self.current_alert_types = [ET.PERMANENT]

    # ENABLED, PRE ENABLING, SOFT DISABLING
    if self.state != State.disabled:
      # user and immediate disable always have priority in a non-disabled state
      if self.events.any(ET.USER_DISABLE):
        self.state = State.disabled
        self.current_alert_types.append(ET.USER_DISABLE)

      elif self.events.any(ET.IMMEDIATE_DISABLE):
        self.state = State.disabled
        self.current_alert_types.append(ET.IMMEDIATE_DISABLE)

      else:
        # ENABLED
        if self.state == State.enabled:
          if self.events.any(ET.SOFT_DISABLE):
            self.state = State.softDisabling
            # 0.5 s countdown before a soft disable becomes a full disable
            self.soft_disable_timer = int(0.5 / DT_CTRL)
            self.current_alert_types.append(ET.SOFT_DISABLE)

        # SOFT DISABLING
        elif self.state == State.softDisabling:
          if not self.events.any(ET.SOFT_DISABLE):
            # no more soft disabling condition, so go back to ENABLED
            self.state = State.enabled

          elif self.events.any(ET.SOFT_DISABLE) and self.soft_disable_timer > 0:
            self.current_alert_types.append(ET.SOFT_DISABLE)

          elif self.soft_disable_timer <= 0:
            self.state = State.disabled

        # PRE ENABLING
        elif self.state == State.preEnabled:
          if not self.events.any(ET.PRE_ENABLE):
            self.state = State.enabled
          else:
            self.current_alert_types.append(ET.PRE_ENABLE)

    # DISABLED
    elif self.state == State.disabled:
      if self.events.any(ET.ENABLE):
        if self.events.any(ET.NO_ENTRY):
          self.current_alert_types.append(ET.NO_ENTRY)

        else:
          if self.events.any(ET.PRE_ENABLE):
            self.state = State.preEnabled
          else:
            self.state = State.enabled
          self.current_alert_types.append(ET.ENABLE)
          self.v_cruise_kph = initialize_v_cruise(CS.vEgo, CS.buttonEvents, self.v_cruise_kph_last)

    # Check if actuators are enabled
    self.active = self.state == State.enabled or self.state == State.softDisabling
    if self.active:
      self.current_alert_types.append(ET.WARNING)

    # Check if openpilot is engaged
    self.enabled = self.active or self.state == State.preEnabled
  def state_control(self, CS):
    """Given the state, this function returns an actuators packet"""

    # Update VehicleModel
    params = self.sm['liveParameters']
    x = max(params.stiffnessFactor, 0.1)
    #sr = max(params.steerRatio, 0.1)
    # fork-specific: steer ratio can come from the live estimator or ntune config
    if ntune_common_enabled('useLiveSteerRatio'):
      sr = max(params.steerRatio, 0.1)
    else:
      sr = max(ntune_common_get('steerRatio'), 0.1)
    self.VM.update_params(x, sr)

    lat_plan = self.sm['lateralPlan']
    long_plan = self.sm['longitudinalPlan']

    actuators = car.CarControl.Actuators.new_message()
    actuators.longControlState = self.LoC.long_control_state

    if CS.leftBlinker or CS.rightBlinker:
      self.last_blinker_frame = self.sm.frame

    # State specific actions

    if not self.active:
      self.LaC.reset()
      self.LoC.reset(v_pid=CS.vEgo)

    # keep the long controller pinned to current speed while ACC is off
    if not CS.cruiseState.enabledAcc:
      self.LoC.reset(v_pid=CS.vEgo)

    if not self.joystick_mode:
      # accel PID loop
      pid_accel_limits = self.CI.get_pid_accel_limits(self.CP, CS.vEgo, self.v_cruise_kph * CV.KPH_TO_MS)
      actuators.accel = self.LoC.update(self.active and CS.cruiseState.enabledAcc, CS, self.CP, long_plan, pid_accel_limits, self.sm['radarState'])

      # Steering PID loop and lateral MPC
      lat_active = self.active and not CS.steerWarning and not CS.steerError and CS.vEgo > self.CP.minSteerSpeed
      desired_curvature, desired_curvature_rate = get_lag_adjusted_curvature(self.CP, CS.vEgo,
                                                                             lat_plan.psis,
                                                                             lat_plan.curvatures,
                                                                             lat_plan.curvatureRates)
      actuators.steer, actuators.steeringAngleDeg, lac_log = self.LaC.update(lat_active, CS, self.CP, self.VM, params,
                                                                             desired_curvature, desired_curvature_rate)
    else:
      # joystick debug mode: map the two axes directly to accel and steer
      lac_log = log.ControlsState.LateralDebugState.new_message()
      if self.sm.rcv_frame['testJoystick'] > 0 and self.active:
        actuators.accel = 4.0*clip(self.sm['testJoystick'].axes[0], -1, 1)

        steer = clip(self.sm['testJoystick'].axes[1], -1, 1)
        # max angle is 45 for angle-based cars
        actuators.steer, actuators.steeringAngleDeg = steer, steer * 45.

        lac_log.active = True
        lac_log.steeringAngleDeg = CS.steeringAngleDeg
        lac_log.output = steer
        lac_log.saturated = abs(steer) >= 0.9

    # Check for difference between desired angle and angle for angle based control
    angle_control_saturated = self.CP.steerControlType == car.CarParams.SteerControlType.angle and \
      abs(actuators.steeringAngleDeg - CS.steeringAngleDeg) > STEER_ANGLE_SATURATION_THRESHOLD

    if angle_control_saturated and not CS.steeringPressed and self.active:
      self.saturated_count += 1
    else:
      self.saturated_count = 0

    # Send a "steering required alert" if saturation count has reached the limit
    if (lac_log.saturated and not CS.steeringPressed) or \
       (self.saturated_count > STEER_ANGLE_SATURATION_TIMEOUT):
      if len(lat_plan.dPathPoints):
        # Check if we deviated from the path
        # TODO use desired vs actual curvature
        left_deviation = actuators.steer > 0 and lat_plan.dPathPoints[0] < -0.20
        right_deviation = actuators.steer < 0 and lat_plan.dPathPoints[0] > 0.20

        if left_deviation or right_deviation:
          self.events.add(EventName.steerSaturated)

    # Ensure no NaNs/Infs
    for p in ACTUATOR_FIELDS:
      attr = getattr(actuators, p)
      if not isinstance(attr, Number):
        continue

      if not math.isfinite(attr):
        cloudlog.error(f"actuators.{p} not finite {actuators.to_dict()}")
        setattr(actuators, p, 0.0)

    return actuators, lac_log
def update_button_timers(self, buttonEvents):
    """Advance hold timers for buttons still pressed and (re)start them on new events.

    A timer value > 0 means the button is currently held; each call ages it by
    one frame. A fresh press event restarts the timer at 1; a release zeroes it.
    """
    timers = self.button_timers
    # Age every running timer by one frame.
    for button_name, ticks in timers.items():
        if ticks > 0:
            timers[button_name] = ticks + 1
    # Apply new press/release events for the buttons we track.
    for event in buttonEvents:
        key = event.type.raw
        if key in timers:
            timers[key] = 1 if event.pressed else 0
def publish_logs(self, CS, start_time, actuators, lac_log):
    """Send actuators and hud commands to the car, send controlsstate and MPC logging"""
    CC = car.CarControl.new_message()
    CC.enabled = self.enabled
    CC.active = self.active
    CC.actuators = actuators

    # Vehicle attitude from the localizer, if it has produced a full orientation.
    if len(self.sm['liveLocationKalman'].orientationNED.value) > 2:
        CC.roll = self.sm['liveLocationKalman'].orientationNED.value[0]
        CC.pitch = self.sm['liveLocationKalman'].orientationNED.value[1]

    # Cancel stock cruise when it is engaged but openpilot is not.
    CC.cruiseControl.cancel = self.CP.pcmCruise and not self.enabled and CS.cruiseState.enabled
    if self.joystick_mode and self.sm.rcv_frame['testJoystick'] > 0 and self.sm['testJoystick'].buttons[0]:
        CC.cruiseControl.cancel = True

    CC.hudControl.setSpeed = float(self.v_cruise_kph * CV.KPH_TO_MS)
    CC.hudControl.speedVisible = self.enabled
    CC.hudControl.lanesVisible = self.enabled
    CC.hudControl.leadVisible = self.sm['longitudinalPlan'].hasLead

    # Lane-line visibility shown on the HUD is only refreshed once every 100
    # frames to avoid flicker; the instantaneous values are still used for LDW.
    right_lane_visible = self.sm['lateralPlan'].rProb > 0.5
    left_lane_visible = self.sm['lateralPlan'].lProb > 0.5
    if self.sm.frame % 100 == 0:
        self.right_lane_visible = right_lane_visible
        self.left_lane_visible = left_lane_visible
    CC.hudControl.rightLaneVisible = self.right_lane_visible
    CC.hudControl.leftLaneVisible = self.left_lane_visible

    recent_blinker = (self.sm.frame - self.last_blinker_frame) * DT_CTRL < 5.0  # 5s blinker cooldown
    # Lane-departure warnings only when openpilot is NOT steering, above the
    # minimum speed, calibrated, and no recent blinker use.
    ldw_allowed = self.is_ldw_enabled and CS.vEgo > LDW_MIN_SPEED and not recent_blinker \
                  and not self.active and self.sm['liveCalibration'].calStatus == Calibration.CALIBRATED

    meta = self.sm['modelV2'].meta
    if len(meta.desirePrediction) and ldw_allowed:
        l_lane_change_prob = meta.desirePrediction[Desire.laneChangeLeft - 1]
        r_lane_change_prob = meta.desirePrediction[Desire.laneChangeRight - 1]

        # Wide camera gets an extra offset on top of the tuned camera offset.
        cameraOffset = ntune_common_get("cameraOffset") + 0.08 if self.wide_camera else ntune_common_get("cameraOffset")
        l_lane_close = left_lane_visible and (self.sm['modelV2'].laneLines[1].y[0] > -(1.08 + cameraOffset))
        r_lane_close = right_lane_visible and (self.sm['modelV2'].laneLines[2].y[0] < (1.08 - cameraOffset))

        CC.hudControl.leftLaneDepart = bool(l_lane_change_prob > LANE_DEPARTURE_THRESHOLD and l_lane_close)
        CC.hudControl.rightLaneDepart = bool(r_lane_change_prob > LANE_DEPARTURE_THRESHOLD and r_lane_close)

        if CC.hudControl.rightLaneDepart or CC.hudControl.leftLaneDepart:
            self.events.add(EventName.ldw)

    # Clear warning-type alerts when none of the current alert types is a warning.
    clear_event = ET.WARNING if ET.WARNING not in self.current_alert_types else None
    alerts = self.events.create_alerts(self.current_alert_types, [self.CP, self.sm, self.is_metric])
    self.AM.add_many(self.sm.frame, alerts, self.enabled)
    self.AM.process_alerts(self.sm.frame, clear_event)
    CC.hudControl.visualAlert = self.AM.visual_alert

    if not self.read_only and self.initialized:
        # send car controls over can
        can_sends = self.CI.apply(CC, self)
        self.pm.send('sendcan', can_list_to_can_capnp(can_sends, msgtype='sendcan', valid=CS.canValid))

    # Force deceleration when the driver monitoring model flags inattention
    # or we are in the soft-disable state.
    force_decel = (self.sm['driverMonitoringState'].awarenessStatus < 0.) or \
                  (self.state == State.softDisabling)

    # Curvature & Steering angle
    params = self.sm['liveParameters']
    steer_angle_without_offset = math.radians(CS.steeringAngleDeg - params.angleOffsetAverageDeg)
    curvature = -self.VM.calc_curvature(steer_angle_without_offset, CS.vEgo)

    # controlsState
    dat = messaging.new_message('controlsState')
    dat.valid = CS.canValid
    controlsState = dat.controlsState
    controlsState.alertText1 = self.AM.alert_text_1
    controlsState.alertText2 = self.AM.alert_text_2
    controlsState.alertSize = self.AM.alert_size
    controlsState.alertStatus = self.AM.alert_status
    controlsState.alertBlinkingRate = self.AM.alert_rate
    controlsState.alertType = self.AM.alert_type
    controlsState.alertSound = self.AM.audible_alert
    controlsState.canMonoTimes = list(CS.canMonoTimes)
    controlsState.longitudinalPlanMonoTime = self.sm.logMonoTime['longitudinalPlan']
    controlsState.lateralPlanMonoTime = self.sm.logMonoTime['lateralPlan']
    controlsState.enabled = self.enabled
    controlsState.active = self.active
    controlsState.curvature = curvature
    controlsState.state = self.state
    controlsState.engageable = not self.events.any(ET.NO_ENTRY)
    controlsState.longControlState = self.LoC.long_control_state
    controlsState.vPid = float(self.LoC.v_pid)
    controlsState.vCruise = float(self.applyMaxSpeed if self.CP.openpilotLongitudinalControl else self.v_cruise_kph)
    controlsState.upAccelCmd = float(self.LoC.pid.p)
    controlsState.uiAccelCmd = float(self.LoC.pid.i)
    controlsState.ufAccelCmd = float(self.LoC.pid.f)
    controlsState.cumLagMs = -self.rk.remaining * 1000.
    controlsState.startMonoTime = int(start_time * 1e9)
    controlsState.forceDecel = bool(force_decel)
    controlsState.canErrorCounter = self.can_error_counter
    controlsState.lateralControlSelect = int(self.lateral_control_select)
    controlsState.angleSteers = steer_angle_without_offset * CV.RAD_TO_DEG
    controlsState.applyAccel = self.apply_accel
    controlsState.aReqValue = self.aReqValue
    controlsState.aReqValueMin = self.aReqValueMin
    controlsState.aReqValueMax = self.aReqValueMax
    controlsState.sccStockCamAct = self.sccStockCamAct
    controlsState.sccStockCamStatus = self.sccStockCamStatus
    controlsState.steerRatio = self.VM.sR
    controlsState.steerRateCost = ntune_common_get('steerRateCost')
    controlsState.steerActuatorDelay = ntune_common_get('steerActuatorDelay')
    controlsState.sccGasFactor = ntune_scc_get('sccGasFactor')
    controlsState.sccBrakeFactor = ntune_scc_get('sccBrakeFactor')
    controlsState.sccCurvatureFactor = ntune_scc_get('sccCurvatureFactor')
    controlsState.longitudinalActuatorDelayLowerBound = ntune_scc_get('longitudinalActuatorDelayLowerBound')
    controlsState.longitudinalActuatorDelayUpperBound = ntune_scc_get('longitudinalActuatorDelayUpperBound')

    # The lateral debug sub-message depends on which lateral controller ran.
    if self.joystick_mode:
        controlsState.lateralControlState.debugState = lac_log
    elif self.CP.steerControlType == car.CarParams.SteerControlType.angle:
        controlsState.lateralControlState.angleState = lac_log
    elif self.CP.lateralTuning.which() == 'pid':
        controlsState.lateralControlState.pidState = lac_log
    elif self.CP.lateralTuning.which() == 'lqr':
        controlsState.lateralControlState.lqrState = lac_log
    elif self.CP.lateralTuning.which() == 'indi':
        controlsState.lateralControlState.indiState = lac_log
    self.pm.send('controlsState', dat)

    # carState
    car_events = self.events.to_msg()
    cs_send = messaging.new_message('carState')
    cs_send.valid = CS.canValid
    cs_send.carState = CS
    cs_send.carState.events = car_events
    self.pm.send('carState', cs_send)

    # carEvents - logged every second or on change
    if (self.sm.frame % int(1. / DT_CTRL) == 0) or (self.events.names != self.events_prev):
        ce_send = messaging.new_message('carEvents', len(self.events))
        ce_send.carEvents = car_events
        self.pm.send('carEvents', ce_send)
        self.events_prev = self.events.names.copy()

    # carParams - logged every 50 seconds (> 1 per segment)
    if (self.sm.frame % int(50. / DT_CTRL) == 0):
        cp_send = messaging.new_message('carParams')
        cp_send.carParams = self.CP
        self.pm.send('carParams', cp_send)

    # carControl
    cc_send = messaging.new_message('carControl')
    cc_send.valid = CS.canValid
    cc_send.carControl = CC
    self.pm.send('carControl', cc_send)

    # copy CarControl to pass to CarInterface on the next iteration
    self.CC = CC
def step(self):
    """Run one control cycle: sample inputs, update state, and publish outputs."""
    cycle_start = sec_since_boot()
    self.prof.checkpoint("Ratekeeper", ignore=True)

    # Pull fresh data from the sockets and build the current car state.
    CS = self.data_sample()
    self.prof.checkpoint("Sample")

    self.update_events(CS)

    controls_active = (not self.read_only) and self.initialized
    if controls_active:
        # Advance the enable/disable state machine.
        self.state_transition(CS)
        self.prof.checkpoint("State transition")

        # Run the controllers (PID loops and lateral MPC) to get actuator commands.
        actuators, lac_log = self.state_control(CS)
        self.prof.checkpoint("State Control")

        # Publish actuator commands plus all logging messages.
        self.publish_logs(CS, cycle_start, actuators, lac_log)
        self.prof.checkpoint("Sent")

    self.update_button_timers(CS.buttonEvents)
def controlsd_thread(self):
    """Run control steps forever, pacing the loop with the ratekeeper."""
    while True:
        self.step()
        self.rk.monitor_time()  # Keep the loop at the ratekeeper's target rate.
        self.prof.display()     # Emit accumulated profiling checkpoints.
def main(sm=None, pm=None, logcan=None):
    """Instantiate Controls and run its loop until the process is killed.

    sm/pm/logcan allow injecting messaging objects (e.g. for tests); when None,
    Controls creates its own.
    """
    controls = Controls(sm, pm, logcan)
    controls.controlsd_thread()


if __name__ == "__main__":
    main()
| 42.621118
| 154
| 0.710434
|
4a0e0e98f3d7f6445e23a4b9030ef7b33bc78eb4
| 19,546
|
py
|
Python
|
pygmyhdl/pygmyhdl.py
|
xesscorp/pygmyhdl
|
c1c187dc97e1ba7551bda1944f727a4f35e2fac2
|
[
"MIT"
] | 20
|
2017-09-01T16:08:30.000Z
|
2020-12-12T22:19:19.000Z
|
pygmyhdl/pygmyhdl.py
|
devbisme/pygmyhdl
|
c1c187dc97e1ba7551bda1944f727a4f35e2fac2
|
[
"MIT"
] | 3
|
2018-02-01T20:07:18.000Z
|
2021-03-07T14:15:26.000Z
|
pygmyhdl/pygmyhdl.py
|
xesscorp/pygmyhdl
|
c1c187dc97e1ba7551bda1944f727a4f35e2fac2
|
[
"MIT"
] | 5
|
2018-03-13T18:12:08.000Z
|
2021-06-15T03:46:14.000Z
|
# -*- coding: utf-8 -*-
# MIT license
#
# Copyright (C) 2017 by XESS Corp.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
'''
PygMyHDL main code module.
'''
from __future__ import print_function
#from __future__ import unicode_literals # Messes up byteplay on Python 2.
from __future__ import division
from __future__ import absolute_import
from builtins import super
from builtins import range
from future import standard_library
standard_library.install_aliases()
import sys
USING_PYTHON2 = (sys.version_info.major == 2)
USING_PYTHON3 = not USING_PYTHON2
import pdb
import random
import types
import itertools
import functools
from myhdl import *
from myhdlpeek import *
if USING_PYTHON3:
import byteplay3 as bp
else:
import byteplay as bp
# List for storing MyHDL instances generated by this module.
_instances = list()
############## @comb_logic & @seq_logic decorators. #################
try:
    # These are the logic function decorators for MyHDL version >= 1.0.
    from myhdl._instance import _getCallInfo
    from myhdl._always_comb import _AlwaysComb
    from myhdl._always_seq import _AlwaysSeq
    import myhdl._instance as myhdlinst
    import myhdl._always_comb as myhdlcomb
    import myhdl._always_seq as myhdlseq
    import myhdl._always as myhdlalways
    import myhdl._Signal as myhdlsig
    import myhdl._util as myhdlutil
    from types import FunctionType

    def comb_logic(func):
        '''Decorator for combinational logic functions in PygMyHDL.
        Create a combinational logic block and store it on the instance list.'''
        callinfo = myhdlinst._getCallInfo()
        # Mirror MyHDL's always_comb checks: reject non-functions, generator
        # functions, and functions that take arguments.
        if not isinstance(func, FunctionType):
            raise myhdlcomb.AlwaysCombError(myhdlcomb._error.ArgType)
        if myhdlutil._isGenFunc(func):
            raise myhdlcomb.AlwaysCombError(myhdlcomb._error.ArgType)
        if func.__code__.co_argcount > 0:
            raise myhdlcomb.AlwaysCombError(myhdlcomb._error.NrOfArgs)
        c = myhdlcomb._AlwaysComb(func, callinfo=callinfo)
        _instances.append(c)  # Record the block so simulate() can find it.
        return c

    def seq_logic(edge, reset=None):
        '''Decorator for sequential (clocked) logic functions in PygMyHDL.
        Creates a sequential logic block and stores it on the instance list.'''
        callinfo = myhdlinst._getCallInfo()
        sigargs = []
        if not isinstance(edge, myhdlsig._WaiterList):
            # Bug fix: AlwaysSeqError was referenced unqualified here (and in the
            # reset check below), which would raise NameError instead of the
            # intended error — it is only available as myhdlseq.AlwaysSeqError.
            raise myhdlseq.AlwaysSeqError(myhdlseq._error.EdgeType)
        edge.sig._read = True
        edge.sig._used = True
        sigargs.append(edge.sig)
        if reset is not None:
            if not isinstance(reset, myhdlseq.ResetSignal):
                raise myhdlseq.AlwaysSeqError(myhdlseq._error.ResetType)
            reset._read = True
            reset._used = True
            sigargs.append(reset)
        sigdict = myhdlalways._get_sigdict(sigargs, callinfo.symdict)

        def _always_seq_decorator(func):
            # Same argument sanity checks as comb_logic above.
            if not isinstance(func, FunctionType):
                raise myhdlseq.AlwaysSeqError(myhdlseq._error.ArgType)
            if myhdlutil._isGenFunc(func):
                raise myhdlseq.AlwaysSeqError(myhdlseq._error.ArgType)
            if func.__code__.co_argcount > 0:
                raise myhdlseq.AlwaysSeqError(myhdlseq._error.NrOfArgs)
            c = myhdlseq._AlwaysSeq(func, edge, reset, callinfo=callinfo, sigdict=sigdict)
            _instances.append(c)  # Record the block so simulate() can find it.
            return c

        return _always_seq_decorator

except ImportError:
    # If the import statements in the above section throw an exception,
    # that means we're using MyHDL < 1.0 which has a different way of
    # doing the logic function decorators.
    def comb_logic(f):
        '''Decorator for combinational logic functions in PygMyHDL.
        Create a combinational logic block and store it on the instance list.'''
        def comb_func(f):
            return always_comb(f)
        inst = comb_func(f)
        _instances.append(inst)
        return inst

    def seq_logic(trigger):
        '''Decorator for sequential (clocked) logic functions in PygMyHDL.
        Creates a sequential logic block and stores it on the instance list.'''
        def seq_logic_decorator(f):
            def seq_func(f):
                return always_seq(trigger, None)(f)
            inst = seq_func(f)
            _instances.append(inst)
            return inst
        return seq_logic_decorator
############## @chunk decorator. #################
if USING_PYTHON3:
    def _func_copy(f, new_code):
        '''
        Return a copy of function f with __code__ section replaced with new_code.
        Copied from https://stackoverflow.com/questions/13503079/how-to-create-a-copy-of-a-python-function
        '''
        g = types.FunctionType(new_code, f.__globals__, name=f.__name__,
                               argdefs=f.__defaults__,
                               closure=f.__closure__)
        g = functools.update_wrapper(g, f)
        # Keyword-only defaults are not handled by update_wrapper; copy explicitly.
        g.__kwdefaults__ = f.__kwdefaults__
        return g
else:
    def _func_copy(f, new_code):
        '''
        Return a copy of function f with __code__ section replaced with new_code.
        Copied from https://stackoverflow.com/questions/13503079/how-to-create-a-copy-of-a-python-function
        '''
        # Python 2 exposes function internals under the func_* attribute names.
        g = types.FunctionType(new_code, f.func_globals, name=f.func_name,
                               argdefs=f.func_defaults,
                               closure=f.func_closure)
        g = functools.update_wrapper(g, f)
        return g
def preamble_func():
    '''Preamble inserted to mark the hardware instantiated previous to this chunk.

    Returns the current length of the global _instances list so the matching
    postamble can tell which instances were created inside the chunk.
    '''
    return len(_instances)
def postamble_func(index, myhdl_instances):
    '''Postamble inserted to hardware instantiated in this chunk.

    index: length of _instances recorded by preamble_func before the chunk ran.
    myhdl_instances: instances MyHDL itself detected in the chunked function.
    '''
    global _instances
    # Build a list of unique instances created by the chunked function:
    # everything appended to _instances since the preamble, plus MyHDL's own.
    chunk_insts = _instances[index:] + myhdl_instances
    chunk_insts = sorted(chunk_insts, key=id)
    # Adjacent equal entries after the sort are duplicates; keep one of each.
    chunk_insts = [k for k, _ in itertools.groupby(chunk_insts)]
    # Append the list of instances to the global _instances list, replacing the
    # individual entries the chunk added with one grouped entry.
    _instances = _instances[:index]
    _instances.append(chunk_insts)
    # Return the list of instances.
    return chunk_insts
def chunk(f):
    '''
    Decorator for grouping components generated by function f.

    Gets the generator function code section and prepends/appends code to
    observe what components are instantiated in the _instances list and then
    stores them in a local variable so MyHDL can detect them.
    '''
    # Get the generator function code section (byteplay disassembly).
    f_code = bp.Code.from_code(f.__code__)

    # Add this code to the start to store the beginning index of the _instances list.
    # Python version of preamble:
    #     instances_begin_index = len(pygmyhdl._instances)
    preamble = [
        (bp.LOAD_GLOBAL, 'preamble_func'),
        (bp.CALL_FUNCTION, 0),
        (bp.STORE_FAST, 'instances_begin_index')
    ]

    # Add this code to the end to copy the new components added by f() to the
    # _instances list and also return them.
    # Python version of postamble:
    #     loc_insts = postamble_func(instances_begin_index, instances())
    #     return loc_insts
    postamble = [
        (bp.LOAD_GLOBAL, 'postamble_func'),
        (bp.LOAD_FAST, 'instances_begin_index'),
        (bp.LOAD_GLOBAL, 'instances'),
        (bp.CALL_FUNCTION, 0),        # Call MyHDL's instances().
        (bp.CALL_FUNCTION, 2),        # Call postamble_func(index, instances()).
        (bp.STORE_FAST, 'loc_insts'),
        (bp.LOAD_FAST, 'loc_insts'),
        (bp.RETURN_VALUE, None)
    ]

    # Remove the original return value and return instruction from f().
    f_code.code.pop()
    f_code.code.pop()

    # Create new code section from preamble + original code + postamble.
    new_code = preamble
    new_code.extend(f_code.code)
    new_code.extend(postamble)
    f_code.code = new_code

    # Make a copy of the original function, replace its code section with the
    # altered code section, and return the result as the decorated function.
    return _func_copy(f, f_code.to_code())
############## Wire, Bus, and State classes. #################
@chunk
def _sig_xfer(a, b):
    '''A simple hardware chunk to transfer one signal to another.'''
    @comb_logic
    def logic():
        # Continuously drive b with the current value of a.
        b.next = a
class Wire(SignalType):
    '''A one-bit signal.'''

    def __init__(self, init_val=0, name=None):
        # Two-argument super() form: zero-argument super() fails on Python 2.
        super(Wire, self).__init__(bool(init_val))
        if name:
            # Register a Peeker so this wire can be traced in simulations.
            Peeker(self, name)
class Bus(SignalType):
    '''A multi-bit signal.'''

    def __init__(self, width=1, init_val=0, name=None, vtype=modbv):
        # Two-argument super() form: zero-argument super() fails on Python 2.
        super(Bus, self).__init__(vtype(init_val)[width:])
        self.width = width    # Number of bits in the bus.
        self.i_wires = None   # Lazily-created list of input Wire drivers.
        self.o_bus = None     # Lazily-created OBus wrapper for outputs.
        if name:
            Peeker(self, name)

    @property
    def i(self):
        '''Return a list of wires that will drive this Bus object.'''
        if not self.i_wires:
            # Create one Wire per bit, concatenate them (bit 0 last, hence the
            # reversal) and transfer the concatenation onto this bus.
            self.i_wires = IWireBus([Wire(self.val[i]) for i in range(self.width)])
            wires_bus = ConcatSignal(*reversed(self.i_wires))
            _sig_xfer(wires_bus, self)
        return self.i_wires

    @property
    def o(self):
        '''Return an OBus for reading bits/slices driven by this Bus object.'''
        if not self.o_bus:
            self.o_bus = OBus(self)
        return self.o_bus
class OBus():
    '''List of output Wire objects driven from a Bus object.'''

    def __init__(self, bus):
        self.parent = bus  # The Bus whose bits this object exposes.

    @property
    def o(self):
        '''Get the output bus of an output bus which is the bus itself.'''
        return self

    @property
    def i(self):
        '''Raise an exception if trying to get an input bus from an output bus.'''
        raise Exception('Attempting to get inputs from the outputs of a Bus.')

    def __getitem__(self, slice_):
        '''Handle getting bus slices or individual bits.'''
        if isinstance(slice_, slice):
            # NOTE(review): the slice step is discarded here; presumably only
            # contiguous A:B slices are supported — confirm.
            start, stop, step = slice_.indices(len(self.parent))
            return self.parent(start, stop)
        elif isinstance(slice_, int):
            return self.parent(slice_)
        else:
            raise Exception("Bus indexing requires an integer or A:B slice.")
class IWireBus(list):
    '''List of input Wire objects that drive a Bus object.'''

    def __init__(self, *args, **kwargs):
        super(IWireBus, self).__init__(*args, **kwargs)

    @property
    def i(self):
        '''Get the input bus of an input bus which is the bus itself.'''
        return self

    @property
    def o(self):
        '''Raise an exception if trying to get an output bus from an input bus.'''
        raise Exception('Attempting to get outputs from the inputs of a Bus.')

    def __setitem__(self, slice_, value):
        '''Drive selected bits of a bus to a value.'''
        # Turn integer index into a slice object.
        # NOTE(review): slice(slice_+1, slice_) has start > stop, which selects
        # nothing from a plain list — confirm the intended single-bit behavior.
        if isinstance(slice_, int):
            slice_ = slice(slice_+1, slice_)  # single bit slice.
        # Convert value into a bit-vector object.
        try:
            bv = intbv(value.val)  # Do this if the value is a Signal.
        except AttributeError:
            bv = intbv(value)  # Do this if the value is an integer.
        # Set individual wires in this bus to bit values.
        for indx, wire in enumerate(self[slice_]):
            _sig_xfer(Signal(bv[indx]), wire)
class State(SignalType):
    '''Stores state of finite-state machines.'''

    def __init__(self, *args, **kwargs):
        '''
        Create a state variable like this: sv = State('S1', 'S2', 'S3', ...).
        The initial state for sv will be sv.s.S1.
        You can set the state like this: sv.next = sv.s.S2.
        You can compare the state like this: sv == sv.s.S3.

        Inputs:
            *args: Positional arguments that are distinguished by type:
                strings: Names of the states of the FSM.
                State: Another State object that will be used to create
                    this State object with the same states.
                EnumType: An object created by the MyHDL enum function.
            **kwargs: Keyword arguments that are passed to the MyHDL enum function
                except for:
                init_state: A string indicating the initial state for
                    this state variable. If omitted, the first
                    string-type positional argument is used.
                name: The name assigned to a Peeker object for this
                    state variable. If omitted, no Peeker is assigned.
        '''
        # Look for the init_state of the state variable and remove it from the list.
        # The initial state will be given by the keyword argument or the 1st positional argument.
        self.init_state = kwargs.pop('init_state', args[0])

        # Get the name for the Peeker assigned to monitor this state variable.
        name = kwargs.pop('name', None)

        # Create a state variable from a list of state names as strings, or
        # from an existing state variable or state type.
        state_name_args = [arg for arg in args if isinstance(arg, type(''))]
        state_type_args = [arg for arg in args if isinstance(arg, (State, EnumType))]
        if state_type_args:
            if isinstance(state_type_args[0], State):
                # Share the enumeration of an existing State object.
                self.s = state_type_args[0].s
            elif isinstance(state_type_args[0], EnumType):
                self.s = state_type_args[0]
            else:
                raise Exception('Creating a state variable from a non-state type object!')
        elif state_name_args:
            # Remaining kwargs (after init_state/name were popped) go to enum().
            self.s = enum(*state_name_args, **kwargs)
        else:
            raise Exception('No state information provided to create a state variable!')

        # The actual state variable is created here.
        super(State, self).__init__(getattr(self.s, self.init_state))

        # Create a Peeker for the state variable if the name keyword argument was given.
        # Do this only after the state variable has been created.
        if name:
            Peeker(self, name)
############## Simulation. #################
def initialize():
    '''Initialize the use of pygmyhdl module.

    Resets module-level bookkeeping so a new design/simulation can start clean.
    '''
    global _instances
    _instances = list()  # Remove any created instances.
    Peeker.clear()  # Remove any signal peekers.
def simulate(*modules):
    '''Run a simulation with a set of modules.

    Combines the explicitly-passed modules with everything recorded in the
    global _instances list and all Peeker trace instances, then runs them all
    in one MyHDL Simulation.
    '''
    def flatten(nested_list):
        '''Flatten list-of-lists instances into a flat list of instances.'''
        lst = []
        for item in nested_list:
            if isinstance(item, (list, tuple)):
                lst.extend(flatten(item))
            else:
                lst.append(item)
        return lst

    # Combine all the explicit and internal instances into a single set.
    all_modules = set(flatten(modules))
    all_modules.update(flatten(_instances))
    all_modules.update(Peeker.instances())

    # Simulate the set of instances.
    Simulation(*all_modules).run()
def _get_max(signal):
'''Get maximum value of a signal.'''
return signal.max or 2**len(signal)
def _get_min(signal):
'''Get minimum value of a signal.'''
return signal.min or 0
def _random_test(*signals, **kwargs):
    '''
    Generate a set of test vectors with random values assigned to the signals.

    Parameters:
        signals: One or more signals.
        num_tests: Number of random test vectors to simulate.
        dly: Time delay between changes of the clock signal.
    '''
    dly = kwargs.get('dly', 1)
    num_tests = kwargs.get('num_tests', 10)
    for _ in range(num_tests):
        for sig in signals:
            # Assign a random value within the allowable range of this signal.
            sig.next = random.randrange(_get_min(sig), _get_max(sig))
        yield delay(dly)
def random_sim(*signals, **kwargs):
    '''
    Run a simulation with a set of random test vectors.

    Parameters:
        signals: One or more signals.
        num_tests: Number of random test vectors to simulate.
        dly: Time delay between changes of the clock signal.
    '''
    simulate(_random_test(*signals, **kwargs))
def _exhaustive_test(*signals, **kwargs):
    '''
    Generate all possible test vectors for a set of signals.

    Parameters:
        signals: One or more signals.
        dly: Time delay between changes of the clock signal.
    '''
    dly = kwargs.get('dly', 1)
    if len(signals) == 0:
        # Base case of the recursion: all signals are set, let time advance.
        yield delay(dly)
    else:
        # The for-loop target assigns each value directly to the signal's
        # .next attribute, sweeping its entire legal range.
        for signals[0].next in range(_get_min(signals[0]), _get_max(signals[0])):
            #yield from exhaustive_test(*signals[1:])
            for d in _exhaustive_test(*signals[1:]):
                yield d
def exhaustive_sim(*signals, **kwargs):
    '''
    Run a simulation with an exhaustive set of test vectors.

    Parameters:
        signals: One or more signals.
        dly: Time delay between changes of the clock signal.
    '''
    simulate(_exhaustive_test(*signals, **kwargs))
def _clk_test(clk, **kwargs):
    '''
    Strobe a clock signal for a number of cycles.

    Parameters:
        clk: The clock signal to toggle.
        num_cycles: Number of clock cycles to execute.
        dly: Time delay between changes of the clock signal.
    '''
    dly = kwargs.get('dly', 1)
    num_cycles = kwargs.get('num_cycles', 10)
    for _ in range(num_cycles):
        # One full cycle: low for dly, then high for dly.
        clk.next = 0
        yield delay(dly)
        clk.next = 1
        yield delay(dly)
def clk_sim(clk, **kwargs):
    '''
    Run a simulation for a number of clock cycles.

    Parameters:
        clk: The clock signal to toggle.
        num_cycles: Number of clock cycles to execute.
        dly: Time delay between changes of the clock signal.
    '''
    simulate(_clk_test(clk, **kwargs))
def _vector_test(*vectors, **kwargs):
    '''
    Apply vectors of values to signals.

    Parameters:
        vectors: Each vector is a two-element list with a Signal as the first
            element and a list of values as the second element.
        num_cycles: Number of cycles to execute. Defaults to the length of
            the longest value list.
        dly: Time delay between changes of the signal values.
    '''
    dly = kwargs.get('dly', 1)
    # Default the cycle count to the longest value list; max() on an empty
    # sequence raises ValueError, which maps to zero cycles.
    try:
        num_cycles = max([len(v[1]) for v in vectors])
    except ValueError:
        num_cycles = 0
    num_cycles = kwargs.get('num_cycles', num_cycles)
    for i in range(num_cycles):
        for v in vectors:
            try:
                v[0].next = v[1][i]
            except IndexError:
                # A vector shorter than num_cycles holds its last value.
                v[0].next = v[1][-1]
        # Bug fix: honor the 'dly' keyword; this was hard-coded to delay(1),
        # making the dly parameter (default 1) have no effect.
        yield delay(dly)
def vector_sim(*vectors, **kwargs):
    '''
    Run a simulation that applies explicit vectors of values to signals.

    Parameters:
        vectors: Each vector is a two-element list with a Signal as the first
            element and a list of values as the second element.
        num_cycles: Number of cycles to execute.
        dly: Time delay between changes of the signal values.
    '''
    simulate(_vector_test(*vectors, **kwargs))
| 36.39851
| 106
| 0.639261
|
4a0e0eacbe3139d4be66d723ab673feeed9cf05e
| 10,715
|
py
|
Python
|
scripts/datasets_to_h5/sleep_edf_in_bed/to_h5_no_lights.py
|
Dreem-Organization/RobustSleepNet
|
c8ff3f6f857299eb2bf2e9400483084d5ecd4106
|
[
"MIT"
] | 16
|
2021-04-06T14:04:45.000Z
|
2022-03-11T14:37:08.000Z
|
scripts/datasets_to_h5/sleep_edf_in_bed/to_h5_no_lights.py
|
Dreem-Organization/RobustSleepNet
|
c8ff3f6f857299eb2bf2e9400483084d5ecd4106
|
[
"MIT"
] | null | null | null |
scripts/datasets_to_h5/sleep_edf_in_bed/to_h5_no_lights.py
|
Dreem-Organization/RobustSleepNet
|
c8ff3f6f857299eb2bf2e9400483084d5ecd4106
|
[
"MIT"
] | 4
|
2021-06-10T06:48:33.000Z
|
2022-03-26T22:29:07.000Z
|
import json
import os

import h5py
import mne
import numpy as np
import pandas as pd
import pyedflib
from dateutil import parser

from robust_sleep_net.utils.utils import standardize_signals_durations
# Map EDF hypnogram annotation labels to integer sleep-stage codes.
# Stages 3 and 4 are merged into a single class (both map to 3); movement
# and unscored epochs map to -1 and are ignored downstream.
stages_lookup = {
    "Sleep stage 1": 1,
    "Sleep stage 2": 2,
    "Sleep stage 3": 3,
    "Sleep stage 4": 3,
    "Movement time": -1,
    "Sleep stage ?": -1,
    "Sleep stage R": 4,
    "Sleep stage W": 0,
}
def compute_onset(lights_off, record_start):
    """Return the number of seconds between the record start and lights-off.

    Both arguments are time-of-day strings (parsed with dateutil's parser).
    When lights-off appears earlier than the record start by more than two
    minutes, it is assumed to fall on the following day; smaller negative
    offsets are clamped to zero.
    """
    from datetime import datetime, date

    today = date.today()
    start_ts = datetime.combine(today, parser.parse(record_start).time()).timestamp()
    end_ts = datetime.combine(today, parser.parse(lights_off).time()).timestamp()
    # Lights-off before the start (beyond a 2-minute tolerance) means it
    # happened after midnight: shift it forward by one day.
    if end_ts < start_ts - 2 * 60:
        end_ts += 24 * 60 * 60
    # Clamp any remaining small negative offset to zero.
    if end_ts < start_ts:
        end_ts = start_ts
    return end_ts - start_ts
def get_sleep_stages_no_light(annotation_file, record_start, lights_off):
    """
    Extract the sleep stages from an annotation file, cropped to the window
    from lights-off to shortly after the last sleep epoch.

    annotation_file : (str) path to EDF annotation file
    record_start, lights_off : time-of-day strings forwarded to compute_onset.
    returns stages: list of sleep stages (30-s epochs)
            time_begin: beginning of hypno (seconds)
            time_end: end of hypno (seconds)
    """
    annotation = mne.read_annotations(annotation_file)
    seconds_before_lights_off = compute_onset(lights_off, record_start)
    onsets = annotation.onset
    durations = annotation.duration
    tot_annotation_duration = onsets[-1] + durations[-1]
    tot_epoch = int(tot_annotation_duration // 30)
    # Initialize every 30-s epoch as unscored (-1).
    stages = np.array([-1] * tot_epoch)
    labels = annotation.description
    for i, (onset, duration, label) in enumerate(zip(onsets, durations, labels)):
        start_epoch = int(onset // 30)
        end_epoch = int(start_epoch + duration // 30)
        stages[start_epoch:end_epoch] = stages_lookup.get(label, -1)
    stages_as_array = np.array(stages)
    # First/last epochs scored as sleep (codes > 0, i.e. N1/N2/N3/REM).
    first_sleep, last_sleep = (
        np.where(stages_as_array > 0)[0][0],
        np.where(stages_as_array > 0)[0][-1],
    )
    # Keep a 60-epoch (30 min) margin around the sleep period.
    # NOTE(review): first_sleep is never read after this step — looks unused.
    first_sleep, last_sleep = (
        max(0, first_sleep - 60),
        min(len(stages_as_array), last_sleep + 60),
    )
    lights_off, lights_on = (
        seconds_before_lights_off,
        min(len(stages_as_array), last_sleep + 30) * 30,
    )
    staging_start = lights_off // 30
    staging_end = lights_on // 30
    staging_start, staging_end = int(staging_start), int(staging_end)
    stages = stages[staging_start:staging_end].tolist()
    return stages, staging_start * 30, staging_end * 30
def get_annotation(annotation_file, annotation_name, sampling_freq=64):
    """
    Extract annotation from an EDF file.

    annotation_file : EDF handle (pyedflib EdfReader)
    annotation_name : (str) name of the annotation to get
    sampling_freq : (int) sampling freq to use to build the event binary representation

    Returns a (binary_signal, onsets, durations) tuple where binary_signal is 1
    wherever the named annotation is active.
    """
    # readAnnotations() returns (onsets, durations, labels) arrays.
    annotations = annotation_file.readAnnotations()
    result = np.zeros(annotation_file.file_duration * sampling_freq)
    annot_idx = np.where(annotations[2] == annotation_name)[0]
    time_begins, durations = [], []
    for idx in annot_idx:
        time_begins += [annotations[0][idx]]
        durations += [annotations[1][idx]]
        # Mark the event's span in the binary representation.
        time_begin = int(annotations[0][idx] * sampling_freq)
        time_end = time_begin + int(annotations[1][idx] * sampling_freq)
        result[time_begin:time_end] = 1
    return result, time_begins, durations
def to_h5(
    record_file,
    annotation_files,
    h5_target_directory,
    signals,
    record_start,
    lights_off,
    crop_record=True,
    force=False,
):
    """
    Format an EDF record and its annotation to a standardized h5 file.

    record_file : (str) path to the EDF record
    annotation_files : (list of str) the hypnogram has to be in the first annotation file
    h5_target_directory : (str) output .h5 file path
    signals : dict mapping modality name -> list of channel labels to export
    record_start, lights_off : time strings forwarded to the hypnogram cropping
    crop_record : (bool) crop signals to the hypnogram window when True
    force : (bool) overwrite an existing output file when True

    NOTE(review): this function uses os.path.exists, so `os` must be imported
    at module level (it was previously only imported under __main__).
    """
    description = []
    events_description = []
    with pyedflib.EdfReader(record_file) as data:
        # Skip records that were already converted, unless forced.
        if force or not os.path.exists(h5_target_directory):
            with h5py.File(h5_target_directory, "w", driver="core") as h5_target:
                signal_labels = {key: value for value, key in enumerate(data.getSignalLabels())}
                hypno, time_begin, time_end = get_sleep_stages_no_light(
                    annotation_files, record_start=record_start, lights_off=lights_off
                )
                h5_target["hypnogram"] = np.array(hypno).astype(int)
                # Add signal
                h5_target.create_group("signals")
                for group_name, signals_list in signals.items():
                    group_name = group_name.lower()
                    h5_target["signals"].create_group(group_name)
                    # The first channel of each modality fixes the expected
                    # sampling rate and unit for the whole group.
                    mod_fs = None
                    mod_unit = None
                    for signal in signals_list:
                        signal_idx = signal_labels[signal]
                        if mod_fs is None:
                            mod_fs = int(data.getSignalHeader(signal_idx)["sample_rate"])
                            mod_unit = data.getSignalHeader(signal_idx)["dimension"]
                        if mod_fs is not None:
                            signal_path = "signals/" + group_name + "/" + signal
                            # Only keep channels matching the modality's fs/unit.
                            if (
                                mod_fs == data.getSignalHeader(signal_idx)["sample_rate"]
                                and mod_unit == data.getSignalHeader(signal_idx)["dimension"]
                            ):
                                if crop_record:
                                    # Crop the raw signal to the hypnogram window.
                                    begin_idx = int(time_begin * mod_fs)
                                    end_idx = int(time_end * mod_fs)
                                    x = data.readSignal(signal_idx)[begin_idx:end_idx].astype(
                                        np.float32
                                    )
                                    h5_target.create_dataset(
                                        signal_path, data=x, compression="gzip"
                                    )
                                else:
                                    x = data.readSignal(signal_idx).astype(np.float32)
                                    h5_target.create_dataset(
                                        signal_path, data=x, compression="gzip"
                                    )
                                signal_description = {
                                    "fs": mod_fs,
                                    "unit": mod_unit,
                                    "path": signal_path,
                                    "name": signal,
                                    "domain": group_name,
                                    "default": True,
                                }
                                description += [signal_description]
                            else:
                                print(
                                    "Signal: ",
                                    signal,
                                    "has invalid frequency or dimension for the modality",
                                )
                    h5_target["signals/" + group_name].attrs["fs"] = mod_fs
                    h5_target["signals/" + group_name].attrs["unit"] = mod_unit
                # Store per-signal metadata as JSON in fixed-size string attrs.
                h5_target.attrs.create(
                    "description", json.dumps(description), dtype=np.dtype("S32768")
                )
                h5_target.attrs.create(
                    "events_description", json.dumps(events_description), dtype=np.dtype("S32768"),
                )
                # truncate file
                h5_target.attrs["duration"] = standardize_signals_durations(h5_target)
                h5_target.close()
            print("Sucess: ", h5_target_directory)
            return True
    return True
# List of MASS event
# NOTE(review): this re-assignment duplicates the stages_lookup defined at the
# top of the module with identical contents — it looks redundant; confirm and
# consider removing.
stages_lookup = {
    "Sleep stage 1": 1,
    "Sleep stage 2": 2,
    "Sleep stage 3": 3,
    "Sleep stage 4": 3,
    "Movement time": -1,
    "Sleep stage ?": -1,
    "Sleep stage R": 4,
    "Sleep stage W": 0,
}
if __name__ == "__main__":
    import os
    from scripts.settings import BASE_DIRECTORY

    # Mirror the Sleep-EDF dataset from PhysioNet into BASE_DIRECTORY.
    # NOTE: os.system returns the command's exit status, not a filename,
    # despite the variable name.
    url = "https://physionet.org/files/sleep-edfx/1.0.0/"
    filename = os.system(f"wget -r -N -c -np -P {BASE_DIRECTORY} {url} ")
    from scripts.settings import SLEEP_EDF_IN_BED_SETTINGS
    from joblib import Parallel, delayed

    records_directory, h5_directory = (
        SLEEP_EDF_IN_BED_SETTINGS["edf_directory"],
        SLEEP_EDF_IN_BED_SETTINGS["h5_directory"],
    )
    # Map record filename -> directory containing it (PSG recordings only).
    records = {}
    for directory in records_directory:
        records.update({x: directory for x in os.listdir(directory) if "-PSG.edf" in x})
    meta_data = pd.read_csv("scripts/datasets_processing/sleep_edf/sleep_edf_meta.csv")
    # For each record, collect (start_time, lights_off) and its hypnogram file.
    lights_time = {}
    record_annotation = {}
    for record_name, directory in records.items():
        # The first 7 characters identify the subject/night pair, shared by the
        # PSG file and its hypnogram file.
        record_id = record_name[:7]
        record_meta_data = meta_data[meta_data["filename"] == record_name.replace(".edf", "")]
        lights_time[f"{directory}/{record_name}"] = (
            record_meta_data["start_time"].values[0],
            record_meta_data["LightsOff"].values[0],
        )
        annotations_in_directory = [
            x for x in os.listdir(directory) if "-Hypnogram.edf" in x and record_id in x
        ]
        assert len(annotations_in_directory) == 1, print(record_name, record_id)
        record_annotation[f"{directory}/{record_name}"] = (
            f"{directory}/" f"{annotations_in_directory[0]}"
        )
    if not os.path.exists(h5_directory):
        os.mkdir(h5_directory)
    parallel = False

    def record_to_h5_full(record, annotation, record_start, lights_off, force=False):
        '''Convert one EDF record to h5, auto-detecting exportable channels.'''
        with pyedflib.EdfReader(record) as f:
            channels = f.getSignalLabels()
        # Group channels by modality based on their label prefix.
        signals_prefix = ["EEG", "EMG", "ECG", "EOG"]
        signals = {}
        for channel in channels:
            for prefix in signals_prefix:
                if prefix in channel:
                    if prefix not in signals:
                        signals[prefix] = []
                    signals[prefix] += [channel]
                    break
        output_file = h5_directory + record.split("/")[-1].replace(".edf", ".h5")
        to_h5(
            record,
            annotation,
            output_file,
            record_start=record_start,
            lights_off=lights_off,
            signals=signals,
            force=force,
        )

    if parallel is True:
        Parallel(n_jobs=-1)(
            delayed(record_to_h5_full)(record, annotation, *lights_time[record])
            for record, annotation in record_annotation.items()
        )
    else:
        for record, annotation in record_annotation.items():
            record_to_h5_full(record, annotation, *lights_time[record], force=False)
| 36.821306
| 99
| 0.558936
|
4a0e1093b0ebac0d1639abb3ce7e2068d6e80c1a
| 136,318
|
py
|
Python
|
jax/_src/api.py
|
fengwang/jax
|
88f888d498ee5a063c7fbdf96ea593ab8bd01849
|
[
"Apache-2.0"
] | null | null | null |
jax/_src/api.py
|
fengwang/jax
|
88f888d498ee5a063c7fbdf96ea593ab8bd01849
|
[
"Apache-2.0"
] | 8
|
2022-01-03T16:09:12.000Z
|
2022-03-24T14:18:05.000Z
|
jax/_src/api.py
|
zhangqiaorjc/jax
|
5726be1a9e1d33d8e0052d09104501b19fb6efcf
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""JAX user-facing transformations and utilities.
The transformations here mostly wrap internal transformations, providing
convenience flags to control behavior and handling Python containers of
arguments and outputs. The Python containers handled are pytrees (see
tree_util.py), which include nested tuples/lists/dicts, where the leaves are
arrays.
"""
# flake8: noqa: F401
import collections
import functools
from functools import partial
import inspect
import itertools as it
import sys
import threading
import weakref
import types
from typing import (Any, Callable, Iterable, NamedTuple, Mapping, Optional,
Sequence, Tuple, TypeVar, Union, overload)
from warnings import warn
import numpy as np
from contextlib import contextmanager, ExitStack
import jax
from jax import core
from jax import linear_util as lu
from jax._src import dtypes
from jax.core import eval_jaxpr
from jax._src.api_util import (
flatten_fun, apply_flat_fun, flatten_fun_nokwargs, flatten_fun_nokwargs2,
argnums_partial, argnums_partial_except, flatten_axes, donation_vector,
rebase_donate_argnums, _ensure_index, _ensure_index_tuple,
shaped_abstractify, _ensure_str_tuple, argnames_partial_except)
from jax._src import traceback_util
from jax._src.traceback_util import api_boundary
from jax.tree_util import (tree_map, tree_flatten, tree_unflatten,
tree_structure, tree_transpose, tree_leaves,
tree_multimap, treedef_is_leaf, treedef_children,
Partial, PyTreeDef)
from jax._src.util import (unzip2, curry, safe_map, safe_zip, prod, split_list,
extend_name_stack, wrap_name, cache, wraps,
HashableFunction)
from jax._src import device_array
from jax._src import dispatch
from jax._src.lib import jax_jit
from jax._src.lib import version
from jax._src.lib import xla_bridge as xb
from jax._src.lib import xla_client as xc
from jax._src.lib import pmap_lib
# Unused imports to be exported
from jax._src.lib.xla_bridge import (device_count, local_device_count, devices,
local_devices, process_index,
process_count, host_id, host_ids,
host_count, default_backend)
from jax.core import ShapedArray, raise_to_shaped
from jax.interpreters import partial_eval as pe
from jax.interpreters import xla
from jax.interpreters import pxla
from jax.interpreters import ad
from jax.interpreters import batching
from jax.interpreters import masking
from jax.interpreters import invertible_ad as iad
from jax.interpreters.invertible_ad import custom_ivjp
from jax.custom_derivatives import (closure_convert, custom_gradient, custom_jvp,
custom_vjp, linear_call)
from jax.ad_checkpoint import checkpoint_policies
from jax._src.config import (flags, config, bool_env,
disable_jit as _disable_jit,
debug_nans as config_debug_nans,
debug_infs as config_debug_infs,
_thread_local_state as config_thread_local_state)
# Hide this file's frames from user-facing tracebacks.
traceback_util.register_exclusion(__file__)

# dtype helper that canonicalizes dtypes (e.g. float64 -> float32 when x64
# mode is disabled).
_dtype = partial(dtypes.dtype, canonicalize=True)

AxisName = Any

# These TypeVars are used below to express the fact that function types
# (i.e. call signatures) are invariant under the jit, vmap, and pmap
# transformations.
# Note that the function type annotations will generally not strictly hold
# in JIT internals, as Tracer values are passed through the function.
# Should this raise any type errors for the tracing code in future, we can disable
# type checking in parts of the tracing code, or remove these annotations.
F = TypeVar("F", bound=Callable)
T = TypeVar("T")
U = TypeVar("U")

# Shadow the builtins with length-checking variants; keep the originals
# available under the unsafe_* names.
map, unsafe_map = safe_map, map
zip, unsafe_zip = safe_zip, zip
FLAGS = flags.FLAGS

# Toggle for the C++ fast path of jax.jit; read in `jit` below.
# Fix: added the missing space between the first two help-string fragments
# (previously rendered as "fast path.Set this ...").
flags.DEFINE_bool(
    "experimental_cpp_jit", bool_env("JAX_CPP_JIT", True),
    "A flag enabling the C++ jax.jit fast path. "
    "Set this to `False` only if it crashes otherwise and report "
    "the error to the jax-team.")

# Toggle for the (experimental) C++ fast path of jax.pmap.
# Fix: closed the unbalanced parenthesis at the end of the help text.
flags.DEFINE_bool(
    "experimental_cpp_pmap", bool_env("JAX_CPP_PMAP", True),
    "A flag enabling the C++ jax.pmap fast path. Until the default "
    "is switched to True, the feature is not supported and possibly broken "
    "(e.g. it may use unreleased code from jaxlib).")
def _nan_check_posthook(fun, args, kwargs, output):
  """Hook function called by the C++ jit/pmap to perform NaN checking."""
  # Gather every device buffer in the output pytree: a DeviceArray exposes
  # `device_buffer`, a sharded result exposes `device_buffers`.
  buffers = []
  for leaf in tree_leaves(output):
    if hasattr(leaf, "device_buffer"):
      buffers.append(leaf.device_buffer)
    elif hasattr(leaf, "device_buffers"):
      buffers.extend(leaf.device_buffers)
  try:
    dispatch.check_special(xla.xla_call_p, buffers)
  except FloatingPointError:
    # compiled_fun can only raise in this case
    assert config.jax_debug_nans or config.jax_debug_infs
    print("Invalid nan value encountered in the output of a C++-jit/pmap "
          "function. Calling the de-optimized version.")
    fun._cache_miss(*args, **kwargs)[0]  # probably won't return
def _update_debug_special_global(_):
  """Install/remove the NaN-check post-hook on the global C++ jit state."""
  debug_on = config._read("jax_debug_nans") or config._read("jax_debug_infs")
  jax_jit.global_state().post_hook = _nan_check_posthook if debug_on else None
def _update_debug_special_thread_local(_):
  """Install/remove the NaN-check post-hook on the thread-local jit state."""
  state = config_thread_local_state
  debug_on = (getattr(state, "jax_debug_nans", False) or
              getattr(state, "jax_debug_infs", False))
  jax_jit.thread_local_state().post_hook = (
      _nan_check_posthook if debug_on else None)
# Re-evaluate the post-hooks whenever the debug_nans/debug_infs config
# options change (both the global and the thread-local settings).
config_debug_nans._add_hooks(_update_debug_special_global,
                             _update_debug_special_thread_local)
config_debug_infs._add_hooks(_update_debug_special_global,
                             _update_debug_special_thread_local)

# Re-exported: dtype used for tangents of integer/boolean primal values.
float0 = dtypes.float0
def _check_callable(fun):
  """Raise TypeError unless ``fun`` is a plain callable JAX can transform."""
  # In Python 3.10+, the only thing stopping us from supporting staticmethods
  # is that we can't take weak references to them, which the C++ JIT requires.
  error = None
  if isinstance(fun, staticmethod):
    error = f"staticmethod arguments are not supported, got {fun}"
  elif not callable(fun):
    error = f"Expected a callable value, got {fun}"
  elif _isgeneratorfunction(fun):
    error = f"Expected a function, got a generator function: {fun}"
  if error is not None:
    raise TypeError(error)
def _isgeneratorfunction(fun):
# re-implemented here because of https://bugs.python.org/issue33261
while inspect.ismethod(fun):
fun = fun.__func__
while isinstance(fun, functools.partial):
fun = fun.func
return inspect.isfunction(fun) and bool(fun.__code__.co_flags & inspect.CO_GENERATOR)
_POSITIONAL_OR_KEYWORD = inspect.Parameter.POSITIONAL_OR_KEYWORD

def _infer_argnums_and_argnames(
    fun: Callable,
    argnums: Union[int, Iterable[int], None],
    argnames: Union[str, Iterable[str], None],
) -> Tuple[Tuple[int, ...], Tuple[str, ...]]:
  """Infer missing argnums and argnames for a function with inspect."""
  if argnums is None and argnames is None:
    return (), ()
  if argnums is not None and argnames is not None:
    # Both were given explicitly: no inference, just normalize the types.
    return _ensure_index_tuple(argnums), _ensure_str_tuple(argnames)
  # Exactly one of the two was given; derive the other from the signature.
  try:
    parameters = inspect.signature(fun).parameters
  except ValueError:
    # In rare cases, inspect can fail, e.g., on some builtin Python functions.
    # In these cases, don't infer any parameters.
    parameters = {}
  if argnums is None:
    argnames = _ensure_str_tuple(argnames)
    # Positions of the POSITIONAL_OR_KEYWORD parameters named in argnames.
    argnums = tuple(
        idx for idx, (name, param) in enumerate(parameters.items())
        if param.kind == _POSITIONAL_OR_KEYWORD and name in argnames)
  else:
    argnums = _ensure_index_tuple(argnums)
    # Names of the POSITIONAL_OR_KEYWORD parameters at positions in argnums.
    argnames = tuple(
        name for idx, (name, param) in enumerate(parameters.items())
        if param.kind == _POSITIONAL_OR_KEYWORD and idx in argnums)
  return argnums, argnames
def jit(
    fun: F,
    *,
    static_argnums: Union[int, Iterable[int], None] = None,
    static_argnames: Union[str, Iterable[str], None] = None,
    device: Optional[xc.Device] = None,
    backend: Optional[str] = None,
    donate_argnums: Union[int, Iterable[int]] = (),
    inline: bool = False,
) -> F:
  """Sets up ``fun`` for just-in-time compilation with XLA.

  Args:
    fun: Function to be jitted. Should be a pure function, as side effects
      may only be executed once. Its arguments and return value should be
      arrays, scalars, or (nested) standard Python containers thereof.
      Positional arguments indicated by ``static_argnums`` can be anything
      at all, provided they are hashable and have an equality operation
      defined: static arguments become part of the compilation cache key.
    static_argnums: Optional int or collection of ints selecting positional
      arguments to treat as static (compile-time constant). Operations that
      only depend on static arguments are constant-folded in Python during
      tracing, and calling the jitted function with different values for
      them triggers recompilation; static arguments must be hashable and
      immutable, and non-array arguments must be marked static. If neither
      ``static_argnums`` nor ``static_argnames`` is provided, no arguments
      are static; if exactly one of them is provided, the other is inferred
      with ``inspect.signature(fun)``; if both are provided, no inference is
      performed and only the listed parameters are static.
    static_argnames: Optional string or collection of strings selecting
      named arguments to treat as static; see ``static_argnums``.
    device: Experimental; API likely to change. The device the jitted
      function will run on (retrievable via :py:func:`jax.devices`). The
      default follows XLA's DeviceAssignment logic, usually
      ``jax.devices()[0]``.
    backend: Experimental; API likely to change. A string representing the
      XLA backend: ``'cpu'``, ``'gpu'``, or ``'tpu'``.
    donate_argnums: Which arguments are "donated" to the computation.
      Donating an argument lets XLA reuse its buffer (e.g. to store a
      result), reducing memory pressure; the caller must not reuse donated
      buffers, and JAX raises an error on attempts to do so. By default, no
      arguments are donated.
    inline: Whether this function should be inlined into enclosing jaxprs
      rather than represented as an application of the xla_call primitive
      with its own subjaxpr. Default False.

  Returns:
    A wrapped version of ``fun``, set up for just-in-time compilation.

  In the following example, ``selu`` can be compiled into a single fused
  kernel by XLA:

  >>> import jax
  >>>
  >>> @jax.jit
  ... def selu(x, alpha=1.67, lmbda=1.05):
  ...   return lmbda * jax.numpy.where(x > 0, x, alpha * jax.numpy.exp(x) - alpha)
  >>>
  >>> key = jax.random.PRNGKey(0)
  >>> x = jax.random.normal(key, (10,))
  >>> print(selu(x))  # doctest: +SKIP
  [-0.54485  0.27744 -0.29255 -0.91421 -0.62452 -0.24748
   -0.85743 -0.78232  0.76827  0.59566 ]
  """
  # Prefer the C++ fast-path implementation unless disabled by flag.
  impl = _cpp_jit if FLAGS.experimental_cpp_jit else _python_jit
  return impl(fun, static_argnums, static_argnames, device, backend,
              donate_argnums, inline)
def _prepare_jit(fun, static_argnums, static_argnames, donate_argnums,
                 args, kwargs):
  """Common jit call preamble: validate donation, strip statics, flatten.

  Returns ``(wrapped_fun, in_tree, args_flat, donated_invars)``.
  """
  # Donated indices must refer to actual positional arguments of this call.
  if max(donate_argnums, default=-1) >= len(args):
    raise ValueError(
        f"jitted function has donate_argnums={donate_argnums} but "
        f"was called with only {len(args)} positional arguments.")
  wrapped = lu.wrap_init(fun)
  # Partially apply static positional and keyword arguments, leaving only
  # the dynamic ones to be traced.
  wrapped, dyn_args = argnums_partial_except(wrapped, static_argnums, args,
                                             allow_invalid=True)
  wrapped, dyn_kwargs = argnames_partial_except(wrapped, static_argnames,
                                                kwargs)
  args_flat, in_tree = tree_flatten((dyn_args, dyn_kwargs))
  donated_invars = (donation_vector(donate_argnums, dyn_args, dyn_kwargs)
                    if donate_argnums else (False,) * len(args_flat))
  return wrapped, in_tree, args_flat, donated_invars
def _python_jit(
    fun: F,
    static_argnums: Union[int, Iterable[int], None] = None,
    static_argnames: Union[str, Iterable[str], None] = None,
    device: Optional[xc.Device] = None,
    backend: Optional[str] = None,
    donate_argnums: Union[int, Iterable[int]] = (),
    inline: bool = False,
) -> F:
  """The pure-Python implementation of `jax.jit`, being replaced by _cpp_jit."""
  _check_callable(fun)
  static_argnums, static_argnames = _infer_argnums_and_argnames(
      fun, static_argnums, static_argnames)
  static_argnums = _ensure_index_tuple(static_argnums)
  donate_argnums = rebase_donate_argnums(_ensure_index_tuple(donate_argnums),
                                         static_argnums)

  @wraps(fun)
  @api_boundary
  def f_jitted(*args, **kwargs):
    # With jit globally disabled, just call the function eagerly.
    if config.jax_disable_jit:
      return fun(*args, **kwargs)
    closed_fun, in_tree, args_flat, donated_invars = _prepare_jit(
        fun, static_argnums, static_argnames, donate_argnums, args, kwargs)
    for arg in args_flat:
      _check_arg(arg)
    flat_fun, out_tree = flatten_fun(closed_fun, in_tree)
    out_flat = xla.xla_call(
        flat_fun, *args_flat, device=device, backend=backend,
        name=flat_fun.__name__, donated_invars=donated_invars, inline=inline)
    return tree_unflatten(out_tree(), out_flat)

  # Attach ahead-of-time lowering as a method on the jitted callable.
  f_jitted.lower = _jit_lower(fun, static_argnums, static_argnames, device,
                              backend, donate_argnums, inline)
  return f_jitted
class _BackendAndDeviceInfo(NamedTuple):
  # Result of the `get_device_info` callback handed to the C++ jit:
  # the device results should land on, and whether the user explicitly
  # committed to it (via `device=` or `backend=`).
  default_device: xc.Device
  committed_to_device: bool
class _FastpathData(NamedTuple):
  # Data returned to the C++ jit after a cache miss so that subsequent
  # calls with matching signatures can bypass Python entirely.
  xla_executable: xla.XlaExecutable
  out_pytree_def: Any
  sticky_device: xc.Device
  avals: Iterable[Any]
  lazy_exprs: Iterable[Any]
  kept_var_bitvec: Iterable[bool]
# Process-wide executable cache shared by all C++-jitted functions.
_cpp_jit_cache = jax_jit.CompiledFunctionCache()
def _cpp_jit(
    fun: F,
    static_argnums: Union[int, Iterable[int], None] = None,
    static_argnames: Union[str, Iterable[str], None] = None,
    device: Optional[xc.Device] = None,
    backend: Optional[str] = None,
    donate_argnums: Union[int, Iterable[int]] = (),
    inline: bool = False,
) -> F:
  # An implementation of `jit` that tries to do as much as possible in C++.
  # The goal of this function is to speed up the time it takes to process the
  # arguments, find the correct C++ executable, start the transfer of arguments
  # and schedule the computation.
  # As long as it does not support all features of the Python implementation
  # the C++ code will fallback to `_python_jit` when it faces some unsupported
  # feature.
  _check_callable(fun)
  static_argnums, static_argnames = _infer_argnums_and_argnames(
      fun, static_argnums, static_argnames)
  static_argnums = _ensure_index_tuple(static_argnums)
  donate_argnums = _ensure_index_tuple(donate_argnums)
  donate_argnums = rebase_donate_argnums(donate_argnums, static_argnums)
  if device is not None and backend is not None:
    raise ValueError("can't specify both a device and a backend for jit, "
                     f"got device={device} and backend={backend}.")

  @api_boundary
  def cache_miss(*args, **kwargs):
    # Invoked by the C++ jit when no compiled executable matches the call
    # signature; traces/compiles and returns (output, fastpath_data).
    ### This first part is basically the same code as in _python_jit.
    # An alternative would be for cache_miss to accept from C++ the arguments
    # (dyn_args, donated_invars, args_flat, in_tree), since otherwise we have
    # work/code that is redundant between C++ and Python. We can try that later.
    closed_fun, in_tree, args_flat, donated_invars = _prepare_jit(
        fun, static_argnums, static_argnames, donate_argnums, args, kwargs)
    for arg in args_flat:
      _check_arg(arg)
    flat_fun, out_tree = flatten_fun(closed_fun, in_tree)
    out_flat = xla.xla_call(
        flat_fun, *args_flat,
        device=device, backend=backend, name=flat_fun.__name__,
        donated_invars=donated_invars, inline=inline)
    out_pytree_def = out_tree()
    out = tree_unflatten(out_pytree_def, out_flat)

    ### Decide whether we can support the C++ fast path
    # High level note: The Python tracing mechanism is complex; in particular
    # to know whether `jax.jit(f)(x)` will execute or trace, it's not enough to
    # inspect the argument x, we actually do need to execute it and look at the
    # outputs that could be tracers (if f is capturing `Tracer` by closure).
    execute: Optional[functools.partial] = (
        dispatch._xla_callable.most_recent_entry())
    use_fastpath = (
        # This is if we have already executed this code-path (most-recent entry
        # has been reset to None). Thus, we do not support the fast-path.
        execute is not None and
        execute.func is dispatch._execute_compiled and  # not trivial, not pmap
        # Not supported: ShardedDeviceArray
        all(device_array.type_is_device_array(x) for x in out_flat))
    ### If we can use the fastpath, we return required info to the caller.
    if use_fastpath:
      # NOTE(review): unpacking assumes the exact argument layout of
      # dispatch._execute_compiled's partial — keep in sync with dispatch.py.
      _, xla_executable, _, result_handlers, kept_var_idx = execute.args
      sticky_device = None
      avals = []
      lazy_exprs = [None] * len(result_handlers)
      for result_handler in result_handlers:
        aval, sticky_device = result_handler.args
        avals.append(aval)
      assert len(avals) == len(out_flat)
      # Bit per flat argument: was it kept (not pruned) by compilation?
      kept_var_bitvec = [i in kept_var_idx for i in range(len(args_flat))]
      fastpath_data = _FastpathData(xla_executable, out_pytree_def,
                                    sticky_device, avals, lazy_exprs,
                                    kept_var_bitvec)
    else:
      fastpath_data = None
    return out, fastpath_data

  def get_device_info():
    """Backends do not exist before __main__ is being executed."""
    committed_to_device = device is not None or backend is not None
    if device is not None:
      default_device = device
    else:
      backend_ = xb.get_backend(backend)
      default_device = backend_.get_default_device_assignment(1)[0]
    return _BackendAndDeviceInfo(default_device, committed_to_device)

  cpp_jitted_f = jax_jit.jit(fun, cache_miss, get_device_info,
                             static_argnums=static_argnums,
                             static_argnames=static_argnames,
                             donate_argnums=donate_argnums,
                             cache=_cpp_jit_cache)
  f_jitted = wraps(fun)(cpp_jitted_f)
  # Attach ahead-of-time lowering, mirroring _python_jit.
  f_jitted.lower = _jit_lower(fun, static_argnums, static_argnames, device,
                              backend, donate_argnums, inline)
  return f_jitted
class Lowered:
  """Lowering of a function specialized to argument types and values.

  A lowering is a computation ready for compilation. This class carries a
  lowering together with the remaining information needed to later compile
  and execute it, and provides a common API for querying properties of
  lowered computations across JAX's various lowering paths
  (``jit``, ``pmap``, etc.).
  """
  __slots__ = ['in_tree', 'out_tree', 'donate_argnums', '_lowering',
               '_no_kwargs']

  in_tree: PyTreeDef
  out_tree: PyTreeDef
  donate_argnums: Tuple[int]
  _lowering: Union[dispatch.XlaComputation,
                   pxla.MeshComputation,
                   pxla.PmapComputation]
  _no_kwargs: bool

  def __init__(self, lowering, in_tree, out_tree, donate_argnums,
               no_kwargs=False):
    self._lowering = lowering
    self._no_kwargs = no_kwargs
    self.in_tree = in_tree
    self.out_tree = out_tree
    self.donate_argnums = donate_argnums

  def compile(self) -> 'Compiled':
    """Compile this lowering, producing an executable ``Compiled``."""
    executable = self._lowering.compile()
    return Compiled(executable, self.in_tree, self.out_tree,
                    self.donate_argnums, self._no_kwargs)

  def compiler_ir(self, dialect: Optional[str] = None):
    """Return the pre-compilation IR; ``dialect`` is 'hlo' (default) or 'mhlo'."""
    if dialect == "mhlo":
      return self._lowering.mhlo()
    if dialect == "hlo" or dialect is None:
      return self._lowering.hlo()
    raise ValueError(f"Unknown dialect {dialect}")

  # TODO(frostig): remove this in favor of `compiler_ir`
  def _xla_computation(self):
    return self._lowering.hlo()
class Compiled:
  """Compiled representation of a function specialized to types/values.

  A compiled computation is associated with an executable and the remaining
  information needed to execute it, and provides a common API for querying
  properties of compiled computations across JAX's various compilation paths
  and backends.
  """
  __slots__ = ['in_tree', 'out_tree', 'donate_argnums', '_executable',
               '_no_kwargs']

  in_tree: PyTreeDef
  out_tree: PyTreeDef
  donate_argnums: Tuple[int]
  _executable: Union[dispatch.XlaCompiledComputation,
                     pxla.MeshExecutable,
                     pxla.PmapExecutable]
  _no_kwargs: bool

  def __init__(self, executable, in_tree, out_tree, donate_argnums,
               no_kwargs=False):
    self._executable = executable
    self._no_kwargs = no_kwargs
    self.in_tree = in_tree
    self.out_tree = out_tree
    self.donate_argnums = donate_argnums

  def compiler_ir(self):
    """Post-compilation IR.

    Compilation typically involves code transformation and
    optimization. This method exists to reflect the compiler's
    representation of the program after such passes, whenever
    possible.
    """
    return self._executable.xla_executable().hlo_modules()

  def runtime_executable(self):
    return self._executable.xla_executable()

  def _xla_executable(self):
    # TODO(frostig): finalize API. For now, return the underlying
    # executable directly via this method.
    return self._executable.xla_executable()

  def __call__(self, *args, **kwargs):
    if self._no_kwargs and kwargs:
      kws = ', '.join(kwargs.keys())
      raise NotImplementedError(
          'function was compiled by a transformation that does not support '
          f'keyword arguments, but called with keyword arguments: {kws}')
    # Flatten exactly the structure that was flattened at lowering time.
    args_flat, in_tree = tree_flatten(
        args if self._no_kwargs else (args, kwargs))
    if in_tree != self.in_tree:
      # TODO(frostig): provide more info about the source function
      # and transformation
      raise TypeError(
          f'function compiled for {self.in_tree}, called with {in_tree}')
    try:
      out_flat = self._executable.call(*args_flat)
    except TypeError:
      # We can't transform ahead-of-time compiled calls, since we've
      # lowered and compiled for a fixed function signature, and JAX
      # transformations change signatures. We interpret a Tracer
      # argument as an indication of a transformation attempt. We
      # could check this before the executable call, but we'd rather
      # avoid isinstance checks on the call path. Seeing a TypeError
      # might mean that arguments have JAX-invalid types, which in
      # turn might mean some are Tracers.
      tracers = [a for a in args_flat if isinstance(a, core.Tracer)]
      if tracers:
        raise TypeError(
            'Cannot apply JAX transformations to a function lowered and '
            'compiled for a particular signature. Detected argument of '
            f'Tracer type {type(tracers[0])}.')
      raise
    return tree_unflatten(self.out_tree, out_flat)
def _jit_lower(fun, static_argnums, static_argnames, device, backend,
               donate_argnums, inline):
  """Make a ``lower`` method for jitted functions."""
  # If the function we returned from ``jit`` were a class instance,
  # this might naturally be a method, with ``fun`` as a ``self`` and
  # all the other arguments stored as attributes.

  def arg_spec(x):
    # like xla.arg_spec but duck-types on x.shape and x.dtype
    aval = shaped_abstractify(x)
    try:
      return aval, x._device
    except AttributeError:
      # Fix: this was a bare `except:`, which also swallowed
      # KeyboardInterrupt/SystemExit. Only a missing `_device`
      # attribute is expected when x is not a DeviceArray.
      return aval, None

  @api_boundary
  def lower(*args, **kwargs) -> Lowered:
    """Lower this function for the given arguments.

    A lowered function is staged out of Python and translated to a
    compiler's input language, possibly in a backend-dependent
    manner. It is ready for compilation but not yet compiled.

    Returns:
      A ``Lowered`` instance representing the lowering.
    """
    closed_fun, in_tree, args_flat, donated_invars = _prepare_jit(
        fun, static_argnums, static_argnames, donate_argnums, args, kwargs)
    flat_fun, out_tree = flatten_fun(closed_fun, in_tree)
    name = flat_fun.__name__
    arg_specs = unsafe_map(arg_spec, args_flat)
    computation = dispatch.lower_xla_callable(
        flat_fun, device, backend, name, donated_invars, *arg_specs)
    return Lowered(computation, in_tree, out_tree(), donate_argnums)

  return lower
@contextmanager
def disable_jit():
  """Context manager that disables :py:func:`jit` behavior under its dynamic context.

  For debugging it is useful to have a mechanism that disables
  :py:func:`jit` everywhere in a dynamic context. Inside a jitted function,
  values with a data dependence on the arguments are traced and abstracted
  (e.g. to a :py:class:`ShapedArray`, which describes shape and dtype but no
  concrete values), which shows up when using benign side effects such as
  printing:

  >>> import jax
  >>>
  >>> @jax.jit
  ... def f(x):
  ...   y = x * 2
  ...   print("Value of y is", y)
  ...   return y + 3
  ...
  >>> print(f(jax.numpy.array([1, 2, 3])))
  Value of y is Traced<ShapedArray(int32[3])>with<DynamicJaxprTrace(level=0/1)>
  [5 7 9]

  To see concrete values while debugging, and avoid the tracer too, use
  this context manager:

  >>> import jax
  >>>
  >>> with jax.disable_jit():
  ...   print(f(jax.numpy.array([1, 2, 3])))
  ...
  Value of y is [2 4 6]
  [5 7 9]
  """
  # Flip the thread-local disable-jit config flag for the dynamic extent
  # of this context.
  with ExitStack() as stack:
    stack.enter_context(_disable_jit(True))
    yield
def xla_computation(fun: Callable,
static_argnums: Union[int, Iterable[int]] = (),
axis_env: Optional[Sequence[Tuple[AxisName, int]]] = None,
in_parts=None, out_parts=None,
backend: Optional[str] = None,
tuple_args: bool = False,
instantiate_const_outputs: Optional[bool] = None,
return_shape: bool = False,
donate_argnums: Union[int, Iterable[int]] = ()) -> Callable:
"""Creates a function that produces its XLA computation given example args.
Args:
fun: Function from which to form XLA computations.
static_argnums: See the :py:func:`jax.jit` docstring.
axis_env: Optional, a sequence of pairs where the first element is an axis
name and the second element is a positive integer representing the size of
the mapped axis with that name. This parameter is useful when lowering
functions that involve parallel communication collectives, and it
specifies the axis name/size environment that would be set up by
applications of :py:func:`jax.pmap`. See the examples below.
in_parts: Optional, how each argument to ``fun`` should be partitioned or
replicated. This is used to specify partitioned XLA computations, see
``sharded_jit`` for more info.
out_parts: Optional, how each output of ``fun`` should be partitioned or
replicated. This is used to specify partitioned XLA computations, see
``sharded_jit`` for more info.
backend: This is an experimental feature and the API is likely to change.
Optional, a string representing the XLA backend: ``'cpu'``, ``'gpu'``, or
``'tpu'``.
tuple_args: Optional bool, defaults to ``False``. If ``True``, the resulting
XLA computation will have a single tuple argument that is unpacked into
the specified function arguments. If `None`, tupling will be enabled when
there are more than 100 arguments, since some platforms have limits on
argument arity.
instantiate_const_outputs: Deprecated argument, does nothing.
return_shape: Optional boolean, defaults to ``False``. If ``True``, the
wrapped function returns a pair where the first element is the XLA
computation and the second element is a pytree with the same structure as
the output of ``fun`` and where the leaves are objects with ``shape``,
``dtype``, and ``named_shape`` attributes representing the corresponding
types of the output leaves.
donate_argnums: Specify which arguments are "donated" to the computation.
It is safe to donate arguments if you no longer need them once the
computation has finished. In some cases XLA can make use of donated
buffers to reduce the amount of memory needed to perform a computation,
for example recycling one of your input buffers to store a result. You
should not reuse buffers that you donate to a computation, JAX will raise
an error if you try to.
Returns:
A wrapped version of ``fun`` that when applied to example arguments returns
a built XLA Computation (see xla_client.py), from which representations of
the unoptimized XLA HLO computation can be extracted using methods like
``as_hlo_text``, ``as_serialized_hlo_module_proto``, and
``as_hlo_dot_graph``. If the argument ``return_shape`` is ``True``, then the
wrapped function returns a pair where the first element is the XLA
Computation and the second element is a pytree representing the structure,
shapes, dtypes, and named shapes of the output of ``fun``.
Concrete example arguments are not always necessary. For those arguments not
indicated by ``static_argnums``, any object with ``shape`` and ``dtype``
attributes is acceptable (excepting namedtuples, which are treated as Python
containers).
For example:
>>> import jax
>>>
>>> def f(x): return jax.numpy.sin(jax.numpy.cos(x))
>>> c = jax.xla_computation(f)(3.)
>>> print(c.as_hlo_text()) # doctest: +SKIP
HloModule xla_computation_f.6
<BLANKLINE>
ENTRY xla_computation_f.6 {
constant.2 = pred[] constant(false)
parameter.1 = f32[] parameter(0)
cosine.3 = f32[] cosine(parameter.1)
sine.4 = f32[] sine(cosine.3)
ROOT tuple.5 = (f32[]) tuple(sine.4)
}
<BLANKLINE>
<BLANKLINE>
Alternatively, the assignment to ``c`` above could be written:
>>> import types
>>> scalar = types.SimpleNamespace(shape=(), dtype=np.dtype(np.float32))
>>> c = jax.xla_computation(f)(scalar)
Here's an example that involves a parallel collective and axis name:
>>> def f(x): return x - jax.lax.psum(x, 'i')
>>> c = jax.xla_computation(f, axis_env=[('i', 4)])(2)
>>> print(c.as_hlo_text()) # doctest: +SKIP
HloModule jaxpr_computation.9
primitive_computation.3 {
parameter.4 = s32[] parameter(0)
parameter.5 = s32[] parameter(1)
ROOT add.6 = s32[] add(parameter.4, parameter.5)
}
ENTRY jaxpr_computation.9 {
tuple.1 = () tuple()
parameter.2 = s32[] parameter(0)
all-reduce.7 = s32[] all-reduce(parameter.2), replica_groups={{0,1,2,3}}, to_apply=primitive_computation.3
ROOT subtract.8 = s32[] subtract(parameter.2, all-reduce.7)
}
<BLANKLINE>
<BLANKLINE>
Notice the ``replica_groups`` that were generated. Here's an example that
generates more interesting ``replica_groups``:
>>> from jax import lax
>>> def g(x):
... rowsum = lax.psum(x, 'i')
... colsum = lax.psum(x, 'j')
... allsum = lax.psum(x, ('i', 'j'))
... return rowsum, colsum, allsum
...
>>> axis_env = [('i', 4), ('j', 2)]
>>> c = xla_computation(g, axis_env=axis_env)(5.)
>>> print(c.as_hlo_text()) # doctest: +SKIP
HloModule jaxpr_computation__1.19
[removed uninteresting text here]
ENTRY jaxpr_computation__1.19 {
tuple.1 = () tuple()
parameter.2 = f32[] parameter(0)
all-reduce.7 = f32[] all-reduce(parameter.2), replica_groups={{0,2,4,6},{1,3,5,7}}, to_apply=primitive_computation__1.3
all-reduce.12 = f32[] all-reduce(parameter.2), replica_groups={{0,1},{2,3},{4,5},{6,7}}, to_apply=primitive_computation__1.8
all-reduce.17 = f32[] all-reduce(parameter.2), replica_groups={{0,1,2,3,4,5,6,7}}, to_apply=primitive_computation__1.13
ROOT tuple.18 = (f32[], f32[], f32[]) tuple(all-reduce.7, all-reduce.12, all-reduce.17)
}
"""
del instantiate_const_outputs # Unused
_check_callable(fun)
static_argnums = _ensure_index_tuple(static_argnums)
donate_argnums = _ensure_index_tuple(donate_argnums)
donate_argnums = rebase_donate_argnums(donate_argnums, static_argnums)
fun_name = getattr(fun, "__name__", "unknown")
backend = backend if backend is not None else xb.get_backend().platform
def make_axis_env(nreps):
if axis_env is None:
return xla.AxisEnv(nreps, (), ())
else:
nreps = nreps * prod(size for name, size in axis_env)
names, sizes = unzip2(axis_env)
return xla.AxisEnv(nreps, names, sizes)
@wraps(fun)
@api_boundary
def computation_maker(*args, **kwargs):
if max(static_argnums + donate_argnums, default=-1) >= len(args):
raise ValueError(f"jitted function has static_argnums={static_argnums},"
f" donate_argnums={donate_argnums} but "
f"was called with only {len(args)} positional arguments.")
f = lu.wrap_init(fun)
if static_argnums:
f, dyn_args = argnums_partial_except(f, static_argnums, args, allow_invalid=False)
else:
dyn_args = args
args_flat, in_tree = tree_flatten((dyn_args, kwargs))
if donate_argnums:
donated_invars = donation_vector(donate_argnums, dyn_args, kwargs)
else:
donated_invars = (False,) * len(args_flat)
if in_parts is None:
in_parts_flat = None
else:
in_parts_flat = tuple(flatten_axes(
"xla_computation in_parts", in_tree.children()[0], in_parts))
jaxtree_fun, out_tree = flatten_fun(f, in_tree)
avals = map(shaped_abstractify, args_flat)
with ExitStack() as stack:
for axis_name, size in axis_env or []:
stack.enter_context(core.extend_axis_env(axis_name, size, None))
jaxpr, out_avals, consts = pe.trace_to_jaxpr_dynamic(jaxtree_fun, avals)
jaxpr = dispatch.apply_outfeed_rewriter(jaxpr)
axis_env_ = make_axis_env(dispatch.jaxpr_replicas(jaxpr))
if out_parts is None:
out_parts_flat = None
else:
out_parts_flat = tuple(flatten_axes(
"xla_computation out_parts", out_tree(), out_parts))
c = xc.XlaBuilder(f"xla_computation_{fun_name}")
xla_consts = map(partial(xla.pyval_to_ir_constant, c), consts)
should_tuple = tuple_args if tuple_args is not None else (len(avals) > 100)
xla_args, donated_invars = xla._xla_callable_args(
c, avals, should_tuple, partitions=in_parts_flat, donated_invars=donated_invars)
ctx = xla.TranslationContext(
c, backend, axis_env_,
extend_name_stack(wrap_name(fun_name, "xla_computation")))
out_nodes = xla.jaxpr_subcomp(ctx, jaxpr, xla_consts, *xla_args)
build_out_tuple = partial(xc.ops.Tuple, c, out_nodes)
if out_parts is not None:
out_tuple = xla.with_sharding(c, out_parts_flat, build_out_tuple)
else:
out_tuple = build_out_tuple()
if any(donated_invars):
donated_invars = xla.set_up_aliases(c, xla_args, c.GetShape(out_tuple),
donated_invars, tuple_args)
if any(donated_invars):
shapes = [str(c.GetShape(a)) for a, d in zip(xla_args, donated_invars) if d]
warn(f"Some donated buffers were not usable: {', '.join(shapes)}")
built = c.build(out_tuple)
out_shapes_flat = [
ShapeDtypeStruct(a.shape, a.dtype, a.named_shape) for a in out_avals]
out_shape = tree_unflatten(out_tree(), out_shapes_flat)
for out_aval in out_avals:
if not isinstance(out_aval, xla.ShapedArray):
raise RuntimeError("As we want to propagate the weak_type, we need "
"to get a ShapedArray, otherwise this "
"information is lost")
if return_shape:
return built, out_shape
else:
return built
return computation_maker
def grad(fun: Callable, argnums: Union[int, Sequence[int]] = 0,
         has_aux: bool = False, holomorphic: bool = False,
         allow_int: bool = False,
         reduce_axes: Sequence[AxisName] = ()) -> Callable:
  """Creates a function that evaluates the gradient of ``fun``.

  Args:
    fun: Function to be differentiated. Its arguments at positions specified by
      ``argnums`` should be arrays, scalars, or standard Python containers.
      Argument arrays in the positions specified by ``argnums`` must be of
      inexact (i.e., floating-point or complex) type. It
      should return a scalar (which includes arrays with shape ``()`` but not
      arrays with shape ``(1,)`` etc.)
    argnums: Optional, integer or sequence of integers. Specifies which
      positional argument(s) to differentiate with respect to (default 0).
    has_aux: Optional, bool. Indicates whether ``fun`` returns a pair where the
      first element is considered the output of the mathematical function to be
      differentiated and the second element is auxiliary data. Default False.
    holomorphic: Optional, bool. Indicates whether ``fun`` is promised to be
      holomorphic. If True, inputs and outputs must be complex. Default False.
    allow_int: Optional, bool. Whether to allow differentiating with
      respect to integer valued inputs. The gradient of an integer input will
      have a trivial vector-space dtype (float0). Default False.
    reduce_axes: Optional, tuple of axis names. If an axis is listed here, and
      ``fun`` implicitly broadcasts a value over that axis, the backward pass
      will perform a ``psum`` of the corresponding gradient. Otherwise, the
      gradient will be per-example over named axes. For example, if ``'batch'``
      is a named batch axis, ``grad(f, reduce_axes=('batch',))`` will create a
      function that computes the total gradient while ``grad(f)`` will create
      one that computes the per-example gradient.

  Returns:
    A function with the same arguments as ``fun``, that evaluates the gradient
    of ``fun``. If ``argnums`` is an integer then the gradient has the same
    shape and type as the positional argument indicated by that integer. If
    argnums is a tuple of integers, the gradient is a tuple of values with the
    same shapes and types as the corresponding arguments. If ``has_aux`` is True
    then a pair of (gradient, auxiliary_data) is returned.

  For example:

  >>> import jax
  >>>
  >>> grad_tanh = jax.grad(jax.numpy.tanh)
  >>> print(grad_tanh(0.2))
  0.961043
  """
  # grad is a thin wrapper over value_and_grad that simply drops the value;
  # all argument validation happens inside value_and_grad.
  value_and_grad_f = value_and_grad(fun, argnums, has_aux=has_aux,
                                    holomorphic=holomorphic,
                                    allow_int=allow_int,
                                    reduce_axes=reduce_axes)

  docstr = ("Gradient of {fun} with respect to positional argument(s) "
            "{argnums}. Takes the same arguments as {fun} but returns the "
            "gradient, which has the same shape as the arguments at "
            "positions {argnums}.")

  @wraps(fun, docstr=docstr, argnums=argnums)
  @api_boundary
  def grad_f(*args, **kwargs):
    _, g = value_and_grad_f(*args, **kwargs)
    return g

  @wraps(fun, docstr=docstr, argnums=argnums)
  @api_boundary
  def grad_f_aux(*args, **kwargs):
    # With has_aux=True the value slot is a (value, aux) pair; keep only aux.
    (_, aux), g = value_and_grad_f(*args, **kwargs)
    return g, aux

  return grad_f_aux if has_aux else grad_f
def value_and_grad(fun: Callable, argnums: Union[int, Sequence[int]] = 0,
                   has_aux: bool = False, holomorphic: bool = False,
                   allow_int: bool = False, reduce_axes: Sequence[AxisName] = ()
                   ) -> Callable[..., Tuple[Any, Any]]:
  """Create a function that evaluates both ``fun`` and the gradient of ``fun``.

  Args:
    fun: Function to be differentiated. Its arguments at positions specified by
      ``argnums`` should be arrays, scalars, or standard Python containers. It
      should return a scalar (which includes arrays with shape ``()`` but not
      arrays with shape ``(1,)`` etc.)
    argnums: Optional, integer or sequence of integers. Specifies which
      positional argument(s) to differentiate with respect to (default 0).
    has_aux: Optional, bool. Indicates whether ``fun`` returns a pair where the
      first element is considered the output of the mathematical function to be
      differentiated and the second element is auxiliary data. Default False.
    holomorphic: Optional, bool. Indicates whether ``fun`` is promised to be
      holomorphic. If True, inputs and outputs must be complex. Default False.
    allow_int: Optional, bool. Whether to allow differentiating with
      respect to integer valued inputs. The gradient of an integer input will
      have a trivial vector-space dtype (float0). Default False.
    reduce_axes: Optional, tuple of axis names. If an axis is listed here, and
      ``fun`` implicitly broadcasts a value over that axis, the backward pass
      will perform a ``psum`` of the corresponding gradient. Otherwise, the
      gradient will be per-example over named axes. For example, if ``'batch'``
      is a named batch axis, ``value_and_grad(f, reduce_axes=('batch',))`` will
      create a function that computes the total gradient while
      ``value_and_grad(f)`` will create one that computes the per-example
      gradient.

  Returns:
    A function with the same arguments as ``fun`` that evaluates both ``fun``
    and the gradient of ``fun`` and returns them as a pair (a two-element
    tuple). If ``argnums`` is an integer then the gradient has the same shape
    and type as the positional argument indicated by that integer. If argnums is
    a sequence of integers, the gradient is a tuple of values with the same
    shapes and types as the corresponding arguments.
  """
  docstr = ("Value and gradient of {fun} with respect to positional "
            "argument(s) {argnums}. Takes the same arguments as {fun} but "
            "returns a two-element tuple where the first element is the value "
            "of {fun} and the second element is the gradient, which has the "
            "same shape as the arguments at positions {argnums}.")

  _check_callable(fun)
  # Normalize argnums to concrete ints, failing loudly if a tracer leaked in.
  argnums = core.concrete_or_error(_ensure_index, argnums)
  reduce_axes = _ensure_str_tuple(reduce_axes)

  @wraps(fun, docstr=docstr, argnums=argnums)
  @api_boundary
  def value_and_grad_f(*args, **kwargs):
    max_argnum = argnums if isinstance(argnums, int) else max(argnums)
    if max_argnum >= len(args):
      raise TypeError(f"differentiating with respect to argnums={argnums} requires at least "
                      f"{max_argnum + 1} positional arguments to be passed by the caller, "
                      f"but got only {len(args)} positional arguments.")

    # Keyword arguments are closed over: argnums only index positional args.
    f = lu.wrap_init(fun, kwargs)
    # Freeze the non-differentiated positional args into the wrapped function.
    f_partial, dyn_args = argnums_partial(f, argnums, args,
                                          require_static_args_hashable=False)
    for leaf in tree_leaves(dyn_args):
      _check_input_dtype_grad(holomorphic, allow_int, leaf)
    if not has_aux:
      ans, vjp_py = _vjp(f_partial, *dyn_args, reduce_axes=reduce_axes)
    else:
      ans, vjp_py, aux = _vjp(
          f_partial, *dyn_args, has_aux=True, reduce_axes=reduce_axes)
    _check_scalar(ans)  # gradients are only defined for scalar outputs
    tree_map(partial(_check_output_dtype_grad, holomorphic), ans)
    # Seed the VJP with a one matching the scalar output's dtype/weak type.
    g = vjp_py(jax.lax._one(ans))
    g = g[0] if isinstance(argnums, int) else g
    if not has_aux:
      return ans, g
    else:
      return (ans, aux), g

  return value_and_grad_f
def _check_scalar(x):
  """Raise ``TypeError`` unless ``x`` abstracts to a rank-0 ``ShapedArray``."""
  # The exact runtime error text is preserved from the original.
  msg = "Gradient only defined for scalar-output functions. Output {}.".format
  try:
    aval = core.get_aval(x)
  except TypeError as e:
    raise TypeError(msg(f"was {x}")) from e
  # Guard clauses replace the original try/else + nested-if structure.
  if not isinstance(aval, ShapedArray):
    raise TypeError(msg(f"had abstract value {aval}"))
  if aval.shape != ():
    raise TypeError(msg(f"had shape: {aval.shape}"))
def _check_input_dtype_revderiv(name, holomorphic, allow_int, x):
  """Validate the dtype of one input leaf for reverse-mode differentiation.

  ``name`` is the user-facing transformation name used in error messages.
  """
  _check_arg(x)
  dtype = core.get_aval(x).dtype
  if holomorphic and not dtypes.issubdtype(dtype, np.complexfloating):
    raise TypeError(f"{name} with holomorphic=True requires inputs with complex dtype, "
                    f"but got {dtype.name}.")
  is_int_or_bool = (dtypes.issubdtype(dtype, np.integer) or
                    dtypes.issubdtype(dtype, np.bool_))
  if is_int_or_bool:
    # Integer/boolean inputs are only legal when the caller opted in.
    if not allow_int:
      raise TypeError(f"{name} requires real- or complex-valued inputs (input dtype "
                      f"that is a sub-dtype of np.inexact), but got {dtype.name}. "
                      "If you want to use Boolean- or integer-valued inputs, use vjp "
                      "or set allow_int to True.")
  elif not dtypes.issubdtype(dtype, np.inexact):
    raise TypeError(f"{name} requires numerical-valued inputs (input dtype that is a "
                    f"sub-dtype of np.bool_ or np.number), but got {dtype.name}.")
# Input-dtype validator for ``grad``: reverse-mode rules, "grad"-prefixed errors.
_check_input_dtype_grad = partial(_check_input_dtype_revderiv, "grad")
def _check_output_dtype_revderiv(name, holomorphic, x):
  """Validate the dtype of one output leaf for reverse-mode differentiation.

  ``name`` is the user-facing transformation name used in error messages.
  """
  dtype = core.get_aval(x).dtype
  if holomorphic:
    # Holomorphic mode requires complex outputs; anything complex is fine here.
    if dtypes.issubdtype(dtype, np.complexfloating):
      return
    raise TypeError(f"{name} with holomorphic=True requires outputs with complex dtype, "
                    f"but got {dtype.name}.")
  if dtypes.issubdtype(dtype, np.complexfloating):
    raise TypeError(f"{name} requires real-valued outputs (output dtype that is "
                    f"a sub-dtype of np.floating), but got {dtype.name}. "
                    "For holomorphic differentiation, pass holomorphic=True. "
                    "For differentiation of non-holomorphic functions involving complex "
                    "outputs, use jax.vjp directly.")
  if not dtypes.issubdtype(dtype, np.floating):
    raise TypeError(f"{name} requires real-valued outputs (output dtype that is "
                    f"a sub-dtype of np.floating), but got {dtype.name}. "
                    "For differentiation of functions with integer outputs, use "
                    "jax.vjp directly.")
# Output-dtype validator for ``grad``: reverse-mode rules, "grad"-prefixed errors.
_check_output_dtype_grad = partial(_check_output_dtype_revderiv, "grad")
def jacfwd(fun: Callable, argnums: Union[int, Sequence[int]] = 0,
           holomorphic: bool = False) -> Callable:
  """Jacobian of ``fun`` evaluated column-by-column using forward-mode AD.

  Args:
    fun: Function whose Jacobian is to be computed.
    argnums: Optional, integer or sequence of integers. Specifies which
      positional argument(s) to differentiate with respect to (default ``0``).
    holomorphic: Optional, bool. Indicates whether ``fun`` is promised to be
      holomorphic. Default False.

  Returns:
    A function with the same arguments as ``fun``, that evaluates the Jacobian of
    ``fun`` using forward-mode automatic differentiation.

  >>> import jax
  >>> import jax.numpy as jnp
  >>>
  >>> def f(x):
  ...   return jnp.asarray(
  ...     [x[0], 5*x[2], 4*x[1]**2 - 2*x[2], x[2] * jnp.sin(x[0])])
  ...
  >>> print(jax.jacfwd(f)(jnp.array([1., 2., 3.])))
  [[ 1.       0.       0.     ]
   [ 0.       0.       5.     ]
   [ 0.      16.      -2.     ]
   [ 1.6209   0.       0.84147]]
  """
  _check_callable(fun)
  argnums = _ensure_index(argnums)

  def jacfun(*args, **kwargs):
    f = lu.wrap_init(fun, kwargs)
    # Freeze the non-differentiated positional args into the wrapped function.
    f_partial, dyn_args = argnums_partial(f, argnums, args,
                                          require_static_args_hashable=False)
    tree_map(partial(_check_input_dtype_jacfwd, holomorphic), dyn_args)
    # Push the whole standard basis through one vmapped JVP: the primal output
    # y stays unbatched (out_axes None) and tangents stack along the last axis.
    pushfwd = partial(_jvp, f_partial, dyn_args)
    y, jac = vmap(pushfwd, out_axes=(None, -1))(_std_basis(dyn_args))
    tree_map(partial(_check_output_dtype_jacfwd, holomorphic), y)
    example_args = dyn_args[0] if isinstance(argnums, int) else dyn_args
    # Unstack the basis axis back into the structure of the inputs.
    return tree_map(partial(_jacfwd_unravel, example_args), y, jac)

  return jacfun
def _check_input_dtype_jacfwd(holomorphic: bool, x: Any) -> None:
  """Validate the dtype of one input leaf for forward-mode ``jacfwd``."""
  _check_arg(x)
  dtype = core.get_aval(x).dtype
  if holomorphic:
    # Holomorphic differentiation demands complex inputs.
    if dtypes.issubdtype(dtype, np.complexfloating):
      return
    raise TypeError("jacfwd with holomorphic=True requires inputs with complex "
                    f"dtype, but got {dtype.name}.")
  if not dtypes.issubdtype(dtype, np.floating):
    raise TypeError("jacfwd requires real-valued inputs (input dtype that is "
                    f"a sub-dtype of np.floating), but got {dtype.name}. "
                    "For holomorphic differentiation, pass holomorphic=True. "
                    "For differentiation of non-holomorphic functions involving "
                    "complex inputs or integer inputs, use jax.jvp directly.")
def _check_output_dtype_jacfwd(holomorphic, x):
  """Output check for ``jacfwd``: holomorphic mode demands complex outputs."""
  dtype = core.get_aval(x).dtype
  if holomorphic and not dtypes.issubdtype(dtype, np.complexfloating):
    raise TypeError("jacfwd with holomorphic=True requires outputs with complex dtype, "
                    f"but got {dtype.name}.")
def jacrev(fun: Callable, argnums: Union[int, Sequence[int]] = 0,
           holomorphic: bool = False, allow_int: bool = False) -> Callable:
  """Jacobian of ``fun`` evaluated row-by-row using reverse-mode AD.

  Args:
    fun: Function whose Jacobian is to be computed.
    argnums: Optional, integer or sequence of integers. Specifies which
      positional argument(s) to differentiate with respect to (default ``0``).
    holomorphic: Optional, bool. Indicates whether ``fun`` is promised to be
      holomorphic. Default False.
    allow_int: Optional, bool. Whether to allow differentiating with
      respect to integer valued inputs. The gradient of an integer input will
      have a trivial vector-space dtype (float0). Default False.

  Returns:
    A function with the same arguments as ``fun``, that evaluates the Jacobian of
    ``fun`` using reverse-mode automatic differentiation.

  >>> import jax
  >>> import jax.numpy as jnp
  >>>
  >>> def f(x):
  ...   return jnp.asarray(
  ...     [x[0], 5*x[2], 4*x[1]**2 - 2*x[2], x[2] * jnp.sin(x[0])])
  ...
  >>> print(jax.jacrev(f)(jnp.array([1., 2., 3.])))
  [[ 1.       0.       0.     ]
   [ 0.       0.       5.     ]
   [ 0.      16.      -2.     ]
   [ 1.6209   0.       0.84147]]
  """
  _check_callable(fun)

  def jacfun(*args, **kwargs):
    f = lu.wrap_init(fun, kwargs)
    # Freeze the non-differentiated positional args into the wrapped function.
    f_partial, dyn_args = argnums_partial(f, argnums, args,
                                          require_static_args_hashable=False)
    tree_map(partial(_check_input_dtype_jacrev, holomorphic, allow_int), dyn_args)
    y, pullback = _vjp(f_partial, *dyn_args)
    tree_map(partial(_check_output_dtype_jacrev, holomorphic), y)
    # One VJP per output-basis vector, batched with vmap (rows stack on axis 0).
    jac = vmap(pullback)(_std_basis(y))
    jac = jac[0] if isinstance(argnums, int) else jac
    example_args = dyn_args[0] if isinstance(argnums, int) else dyn_args
    jac_tree = tree_map(partial(_jacrev_unravel, y), example_args, jac)
    # Transpose from output-major nesting to the (output, input) block layout.
    return tree_transpose(tree_structure(example_args), tree_structure(y), jac_tree)

  return jacfun
# ``jax.jacobian`` defaults to reverse mode.
jacobian = jacrev

# jacrev shares the reverse-mode dtype validators, with "jacrev"-prefixed errors.
_check_input_dtype_jacrev = partial(_check_input_dtype_revderiv, "jacrev")
_check_output_dtype_jacrev = partial(_check_output_dtype_revderiv, "jacrev")
def hessian(fun: Callable, argnums: Union[int, Sequence[int]] = 0,
            holomorphic: bool = False) -> Callable:
  """Hessian of ``fun`` as a dense array.

  Args:
    fun: Function whose Hessian is to be computed.  Its arguments at positions
      specified by ``argnums`` should be arrays, scalars, or standard Python
      containers thereof. It should return arrays, scalars, or standard Python
      containers thereof.
    argnums: Optional, integer or sequence of integers. Specifies which
      positional argument(s) to differentiate with respect to (default ``0``).
    holomorphic: Optional, bool. Indicates whether ``fun`` is promised to be
      holomorphic. Default False.

  Returns:
    A function with the same arguments as ``fun``, that evaluates the Hessian of
    ``fun``.

  >>> import jax
  >>>
  >>> g = lambda x: x[0]**3 - 2*x[0]*x[1] - x[1]**6
  >>> print(jax.hessian(g)(jax.numpy.array([1., 2.])))
  [[   6.   -2.]
   [  -2. -480.]]

  :py:func:`hessian` is a generalization of the usual definition of the Hessian
  that supports nested Python containers (i.e. pytrees) as inputs and outputs.
  The tree structure of ``jax.hessian(fun)(x)`` is given by forming a tree
  product of the structure of ``fun(x)`` with a tree product of two copies of
  the structure of ``x``. A tree product of two tree structures is formed by
  replacing each leaf of the first tree with a copy of the second. For example:

  >>> import jax.numpy as jnp
  >>> f = lambda dct: {"c": jnp.power(dct["a"], dct["b"])}
  >>> print(jax.hessian(f)({"a": jnp.arange(2.) + 1., "b": jnp.arange(2.) + 2.}))
  {'c': {'a': {'a': DeviceArray([[[ 2., 0.], [ 0., 0.]],
                                 [[ 0., 0.], [ 0., 12.]]], dtype=float32),
               'b': DeviceArray([[[ 1.      ,  0.      ], [ 0.      ,  0.      ]],
                                 [[ 0.      ,  0.      ], [ 0.      , 12.317766]]], dtype=float32)},
         'b': {'a': DeviceArray([[[ 1.      ,  0.      ], [ 0.      ,  0.      ]],
                                 [[ 0.      ,  0.      ], [ 0.      , 12.317766]]], dtype=float32),
               'b': DeviceArray([[[0.       , 0.       ], [0.       , 0.       ]],
                                 [[0.       , 0.       ], [0.       , 3.843624 ]]], dtype=float32)}}}

  Thus each leaf in the tree structure of ``jax.hessian(fun)(x)`` corresponds to
  a leaf of ``fun(x)`` and a pair of leaves of ``x``. For each leaf in
  ``jax.hessian(fun)(x)``, if the corresponding array leaf of ``fun(x)`` has
  shape ``(out_1, out_2, ...)`` and the corresponding array leaves of ``x`` have
  shape ``(in_1_1, in_1_2, ...)`` and ``(in_2_1, in_2_2, ...)`` respectively,
  then the Hessian leaf has shape ``(out_1, out_2, ..., in_1_1, in_1_2, ...,
  in_2_1, in_2_2, ...)``. In other words, the Python tree structure represents
  the block structure of the Hessian, with blocks determined by the input and
  output pytrees.

  In particular, an array is produced (with no pytrees involved) when the
  function input ``x`` and output ``fun(x)`` are each a single array, as in the
  ``g`` example above. If ``fun(x)`` has shape ``(out1, out2, ...)`` and ``x``
  has shape ``(in1, in2, ...)`` then ``jax.hessian(fun)(x)`` has shape
  ``(out1, out2, ..., in1, in2, ..., in1, in2, ...)``. To flatten pytrees into
  1D vectors, consider using :py:func:`jax.flatten_util.flatten_pytree`.
  """
  # Forward-over-reverse: jacfwd(jacrev(fun)) composes the two Jacobian modes.
  return jacfwd(jacrev(fun, argnums, holomorphic), argnums, holomorphic)
def _std_basis(pytree):
  """Build standard-basis vectors spanning every scalar entry of ``pytree``.

  Returns a pytree with the same structure as ``pytree`` whose leaves carry an
  extra leading basis axis (axis 1 of the identity, split across leaves).
  """
  leaves = tree_flatten(pytree)[0]
  # Total number of scalar entries across all leaves.
  total = sum(np.size(leaf) for leaf in leaves)
  basis = jax.numpy.eye(total, dtype=dtypes.result_type(*leaves))
  return _unravel_array_into_pytree(pytree, 1, None, basis)
def _jacfwd_unravel(input_pytree, output_pytree_leaf, arr):
  # jacfwd stacks tangents along the last axis (out_axes=-1); split that axis
  # back into the structure of the differentiated inputs.
  return _unravel_array_into_pytree(input_pytree, -1, output_pytree_leaf, arr)
def _jacrev_unravel(output_pytree, input_pytree_leaf, arr):
  # jacrev stacks cotangents along the leading axis (vmap default out_axes=0);
  # split that axis back into the structure of the function outputs.
  return _unravel_array_into_pytree(output_pytree, 0, input_pytree_leaf, arr)
def _possible_downcast(x, example):
  """Cast ``x`` to the dtype/weak_type of ``example``, discarding the imaginary
  part when the target dtype is not complex.

  ``example is None`` means "keep x's own dtype/weak_type".
  """
  # Short-circuit keeps _dtype(example) unevaluated unless x is complex,
  # matching the original evaluation order.
  if (dtypes.issubdtype(x.dtype, np.complexfloating)
      and not dtypes.issubdtype(_dtype(example), np.complexfloating)):
    x = x.real
  if example is None:
    dtype = weak_type = None
  else:
    dtype = _dtype(example)
    weak_type = dtypes.is_weakly_typed(example)
  return jax._src.lax.lax._convert_element_type(x, dtype, weak_type)
def _unravel_array_into_pytree(pytree, axis, example, arr):
  """Split ``arr`` along ``axis`` into pieces shaped like the leaves of ``pytree``.

  Args:
    pytree: The pytree that provides the structure.
    axis: The axis of ``arr`` to unravel; may be negative.
    example: If specified, cast every piece to this value's dtype/weak_type;
      if None, cast to the dtype of the corresponding pytree leaf.
    arr: The array to be unraveled.
  """
  leaves, treedef = tree_flatten(pytree)
  axis = axis % arr.ndim
  # Cut points fall after each leaf's flattened size (the last leaf needs none).
  cuts = np.cumsum([np.size(leaf) for leaf in leaves[:-1]])
  pieces = _split(arr, cuts, axis)
  out_leaves = []
  for piece, leaf in zip(pieces, leaves):
    # Replace the unraveled axis with the leaf's own shape.
    shape = arr.shape[:axis] + np.shape(leaf) + arr.shape[axis+1:]
    target = leaf if example is None else example
    out_leaves.append(_possible_downcast(np.reshape(piece, shape), target))
  return tree_unflatten(treedef, out_leaves)
def _split(x, indices, axis):
if isinstance(x, np.ndarray):
return np.split(x, indices, axis)
else:
return x.split(indices, axis)
def vmap(fun: F, in_axes=0, out_axes=0, axis_name=None, axis_size=None) -> F:
  """Vectorizing map. Creates a function which maps ``fun`` over argument axes.

  Args:
    fun: Function to be mapped over additional axes.
    in_axes: An integer, None, or (nested) standard Python container
      (tuple/list/dict) thereof specifying which input array axes to map over.

      If each positional argument to ``fun`` is an array, then ``in_axes`` can
      be an integer, a None, or a tuple of integers and Nones with length equal
      to the number of positional arguments to ``fun``. An integer or ``None``
      indicates which array axis to map over for all arguments (with ``None``
      indicating not to map any axis), and a tuple indicates which axis to map
      for each corresponding positional argument. Axis integers must be in the
      range ``[-ndim, ndim)`` for each array, where ``ndim`` is the number of
      dimensions (axes) of the corresponding input array.

      If the positional arguments to ``fun`` are container types, the
      corresponding element of ``in_axes`` can itself be a matching container,
      so that distinct array axes can be mapped for different container
      elements. ``in_axes`` must be a container tree prefix of the positional
      argument tuple passed to ``fun``.

      Either ``axis_size`` must be provided explicitly, or at least one
      positional argument must have ``in_axes`` not None. The sizes of the
      mapped input axes for all mapped positional arguments must all be equal.

      Arguments passed as keywords are always mapped over their leading axis
      (i.e. axis index 0).

      See below for examples.

    out_axes: An integer, None, or (nested) standard Python container
      (tuple/list/dict) thereof indicating where the mapped axis should appear
      in the output. All outputs with a mapped axis must have a non-None
      ``out_axes`` specification. Axis integers must be in the range ``[-ndim,
      ndim)`` for each output array, where ``ndim`` is the number of dimensions
      (axes) of the array returned by the :func:`vmap`-ed function, which is one
      more than the number of dimensions (axes) of the corresponding array
      returned by ``fun``.
    axis_name: Optional, a hashable Python object used to identify the mapped
      axis so that parallel collectives can be applied.
    axis_size: Optional, an integer indicating the size of the axis to be
      mapped. If not provided, the mapped axis size is inferred from arguments.

  Returns:
    Batched/vectorized version of ``fun`` with arguments that correspond to
    those of ``fun``, but with extra array axes at positions indicated by
    ``in_axes``, and a return value that corresponds to that of ``fun``, but
    with extra array axes at positions indicated by ``out_axes``.

  For example, we can implement a matrix-matrix product using a vector dot
  product:

  >>> import jax.numpy as jnp
  >>>
  >>> vv = lambda x, y: jnp.vdot(x, y)  #  ([a], [a]) -> []
  >>> mv = vmap(vv, (0, None), 0)      #  ([b,a], [a]) -> [b]      (b is the mapped axis)
  >>> mm = vmap(mv, (None, 1), 1)      #  ([b,a], [a,c]) -> [b,c]  (c is the mapped axis)

  Here we use ``[a,b]`` to indicate an array with shape (a,b). Here are some
  variants:

  >>> mv1 = vmap(vv, (0, 0), 0)   #  ([b,a], [b,a]) -> [b]        (b is the mapped axis)
  >>> mv2 = vmap(vv, (0, 1), 0)   #  ([b,a], [a,b]) -> [b]        (b is the mapped axis)
  >>> mm2 = vmap(mv2, (1, 1), 0)  #  ([b,c,a], [a,c,b]) -> [c,b]  (c is the mapped axis)

  Here's an example of using container types in ``in_axes`` to specify which
  axes of the container elements to map over:

  >>> A, B, C, D = 2, 3, 4, 5
  >>> x = jnp.ones((A, B))
  >>> y = jnp.ones((B, C))
  >>> z = jnp.ones((C, D))
  >>> def foo(tree_arg):
  ...   x, (y, z) = tree_arg
  ...   return jnp.dot(x, jnp.dot(y, z))
  >>> tree = (x, (y, z))
  >>> print(foo(tree))
  [[12. 12. 12. 12. 12.]
   [12. 12. 12. 12. 12.]]
  >>> from jax import vmap
  >>> K = 6  # batch size
  >>> x = jnp.ones((K, A, B))  # batch axis in different locations
  >>> y = jnp.ones((B, K, C))
  >>> z = jnp.ones((C, D, K))
  >>> tree = (x, (y, z))
  >>> vfoo = vmap(foo, in_axes=((0, (1, 2)),))
  >>> print(vfoo(tree).shape)
  (6, 2, 5)

  Here's another example using container types in ``in_axes``, this time a
  dictionary, to specify the elements of the container to map over:

  >>> dct = {'a': 0., 'b': jnp.arange(5.)}
  >>> x = 1.
  >>> def foo(dct, x):
  ...  return dct['a'] + dct['b'] + x
  >>> out = vmap(foo, in_axes=({'a': None, 'b': 0}, None))(dct, x)
  >>> print(out)
  [1. 2. 3. 4. 5.]

  The results of a vectorized function can be mapped or unmapped. For example,
  the function below returns a pair with the first element mapped and the second
  unmapped. Only for unmapped results we can specify ``out_axes`` to be ``None``
  (to keep it unmapped).

  >>> print(vmap(lambda x, y: (x + y, y * 2.), in_axes=(0, None), out_axes=(0, None))(jnp.arange(2.), 4.))
  (DeviceArray([4., 5.], dtype=float32), 8.0)

  If the ``out_axes`` is specified for an unmapped result, the result is
  broadcast across the mapped axis:

  >>> print(vmap(lambda x, y: (x + y, y * 2.), in_axes=(0, None), out_axes=0)(jnp.arange(2.), 4.))
  (DeviceArray([4., 5.], dtype=float32), DeviceArray([8., 8.], dtype=float32, weak_type=True))

  If the ``out_axes`` is specified for a mapped result, the result is transposed
  accordingly.

  Finally, here's an example using ``axis_name`` together with collectives:

  >>> xs = jnp.arange(3. * 4.).reshape(3, 4)
  >>> print(vmap(lambda x: lax.psum(x, 'i'), axis_name='i')(xs))
  [[12. 15. 18. 21.]
   [12. 15. 18. 21.]
   [12. 15. 18. 21.]]

  See the :py:func:`jax.pmap` docstring for more examples involving collectives.
  """
  _check_callable(fun)
  docstr = ("Vectorized version of {fun}. Takes similar arguments as {fun} "
            "but with additional array axes over which {fun} is mapped.")
  if fun.__doc__:
    docstr += "\n\nOriginal documentation:\n\n"
    docstr += fun.__doc__

  axis_name = core.no_axis_name if axis_name is None else axis_name

  if isinstance(in_axes, list):
    # To be a tree prefix of the positional args tuple, in_axes can never be a
    # list: if in_axes is not a leaf, it must be a tuple of trees. However,
    # in cases like these users expect tuples and lists to be treated
    # essentially interchangeably, so we canonicalize lists to tuples here
    # rather than raising an error. https://github.com/google/jax/issues/2367
    in_axes = tuple(in_axes)

  # Validate axis-spec leaves eagerly so users get errors at vmap time rather
  # than at call time.
  if not all(type(l) is int or type(l) in batching.spec_types
             for l in tree_leaves(in_axes)):
    raise TypeError("vmap in_axes must be an int, None, or (nested) container "
                    f"with those types as leaves, but got {in_axes}.")
  if not all(type(l) is int or type(l) in batching.spec_types
             for l in tree_leaves(out_axes)):
    raise TypeError("vmap out_axes must be an int, None, or (nested) container "
                    f"with those types as leaves, but got {out_axes}.")

  @wraps(fun, docstr=docstr)
  @api_boundary
  def batched_fun(*args, **kwargs):
    args_flat, in_tree = tree_flatten((args, kwargs), is_leaf=batching.is_vmappable)
    f = lu.wrap_init(fun)
    flat_fun, out_tree = batching.flatten_fun_for_vmap(f, in_tree)
    # Keyword arguments are always mapped over their leading axis: the (in_axes, 0)
    # prefix pairs the user's spec with axis 0 for the kwargs subtree.
    in_axes_flat = flatten_axes("vmap in_axes", in_tree, (in_axes, 0), kws=True)
    # Infer the mapped axis size from the arguments unless given explicitly.
    axis_size_ = (axis_size if axis_size is not None else
                  _mapped_axis_size(in_tree, args_flat, in_axes_flat, "vmap",
                                    kws=True))
    out_flat = batching.batch(
        flat_fun, axis_name, axis_size_, in_axes_flat,
        lambda: flatten_axes("vmap out_axes", out_tree(), out_axes)
    ).call_wrapped(*args_flat)
    return tree_unflatten(out_tree(), out_flat)

  return batched_fun
def _mapped_axis_size(tree, vals, dims, name, *, kws=False):
  """Return the common size of all mapped axes, with detailed error reporting.

  Args:
    tree: treedef describing how ``vals`` unflattens into (args, kwargs).
    vals: flat list of argument leaves.
    dims: flat list of mapped-axis indices (``None`` for unmapped leaves).
    name: transformation name used in error messages (e.g. "vmap").
    kws: whether ``tree`` includes a keyword-arguments subtree.
  """
  if not vals:
    args, kwargs = tree_unflatten(tree, vals)
    raise ValueError(
        f"{name} wrapped function must be passed at least one argument "
        f"containing an array, got empty *args={args} and **kwargs={kwargs}"
    )

  def _get_axis_size(name: str, shape: Tuple[int, ...], axis: int):
    # Turn out-of-range (or scalar) axis lookups into a readable rank error.
    try:
      return shape[axis]
    except (IndexError, TypeError) as e:
      min_rank = axis + 1 if axis >= 0 else -axis
      raise ValueError(f"{name} was requested to map its argument along axis {axis}, "
                       f"which implies that its rank should be at least {min_rank}, "
                       f"but is only {len(shape)} (its shape is {shape})") from e

  # Collect the distinct sizes found along mapped axes; exactly one is valid.
  mapped_axis_sizes = {_get_axis_size(name, np.shape(x), d)
                       for x, d in zip(vals, dims)
                       if d is not None}
  try:
    size, = mapped_axis_sizes
    return size
  except ValueError as e:
    if not mapped_axis_sizes:
      raise ValueError(f"{name} must have at least one non-None value in in_axes") from e
    msg = f"{name} got inconsistent sizes for array axes to be mapped:\n" + "{}"
    # we switch the error message based on whether args is a tuple of arrays,
    # in which case we can produce an error message based on argument indices,
    # or if it has nested containers.
    if kws:
      # if keyword arguments are included in the tree, we make adapt the error
      # message only to be about the positional arguments
      tree, leaf = treedef_children(tree)
      assert treedef_is_leaf(leaf)
    # TODO(mattjj,phawkins): add a way to inspect pytree kind more directly
    if tree == tree_flatten((core.unit,) * tree.num_leaves)[1]:
      # Flat tuple of arrays: report per-argument shapes and group mismatches.
      lines1 = [f"arg {i} has shape {np.shape(x)} and axis {d} is to be mapped"
                for i, (x, d) in enumerate(zip(vals, dims))]
      sizes = collections.defaultdict(list)
      for i, (x, d) in enumerate(zip(vals, dims)):
        if d is not None:
          sizes[x.shape[d]].append(i)
      lines2 = ["{} {} {} {} to be mapped of size {}".format(
                  "args" if len(idxs) > 1 else "arg",
                  ", ".join(map(str, idxs)),
                  "have" if len(idxs) > 1 else "has",
                  "axes" if len(idxs) > 1 else "an axis",
                  size)
                for size, idxs in sizes.items()]
      raise ValueError(msg.format("\n".join(lines1 + ["so"] + lines2))) from None
    else:
      # Nested containers: show the whole tree of axis sizes instead.
      sizes = [x.shape[d] if d is not None else None for x, d in zip(vals, dims)]
      sizes = tree_unflatten(tree, sizes)
      raise ValueError(msg.format(f"the tree of axis sizes is:\n{sizes}")) from None
def pmap(
    fun: F,
    axis_name: Optional[AxisName] = None,
    *,
    in_axes=0,
    out_axes=0,
    static_broadcasted_argnums: Union[int, Iterable[int]] = (),
    devices: Optional[Sequence[xc.Device]] = None,
    backend: Optional[str] = None,
    axis_size: Optional[int] = None,
    donate_argnums: Union[int, Iterable[int]] = (),
    global_arg_shapes: Optional[Tuple[Tuple[int, ...], ...]] = None,
) -> F:
  """Parallel map with support for collective operations.

  :py:func:`pmap` expresses single-program multiple-data (SPMD) programs: it
  compiles ``fun`` with XLA (similarly to :py:func:`jit`) and executes
  replicas of it in parallel across XLA devices such as multiple GPUs or TPU
  cores. It is semantically comparable to :py:func:`vmap` in that both map a
  function over array axes, but where :py:func:`vmap` vectorizes by pushing
  the mapped axis down into primitive operations, :py:func:`pmap` replicates
  the function and runs each replica on its own device.

  The mapped axis size must be less than or equal to the number of local XLA
  devices, as returned by :py:func:`jax.local_device_count()` (unless
  ``devices`` is specified). For nested :py:func:`pmap` calls, the product of
  the mapped axis sizes must not exceed the number of XLA devices.

  .. note::
    :py:func:`pmap` compiles ``fun``, so while it can be combined with
    :py:func:`jit`, it's usually unnecessary.

  **Multi-process platforms:** On platforms such as TPU pods,
  :py:func:`pmap` is designed for SPMD Python programs where every process
  runs the same pmapped functions in the same order. Each process calls the
  pmapped function with mapped axis size equal to its number of *local*
  devices (unless ``devices`` is specified), but collective operations in
  ``fun`` are computed over *all* participating devices across processes via
  device-to-device communication — conceptually a pmap over a single array
  sharded across processes, where each process "sees" only its local shard of
  the input and output.

  Args:
    fun: Function to be mapped over argument axes. Its arguments and return
      value should be arrays, scalars, or (nested) standard Python containers
      (tuple/list/dict) thereof. Positional arguments indicated by
      ``static_broadcasted_argnums`` can be anything at all, provided they
      are hashable and have an equality operation defined.
    axis_name: Optional, a hashable Python object used to identify the mapped
      axis so that parallel collectives (e.g. :func:`jax.lax.psum`) can refer
      to it.
    in_axes: A non-negative integer, None, or nested Python container thereof
      that specifies which axes of positional arguments to map over.
      Arguments passed as keywords are always mapped over their leading axis
      (i.e. axis index 0). See :py:func:`vmap` for details.
    out_axes: A non-negative integer, None, or nested Python container
      thereof indicating where the mapped axis should appear in the output.
      All outputs with a mapped axis must have a non-None ``out_axes``
      specification (see :py:func:`vmap`).
    static_broadcasted_argnums: An int or collection of ints specifying which
      positional arguments to treat as static (compile-time constant).
      Calling the pmapped function with different values for these constants
      triggers recompilation; calling with fewer positional arguments than
      indicated by ``static_argnums`` raises an error. Each static argument
      is broadcast to all devices. Arguments that are not arrays or
      containers thereof must be marked static. Static arguments must be
      hashable (both ``__hash__`` and ``__eq__`` implemented) and should be
      immutable. Defaults to ().
    devices: This is an experimental feature and the API is likely to change.
      Optional, a sequence of Devices to map over, given identically in every
      process (and therefore including devices across processes). If
      specified, the mapped axis size must equal the number of devices in the
      sequence local to the given process. Nested :py:func:`pmap` s with
      ``devices`` specified in either the inner or outer :py:func:`pmap` are
      not yet supported.
    backend: This is an experimental feature and the API is likely to change.
      Optional, a string representing the XLA backend: 'cpu', 'gpu', or
      'tpu'.
    axis_size: Optional; the size of the mapped axis.
    donate_argnums: Specify which arguments are "donated" to the computation.
      Donating arguments is safe once you no longer need them; XLA may reuse
      donated buffers (e.g. recycling an input buffer for an output) to
      reduce memory. Do not reuse buffers you donate — JAX raises an error if
      you try to.
    global_arg_shapes: Optional, must be set when using pmap(sharded_jit) and
      the partitioned values span multiple processes. The global
      cross-process per-replica shape of each argument, i.e. not including
      the leading pmapped dimension; None for replicated arguments. This API
      is likely to change in the future.

  Returns:
    A parallelized version of ``fun`` with arguments that correspond to those
    of ``fun`` but with extra array axes at positions indicated by
    ``in_axes`` and with output that has an additional leading array axis
    (with the same size).

  For example, assuming 8 XLA devices are available, :py:func:`pmap` can be
  used as a map along a leading array axis:

  >>> import jax.numpy as jnp
  >>>
  >>> out = pmap(lambda x: x ** 2)(jnp.arange(8))  # doctest: +SKIP
  >>> print(out)  # doctest: +SKIP
  [0, 1, 4, 9, 16, 25, 36, 49]

  As with :py:func:`vmap`, using ``None`` in ``in_axes`` indicates that an
  argument doesn't have an extra axis and should be broadcasted, rather than
  mapped, across the replicas. Collectives may refer to the mapped axis by
  name:

  >>> f = lambda x: x / jax.lax.psum(x, axis_name='i')
  >>> out = pmap(f, axis_name='i')(jnp.arange(4.))  # doctest: +SKIP
  >>> print(out.sum())  # doctest: +SKIP
  1.0
  """
  # Both implementations share signature and semantics; the C++ one adds a
  # fast dispatch path and is gated behind an experimental flag.
  impl = _cpp_pmap if FLAGS.experimental_cpp_pmap else _python_pmap
  return impl(
      fun,
      axis_name,
      in_axes=in_axes,
      out_axes=out_axes,
      static_broadcasted_argnums=static_broadcasted_argnums,
      devices=devices,
      backend=backend,
      axis_size=axis_size,
      donate_argnums=donate_argnums,
      global_arg_shapes=global_arg_shapes)
class PmapCallInfo(NamedTuple):
  """Preprocessed pmap call data produced by _prepare_pmap."""
  flat_fun: lu.WrappedFun  # fun wrapped to take/return flat lists of leaves
  in_tree: PyTreeDef  # pytree structure of (dynamic args, kwargs)
  # NOTE(review): _prepare_pmap stores the out-tree *thunk* from flatten_fun
  # here despite the PyTreeDef annotation — confirm before tightening types.
  out_tree: PyTreeDef
  flat_args: Sequence[Any]  # flattened argument leaves
  donated_invars: Sequence[bool]  # per-flat-arg donation flags
  in_axes_flat: Sequence[Optional[int]]  # per-flat-arg mapped axis (None = unmapped)
  local_axis_size: int  # mapped axis size computed from the local arguments
  global_arg_shapes_flat: Sequence[Optional[Tuple[int, ...]]]  # per-arg global shapes (None = replicated)
  out_axes_thunk: HashableFunction  # lazily computes the flat out_axes
def _prepare_pmap(fun, in_axes, out_axes, static_broadcasted_tuple,
                  donate_tuple, global_arg_shapes, args, kwargs):
  """Flatten and validate the arguments of a single pmap call.

  Wraps ``fun``, strips static-broadcast arguments, flattens the dynamic
  arguments together with their ``in_axes``/``global_arg_shapes`` specs,
  computes donation flags and the local mapped-axis size, and packages
  everything as a PmapCallInfo for execution (pxla.xla_pmap) or lowering.

  Raises:
    ValueError: if a static argnum is out of range for the given args.
    NotImplementedError: if any out_axes leaf is None.
  """
  f = lu.wrap_init(fun)
  if static_broadcasted_tuple:
    if max(static_broadcasted_tuple) >= len(args):
      raise ValueError(
          f"pmapped function has static_broadcasted_argnums={static_broadcasted_tuple}"
          f" but was called with only {len(args)} positional "
          f"argument{'s' if len(args) > 1 else ''}. "
          "All static broadcasted arguments must be passed positionally.")
    dyn_argnums = [i for i in range(len(args))
                   if i not in static_broadcasted_tuple]
    f, dyn_args = argnums_partial(f, dyn_argnums, args)
    # Per-argument specs given as tuples must be re-indexed to cover only the
    # dynamic (non-static) arguments.
    if isinstance(in_axes, tuple):
      dyn_in_axes = tuple(in_axes[i] for i in dyn_argnums)
    else:
      dyn_in_axes = in_axes
    if isinstance(global_arg_shapes, tuple):
      dyn_global_arg_shapes = tuple(global_arg_shapes[i] for i in dyn_argnums)
    else:
      dyn_global_arg_shapes = global_arg_shapes
  else:
    dyn_args, dyn_in_axes = args, in_axes
    dyn_global_arg_shapes = global_arg_shapes
  args, in_tree = tree_flatten((dyn_args, kwargs))

  if donate_tuple:
    donated_invars = donation_vector(donate_tuple, dyn_args, kwargs)
  else:
    donated_invars = (False,) * len(args)
  # Keyword arguments are always mapped over their leading axis (axis 0) and
  # never partitioned (global shape None).
  in_axes_flat = tuple(flatten_axes("pmap in_axes", in_tree, (dyn_in_axes, 0)))
  global_arg_shapes_flat = tuple(flatten_axes(
      "pmap global_arg_shapes", in_tree, (dyn_global_arg_shapes, None),
      kws=True))
  local_axis_size = _mapped_axis_size(
      in_tree, args, in_axes_flat, "pmap", kws=True)

  for arg in args:
    _check_arg(arg)

  flat_fun, out_tree = flatten_fun(f, in_tree)

  # BUG FIX: tree_flatten returns a (leaves, treedef) pair, so iterating over
  # its result never inspected the out_axes leaves and this check was a
  # no-op. tree_leaves yields the leaves themselves.
  if any(out_axis is None for out_axis in tree_leaves(out_axes)):
    raise NotImplementedError("None out_axes in pmap are not supported yet")

  # NOTE: We don't put out_tree() in the closure, because it's (1) non-hashable,
  # (2) depends deterministically on flat_fun (at least that's the assumption
  # that we make).
  if out_axes == 0:
    # TODO(apaszke,mattjj): flatten_axes assumes that the output pytree is
    # functorial (i.e. it can hold leaves of any type), but some user code
    # breaks this assumption. This is a stop-gap solution to keep the old
    # out_axes == 0 path working as we look for a better solution.
    out_axes_thunk = HashableFunction(
        lambda: (0,) * out_tree().num_leaves,
        closure=out_axes)
  else:
    # out_axes_thunk closes over the out_axes, they are flattened here to make
    # them hashable.
    out_axes_leaves, out_axes_treedef = tree_flatten(out_axes)
    out_axes_thunk = HashableFunction(
        lambda: tuple(flatten_axes("pmap out_axes", out_tree(),
                                   tree_unflatten(out_axes_treedef,
                                                  list(out_axes_leaves)))),
        closure=(tuple(out_axes_leaves), out_axes_treedef))

  return PmapCallInfo(flat_fun=flat_fun,
                      in_tree=in_tree,
                      out_tree=out_tree,
                      flat_args=args,
                      donated_invars=donated_invars,
                      in_axes_flat=in_axes_flat,
                      local_axis_size=local_axis_size,
                      global_arg_shapes_flat=global_arg_shapes_flat,
                      out_axes_thunk=out_axes_thunk)
def _get_f_mapped(
    *,
    fun: F,
    axis_name: Optional[AxisName],
    in_axes=0,
    out_axes=0,
    static_broadcasted_tuple: Tuple[int, ...],
    devices: Optional[Sequence[xc.Device]],
    backend: Optional[str],
    axis_size: Optional[int],
    donate_tuple: Tuple[int, ...],
    global_arg_shapes: Optional[Tuple[Tuple[int, ...], ...]],
):
  """Build a callable that flattens its args and binds pxla.xla_pmap.

  The returned ``f_pmapped`` yields ``(out_tree_thunk, flat_outputs)`` rather
  than an unflattened result, so the callers (_python_pmap and _cpp_pmap)
  can both unflatten and inspect the flat outputs.
  """
  def f_pmapped(*args, **kwargs):
    # Flatten/validate the call's arguments once per invocation.
    p = _prepare_pmap(
        fun, in_axes, out_axes, static_broadcasted_tuple, donate_tuple,
        global_arg_shapes, args, kwargs)
    out = pxla.xla_pmap(
        p.flat_fun, *p.flat_args, backend=backend, axis_name=axis_name,
        axis_size=p.local_axis_size, global_axis_size=axis_size,
        devices=None if devices is None else tuple(devices),
        in_axes=p.in_axes_flat, out_axes_thunk=p.out_axes_thunk,
        name=p.flat_fun.__name__, donated_invars=p.donated_invars,
        global_arg_shapes=p.global_arg_shapes_flat)
    return p.out_tree, out
  return f_pmapped
def _shared_code_pmap(fun, axis_name, static_broadcasted_argnums,
                      donate_argnums, in_axes, out_axes):
  """Argument normalization and validation shared by both pmap front-ends."""
  # axis_size is an optional integer representing the global axis size. The
  # aggregate size (across all processes) size of the mapped axis must match
  # the given value.
  _check_callable(fun)
  if axis_name is None:
    axis_name = core._TempAxisName(fun)
  static_broadcasted_tuple = _ensure_index_tuple(static_broadcasted_argnums)
  donate_tuple = rebase_donate_argnums(
      _ensure_index_tuple(donate_argnums), static_broadcasted_tuple)
  # `type(leaf) is int` (rather than isinstance) excludes bool and other int
  # subclasses from being accepted as axis specifications.
  for spec_name, spec in (("in_axes", in_axes), ("out_axes", out_axes)):
    if not all(type(leaf) is int for leaf in tree_leaves(spec)):
      raise TypeError(f"pmap {spec_name} must be an int, None, or (nested) container "
                      f"with those types as leaves, but got {spec}.")
  return axis_name, static_broadcasted_tuple, donate_tuple
def _python_pmap(
    fun: F,
    axis_name: Optional[AxisName] = None,
    *,
    in_axes=0,
    out_axes=0,
    static_broadcasted_argnums: Union[int, Iterable[int]] = (),
    devices: Optional[Sequence[xc.Device]] = None,
    backend: Optional[str] = None,
    axis_size: Optional[int] = None,
    donate_argnums: Union[int, Iterable[int]] = (),
    global_arg_shapes: Optional[Tuple[Tuple[int, ...], ...]] = None,
) -> F:
  """The Python only implementation."""
  axis_name, static_broadcasted_tuple, donate_tuple = _shared_code_pmap(
      fun, axis_name, static_broadcasted_argnums, donate_argnums, in_axes,
      out_axes)

  @wraps(fun)
  @api_boundary
  def f_pmapped(*args, **kwargs):
    # Build the flat mapped callable and unflatten its outputs.
    pmapped = _get_f_mapped(
        fun=fun,
        axis_name=axis_name,
        in_axes=in_axes,
        out_axes=out_axes,
        static_broadcasted_tuple=static_broadcasted_tuple,
        devices=devices,
        backend=backend,
        axis_size=axis_size,
        global_arg_shapes=global_arg_shapes,
        donate_tuple=donate_tuple)
    out_tree_thunk, flat_out = pmapped(*args, **kwargs)
    return tree_unflatten(out_tree_thunk(), flat_out)

  # Expose ahead-of-time lowering alongside the call path.
  f_pmapped.lower = _pmap_lower(
      fun, axis_name, in_axes, out_axes, static_broadcasted_tuple, devices,
      backend, axis_size, global_arg_shapes, donate_tuple)
  return f_pmapped
class _PmapFastpathData(NamedTuple):
  """Data handed to the C++ pmap layer to drive its fast dispatch path."""
  version: int  # For forward and backward compatibility
  xla_executable: xla.XlaExecutable
  in_handler: Any
  out_handler: Any
  out_pytree_def: Any
  # Data needed to handle the inputs.
  input_sharding_specs: Sequence[pxla.ShardingSpec]
  input_devices: Sequence[xc.Device]
  input_indices: Sequence[pxla.Index]
  # Data needed to build the ShardedDeviceArray from C++.
  out_sharding_specs: Sequence[pxla.ShardingSpec]
  out_indices: Sequence[pxla.Index]
  out_avals: Sequence[Any]
def _cpp_pmap(
    fun: F,
    axis_name: Optional[AxisName] = None,
    *,
    in_axes=0,
    out_axes=0,
    static_broadcasted_argnums: Union[int, Iterable[int]] = (),
    devices: Optional[Sequence[xc.Device]] = None,
    backend: Optional[str] = None,
    axis_size: Optional[int] = None,
    donate_argnums: Union[int, Iterable[int]] = (),
    global_arg_shapes: Optional[Tuple[Tuple[int, ...], ...]] = None,
) -> F:
  """pmap implementation backed by the C++ pmap_lib fast path.

  Same signature and semantics as _python_pmap. On a cache miss the full
  Python dispatch runs once and, when the result is eligible, returns a
  _PmapFastpathData so the C++ layer can execute subsequent calls without
  re-entering Python.
  """
  axis_name, static_broadcasted_tuple, donate_tuple = _shared_code_pmap(
      fun, axis_name, static_broadcasted_argnums, donate_argnums, in_axes,
      out_axes)
  del static_broadcasted_argnums, donate_argnums
  @api_boundary
  def cache_miss(*args, **kwargs):
    # Slow path: run the regular Python pmap machinery once.
    f_pmapped_ = _get_f_mapped(
        fun=fun,
        axis_name=axis_name,
        in_axes=in_axes,
        out_axes=out_axes,
        static_broadcasted_tuple=static_broadcasted_tuple,
        devices=devices,
        backend=backend,
        axis_size=axis_size,
        global_arg_shapes=global_arg_shapes,
        donate_tuple=donate_tuple)
    out_tree, out_flat = f_pmapped_(*args, **kwargs)
    out_pytree_def = out_tree()
    out = tree_unflatten(out_pytree_def, out_flat)
    ### Decide whether we can support the C++ fast path
    execute: Optional[functools.partial] = None
    # Relies on the call above having just populated the executable cache.
    execute = pxla.parallel_callable.most_recent_entry()
    use_fastpath = (
        execute is not None and
        # We don't support JAX extension backends. In particular, some
        # extentions do not return a partial with a `func` attribute.
        getattr(execute[0], "func", None) is pxla.execute_replicated and
        # No tracers in the outputs. Checking for ShardedDeviceArray should be
        # sufficient, but we use the more general `DeviceArray`.
        all(isinstance(x, device_array.DeviceArray) for x in out_flat))
    ### If we can use the fastpath, we return required info to the caller.
    if use_fastpath:
      xla_executable, backend_, in_handler, out_handler = execute[0].args
      fastpath_data = _PmapFastpathData(
          version=1,
          xla_executable=xla_executable,
          in_handler=in_handler,
          out_handler=out_handler,
          out_pytree_def=out_pytree_def,
          input_sharding_specs=in_handler.sharding_specs,
          input_devices=in_handler.local_devices,
          input_indices=in_handler.input_indices,
          out_sharding_specs=out_handler.out_specs,
          out_indices=out_handler.out_indices,
          out_avals=out_handler.unmapped_local_out_avals,
      )
    else:
      fastpath_data = None
    return out, fastpath_data
  cpp_mapped_f = pmap_lib.pmap(fun, cache_miss,
                               static_broadcasted_tuple, pxla._shard_arg)
  f_pmapped = wraps(fun)(cpp_mapped_f)
  f_pmapped.lower = _pmap_lower(
      fun, axis_name, in_axes, out_axes, static_broadcasted_tuple, devices,
      backend, axis_size, global_arg_shapes, donate_tuple)
  return f_pmapped
def _pmap_lower(fun, axis_name, in_axes, out_axes, static_broadcasted_tuple,
                devices, backend, axis_size, global_arg_shapes, donate_tuple):
  """Make a ``lower`` method for pmapped functions."""
  # If the function we returned from ``pmap`` were a class instance,
  # this might naturally be a method, with ``fun`` as a ``self`` and
  # all the other arguments stored as attributes.
  @api_boundary
  def lower(*args, **kwargs) -> Lowered:
    """Lower a parallel-mapped form of this function for the given arguments.

    A parallel-mapped and lowered function is staged out of Python and
    translated to a compiler's input language, possibly in a
    backend-dependent manner. It is ready for compilation but is not yet
    compiled. It represents a function intended for SPMD execution on
    multiple devices.

    Returns:
      A ``Lowered`` instance representing the post-map lowering.
    """
    # Reuse the same flatten/validate step as execution, so lowering sees
    # exactly the arguments a real pmap call would.
    p = _prepare_pmap(
        fun, in_axes, out_axes, static_broadcasted_tuple, donate_tuple,
        global_arg_shapes, args, kwargs)
    abstract_args = map(xla.abstractify, p.flat_args)
    computation = pxla.lower_parallel_callable(
        p.flat_fun, backend, axis_name,
        axis_size=p.local_axis_size, global_axis_size=axis_size,
        devices=None if devices is None else tuple(devices),
        name=p.flat_fun.__name__,
        in_axes=p.in_axes_flat,
        out_axes_thunk=p.out_axes_thunk,
        donated_invars=p.donated_invars,
        global_arg_shapes=p.global_arg_shapes_flat,
        avals=abstract_args)
    return Lowered(computation, p.in_tree, p.out_tree(), donate_tuple)
  return lower
def mask(fun: Callable, in_shapes, out_shape=None) -> Callable:
  """Wrap ``fun`` for execution on padded arrays with logical shape specs.

  The returned callable has signature ``wrapped_fun(args, logical_env)``:
  ``args`` must match the tree structure of ``in_shapes``, and
  ``logical_env`` maps shape-variable names (as used in the specs) to their
  logical sizes. If ``out_shape`` is None, it returns a pair of the outputs
  and their logical output shapes (as ShapeDtypeStructs); otherwise the
  outputs are checked against ``out_shape`` and returned alone.
  """
  _check_callable(fun)
  unique_ids = masking.UniqueIds()
  in_specs, in_shapes_tree = tree_flatten(in_shapes)
  in_specs = map(masking.parse_spec, in_specs)
  in_specs = map(partial(masking.remap_ids, unique_ids), in_specs)
  if out_shape is not None:
    out_specs, out_spec_tree = tree_flatten(out_shape)
    out_specs = map(masking.parse_spec, out_specs)
    out_specs = map(partial(masking.remap_ids, unique_ids), out_specs)
  def wrapped_fun(args, logical_env):
    args_flat, in_tree = tree_flatten(args)
    if in_tree != in_shapes_tree:
      raise TypeError(f"Tree mismatch: Input {in_tree} and shape spec {in_shapes_tree}.")
    # Re-key the user-supplied env by the interned shape-variable ids.
    logical_env = {unique_ids[name] : val for name, val in logical_env.items()}
    # NOTE: this local `in_shapes` deliberately shadows the outer parameter;
    # it holds the finalized per-argument specs for this call.
    in_shapes = map(masking.finalize_spec, in_specs, map(np.shape, args_flat))
    padded_env = masking.bind_shapes(in_shapes, [x.shape for x in args_flat])
    f = lu.wrap_init(fun)
    flat_fun, out_tree_thunk = flatten_fun_nokwargs(f, in_tree)
    outs, out_shapes = masking.mask_fun(
        flat_fun, logical_env, padded_env, args_flat, in_shapes)
    out_tree = out_tree_thunk()
    if out_shape is None:
      # No declared output spec: report the computed logical output shapes.
      def logical_shape(poly_shape, padded_val):
        shape = masking.eval_poly_shape(poly_shape, logical_env)
        return ShapeDtypeStruct(shape, core.get_aval(padded_val).dtype)
      out_logicals = map(logical_shape, out_shapes, outs)
      return tree_unflatten(out_tree, outs), tree_unflatten(out_tree, out_logicals)
    else:
      # Check both the symbolic output shapes and the concrete padded shapes
      # against the declared out_shape specs.
      masking.check_shapes(out_specs, out_spec_tree, list(out_shapes), out_tree)
      def padded_spec(shape_spec):
        return tuple(dim if dim is masking._monomorphic_dim else
                     masking.eval_poly(dim, padded_env) for dim in shape_spec)
      masking.check_shapes(map(padded_spec, out_specs), out_spec_tree,
                           map(np.shape, outs), out_tree, "Padded output")
      return tree_unflatten(out_tree, outs)
  return wrapped_fun
@curry
def shapecheck(in_shapes, out_shape, fun: Callable):
  """Statically check that ``fun`` maps ``in_shapes`` to ``out_shape``.

  Abstractly evaluates ``fun`` on ShapedArrays built from the parsed
  ``in_shapes`` specs (at float32 dtype) and compares the resulting output
  shapes against the parsed ``out_shape`` specs via masking.check_shapes.
  Returns ``fun`` unchanged, so (being curried) this is usable as a
  decorator: ``@shapecheck(in_shapes, out_shape)``.
  """
  _check_callable(fun)
  in_shapes, in_tree = tree_flatten(in_shapes)
  in_shapes = map(masking.parse_spec, in_shapes)
  out_specs, out_spec_tree = tree_flatten(out_shape)
  out_specs = map(masking.parse_spec, out_specs)
  flat_fun, out_tree_thunk = flatten_fun_nokwargs(lu.wrap_init(fun), in_tree)
  # Shapes only: dtype is fixed to float32 for the abstract evaluation.
  avals = map(partial(ShapedArray, dtype=np.float32), in_shapes)
  out_shapes = [o.shape for o in pe.abstract_eval_fun(flat_fun.call_wrapped, *avals)]
  masking.check_shapes(map(tuple, out_specs), out_spec_tree,
                       map(tuple, out_shapes), out_tree_thunk())
  return fun
def jvp(fun: Callable, primals, tangents) -> Tuple[Any, Any]:
  """Computes a (forward-mode) Jacobian-vector product of ``fun``.

  Args:
    fun: Function to be differentiated. Its arguments should be arrays,
      scalars, or standard Python containers of arrays or scalars, and it
      should return the same kinds of values.
    primals: The primal values at which the Jacobian of ``fun`` should be
      evaluated: a tuple or list whose length equals the number of
      positional parameters of ``fun``.
    tangents: The tangent vector for which the Jacobian-vector product
      should be evaluated: a tuple or list with the same tree structure and
      array shapes as ``primals``.

  Returns:
    A ``(primals_out, tangents_out)`` pair, where ``primals_out`` is
    ``fun(*primals)`` and ``tangents_out`` is the Jacobian-vector product of
    ``fun`` evaluated at ``primals`` with ``tangents``; ``tangents_out`` has
    the same Python tree structure and shapes as ``primals_out``.

  For example:

  >>> import jax
  >>>
  >>> y, v = jax.jvp(jax.numpy.sin, (0.1,), (0.2,))
  >>> print(y)
  0.09983342
  >>> print(v)
  0.19900084
  """
  _check_callable(fun)
  wrapped_fun = lu.wrap_init(fun)
  return _jvp(wrapped_fun, primals, tangents)
def _jvp(fun: lu.WrappedFun, primals, tangents):
  """Variant of jvp() that takes an lu.WrappedFun.

  Validates that ``primals`` and ``tangents`` are tuples/lists with matching
  tree structure, dtypes (in tangent space), and shapes, then applies the
  JVP transformation and unflattens the results.
  """
  if (not isinstance(primals, (tuple, list)) or
      not isinstance(tangents, (tuple, list))):
    raise TypeError("primal and tangent arguments to jax.jvp must be tuples or lists; "
                    f"found {type(primals).__name__} and {type(tangents).__name__}.")

  ps_flat, tree_def = tree_flatten(primals)
  ts_flat, tree_def_2 = tree_flatten(tangents)
  if tree_def != tree_def_2:
    raise TypeError("primal and tangent arguments to jax.jvp must have the same tree "
                    f"structure; primals have tree structure {tree_def} whereas tangents have "
                    f"tree structure {tree_def_2}.")
  for p, t in safe_zip(ps_flat, ts_flat):
    if core.primal_dtype_to_tangent_dtype(_dtype(p)) != _dtype(t):
      # BUG FIX: added the missing space before "Got" — the adjacent string
      # literals previously rendered as "...must be float0.Got primal...".
      raise TypeError("primal and tangent arguments to jax.jvp do not match; "
                      "dtypes must be equal, or in case of int/bool primal dtype "
                      "the tangent dtype must be float0. "
                      f"Got primal dtype {_dtype(p)} and so expected tangent dtype "
                      f"{core.primal_dtype_to_tangent_dtype(_dtype(p))}, but got "
                      f"tangent dtype {_dtype(t)} instead.")
    if np.shape(p) != np.shape(t):
      # BUG FIX: same missing-space problem ("...shapes;Got primal shape...").
      raise ValueError("jvp called with different primal and tangent shapes; "
                       f"Got primal shape {np.shape(p)} and tangent shape as {np.shape(t)}")

  flat_fun, out_tree = flatten_fun_nokwargs(fun, tree_def)
  out_primals, out_tangents = ad.jvp(flat_fun).call_wrapped(ps_flat, ts_flat)
  return (tree_unflatten(out_tree(), out_primals),
          tree_unflatten(out_tree(), out_tangents))
def linearize(fun: Callable, *primals) -> Tuple[Any, Callable]:
  """Produces a linear approximation to ``fun`` using :py:func:`jvp` and partial eval.

  Args:
    fun: Function to be differentiated. Its arguments should be arrays,
      scalars, or standard Python containers of arrays or scalars, and it
      should return the same kinds of values.
    primals: The primal values at which the Jacobian of ``fun`` should be
      evaluated: a tuple of arrays, scalars, or standard Python containers
      thereof, one entry per positional parameter of ``fun``.

  Returns:
    A pair where the first element is the value of ``f(*primals)`` and the
    second element is a function that evaluates the (forward-mode)
    Jacobian-vector product of ``fun`` at ``primals`` without re-doing the
    linearization work.

  In terms of values computed, :py:func:`linearize` behaves much like a
  curried :py:func:`jvp`, where these two code blocks compute the same
  values::

    y, out_tangent = jax.jvp(f, (x,), (in_tangent,))

    y, f_jvp = jax.linearize(f, x)
    out_tangent = f_jvp(in_tangent)

  The difference is that :py:func:`linearize` uses partial evaluation, so
  ``f`` is not re-linearized on calls to ``f_jvp``; in general that means
  memory usage scales with the size of the computation, much like in
  reverse-mode (indeed, :py:func:`linearize` has a similar signature to
  :py:func:`vjp`!). This is mainly useful if you want to apply ``f_jvp``
  many times at one linearization point; if all input tangents are known at
  once, vectorizing with :py:func:`vmap` over :py:func:`jvp` can be more
  efficient.

  Here's a more complete example of using :py:func:`linearize`:

  >>> import jax
  >>> import jax.numpy as jnp
  >>>
  >>> def f(x): return 3. * jnp.sin(x) + jnp.cos(x / 2.)
  ...
  >>> jax.jvp(f, (2.,), (3.,))
  (DeviceArray(3.26819, dtype=float32, weak_type=True), DeviceArray(-5.00753, dtype=float32, weak_type=True))
  >>> y, f_jvp = jax.linearize(f, 2.)
  >>> print(y)
  3.2681944
  >>> print(f_jvp(3.))
  -5.007528
  >>> print(f_jvp(4.))
  -6.676704
  """
  _check_callable(fun)
  wrapped = lu.wrap_init(fun)
  flat_primals, in_tree = tree_flatten((primals, {}))
  flat_fun, out_tree_thunk = flatten_fun(wrapped, in_tree)
  out_primals, out_pvals, jaxpr, consts = ad.linearize(flat_fun, *flat_primals)
  out_tree = out_tree_thunk()
  out_primal_py = tree_unflatten(out_tree, out_primals)
  primal_avals = [core.get_aval(p) for p in flat_primals]
  # Ensure that lifted_jvp is a PyTree
  lifted_jvp = Partial(partial(_lift_linearized, jaxpr, primal_avals,
                               (in_tree, out_tree), out_pvals), consts)
  return out_primal_py, lifted_jvp
def _lift_linearized(jaxpr, primal_avals, io_tree, out_pvals, consts, *py_args):
  """Apply a linearized jaxpr (from ad.linearize) to tangent pytree args.

  Validates each tangent against the corresponding primal aval's tangent
  space, evaluates the linear jaxpr on the flat tangents, and merges the
  results with the partially-known output values.
  """
  def fun(*tangents):
    tangent_avals = list(map(core.get_aval, tangents))
    for primal_aval, tangent_aval in zip(primal_avals, tangent_avals):
      # Reject tangents whose aval is incompatible with the primal's
      # tangent (vector) space.
      if not core.typecompat(primal_aval.at_least_vspace(), tangent_aval):
        raise ValueError("linearized function called on tangent values inconsistent with "
                         "the original primal values: "
                         f"got {tangent_aval} for primal aval {primal_aval}")
    tangents_out = eval_jaxpr(jaxpr, consts, *tangents)
    # Each output partial value merges its known part with the freshly
    # computed tangent output.
    return tuple(map(lambda out_pv, tan_out: out_pv.merge_with_known(tan_out),
                     out_pvals, tangents_out))
  return apply_flat_fun(fun, io_tree, *py_args)
def _vjp_pullback_wrapper(cotangent_dtypes, cotangent_shapes,
                          io_tree, fun, py_args):
  """Validate cotangent inputs, then apply the flat VJP function ``fun``.

  Args:
    cotangent_dtypes: expected tangent dtype for each flat primal output.
    cotangent_shapes: expected shape for each flat primal output.
    io_tree: ``(expected_cotangent_tree, result_tree)`` pair — the cotangent
      argument must match the primal outputs' tree structure, and results
      are unflattened into the primal inputs' structure.
    fun: the flat VJP callable (see _vjp).
    py_args: the cotangent pytree supplied by the user.

  Returns:
    The cotangents with respect to the primal inputs, unflattened.
  """
  in_tree_expected, out_tree = io_tree
  args, in_tree = tree_flatten(py_args)
  if in_tree != in_tree_expected:
    raise TypeError(f"Tree structure of cotangent input {in_tree}, does not match structure of "
                    f"primal output {in_tree_expected}.")
  for arg, ct_dtype, ct_shape in safe_zip(args, cotangent_dtypes, cotangent_shapes):
    expected_tangent_dtype = core.primal_dtype_to_tangent_dtype(_dtype(arg))
    if expected_tangent_dtype != ct_dtype:
      raise TypeError(
          f"Type of cotangent input to vjp pullback function ({ct_dtype}) is not "
          f"the expected tangent type ({expected_tangent_dtype}) of corresponding primal output "
          f"with dtype {_dtype(arg)}.")
    if np.shape(arg) != ct_shape:
      # BUG FIX: cotangents correspond to primal *outputs* (ct_shapes are
      # derived from out_primal in _vjp, and the dtype error above already
      # says "primal output"); the message previously said "primal input".
      raise ValueError(
          f"Shape of cotangent input to vjp pullback function {np.shape(arg)} "
          "must be the same as the shape of corresponding primal output "
          f"{ct_shape}.")
  ans = fun(*args)
  return tree_unflatten(out_tree, ans)
if sys.version_info >= (3, 8):
  # Python 3.8+: typing.Literal lets the type checker pick the right vjp
  # return type based on the value of ``has_aux``.
  from typing import Literal
  @overload  # type: ignore
  def vjp(fun: Callable[..., T],
          *primals: Any,
          has_aux: Literal[False] = False,
          reduce_axes: Sequence[AxisName] = ()) -> Tuple[T, Callable]:
    ...
  @overload
  def vjp(fun: Callable[..., Tuple[T, U]], *primals: Any,
          has_aux: Literal[True],
          reduce_axes: Sequence[AxisName] = ()) -> Tuple[T, Callable, U]:
    ...
else:
  # Pre-3.8 fallback: without Literal we can only overload on whether
  # ``has_aux`` is passed at all, losing some precision.
  @overload  # type: ignore
  def vjp(fun: Callable[..., T], *primals: Any) -> Tuple[T, Callable]:
    ...
  @overload
  def vjp(
      fun: Callable[..., Any], *primals: Any,
      has_aux: bool,
      reduce_axes: Sequence[AxisName] = ()
  ) -> Union[Tuple[Any, Callable], Tuple[Any, Callable, Any]]:
    ...
def vjp(  # type: ignore
    fun: Callable, *primals, has_aux: bool = False, reduce_axes=()
  ) -> Union[Tuple[Any, Callable], Tuple[Any, Callable, Any]]:
  """Compute a (reverse-mode) vector-Jacobian product of ``fun``.

  :py:func:`grad` is implemented as a special case of :py:func:`vjp`.

  Args:
    fun: Function to be differentiated. Its arguments should be arrays,
      scalars, or standard Python containers of arrays or scalars, and it
      should return the same kinds of values.
    primals: A sequence of primal values at which the Jacobian of ``fun``
      should be evaluated, one per positional parameter of ``fun``. Each
      primal value should be a tuple of arrays, scalar, or standard Python
      containers thereof.
    has_aux: Optional, bool. Indicates whether ``fun`` returns a pair where
      the first element is considered the output of the mathematical
      function to be differentiated and the second element is auxiliary
      data. Default False.
    reduce_axes: Optional, tuple of axis names. If an axis is listed here,
      and ``fun`` implicitly broadcasts a value over that axis, the backward
      pass will perform a ``psum`` of the corresponding gradient; otherwise
      the VJP will be per-example over named axes. For example, if
      ``'batch'`` is a named batch axis, ``vjp(f, *args,
      reduce_axes=('batch',))`` creates a VJP function that sums over the
      batch while ``vjp(f, *args)`` creates a per-example VJP.

  Returns:
    If ``has_aux`` is ``False``, a ``(primals_out, vjpfun)`` pair, where
    ``primals_out`` is ``fun(*primals)`` and ``vjpfun`` maps a cotangent
    vector with the same shape as ``primals_out`` to a tuple of cotangent
    vectors shaped like ``primals``, representing the vector-Jacobian
    product of ``fun`` evaluated at ``primals``. If ``has_aux`` is ``True``,
    a ``(primals_out, vjpfun, aux)`` tuple where ``aux`` is the auxiliary
    data returned by ``fun``.

  >>> import jax
  >>>
  >>> def f(x, y):
  ...   return jax.numpy.sin(x), jax.numpy.cos(y)
  ...
  >>> primals, f_vjp = jax.vjp(f, 0.5, 1.0)
  >>> xbar, ybar = f_vjp((-0.7, 0.3))
  >>> print(xbar)
  -0.61430776
  >>> print(ybar)
  -0.2524413
  """
  _check_callable(fun)
  reduce_axes = _ensure_str_tuple(reduce_axes)
  wrapped = lu.wrap_init(fun)
  return _vjp(wrapped, *primals, has_aux=has_aux, reduce_axes=reduce_axes)
def _vjp(fun: lu.WrappedFun, *primals, has_aux=False, reduce_axes=()):
  """Variant of vjp() that takes an lu.WrappedFun."""
  primals_flat, in_tree = tree_flatten(primals)
  for arg in primals_flat:
    _check_arg(arg)
  aux = aux_tree = None
  if has_aux:
    flat_fun, out_aux_trees = flatten_fun_nokwargs2(fun, in_tree)
    out_primals_flat, pullback_flat, aux = ad.vjp(
        flat_fun, primals_flat, has_aux=True, reduce_axes=reduce_axes)
    out_tree, aux_tree = out_aux_trees()
  else:
    flat_fun, out_tree_thunk = flatten_fun_nokwargs(fun, in_tree)
    out_primals_flat, pullback_flat = ad.vjp(
        flat_fun, primals_flat, reduce_axes=reduce_axes)
    out_tree = out_tree_thunk()
  out_primals = tree_unflatten(out_tree, out_primals_flat)
  # Record the cotangent dtypes/shapes the pullback will expect, so that the
  # wrapper can validate user-supplied cotangents.
  ct_dtypes = [core.primal_dtype_to_tangent_dtype(_dtype(p))
               for p in out_primals_flat]
  ct_shapes = [np.shape(p) for p in out_primals_flat]
  # Wrap in Partial so the pullback itself is a PyTree and can be passed from
  # the forward to the backward pass in a custom VJP.
  pullback = Partial(partial(_vjp_pullback_wrapper,
                             ct_dtypes, ct_shapes,
                             (out_tree, in_tree)),
                     pullback_flat)
  if has_aux:
    return out_primals, pullback, tree_unflatten(aux_tree, aux)
  return out_primals, pullback
def linear_transpose(fun: Callable, *primals, reduce_axes=()) -> Callable:
  """Transpose a function that is promised to be linear.

  For linear functions, this transformation is equivalent to ``vjp``, but
  avoids the overhead of computing the forward pass.

  The outputs of the transposed function will always have the exact same dtypes
  as ``primals``, even if some values are truncated (e.g., from complex to
  float, or from float64 to float32). To avoid truncation, use dtypes in
  ``primals`` that match the full range of desired outputs from the transposed
  function. Inputs and outputs must be either all inexact (float/complex)
  dtypes or all integer dtypes; mixing the two is not supported.

  Args:
    fun: the linear function to be transposed.
    *primals: a positional argument tuple of arrays, scalars, or (nested)
      standard Python containers (tuples, lists, dicts, namedtuples, i.e.,
      pytrees) of those types used for evaluating the shape/dtype of
      ``fun(*primals)``. These arguments may be real scalars/ndarrays, but that
      is not required: only the ``shape`` and ``dtype`` attributes are accessed.
      See below for an example. (Note that the duck-typed objects cannot be
      namedtuples because those are treated as standard Python containers.)
    reduce_axes: Optional, tuple of axis names. If an axis is listed here, and
      ``fun`` implicitly broadcasts a value over that axis, the backward pass
      will perform a ``psum`` of the corresponding cotangent. Otherwise, the
      transposed function will be per-example over named axes. For example, if
      ``'batch'`` is a named batch axis, ``linear_transpose(f, *args,
      reduce_axes=('batch',))`` will create a transpose function that sums over
      the batch while ``linear_transpose(f, *args)`` will create a per-example
      transpose.

  Returns:
    A callable that calculates the transpose of ``fun``. Valid input into this
    function must have the same shape/dtypes/structure as the result of
    ``fun(*primals)``. Output will be a tuple, with the same
    shape/dtypes/structure as ``primals``.

  >>> import jax
  >>> import types
  >>>
  >>> f = lambda x, y: 0.5 * x - 0.5 * y
  >>> scalar = types.SimpleNamespace(shape=(), dtype=np.dtype(np.float32))
  >>> f_transpose = jax.linear_transpose(f, scalar, scalar)
  >>> f_transpose(1.0)
  (DeviceArray(0.5, dtype=float32), DeviceArray(-0.5, dtype=float32))
  """
  reduce_axes = _ensure_str_tuple(reduce_axes)
  primals_flat, in_tree = tree_flatten(primals)
  flat_fun, out_tree = flatten_fun_nokwargs(lu.wrap_init(fun), in_tree)
  in_avals = map(shaped_abstractify, primals_flat)
  in_dtypes = map(dtypes.dtype, in_avals)

  in_pvals = map(pe.PartialVal.unknown, in_avals)
  # Trace once at the abstract level to obtain a jaxpr to transpose; only
  # shapes/dtypes of `primals` are used, no actual FLOPs are performed.
  jaxpr, out_pvals, consts = pe.trace_to_jaxpr(flat_fun, in_pvals,
                                               instantiate=True)
  out_avals, _ = unzip2(out_pvals)
  out_dtypes = map(dtypes.dtype, out_avals)
  # Transposition is defined for inexact -> inexact and integer -> integer
  # functions only; reject mixed dtype families up front.
  if not (all(dtypes.issubdtype(d, np.inexact) for d in in_dtypes + out_dtypes)
          or all(dtypes.issubdtype(d, np.integer)
                 for d in in_dtypes + out_dtypes)):
    raise TypeError("linear_transpose only supports [float or complex] -> "
                    "[float or complex], and integer -> integer functions, "
                    f"but got {in_dtypes} -> {out_dtypes}.")

  def transposed_fun(consts, out_cotangent):
    # Validate that the supplied cotangent matches the traced output
    # structure and types before running the backward pass.
    out_cotangents, out_tree2 = tree_flatten(out_cotangent)
    if out_tree() != out_tree2:
      raise TypeError("cotangent tree does not match function output, "
                      f"expected {out_tree()} but got {out_tree2}")
    if not all(map(core.typecheck, out_avals, out_cotangents)):
      raise TypeError("cotangent type does not match function output, "
                      f"expected {out_avals} but got {out_cotangents}")
    dummies = [ad.UndefinedPrimal(a) for a in in_avals]
    in_cotangents = map(
        ad.instantiate_zeros,
        ad.backward_pass(jaxpr, reduce_axes, consts, dummies, out_cotangents))
    return tree_unflatten(in_tree, in_cotangents)

  # Ensure that transposed_fun is a PyTree
  return Partial(transposed_fun, consts)
def make_jaxpr(fun: Callable,
               static_argnums: Union[int, Iterable[int]] = (),
               axis_env: Optional[Sequence[Tuple[AxisName, int]]] = None,
               return_shape: bool = False,
               ) -> Callable[..., core.ClosedJaxpr]:
  """Creates a function that produces its jaxpr given example args.

  A jaxpr is JAX's intermediate representation for program traces, based on
  the simply-typed first-order lambda calculus with let-bindings. The wrapped
  function traces ``fun`` abstracted to :py:class:`ShapedArray` level and
  returns the resulting ``ClosedJaxpr``, which can be inspected to understand
  what JAX is doing internally.

  Args:
    fun: The function whose ``jaxpr`` is to be computed. Its positional
      arguments and return value should be arrays, scalars, or standard Python
      containers (tuple/list/dict) thereof.
    static_argnums: See the :py:func:`jax.jit` docstring.
    axis_env: Optional, a sequence of ``(axis_name, size)`` pairs specifying
      the axis name/size environment that would be set up by applications of
      :py:func:`jax.pmap`; useful when lowering functions that involve
      parallel communication collectives.
    return_shape: Optional boolean, defaults to ``False``. If ``True``, the
      wrapped function instead returns a pair where the first element is the
      ``ClosedJaxpr`` and the second element is a pytree with the same
      structure as the output of ``fun``, whose leaves have ``shape``,
      ``dtype``, and ``named_shape`` attributes.

  Returns:
    A wrapped version of ``fun`` that when applied to example arguments returns
    a ``ClosedJaxpr`` representation of ``fun`` on those arguments (optionally
    paired with the output shape pytree when ``return_shape`` is ``True``).

  >>> import jax
  >>>
  >>> def f(x): return jax.numpy.sin(jax.numpy.cos(x))
  >>> jax.make_jaxpr(f)(3.0)
  { lambda ; a:f32[]. let b:f32[] = cos a; c:f32[] = sin b in (c,) }
  """
  _check_callable(fun)
  static_argnums = _ensure_index_tuple(static_argnums)

  @wraps(fun)
  @api_boundary
  def jaxpr_maker(*args, **kwargs):
    f = lu.wrap_init(fun)
    if static_argnums:
      # Close over the static arguments; only the dynamic ones get traced.
      dyn_argnums = [i for i in range(len(args)) if i not in static_argnums]
      f, args = argnums_partial(f, dyn_argnums, args)
    flat_args, in_tree = tree_flatten((args, kwargs))
    flat_fun, out_tree = flatten_fun(f, in_tree)
    avals = [shaped_abstractify(arg) for arg in flat_args]
    with ExitStack() as stack:
      # Install any requested named-axis environment for the duration of
      # tracing, mirroring what jax.pmap would set up.
      for axis_name, size in axis_env or []:
        stack.enter_context(core.extend_axis_env(axis_name, size, None))
      jaxpr, out_avals, consts = pe.trace_to_jaxpr_dynamic(flat_fun, avals)
    closed_jaxpr = core.ClosedJaxpr(jaxpr, consts)
    if return_shape:
      out_shapes_flat = [ShapeDtypeStruct(a.shape, a.dtype, a.named_shape)
                         for a in out_avals]
      return closed_jaxpr, tree_unflatten(out_tree(), out_shapes_flat)
    return closed_jaxpr

  jaxpr_maker.__name__ = f"make_jaxpr({jaxpr_maker.__name__})"
  return jaxpr_maker
def device_put(x, device: Optional[xc.Device] = None):
  """Transfers ``x`` to ``device``.

  Args:
    x: An array, scalar, or (nested) standard Python container thereof.
    device: The (optional) :py:class:`Device` to which ``x`` should be
      transferred. If given, then the result is committed to the device.

  If ``device`` is ``None``, this behaves like the identity function for
  operands already on any device, and otherwise transfers the data to the
  default device, uncommitted. For more details on data placement see the
  :ref:`FAQ on data placement <faq-data-placement>`.

  Returns:
    A copy of ``x`` that resides on ``device``.
  """
  def _put_leaf(leaf):
    return dispatch.device_put_p.bind(leaf, device=device)
  return tree_map(_put_leaf, x)
def device_put_sharded(shards: Sequence[Any], devices: Sequence[xc.Device]):
  """Transfer array shards to specified devices and form ShardedDeviceArray(s).

  Args:
    shards: A sequence of arrays, scalars, or (nested) standard Python
      containers thereof representing the shards to be stacked together to
      form the output; ``len(shards)`` must equal ``len(devices)``. When the
      entries are containers they must all share the same tree structure, and
      stacking happens independently at each leaf.
    devices: A sequence of :py:class:`Device` instances; each shard in
      ``shards`` is transferred to the corresponding device.

  Returns:
    A ShardedDeviceArray or (nested) Python container thereof representing the
    elements of ``shards`` stacked together, with each shard backed by
    physical device memory specified by the corresponding entry in
    ``devices``.

  See Also:
    - device_put
    - device_put_replicated
  """
  # TODO(jakevdp): provide a default for devices that considers both local
  # devices and pods
  if not isinstance(shards, Sequence):
    raise ValueError("device_put_sharded `shards` input must be a sequence; "
                     f"got {type(shards)}")
  if len(shards) != len(devices):
    raise ValueError(f"len(shards) = {len(shards)} must equal "
                     f"len(devices) = {len(devices)}.")

  def _stack_on_devices(*leaves):
    leaf_avals = [core.raise_to_shaped(core.get_aval(leaf)) for leaf in leaves]
    # All shards of one leaf must agree in shape and dtype; report the first
    # adjacent mismatching pair.
    for a1, a2 in zip(leaf_avals[:-1], leaf_avals[1:]):
      if a1 != a2:
        raise ValueError("the shards passed to device_put_sharded must have "
                         f"consistent shape and dtype, but got {a1} and {a2}.")
    stacked_aval = leaf_avals[0].update(
        shape=(len(devices),) + leaf_avals[0].shape)
    buffers = [buf for leaf, dev in zip(leaves, devices)
               for buf in dispatch.device_put(leaf, dev)]
    return pxla.make_sharded_device_array(stacked_aval, None, buffers)

  return tree_multimap(_stack_on_devices, *shards)
def device_put_replicated(x: Any, devices: Sequence[xc.Device]):
  """Transfer array(s) to each specified device and form ShardedDeviceArray(s).

  Args:
    x: an array, scalar, or (nested) standard Python container thereof
      representing the array to be replicated to form the output.
    devices: A non-empty sequence of :py:class:`Device` instances to which
      ``x`` will be transferred.

  Returns:
    A ShardedDeviceArray or (nested) Python container thereof representing the
    value of ``x`` broadcasted along a new leading axis of size
    ``len(devices)``, with each slice along that new leading axis backed by
    memory on the device specified by the corresponding entry in ``devices``.

  See Also:
    - device_put
    - device_put_sharded
  """
  if not isinstance(devices, Sequence) or not devices:
    raise ValueError("`devices` argument to `device_put_replicated must be "
                     "a non-empty sequence.")

  def _replicate_leaf(leaf):
    base_aval = core.raise_to_shaped(core.get_aval(leaf))
    aval = core.unmapped_aval(len(devices), core.no_axis_name, 0, base_aval)
    assert (isinstance(aval, core.ShapedArray) and
            len(xla.aval_to_xla_shapes(aval)) == 1)
    # Transfer once, then replicate by device-to-device copies, which avoids
    # repeated host-to-device transfers.
    first_buf, = dispatch.device_put(leaf, devices[0])
    bufs = [first_buf] + [first_buf.copy_to_device(d) for d in devices[1:]]
    return pxla.make_sharded_device_array(aval, None, bufs)

  return tree_map(_replicate_leaf, x)
# TODO(mattjj): consider revising
def _device_get(x):
  # Tracers flow through untouched; only concrete values are copied to host.
  if isinstance(x, core.Tracer):
    return x
  try:
    copy_method = x.copy
  except AttributeError:
    # Objects without a `copy` attribute (e.g. plain Python scalars) pass
    # through unchanged.
    return x
  return copy_method()
def device_get(x: Any):
  """Transfer ``x`` to host.

  If ``x`` is a pytree, the individual buffers are copied in parallel: an
  asynchronous host copy is started for every leaf before any result is
  awaited.

  Args:
    x: An array, scalar, DeviceArray or (nested) standard Python container
      thereof representing the array to be transferred to host.

  Returns:
    An array or (nested) Python container thereof representing the value of
    ``x``.

  See Also:
    - device_put
    - device_put_sharded
    - device_put_replicated
  """
  # Kick off async device-to-host copies for all leaves first so the
  # transfers overlap; leaves without the method are skipped.
  for leaf in tree_leaves(x):
    try:
      leaf.copy_to_host_async()
    except AttributeError:
      pass
  return tree_map(_device_get, x)
def _check_arg(arg):
  """Raise TypeError unless `arg` is a tracer or a valid JAX type."""
  is_valid = isinstance(arg, core.Tracer) or _valid_jaxtype(arg)
  if not is_valid:
    raise TypeError(f"Argument '{arg}' of type {type(arg)} is not a valid JAX type.")
# TODO(mattjj,necula): this duplicates code in core.valid_jaxtype, but one
# internal user relies on it for duck-typing. must fix downstream user!
def _valid_jaxtype(arg):
  """Return True if `arg` can be abstracted to an XLA value, else False."""
  try:
    xla.abstractify(arg)  # faster than core.get_aval
  except TypeError:
    return False
  return True
class ShapeDtypeStruct:
  """A lightweight stand-in for an array: only shape, dtype and named shape.

  Useful for specifying inputs to shape-level APIs such as
  :func:`jax.eval_shape` without materializing any array data.
  """
  __slots__ = ["shape", "dtype", "named_shape"]

  def __init__(self, shape, dtype, named_shape=None):
    self.shape = shape
    # Normalize dtype-likes (e.g. np.float32, "float32") to an np.dtype.
    self.dtype = np.dtype(dtype)
    self.named_shape = {} if named_shape is None else dict(named_shape)

  size = property(lambda self: prod(self.shape))
  ndim = property(lambda self: len(self.shape))

  def __len__(self):
    try:
      return self.shape[0]
    except IndexError as e:
      raise TypeError("len() of unsized object") from e  # same as numpy error

  def __repr__(self):
    ns = f", named_shape={self.named_shape}" if self.named_shape else ""
    return f"{type(self).__name__}(shape={self.shape}, dtype={self.dtype.name}{ns})"

  __str__ = __repr__

  def __eq__(self, other):
    if not isinstance(other, ShapeDtypeStruct):
      # Defer to the other operand's reflected __eq__ rather than asserting
      # inequality outright; Python falls back to False if neither side knows.
      return NotImplemented
    return (other.shape, other.dtype, other.named_shape) == (
        self.shape, self.dtype, self.named_shape)

  def __hash__(self):
    # TODO(frostig): avoid the conversion from dict by addressing
    # https://github.com/google/jax/issues/8182
    named = frozenset(self.named_shape.items())
    return hash((self.shape, self.dtype, named))
def eval_shape(fun: Callable, *args, **kwargs):
  """Compute the shape/dtype of ``fun`` without any FLOPs.

  Abstractly evaluates ``fun`` so that only shape/dtype information flows
  through. The result is equivalent to::

    jax.tree_util.tree_map(
        lambda x: ShapeDtypeStruct(x.shape, x.dtype), fun(*args, **kwargs))

  but uses JAX's abstract interpretation machinery instead of running ``fun``,
  so no actual computation is performed. Shape errors raised by evaluating
  ``fun(*args, **kwargs)`` are still raised.

  Args:
    fun: The function whose output shape should be evaluated.
    *args: a positional argument tuple of arrays, scalars, or (nested)
      standard Python containers (pytrees) of those types. Only the ``shape``
      and ``dtype`` attributes are accessed, so duck-typed array stand-ins
      work (but not namedtuples, which are treated as standard containers).
    **kwargs: a keyword argument dict of the same kinds of values as ``args``.

  Returns:
    A pytree of objects with ``shape`` and ``dtype`` attributes matching the
    outputs of ``fun``; nothing else about those objects is guaranteed by the
    API.
  """
  leaves, in_tree = tree_flatten((args, kwargs))
  flat_fun, out_tree_thunk = flatten_fun(lu.wrap_init(fun), in_tree)
  dbg = pe.debug_info(fun, in_tree, True, "eval_shape")
  in_avals = [shaped_abstractify(leaf) for leaf in leaves]
  out_avals = pe.abstract_eval_fun(flat_fun.call_wrapped, *in_avals,
                                   debug_info=dbg)
  out_structs = [ShapeDtypeStruct(a.shape, a.dtype, a.named_shape)
                 for a in out_avals]
  return tree_unflatten(out_tree_thunk(), out_structs)
def checkpoint(fun: Callable, concrete: bool = False, prevent_cse: bool = True,
               policy: Optional[Callable[..., bool]] = None,
               ) -> Callable:
  """Make ``fun`` recompute internal linearization points when differentiated.

  The :func:`jax.checkpoint` decorator, aliased to ``jax.remat``, trades
  computation time for memory in automatic differentiation, especially with
  reverse-mode autodiff like :func:`jax.grad` and :func:`jax.vjp` but also
  with :func:`jax.linearize`. By default, all linearization points (e.g.
  inputs to elementwise nonlinear primitives) are stored on the forward pass
  for reuse on the backward pass, which can be memory-hungry. The function
  returned here instead recomputes (rematerializes) those values from its
  saved inputs during the backward pass, reducing memory at the cost of extra
  computation. The decorator can be applied recursively to express
  sophisticated rematerialization strategies.

  Args:
    fun: Function for which the autodiff evaluation strategy is to be changed
      from storing all intermediate linearization points to recomputing them.
      Its arguments and return value should be arrays, scalars, or (nested)
      standard Python containers (tuple/list/dict) thereof.
    concrete: Optional, boolean indicating whether ``fun`` may involve
      value-dependent Python control flow (default False). Support for such
      control flow is disabled by default because some edge-case compositions
      with :func:`jax.jit` can lead to extra computation.
    prevent_cse: Optional, boolean indicating whether to prevent common
      subexpression elimination (CSE) in the HLO generated from
      differentiation (default True). Under ``jit`` or ``pmap``, CSE could
      otherwise defeat the purpose of this decorator, but the prevention has
      costs and is unnecessary in some settings (e.g. inside a ``scan``).
    policy: Experimental; API likely to change. Optional callable, one of the
      attributes of ``jax.checkpoint_policies``, which takes a type-level
      specification of a first-order primitive application and returns a
      boolean indicating whether the corresponding output value(s) can be
      saved as residuals (otherwise they are recomputed in the (co)tangent
      computation).

  Returns:
    A function (callable) with the same input/output behavior as ``fun`` but
    which, when differentiated, recomputes rather than stores intermediate
    linearization points, thus potentially saving memory at the cost of extra
    computation.
  """
  @wraps(fun)
  @api_boundary
  def fun_remat(*args, **kwargs):
    flat_args, in_tree = tree_flatten((args, kwargs))
    flat_fun, out_tree = flatten_fun(lu.wrap_init(fun), in_tree)
    # remat_call marks this call for rematerialization during autodiff.
    flat_out = pe.remat_call(flat_fun, *flat_args, name=flat_fun.__name__,
                             concrete=concrete, prevent_cse=prevent_cse,
                             differentiated=False,
                             policy=policy)
    return tree_unflatten(out_tree(), flat_out)
  return fun_remat
# `jax.remat` is the historical alias for `jax.checkpoint`.
remat = checkpoint  # type: ignore
def named_call(
    fun: Callable[..., Any],
    *,
    name: Optional[str] = None,
) -> Callable[..., Any]:
  """Adds a user specified name to a function when staging out JAX computations.

  When staging out computations for just-in-time compilation to XLA (or other
  backends such as TensorFlow), JAX runs your Python program but by default
  does not preserve function names or other metadata, which makes debugging
  the staged-out (and/or compiled) representation harder. ``named_call``
  stages the given function out as a subcomputation with a specific name;
  these names survive XLA compilation and show up in debugging utilities like
  the TensorFlow Profiler in TensorBoard, and are also preserved when staging
  out JAX programs to TensorFlow via :func:`experimental.jax2tf.convert`.

  Args:
    fun: Function to be wrapped. This can be any Callable.
    name: Optional. The prefix to use to name all sub computations created
      within the name scope. Uses ``fun.__name__`` if not specified.

  Returns:
    A version of ``fun`` that is wrapped in a name_scope.
  """
  if name is None:
    name = fun.__name__

  # The wrapper closes over its call arguments, so the traced function is
  # nullary; flatten the empty argument tuple once up front.
  _, in_tree = tree_flatten(())

  @functools.wraps(fun)
  def named_f(*args, **kwargs):
    nullary = lu.wrap_init(lambda: fun(*args, **kwargs))
    flat, out_tree = flatten_fun_nokwargs(nullary, in_tree)
    flat_out = core.named_call_p.bind(flat, name=name)
    return tree_unflatten(out_tree(), flat_out)

  return named_f
def invertible(fun: Callable) -> Callable:
  """Asserts that the decorated function is invertible.

  Reverse-mode AD applied to a decorated function uses a more memory-efficient
  procedure than usual, reconstructing the necessary intermediate values by
  inverting the function. Note that this might degrade the numerical accuracy
  of obtained gradients if the inverse is unstable.

  Args:
    fun: The function assumed to be invertible.
  """
  inverse_aware = iad.invertible(fun)
  return inverse_aware
def block_until_ready(x):
  """
  Tries to call a ``block_until_ready`` method on pytree leaves.

  Leaves without such a method (e.g. plain Python scalars) are returned
  unchanged.

  Args:
    x: a pytree, usually with at least some JAX array instances at its leaves.

  Returns:
    A pytree with the same structure and values of the input, where the values
    of all JAX array leaves are ready.
  """
  def _block_leaf(leaf):
    try:
      return leaf.block_until_ready()
    except AttributeError:
      return leaf
  return jax.tree_util.tree_map(_block_leaf, x)
| 43.138608
| 128
| 0.682815
|
4a0e118360d23fe4fdcb68049c8b7f5e36e9308e
| 157
|
py
|
Python
|
gym_cooking/__init__.py
|
atbolsh/gym-cooking
|
bbb207e12c5bfdf72bd7d473de4b79665c23984d
|
[
"MIT"
] | 124
|
2020-11-02T06:31:23.000Z
|
2022-01-11T10:42:37.000Z
|
gym_cooking/__init__.py
|
atbolsh/gym-cooking
|
bbb207e12c5bfdf72bd7d473de4b79665c23984d
|
[
"MIT"
] | 7
|
2020-11-02T14:11:29.000Z
|
2021-05-21T02:53:18.000Z
|
gym_cooking/__init__.py
|
atbolsh/gym-cooking
|
bbb207e12c5bfdf72bd7d473de4b79665c23984d
|
[
"MIT"
] | 24
|
2020-11-02T14:39:34.000Z
|
2022-01-20T06:51:39.000Z
|
from gym.envs.registration import register

# Register the Overcooked environment with Gym so it can be instantiated via
# gym.make("overcookedEnv-v0").
register(
    id="overcookedEnv-v0",
    entry_point="gym_cooking.envs:OvercookedEnvironment",
)
| 22.428571
| 61
| 0.694268
|
4a0e11cf1cff8843e3e5fd25d0f4174f1f7bbcd0
| 14,090
|
py
|
Python
|
act/workers/attack.py
|
tomd/act-workers
|
ef42eaf26b14197a6bd1ac9ae12c4d39acc740c1
|
[
"ISC"
] | 4
|
2018-10-17T21:19:07.000Z
|
2020-08-12T09:55:18.000Z
|
act/workers/attack.py
|
tomd/act-workers
|
ef42eaf26b14197a6bd1ac9ae12c4d39acc740c1
|
[
"ISC"
] | 13
|
2018-10-23T06:37:15.000Z
|
2022-01-28T13:10:54.000Z
|
act/workers/attack.py
|
tomd/act-workers
|
ef42eaf26b14197a6bd1ac9ae12c4d39acc740c1
|
[
"ISC"
] | 16
|
2018-10-23T06:16:23.000Z
|
2022-01-27T11:36:41.000Z
|
#!/usr/bin/env python3
"""
Worker for Mitre ATT&CK, using the STIX implementation available here:
https://github.com/mitre/cti
ATT&CK Property STIX Object type ACT object
=========================================================
Technique attack-pattern technique
Group intrusion-set threatActor
Software malware or tool tool
Mitigation course-of-action n/a
"""
import argparse
import os
import sys
import traceback
from logging import error, info, warning
from typing import Any, Dict, List, Text
import stix2
from stix2 import Filter, MemoryStore, parse
import act.api
from act.api.helpers import handle_fact
from act.workers.libs import worker
# Upstream STIX 2 bundles for each ATT&CK technology domain, served from the
# mitre/cti GitHub repository; keys are the values accepted by `--type`.
MITRE_URLS = {
    "enterprise": "https://raw.githubusercontent.com/mitre/cti/master/enterprise-attack/enterprise-attack.json",
    "pre": "https://raw.githubusercontent.com/mitre/cti/master/pre-attack/pre-attack.json",
    "mobile": "https://raw.githubusercontent.com/mitre/cti/master/mobile-attack/mobile-attack.json"
}

# Default cache file used to remember which revoked/deprecated objects have
# already triggered a notification, to avoid re-notifying on every run.
DEFAULT_NOTIFY_CACHE = os.path.join(os.environ["HOME"], "act-mitre-attack-notify.cache")
class NotificationError(Exception):
    """Raised when notifying about revoked/deprecated objects fails."""

    def __init__(self, *args: Any) -> None:
        # Use cooperative super() rather than calling Exception.__init__
        # directly on the base class.
        super().__init__(*args)
def parseargs() -> argparse.ArgumentParser:
    """Build the argument parser for the Mitre ATT&CK worker.

    Returns:
        An argparse.ArgumentParser with the worker-common options plus SMTP
        notification settings, ATT&CK type selection and notify-cache path.
    """
    parser = worker.parseargs('Mitre ATT&CK worker')
    # SMTP settings used to send notifications about revoked/deprecated
    # STIX objects.
    parser.add_argument('--smtphost', dest='smtphost', help="SMTP host used to send revoked/deprecated objects")
    parser.add_argument('--sender', dest='sender', help="Sender address used to send revoked/deprecated objects")
    parser.add_argument('--recipient', dest='recipient', help="Recipient address used to send revoked/deprecated objects")
    parser.add_argument(
        '--type',
        choices=list(MITRE_URLS.keys()),
        help='Specify a single type to download (enterprise, mobile or pre). Default is to fetch all')
    # The cache prevents re-notifying about the same objects on every run.
    parser.add_argument('--notifycache', dest='notifycache', help="Cache for revoked/deprecated objects", default=DEFAULT_NOTIFY_CACHE)
    return parser
def get_attack(url: str, proxy_string: str, timeout: int) -> MemoryStore:
    """Fetch a Mitre ATT&CK STIX2 bundle and load it into a memory store.

    Args:
        url: location of the ATT&CK STIX2 JSON bundle.
        proxy_string: proxy to use when fetching, if any.
        timeout: request timeout.

    Returns:
        A stix2 MemoryStore populated with every object from the bundle.
    """
    bundle_json = worker.fetch_json(url, proxy_string, timeout)

    store = MemoryStore()
    for stix_object in parse(bundle_json, allow_custom=True).objects:
        store.add(stix_object)

    return store
def add_techniques(client, attack: MemoryStore, output_format: Text = "json") -> List[stix2.AttackPattern]:
    """
    extract objects/facts related to ATT&CK techniques

    Creates "accomplishes" facts from each technique to its mitre-attack
    tactics (STIX kill chain phases).

    Args:
        attack (stix2): Stix attack instance

    Returns:
        List of revoked or deprecated technique objects that callers should
        notify about.
    """
    notify = []
    # ATT&CK concept STIX Object type ACT object
    # =========================================================
    # Technique attack-pattern technique
    # Filter out ATT&CK techniques (attack-pattern) from bundle
    for technique in attack.query([Filter("type", "=", "attack-pattern")]):
        if getattr(technique, "revoked", None):
            # Object is revoked, add to notification list but do not add to facts that should be added to the platform
            notify.append(technique)
            continue
        if getattr(technique, "x_mitre_deprecated", None):
            # Object is deprecated, add to notification list AND continue to add to facts that should be added to the platform
            notify.append(technique)
        # Mitre ATT&CK Tactics are implemented in STIX as kill chain phases with kill_chain_name "mitre-attack"
        for tactic in technique.kill_chain_phases:
            if tactic.kill_chain_name != "mitre-attack":
                continue
            handle_fact(
                client.fact("accomplishes")
                .source("technique", technique.name)
                .destination("tactic", tactic.phase_name),
                output_format=output_format
            )
    return notify
def add_groups(client, attack: MemoryStore, output_format: Text = "json") -> List[stix2.AttackPattern]:
    """
    extract objects/facts related to ATT&CK Groups

    Args:
        attack (stix2): Stix attack instance

    Returns:
        List of revoked/deprecated objects that the caller should notify about.
    """
    notify = []
    # ATT&CK concept  STIX Object type  ACT object
    # =========================================================
    # Group           intrusion-set     threatActor
    #
    # Filter out ATT&CK groups (intrusion-set) from bundle
    for group in attack.query([Filter("type", "=", "intrusion-set")]):
        if getattr(group, "revoked", None):
            # Object is revoked: add to notification list but do NOT add facts to the platform
            notify.append(group)
            continue
        if getattr(group, "x_mitre_deprecated", None):
            # Object is deprecated: add to notification list AND continue to add facts to the platform
            notify.append(group)
        # Register each MITRE alias as a bidirectional threatActor alias fact
        for alias in getattr(group, "aliases", []):
            if group.name != alias:
                handle_fact(
                    client.fact("alias")
                    .bidirectional("threatActor", group.name, "threatActor", alias),
                    output_format=output_format
                )
        # ATT&CK concept  STIX Properties
        # ==========================================================================
        # Software        relationship where relationship_type == "uses",
        #                 points to a target object with type== "malware" or "tool"
        for tool in attack.related_to(group, relationship_type="uses"):
            if tool.type not in ("malware", "tool"):
                continue
            # Link tool usage to the threat actor through placeholder ("*")
            # content/event/incident objects in a fact chain
            chain = act.api.fact.fact_chain(
                client.fact("classifiedAs")
                .source("content", "*")
                .destination("tool", tool.name.lower()),
                client.fact("observedIn")
                .source("content", "*")
                .destination("event", "*"),
                client.fact("attributedTo")
                .source("event", "*")
                .destination("incident", "*"),
                client.fact("attributedTo")
                .source("incident", "*")
                .destination("threatActor", group.name)
            )
            for fact in chain:
                handle_fact(fact, output_format=output_format)
        # ATT&CK concept  STIX Properties
        # ==========================================================================
        # Techniques      relationship where relationship_type == "uses", points to
        #                 a target object with type == "attack-pattern"
        for technique in attack.related_to(group, relationship_type="uses"):
            if technique.type != "attack-pattern":
                continue
            # Link technique usage to the threat actor through placeholder
            # event/incident objects in a fact chain
            chain = act.api.fact.fact_chain(
                client.fact("classifiedAs")
                .source("event", "*")
                .destination("technique", technique.name),
                client.fact("attributedTo")
                .source("event", "*")
                .destination("incident", "*"),
                client.fact("attributedTo")
                .source("incident", "*")
                .destination("threatActor", group.name)
            )
            for fact in chain:
                handle_fact(fact, output_format=output_format)
    return notify
def add_software(client, attack: MemoryStore, output_format: Text = "json") -> List[stix2.AttackPattern]:
    """
    extract objects/facts related to ATT&CK Software
    Insert to ACT if client.baseurl is set, if not, print to stdout

    Args:
        attack (stix2): Stix attack instance

    Returns:
        List of revoked/deprecated objects that the caller should notify about.
    """
    notify = []
    # ATT&CK Software maps to STIX "tool" and "malware" objects
    for software in attack.query([Filter("type", "in", ["tool", "malware"])]):
        tool_name = software.name.lower()
        # Tool category (the STIX type, i.e. "tool" or "malware")
        handle_fact(
            client.fact("category", software.type).source("tool", tool_name),
            output_format=output_format
        )
        if getattr(software, "revoked", None):
            # Object is revoked: add to notification list but do NOT add facts to the platform
            notify.append(software)
            continue
        if getattr(software, "x_mitre_deprecated", None):
            # Object is deprecated: add to notification list AND continue to add facts to the platform
            notify.append(software)
        # Register category and alias facts for every MITRE alias of the software
        for alias in getattr(software, "x_mitre_aliases", []):
            if tool_name != alias.lower():
                # Tool category (alias)
                handle_fact(
                    client.fact("category", software.type).source("tool", alias.lower()),
                    output_format=output_format
                )
                handle_fact(
                    client.fact("alias")
                    .bidirectional("tool", tool_name, "tool", alias.lower()),
                    output_format=output_format
                )
        # ATT&CK concept  STIX Properties
        # ==========================================================================
        # Techniques      relationship where relationship_type == "uses", points to
        #                 a target object with type == "attack-pattern"
        for technique in attack.related_to(software, relationship_type="uses"):
            if technique.type != "attack-pattern":
                continue
            handle_fact(
                client.fact("implements")
                .source("tool", software.name.lower())
                .destination("technique", technique.name),
                output_format=output_format
            )
    return notify
def notify_cache(filename: str) -> Dict:
    """
    Read notify cache from filename.

    Each non-blank line of the cache file is a previously-notified object ID;
    entries are returned as dict keys mapping to True for O(1) membership tests.

    Args:
        filename(str): Cache filename

    Returns:
        Dict mapping stripped cache entries to True (empty if file is missing).
    """
    cache = {}
    try:
        with open(filename) as f:
            for line in f:
                entry = line.strip()
                # Skip blank lines. The original `if line:` was always true
                # (lines retain their trailing newline), so blank lines were
                # cached as an "" entry.
                if entry:
                    cache[entry] = True
    except FileNotFoundError:
        warning("Cache file {} not found, will be created if necessary".format(filename))
    return cache
def add_to_cache(filename: str, entry: str) -> None:
    """
    Append a single entry (stripped, newline-terminated) to the cache file.

    Args:
        filename(str): Cache filename
        entry(str): Cache entry
    """
    with open(filename, "a") as cache_file:
        cache_file.write("{}\n".format(entry.strip()))
def send_notification(
        notify: List[stix2.AttackPattern],
        smtphost: str,
        sender: str,
        recipient: str,
        url: str) -> List[str]:
    """
    Send an email notification about revoked/deprecated objects.

    Args:
        notify(attack[]): Array of revoked/deprecated Stix objects
        smtphost(str): SMTP host used to notify of revoked/deprecated objects
        sender(str): sender address used to notify of revoked/deprecated objects
        recipient(str): recipient address used to notify of revoked/deprecated objects
        url(str): source URL, included at the top of the notification body

    smtphost, sender AND recipient must all be set; otherwise an error is
    logged and no email is sent.

    Raises:
        NotificationError: if an object is neither revoked nor deprecated.

    Returns:
        List of object IDs that were successfully notified.
    """
    notified = []
    if not (smtphost and recipient and sender):
        error("--smtphost, --recipient and --sender must be set to send revoked/deprecated objects on email")
        return []
    body = url + "\n\n"
    warning("[{}]".format(url))
    for obj in notify:
        if getattr(obj, "revoked", None):
            text = "revoked: {}:{}".format(obj.type, obj.name)
        elif getattr(obj, "x_mitre_deprecated", None):
            text = "deprecated: {}:{}".format(obj.type, obj.name)
        else:
            # Fixed typo in the original message ("object tis not ...")
            raise NotificationError("object is not deprecated or revoked: {}:{}".format(obj.type, obj.name))
        notified.append(obj.id)
        body += text + "\n"
        warning(text)
    worker.sendmail(smtphost, sender, recipient, "Revoked/deprecated objects from MITRE/ATT&CK", body)
    info("Email sent to {}".format(recipient))
    return notified
def main() -> None:
    """Fetch the configured MITRE ATT&CK dataset(s), add facts, and notify about revoked/deprecated objects."""
    # Look for default ini file in "/etc/actworkers.ini" and ~/config/actworkers/actworkers.ini
    # (or replace .config with $XDG_CONFIG_DIR if set)
    args = worker.handle_args(parseargs())
    actapi = worker.init_act(args)
    # A single --type restricts the run to one dataset; default is all of them.
    if args.type:
        types = [args.type]
    else:
        types = list(MITRE_URLS.keys())
    for mitre_type in types:
        url = MITRE_URLS.get(mitre_type.lower())
        if not url:
            error("Unknown mitre type: {}. Valid types: {}".format(mitre_type, ",".join(MITRE_URLS.keys())))
            sys.exit(2)
        # Re-read the cache each iteration: add_to_cache below appends to the
        # same file, so later datasets see earlier notifications.
        cache = notify_cache(args.notifycache)
        # Get attack dataset as Stix Memory Store
        attack = get_attack(url, args.proxy_string, args.http_timeout)
        techniques_notify = add_techniques(actapi, attack, args.output_format)
        groups_notify = add_groups(actapi, attack, args.output_format)
        software_notify = add_software(actapi, attack, args.output_format)
        # filter revoked objects from those allready notified
        notify = [
            notify
            for notify in techniques_notify + groups_notify + software_notify
            if notify.id not in cache
        ]
        if notify:
            notified = send_notification(notify, args.smtphost, args.sender, args.recipient, url)
            for object_id in notified:
                # Add object to cache, so we will not be notified on the same object on the next run
                add_to_cache(args.notifycache, object_id)
def main_log_error() -> None:
    """Run main(), logging any unhandled exception as an error before re-raising."""
    try:
        main()
    except Exception:
        error("Unhandled exception: {}".format(traceback.format_exc()))
        raise
if __name__ == '__main__':
    # Script entry point: run main() with top-level exception logging.
    main_log_error()
| 34.199029
| 135
| 0.582186
|
4a0e1255f9a897b7a4918f90f771d32cb30bd436
| 376
|
py
|
Python
|
tests/conftest.py
|
guiloga/guirpc
|
fe740c4f318265df8b4331c682edb4cb7fe4bd89
|
[
"MIT"
] | 3
|
2021-02-01T22:48:10.000Z
|
2021-05-28T12:36:04.000Z
|
tests/conftest.py
|
guiloga/guirpc
|
fe740c4f318265df8b4331c682edb4cb7fe4bd89
|
[
"MIT"
] | 1
|
2021-05-28T12:51:43.000Z
|
2021-06-13T09:33:23.000Z
|
tests/conftest.py
|
guiloga/guirpc
|
fe740c4f318265df8b4331c682edb4cb7fe4bd89
|
[
"MIT"
] | null | null | null |
from pytest import fixture
from guirpc.amqp.utils import ClientConnector
@fixture(scope="session")
def connector():
    # Session-scoped AMQP client connector shared by all tests in the run.
    return ClientConnector()
@fixture(scope="session")
def raw_obj():
    # Session-scoped sample object (plain attributes + equality) used as a fixture.
    return Foo()
class Foo:
    """Simple value object with attribute-based equality, used as a test fixture."""

    def __init__(self):
        self.name = "Foo"
        self.likes = "Bars"

    def __eq__(self, other):
        # Guard against foreign types: the original compared other.__dict__
        # directly and raised AttributeError for e.g. `Foo() == 1`.
        if not isinstance(other, Foo):
            return NotImplemented
        return self.__dict__ == other.__dict__

    def __repr__(self):
        return "Foo(name={!r}, likes={!r})".format(self.name, self.likes)
| 16.347826
| 46
| 0.662234
|
4a0e13aa9f669b01d906d52d5af970f910200904
| 4,862
|
py
|
Python
|
projects/scocen/data/covariance_matrices_for_comps_for_upload_to_the_journal.py
|
mikeireland/chronostar
|
fcf37614e1d145f3a5e265e54512bf8cd98051a0
|
[
"MIT"
] | 4
|
2018-05-28T11:05:42.000Z
|
2021-05-14T01:13:11.000Z
|
projects/scocen/data/covariance_matrices_for_comps_for_upload_to_the_journal.py
|
mikeireland/chronostar
|
fcf37614e1d145f3a5e265e54512bf8cd98051a0
|
[
"MIT"
] | 13
|
2019-08-14T07:30:24.000Z
|
2021-11-08T23:44:29.000Z
|
projects/scocen/data/covariance_matrices_for_comps_for_upload_to_the_journal.py
|
mikeireland/chronostar
|
fcf37614e1d145f3a5e265e54512bf8cd98051a0
|
[
"MIT"
] | 4
|
2016-04-21T08:25:26.000Z
|
2021-02-25T06:53:52.000Z
|
"""
A table of covariance matrices for all Sco-Cen components.
For upload to the journal.
Actually, for completeness, include all the data from Table 1
and include covariance matrices.
"""
import numpy as np
from astropy.table import Table, Column
from astropy import units as u
from chronostar.component import SphereComponent
############################################
# Input files: fitted components and the candidate members used in the fit.
comps_filename = 'final_comps_21.fits'
data_filename_fit = 'scocen_5k_candidates_only_members_with_significant_membership_from_tims_3_subsets.fits'
# Prepare a table with good_comps only
good_comps = ['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'T', 'U']
############################################
comps = Table.read(comps_filename)
# Data used for the fit
data_fit = Table.read(data_filename_fit)
############################################
# Create components
comps_raw = SphereComponent.load_raw_components(comps_filename)
print('Number of components: %d'%len(comps_raw))
print('TODO: check if its using EPICYCLIC!!! as opposed to EMCEE')
# Build the output table: one row per component, with mean/uncertainty
# columns at the current epoch ("t") and at birth ("0"), covariance
# matrices, age and the number of members used in the fit.
result = Table()
n_comps = len(comps)
result['ID'] = [' '] * n_comps
# Current-epoch mean and uncertainty columns (same order as the original).
for colname in ['Xt', 'Yt', 'Zt', 'Ut', 'Vt', 'Wt',
                'Xt_error', 'Yt_error', 'Zt_error',
                'Ut_error', 'Vt_error', 'Wt_error']:
    result[colname] = [np.nan] * n_comps
result.add_column(Column(name='cov_t', dtype=np.float64, length=n_comps, shape=(6, 6)))
# Birth (time 0) mean and uncertainty columns.
for colname in ['X0', 'Y0', 'Z0', 'U0', 'V0', 'W0', 'X0_error', 'V0_error']:
    result[colname] = [np.nan] * n_comps
result.add_column(Column(name='cov_0', dtype=np.float64, length=n_comps, shape=(6, 6)))
result['Age'] = [np.nan] * n_comps
result['Age_error'] = [np.nan] * n_comps
# np.int was deprecated in NumPy 1.20 and removed in 1.24; the builtin int
# is the documented replacement and yields the same column dtype.
result.add_column(Column(name='Nfit', dtype=int, length=n_comps))
# ADD UNITS
# Positions (and their errors) are in parsecs; velocities in km/s.
_pc_cols = ['Xt', 'Yt', 'Zt', 'Xt_error', 'Yt_error', 'Zt_error',
            'X0', 'Y0', 'Z0', 'X0_error']
_kms_cols = ['Ut', 'Vt', 'Wt', 'Ut_error', 'Vt_error', 'Wt_error',
             'U0', 'V0', 'W0', 'V0_error']
for _col in _pc_cols:
    result[_col].unit = u.pc
for _col in _kms_cols:
    result[_col].unit = u.km/u.s
result['Age'].unit = u.Myr
result['Age_error'].unit = u.Myr
# Fill one result row per component, pairing the table row (c) with the
# corresponding SphereComponent instance (c_raw).
i=0
for c, c_raw in zip(comps, comps_raw):
    comp_id = c['comp_ID']
    age=c['Age']
    sigma_age = c['Crossing_time']
    mean_now = c_raw.get_mean_now()
    covmatrix_now = c_raw.get_covmatrix_now()
    covmatrix0 = c_raw.get_covmatrix()
    # Component at time NOW (today in the sky)
    Xt = mean_now[0]
    Yt = mean_now[1]
    Zt = mean_now[2]
    Ut = mean_now[3]
    Vt = mean_now[4]
    Wt = mean_now[5]
    # 1-sigma uncertainties: square roots of the covariance diagonal.
    sigmaXt = np.sqrt(covmatrix_now[0,0])
    sigmaYt = np.sqrt(covmatrix_now[1,1])
    sigmaZt = np.sqrt(covmatrix_now[2,2])
    sigmaUt = np.sqrt(covmatrix_now[3,3])
    sigmaVt = np.sqrt(covmatrix_now[4,4])
    sigmaWt = np.sqrt(covmatrix_now[5,5])
    # Component at time 0 (at birth)
    X0 = c['X']
    Y0 = c['Y']
    Z0 = c['Z']
    U0 = c['U']
    V0 = c['V']
    W0 = c['W']
    sigmaX0 = c['dX']
    sigmaV0 = c['dV']
    # Number of members used in the fit (membership probability >= 0.5)
    pmin_membership = 0.5
    mask = data_fit['membership%s'%comp_id] >= pmin_membership
    Nfit = np.sum(mask)
    result[i]['ID'] = comp_id
    result[i]['Xt'] = Xt
    result[i]['Yt'] = Yt
    result[i]['Zt'] = Zt
    result[i]['Ut'] = Ut
    result[i]['Vt'] = Vt
    result[i]['Wt'] = Wt
    result[i]['Xt_error'] = sigmaXt
    result[i]['Yt_error'] = sigmaYt
    result[i]['Zt_error'] = sigmaZt
    result[i]['Ut_error'] = sigmaUt
    result[i]['Vt_error'] = sigmaVt
    result[i]['Wt_error'] = sigmaWt
    result[i]['cov_t'] = covmatrix_now
    result[i]['X0'] = X0
    result[i]['Y0'] = Y0
    result[i]['Z0'] = Z0
    result[i]['U0'] = U0
    result[i]['V0'] = V0
    result[i]['W0'] = W0
    result[i]['X0_error'] = sigmaX0
    result[i]['V0_error'] = sigmaV0
    result[i]['cov_0'] = covmatrix0
    result[i]['Age'] = age
    result[i]['Age_error'] = sigma_age # Crossing time
    result[i]['Nfit'] = Nfit
    i+=1
# Take only good_comps: keep rows whose ID is in the curated good_comps list.
mask = np.in1d(result['ID'], good_comps)
result = result[mask]
print(result)
# NOTE(review): Table.write raises if the file already exists — presumably
# intentional to avoid clobbering published data; confirm before re-running.
result.write('scocen_components_with_covariance_matrices.fits')
| 28.267442
| 108
| 0.621349
|
4a0e150df2a47272aecf9316913adde57cabb343
| 3,337
|
py
|
Python
|
paper2/tests/test_bigEr.py
|
dfm/mapping_stellar_surfaces
|
52d4ba1a726c65868e4a1290a801fe046fb2155f
|
[
"MIT"
] | 10
|
2021-01-21T17:03:26.000Z
|
2021-12-19T17:49:28.000Z
|
paper2/tests/test_bigEr.py
|
dfm/mapping_stellar_surfaces
|
52d4ba1a726c65868e4a1290a801fe046fb2155f
|
[
"MIT"
] | 10
|
2021-01-21T15:55:53.000Z
|
2021-03-30T14:35:16.000Z
|
paper2/tests/test_bigEr.py
|
dfm/mapping_stellar_surfaces
|
52d4ba1a726c65868e4a1290a801fe046fb2155f
|
[
"MIT"
] | 2
|
2021-01-21T15:41:58.000Z
|
2021-01-25T16:26:15.000Z
|
import numpy as np
from scipy.integrate import quad
from scipy.special import legendre as P
def b(r, K=1000, s=0.0033, **kwargs):
    """
    Sigmoid spot profile evaluated on a K-point theta grid spanning [0, pi].

    Returns 1 / (1 + exp((r - theta) / s)) - 1, i.e. ~-1 inside the spot
    (theta < r) and ~0 outside, with transition width set by `s`.
    """
    grid = np.linspace(0, np.pi, K)
    return 1.0 / (1.0 + np.exp((r - grid) / s)) - 1.0
def get_Bp(K=1000, lmax=5, eps=1e-9, sigma=15, **kwargs):
    """
    Return the matrix B+ projecting a theta-sampled profile onto Legendre
    coefficients l = 0..lmax, with ridge regularization (eps) and Gaussian
    smoothing in l(l+1) (sigma) to suppress high-degree modes.
    """
    cost = np.cos(np.linspace(0, np.pi, K))
    # Design matrix: one normalized Legendre polynomial per column.
    columns = [
        np.sqrt(2 * l + 1) * P(l)(cost).reshape(-1, 1) for l in range(lmax + 1)
    ]
    design = np.hstack(columns)
    # Regularized least-squares pseudo-inverse of the design matrix.
    pinv = np.linalg.solve(design.T @ design + eps * np.eye(lmax + 1), design.T)
    degree = np.arange(lmax + 1)
    smooth = np.exp(-0.5 * degree * (degree + 1) / sigma ** 2)
    return smooth[:, None] * pinv
def bigEr_dzero(r, s=0.0033, **kwargs):
    """Longitude expectation integral E_r in the limit delta r -> 0 (outer product of the coefficient vector)."""
    coeffs = (get_Bp(**kwargs) @ b(r)).reshape(-1, 1)
    return coeffs @ coeffs.T
def bigEr(r, dr, s=0.0033, cutoff=1.5, **kwargs):
    """Return the longitude expectation integral E_r for delta r > 0.

    Builds the correlation matrix C analytically in theta space (up to the
    cutoff multiple of r + dr, beyond which C is numerically zero), then
    rotates it into ylm space with B+ (see get_Bp).
    """
    # Generate an array in `theta`
    # NOTE: For theta > r + dr, the `C` matrix drops
    # to zero VERY quickly. In practice we get better
    # numerical stability if we just set those elements
    # to zero without evaluating them, especially since
    # we can get NaNs from operations involving the extremely
    # large dynamic range between the `exp` and `ln` terms.
    # TODO: It's likely we can find a more numerically stable
    # expression for `C`...
    K = kwargs.get("K", 1000)
    theta = np.linspace(0, np.pi, K).reshape(-1, 1)
    # First index where theta exceeds cutoff * (r + dr): truncation point.
    kmax = np.argmax(theta / (r + dr) > cutoff)
    chim = np.exp((r - dr - theta[:kmax]) / s)
    chip = np.exp((r + dr - theta[:kmax]) / s)
    exp = np.exp((theta[:kmax] - theta[:kmax].T) / s)
    term = np.log((1 + chim) / (1 + chip))
    C0 = (exp * term - term.T) / (1 - exp)
    # When k = kp, we must take the limit, given below
    C0[np.diag_indices_from(C0)] = (
        1 / (1 + chip) + chim / (1 + chim) - term - 1
    ).flatten()
    # Normalization
    C0 *= s / (2 * dr)
    # Fill in the full matrix (elements beyond kmax stay zero by construction)
    C = np.zeros((K, K))
    C[:kmax, :kmax] = C0
    # Rotate into ylm space
    Bp = get_Bp(**kwargs)
    return Bp @ C @ Bp.T
def bigEr_numerical(r, dr, **kwargs):
    """Return the longitude expectation integral E_r, computed numerically.

    Uses scipy.integrate.quad to average the (l, lp) coefficient products of
    the spot profile over the radius interval [r - dr, r + dr]; serves as the
    reference for the analytic bigEr().
    """
    lmax = kwargs.get("lmax", 5)
    Bp = get_Bp(**kwargs)
    # Integrand: product of the l-th and lp-th Legendre coefficients of b(r).
    integrand = lambda r, llp: np.inner(Bp[llp[0]], b(r, **kwargs)) * np.inner(
        Bp[llp[1]], b(r, **kwargs)
    )
    # (1 / 2dr) * integral over [r - dr, r + dr] for each (l, lp) pair.
    return [
        [
            (1.0 / (2 * dr)) * quad(integrand, r - dr, r + dr, args=[l, lp])[0]
            for l in range(lmax + 1)
        ]
        for lp in range(lmax + 1)
    ]
def test_bigEr():
    """
    Show that our expression for the second moment
    integral of the radius distribution agrees
    with a numerical estimate.
    """
    # Check that our analytic expression agrees with the
    # numerical integral
    r = 20 * np.pi / 180
    dr = 5 * np.pi / 180
    assert np.allclose(bigEr(r, dr), bigEr_numerical(r, dr))
    # Check our expression in the limit dr --> 0
    # (dr tiny so the numerical average collapses to the point value)
    r = 20 * np.pi / 180
    dr = 1e-8
    assert np.allclose(bigEr_dzero(r), bigEr_numerical(r, dr))
| 29.530973
| 79
| 0.566377
|
4a0e15ac52649fac04b077773c1df95401010867
| 13,751
|
py
|
Python
|
scripts/dct/obj/ChipObj.py
|
ABM-Community-Ports/droidboot_device_planet-cosmocom
|
4e157f7f3def69cc47e2c5c8fec5346feaea2a8c
|
[
"MIT"
] | 10
|
2020-07-17T14:51:36.000Z
|
2022-03-12T03:35:42.000Z
|
scripts/dct/obj/ChipObj.py
|
ABM-Community-Ports/droidboot_device_planet-cosmocom
|
4e157f7f3def69cc47e2c5c8fec5346feaea2a8c
|
[
"MIT"
] | 6
|
2020-07-23T19:33:25.000Z
|
2021-02-23T18:21:59.000Z
|
scripts/dct/obj/ChipObj.py
|
ABM-Community-Ports/droidboot_device_planet-cosmocom
|
4e157f7f3def69cc47e2c5c8fec5346feaea2a8c
|
[
"MIT"
] | 4
|
2020-11-12T03:07:39.000Z
|
2022-03-23T19:30:20.000Z
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
import os
import collections
import xml.dom.minidom
from GpioObj import GpioObj
from GpioObj import GpioObj_whitney
from GpioObj import GpioObj_MT6759
from GpioObj import GpioObj_MT6739
from GpioObj import GpioObj_MT6771
from GpioObj import GpioObj_MT6763
from EintObj import EintObj
from EintObj import EintObj_MT6750S
from EintObj import EintObj_MT6739
from AdcObj import AdcObj
from ClkObj import ClkObj
from ClkObj import ClkObj_Everest
from ClkObj import ClkObj_Olympus
from ClkObj import ClkObj_Rushmore
from ClkObj import ClkObj_MT6779
from I2cObj import I2cObj
from I2cObj import I2cObj_MT6759
from I2cObj import I2cObj_MT6775
from PmicObj import PmicObj
from PmicObj import PmicObj_MT6758
from Md1EintObj import Md1EintObj
from Md1EintObj import Md1EintObj_MT6739
from PowerObj import PowerObj
from KpdObj import KpdObj
from ModuleObj import ModuleObj
from utility.util import log
from utility.util import LogLevel
# Maps each DWS module tag to the list of per-file generation parameters
# ("para" names accepted on the command line) handled by that module.
para_map = {'adc':['adc_h', 'adc_dtsi'],\
            'clk':['clk_buf_h', 'clk_buf_dtsi'],\
            'i2c':['i2c_h', 'i2c_dtsi'],\
            'eint':['eint_h', 'eint_dtsi'],\
            'gpio':['gpio_usage_h', 'gpio_boot_h', 'gpio_dtsi', 'scp_gpio_usage_h', 'pinfunc_h', \
                    'pinctrl_h', 'gpio_usage_mapping_dtsi'],\
            'md1_eint':['md1_eint_h', 'md1_eint_dtsi'],\
            'kpd':['kpd_h', 'kpd_dtsi'],\
            'pmic':['pmic_drv_h', 'pmic_drv_c', 'pmic_h', 'pmic_c', 'pmic_dtsi'],\
            'power':['power_h']}
class ChipObj:
    """Base chip description: parses a DWS xml file and generates driver files.

    Holds an ordered mapping of module tag -> module object (adc, clk, gpio,
    eint, ...) that chip-specific subclasses customize via replace_obj() /
    append_obj() in their init_objs() override.
    """

    def __init__(self, path, dest):
        # Early-porting flag: when True, gen_custDtsi writes an empty file.
        self.__epFlag = False
        self.__path = path
        ModuleObj.set_genPath(dest)
        # Ordered so generated output is deterministic.
        self.__objs = collections.OrderedDict()
        self.init_objs()

    def init_objs(self):
        """Create the default module objects; subclasses override to replace some."""
        self.__objs['adc'] = AdcObj()
        self.__objs['clk'] = ClkObj()
        self.__objs["i2c"] = I2cObj()
        self.__objs["gpio"] = GpioObj()
        # eint obj need gpio data
        self.__objs["eint"] = EintObj(self.__objs['gpio'])
        self.__objs["md1_eint"] = Md1EintObj()
        self.__objs["pmic"] = PmicObj()
        self.__objs["power"] = PowerObj()
        self.__objs["kpd"] = KpdObj()

    def replace_obj(self, tag, obj):
        """Replace an existing module object; returns False if tag is unknown."""
        if tag not in self.__objs.keys():
            return False
        self.__objs[tag] = obj

    def get_gpioObj(self):
        """Return the gpio module object (needed by eint modules)."""
        return self.__objs['gpio']

    def refresh_eintGpioMap(self):
        # Re-point the eint module at the (possibly replaced) gpio module.
        self.__objs['eint'].set_gpioObj(self.__objs['gpio'])

    def append_obj(self, tag, obj):
        """Register a new module object; returns False if the tag already exists."""
        if tag in self.__objs.keys():
            return False
        self.__objs[tag] = obj

    @staticmethod
    def get_chipId(path):
        """Return the 'chip' attribute of the DWS file's <general> node (False if path missing)."""
        if not os.path.exists(path):
            msg = '%s is not a available path!' %(path)
            log(LogLevel.error, msg)
            return False
        data = xml.dom.minidom.parse(path)
        root = data.documentElement
        # get 'general' node
        node = root.getElementsByTagName('general')
        return node[0].getAttribute('chip')

    def parse(self):
        """Parse the DWS xml file, then feed each <module> node to its object."""
        if not os.path.exists(self.__path):
            msg = '%s is not a available path!' %(self.__path)
            log(LogLevel.error, msg)
            return False
        data = xml.dom.minidom.parse(self.__path)
        root = data.documentElement
        # get 'general' node
        node = root.getElementsByTagName('general')
        # get chip name and project name
        ModuleObj.set_chipId(node[0].getAttribute('chip'))
        # get early porting flag
        epNode = node[0].getElementsByTagName('ep')
        if len(epNode) != 0 and epNode[0].childNodes[0].nodeValue == "True":
            self.__epFlag = True
        msg = 'Chip ID : %s' %(node[0].getAttribute('chip'))
        log(LogLevel.info, msg)
        msg = 'Project Info: %s' %(node[0].getElementsByTagName('proj')[0].childNodes[0].nodeValue)
        log(LogLevel.info, msg)
        # initialize the objects mapping table
        self.init_objs()
        # get module nodes from DWS file
        nodes = node[0].getElementsByTagName('module')
        for node in nodes:
            tag = node.getAttribute('name')
            obj = self.create_obj(tag)
            if obj is None:
                msg = 'can not find %s node in DWS!' %(tag)
                log(LogLevel.error, msg)
                return False
            obj.parse(node)
        return True

    def generate(self, paras):
        """Generate all module files (empty paras) or only the named ones."""
        if len(paras) == 0:
            for obj in self.__objs.values():
                obj.gen_files()
            self.gen_custDtsi()
        else:
            self.gen_spec(paras)
        return True

    def create_obj(self, tag):
        """Return the module object registered under `tag`, or None."""
        obj = None
        if tag in self.__objs.keys():
            obj = self.__objs[tag]
        return obj

    def gen_spec(self, paras):
        """Generate only the output files named in `paras`."""
        for para in paras:
            # `cmp()` is Python-2 only; plain equality is equivalent here.
            if para == 'cust_dtsi':
                self.gen_custDtsi()
                continue
            name = ''
            if para.strip() != '':
                # Find the module whose parameter list contains `para`.
                # (The original indexed para_map.keys() with a counter, which
                # raises TypeError on Python 3 where keys() is a view.)
                for module_name, para_list in para_map.items():
                    if para in para_list:
                        name = module_name
                        break
            if name != '':
                log(LogLevel.info, 'Start to generate %s file...' %(para))
                obj = self.__objs[name]
                obj.gen_spec(para)
                log(LogLevel.info, 'Generate %s file successfully!' %(para))
            else:
                log(LogLevel.warn, '%s can not be recognized!' %(para))
        return True

    def gen_custDtsi(self):
        """Write cust.dtsi by concatenating every module's dtsi contribution."""
        log(LogLevel.info, 'Start to generate cust_dtsi file...')
        fp = open(os.path.join(ModuleObj.get_genPath(), 'cust.dtsi'), 'w')
        gen_str = ModuleObj.writeComment()
        # if early porting, gen empty dtsi file for kernel
        if self.__epFlag:
            fp.write(gen_str)
            fp.close()
            return
        for tag in self.__objs.keys():
            # gpio contributes two sections (mapping + init defaults);
            # every other module contributes a single dtsi section.
            if tag == 'gpio':
                gpioObj = self.create_obj(tag)
                gen_str += ModuleObj.writeHeader(gpioObj.get_dtsiFileName())
                gen_str += gpioObj.fill_mapping_dtsiFile()
                gen_str += gpioObj.fill_init_default_dtsiFile()
            else:
                obj = self.create_obj(tag)
                gen_str += ModuleObj.writeHeader(obj.get_dtsiFileName())
                gen_str += obj.fill_dtsiFile()
            gen_str += '''\n\n'''
        fp.write(gen_str)
        fp.close()
        log(LogLevel.info, 'Generate cust_dtsi file successfully!')
class Everest(ChipObj):
    """Everest chip: ChipObj with the Everest-specific clock-buffer module."""

    def __init__(self, dws_path, gen_path):
        # ChipObj.__init__ already dispatches to our init_objs(); the original
        # called self.init_objs() a second time here, redundantly rebuilding
        # every module object to an identical state.
        ChipObj.__init__(self, dws_path, gen_path)

    def init_objs(self):
        ChipObj.init_objs(self)
        ChipObj.replace_obj(self, 'clk', ClkObj_Everest())

    def parse(self):
        return ChipObj.parse(self)

    def generate(self, paras):
        return ChipObj.generate(self, paras)
class Olympus(ChipObj):
    """Olympus chip: ChipObj with Olympus clock-buffer and MT6759 I2C modules."""
    def __init__(self, dws_path, gen_path):
        ChipObj.__init__(self, dws_path, gen_path)
    def init_objs(self):
        ChipObj.init_objs(self)
        ChipObj.replace_obj(self, 'clk', ClkObj_Olympus())
        ChipObj.replace_obj(self, 'i2c', I2cObj_MT6759())
    def parse(self):
        return ChipObj.parse(self)
    def generate(self, paras):
        return ChipObj.generate(self, paras)
class MT6757_P25(ChipObj):
    """MT6757_P25 chip: same module substitutions as Olympus (clk + MT6759 I2C)."""
    def __init__(self, dws_path, gen_path):
        ChipObj.__init__(self, dws_path, gen_path)
    def init_objs(self):
        ChipObj.init_objs(self)
        ChipObj.replace_obj(self, 'clk', ClkObj_Olympus())
        ChipObj.replace_obj(self, 'i2c', I2cObj_MT6759())
    def parse(self):
        return ChipObj.parse(self)
    def generate(self, paras):
        return ChipObj.generate(self, paras)
class Rushmore(ChipObj):
    """Rushmore chip: ChipObj with the Rushmore clock-buffer module."""
    def __init__(self, dws_path, gen_path):
        ChipObj.__init__(self, dws_path, gen_path)
    def init_objs(self):
        ChipObj.init_objs(self)
        ChipObj.replace_obj(self, 'clk', ClkObj_Rushmore())
    def parse(self):
        return ChipObj.parse(self)
    def generate(self, paras):
        return ChipObj.generate(self, paras)
class Whitney(ChipObj):
    """Whitney chip: Whitney GPIO + MT6759 I2C; eint re-bound to the new gpio."""
    def __init__(self, dws_path, gen_path):
        ChipObj.__init__(self, dws_path, gen_path)
    def init_objs(self):
        ChipObj.init_objs(self)
        ChipObj.replace_obj(self, 'gpio', GpioObj_whitney())
        ChipObj.replace_obj(self, 'i2c', I2cObj_MT6759())
        # Re-point eint at the replaced gpio object.
        ChipObj.refresh_eintGpioMap(self)
    def parse(self):
        log(LogLevel.info, 'Whitney parse')
        return ChipObj.parse(self)
    def generate(self, paras):
        return ChipObj.generate(self, paras)
class MT6759(ChipObj):
    """MT6759 chip: MT6759 GPIO and I2C modules; eint re-bound to the new gpio."""
    def __init__(self, dws_path, gen_path):
        ChipObj.__init__(self, dws_path, gen_path)
    def init_objs(self):
        ChipObj.init_objs(self)
        ChipObj.replace_obj(self, 'gpio', GpioObj_MT6759())
        ChipObj.replace_obj(self, 'i2c', I2cObj_MT6759())
        ChipObj.refresh_eintGpioMap(self)
    def parse(self):
        return ChipObj.parse(self)
    def generate(self, paras):
        return ChipObj.generate(self, paras)
class MT6758(ChipObj):
    """MT6758 chip: MT6758 PMIC plus MT6759 GPIO/I2C; eint re-bound to the new gpio."""
    def __init__(self, dws_path, gen_path):
        ChipObj.__init__(self, dws_path, gen_path)
    def init_objs(self):
        ChipObj.init_objs(self)
        ChipObj.replace_obj(self, 'pmic', PmicObj_MT6758())
        ChipObj.replace_obj(self, 'gpio', GpioObj_MT6759())
        ChipObj.replace_obj(self, 'i2c', I2cObj_MT6759())
        ChipObj.refresh_eintGpioMap(self)
    def parse(self):
        return ChipObj.parse(self)
    def generate(self, paras):
        return ChipObj.generate(self, paras)
class MT6763(ChipObj):
    """MT6763 chip: MT6763 GPIO and MT6759 I2C; eint re-bound to the new gpio."""
    def __init__(self, dws_path, gen_path):
        ChipObj.__init__(self, dws_path, gen_path)
    def init_objs(self):
        ChipObj.init_objs(self)
        ChipObj.replace_obj(self, 'gpio', GpioObj_MT6763())
        ChipObj.replace_obj(self, 'i2c', I2cObj_MT6759())
        ChipObj.refresh_eintGpioMap(self)
    def parse(self):
        return ChipObj.parse(self)
    def generate(self, paras):
        return ChipObj.generate(self, paras)
class MT6739(MT6763):
    """MT6739 chip: extends MT6763 but swaps in MT6739-specific gpio/eint/md1_eint and MT6758 PMIC."""
    def __init__(self, dws_path, gen_path):
        # NOTE(review): initializes via ChipObj.__init__ directly, bypassing
        # MT6763.__init__ (which does the same thing).
        ChipObj.__init__(self, dws_path, gen_path)
    def init_objs(self):
        ChipObj.init_objs(self)
        ChipObj.replace_obj(self, 'pmic', PmicObj_MT6758())
        ChipObj.replace_obj(self, 'gpio', GpioObj_MT6739())
        ChipObj.replace_obj(self, 'eint', EintObj_MT6739(ChipObj.get_gpioObj(self)))
        ChipObj.replace_obj(self, 'md1_eint', Md1EintObj_MT6739())
        ChipObj.replace_obj(self, 'i2c', I2cObj_MT6759())
        ChipObj.refresh_eintGpioMap(self)
    def parse(self):
        return ChipObj.parse(self)
    def generate(self, paras):
        return ChipObj.generate(self, paras)
class MT6750S(ChipObj):
    """MT6750S chip: Olympus clock-buffer plus MT6750S-specific eint module."""
    def __init__(self, dws_path, gen_path):
        ChipObj.__init__(self, dws_path, gen_path)
    def init_objs(self):
        ChipObj.init_objs(self)
        ChipObj.replace_obj(self, 'clk', ClkObj_Olympus())
        ChipObj.replace_obj(self, 'eint', EintObj_MT6750S(ChipObj.get_gpioObj(self)))
        ChipObj.refresh_eintGpioMap(self)
    def parse(self):
        return ChipObj.parse(self)
    def generate(self, paras):
        return ChipObj.generate(self, paras)
class MT8695(ChipObj):
    """MT8695 chip: reuses the Whitney GPIO module; eint re-bound to it."""
    def __init__(self, dws_path, gen_path):
        ChipObj.__init__(self, dws_path, gen_path)
    def init_objs(self):
        ChipObj.init_objs(self)
        ChipObj.replace_obj(self, 'gpio', GpioObj_whitney())
        ChipObj.refresh_eintGpioMap(self)
    def parse(self):
        return ChipObj.parse(self)
    def generate(self, paras):
        return ChipObj.generate(self, paras)
class MT6771(ChipObj):
    """MT6771 chip: MT6758 PMIC, MT6771 GPIO, MT6739 eint/md1_eint, MT6775 I2C."""
    def __init__(self, dws_path, gen_path):
        ChipObj.__init__(self, dws_path, gen_path)
    def init_objs(self):
        ChipObj.init_objs(self)
        ChipObj.replace_obj(self, 'pmic', PmicObj_MT6758())
        ChipObj.replace_obj(self, 'gpio', GpioObj_MT6771())
        ChipObj.replace_obj(self, 'eint', EintObj_MT6739(ChipObj.get_gpioObj(self)))
        ChipObj.replace_obj(self, 'md1_eint', Md1EintObj_MT6739())
        ChipObj.replace_obj(self, 'i2c', I2cObj_MT6775())
        ChipObj.refresh_eintGpioMap(self)
class MT6775(ChipObj):
    """MT6775 chip: MT6758 PMIC, MT6739 GPIO/eint/md1_eint, MT6775 I2C."""
    def __init__(self, dws_path, gen_path):
        ChipObj.__init__(self, dws_path, gen_path)
    def init_objs(self):
        ChipObj.init_objs(self)
        ChipObj.replace_obj(self, 'pmic', PmicObj_MT6758())
        ChipObj.replace_obj(self, 'gpio', GpioObj_MT6739())
        ChipObj.replace_obj(self, 'eint', EintObj_MT6739(ChipObj.get_gpioObj(self)))
        ChipObj.replace_obj(self, 'md1_eint', Md1EintObj_MT6739())
        ChipObj.replace_obj(self, 'i2c', I2cObj_MT6775())
        ChipObj.refresh_eintGpioMap(self)
class MT6779(ChipObj):
    """MT6779 chip: MT6779 clock-buffer, MT6758 PMIC, MT6771 GPIO, MT6739 eint/md1_eint, MT6775 I2C."""
    def __init__(self, dws_path, gen_path):
        ChipObj.__init__(self, dws_path, gen_path)
    def init_objs(self):
        ChipObj.init_objs(self)
        ChipObj.replace_obj(self, 'clk', ClkObj_MT6779())
        ChipObj.replace_obj(self, 'pmic', PmicObj_MT6758())
        ChipObj.replace_obj(self, 'gpio', GpioObj_MT6771())
        ChipObj.replace_obj(self, 'eint', EintObj_MT6739(ChipObj.get_gpioObj(self)))
        ChipObj.replace_obj(self, 'md1_eint', Md1EintObj_MT6739())
        ChipObj.replace_obj(self, "i2c", I2cObj_MT6775())
        ChipObj.refresh_eintGpioMap(self)
| 31.040632
| 99
| 0.622064
|
4a0e15ebde96a66219c7b32ededca6b6707a1f35
| 538
|
py
|
Python
|
server/lib/python/cartodb_services/cartodb_services/google/exceptions.py
|
digideskio/dataservices-api
|
246ec135dbeaa3f9a52717fdac50a4ab040ce22b
|
[
"BSD-3-Clause"
] | 22
|
2016-03-11T17:33:31.000Z
|
2021-02-22T04:00:43.000Z
|
server/lib/python/cartodb_services/cartodb_services/google/exceptions.py
|
digideskio/dataservices-api
|
246ec135dbeaa3f9a52717fdac50a4ab040ce22b
|
[
"BSD-3-Clause"
] | 338
|
2016-02-16T16:13:13.000Z
|
2022-03-30T15:50:17.000Z
|
server/lib/python/cartodb_services/cartodb_services/google/exceptions.py
|
CartoDB/dataservices-api
|
d0f28cc002ef11df9f371d5d1fd2d0901c245f97
|
[
"BSD-3-Clause"
] | 14
|
2016-09-22T15:29:33.000Z
|
2021-02-08T03:46:40.000Z
|
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
import json
class InvalidGoogleCredentials(Exception):
    """Raised for invalid Google credentials."""
    pass
class BadGeocodingParams(Exception):
    """Raised when the supplied geocoding params are invalid; keeps them in `value`."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        message = 'Bad geocoding params: ' + json.dumps(self.value)
        return repr(message)
class NoGeocodingParams(Exception):
    """Raised when no geocoding params were supplied at all."""

    def __str__(self):
        message = 'No params for geocoding specified'
        return repr(message)
class MalformedResult(Exception):
    """Raised when a geocoding response does not have the expected structure."""

    def __str__(self):
        message = 'Result structure is malformed'
        return repr(message)
| 22.416667
| 70
| 0.684015
|
4a0e16ac098fb0f5b0b56f6922b321c5075201f9
| 6,833
|
py
|
Python
|
analysis.py
|
jkfids/forest-fire
|
c0555929725be55369aa508ef6582f9d3f97ca4b
|
[
"MIT"
] | 1
|
2021-04-09T04:02:01.000Z
|
2021-04-09T04:02:01.000Z
|
analysis.py
|
jkfids/forest-fire
|
c0555929725be55369aa508ef6582f9d3f97ca4b
|
[
"MIT"
] | null | null | null |
analysis.py
|
jkfids/forest-fire
|
c0555929725be55369aa508ef6582f9d3f97ca4b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 16 16:24:11 2021
@author: Fidel
"""
# Import standard libraries
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from matplotlib import colors
from matplotlib.animation import FuncAnimation
from time import time
# Import ForestFire class
from forestfire import ForestFire
def animate_forest(forest, interval=100, frames=200, name='forestfire.gif'):
    """Animate a forest fire for a given number of frames (i.e. timesteps).

    Advances `forest` one step per frame and saves a GIF to animations/<name>.
    `interval` is the delay between frames in milliseconds.
    """
    start = time()
    # Color map: burning (-1) red, empty (0) black, tree (1) green.
    cmap = colors.ListedColormap(['red', 'black', 'green'])
    bounds = [-1, -0.5, 0.5, 1]
    norm = colors.BoundaryNorm(bounds, cmap.N)
    # NOTE(review): a figure/axes pair is created here and then immediately
    # replaced by the borderless figure below — the first pair appears unused.
    fig, ax = plt.subplots()
    ax.axis('off')
    fig = plt.figure(frameon=False)
    fig.set_size_inches(10,10)
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    fig.add_axes(ax)
    def init_frame():
        # Draw the initial grid state.
        ax.imshow(forest.grid, cmap=cmap, norm=norm, aspect='auto')
    def animate(i):
        # Redraw the current grid, then advance the simulation one step.
        plt.cla()
        ax.imshow(forest.grid, cmap=cmap, norm=norm, aspect='auto')
        forest.step()
        #print(f'frame {i}')
    anim = FuncAnimation(fig, animate, init_func=init_frame, interval=interval, frames=frames)
    anim.save('animations/' + name)
    end = time()
    print(f'Time elapsed: {round((end - start), 2)} seconds')
def plot_fractionvt(forest, t_max, plot_green=True):
    """Plot fraction of green and red vs t"""
    fig, ax = plt.subplots()
    ax.set_xlabel('Time')
    ax.set_ylabel('Grid State Fractions')
    ax.grid(True)
    # Annotate the plot with the simulation parameters.
    props = dict(boxstyle='square', facecolor='white')
    info_lines = [
        f'L = {forest.height}',
        f'p = {forest.p}',
        f'f = {forest.f}',
    ]
    textbox = '\n'.join(info_lines)
    ax.text(0.865, 0.965, textbox, transform=ax.transAxes, fontsize=9,
            verticalalignment='top', bbox=props)
    # Run the simulation, then normalise state counts to fractions.
    forest.step(t_max)
    red_fraction = np.array(forest.s_history) / forest.size
    timesteps = range(len(red_fraction))
    ax.plot(timesteps, red_fraction, color='red')
    if plot_green:
        green_fraction = np.array(forest.g_history) / forest.size
        ax.plot(timesteps, green_fraction, color='green')
def gaussian(x, mu, sigma):
    """Normalised Gaussian PDF with mean ``mu`` and std ``sigma``, at ``x``."""
    coefficient = 1 / (sigma * np.sqrt(2 * np.pi))
    exponent = -(x - mu) ** 2 / (2 * sigma ** 2)
    return coefficient * np.exp(exponent)
def plot_firesizepd(forest, t, N, p0=[0, 0, 0], fit=False):
    """Construct a histogram of probability vs. fire size after a burn-in.

    :param forest: ForestFire instance to simulate
    :param t: number of burn-in steps discarded from the size history
    :param N: number of post-burn-in steps to sample
    :param p0: unused; kept for interface compatibility (the fit below
        uses a hard-coded initial guess)
    :param fit: if True, overlay a Gaussian fit and return its parameters
    :return: fitted (amplitude, mean, sigma) when ``fit`` is True, else None
    """
    start = time()
    forest.step(t + N)
    firesizes = forest.s_history[t:]

    # Bug fix: the module-level `gaussian(x, mu, sigma)` has only two free
    # parameters, but the initial guess (and the printouts below) describe
    # three: amplitude, mean, sigma. Fit an amplitude-scaled Gaussian so
    # curve_fit receives a matching p0.
    def _scaled_gaussian(x, a, mu, sigma):
        return a * np.exp(-(x - mu) ** 2 / (2 * sigma ** 2))

    popt = None
    if fit:
        bin_heights, bin_borders, _ = plt.hist(firesizes, density=True, bins='auto')
        bin_centers = bin_borders[:-1] + np.diff(bin_borders) / 2
        popt, _ = curve_fit(_scaled_gaussian, bin_centers, bin_heights,
                            p0=[1 / 200, 7500, 1000])
        X = np.linspace(bin_borders[0], bin_borders[-1], 10000)
        plt.plot(X, _scaled_gaussian(X, *popt))
    plt.ylabel('Probability')
    plt.xlabel('Fire Size')
    plt.title('Fire Size Probability Distribution')
    end = time()
    print(f'Time elapsed: {round((end - start), 2)} seconds')
    if fit:
        # Bug fix: these reads of `popt` previously executed even when
        # fit=False, raising UnboundLocalError.
        print(f'Amplitude = {popt[0]}')
        print(f'Mean = {popt[1]}')
        print(f'Standard deviation = {popt[2]}')
        return popt
# Fire size pdf subplots
def plot_firesizepd_multi(forest1, forest2, forest3, t, N):
    """Plot multiple fire size probability distributions side by side.

    Each forest is advanced by its own burn-in time ``t[i]`` plus ``N``
    sampling steps; Gaussian fits are overlaid on panels 1 and 3.

    :param forest1, forest2, forest3: ForestFire instances to simulate
    :param t: sequence of three burn-in step counts, one per forest
    :param N: number of post-burn-in steps to sample
    :return: (popt1, popt3) Gaussian (mu, sigma) fits for panels 1 and 3
    """
    start = time()
    forest1.step(t[0] + N)
    forest2.step(t[1] + N)
    forest3.step(t[2] + N)
    # Bug fix: the burn-in slice previously used t[0] for all three
    # forests; each history must be trimmed by its own burn-in time.
    firesizes_history1 = forest1.s_history[t[0]:]
    firesizes_history2 = forest2.s_history[t[1]:]
    firesizes_history3 = forest3.s_history[t[2]:]
    fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(12, 4), dpi=144)
    ax1.title.set_text(f'f = {forest1.f}, p = {forest1.p}')
    ax2.title.set_text(f'f = {forest2.f}, p = {forest2.p}')
    ax3.title.set_text(f'f = {forest3.f}, p = {forest3.p}')
    ax1.set_ylim(top=0.00675)
    ax3.set_ylim(top=0.00675)
    # Panel 2 is normalised by sample count rather than bin density.
    weights2 = np.ones(len(firesizes_history2)) / len(firesizes_history2)
    bin_heights1, bin_borders1, _ = ax1.hist(firesizes_history1, density=True, bins='auto')
    ax2.hist(firesizes_history2, weights=weights2, bins=100)
    bin_heights3, bin_borders3, _ = ax3.hist(firesizes_history3, density=True, bins='auto')
    bin_centers1 = bin_borders1[:-1] + np.diff(bin_borders1) / 2
    popt1, _ = curve_fit(gaussian, bin_centers1, bin_heights1, p0=[7500, 100])
    X1 = np.linspace(bin_borders1[0], bin_borders1[-1], 10000)
    ax1.plot(X1, gaussian(X1, *popt1), label=f'μ = {round(popt1[0])}, σ = {round(popt1[1], 2)}')
    ax1.legend(loc='upper center')
    bin_centers3 = bin_borders3[:-1] + np.diff(bin_borders3) / 2
    popt3, _ = curve_fit(gaussian, bin_centers3, bin_heights3, p0=[250, 50])
    X3 = np.linspace(bin_borders3[0], bin_borders3[-1], 10000)
    ax3.plot(X3, gaussian(X3, *popt3), label=f'μ = {round(popt3[0])}, σ = {round(popt3[1], 2)}')
    ax3.legend(loc='upper center')
    end = time()
    fig.savefig('plots/' + 'firesizepds')
    print(f'Time elapsed: {round((end - start), 2)} seconds')
    return popt1, popt3
def plot_waitingtimespd_multi(forest1, forest2, forest3, t, N):
    """
    Multiple plots of the probability distribution for waiting times
    between fires in individual sites
    """
    start = time()
    # Advance all three simulations past the common burn-in period.
    forest1.step(t + N)
    forest2.step(t + N)
    forest3.step(t + N)
    # Tail slices keep the sample sizes of the three panels comparable.
    waits1 = forest1.w_history[-100000:]
    waits2 = forest2.w_history
    waits3 = forest3.w_history[-500000:]
    fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(12, 4), dpi=144)
    for axis, forest in ((ax1, forest1), (ax2, forest2), (ax3, forest3)):
        axis.title.set_text(f'f = {forest.f}, p = {forest.p}')
    ax1.xaxis.set_ticks(range(2, 13, 1))
    ax1.set_xlim(left=2, right=12)
    ax2.set_xlim(right=60)
    ax3.set_xlim(right=600)
    # Panel 1 is count-normalised; the others use density histograms.
    norm_weights = np.ones(len(waits1)) / len(waits1)
    ax1.hist(waits1, weights=norm_weights, bins=100)
    ax2.hist(waits2, density=True, bins='auto')
    ax3.hist(waits3, density=True, bins='auto')
    end = time()
    fig.savefig('plots/' + 'waitingtimespds')
    print(f'Time elapsed: {round((end - start), 2)} seconds')
def calc_steadystate(f, p):
    """Calculate the steady-state red and green grid fractions.

    :param f: lightning (spontaneous ignition) probability
    :param p: regrowth probability
    :return: (x_r, x_g) — steady-state red and green fractions
    """
    fp1 = f * (p + 1)
    discriminant = fp1 ** 2 + 10 * p * fp1 + 9 * p ** 2
    root = np.sqrt(discriminant)
    x_r = (3 * p - fp1 + root) / (8 * (p + 1))
    # Green fraction follows from conservation of grid states.
    x_g = 1 - (p + 1) * x_r / p
    return x_r, x_g
| 37.543956
| 96
| 0.63647
|
4a0e17b0923ba0f74bf85bc55ae59de364af6882
| 11,238
|
py
|
Python
|
sdk/turing/ensembler.py
|
ashwinath/turing
|
dcaf3132cea4a511b4b2cd3466096cac3914b0a5
|
[
"Apache-2.0"
] | 57
|
2020-10-21T10:36:11.000Z
|
2022-03-11T17:50:25.000Z
|
sdk/turing/ensembler.py
|
ashwinath/turing
|
dcaf3132cea4a511b4b2cd3466096cac3914b0a5
|
[
"Apache-2.0"
] | 82
|
2020-10-21T08:24:51.000Z
|
2022-03-31T07:08:29.000Z
|
sdk/turing/ensembler.py
|
ashwinath/turing
|
dcaf3132cea4a511b4b2cd3466096cac3914b0a5
|
[
"Apache-2.0"
] | 18
|
2020-10-21T08:07:50.000Z
|
2022-03-20T18:20:54.000Z
|
import abc
from typing import Optional, Union, List, Any, Dict
import mlflow
import numpy
import pandas
import turing.generated.models
from turing.generated.models import EnsemblerType
from turing._base_types import ApiObject, ApiObjectSpec
from turing.batch import EnsemblingJob
from turing.batch.config import EnsemblingJobConfig
class EnsemblerBase(abc.ABC):
    """Contract for ensemblers: combine per-model predictions into one result."""

    @abc.abstractmethod
    def ensemble(
            self,
            features: pandas.Series,
            predictions: pandas.Series,
            treatment_config: Optional[dict]) -> Any:
        """Produce the final prediction from individual model outputs.

        :param features: pandas.Series, a single row of input features
        :param predictions: pandas.Series, a single row of per-model
            predictions; ``predictions['model-a']`` holds model-a's output
        :param treatment_config: dict describing the treatment applied to
            this record, or ``None`` when no experiment engine is configured
            for the Batch experiment
        :returns: ensembling result (one of str, int, float, double or array)
        """
        pass
class PyFunc(EnsemblerBase, mlflow.pyfunc.PythonModel, abc.ABC):
    """
    Abstract PyFunc ensembler: adapts mlflow's PythonModel contract by
    implementing ``predict`` in terms of the row-wise ``ensemble`` hook.
    """
    PREDICTION_COLUMN_PREFIX = '__predictions__'

    def load_context(self, context):
        # Delegate mlflow's load hook to the subclass initializer.
        self.initialize(context.artifacts)

    @abc.abstractmethod
    def initialize(self, artifacts: dict):
        """
        Optional one-time initialization step, invoked when the model
        is loaded.

        :param artifacts: dictionary of artifacts passed to log_component method
        """
        pass

    def predict(self, context, model_input: pandas.DataFrame) -> \
            Union[numpy.ndarray, pandas.Series, pandas.DataFrame]:
        prefix = PyFunc.PREDICTION_COLUMN_PREFIX
        # Map each prefixed prediction column to its bare model name.
        prediction_columns = {}
        for column in model_input.columns:
            if column.startswith(prefix):
                prediction_columns[column] = column[len(prefix):]
        renamed = model_input.rename(columns=prediction_columns)

        def _ensemble_row(row):
            return self.ensemble(
                features=row.drop(prediction_columns.values()),
                predictions=row[prediction_columns.values()],
                treatment_config=None
            )

        return renamed.apply(_ensemble_row, axis=1, result_type='expand')
@ApiObjectSpec(turing.generated.models.Ensembler)
class Ensembler(ApiObject):
    """API entity representing an ensembler registered in a Turing project."""

    def __init__(self, name: str, type: EnsemblerType, project_id: int = None, **kwargs):
        super(Ensembler, self).__init__(**kwargs)
        self._project_id = project_id
        self._name = name
        self._type = type

    @property
    def name(self) -> str:
        """Ensembler's name."""
        return self._name

    @property
    def type(self) -> 'EnsemblerType':
        """Ensembler's type."""
        return self._type

    @property
    def project_id(self) -> int:
        """ID of the project that owns this ensembler."""
        return self._project_id

    @classmethod
    def list(
            cls,
            ensembler_type: Optional[EnsemblerType] = None,
            page: Optional[int] = None,
            page_size: Optional[int] = None) -> List['Ensembler']:
        """
        List ensemblers saved in the active project

        :param ensembler_type: (optional) filter ensemblers by type
        :param page: (optional) pagination parameters – page number
        :param page_size: (optional) pagination parameters - page size

        :return: list of ensemblers
        """
        response = turing.active_session.list_ensemblers(
            ensembler_type=ensembler_type,
            page=page,
            page_size=page_size
        )
        results = response.results
        return [Ensembler.from_open_api(item) for item in results]
@ApiObjectSpec(turing.generated.models.PyFuncEnsembler)
class PyFuncEnsembler(Ensembler):
    """
    API entity for PyFuncEnsembler
    """
    TYPE = EnsemblerType("pyfunc")
    DEFAULT_ENSEMBLER_PATH = "ensembler"

    def __init__(
            self,
            mlflow_experiment_id: int = None,
            mlflow_run_id: str = None,
            artifact_uri: str = None,
            **kwargs):
        kwargs.pop('type', None)
        super(PyFuncEnsembler, self).__init__(type=PyFuncEnsembler.TYPE, **kwargs)
        self._mlflow_experiment_id = mlflow_experiment_id
        self._mlflow_run_id = mlflow_run_id
        self._artifact_uri = artifact_uri

    @property
    def mlflow_experiment_id(self) -> int:
        """MLflow experiment ID holding this ensembler's logged models."""
        return self._mlflow_experiment_id

    @mlflow_experiment_id.setter
    def mlflow_experiment_id(self, mlflow_experiment_id: int):
        self._mlflow_experiment_id = mlflow_experiment_id

    @property
    def mlflow_run_id(self) -> str:
        """MLflow run ID of the latest logged model."""
        return self._mlflow_run_id

    @mlflow_run_id.setter
    def mlflow_run_id(self, mlflow_run_id):
        self._mlflow_run_id = mlflow_run_id

    @property
    def artifact_uri(self) -> str:
        """Artifact URI of the latest logged model."""
        return self._artifact_uri

    @artifact_uri.setter
    def artifact_uri(self, artifact_uri):
        self._artifact_uri = artifact_uri

    @classmethod
    def _experiment_name(cls, project_name: str, ensembler_name: str) -> str:
        """MLflow experiment name under which the ensembler is logged."""
        return f"{project_name}/ensemblers/{ensembler_name}"

    @classmethod
    def _log_pyfunc_model(
            cls,
            name: str,
            ensembler_instance: EnsemblerBase,
            conda_env: Optional[Union[str, Dict[str, Any]]],
            code_dir: Optional[List[str]],
            artifacts: Optional[Dict[str, str]]):
        """Log `ensembler_instance` as a pyfunc model under the project's
        MLflow experiment for `name` and return the active run.

        NOTE: the MLflow run is left active; the caller must read
        ``mlflow.get_artifact_uri()`` and then call ``mlflow.end_run()``.
        (Shared by :meth:`update` and :meth:`create`, which previously
        duplicated this whole sequence.)
        """
        project_name = turing.active_session.active_project.name
        mlflow.set_experiment(experiment_name=cls._experiment_name(project_name, name))
        mlflow.start_run()
        mlflow.pyfunc.log_model(
            PyFuncEnsembler.DEFAULT_ENSEMBLER_PATH,
            python_model=ensembler_instance,
            conda_env=conda_env,
            code_path=code_dir,
            artifacts=artifacts,
        )
        return mlflow.active_run()

    def _save(self):
        # Push local state to the API and refresh this instance in place.
        self.__dict__.update(
            PyFuncEnsembler.from_open_api(
                turing.active_session.update_ensembler(self.to_open_api())
            ).__dict__
        )

    def update(
            self,
            name: Optional[str] = None,
            ensembler_instance: Optional[EnsemblerBase] = None,
            conda_env: Optional[Union[str, Dict[str, Any]]] = None,
            code_dir: Optional[List[str]] = None,
            artifacts: Optional[Dict[str, str]] = None):
        """
        Update existing pyfunc ensembler. Ensembler's data will be updated in-place

        :param name: (optional) new name
        :param ensembler_instance: (optional) updated implementation of the ensembler
        :param conda_env: (optional) either a dictionary representation of a Conda
            environment or the path to a Conda environment yaml file
        :param code_dir: (optional) a list of local filesystem paths to Python file dependencies
            (or directories containing file dependencies). These files are prepended to the
            system path before the ensembler is loaded
        :param artifacts: (optional) dictionary of artifact that will be stored together
            with the model. This will be passed to turing.ensembler.PyFunc.initialize().
            Example: {"config" : "config/staging.yaml"}
        """
        if name:
            self._name = name
        if ensembler_instance:
            # Log under the (possibly renamed) ensembler name.
            run = self._log_pyfunc_model(
                self.name, ensembler_instance, conda_env, code_dir, artifacts)
            self.mlflow_experiment_id = int(run.info.experiment_id)
            self.mlflow_run_id = run.info.run_id
            self.artifact_uri = mlflow.get_artifact_uri()
            mlflow.end_run()
        self._save()

    def submit_job(self, job_config: EnsemblingJobConfig) -> 'EnsemblingJob':
        """
        Submit a batch ensembling jobs

        :param job_config: ensembling job configuration

        :return: instance of a submitted ensembling job
        """
        return EnsemblingJob.submit(self.id, job_config)

    @classmethod
    def get_by_id(cls, ensembler_id: int) -> 'PyFuncEnsembler':
        """
        Get the instance of a pyfunc ensembler with given ID

        :param ensembler_id:
        :return: instance of pyfunc ensembler
        """
        return PyFuncEnsembler.from_open_api(
            turing.active_session.get_ensembler(ensembler_id))

    @classmethod
    def list(
            cls,
            page: Optional[int] = None,
            page_size: Optional[int] = None,
            **kwargs) -> List['PyFuncEnsembler']:
        """
        List pyfunc ensemblers saved in the active project

        :param page: (optional) pagination parameters – page number
        :param page_size: (optional) pagination parameters - page size

        :return: list of pyfunc ensemblers
        """
        response = turing.active_session.list_ensemblers(
            ensembler_type=PyFuncEnsembler.TYPE,
            page=page,
            page_size=page_size
        )
        return [PyFuncEnsembler.from_open_api(item) for item in response.results]

    @classmethod
    def create(
            cls,
            name: str,
            ensembler_instance: PyFunc,
            conda_env: Union[str, Dict[str, Any]],
            code_dir: Optional[List[str]] = None,
            artifacts: Dict[str, str] = None,
    ) -> 'PyFuncEnsembler':
        """
        Save new pyfunc ensembler in the active project

        :param name: ensembler's name. Must be unique among all ensemblers within the project
        :param ensembler_instance: implementation of PyFunc ensembler
        :param conda_env: either a dictionary representation of a Conda environment or
            the path to a Conda environment yaml file
        :param code_dir: (optional) a list of local filesystem paths to Python file dependencies
            (or directories containing file dependencies). These files are prepended to the
            system path before the ensembler is loaded
        :param artifacts: dictionary of artifact that will be stored together with the model.
            This will be passed to turing.ensembler.PyFunc.initialize().
            Example: {"config" : "config/staging.yaml"}

        :return: saved instance of PyFuncEnsembler
        """
        run = cls._log_pyfunc_model(
            name, ensembler_instance, conda_env, code_dir, artifacts)
        ensembler = PyFuncEnsembler(
            name=name,
            mlflow_experiment_id=int(run.info.experiment_id),
            mlflow_run_id=run.info.run_id,
            artifact_uri=mlflow.get_artifact_uri()
        )
        mlflow.end_run()
        return PyFuncEnsembler.from_open_api(
            turing.active_session.create_ensembler(ensembler.to_open_api()))
| 35.451104
| 100
| 0.644421
|
4a0e18631da8725fbc5035ca5b80b3936dd8761d
| 1,012
|
py
|
Python
|
vsts/vsts/core/v4_1/models/web_api_connected_service_ref.py
|
kenkuo/azure-devops-python-api
|
9e920bd25e938fa89ff7f60153e5b9e113ca839d
|
[
"MIT"
] | null | null | null |
vsts/vsts/core/v4_1/models/web_api_connected_service_ref.py
|
kenkuo/azure-devops-python-api
|
9e920bd25e938fa89ff7f60153e5b9e113ca839d
|
[
"MIT"
] | null | null | null |
vsts/vsts/core/v4_1/models/web_api_connected_service_ref.py
|
kenkuo/azure-devops-python-api
|
9e920bd25e938fa89ff7f60153e5b9e113ca839d
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class WebApiConnectedServiceRef(Model):
    """WebApiConnectedServiceRef.

    Lightweight reference to a connected service, carrying only its
    identifier and URL.

    :param id:
    :type id: str
    :param url:
    :type url: str
    """

    # msrest serialization map: attribute name -> wire key and type.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'url': {'key': 'url', 'type': 'str'}
    }

    def __init__(self, id=None, url=None):
        super(WebApiConnectedServiceRef, self).__init__()
        self.id = id
        self.url = url
| 33.733333
| 94
| 0.466403
|
4a0e18d27a2421621712689ef4baf5f36721245c
| 508
|
py
|
Python
|
tests/conftest.py
|
w0rmr1d3r/cloner
|
a7c00f6006504119d764ab6bd0aa5830f1bc8779
|
[
"MIT"
] | 5
|
2021-11-04T11:55:23.000Z
|
2022-01-16T01:28:51.000Z
|
tests/conftest.py
|
w0rmr1d3r/cloner
|
a7c00f6006504119d764ab6bd0aa5830f1bc8779
|
[
"MIT"
] | 1
|
2022-03-01T06:05:45.000Z
|
2022-03-01T06:05:45.000Z
|
tests/conftest.py
|
w0rmr1d3r/cloner
|
a7c00f6006504119d764ab6bd0aa5830f1bc8779
|
[
"MIT"
] | null | null | null |
import queue
import threading
import pytest
from cloner.repository import Repository
@pytest.fixture
def random_repository(faker) -> Repository:
    """Build a Repository with a faker-generated name, URL and numeric id."""
    repo_name = faker.first_name()
    clone_url = f"https://github.com/organization/{repo_name}.git"
    repo_id = faker.random_number(digits=4, fix_len=False)
    return Repository(name=repo_name, clone_url=clone_url, repo_id=repo_id)
@pytest.fixture
def queue_lock() -> threading.Lock:
    """Provide a fresh lock for guarding the repository queue."""
    lock = threading.Lock()
    return lock
@pytest.fixture
def repository_list_queue():
    """Provide an empty FIFO queue for repositories to clone."""
    fifo = queue.Queue()
    return fifo
| 18.814815
| 64
| 0.714567
|
4a0e18ee5724a84fd408fe4ad8b3e51813a88761
| 700
|
py
|
Python
|
examples/scripts/01_site.py
|
buildfail/frontera
|
84f9e1034d2868447db88e865596c0fbb32e70f6
|
[
"BSD-3-Clause"
] | 1,267
|
2015-04-15T04:47:12.000Z
|
2022-03-29T07:55:15.000Z
|
examples/scripts/01_site.py
|
buildfail/frontera
|
84f9e1034d2868447db88e865596c0fbb32e70f6
|
[
"BSD-3-Clause"
] | 316
|
2015-04-14T21:28:26.000Z
|
2021-05-31T05:31:15.000Z
|
examples/scripts/01_site.py
|
buildfail/frontera
|
84f9e1034d2868447db88e865596c0fbb32e70f6
|
[
"BSD-3-Clause"
] | 250
|
2015-04-20T07:15:10.000Z
|
2022-03-28T15:17:15.000Z
|
"""
Graph manager example with single site
"""
from frontera import graphs
# Site graph as an adjacency list: (page, [pages it links to]).
SITE = [
    ("A", ["B", "C"]),
    ("B", ["D", "E"]),
    ("C", ["F", "G"]),
]
# Same graph, with each node given as (HTTP status code, page).
SITE_WITH_STATUS_CODES = [
    ((200, "A"), ["B", "C"]),
    ((404, "B"), ["D", "E"]),
    ((500, "C"), ["F", "G"]),
]
def test_site(site):
    """Build a frontera graph from `site` and print its pages.

    NOTE: Python 2 script (print statements).
    """
    # Create graph
    graph = graphs.Manager()
    # Add site to graph
    graph.add_site(site)
    # Show graph pages
    print '-'*80
    for page in graph.pages:
        print page, page.status
    # Show single page
    a_page = graph.get_page("A")
    print a_page.url, [link.url for link in a_page.links]

if __name__ == '__main__':
    # Run the demo twice: plain adjacency list, then with explicit status codes.
    test_site(SITE)
    test_site(SITE_WITH_STATUS_CODES)
| 18.421053
| 57
| 0.545714
|
4a0e194d35f4699266b824a64b66cfecd47a380b
| 107
|
py
|
Python
|
example/settings_file/app.py
|
rroden12/dynaconf
|
9495fdd2145dfc93a9af700c104f7841e52221b2
|
[
"MIT"
] | 2,293
|
2015-08-14T22:39:31.000Z
|
2022-03-31T12:44:49.000Z
|
example/settings_file/app.py
|
rroden12/dynaconf
|
9495fdd2145dfc93a9af700c104f7841e52221b2
|
[
"MIT"
] | 676
|
2015-08-20T19:29:56.000Z
|
2022-03-31T13:45:51.000Z
|
example/settings_file/app.py
|
rroden12/dynaconf
|
9495fdd2145dfc93a9af700c104f7841e52221b2
|
[
"MIT"
] | 255
|
2015-12-02T21:16:33.000Z
|
2022-03-20T22:03:46.000Z
|
# Minimal dynaconf example: MESSAGE is loaded from the settings file
# configured for this example directory.
from dynaconf import settings

# Fails loudly if the settings file was not picked up.
assert settings.MESSAGE == "Hello from tmp"
print(settings.MESSAGE)  # noqa
| 21.4
| 43
| 0.775701
|
4a0e19cf3c23aa24f6a622613a501a62fb5b822c
| 25,730
|
py
|
Python
|
mvpa2/misc/support.py
|
thomastweets/PyMVPA
|
a9c05acd7569639bb636aed3c22a13b21559ca02
|
[
"MIT"
] | 1
|
2016-08-23T05:04:09.000Z
|
2016-08-23T05:04:09.000Z
|
mvpa2/misc/support.py
|
thomastweets/PyMVPA
|
a9c05acd7569639bb636aed3c22a13b21559ca02
|
[
"MIT"
] | null | null | null |
mvpa2/misc/support.py
|
thomastweets/PyMVPA
|
a9c05acd7569639bb636aed3c22a13b21559ca02
|
[
"MIT"
] | null | null | null |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the PyMVPA package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Support function -- little helpers in everyday life"""
__docformat__ = 'restructuredtext'
import itertools
import math
import random
import re, os, sys
# for SmartVersion
from distutils.version import Version
import numpy as np
#import numpy.random as npr
from mvpa2.base import warning
from mvpa2.support.copy import copy, deepcopy
from mvpa2.base.types import is_sequence_type
if __debug__:
from mvpa2.base import debug
##REF: Name was automagically refactored
def reuse_absolute_path(file1, file2, force=False):
    """Use path to file1 as the path to file2 is no absolute
    path is given for file2

    Parameters
    ----------
    force : bool
      if True, force it even if the file2 starts with /
    """
    if file2.startswith(os.path.sep) and not force:
        # file2 is already absolute and we are not forced -- keep it
        return file2
    # Anchor file2 (stripped of any leading separators) next to file1.
    return os.path.join(os.path.dirname(file1), file2.lstrip(os.path.sep))
##REF: Name was automagically refactored
def transform_with_boxcar(data, startpoints, boxlength, offset=0, fx=np.mean):
    """Extract boxcar windows from an array and reduce each with `fx`.

    A boxcar is defined by a starting point and a window size (`boxlength`)
    along the first axis of `data`; `fx` (default: averaging) is applied to
    each window individually.

    :param data: An array with an arbitrary number of dimensions.
    :type data: array
    :param startpoints: Boxcar startpoints as index along the first array axis
    :type startpoints: sequence
    :param boxlength: Length of the boxcar window in #array elements
    :type boxlength: int
    :param offset: Optional offset between the configured starting point and the
                   actual begining of the boxcar window.
    :type offset: int
    :rtype: array (len(startpoints) x data.shape[1:])
    """
    if boxlength < 1:
        raise ValueError("Boxlength lower than 1 makes no sense.")

    # Reject any window that would reach outside the data.
    for sp in startpoints:
        begin = sp + offset
        if begin < 0 or begin + boxlength > len(data):
            raise ValueError(
                'Illegal box: start: %i, offset: %i, length: %i'
                % (sp, offset, boxlength))

    # Reduce each window with fx along the first axis.
    reduced = [fx(data[np.arange(sp + offset, sp + offset + boxlength)], axis=0)
               for sp in startpoints]
    return np.array(reduced)
def xunique_combinations(L, n):
    """Generator of unique combinations form a list L of objects in
    groups of size n.

    Parameters
    ----------
    L : list
      list of unique ids
    n : int
      grouping size

    Adopted from Li Daobing
    http://code.activestate.com/recipes/190465/
    (MIT license, according to activestate.com's policy)
    """
    if n == 0:
        yield []
        return
    # Fix each eligible head element, then recurse over the tail.
    for idx in range(len(L) - n + 1):
        head = L[idx]
        for tail in xunique_combinations(L[idx + 1:], n - 1):
            yield [head] + tail
def __xrandom_unique_combinations(L, n, k=None):
    """Generator of unique combinations form a list L of objects in
    groups of size n produced in random order

    "Elegant" but incorrect since pretty much samples the "tail"

    Parameters
    ----------
    L : list
      list of unique ids
    n : int
      grouping size
    k : int or None, optional
      limit number of combinations. All of combinations are produced
      if k is None (default)

    Based on xunique_combinations adopted from Li Daobing
    http://code.activestate.com/recipes/190465/
    (MIT license, according to activestate.com's policy)
    """
    # NOTE(review): abandoned implementation kept for reference (name is
    # double-underscore "private"). It references ``npr``, whose import is
    # commented out at module level, so calling this branch raises NameError;
    # it also delegates to the public xrandom_unique_combinations rather
    # than recursing into itself.
    if k is not None:
        # Just a helper for convenient limiting
        g = xrandom_unique_combinations(L, n)
        for i in xrange(k):
            yield next(g)
    elif n == 0:
        yield []
    else:
        # Biased: permutes candidate head positions but still favours
        # prefixes -- hence the "incorrect" warning in the docstring.
        for i in npr.permutation(len(L)-n+1):
            for cc in xrandom_unique_combinations(
                npr.permutation(L[i+1:]), n-1):
                yield [L[i]]+cc
def ncombinations(n, k):
    """
    A fast way to calculate binomial coefficients by Andrew Dalke
    Source: http://stackoverflow.com/questions/3025162/statistics-combinations-in-python/3025194

    Alternative implementations:
      scipy.misc.comb() -- approximation
    """
    # Out-of-range k yields zero combinations.
    if k < 0 or k > n:
        return 0
    # Multiply numerator/denominator using the smaller of k and n-k terms.
    numerator = 1
    denominator = 1
    for t in range(1, min(k, n - k) + 1):
        numerator *= n
        denominator *= t
        n -= 1
    return numerator // denominator
def xrandom_unique_combinations(L, n, k=None):
    """Generator of unique combinations form a list L of objects in
    groups of size n produced in random order

    Parameters
    ----------
    L : list
      list of unique ids
    n : int
      grouping size
    k : int or None
      limit number of combinations. All of combinations are produced
      if k is None (default)
    """
    ncomb = ncombinations(len(L), n)
    if k is None:
        k = ncomb
    # Strategy: if the combination space is small (or we need a large share
    # of it), materialise all combinations and sample without replacement;
    # otherwise draw random index tuples and reject repeats.
    if (ncomb < 1e6 or k > math.sqrt(ncomb)) \
       and sys.version_info[:2] >= (2, 6):
        # so there is no sense really to mess with controlling for
        # non-repeats -- we can pre-generate all of them and just
        # choose needed number of random samples
        # Python2.5 doesn't have itertools.combinations
        for s in random.sample(list(itertools.combinations(L, n)), k):
            yield list(s)
    else:
        # Let's cycle through permutations while tracking
        # repeats
        seen = set()
        indexes = range(len(L)) # switch to indices so we could
                                # reliably hash them
        # NOTE(review): assumes Python 2's list-returning ``range``;
        # under Python 3, np.random.shuffle(range(...)) would fail -- confirm
        # before porting.
        while len(seen) < min(k, ncomb):
            np.random.shuffle(indexes)
            sample = tuple(sorted(indexes[:n]))
            if not (sample in seen):
                yield [L[x] for x in sample]
                seen.add(sample)
def unique_combinations(L, n, sort=False):
    """Return unique combinations form a list L of objects in groups of size n.

    Parameters
    ----------
    L : list
      list of unique ids
    n : int
      length of the subsets to return
    sort : bool, optional
      if True -- result is sorted before returning

    If you are intended to use only a small subset of possible
    combinations, it is advised to use a generator
    `xunique_combinations`.
    """
    # Materialise the generator; optionally return a sorted copy.
    combos = [combo for combo in xunique_combinations(L, n)]
    return sorted(combos) if sort else combos
##REF: Name was automagically refactored
def indent_doc(v):
    """Given a `value` returns a string where each line is indented

    Needed for a cleaner __repr__ output
    `v` - arbitrary
    """
    # The pattern is a literal newline, so plain str.replace is equivalent
    # to the regex substitution.
    return str(v).replace('\n', '\n ')
def idhash(val):
    """Craft unique id+hash for an object
    """
    # Start from the object's identity; append a content hash where possible
    # so equal-by-value containers still get distinguishable tags.
    res = "%s" % id(val)
    # Lists/dicts are unhashable -- convert to tuples first.
    if isinstance(val, list):
        val = tuple(val)
    elif isinstance(val, dict):
        val = tuple(val.items())
    try:
        if sys.version_info[0] >= 3:
            # TODO: bytes is just a workaround and is slower
            # Anyway -- research joblib for hashing
            res += ":%s" % hash(bytes(val))
        else:
            res += ":%s" % hash(buffer(val))
    except:
        # NOTE(review): deliberate bare excepts -- any failure of the
        # bytes/buffer hashing falls back to plain hash(); truly unhashable
        # values are silently left with only the id() part.
        try:
            res += ":%s" % hash(val)
        except:
            pass
        pass
    return res
##REF: Name was automagically refactored
def is_sorted(items):
    """Check if listed items are in sorted order.

    Parameters
    ----------
    `items`: iterable container

    :return: `True` if were sorted. Otherwise `False` + Warning
    """
    # Sort a deep copy and compare it against the original ordering.
    ordered = deepcopy(items)
    ordered.sort()
    equality = ordered == items
    # Elementwise comparisons (e.g. numpy arrays) must be collapsed to a
    # single boolean.
    if hasattr(equality, '__iter__'):
        equality = np.all(equality)
    return equality
##REF: Name was automagically refactored
def is_in_volume(coord, shape):
    """For given coord check if it is within a specified volume size.

    Returns True/False. Assumes that volume coordinates start at 0.
    No more generalization (arbitrary minimal coord) is done to save
    on performance
    """
    # Every axis of the coordinate must fall inside [0, shape[i]).
    return all(0 <= coord[i] < shape[i] for i in range(len(coord)))
def array_whereequal(a, x):
    """Reliable comparison for `numpy.ndarray`

    `numpy.ndarray` (as of 1.5.0.dev) fails to compare tuples in array of
    dtype object, e.g.

    >>> import numpy as np; a=np.array([1, (0,1)], dtype=object); print a == (0,1), a[1] == (0,1)
    [False False] True

    This function checks if dtype is object and just does list
    comprehension in that case
    """
    # Object arrays get an explicit per-element comparison.
    if a.dtype is np.dtype('object'):
        return np.array([element == x for element in a], dtype=bool)
    return a == x
def version_to_tuple(v):
    """Convert literal string into a tuple, if possible of ints

    Tuple of integers constructed by splitting at '.' or interleaves
    of numerics and alpha numbers
    """
    # Python 2/3 compatible string check (original used `basestring`).
    try:
        string_types = basestring
    except NameError:
        string_types = str
    if isinstance(v, string_types):
        v = v.split('.')
    elif isinstance(v, (tuple, list)):
        # assure tuple
        pass
    else:
        raise ValueError("Do not know how to treat version '%s'" % str(v))

    # Try to convert items into ints
    vres = []

    regex = re.compile('(?P<numeric>[0-9]*)'
                       '(?P<alpha>[~+-]*[A-Za-z]*)(?P<suffix>.*)')
    for x in v:
        try:
            vres += [int(x)]
        except ValueError:
            # try to split into sequences of literals and numerics
            suffix = x
            while suffix != '':
                res = regex.search(suffix)
                if not res:
                    # We can't detect anything meaningful -- let it go as is
                    vres += [suffix]
                    break
                resd = res.groupdict()
                # Bug fixes vs. original:
                #  * the "give up" branch appended to the dict `resd`
                #    (TypeError) instead of `vres`;
                #  * when neither numeric nor alpha matched (e.g. a leading
                #    '_'), `suffix` never shrank, looping forever -- keep the
                #    remainder verbatim and stop instead.
                if resd['numeric'] == '' and resd['alpha'] == '':
                    vres += [suffix]
                    break
                if resd['numeric'] != '':
                    vres += [int(resd['numeric'])]
                if resd['alpha'] != '':
                    vres += [resd['alpha']]
                suffix = resd['suffix']
    v = tuple(vres)

    return v
class SmartVersion(Version):
    """A bit evolved comparison of versions

    The reason for not using python's distutil.version is that it
    seems to have no clue about somewhat common conventions of using
    '-dev' or 'dev' or 'rc' suffixes for upcoming releases (so major
    version does contain upcoming release already).

    So here is an ad-hoc and not as nice implementation
    """

    def parse(self, vstring):
        # Keep the raw string plus its tokenised form (ints interleaved
        # with alpha fragments, see version_to_tuple).
        self.vstring = vstring
        self.version = version_to_tuple(vstring)

    def __str__(self):
        try:
            return self.vstring
        except AttributeError:
            # Version.__init__ doesn't take care about assigning
            # .vstring if None is given, so let's just treat as it is
            # an empty string
            return ""

    def __cmp__(self, other):
        # Coerce `other` so both sides are tokenised the same way.
        if isinstance(other, (str, tuple, list)):
            other = SmartVersion(other)
        elif isinstance(other, SmartVersion):
            pass
        elif isinstance(other, Version):
            other = SmartVersion(other.vstring)
        else:
            raise ValueError("Do not know how to treat version %s"
                             % str(other))

        # NOTE(review): `sys.version >= '3'` is a string comparison and
        # would misbehave for a hypothetical Python "10"; kept as-is.
        if sys.version >= '3':
            def cmp(a, b):
                """Compatibility with Python3 -- regular (deprecated
                in 3) cmp operation should be sufficient for our needs"""
                return (a > b) - (a < b)
        else:
            # having above cmp overloads builtin cmp for this function so we
            # need manually rebind it or just resort to above cmp in general
            # (why not?)
            from __builtin__ import cmp

        # Do ad-hoc comparison of strings
        i = 0
        s, o = self.version, other.version
        regex_prerelease = re.compile('~|-?dev|-?rc|-?svn|-?pre|-?beta|-?alpha', re.I)
        # Walk both token tuples in lockstep; None marks an exhausted side.
        for i in xrange(max(len(s), len(o))):
            if i < len(s): si = s[i]
            else: si = None
            if i < len(o): oi = o[i]
            else: oi = None

            if si == oi:
                continue

            # Compare the pair symmetrically: mult flips the sign when the
            # roles of the two sides are swapped.
            for x,y,mult in ((si, oi, 1), (oi, si, -1)):
                if x is None:
                    if isinstance(y, int):
                        return -mult # we got '.1' suffix
                    if isinstance(y, str):
                        if (regex_prerelease.match(y)):
                            return mult # so we got something to signal
                                        # pre-release, so first one won
                        else:
                            # otherwise the other one wins
                            return -mult
                    else:
                        # NOTE(review): Python 2-only raise syntax; would be a
                        # SyntaxError under Python 3.
                        raise RuntimeError, "Should not have got here with %s" \
                              % y
                elif isinstance(x, int):
                    # numeric beats non-numeric at the same position
                    if not isinstance(y, int):
                        return mult
                    return mult*cmp(x, y) # both are ints
                elif isinstance(x, str):
                    if isinstance(y, str):
                        return mult*cmp(x,y)
        return 0

    if sys.version >= '3':
        # version.py logic in python3 does not rely on deprecated
        # __cmp__ but renames it into _cmp and wraps in those various
        # comparators... thus our good old __cmp__ should be ok for our
        # purposes here
        _cmp = __cmp__
##REF: Name was automagically refactored
def get_break_points(items, contiguous=True):
    """Return a list of break points.

    Parameters
    ----------
    items : iterable
      list of items, such as chunks
    contiguous : bool
      if `True` (default) then raise Value Error if items are not
      contiguous, i.e. a label occur in multiple contiguous sets

    :raises: ValueError

    :return: list of indexes for every new set of items
    """
    seen = []           # items encountered so far
    breakpoints = []    # indexes where a new set starts
    previous = None
    for index, item in enumerate(items):
        if item in seen:
            # A repeat only marks a break when it interrupts another item.
            if index > 0 and previous != item:
                if contiguous:
                    raise ValueError(
                        "Item %s was already seen before" % str(item))
                breakpoints.append(index)
        else:
            seen.append(item)
            breakpoints.append(index)
        previous = item
    return breakpoints
##REF: Name was automagically refactored
def rfe_history_to_maps(history):
    """Convert history generated by RFE into the array of binary maps

    Example:

      rfe_history_to_maps(np.array([3, 2, 1, 0]))

    results in

      array([[ 1.,  1.,  1.,  1.],
             [ 1.,  1.,  1.,  0.],
             [ 1.,  1.,  0.,  0.],
             [ 1.,  0.,  0.,  0.]])

    Parameters
    ----------
    history : sequence of int
      Per-feature step at which the feature was discarded.

    Returns
    -------
    numpy.ndarray
      Binary matrix of shape (steps, nfeatures); row `s` marks the
      features still present at step `s`.
    """
    # BUG FIX: ``xrange`` made this function unusable on Python 3;
    # ``range`` iterates identically on both.
    # assure that it is an array
    history = np.array(history)
    nfeatures = len(history)
    # number of distinct steps covered by the history
    steps = max(history) - min(history) + 1
    history_maps = np.zeros((steps, nfeatures))
    for step in range(steps):
        # a feature is still present at `step` iff it was discarded later
        history_maps[step, history >= step] = 1
    return history_maps
class MapOverlap(object):
    """Compute some overlap stats from a sequence of binary maps.

    When called with a sequence of binary maps (e.g. lists or arrays) the
    fraction of mask elements that are non-zero in a customizable
    proportion of the maps is returned. By default this threshold is set
    to 1.0, i.e. such an element has to be non-zero in *all* maps.

    Three additional maps (same size as original) are computed:

      * overlap_map: binary map which is non-zero for each overlapping
        element.
      * spread_map: binary map which is non-zero for each element that is
        non-zero in any map, but does not exceed the overlap threshold.
      * ovstats_map: map of float with the raw elementwise fraction of
        overlap.

    All maps are available via class members.
    """
    def __init__(self, overlap_threshold=1.0):
        """Nothing to be seen here.
        """
        self.__overlap_threshold = overlap_threshold
        # computed lazily on the first __call__(); None until then
        self.overlap_map = None
        self.spread_map = None
        self.ovstats_map = None

    def __call__(self, maps):
        """Returns fraction of overlapping elements.
        """
        threshold = self.__overlap_threshold
        # elementwise fraction of maps in which each element is non-zero
        fractions = np.mean(maps, axis=0)
        self.ovstats_map = fractions
        self.overlap_map = (fractions >= threshold)
        # non-zero somewhere, yet below the overlap threshold
        self.spread_map = np.logical_and(fractions > 0.0,
                                         fractions < threshold)
        return np.mean(fractions >= threshold)
class Event(dict):
    """Simple class to define properties of an event.

    The class is basically a dictionary. Any properties can
    be passed as keyword arguments to the constructor, e.g.:

      >>> ev = Event(onset=12, duration=2.45)

    Conventions for keys:

    `onset`
      The onset of the event in some unit.
    `duration`
      The duration of the event in the same unit as `onset`.
    `label`
      E.g. the condition this event is part of.
    `chunk`
      Group this event is part of (if any), e.g. experimental run.
    `features`
      Any amount of additional features of the event. This might include
      things like physiological measures, stimulus intensity. Must be a
      mutable sequence (e.g. list), if present.
    """
    _MUSTHAVE = ['onset']
    def __init__(self, **kwargs):
        """
        Parameters
        ----------
        **kwargs : dict
          All keys to describe the Event to initialize its dict.

        Raises
        ------
        ValueError
          If any mandatory key (see `_MUSTHAVE`) is missing.
        """
        # store everything
        dict.__init__(self, **kwargs)
        # basic checks
        # BUG FIX: has_key() and the ``raise E, msg`` statement are
        # Python2-only; ``in`` and raise-call syntax work on 2 and 3.
        for k in Event._MUSTHAVE:
            if k not in self:
                raise ValueError("Event must have '%s' defined." % k)
    ##REF: Name was automagically refactored
    def as_descrete_time(self, dt, storeoffset=False, offsetattr='offset'):
        """Convert `onset` and `duration` information into descrete timepoints.

        Parameters
        ----------
        dt : float
          Temporal distance between two timepoints in the same unit as
          `onset` and `duration`.
        storeoffset : bool
          If True, the temporal offset between original `onset` and
          descretized onset is stored as an additional item.
        offsetattr : str
          The name of the attribute that is used to store the computed
          offset in case the `storeoffset` is enabled.

        Returns
        -------
        A copy of the original `Event` with `onset` and optionally
        `duration` replaced by their corresponding descrete timepoint.
        The new onset will correspond to the timepoint just before or
        exactly at the original onset. The new duration will be the
        number of timepoints covering the event from the computed onset
        timepoint till the timepoint exactly at the end, or just after
        the event.

        Note again, that the new values are expressed as #timepoint and
        not in their original unit!
        """
        dt = float(dt)
        onset = self['onset']
        out = copy(self)
        # get the timepoint just prior the onset
        out['onset'] = int(np.floor(onset / dt))
        if storeoffset:
            # offset between the original and the discretized onset
            out[offsetattr] = onset - (out['onset'] * dt)
        if 'duration' in out:
            # how many timepoint cover the event (from computed onset
            # to the one timepoint just after the end of the event
            out['duration'] = int(np.ceil((onset + out['duration']) / dt)
                                  - out['onset'])
        return out
def value2idx(val, x, solv='round'):
    """Convert a value into the index of the closest matching array element.

    Parameters
    ----------
    val : scalar
      Value that is to be converted.
    x : array or sequence
      One-dimensional array whose elements are used for comparison.
    solv : {'round', 'floor', 'ceil'}
      Resolver strategy: absolute closest element (round), closest
      smaller element (floor), or closest larger element (ceil).

    Raises
    ------
    ValueError
      If `solv` is not one of the supported strategies.

    Returns
    -------
    int
    """
    # signed distance of every element to val
    x = np.asanyarray(x) - val
    if solv == 'round':
        pass
    elif solv == 'ceil':
        # disqualify elements smaller than val
        x[x < 0] = np.inf
    elif solv == 'floor':
        # disqualify elements larger than val
        x[x > 0] = np.inf
    else:
        # BUG FIX: the exception was constructed but never raised, so an
        # unknown `solv` silently fell through to 'round' behavior.
        raise ValueError("Unknown resolving method '%s'." % solv)
    x = np.abs(x)
    idx = np.argmin(x)
    return idx
def mask2slice(mask):
    """Convert a boolean mask vector into an equivalent slice (if possible).

    Parameters
    ----------
    mask: boolean array
      The mask.

    Returns
    -------
    slice or boolean array
      If possible the boolean mask is converted into a `slice`. If this
      is not possible the unmodified boolean mask is returned.
    """
    # the filter should be a boolean array
    # TODO Could be easily extended to also accept index arrays
    if not len(mask):
        raise ValueError("Got an empty mask.")
    # positions of all enabled mask elements
    enabled = mask.nonzero()[0]
    if not len(enabled):
        # nothing selected -> empty slice
        return slice(0)
    start = enabled[0]
    stop = enabled[-1] + 1
    step = None
    if len(enabled) > 1:
        # slicing is only possible with a single, regular step size
        # between the enabled elements
        gaps = np.unique(enabled[1:] - enabled[:-1])
        if len(gaps) > 1:
            # irregular spacing -> cannot be expressed as a slice,
            # hand back the original mask
            return mask
        step = gaps[0]
    sl = slice(start, stop, step)
    if __debug__:
        debug("SPL", "Boolean mask conversion to slice is possible (%s)." % sl)
    return sl
def get_limit_filter(limit, collection):
    """Create a filter array from a limit definition.

    Parameters
    -----------
    limit : None or str or dict
      If ``None`` all elements will be included in the filter. If an
      single attribute name is given, its unique values will be used to
      define chunks of data that are marked in the filter as unique
      integers. Finally, if a dictionary is provided, its keys define
      attribute names and its values (single value or sequence thereof)
      attribute value, where all key-value combinations across all given
      items define a "selection" of elements to be included in the
      filter (OR combination).
    collection : Collection
      Dataset attribute collection instance that contains all attributes
      referenced in the limit specification, as well as defines the
      shape of the filter.

    Raises
    ------
    RuntimeError
      If `limit` is of an unsupported type.

    Returns
    -------
    array
      This array is either boolean, where a `True` elements represent
      including in the filter, or the array is numerical, where it
      unique integer values defines individual chunks of a filter.
    """
    attr_length = collection.attr_length
    if limit is None:
        # no limits: everything passes
        limit_filter = np.ones(attr_length, dtype='bool')
    elif isinstance(limit, str):
        # use the unique values of this attribute to permute each chunk
        # individually
        lattr = collection[limit]
        lattr_data = lattr.value
        limit_filter = np.zeros(attr_length, dtype='int')
        for i, uv in enumerate(lattr.unique):
            limit_filter[lattr_data == uv] = i
    elif isinstance(limit, dict):
        # OR-combine all listed attribute values into a boolean selection
        limit_filter = np.zeros(attr_length, dtype='bool')
        for a in limit:
            if is_sequence_type(limit[a]):
                for v in limit[a]:
                    # enable the samples matching the value 'v' of the
                    # current limit attribute 'a'
                    limit_filter[collection[a].value == v] = True
            else:
                limit_filter[collection[a].value == limit[a]] = True
    else:
        # BUG FIX: typo in the original error message ("Unhandle")
        raise RuntimeError("Unhandled condition")
    return limit_filter
def get_nelements_per_value(data):
    """Returns the number of elements per unique value of some sequence.

    Parameters
    ----------
    data : sequence
      This can be any sequence. In addition also ArrayCollectables are
      supported and this function will make use of any available
      pre-cached list of unique values.

    Returns
    -------
    dict with the number of elements (value) per unique value (key) in
    the sequence.
    """
    if hasattr(data, 'unique'):
        # an ArrayAttribute: save some time by reusing its pre-cached
        # unique values
        uniques = data.unique
        elements = data.value
    else:
        uniques = np.unique(data)
        elements = data
    # use dictionary to cope with arbitrary values
    counts = dict.fromkeys(uniques, 0)
    for element in elements:
        counts[element] += 1
    return counts
| 32.122347
| 98
| 0.587175
|
4a0e1a3377976a9363bfd72112c1f365814bb8e8
| 17,453
|
py
|
Python
|
coverage/html.py
|
nascimento/coveragepy
|
9adb80c9612d3d364362bd3b6551532fe9ec7d5f
|
[
"Apache-2.0"
] | null | null | null |
coverage/html.py
|
nascimento/coveragepy
|
9adb80c9612d3d364362bd3b6551532fe9ec7d5f
|
[
"Apache-2.0"
] | null | null | null |
coverage/html.py
|
nascimento/coveragepy
|
9adb80c9612d3d364362bd3b6551532fe9ec7d5f
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""HTML reporting for coverage.py."""
import collections
import datetime
import json
import os
import re
import shutil
import coverage
from coverage import env
from coverage.backward import iitems, SimpleNamespace
from coverage.data import add_data_to_hash
from coverage.files import flat_rootname
from coverage.misc import CoverageException, ensure_dir, file_be_gone, Hasher, isolate_module
from coverage.report import get_analysis_to_report
from coverage.results import Numbers
from coverage.templite import Templite
os = isolate_module(os)
# Static files are looked for in a list of places.
STATIC_PATH = [
# The place Debian puts system Javascript libraries.
"/usr/share/javascript",
# Our htmlfiles directory.
os.path.join(os.path.dirname(__file__), "htmlfiles"),
]
def data_filename(fname, pkgdir=""):
"""Return the path to a data file of ours.
The file is searched for on `STATIC_PATH`, and the first place it's found,
is returned.
Each directory in `STATIC_PATH` is searched as-is, and also, if `pkgdir`
is provided, at that sub-directory.
"""
tried = []
for static_dir in STATIC_PATH:
static_filename = os.path.join(static_dir, fname)
if os.path.exists(static_filename):
return static_filename
else:
tried.append(static_filename)
if pkgdir:
static_filename = os.path.join(static_dir, pkgdir, fname)
if os.path.exists(static_filename):
return static_filename
else:
tried.append(static_filename)
raise CoverageException(
"Couldn't find static file %r from %r, tried: %r" % (fname, os.getcwd(), tried)
)
def read_data(fname):
    """Return the contents of a data file of ours."""
    # Resolve via data_filename() so all the STATIC_PATH logic applies.
    with open(data_filename(fname)) as data_in:
        return data_in.read()
def write_html(fname, html):
    """Write `html` to `fname`, properly encoded."""
    # Drop leading whitespace at the start of the document and trailing
    # whitespace on every line, then guarantee a final newline.
    cleaned = re.sub(r"(\A\s+)|(\s+$)", "", html, flags=re.MULTILINE) + "\n"
    with open(fname, "wb") as out_file:
        # Non-ASCII characters become numeric character references.
        out_file.write(cleaned.encode('ascii', 'xmlcharrefreplace'))
class HtmlDataGeneration(object):
    """Generate structured data to be turned into HTML reports."""
    # Label used for lines measured only under the anonymous ("") context.
    EMPTY = "(empty)"
    def __init__(self, cov):
        # `cov` is the Coverage object whose measured data we report on.
        self.coverage = cov
        self.config = self.coverage.config
        data = self.coverage.get_data()
        self.has_arcs = data.has_arcs()
        if self.config.show_contexts:
            if data.measured_contexts() == set([""]):
                self.coverage._warn("No contexts were measured")
        data.set_query_contexts(self.config.report_contexts)
    def data_for_file(self, fr, analysis):
        """Produce the data needed for one file's report.

        `fr` is a file reporter for the source file and `analysis` holds
        its measured results.  Returns a SimpleNamespace with
        `relative_filename`, `nums`, and a `lines` list carrying one
        SimpleNamespace per source line.
        """
        if self.has_arcs:
            missing_branch_arcs = analysis.missing_branch_arcs()
            arcs_executed = analysis.arcs_executed()
        if self.config.show_contexts:
            contexts_by_lineno = analysis.data.contexts_by_lineno(fr.filename)
        else:
            # Empty default so lookups below are uniform when contexts
            # are disabled.
            contexts_by_lineno = collections.defaultdict(list)
        lines = []
        for lineno, tokens in enumerate(fr.source_token_lines(), start=1):
            # Figure out how to mark this line.
            category = None
            short_annotations = []
            long_annotations = []
            if lineno in analysis.excluded:
                # Explicitly excluded from coverage.
                category = 'exc'
            elif lineno in analysis.missing:
                # Statement that never executed.
                category = 'mis'
            elif self.has_arcs and lineno in missing_branch_arcs:
                # Executed, but some branch from this line was not taken.
                category = 'par'
                for b in missing_branch_arcs[lineno]:
                    if b < 0:
                        # Negative arc targets mean "exit the function".
                        short_annotations.append("exit")
                    else:
                        short_annotations.append(b)
                    long_annotations.append(fr.missing_arc_description(lineno, b, arcs_executed))
            elif lineno in analysis.statements:
                # Fully covered statement.
                category = 'run'
            contexts = contexts_label = None
            context_list = None
            if category and self.config.show_contexts:
                contexts = sorted(c or self.EMPTY for c in contexts_by_lineno[lineno])
                if contexts == [self.EMPTY]:
                    contexts_label = self.EMPTY
                else:
                    contexts_label = "{} ctx".format(len(contexts))
                    context_list = contexts
            lines.append(SimpleNamespace(
                tokens=tokens,
                number=lineno,
                category=category,
                statement=(lineno in analysis.statements),
                contexts=contexts,
                contexts_label=contexts_label,
                context_list=context_list,
                short_annotations=short_annotations,
                long_annotations=long_annotations,
            ))
        file_data = SimpleNamespace(
            relative_filename=fr.relative_filename(),
            nums=analysis.numbers,
            lines=lines,
        )
        return file_data
class HtmlReporter(object):
    """HTML reporting."""
    # These files will be copied from the htmlfiles directory to the output
    # directory.
    STATIC_FILES = [
        ("style.css", ""),
        ("jquery.min.js", "jquery"),
        ("jquery.ba-throttle-debounce.min.js", "jquery-throttle-debounce"),
        ("jquery.hotkeys.js", "jquery-hotkeys"),
        ("jquery.isonscreen.js", "jquery-isonscreen"),
        ("jquery.tablesorter.min.js", "jquery-tablesorter"),
        ("coverage_html.js", ""),
        ("keybd_closed.png", ""),
        ("keybd_open.png", ""),
    ]
    def __init__(self, cov):
        # `cov` is the Coverage object whose data is being reported.
        self.coverage = cov
        self.config = self.coverage.config
        self.directory = self.config.html_dir
        title = self.config.html_title
        if env.PY2:
            # Templates need text, not bytes, under Python 2.
            title = title.decode("utf8")
        if self.config.extra_css:
            self.extra_css = os.path.basename(self.config.extra_css)
        else:
            self.extra_css = None
        self.data = self.coverage.get_data()
        self.has_arcs = self.data.has_arcs()
        # Per-file summaries for index.html, and the per-file Numbers
        # that are summed into the grand total.
        self.file_summaries = []
        self.all_files_nums = []
        self.incr = IncrementalChecker(self.directory)
        self.datagen = HtmlDataGeneration(self.coverage)
        self.totals = Numbers()
        self.template_globals = {
            # Functions available in the templates.
            'escape': escape,
            'pair': pair,
            'len': len,
            # Constants for this report.
            '__url__': coverage.__url__,
            '__version__': coverage.__version__,
            'title': title,
            'time_stamp': datetime.datetime.now().strftime('%Y-%m-%d %H:%M'),
            'extra_css': self.extra_css,
            'has_arcs': self.has_arcs,
            'show_contexts': self.config.show_contexts,
            # Constants for all reports.
            # These css classes determine which lines are highlighted by default.
            'category': {
                'exc': 'exc show_exc',
                'mis': 'mis show_mis',
                'par': 'par run show_par',
                'run': 'run',
            }
        }
        self.pyfile_html_source = read_data("pyfile.html")
        self.source_tmpl = Templite(self.pyfile_html_source, self.template_globals)
    def report(self, morfs):
        """Generate an HTML report for `morfs`.
        `morfs` is a list of modules or file names.
        """
        # Read the status data and check that this run used the same
        # global data as the last run.
        self.incr.read()
        self.incr.check_global_data(self.config, self.pyfile_html_source)
        # Process all the files.
        for fr, analysis in get_analysis_to_report(self.coverage, morfs):
            self.html_file(fr, analysis)
        if not self.all_files_nums:
            raise CoverageException("No data to report.")
        self.totals = sum(self.all_files_nums)
        # Write the index file.
        self.index_file()
        self.make_local_static_report_files()
        # Overall percent covered, or 0 when nothing was measured.
        return self.totals.n_statements and self.totals.pc_covered
    def make_local_static_report_files(self):
        """Make local instances of static files for HTML report."""
        # The files we provide must always be copied.
        for static, pkgdir in self.STATIC_FILES:
            shutil.copyfile(
                data_filename(static, pkgdir),
                os.path.join(self.directory, static)
            )
        # The user may have extra CSS they want copied.
        if self.extra_css:
            shutil.copyfile(
                self.config.extra_css,
                os.path.join(self.directory, self.extra_css)
            )
    def html_file(self, fr, analysis):
        """Generate an HTML file for one source file."""
        rootname = flat_rootname(fr.relative_filename())
        html_filename = rootname + ".html"
        ensure_dir(self.directory)
        html_path = os.path.join(self.directory, html_filename)
        # Get the numbers for this file.
        nums = analysis.numbers
        self.all_files_nums.append(nums)
        if self.config.skip_covered:
            # Don't report on 100% files.
            no_missing_lines = (nums.n_missing == 0)
            no_missing_branches = (nums.n_partial_branches == 0)
            if no_missing_lines and no_missing_branches:
                # If there's an existing file, remove it.
                file_be_gone(html_path)
                return
        # Find out if the file on disk is already correct.
        if self.incr.can_skip_file(self.data, fr, rootname):
            self.file_summaries.append(self.incr.index_info(rootname))
            return
        # Write the HTML page for this file.
        file_data = self.datagen.data_for_file(fr, analysis)
        for ldata in file_data.lines:
            # Build the HTML for the line.
            html = []
            for tok_type, tok_text in ldata.tokens:
                if tok_type == "ws":
                    html.append(escape(tok_text))
                else:
                    tok_html = escape(tok_text) or ' '
                    html.append(
                        u'<span class="{}">{}</span>'.format(tok_type, tok_html)
                    )
            ldata.html = ''.join(html)
            if ldata.short_annotations:
                # 202F is NARROW NO-BREAK SPACE.
                # 219B is RIGHTWARDS ARROW WITH STROKE.
                ldata.annotate = u", ".join(
                    u"{} ↛ {}".format(ldata.number, d)
                    for d in ldata.short_annotations
                )
            else:
                ldata.annotate = None
            if ldata.long_annotations:
                longs = ldata.long_annotations
                if len(longs) == 1:
                    ldata.annotate_long = longs[0]
                else:
                    # Several missed branches: number them for readability.
                    ldata.annotate_long = u"{:d} missed branches: {}".format(
                        len(longs),
                        u", ".join(
                            u"{:d}) {}".format(num, ann_long)
                            for num, ann_long in enumerate(longs, start=1)
                        ),
                    )
            else:
                ldata.annotate_long = None
            css_classes = []
            if ldata.category:
                css_classes.append(self.template_globals['category'][ldata.category])
            ldata.css_class = ' '.join(css_classes) or "pln"
        html = self.source_tmpl.render(file_data.__dict__)
        write_html(html_path, html)
        # Save this file's information for the index file.
        index_info = {
            'nums': nums,
            'html_filename': html_filename,
            'relative_filename': fr.relative_filename(),
        }
        self.file_summaries.append(index_info)
        self.incr.set_index_info(rootname, index_info)
    def index_file(self):
        """Write the index.html file for this report."""
        index_tmpl = Templite(read_data("index.html"), self.template_globals)
        html = index_tmpl.render({
            'files': self.file_summaries,
            'totals': self.totals,
        })
        write_html(os.path.join(self.directory, "index.html"), html)
        # Write the latest hashes for next time.
        self.incr.write()
class IncrementalChecker(object):
    """Logic and data to support incremental reporting."""
    STATUS_FILE = "status.json"
    STATUS_FORMAT = 2
    # pylint: disable=wrong-spelling-in-comment,useless-suppression
    # The data looks like:
    #
    # {
    #     "format": 2,
    #     "globals": "540ee119c15d52a68a53fe6f0897346d",
    #     "version": "4.0a1",
    #     "files": {
    #         "cogapp___init__": {
    #             "hash": "e45581a5b48f879f301c0f30bf77a50c",
    #             "index": {
    #                 "html_filename": "cogapp___init__.html",
    #                 "relative_filename": "cogapp/__init__",
    #                 "nums": [ 1, 14, 0, 0, 0, 0, 0 ]
    #             }
    #         },
    #         ...
    #         "cogapp_whiteutils": {
    #             "hash": "8504bb427fc488c4176809ded0277d51",
    #             "index": {
    #                 "html_filename": "cogapp_whiteutils.html",
    #                 "relative_filename": "cogapp/whiteutils",
    #                 "nums": [ 1, 59, 0, 1, 28, 2, 2 ]
    #             }
    #         }
    #     }
    # }
    def __init__(self, directory):
        # Directory where the HTML report (and status.json) lives.
        self.directory = directory
        self.reset()
    def reset(self):
        """Initialize to empty. Causes all files to be reported."""
        self.globals = ''
        self.files = {}
    def read(self):
        """Read the information we stored last time."""
        usable = False
        try:
            status_file = os.path.join(self.directory, self.STATUS_FILE)
            with open(status_file) as fstatus:
                status = json.load(fstatus)
        except (IOError, ValueError):
            # Missing or corrupt status file: start from scratch.
            usable = False
        else:
            usable = True
            if status['format'] != self.STATUS_FORMAT:
                usable = False
            elif status['version'] != coverage.__version__:
                usable = False
        if usable:
            self.files = {}
            for filename, fileinfo in iitems(status['files']):
                # Stored as a plain list; rehydrate into a Numbers object.
                fileinfo['index']['nums'] = Numbers(*fileinfo['index']['nums'])
                self.files[filename] = fileinfo
            self.globals = status['globals']
        else:
            self.reset()
    def write(self):
        """Write the current status."""
        status_file = os.path.join(self.directory, self.STATUS_FILE)
        files = {}
        for filename, fileinfo in iitems(self.files):
            # Serialize Numbers back to its constructor arguments.
            fileinfo['index']['nums'] = fileinfo['index']['nums'].init_args()
            files[filename] = fileinfo
        status = {
            'format': self.STATUS_FORMAT,
            'version': coverage.__version__,
            'globals': self.globals,
            'files': files,
        }
        with open(status_file, "w") as fout:
            json.dump(status, fout, separators=(',', ':'))
    def check_global_data(self, *data):
        """Check the global data that can affect incremental reporting."""
        m = Hasher()
        for d in data:
            m.update(d)
        these_globals = m.hexdigest()
        if self.globals != these_globals:
            # Global settings changed: every file must be re-reported.
            self.reset()
            self.globals = these_globals
    def can_skip_file(self, data, fr, rootname):
        """Can we skip reporting this file?
        `data` is a CoverageData object, `fr` is a `FileReporter`, and
        `rootname` is the name being used for the file.
        """
        m = Hasher()
        m.update(fr.source().encode('utf-8'))
        add_data_to_hash(data, fr.filename, m)
        this_hash = m.hexdigest()
        that_hash = self.file_hash(rootname)
        if this_hash == that_hash:
            # Nothing has changed to require the file to be reported again.
            return True
        else:
            self.set_file_hash(rootname, this_hash)
            return False
    def file_hash(self, fname):
        """Get the hash of `fname`'s contents."""
        return self.files.get(fname, {}).get('hash', '')
    def set_file_hash(self, fname, val):
        """Set the hash of `fname`'s contents."""
        self.files.setdefault(fname, {})['hash'] = val
    def index_info(self, fname):
        """Get the information for index.html for `fname`."""
        return self.files.get(fname, {}).get('index', {})
    def set_index_info(self, fname, info):
        """Set the information for index.html for `fname`."""
        self.files.setdefault(fname, {})['index'] = info
# Helpers for templates and generating HTML
def escape(t):
    """HTML-escape the text in `t`.

    This is only suitable for HTML text, not attributes.
    """
    # Convert HTML special chars into HTML entities.
    # BUG FIX: the replacements had degraded into no-ops (replacing "&"
    # with "&" and "<" with "<"), so nothing was actually escaped.  Escape
    # "&" first so the ampersand introduced by "&lt;" is not re-escaped.
    return t.replace("&", "&amp;").replace("<", "&lt;")
def pair(ratio):
    """Format a pair of numbers so JavaScript can read them in an attribute.

    `ratio` is a 2-tuple, e.g. (covered, total); the two values are
    rendered separated by a single space.
    """
    return "%s %s" % ratio
| 34.288802
| 97
| 0.566607
|
4a0e1a9be0e7e9fae83c2a8307a002029de78257
| 1,045
|
py
|
Python
|
evaluate/evaluate_detrac_coco_api/centernet_config_detrac_test.py
|
yuanliangxie/YOLOv3_simple_baseline
|
325e2963ae770e6f45912f3142941d3bddaf9d6e
|
[
"Apache-2.0"
] | 1
|
2022-02-26T10:13:08.000Z
|
2022-02-26T10:13:08.000Z
|
evaluate/evaluate_detrac_coco_api/centernet_config_detrac_test.py
|
yuanliangxie/YOLOv3_simple_baseline
|
325e2963ae770e6f45912f3142941d3bddaf9d6e
|
[
"Apache-2.0"
] | null | null | null |
evaluate/evaluate_detrac_coco_api/centernet_config_detrac_test.py
|
yuanliangxie/YOLOv3_simple_baseline
|
325e2963ae770e6f45912f3142941d3bddaf9d6e
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# project
# test
# Evaluation-time configuration for CenterNet on the UA-DETRAC test set.
TEST = {
    "device_id": 0,  # GPU index used for evaluation
    "PROJECT_PATH": "/home/xyl/PycharmProjects/YOLOV3_SUPER",  # absolute project root
    "test_path": "../../data/detrac/test.txt",  # list of test images
    "test_ignore_region": "../../data/detrac/test_ignore_region.txt",  # regions excluded from scoring
    "test_labels_path": "../../data/detrac/labels_test",  # ground-truth label directory
    'DATA':{#"CLASSES":['car', 'bus', 'van', 'others'],
            "CLASSES":['car'],  # single merged vehicle class
            "NUM":1},  # number of classes; must match model.classes below
    "TEST_IMG_SIZE":640,  # network input resolution
    "BATCH_SIZE":32,
    "NUMBER_WORKERS":0,  # DataLoader worker processes
    "CONF_THRESH":0.1,  # minimum detection confidence kept
    "NMS_THRESH":0.5,  # IoU threshold for non-maximum suppression
    "MULTI_SCALE_TEST":False,
    "FLIP_TEST":False,
    "model": {
        "classes": 1,
    },
    # Checkpoint to evaluate; alternatives kept below for reference.
    "pretrain_snapshot": "/home/xyl/PycharmProjects/YOLOV3_SUPER/darknet53/Multi-scale_try_centernet_test_UA_detrac/20210103214308/model_map_0.983.pth",
    #/home/xyl/PycharmProjects/YOLOV3_SUPER/darknet53/Multi-scale_try_centernet_test_UA_detrac/20210103202155/model.pth
    #/home/xyl/PycharmProjects/YOLOV3_SUPER/darknet53/Multi-scale_try_centernet_test_UA_detrac/20201016224944/model_map_0.761.pth
    "generate_analyze_figure": False,  # dump per-image analysis figures
    "generate_analyze_figure_dir_name":"analyze_loss_centernet_18"
}
| 34.833333
| 149
| 0.747368
|
4a0e1af90c8104d462c8619d482e865c24cc3686
| 3,656
|
py
|
Python
|
taskA.py
|
gvmossato/numerico-ep1
|
10b360e7ea4724ea90093f033718d0029afa28b6
|
[
"MIT"
] | null | null | null |
taskA.py
|
gvmossato/numerico-ep1
|
10b360e7ea4724ea90093f033718d0029afa28b6
|
[
"MIT"
] | null | null | null |
taskA.py
|
gvmossato/numerico-ep1
|
10b360e7ea4724ea90093f033718d0029afa28b6
|
[
"MIT"
] | null | null | null |
# ============================== #
# Módulo de execução da tarefa A #
# ============================== #
import numpy as np
import matplotlib.pyplot as plt
from EPLib import QR, gen_tridiagonal, gen_eign, print_table, ctext
# ================ #
# Executa a tarefa #
# ================ #
def run(epsilon, n_vals):
    """Run task A: QR eigen-decomposition of symmetric tridiagonal matrices.

    Parameters
    ----------
    epsilon : float
        Convergence tolerance handed to the QR routine.
    n_vals : sequence of int
        Matrix dimensions to test; each one is run with and without
        spectral shift.
    """
    amount = len(n_vals) # Number of unique runs
    count = 0 # Current run counter
    results = [] # Stores the result of each run
    valid = [] # Stores the analytically correct eigenvalues/eigenvectors
    # Stores the remaining information about the runs
    infos = {
        'Teste'   : [], # Test (run) number
        'Desloc.' : [], # With or without shift
        'n'       : [], # Matrix dimension
        'k'       : []  # Iterations
    }
    # For each n, run with and without shift
    for shifted in [True, False]:
        for n in n_vals:
            A = gen_tridiagonal(alpha=2, beta=-1, n=n)
            Q, R, k = QR(A, epsilon, shifted)
            Lambda = np.diag(R)
            results.append((Q, Lambda))
            valid.append(gen_eign(n))
            infos['Teste'].append(str(count))
            infos['Desloc.'].append(shifted)
            infos['k'].append(k)
            infos['n'].append(n)
            count += 1
            progress = np.round(count/(2*amount) * 100, 2)
            print(f"Progresso: {progress}%  ", end='\r')
    print()
    print(f"\n{ctext('Concluído!', 'g')} Comparação dos resultados:\n")
    print_table(infos) # Prints the information table
    # ==== #
    # Plot #
    # ==== #
    while True:
        # Input with default answer 's' (yes)
        plot_graph = input(f"\nExibir gráfico de iterações por dimensão da matriz? ([{ctext('s', 'g')}]/{ctext('n', 'r')}): ") or 's'
        if plot_graph.lower() == 's': # Show the plot
            plt.style.use('seaborn') # Style: 'seaborn'
            plt.plot(n_vals, infos['k'][ :amount], marker='o') # With shift
            plt.plot(n_vals, infos['k'][amount: ], marker='o') # Without shift
            plt.title('Influência do deslocamento na eficiência do algoritmo')
            plt.xlabel('Dimensão da matriz (n)')
            plt.ylabel('Iterações até a convergência (k)')
            plt.legend(['Com deslocamento', 'Sem deslocamento'])
            plt.show()
            break
        elif plot_graph.lower() == 'n': # Skip the plot
            break
        else:
            print(ctext('Entrada inválida.', 'r'))
    # =========================== #
    # Eigenvalues and eigenvectors #
    # =========================== #
    while True: # Allows inspecting the outputs concisely
        print(f"\nDeseja verificar {ctext('autovalores', 'm')} e {ctext('autovetores', 'm')} para algum teste?")
        num = input(f"Entre com o {ctext('número de um teste', 'y')} ou digite {ctext('sair', 'r')} para encerrar a tarefa: ")
        if num.lower() == 'sair':
            break
        elif num in infos['Teste']:
            num = int(num)
            print('\033[35m')
            print('\n' + 50*'=')
            print(f"> Teste #{infos['Teste'][num]}")
            print(50*'=')
            print('\033[0m')
            print(ctext('> OBTIDOS', 'b'))
            print('Autovalores:\n', results[num][1], end='\n\n')
            print('Autovetores:\n', results[num][0])
            print(ctext('\n> ESPERADOS', 'b'))
            print('Autovalores:\n', valid[num][1], end='\n\n')
            print('Autovetores:\n', valid[num][0])
        else:
            print(ctext('Entrada inválida.', 'r'))
    return
| 32.353982
| 133
| 0.511761
|
4a0e1b38308e77155ddfeca1a59f2f2ab468d2ee
| 12,505
|
py
|
Python
|
src/metapath2vec.py
|
erdiolmezogullari/Metapath2vec-1
|
d71e89d9beb3b888f0bc411e9d94eba7453b3619
|
[
"MIT"
] | 1
|
2020-11-15T07:54:22.000Z
|
2020-11-15T07:54:22.000Z
|
src/metapath2vec.py
|
erdiolmezogullari/loginaway_metapath2vec
|
d71e89d9beb3b888f0bc411e9d94eba7453b3619
|
[
"MIT"
] | null | null | null |
src/metapath2vec.py
|
erdiolmezogullari/loginaway_metapath2vec
|
d71e89d9beb3b888f0bc411e9d94eba7453b3619
|
[
"MIT"
] | null | null | null |
# coding: utf-8
import tensorflow as tf
import numpy as np
import argparse
from helper import getData, writeData, set_gpu
from random import choices
from time import time
class metapath2vec():
    def load_data(self):
        '''
        Load data from files.

        Populates, via the project helper getData():
          node2id    : node name -> integer id
          type2set   : node-type char -> set of node ids of that type
                       (inferred from usage in neg_sample -- TODO confirm)
          maxsentlen : length of the longest sentence, in nodes
          sent_num   : number of sentences in the corpus
        '''
        self.node2id, self.type2set, self.maxsentlen, self.sent_num= getData(self.args.filename, self.args.typestr)
        # Compute max number of windows in a sentence
        self.num_windows = self.maxsentlen
        self.id2node = {self.node2id[key]: key for key in self.node2id}
        # Specify default index, used for not-existing words, corresponding to the last line of embed_matrix.
        self.default_ind = len(self.node2id)
def neg_sample(self, cont_list):
'''
Conduct negative sampling for cont_list.
Args;
cont_list: A 2 dimensional context node id list.
Returns:
neg_list: A 3 dimensional tensor (python list) of form
[batch, windows_size, neg_samples].
'''
neg_list = []
for context in cont_list:
line = []
for id_ in context:
if id_ == self.default_ind:
id_set = tuple()
avlbl_size = 0
else:
id_set = self.type2set[self.id2node[id_][0]].difference((id_,))
avlbl_size = min(len(id_set), self.args.neg_size)
line.extend(choices(tuple(id_set), k=avlbl_size)+[self.default_ind for _ in range(self.args.neg_size - avlbl_size)])
neg_list.append(line)
return neg_list
    def get_batch(self):
        '''
        Generate a batch of size self.args.batch_size.

        Returns:
            A generator that generate batches, each batch is of the form
            batch <dict> :
                'neg_ind': Negative samples indexes tensor with size (batch_size, num_windows, neg*2*neighbour_size).
                'cor_ind': Core samples indexes tensor with size (batch_size, num_windows, 1).
                'cont_ind': Context samples indexes tensor with size (batch_size, num_windows, 2*neighbour_size).

        NOTE(review): each sentence fills a full (num_windows)-sized row,
        which appears to assume every kept sentence has exactly
        maxsentlen words -- verify against the getData/corpus contract.
        The final, possibly partial, batch is truncated to sent_count
        rows before being yielded.
        '''
        batch = {}
        sent_count = 0  # sentences filled into the current batch so far
        batch['neg_ind'] = np.empty(\
            (self.args.batch_size, self.num_windows, self.args.neg_size * 2 * self.args.neighbour_size), dtype=np.int32)
        batch['cor_ind'] = np.empty(\
            (self.args.batch_size, self.num_windows, 1), dtype=np.int32)
        batch['cont_ind'] = np.empty(\
            (self.args.batch_size, self.num_windows, 2 * self.args.neighbour_size), dtype=np.int32)
        # Padding ids placed before/after each sentence so that edge
        # words still get full-width context windows.
        patch_list = [self.default_ind for _ in range(self.args.neighbour_size)]
        for line in open(self.args.filename, 'r'):
            if len(line) < 2: continue
            # generate word id list (from a sentence)
            wrd_list = patch_list + [self.node2id[wrd] for wrd in line.strip().split()] + patch_list
            # generate core id list
            core_list = [wrd_list[i] \
                    for i in range(self.args.neighbour_size, len(wrd_list) - self.args.neighbour_size)]
            # generate context id list
            cont_list = [wrd_list[cor_ind - self.args.neighbour_size: cor_ind] + \
                    wrd_list[cor_ind + 1: cor_ind + self.args.neighbour_size + 1] \
                    for cor_ind in range(self.args.neighbour_size, len(wrd_list) - self.args.neighbour_size)]
            # generate negative samples
            neg_list = self.neg_sample(cont_list)
            batch['cor_ind'][sent_count] = np.reshape(core_list, (len(core_list), 1))
            batch['cont_ind'][sent_count] = cont_list
            batch['neg_ind'][sent_count] = neg_list
            sent_count += 1
            if sent_count == self.args.batch_size:
                sent_count = 0
                yield batch
                # Allocate fresh arrays for the next batch (the yielded
                # ones may still be referenced by the consumer).
                batch['neg_ind'] = np.empty(\
                    (self.args.batch_size, self.num_windows, self.args.neg_size * 2 * self.args.neighbour_size), dtype=np.int32)
                batch['cor_ind'] = np.empty(\
                    (self.args.batch_size, self.num_windows, 1), dtype=np.int32)
                batch['cont_ind'] = np.empty(\
                    (self.args.batch_size, self.num_windows, 2 * self.args.neighbour_size), dtype=np.int32)
        # Trailing partial batch: keep only the rows actually filled.
        batch = {key: batch[key][: sent_count] for key in batch}
        yield batch
def add_placeholders(self):
'''
Add placeholders for metapath2vec.
'''
self.negative_ind = tf.placeholder(tf.int32, \
(None, self.num_windows, self.args.neg_size * 2 * self.args.neighbour_size))
self.core_ind = tf.placeholder(tf.int32, \
(None, self.num_windows, 1))
self.context_ind = tf.placeholder(tf.int32, \
(None, self.num_windows, 2 * self.args.neighbour_size))
def create_feed_dict(self, batch):
'''
Create feed dict for training.
Args:
batch <dict>: Batch generated from next(batch_generator), where batch_generator is
the return of self.get_batch().
Returns:
feed_dict <dict>: the feed dictionary mapping from placeholders to values.
'''
feed_dict = {}
feed_dict[self.negative_ind] = batch['neg_ind']
feed_dict[self.core_ind] = batch['cor_ind']
feed_dict[self.context_ind] = batch['cont_ind']
return feed_dict
def add_embedding(self):
    '''
    Create the node-embedding table plus one frozen all-zero row appended
    at the end (used as a padding embedding).
    '''
    with tf.variable_scope('Embeddings'):
        trainable_rows = tf.get_variable(
            'embed_matrix',
            [len(self.node2id), self.args.embed_dim], tf.float32,
            initializer=tf.random_normal_initializer(),
            regularizer=self.regularizer)
        zero_row = tf.get_variable(
            'padding',
            [1, self.args.embed_dim], tf.float32,
            initializer=tf.zeros_initializer(),
            trainable=False)
        self.embed_matrix = tf.concat([trainable_rows, zero_row], axis=0)
def add_model(self):
    '''
    Build the metapath2vec (heterogeneous skip-gram) loss.

    The objective maximizes log sigma(core . context) for observed pairs and
    log sigma(-core . negative) for negative samples; the returned loss is
    the negated objective, optionally plus L2 regularization.

    Returns:
        loss: Scalar tensor, the training loss.
    '''
    with tf.name_scope('Main_Model'):
        neg_embed = tf.nn.embedding_lookup(self.embed_matrix, self.negative_ind)
        core_embed = tf.nn.embedding_lookup(self.embed_matrix, self.core_ind)
        cont_embed = tf.nn.embedding_lookup(self.embed_matrix, self.context_ind)
        # Dot products of each core embedding against its negative/context embeddings.
        neg_core = tf.matmul(core_embed, tf.transpose(neg_embed, [0, 1, 3, 2]))
        cont_core = tf.matmul(core_embed, tf.transpose(cont_embed, [0, 1, 3, 2]))
        # Clip sigmoid outputs away from 0 so tf.log stays finite.
        sec_neg = tf.log(tf.clip_by_value(tf.sigmoid(tf.negative(neg_core)), 1e-6, 1.0))
        sec_cont = tf.log(tf.clip_by_value(tf.sigmoid(cont_core), 1e-6, 1.0))
        objective = tf.reduce_sum(sec_neg) + tf.reduce_sum(sec_cont)
        loss = tf.negative(objective)
        # Fixed idiom: compare with None via identity, not equality.
        if self.regularizer is not None:
            loss += tf.contrib.layers.apply_regularization(self.regularizer,
                tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
    return loss
def add_optimizer(self, loss):
    '''
    Create the Adam training operation for the given loss.
    Args:
        loss: Model loss tensor from add_model().
    Returns:
        train_op: Operation that performs one optimization step.
    '''
    with tf.name_scope('Optimizer'):
        return tf.train.AdamOptimizer(self.args.learning_rate).minimize(loss)
def run_epoch(self, sess, epoch_number):
    '''
    Run one full training pass over the data.

    Args:
        sess: tf.Session() object.
        epoch_number: Index of the current epoch (for logging / checkpoints).
    Returns:
        average_loss: Mean mini-batch loss over this epoch.
    '''
    loss_list = []
    current_sent_num = 0
    st = time()  # wall-clock anchor for the periodic checkpoint below
    for step, batch in enumerate(self.get_batch()):
        feed_dict = self.create_feed_dict(batch)
        batch_loss, _ = sess.run([self.loss, self.train_op], feed_dict=feed_dict)
        loss_list.append(batch_loss)
        current_sent_num += self.args.batch_size
        # Progress log every 10 steps.
        if step % 10 == 0:
            print('[Epoch {} -- {}/{} ({})]: Train Loss:\t {}\r'.format\
                (epoch_number, current_sent_num, self.sent_num, current_sent_num/self.sent_num,
                np.mean(loss_list)))
        now = time()
        # Mid-epoch checkpoint every 30 minutes (1800 s) of wall-clock time.
        if now - st > 1800:
            print()
            self.check_point(np.mean(loss_list), epoch_number, sess)
            st = time()
    print()
    return np.mean(loss_list)
def check_point(self, loss, epoch, sess):
    '''
    Compare the current epoch loss with the best seen so far and, on
    improvement, dump the embedding matrix to the output file.
    Args:
        loss: Mean loss of the current epoch.
        epoch: Epoch number.
        sess: tf.Session() object.
    '''
    print('Checkpoint at Epoch {}, Current loss: {}\t| History best: {}'.format(epoch, loss, self.best_loss))
    if loss >= self.best_loss:
        return
    self.best_loss = loss
    embeddings = sess.run(self.embed_matrix)
    writeData('./output/' + self.args.outname, embeddings, self.node2id)
    print('Embeddings are successfully output to file.')
def fit(self, sess):
    '''
    Train the model for args.epoch epochs, checkpointing after each epoch.
    Args:
        sess: tf.Session() object.
    '''
    for epoch_idx in range(self.args.epoch):
        epoch_loss = self.run_epoch(sess, epoch_idx)
        self.check_point(epoch_loss, epoch_idx, sess)
def __init__(self, args):
    '''
    Initialize the metapath2vec model and build the full TF graph.

    Args:
        args: Parsed argparse namespace (options defined in the
              if __name__ == '__main__' clause).
    '''
    self.args = args
    # load_data() is defined elsewhere in this class; later steps read the
    # attributes it presumably sets (e.g. self.node2id, self.num_windows).
    self.load_data()
    # Optional L2 weight regularization; disabled when args.l2 == 0.
    if self.args.l2 != 0:
        self.regularizer = tf.contrib.layers.l2_regularizer(scale=self.args.l2)
    else:
        self.regularizer = None
    # Graph construction order matters: placeholders -> embeddings -> loss -> optimizer.
    self.add_placeholders()
    self.add_embedding()
    self.loss = self.add_model()
    self.train_op = self.add_optimizer(self.loss)
    # Sentinel so the first epoch always improves on the "best" loss.
    self.best_loss = 1e10
if __name__=='__main__':
    # Command line interface for training metapath2vec on a random-walk corpus.
    parser = argparse.ArgumentParser(description='Metapath2Vec')
    parser.add_argument('-file', dest='filename', default='walks.txt', help='The random walks filename')
    parser.add_argument('-embed_dim', dest='embed_dim', default=64, type=int, help='The length of latent embedding')
    parser.add_argument('-n', dest='neighbour_size', default=7, type=int, help='The neighbourhood size k')
    parser.add_argument('-epoch', dest='epoch', default=10, type=int, help='Num of iterations for heterogeneous skipgram')
    parser.add_argument('-types', dest='typestr', default='a', type=str, help='Specify types occurring in the data')
    parser.add_argument('-batch', dest='batch_size', default=4, type=int, help='The number of the data used in each iter')
    parser.add_argument('-neg', dest='neg_size', default=5, type=int, help='The size of negative samples')
    parser.add_argument('-gpu', dest='gpu', default='0', help='Run the model on gpu')
    parser.add_argument('-l2', dest='l2', default=1e-3, type=float,help='L2 regularization scale (default 0.001)')
    parser.add_argument('-lr', dest='learning_rate',default=1e-2, type=float, help='Learning rate.')
    parser.add_argument('-outname', dest='outname', default='meta_embeddings.txt', help='Name of the output file.')
    args = parser.parse_args()
    # Pin the process to the requested GPU before building any TF state.
    set_gpu(args.gpu)
    tf.reset_default_graph()
    model = metapath2vec(args)
    # Allow memory growth and cap per-process GPU memory at 75%.
    config = tf.ConfigProto()
    config.gpu_options.allow_growth=True
    config.gpu_options.per_process_gpu_memory_fraction = 0.75
    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())
        model.fit(sess)
| 41.407285
| 136
| 0.582327
|
4a0e1c554eb67a6e14a623dab443d8cb8f1fc37a
| 654
|
py
|
Python
|
setup.py
|
being24/TA7291P_driver
|
4954f8cc3fca1e685a749b074699f1ed32cdd1e2
|
[
"MIT"
] | null | null | null |
setup.py
|
being24/TA7291P_driver
|
4954f8cc3fca1e685a749b074699f1ed32cdd1e2
|
[
"MIT"
] | null | null | null |
setup.py
|
being24/TA7291P_driver
|
4954f8cc3fca1e685a749b074699f1ed32cdd1e2
|
[
"MIT"
] | null | null | null |
import setuptools
# Long description shown on PyPI comes from the README.
with open("README.md", "r") as fh:
    long_description = fh.read()

setuptools.setup(
    name="TA7291P_driver",
    version="0.0.2",
    author="being24",
    author_email="being24@gmail.com",
    description="TA7291P driver with pigpio and pca9685 on Raspberry pi",
    # Runtime dependency on the PCA9685 PWM-controller wrapper.
    install_requires=[
        "PCA9685_wrapper",
    ],
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/being24/TA7291P_driver",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3.8",
        "License :: OSI Approved :: MIT License",
    ],
)
| 28.434783
| 73
| 0.669725
|
4a0e1cb8ed12b326a9e2150723da4a05f021fcc9
| 1,312
|
py
|
Python
|
statistics.test.py
|
clean-code-craft-tcq-2/sense-py-kumarSudhirTCQ
|
39731555be800cd9fb0518fa0f385b3b83ecb1eb
|
[
"MIT"
] | null | null | null |
statistics.test.py
|
clean-code-craft-tcq-2/sense-py-kumarSudhirTCQ
|
39731555be800cd9fb0518fa0f385b3b83ecb1eb
|
[
"MIT"
] | null | null | null |
statistics.test.py
|
clean-code-craft-tcq-2/sense-py-kumarSudhirTCQ
|
39731555be800cd9fb0518fa0f385b3b83ecb1eb
|
[
"MIT"
] | null | null | null |
import unittest
import statistics
import math
from alerter import*
class StatsTest(unittest.TestCase):
    """Unit tests for the local statistics module and the alerter classes.

    Note: ``statistics`` here is the project-local module providing
    ``calculateStats``, not the standard-library module of the same name.
    """

    def test_report_min_max_avg(self):
        stats = statistics.calculateStats([1.5, 8.9, 3.2, 4.5])
        tolerance = 0.001
        for key, expected in (("avg", 4.525), ("max", 8.9), ("min", 1.5)):
            self.assertAlmostEqual(stats[key], expected, delta=tolerance)

    def test_avg_is_nan_for_empty_input(self):
        stats = statistics.calculateStats([])
        # Every field must be nan (not-a-number) for empty input,
        # as defined in the math package.
        for key in ("avg", "max", "min"):
            self.assertTrue(math.isnan(stats[key]))

    def test_raise_alerts_when_max_above_threshold(self):
        email_alert = EmailAlert()
        led_alert = LEDAlert()
        max_threshold = 10.5
        stats_alerter = StatsAlerter(max_threshold, [email_alert, led_alert])
        stats_alerter.checkAndAlert([22.6, 12.5, 3.7])
        self.assertTrue(email_alert.emailSent)
        self.assertTrue(led_alert.ledGlows)
# Run the test suite when executed directly.
if __name__ == "__main__":
    unittest.main()
| 37.485714
| 70
| 0.724085
|
4a0e1e17c3faa5ce3e058463e9163643afe0ba79
| 1,152
|
py
|
Python
|
experiments/cifar10/parser.py
|
dwromero/att_gconvs
|
872259cad49763fdcfa3e96e80b6b5c331adf084
|
[
"MIT"
] | 53
|
2020-07-07T11:06:30.000Z
|
2022-03-26T02:42:49.000Z
|
experiments/cifar10/parser.py
|
dwromero/att_gconvs
|
872259cad49763fdcfa3e96e80b6b5c331adf084
|
[
"MIT"
] | null | null | null |
experiments/cifar10/parser.py
|
dwromero/att_gconvs
|
872259cad49763fdcfa3e96e80b6b5c331adf084
|
[
"MIT"
] | 2
|
2020-09-19T12:10:33.000Z
|
2020-10-29T19:37:08.000Z
|
import argparse
def _get_parser():
# Running settings
parser = argparse.ArgumentParser(description='cifar10 experiments.')
# Parse
parser.add_argument('--model', type=str, default='p4_allcnnc', metavar='M', help='type of model to use.')
parser.add_argument('--lr', default=1e-3, type=float, metavar='LR', help='initial learning rate (default: 1e-3)')
parser.add_argument('--weight_decay', default=1e-4, type=float, metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument("--device", type=str, default="cuda", help="Where to deploy the model {cuda, cpu}")
parser.add_argument('--seed', type=int, default=0, metavar='S', help='random seed (default: 0)')
parser.add_argument('--pretrained', default=False, action='store_true', help='use pre-trained model. If false, the model will be trained.')
parser.add_argument('--augment', default=False, action='store_true', help='If false, no data augmentation will be used.')
parser.add_argument('--extra_comment', type=str, default="")
# Return parser
return parser
def parse_args():
    """Parse the command line using the shared experiment parser."""
    return _get_parser().parse_args()
| 48
| 143
| 0.697049
|
4a0e1e7851e1ced90bedde28bf56e55ace00c59a
| 3,762
|
py
|
Python
|
clustering.py
|
grypesc/graduateAdmissionsAnalysis
|
e236b218fa6b512a6022b519a9b2bd312c61f2a5
|
[
"MIT"
] | 1
|
2019-06-11T14:53:47.000Z
|
2019-06-11T14:53:47.000Z
|
clustering.py
|
grypesc/graduateAdmissions
|
e236b218fa6b512a6022b519a9b2bd312c61f2a5
|
[
"MIT"
] | null | null | null |
clustering.py
|
grypesc/graduateAdmissions
|
e236b218fa6b512a6022b519a9b2bd312c61f2a5
|
[
"MIT"
] | null | null | null |
from __future__ import division
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans, AgglomerativeClustering
from sklearn.mixture import GaussianMixture
# Load the admissions dataset and run three clustering algorithms on it.
wh = pd.read_csv('datasetVer1.1.csv')
clusters = 2  # two clusters, matched later against the binary Research column
properKmeans = 0
properGausian = 0
properHierarchical = 0
data = wh[['GRE Score','TOEFL Score','University Rating','SOP','LOR' ,'CGPA', 'Chance of Admit']]
data2 = wh[["Research"]]
cor = data.corr() #Calculate the correlation of the above variables
sns.heatmap(cor, square=True) #Plot the correlation as heat map
#Scaling of data
# NOTE(review): the scaled result of fit_transform is discarded; clustering
# below runs on the unscaled `data`.
ss = StandardScaler()
ss.fit_transform(data)
#mapping features into one. Now we can use doKmeans()
#with x_pca instead of data
pca = PCA(n_components = 1, whiten= True ) # whitten = normalize
pca.fit(data)
x_pca = pca.transform(data)
#clustering by different methods
hierarchical_cluster = AgglomerativeClustering(n_clusters = clusters,affinity= "euclidean",linkage = "ward")
clusters_hierarchical = hierarchical_cluster.fit_predict(data)
# NOTE(review): assigning new columns to `data` (a slice of `wh`) may raise
# pandas SettingWithCopy warnings.
data["label_hierarchical"] = clusters_hierarchical
kmeans = KMeans(n_clusters=clusters)
clusters_knn = kmeans.fit_predict(data)
data["label_kmeans"] = clusters_knn
gausianMixture = GaussianMixture(n_components=clusters,init_params='kmeans')
clusters_gausianMixture = gausianMixture.fit_predict(data)
data["label_gausian"] = clusters_gausianMixture
def plotScatter(label):
    """Scatter TOEFL Score against Chance of Admit, coloured by `label`.

    `label` is either the literal "Research" (colours come from the raw
    Research column in `data2`) or one of the clustering label columns in
    `data` ("label_kmeans", "label_gausian", "label_hierarchical").
    """
    fig = plt.figure()
    axes = fig.add_subplot(111)
    if label == "Research":
        colours = data2["Research"]
        title = 'Research'
    else:
        colours = data[label]
        # strip the "label_" prefix for the plot title
        title = label[6:] + ' clustering'
    points = axes.scatter(data['TOEFL Score'], data['Chance of Admit'], c=colours, s=50)
    axes.set_title(title)
    axes.set_xlabel('TOEFL Score')
    axes.set_ylabel('Chance of Admit')
    plt.colorbar(points)
    plt.show()
def checkError(labelProperCounts, name, total=None):
    """Print the label-mismatch rate for one clustering algorithm.

    Clustering assigns the values 0 and 1 arbitrarily, so when the raw
    mismatch count is below half the dataset the labels are interpreted as
    flipped and the complementary fraction is reported instead (original
    behavior preserved).

    Args:
        labelProperCounts: Number of rows where the cluster label differs
            from the Research column.
        name: Algorithm name used in the printed message.
        total: Number of rows; defaults to len(output) (the module-level
            DataFrame) for backward compatibility.
    """
    if total is None:
        total = len(output)
    if labelProperCounts > total / 2:
        error = labelProperCounts / total
    elif labelProperCounts < total / 2:
        error = (total - labelProperCounts) / total
    else:
        # BUG FIX: when the count is exactly half, the original code left
        # `error` unbound and crashed with UnboundLocalError. Both of the
        # formulas above agree on 0.5 at this point.
        error = 0.5
    print("\nAverage error for "+name+": %.6f"%error)
#Plot real results
plotScatter("Research")
plotScatter("label_kmeans")
plotScatter("label_gausian")
plotScatter("label_hierarchical")
#writing to output file
# NOTE(review): to_csv() returns None, so `classification` is always None;
# the call is kept for its file-writing side effect.
classification = pd.DataFrame(data, columns=['Serial No.','GRE Score','TOEFL Score','University Rating','SOP','LOR'
,'CGPA', 'Chance of Admit', 'label_kmeans', "label_hierarchical", "label_gausian" ]).round(3).to_csv('./clusteringOutputs/kmeans.csv')
#calculating error
output = pd.read_csv('./clusteringOutputs/kmeans.csv')
researches = wh[['Research']]
label_kmeans = output[["label_kmeans"]]
label_gausian = output[["label_gausian"]]
label_hierarchical = output[["label_hierarchical"]]
# Flatten the single-column frames into 1-D arrays for element-wise comparison.
researches = np.squeeze(np.asarray(researches))
label_kmeans = np.squeeze(np.asarray(label_kmeans))
label_gausian = np.squeeze(np.asarray(label_gausian))
label_hierarchical = np.squeeze(np.asarray(label_hierarchical))
# NOTE(review): despite the "proper" names, these counters are incremented on
# MISMATCHES (!=); checkError() compensates for arbitrary 0/1 label flips.
for i in range(len(output)):
    if researches[i] != label_kmeans[i]:
        properKmeans+=1
    if researches[i] != label_gausian[i]:
        properGausian+=1
    if researches[i] != label_hierarchical[i]:
        properHierarchical+=1
print('\n*** Errors ***')
checkError(properKmeans, "K means")
checkError(properGausian, "Gausian Mixture")
checkError(properHierarchical, "Hierarchical Clustering")
print('\n\n')
| 35.828571
| 134
| 0.744285
|
4a0e1eac0196a69d04d49816d0c8d9f6f50adfd5
| 5,152
|
py
|
Python
|
src/meshrefine/refine_and_export.py
|
mahyar-osn/meshrefine
|
3bfdf35384c26487a794a6e0f2af8a715b5aa430
|
[
"Apache-2.0"
] | null | null | null |
src/meshrefine/refine_and_export.py
|
mahyar-osn/meshrefine
|
3bfdf35384c26487a794a6e0f2af8a715b5aa430
|
[
"Apache-2.0"
] | null | null | null |
src/meshrefine/refine_and_export.py
|
mahyar-osn/meshrefine
|
3bfdf35384c26487a794a6e0f2af8a715b5aa430
|
[
"Apache-2.0"
] | null | null | null |
"""
A script to refine a given Zinc mesh and export it to Zinc and VTK file formats.
Requires: OpenCMISS-ZINC, OpenCMISS.utils, ScaffoldMaker.
"""
import os
import argparse
from opencmiss.zinc.context import Context as ZincContext
from opencmiss.utils.zinc.field import get_group_list
from scaffoldmaker.utils.meshrefinement import MeshRefinement
from scaffoldmaker.annotation.annotationgroup import AnnotationGroup
from scaffoldmaker.utils.exportvtk import ExportVtk
class ProgramArguments(object):
    """Empty namespace object that parse_args() fills with the parsed CLI options."""
    pass
class RefineAndExport:
    """Read a Zinc mesh, refine it, and export to Zinc and VTK files.

    All work happens in the constructor: the input file is loaded into a Zinc
    region, refined into a child region, and written out in both formats.
    """

    def __init__(self, input_zinc_file, input_exelem_file=None, refine=None, output_zinc_file=None, output_vtk_file=None):
        """
        :param input_zinc_file: Path of the input EX file.
        :param input_exelem_file: Optional path of an exelem file, read into the same region.
        :param refine: Sequence of 1-3 ints, refinement factors per xi direction.
        :param output_zinc_file: Path of the refined Zinc output file.
        :param output_vtk_file: Path of the refined VTK output file.
        """
        self._context = ZincContext("RefineContext")
        self._region = self._context.getDefaultRegion()
        self._input_zinc_file = input_zinc_file
        self._region.readFile(self._input_zinc_file)
        if input_exelem_file is not None:
            self._region.readFile(input_exelem_file)
        self._field_module = self._region.getFieldmodule()
        # Wrap every field group in the model as an annotation group so the
        # groups survive refinement.
        self._annotation_groups = [AnnotationGroup(self._region,
            (group.getName(), None)) for group in get_group_list(self._field_module)]
        # Faces must exist before sub-elements can be added to the groups.
        self._field_module.defineAllFaces()
        for group in self._annotation_groups:
            group.addSubelements()
        """ Refine """
        self._refine_factor = refine
        self._refined_region, self._refined_annotation_groups = self._refine()
        """ Export to Zinc file"""
        self._refined_region.writeFile(output_zinc_file)
        """ Export to VTK """
        description = "A Zinc model scaffold"
        exportvtk = ExportVtk(self._refined_region, description, self._refined_annotation_groups)
        exportvtk.writeFile(output_vtk_file)

    def _refine(self):
        """Refine every element into a child region.

        Missing entries of the refine factor default to 1 (no refinement in
        that xi direction).

        :return: (refined region, refined annotation groups)
        """
        target_region = self._region.createChild('RefinedRegion')
        mesh_refinement = MeshRefinement(self._region, target_region, self._annotation_groups)
        mesh = self._get_mesh()
        element_iterator = mesh.createElementiterator()
        element = element_iterator.next()
        while element.isValid():
            number_in_xi1 = self._refine_factor[0]
            if len(self._refine_factor) > 1:
                number_in_xi2 = self._refine_factor[1]
            else:
                number_in_xi2 = 1
            if len(self._refine_factor) > 2:
                number_in_xi3 = self._refine_factor[2]
            else:
                number_in_xi3 = 1
            mesh_refinement.refineElementCubeStandard3d(element, number_in_xi1, number_in_xi2, number_in_xi3)
            element = element_iterator.next()
        return target_region, mesh_refinement.getAnnotationGroups()

    def _get_mesh(self):
        """Return the highest-dimension non-empty mesh (3D first, then 2D, 1D).

        :raises ValueError: if no mesh of any dimension has elements.
        """
        for dimension in range(3, 0, -1):
            mesh = self._field_module.findMeshByDimension(dimension)
            if mesh.getSize() > 0:
                return mesh
        raise ValueError('Model contains no mesh')
def main():
    """Entry point: resolve CLI options, derive default output paths, and run
    the refine-and-export pipeline."""
    args = parse_args()
    if not os.path.exists(args.input_ex):
        # The original code silently did nothing for a missing input file;
        # report it instead.
        print("Input file not found: {}".format(args.input_ex))
        return
    filename = os.path.basename(args.input_ex)
    dirname = os.path.dirname(args.input_ex)
    # Default outputs live next to the input, with a '_refined' suffix.
    if args.output_ex is None:
        output_ex = os.path.join(dirname, filename.split('.')[0] + '_refined.' + filename.split('.')[1])
    else:
        output_ex = args.output_ex
    if args.output_vtk is None:
        output_vtk = os.path.join(dirname, filename.split('.')[0] + '_refined.vtk')
    else:
        output_vtk = args.output_vtk
    if args.refine_factor is None:
        refine_factor = [4, 4, 1]
    else:
        # BUG FIX: the original iterated the characters of the string
        # (list("4,4,1")), which crashed on int(','). Split on commas, the
        # format documented in the -r help text.
        refine_factor = [int(i) for i in args.refine_factor.split(',')]
    RefineAndExport(args.input_ex,
                    # BUG FIX: the original assigned args.exelem to a local
                    # but always passed input_exelem_file=None.
                    input_exelem_file=args.exelem,
                    refine=refine_factor,
                    output_zinc_file=output_ex,
                    output_vtk_file=output_vtk)
def parse_args():
    """Parse command line options into a ProgramArguments namespace."""
    arg_parser = argparse.ArgumentParser(description="Refine and export a given ZINC model scaffold.")
    arg_parser.add_argument("input_ex", help="Location of the input EX file.")
    arg_parser.add_argument("-exelem", "--exelem", help="Optional - Location of the exelem file.")
    arg_parser.add_argument("-r", "--refine_factor", help="Refine factor for each xi coordinate direction."
                                                          "[default is '4,4,1'.")
    arg_parser.add_argument("-oe", "--output_ex", help="Location of the output Zinc file."
                                                       "[defaults to the location of the input file if not set.]")
    arg_parser.add_argument("-ov", "--output_vtk", help="Location of the output vtk file. "
                                                        "[defaults to the location of the input file if not set.]")
    namespace = ProgramArguments()
    arg_parser.parse_args(namespace=namespace)
    return namespace
# Script entry point.
if __name__ == "__main__":
    main()
| 40.25
| 124
| 0.637422
|
4a0e1ebf1f30f5dc0eba4762262e827a787552d2
| 48,932
|
py
|
Python
|
airbyte-integrations/connectors/source-hubspot/source_hubspot/streams.py
|
alvaroqueiroz/airbyte
|
a9641ea32c4c9c78dd557f6f196c0b7ac07122c1
|
[
"MIT"
] | null | null | null |
airbyte-integrations/connectors/source-hubspot/source_hubspot/streams.py
|
alvaroqueiroz/airbyte
|
a9641ea32c4c9c78dd557f6f196c0b7ac07122c1
|
[
"MIT"
] | null | null | null |
airbyte-integrations/connectors/source-hubspot/source_hubspot/streams.py
|
alvaroqueiroz/airbyte
|
a9641ea32c4c9c78dd557f6f196c0b7ac07122c1
|
[
"MIT"
] | null | null | null |
#
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
import sys
import time
import urllib.parse
from abc import ABC, abstractmethod
from functools import lru_cache, partial
from http import HTTPStatus
from typing import Any, Dict, Iterable, Iterator, List, Mapping, MutableMapping, Optional, Tuple, Union
import backoff
import pendulum as pendulum
import requests
from airbyte_cdk.entrypoint import logger
from airbyte_cdk.models import SyncMode
from airbyte_cdk.sources.streams.http import HttpStream
from airbyte_cdk.sources.streams.http.requests_native_auth import Oauth2Authenticator
from airbyte_cdk.sources.utils.sentry import AirbyteSentry
from requests import codes
from source_hubspot.errors import HubspotAccessDenied, HubspotInvalidAuth, HubspotRateLimited, HubspotTimeout
# The value is obtained experimentally, HubSpot allows the URL length up to ~16300 symbols,
# so it was decided to limit the length of the `properties` parameter to 15000 characters.
PROPERTIES_PARAM_MAX_LENGTH = 15000

# we got this when provided API Token has incorrect format
CLOUDFLARE_ORIGIN_DNS_ERROR = 530

# JSON-schema "type" values accepted verbatim from HubSpot property metadata.
VALID_JSON_SCHEMA_TYPES = {
    "string",
    "integer",
    "number",
    "boolean",
    "object",
    "array",
}

# HubSpot property types mapped to (json type, json format) pairs.
KNOWN_CONVERTIBLE_SCHEMA_TYPES = {
    "bool": ("boolean", None),
    "enumeration": ("string", None),
    "date": ("string", "date"),
    "date-time": ("string", "date-time"),
    "datetime": ("string", "date-time"),
    "json": ("string", None),
    "phone_number": ("string", None),
}

# Python runtime types <-> json schema type names; used when casting record values.
CUSTOM_FIELD_TYPE_TO_VALUE = {
    bool: "boolean",
    str: "string",
    float: "number",
    int: "integer",
}

CUSTOM_FIELD_VALUE_TO_TYPE = {v: k for k, v in CUSTOM_FIELD_TYPE_TO_VALUE.items()}
def split_properties(properties_list: List[str], max_length: Optional[int] = None) -> Iterator[List[str]]:
    """Split a property list into chunks that fit a URL query parameter.

    Each yielded chunk, when comma-joined and URL-encoded, stays below
    ``max_length`` characters (except a single oversize property, which is
    yielded in its own chunk).

    :param properties_list: property names to split.
    :param max_length: maximum encoded length per chunk; defaults to
        PROPERTIES_PARAM_MAX_LENGTH for backward compatibility.
    """
    if max_length is None:
        max_length = PROPERTIES_PARAM_MAX_LENGTH
    separator_length = len(urllib.parse.quote(","))
    summary_length = 0
    local_properties = []
    for property_ in properties_list:
        if len(property_) + summary_length + separator_length >= max_length:
            # BUG FIX: guard against yielding an empty chunk when the very
            # first property alone exceeds the limit.
            if local_properties:
                yield local_properties
            local_properties = []
            summary_length = 0
        local_properties.append(property_)
        summary_length += len(property_) + separator_length
    if local_properties:
        yield local_properties
def retry_connection_handler(**kwargs):
    """Build a backoff decorator for transient request errors, logging each retry.

    Gives up immediately on auth/permission errors and on any 4xx response;
    all other request exceptions are retried with exponential backoff.
    """

    def _log_attempt(details):
        _, exc, _ = sys.exc_info()
        logger.info(str(exc))
        logger.info(f"Caught retryable error after {details['tries']} tries. Waiting {details['wait']} more seconds then retrying...")

    def _should_give_up(exc):
        if isinstance(exc, (HubspotInvalidAuth, HubspotAccessDenied)):
            return True
        return exc.response is not None and HTTPStatus.BAD_REQUEST <= exc.response.status_code < HTTPStatus.INTERNAL_SERVER_ERROR

    return backoff.on_exception(
        backoff.expo,
        requests.exceptions.RequestException,
        jitter=None,
        on_backoff=_log_attempt,
        giveup=_should_give_up,
        **kwargs,
    )
def retry_after_handler(fixed_retry_after=None, **kwargs):
    """Retry helper when we hit the call limit, sleeps for specific duration.

    :param fixed_retry_after: when set, overrides the server's Retry-After header.
    :param kwargs: forwarded to backoff.on_exception (e.g. max_tries).
    """
    def sleep_on_ratelimit(_details):
        _, exc, _ = sys.exc_info()
        if isinstance(exc, HubspotRateLimited):
            # HubSpot API does not always return Retry-After value for 429 HTTP error
            retry_after = fixed_retry_after if fixed_retry_after else int(exc.response.headers.get("Retry-After", 3))
            logger.info(f"Rate limit reached. Sleeping for {retry_after} seconds")
            time.sleep(retry_after + 1)  # extra second to cover any fractions of second
    def log_giveup(_details):
        logger.error("Max retry limit reached")
    return backoff.on_exception(
        backoff.constant,
        HubspotRateLimited,
        jitter=None,
        on_backoff=sleep_on_ratelimit,
        on_giveup=log_giveup,
        interval=0,  # skip waiting part, we will wait in on_backoff handler
        **kwargs,
    )
class API:
    """HubSpot API interface, authorize, retrieve and post, supports backoff logic"""

    BASE_URL = "https://api.hubapi.com"
    USER_AGENT = "Airbyte"

    def get_authenticator(self, credentials):
        """Build an OAuth2 authenticator refreshing tokens against HubSpot."""
        return Oauth2Authenticator(
            token_refresh_endpoint=self.BASE_URL + "/oauth/v1/token",
            client_id=credentials["client_id"],
            client_secret=credentials["client_secret"],
            refresh_token=credentials["refresh_token"],
        )

    def __init__(self, credentials: Mapping[str, Any]):
        """Configure the shared session for either OAuth or API-key auth.

        :raises Exception: for an unsupported `credentials_title`.
        """
        self._session = requests.Session()
        credentials_title = credentials.get("credentials_title")
        if credentials_title == "OAuth Credentials":
            self._session.auth = self.get_authenticator(credentials)
        elif credentials_title == "API Key Credentials":
            # API-key auth is passed as the `hapikey` query parameter.
            self._session.params["hapikey"] = credentials.get("api_key")
        else:
            raise Exception("No supported `credentials_title` specified. See spec.json for references")
        self._session.headers = {
            "Content-Type": "application/json",
            "User-Agent": self.USER_AGENT,
        }

    @staticmethod
    def _parse_and_handle_errors(response) -> Union[MutableMapping[str, Any], List[MutableMapping[str, Any]]]:
        """Handle response: map HTTP errors to Hubspot exceptions, return parsed JSON."""
        message = "Unknown error"
        if response.headers.get("content-type") == "application/json;charset=utf-8" and response.status_code != HTTPStatus.OK:
            message = response.json().get("message")
        if response.status_code == HTTPStatus.FORBIDDEN:
            """Once hit the forbidden endpoint, we return the error message from response."""
            pass
        elif response.status_code in (HTTPStatus.UNAUTHORIZED, CLOUDFLARE_ORIGIN_DNS_ERROR):
            # 530 is returned by Cloudflare when the API token has an invalid format.
            raise HubspotInvalidAuth(message, response=response)
        elif response.status_code == HTTPStatus.TOO_MANY_REQUESTS:
            retry_after = response.headers.get("Retry-After")
            raise HubspotRateLimited(
                f"429 Rate Limit Exceeded: API rate-limit has been reached until {retry_after} seconds."
                " See https://developers.hubspot.com/docs/api/usage-details",
                response=response,
            )
        elif response.status_code in (HTTPStatus.BAD_GATEWAY, HTTPStatus.SERVICE_UNAVAILABLE):
            raise HubspotTimeout(message, response=response)
        else:
            response.raise_for_status()
        return response.json()

    @retry_connection_handler(max_tries=5, factor=5)
    @retry_after_handler(max_tries=3)
    def get(
        self, url: str, params: MutableMapping[str, Any] = None
    ) -> Tuple[Union[MutableMapping[str, Any], List[MutableMapping[str, Any]]], requests.Response]:
        """GET `url` relative to BASE_URL; returns (parsed body, raw response)."""
        response = self._session.get(self.BASE_URL + url, params=params)
        return self._parse_and_handle_errors(response), response

    def post(
        self, url: str, data: Mapping[str, Any], params: MutableMapping[str, Any] = None
    ) -> Tuple[Union[Mapping[str, Any], List[Mapping[str, Any]]], requests.Response]:
        """POST JSON `data` to `url` relative to BASE_URL; returns (parsed body, raw response)."""
        response = self._session.post(self.BASE_URL + url, params=params, json=data)
        return self._parse_and_handle_errors(response), response
class Stream(HttpStream, ABC):
    """Base class for all streams. Responsible for data fetching and pagination"""

    entity: str = None  # HubSpot entity name; set by subclasses
    updated_at_field: str = None  # record field holding the last-update timestamp
    created_at_field: str = None  # record field holding the creation timestamp
    more_key: str = None  # response key signalling more pages (legacy APIs)
    data_field = "results"  # response key under which records are returned
    page_filter = "offset"  # query parameter used to request a page
    page_field = "offset"  # response key carrying the next-page cursor
    limit_field = "limit"  # query parameter carrying the page size
    limit = 100  # page size
    offset = 0  # initial pagination offset
    primary_key = None
    # When True, records are filtered through self._filter_old_records
    # (defined elsewhere in this class) before being yielded.
    filter_old_records: bool = True
@property
def url_base(self) -> str:
    """Root URL of the HubSpot API."""
    return "https://api.hubapi.com"
@property
@abstractmethod
def url(self):
    """Default URL to read from; must be provided by subclasses."""
def path(
    self,
    *,
    stream_state: Mapping[str, Any] = None,
    stream_slice: Mapping[str, Any] = None,
    next_page_token: Mapping[str, Any] = None,
) -> str:
    """Return the stream's endpoint path; the pagination arguments are
    accepted for interface compatibility but ignored."""
    del stream_state, stream_slice, next_page_token  # unused
    return self.url
def __init__(self, api: API, start_date: str = None, credentials: Mapping[str, Any] = None, **kwargs):
    """
    :param api: Shared API client (supplies USER_AGENT and auth).
    :param start_date: ISO-8601 start date; parsed with pendulum.
        NOTE(review): pendulum.parse(None) raises when start_date is omitted —
        callers appear to always pass it; confirm before relying on the default.
    :param credentials: Connector credentials; only consulted here for the
        API-key flavour.
    :param kwargs: Forwarded to HttpStream.
    """
    super().__init__(**kwargs)
    self._api: API = api
    self._start_date = pendulum.parse(start_date)
    # BUG FIX: `credentials` defaults to None and may lack the key; the
    # original unconditional credentials["credentials_title"] raised
    # TypeError/KeyError. Guard with a truthiness check and .get().
    if credentials and credentials.get("credentials_title") == "API Key Credentials":
        self._session.params["hapikey"] = credentials.get("api_key")
def backoff_time(self, response: "requests.Response") -> "Optional[float]":
    """Return the retry delay (seconds) for a 429 response, else None.

    HubSpot may omit the Retry-After header; fall back to 3 seconds.
    """
    if response.status_code != HTTPStatus.TOO_MANY_REQUESTS:
        return None
    return float(response.headers.get("Retry-After", 3))
def request_headers(
    self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None
) -> Mapping[str, Any]:
    """Return the static headers sent with every request (JSON content type
    plus the shared API client's User-Agent)."""
    headers = {"Content-Type": "application/json"}
    headers["User-Agent"] = self._api.USER_AGENT
    return headers
def get_json_schema(self) -> Mapping[str, Any]:
    """Return the catalog schema, injecting the dynamically-discovered HubSpot
    properties as a nested "properties" object field."""
    json_schema = super().get_json_schema()
    if self.properties:
        json_schema["properties"]["properties"] = {"type": "object", "properties": self.properties}
    return json_schema
def handle_request(
    self,
    stream_slice: Mapping[str, Any] = None,
    stream_state: Mapping[str, Any] = None,
    next_page_token: Mapping[str, Any] = None,
    params: Mapping[str, Any] = None,
) -> requests.Response:
    """Build and send one request for this stream.

    :param params: extra query parameters merged on top of request_params().
    :return: the raw HTTP response (possibly served from the vcr cache).
    """
    request_headers = self.request_headers(stream_state=stream_state, stream_slice=stream_slice, next_page_token=next_page_token)
    request_params = self.request_params(stream_state=stream_state, stream_slice=stream_slice, next_page_token=next_page_token)
    if params:
        request_params.update(params)
    request = self._create_prepared_request(
        path=self.path(stream_state=stream_state, stream_slice=stream_slice, next_page_token=next_page_token),
        headers=dict(request_headers, **self.authenticator.get_auth_header()),
        params=request_params,
        json=self.request_body_json(stream_state=stream_state, stream_slice=stream_slice, next_page_token=next_page_token),
        data=self.request_body_data(stream_state=stream_state, stream_slice=stream_slice, next_page_token=next_page_token),
    )
    request_kwargs = self.request_kwargs(stream_state=stream_state, stream_slice=stream_slice, next_page_token=next_page_token)
    if self.use_cache:
        # use context manager to handle and store cassette metadata
        with self.cache_file as cass:
            # NOTE(review): attribute name "cassete" is misspelled ("cassette"); kept as-is.
            self.cassete = cass
            # vcr tries to find records based on the request, if such records exist, return from cache file
            # else make a request and save record in cache file
            response = self._send_request(request, request_kwargs)
    else:
        response = self._send_request(request, request_kwargs)
    return response
def _read_stream_records(
    self,
    properties_list: List[str],
    stream_slice: Mapping[str, Any] = None,
    stream_state: Mapping[str, Any] = None,
    next_page_token: Mapping[str, Any] = None,
) -> Tuple[dict, Any]:
    """Fetch one page multiple times, once per properties chunk, and merge
    the partial records by id.

    :return: ({record_id: merged record}, last raw response — used by the
        caller for pagination).
    """
    # TODO: Additional processing was added due to the fact that users receive 414 errors while syncing their streams (issues #3977 and #5835).
    # We will need to fix this code when the HubSpot developers add the ability to use a special parameter to get all properties for an entity.
    # According to HubSpot Community (https://community.hubspot.com/t5/APIs-Integrations/Get-all-contact-properties-without-explicitly-listing-them/m-p/447950)
    # and the official documentation, this does not exist at the moment.
    stream_records = {}
    response = None
    for properties in split_properties(properties_list):
        params = {"properties": ",".join(properties)}
        response = self.handle_request(
            stream_slice=stream_slice, stream_state=stream_state, next_page_token=next_page_token, params=params
        )
        for record in self._transform(self.parse_response(response, stream_state=stream_state)):
            if record["id"] not in stream_records:
                stream_records[record["id"]] = record
            elif stream_records[record["id"]].get("properties"):
                # Same record seen in an earlier chunk: merge the new properties in.
                stream_records[record["id"]]["properties"].update(record.get("properties", {}))
    return stream_records, response
def read_records(
    self,
    sync_mode: SyncMode,
    cursor_field: List[str] = None,
    stream_slice: Mapping[str, Any] = None,
    stream_state: Mapping[str, Any] = None,
) -> Iterable[Mapping[str, Any]]:
    """Yield all records for this stream, paginating until next_page_token()
    returns a falsy value.

    When the stream declares dynamic properties, each page is fetched in
    chunks via _read_stream_records to avoid over-long URLs.
    """
    stream_state = stream_state or {}
    pagination_complete = False
    next_page_token = None
    with AirbyteSentry.start_transaction("read_records", self.name), AirbyteSentry.start_transaction_span("read_records"):
        while not pagination_complete:
            properties_list = list(self.properties.keys())
            if properties_list:
                stream_records, response = self._read_stream_records(
                    properties_list=properties_list,
                    stream_slice=stream_slice,
                    stream_state=stream_state,
                    next_page_token=next_page_token,
                )
                records = [value for key, value in stream_records.items()]
            else:
                response = self.handle_request(stream_slice=stream_slice, stream_state=stream_state, next_page_token=next_page_token)
                records = self._transform(self.parse_response(response, stream_state=stream_state, stream_slice=stream_slice))
            if self.filter_old_records:
                records = self._filter_old_records(records)
            yield from records
            next_page_token = self.next_page_token(response)
            if not next_page_token:
                pagination_complete = True
        # Always return an empty generator just in case no records were ever yielded
        yield from []
@staticmethod
def _convert_datetime_to_string(dt: "pendulum.datetime", declared_format: str = None) -> str:
    """Render *dt* as a date or datetime string, per declared_format.

    Returns None for any other declared_format (mirrors the catalog's
    "date" / "date-time" format values).
    """
    formatters = {
        "date": dt.to_date_string,
        "date-time": dt.to_datetime_string,
    }
    formatter = formatters.get(declared_format)
    return formatter() if formatter is not None else None
@classmethod
def _cast_datetime(cls, field_name: str, field_value: Any, declared_format: str = None) -> Any:
    """
    If format is date/date-time, but actual value is timestamp, convert timestamp to date/date-time string.

    Falls back to the raw value when neither a date/datetime string nor a
    millisecond timestamp can be parsed, logging a warning each time.
    """
    if not field_value:
        return field_value
    try:
        dt = pendulum.parse(field_value)
        return cls._convert_datetime_to_string(dt, declared_format=declared_format)
    except (ValueError, TypeError) as ex:
        logger.warning(
            f"Couldn't parse date/datetime string in {field_name}, trying to parse timestamp... Field value: {field_value}. Ex: {ex}"
        )
    # Second attempt: interpret the value as a millisecond epoch timestamp.
    try:
        dt = pendulum.from_timestamp(int(field_value) / 1000)
        return cls._convert_datetime_to_string(dt, declared_format=declared_format)
    except (ValueError, TypeError) as ex:
        logger.warning(f"Couldn't parse timestamp in {field_name}. Field value: {field_value}. Ex: {ex}")
    return field_value
    @classmethod
    def _cast_value(cls, declared_field_types: List, field_name: str, field_value: Any, declared_format: str = None) -> Any:
        """
        Convert record's received value according to its declared catalog json schema type / format / attribute name.
        :param declared_field_types type from catalog schema
        :param field_name value's attribute name
        :param field_value actual value to cast
        :param declared_format format field value from catalog schema
        :return Converted value for record
        """
        if "null" in declared_field_types:
            if field_value is None:
                return field_value
            # Sometime hubspot output empty string on field with format set.
            # Set it to null to avoid errors on destination' normalization stage.
            if declared_format and field_value == "":
                return None
        if declared_format in ["date", "date-time"]:
            field_value = cls._cast_datetime(field_name, field_value, declared_format=declared_format)
        actual_field_type = type(field_value)
        actual_field_type_name = CUSTOM_FIELD_TYPE_TO_VALUE.get(actual_field_type)
        # Value already matches one of the declared types: keep it as-is.
        if actual_field_type_name in declared_field_types:
            return field_value
        # Otherwise cast to the first declared non-null type.
        # NOTE(review): raises StopIteration if declared_field_types contains only
        # "null" — confirm the catalog schema always declares a concrete type.
        target_type_name = next(filter(lambda t: t != "null", declared_field_types))
        target_type = CUSTOM_FIELD_VALUE_TO_TYPE.get(target_type_name)
        if target_type_name == "number":
            # do not cast numeric IDs into float, use integer instead
            target_type = int if field_name.endswith("_id") else target_type
        if target_type_name != "string" and field_value == "":
            # do not cast empty strings, return None instead to be properly casted.
            field_value = None
            return field_value
        try:
            casted_value = target_type(field_value)
        except ValueError:
            # Keep the raw value rather than dropping the record on a bad cast.
            logger.exception(f"Could not cast `{field_value}` to `{target_type}`")
            return field_value
        return casted_value
def _cast_record_fields_if_needed(self, record: Mapping, properties: Mapping[str, Any] = None) -> Mapping:
if not self.entity or not record.get("properties"):
return record
properties = properties or self.properties
for field_name, field_value in record["properties"].items():
declared_field_types = properties[field_name].get("type", [])
if not isinstance(declared_field_types, Iterable):
declared_field_types = [declared_field_types]
format = properties[field_name].get("format")
record["properties"][field_name] = self._cast_value(
declared_field_types=declared_field_types, field_name=field_name, field_value=field_value, declared_format=format
)
return record
def _transform(self, records: Iterable) -> Iterable:
"""Preprocess record before emitting"""
for record in records:
record = self._cast_record_fields_if_needed(record)
if self.created_at_field and self.updated_at_field and record.get(self.updated_at_field) is None:
record[self.updated_at_field] = record[self.created_at_field]
yield record
@staticmethod
def _field_to_datetime(value: Union[int, str]) -> pendulum.datetime:
if isinstance(value, int):
value = pendulum.from_timestamp(value / 1000.0)
elif isinstance(value, str):
value = pendulum.parse(value)
else:
raise ValueError(f"Unsupported type of datetime field {type(value)}")
return value
def _filter_old_records(self, records: Iterable) -> Iterable:
"""Skip records that was updated before our start_date"""
for record in records:
updated_at = record[self.updated_at_field]
if updated_at:
updated_at = self._field_to_datetime(updated_at)
if updated_at < self._start_date:
continue
yield record
def request_params(
self,
stream_state: Mapping[str, Any],
stream_slice: Mapping[str, Any] = None,
next_page_token: Mapping[str, Any] = None,
) -> MutableMapping[str, Any]:
default_params = {self.limit_field: self.limit}
params = {**default_params}
if next_page_token:
params.update(next_page_token)
return params
    def _parse_response(self, response: requests.Response):
        # Delegate to the shared API client, which decodes the body and handles API-level errors.
        return self._api._parse_and_handle_errors(response)
def parse_response(
self,
response: requests.Response,
*,
stream_state: Mapping[str, Any],
stream_slice: Mapping[str, Any] = None,
next_page_token: Mapping[str, Any] = None,
) -> Iterable[Mapping]:
response = self._parse_response(response)
if isinstance(response, Mapping):
if response.get("status", None) == "error":
"""
When the API Key doen't have the permissions to access the endpoint,
we break the read, skip this stream and log warning message for the user.
Example:
response.json() = {
'status': 'error',
'message': 'This hapikey (....) does not have proper permissions! (requires any of [automation-access])',
'correlationId': '111111-2222-3333-4444-55555555555'}
"""
self.logger.warning(f"Stream `{self.name}` cannot be procced. {response.get('message')}")
return
if response.get(self.data_field) is None:
"""
When the response doen't have the stream's data, raise an exception.
"""
raise RuntimeError("Unexpected API response: {} not in {}".format(self.data_field, response.keys()))
yield from response[self.data_field]
else:
response = list(response)
yield from response
    def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:
        """Extract the next-page cursor from a parsed response; None ends pagination."""
        response = self._parse_response(response)
        if isinstance(response, Mapping):
            if "paging" in response:  # APIv3 pagination
                if "next" in response["paging"]:
                    return {"after": response["paging"]["next"]["after"]}
            else:
                # Legacy (v1) envelope pagination: a "more" flag plus an offset field.
                if not response.get(self.more_key, False):
                    return
                if self.page_field in response:
                    return {self.page_filter: response[self.page_field]}
        else:
            # Plain-list responses: advance a client-side offset while pages come back full.
            if len(response) >= self.limit:
                self.offset += self.limit
                return {self.page_filter: self.offset}
@staticmethod
def _get_field_props(field_type: str) -> Mapping[str, List[str]]:
if field_type in VALID_JSON_SCHEMA_TYPES:
return {
"type": ["null", field_type],
}
converted_type, field_format = KNOWN_CONVERTIBLE_SCHEMA_TYPES.get(field_type) or (None, None)
if not converted_type:
converted_type = "string"
logger.warn(f"Unsupported type {field_type} found")
field_props = {
"type": ["null", converted_type or field_type],
}
if field_format:
field_props["format"] = field_format
return field_props
    @property
    @lru_cache()
    def properties(self) -> Mapping[str, Any]:
        """Some entities has dynamic set of properties, so we trying to resolve those at runtime"""
        # NOTE(review): lru_cache on a method keys the cache on `self`, keeping every
        # instance alive for the interpreter's lifetime (ruff B019), and the default
        # maxsize=128 may evict entries; consider functools.cached_property instead.
        if not self.entity:
            return {}
        props = {}
        # _api.get returns a (data, response) tuple; only the data payload is used here.
        data, response = self._api.get(f"/properties/v2/{self.entity}/properties")
        for row in data:
            props[row["name"]] = self._get_field_props(row["type"])
        return props
def _flat_associations(self, records: Iterable[MutableMapping]) -> Iterable[MutableMapping]:
"""When result has associations we prefer to have it flat, so we transform this:
"associations": {
"contacts": {
"results": [{"id": "201", "type": "company_to_contact"}, {"id": "251", "type": "company_to_contact"}]}
}
}
to this:
"contacts": [201, 251]
"""
for record in records:
if "associations" in record:
associations = record.pop("associations")
for name, association in associations.items():
record[name] = [row["id"] for row in association.get("results", [])]
yield record
class IncrementalStream(Stream, ABC):
    """Stream that supports state and incremental read"""
    # Bookmark serialization mode: "timestamp" -> epoch milliseconds, otherwise ISO string.
    state_pk = "timestamp"
    limit = 1000
    # Flag which enable/disable chunked read in read_chunked method
    # False -> chunk size is max (only one slice), True -> chunk_size is 30 days
    need_chunk = True
    state_checkpoint_interval = 500
    @property
    def cursor_field(self) -> Union[str, List[str]]:
        # The incremental cursor is whatever field the subclass declares as updated_at_field.
        return self.updated_at_field
    @property
    @abstractmethod
    def updated_at_field(self):
        """Name of the field associated with the state"""
    def read_records(
        self,
        sync_mode: SyncMode,
        cursor_field: List[str] = None,
        stream_slice: Mapping[str, Any] = None,
        stream_state: Mapping[str, Any] = None,
    ) -> Iterable[Mapping[str, Any]]:
        """Yield parent records while tracking the max cursor seen, then advance state."""
        records = super().read_records(sync_mode, cursor_field=cursor_field, stream_slice=stream_slice, stream_state=stream_state)
        latest_cursor = None
        for record in records:
            cursor = self._field_to_datetime(record[self.updated_at_field])
            latest_cursor = max(cursor, latest_cursor) if latest_cursor else cursor
            yield record
        # Only bump the bookmark after the full sweep so interrupted runs don't skip records.
        self._update_state(latest_cursor=latest_cursor)
    def get_updated_state(self, current_stream_state: MutableMapping[str, Any], latest_record: Mapping[str, Any]):
        # NOTE: both arguments are intentionally unused — state is tracked internally
        # via _update_state; before any sync a start_date-based state is returned.
        if self.state:
            return self.state
        return (
            {self.updated_at_field: int(self._start_date.timestamp() * 1000)}
            if self.state_pk == "timestamp"
            else {self.updated_at_field: str(self._start_date)}
        )
    @property
    def state(self) -> Optional[Mapping[str, Any]]:
        """Current state, if wasn't set return None"""
        if self._state:
            return (
                {self.updated_at_field: int(self._state.timestamp() * 1000)}
                if self.state_pk == "timestamp"
                else {self.updated_at_field: str(self._state)}
            )
        return None
    @state.setter
    def state(self, value):
        # Accept either an epoch-milliseconds int or a datetime string for the bookmark.
        state_value = value[self.updated_at_field]
        self._state = (
            pendulum.parse(str(pendulum.from_timestamp(state_value / 1000)))
            if isinstance(state_value, int)
            else pendulum.parse(state_value)
        )
        # Never sync from earlier than the stored bookmark.
        self._start_date = max(self._state, self._start_date)
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # In-memory bookmark (a pendulum datetime); None until state is set or records are read.
        self._state = None
    def _update_state(self, latest_cursor):
        # Move the bookmark (and the effective start date) forward, never backward.
        if latest_cursor:
            new_state = max(latest_cursor, self._state) if self._state else latest_cursor
            if new_state != self._state:
                logger.info(f"Advancing bookmark for {self.name} stream from {self._state} to {latest_cursor}")
                self._state = new_state
                self._start_date = self._state
    def stream_slices(
        self, *, sync_mode: SyncMode, cursor_field: List[str] = None, stream_state: Mapping[str, Any] = None
    ) -> Iterable[Optional[Mapping[str, Any]]]:
        """Split [start_date, now] into 30-day epoch-millis windows (one window if need_chunk is False)."""
        chunk_size = pendulum.duration(days=30)
        slices = []
        now_ts = int(pendulum.now().timestamp() * 1000)
        start_ts = int(self._start_date.timestamp() * 1000)
        max_delta = now_ts - start_ts
        # NOTE(review): if need_chunk is False and start_date >= now, the range step
        # becomes <= 0 and raises — confirm start_date is always in the past.
        chunk_size = int(chunk_size.total_seconds() * 1000) if self.need_chunk else max_delta
        for ts in range(start_ts, now_ts, chunk_size):
            end_ts = ts + chunk_size
            slices.append(
                {
                    "startTimestamp": ts,
                    "endTimestamp": end_ts,
                }
            )
        return slices
    def request_params(
        self,
        stream_state: Mapping[str, Any],
        stream_slice: Mapping[str, Any] = None,
        next_page_token: Mapping[str, Any] = None,
    ) -> MutableMapping[str, Any]:
        # Merge the slice's start/end timestamps into the base query params.
        params = super().request_params(stream_state=stream_state, stream_slice=stream_slice, next_page_token=next_page_token)
        if stream_slice:
            params.update(stream_slice)
        return params
class CRMSearchStream(IncrementalStream, ABC):
    """Incremental CRM stream that switches to the /search endpoint once state exists.

    Without a bookmark the plain list endpoint is used (full read); with one,
    records are fetched through POST /search filtered on ``last_modified_field``
    so only updated objects are read.
    """

    limit = 100  # This value is used only when state is None.
    state_pk = "updatedAt"
    updated_at_field = "updatedAt"
    # Property used in the incremental search filter; set by concrete subclasses.
    last_modified_field: str = None
    # Associated object types requested with each record; set by concrete subclasses.
    associations: List[str] = None

    @property
    def url(self):
        """Search endpoint while a bookmark exists, plain list endpoint otherwise."""
        return f"/crm/v3/objects/{self.entity}/search" if self.state else f"/crm/v3/objects/{self.entity}"

    def __init__(
        self,
        include_archived_only: bool = False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # _state is already initialized to None by IncrementalStream.__init__,
        # so the redundant re-assignment was removed.
        self._include_archived_only = include_archived_only

    @retry_connection_handler(max_tries=5, factor=5)
    @retry_after_handler(fixed_retry_after=1, max_tries=3)
    def search(
        self, url: str, data: Mapping[str, Any], params: MutableMapping[str, Any] = None
    ) -> Tuple[Union[Mapping[str, Any], List[Mapping[str, Any]]], requests.Response]:
        # We can safely retry this POST call, because it's a search operation.
        # Given Hubspot does not return any Retry-After header (https://developers.hubspot.com/docs/api/crm/search)
        # from the search endpoint, it waits one second after trying again.
        # As per their docs: `These search endpoints are rate limited to four requests per second per authentication token`.
        return self._api.post(url=url, data=data, params=params)

    def _process_search(
        self,
        properties_list: List[str],
        stream_slice: Mapping[str, Any] = None,
        stream_state: Mapping[str, Any] = None,
        next_page_token: Mapping[str, Any] = None,
    ) -> Tuple[dict, requests.Response]:
        """Run one search request; return ({record_id: record}, raw_response)."""
        stream_records = {}
        # Filter on the bookmark only when incremental state exists.
        payload = (
            {
                "filters": [{"value": int(self._state.timestamp() * 1000), "propertyName": self.last_modified_field, "operator": "GTE"}],
                "properties": properties_list,
                "limit": 100,
            }
            if self.state
            else {}
        )
        if next_page_token:
            payload.update(next_page_token["payload"])
        response, raw_response = self.search(url=self.url, data=payload)
        for record in self._transform(self.parse_response(raw_response, stream_state=stream_state, stream_slice=stream_slice)):
            stream_records[record["id"]] = record
        return stream_records, raw_response

    def read_records(
        self,
        sync_mode: SyncMode,
        cursor_field: List[str] = None,
        stream_slice: Mapping[str, Any] = None,
        stream_state: Mapping[str, Any] = None,
    ) -> Iterable[Mapping[str, Any]]:
        """Paginate search (incremental) or list (full) results, tracking the max cursor."""
        stream_state = stream_state or {}
        pagination_complete = False
        next_page_token = None
        latest_cursor = None
        with AirbyteSentry.start_transaction("read_records", self.name), AirbyteSentry.start_transaction_span("read_records"):
            while not pagination_complete:
                properties_list = list(self.properties.keys())
                if self.state:
                    stream_records, raw_response = self._process_search(
                        properties_list,
                        next_page_token=next_page_token,
                        stream_state=stream_state,
                        stream_slice=stream_slice,
                    )
                else:
                    stream_records, raw_response = self._read_stream_records(
                        properties_list=properties_list,
                        stream_slice=stream_slice,
                        stream_state=stream_state,
                        next_page_token=next_page_token,
                    )
                records = [value for key, value in stream_records.items()]
                records = self._filter_old_records(records)
                records = self._flat_associations(records)
                for record in records:
                    cursor = self._field_to_datetime(record[self.updated_at_field])
                    latest_cursor = max(cursor, latest_cursor) if latest_cursor else cursor
                    yield record
                next_page_token = self.next_page_token(raw_response)
                if not next_page_token:
                    pagination_complete = True
            # Advance the bookmark only after all pages were read.
            self._update_state(latest_cursor=latest_cursor)
        # Always return an empty generator just in case no records were ever yielded
        yield from []

    def request_params(
        self,
        stream_state: Mapping[str, Any],
        stream_slice: Mapping[str, Any] = None,
        next_page_token: Mapping[str, Any] = None,
    ) -> MutableMapping[str, Any]:
        params = {"archived": str(self._include_archived_only).lower(), "associations": self.associations, "limit": self.limit}
        if next_page_token:
            params.update(next_page_token.get("params", {}))
        return params

    def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:
        """Return cursors for the next page, or None when pagination is finished.

        Bug fix: the previous implementation returned ``{"params": {}, "payload": {}}``
        — a truthy dict — even when the response carried no ``paging.next.after``
        cursor, so ``read_records`` never saw a falsy token and looped forever
        once the last page was reached.
        """
        response = self._parse_response(response)
        if "paging" in response and "next" in response["paging"] and "after" in response["paging"]["next"]:
            after = int(response["paging"]["next"]["after"])
            return {"params": {"after": after}, "payload": {"after": after}}
        return None

    def stream_slices(
        self, *, sync_mode: SyncMode, cursor_field: List[str] = None, stream_state: Mapping[str, Any] = None
    ) -> Iterable[Optional[Mapping[str, Any]]]:
        # Search pagination handles windowing itself; a single None slice suffices.
        return [None]
class CRMObjectStream(Stream):
    """Unified stream interface for CRM objects.
    You need to provide `entity` parameter to read concrete stream, possible values are:
        company, contact, deal, line_item, owner, product, ticket, quote
    You can also include associated records (IDs), provide associations parameter - a list of entity names:
        contacts, tickets, deals, engagements
    see https://developers.hubspot.com/docs/api/crm/understanding-the-crm for more details
    """
    # Concrete CRM entity name; must be set by a subclass or at construction time.
    entity: Optional[str] = None
    # Associated object types to fetch alongside each record.
    associations: List[str] = []
    updated_at_field = "updatedAt"
    created_at_field = "createdAt"
    @property
    def url(self):
        """Entity URL"""
        return f"/crm/v3/objects/{self.entity}"
    def __init__(self, include_archived_only: bool = False, **kwargs):
        super().__init__(**kwargs)
        self._include_archived_only = include_archived_only
        # Fail fast when no entity is configured — the URL would be meaningless.
        if not self.entity:
            raise ValueError("Entity must be set either on class or instance level")
class CRMObjectIncrementalStream(CRMObjectStream, IncrementalStream):
    """Incremental variant of CRMObjectStream.

    ``request_params`` and ``read_records`` call IncrementalStream explicitly —
    presumably to pick the incremental implementations in the multiple-inheritance
    chain (TODO confirm the intended MRO).

    Fix: removed the redundant ``__init__`` that only called ``super().__init__``.
    """

    state_pk = "updatedAt"
    limit = 100
    need_chunk = False

    def request_params(
        self,
        stream_state: Mapping[str, Any],
        stream_slice: Mapping[str, Any] = None,
        next_page_token: Mapping[str, Any] = None,
    ) -> MutableMapping[str, Any]:
        """Incremental params plus the archived flag and requested associations."""
        params = IncrementalStream.request_params(
            self, stream_state=stream_state, stream_slice=stream_slice, next_page_token=next_page_token
        )
        params.update(
            {
                "archived": str(self._include_archived_only).lower(),
                "associations": self.associations,
            }
        )
        return params

    def read_records(
        self,
        sync_mode: SyncMode,
        cursor_field: List[str] = None,
        stream_slice: Mapping[str, Any] = None,
        stream_state: Mapping[str, Any] = None,
    ) -> Iterable[Mapping[str, Any]]:
        """Read incrementally, then flatten nested associations on each record."""
        records = IncrementalStream.read_records(
            self,
            sync_mode,
            cursor_field=cursor_field,
            stream_slice=stream_slice,
            stream_state=stream_state,
        )
        yield from self._flat_associations(records)
class Campaigns(Stream):
    """Email campaigns, API v1.

    There is some confusion between emails and campaigns in docs; this endpoint
    returns actual emails.
    Docs: https://legacydocs.hubspot.com/docs/methods/email/get_campaign_data
    """

    url = "/email/public/v1/campaigns"
    more_key = "hasMore"
    data_field = "campaigns"
    limit = 500
    updated_at_field = "lastUpdatedTime"

    def read_records(
        self,
        sync_mode: SyncMode,
        cursor_field: List[str] = None,
        stream_slice: Mapping[str, Any] = None,
        stream_state: Mapping[str, Any] = None,
    ) -> Iterable[Mapping[str, Any]]:
        """Enrich each listed campaign with its per-campaign detail payload."""
        listed = super().read_records(
            sync_mode, cursor_field=cursor_field, stream_slice=stream_slice, stream_state=stream_state
        )
        for campaign in listed:
            details, _response = self._api.get(f"/email/public/v1/campaigns/{campaign['id']}")
            yield {**campaign, **details}
class ContactLists(IncrementalStream):
    """Contact lists, API v1
    Docs: https://legacydocs.hubspot.com/docs/methods/lists/get_lists
    """
    url = "/contacts/v1/lists"
    data_field = "lists"
    more_key = "has-more"
    updated_at_field = "updatedAt"
    created_at_field = "createdAt"
    # v1 endpoint uses "count" as the page-size parameter instead of "limit".
    limit_field = "count"
    # Single slice: no 30-day chunking for this endpoint.
    need_chunk = False
class ContactsListMemberships(Stream):
    """Contacts list Memberships, API v1.

    The Stream was created due to issue #8477, where supporting List Memberships
    in the Contacts stream was requested. According to the issue this feature is
    supported in API v1 by setting parameter showListMemberships=true on the
    get-all-contacts endpoint; the API then returns list memberships for each
    contact record. Syncing Contacts uses API v3, where list memberships aren't
    supported, hence this separate stream based on the v1 endpoint.
    Docs: https://legacydocs.hubspot.com/docs/methods/contacts/get_contacts
    """

    url = "/contacts/v1/lists/all/contacts/all"
    updated_at_field = "timestamp"
    more_key = "has-more"
    data_field = "contacts"
    page_filter = "vidOffset"
    page_field = "vid-offset"

    def _transform(self, records: Iterable) -> Iterable:
        """Emit one record per list membership, tagged with the contact's canonical vid.

        Contacts may carry multiple vids; canonical-vid is the primary ID.
        Docs: https://legacydocs.hubspot.com/docs/methods/contacts/contacts-overview
        """
        for contact in super()._transform(records):
            canonical_vid = contact.get("canonical-vid")
            for membership in contact.get("list-memberships", []):
                yield {"canonical-vid": canonical_vid, **membership}

    def request_params(
        self,
        stream_state: Mapping[str, Any],
        stream_slice: Mapping[str, Any] = None,
        next_page_token: Mapping[str, Any] = None,
    ) -> MutableMapping[str, Any]:
        """Ask the API to include list memberships with each contact."""
        params = super().request_params(stream_state=stream_state, stream_slice=stream_slice, next_page_token=next_page_token)
        params["showListMemberships"] = True
        return params
class Deals(CRMSearchStream):
    """Deals, API v3"""
    entity = "deal"
    # Search filter property used for incremental reads.
    last_modified_field = "hs_lastmodifieddate"
    # Associated object types fetched alongside each deal.
    associations = ["contacts", "companies"]
class DealPipelines(Stream):
    """Deal pipelines, API v1,
    This endpoint requires the contacts scope or the tickets scope
    (NOTE(review): original wording was ambiguous — confirm exact scopes).
    Docs: https://legacydocs.hubspot.com/docs/methods/pipelines/get_pipelines_for_object_type
    """
    url = "/crm-pipelines/v1/pipelines/deals"
    updated_at_field = "updatedAt"
    created_at_field = "createdAt"
class TicketPipelines(Stream):
    """Ticket pipelines, API v3 (despite the legacy name the URL below is v3).
    This endpoint requires the tickets scope.
    Docs: https://developers.hubspot.com/docs/api/crm/pipelines
    """
    url = "/crm/v3/pipelines/tickets"
    updated_at_field = "updatedAt"
    created_at_field = "createdAt"
class EmailEvents(IncrementalStream):
    """Email events, API v1
    Docs: https://legacydocs.hubspot.com/docs/methods/email/get_events
    """
    url = "/email/public/v1/events"
    data_field = "events"
    more_key = "hasMore"
    # Events are immutable, so "created" serves as both cursor and creation field.
    updated_at_field = "created"
    created_at_field = "created"
class Engagements(IncrementalStream):
    """Engagements, API v1
    Docs: https://legacydocs.hubspot.com/docs/methods/engagements/get-all-engagements
          https://legacydocs.hubspot.com/docs/methods/engagements/get-recent-engagements

    Fix: the class previously assigned ``url = "/engagements/v1/engagements/paged"``
    as a class attribute, but the ``url`` property defined later in the class body
    replaces it in the class namespace, making the assignment dead code — removed.
    """

    more_key = "hasMore"
    limit = 250
    updated_at_field = "lastUpdated"
    created_at_field = "createdAt"

    @property
    def url(self):
        """Use the "recent" endpoint when state exists, the full paged endpoint otherwise."""
        if self.state:
            return "/engagements/v1/engagements/recent/modified"
        return "/engagements/v1/engagements/paged"

    def _transform(self, records: Iterable) -> Iterable:
        # Flatten the nested "engagement" envelope into the top-level record.
        yield from super()._transform({**record.pop("engagement"), **record} for record in records)

    def request_params(
        self,
        stream_state: Mapping[str, Any],
        stream_slice: Mapping[str, Any] = None,
        next_page_token: Mapping[str, Any] = None,
    ) -> MutableMapping[str, Any]:
        params = {self.limit_field: self.limit}
        if self.state:
            # Incremental mode: only fetch engagements modified since the bookmark.
            params["since"] = int(self._state.timestamp() * 1000)
        return params
class Forms(Stream):
    """Marketing Forms, API v3
    by default non-marketing forms are filtered out of this endpoint
    Docs: https://developers.hubspot.com/docs/api/marketing/forms
    """
    # entity enables runtime property-schema resolution (see Stream.properties).
    entity = "form"
    url = "/marketing/v3/forms"
    updated_at_field = "updatedAt"
    created_at_field = "createdAt"
class FormSubmissions(Stream):
    """Marketing form submissions, API v1.

    This endpoint requires the forms scope.
    Docs: https://legacydocs.hubspot.com/docs/methods/forms/get-submissions-for-a-form
    """

    url = "/form-integrations/v1/submissions/forms"
    limit = 50
    updated_at_field = "updatedAt"

    def path(
        self,
        *,
        stream_state: Mapping[str, Any] = None,
        stream_slice: Mapping[str, Any] = None,
        next_page_token: Mapping[str, Any] = None,
    ) -> str:
        """Per-form submissions endpoint."""
        return f"{self.url}/{stream_slice['form_id']}"

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # A sibling Forms stream supplies the form ids used to build slices.
        self.forms = Forms(**kwargs)

    def _transform(self, records: Iterable) -> Iterable:
        """Backfill a missing updatedAt from submittedAt.

        Submissions carry no updatedAt, but the stream's date filtering
        (_filter_old_records) reads that field — so copy it from submittedAt.
        """
        for submission in super()._transform(records):
            if "updatedAt" not in submission.keys():
                submission["updatedAt"] = submission["submittedAt"]
            yield submission

    def stream_slices(
        self, *, sync_mode: SyncMode, cursor_field: List[str] = None, stream_state: Mapping[str, Any] = None
    ) -> Iterable[Optional[Mapping[str, Any]]]:
        """One slice per distinct form id."""
        # To get submissions for all forms, date filtering has to be disabled.
        self.forms.filter_old_records = False
        unique_slices = []
        seen_ids = set()
        for form in self.forms.read_records(sync_mode):
            form_id = form["id"]
            if form_id in seen_ids:
                continue
            seen_ids.add(form_id)
            unique_slices.append({"form_id": form_id})
        return unique_slices

    def read_records(
        self,
        sync_mode: SyncMode,
        cursor_field: List[str] = None,
        stream_slice: Mapping[str, Any] = None,
        stream_state: Mapping[str, Any] = None,
    ) -> Iterable[Mapping[str, Any]]:
        """Tag each submission with the form id of its slice."""
        for submission in super().read_records(
            sync_mode, cursor_field=cursor_field, stream_slice=stream_slice, stream_state=stream_state
        ):
            submission["formId"] = stream_slice["form_id"]
            yield submission
class MarketingEmails(Stream):
    """Marketing Email, API v1
    Docs: https://legacydocs.hubspot.com/docs/methods/cms_email/get-all-marketing-emails
    """
    url = "/marketing-emails/v1/emails/with-statistics"
    data_field = "objects"
    limit = 250
    updated_at_field = "updated"
    created_at_field = "created"
class Owners(Stream):
    """Owners, API v3 (the URL below is the v3 endpoint despite the legacy docs link)
    Docs: https://legacydocs.hubspot.com/docs/methods/owners/get_owners
    """
    url = "/crm/v3/owners"
    updated_at_field = "updatedAt"
    created_at_field = "createdAt"
class PropertyHistory(IncrementalStream):
    """Contacts Endpoint, API v1.

    Used to get all Contacts and the history of their respective properties;
    whenever a property is changed a new version entry appears here.
    Docs: https://legacydocs.hubspot.com/docs/methods/contacts/get_contacts
    """

    more_key = "has-more"
    url = "/contacts/v1/lists/recently_updated/contacts/recent"
    updated_at_field = "timestamp"
    created_at_field = "timestamp"
    data_field = "contacts"
    page_field = "vid-offset"
    page_filter = "vidOffset"
    limit = 100

    def list(self, fields) -> Iterable:
        """Read contacts with full value-and-history property mode.

        Bug fix: ``self._api.get`` returns a ``(data, response)`` tuple (see its
        other call sites in this module), so the data payload must be unpacked
        before iterating — previously the tuple itself was iterated.
        """
        properties, _response = self._api.get("/properties/v2/contact/properties")
        properties_list = [single_property["name"] for single_property in properties]
        params = {"propertyMode": "value_and_history", "property": properties_list}
        yield from self.read(partial(self._api.get, url=self.url), params)

    def _transform(self, records: Iterable) -> Iterable:
        """Flatten each contact into one record per historical property version."""
        for record in records:
            properties = record.get("properties")
            vid = record.get("vid")
            value_dict: Dict
            for key, value_dict in properties.items():
                versions = value_dict.get("versions")
                if key == "lastmodifieddate":
                    # Skipping the lastmodifieddate since it only returns the value
                    # when one field of a contact was changed no matter which
                    # field was changed. It therefore creates overhead, since for
                    # every changed property there will be the date it was changed in itself
                    # and a change in the lastmodifieddate field.
                    continue
                if versions:
                    for version in versions:
                        version["timestamp"] = self._field_to_datetime(version["timestamp"]).to_datetime_string()
                        version["property"] = key
                        version["vid"] = vid
                        yield version
class SubscriptionChanges(IncrementalStream):
    """Subscriptions timeline for a portal, API v1
    Docs: https://legacydocs.hubspot.com/docs/methods/email/get_subscriptions_timeline
    """
    url = "/email/public/v1/subscriptions/timeline"
    data_field = "timeline"
    more_key = "hasMore"
    updated_at_field = "timestamp"
class Workflows(Stream):
    """Workflows, API v3
    Docs: https://legacydocs.hubspot.com/docs/methods/workflows/v3/get_workflows
    """
    url = "/automation/v3/workflows"
    data_field = "workflows"
    updated_at_field = "updatedAt"
    created_at_field = "insertedAt"
class Companies(CRMSearchStream):
    """Companies, CRM API v3 (incremental via the search endpoint)."""
    entity = "company"
    last_modified_field = "hs_lastmodifieddate"
    associations = ["contacts"]
class Contacts(CRMSearchStream):
    """Contacts, CRM API v3 (incremental via the search endpoint)."""
    entity = "contact"
    last_modified_field = "lastmodifieddate"
    # NOTE(review): requesting a "contacts" association on the contact entity
    # looks unusual — confirm against the CRM associations API.
    associations = ["contacts", "companies"]
class EngagementsCalls(CRMSearchStream):
    """Call engagements, CRM API v3 (incremental via the search endpoint)."""
    entity = "calls"
    last_modified_field = "hs_lastmodifieddate"
    # NOTE(review): singular "deal"/"company" differs from the plural forms used
    # elsewhere in this module — confirm the association names against the API.
    associations = ["contacts", "deal", "company"]
class EngagementsEmails(CRMSearchStream):
    """Email engagements, CRM API v3 (incremental via the search endpoint)."""
    entity = "emails"
    last_modified_field = "hs_lastmodifieddate"
    associations = ["contacts", "deal", "company"]
class EngagementsMeetings(CRMSearchStream):
    """Meeting engagements, CRM API v3 (incremental via the search endpoint)."""
    entity = "meetings"
    last_modified_field = "hs_lastmodifieddate"
    associations = ["contacts", "deal", "company"]
class EngagementsNotes(CRMSearchStream):
    """Note engagements, CRM API v3 (incremental via the search endpoint)."""
    entity = "notes"
    last_modified_field = "hs_lastmodifieddate"
    associations = ["contacts", "deal", "company"]
class EngagementsTasks(CRMSearchStream):
    """Task engagements, CRM API v3 (incremental via the search endpoint)."""
    entity = "tasks"
    last_modified_field = "hs_lastmodifieddate"
    associations = ["contacts", "deal", "company"]
class FeedbackSubmissions(CRMObjectIncrementalStream):
    """Feedback submissions, CRM API v3 (incremental object stream)."""
    entity = "feedback_submissions"
    associations = ["contacts"]
class LineItems(CRMObjectIncrementalStream):
    """Line items, CRM API v3 (incremental object stream)."""
    entity = "line_item"
class Products(CRMObjectIncrementalStream):
    """Products, CRM API v3 (incremental object stream)."""
    entity = "product"
class Tickets(CRMObjectIncrementalStream):
    """Tickets, CRM API v3 (incremental object stream)."""
    entity = "ticket"
    associations = ["contacts", "deals", "companies"]
class Quotes(CRMObjectIncrementalStream):
    """Quotes, CRM API v3 (incremental object stream)."""
    entity = "quote"
| 37.64
| 164
| 0.640481
|
4a0e1f052b0dee7b94a0529a193360fc43eeaf70
| 917
|
py
|
Python
|
src/pywriter/file/sc_cr_filter.py
|
peter88213/PyWriter
|
9d0cb2304f29f40fb305c35c547bf6f4f5adbea7
|
[
"MIT"
] | 1
|
2020-09-04T08:20:38.000Z
|
2020-09-04T08:20:38.000Z
|
src/pywriter/file/sc_cr_filter.py
|
peter88213/PyWriter
|
9d0cb2304f29f40fb305c35c547bf6f4f5adbea7
|
[
"MIT"
] | 19
|
2020-01-04T10:43:27.000Z
|
2022-03-30T19:03:44.000Z
|
src/pywriter/file/sc_cr_filter.py
|
peter88213/PyWriter
|
9d0cb2304f29f40fb305c35c547bf6f4f5adbea7
|
[
"MIT"
] | null | null | null |
"""Provide a scene per character filter class for template-based file export.
Copyright (c) 2021 Peter Triesberger
For further information see https://github.com/peter88213/PyWriter
Published under the MIT License (https://opensource.org/licenses/mit-license.php)
"""
class ScCrFilter():
    """Filter scenes by a participating character.

    Strategy class implementing a filtering criterion for template-based
    scene export.
    """

    def __init__(self, crId=None):
        # crId -- character ID to match; None disables filtering (accept all scenes).
        self.character = crId

    def accept(self, source, id):
        """Return True if the scene identified by `id` features the filter character.

        With no filter character set, every scene is accepted. Lookup problems
        (unknown scene ID, missing attributes, characters being None) are
        treated as "no match" rather than raising — preserving the original
        best-effort behavior, but with the bare ``except:`` narrowed so it no
        longer swallows KeyboardInterrupt/SystemExit.
        """
        if self.character is None:
            return True
        try:
            return self.character in source.scenes[id].characters
        except (AttributeError, KeyError, TypeError):
            return False
| 26.2
| 82
| 0.592148
|
4a0e206a5d2387580acba8d2324b8818681cd113
| 312
|
py
|
Python
|
api_interview/ext/database.py
|
lfdivino/888-spectate-interview-project
|
394d68fc3e97cdbc1c7786f4f9296b3a08e9c721
|
[
"MIT"
] | 1
|
2020-11-24T15:43:06.000Z
|
2020-11-24T15:43:06.000Z
|
api_interview/ext/database.py
|
lfdivino/888-spectate-interview-project
|
394d68fc3e97cdbc1c7786f4f9296b3a08e9c721
|
[
"MIT"
] | 12
|
2019-12-02T14:13:30.000Z
|
2020-01-22T13:53:14.000Z
|
api_interview/ext/database.py
|
lfdivino/rest-api-interview-project
|
394d68fc3e97cdbc1c7786f4f9296b3a08e9c721
|
[
"MIT"
] | null | null | null |
import os
from flask import Flask
from flask_pymongo import PyMongo
# Module-level PyMongo extension instance; bound to the Flask app in init_app().
mongo = PyMongo()
def init_app(app: Flask):
    """Bind the shared PyMongo client to *app* using env-provided credentials.

    Fix: the username and password are now percent-encoded before being
    interpolated into the connection URI — per the MongoDB connection-string
    spec, reserved characters (':', '@', '/', '%') in credentials must be
    escaped, otherwise the URI is parsed incorrectly.
    """
    from urllib.parse import quote_plus  # local import keeps the module import block unchanged

    user = quote_plus(os.environ["MONGO_INITDB_ROOT_USERNAME"])
    password = quote_plus(os.environ["MONGO_INITDB_ROOT_PASSWORD"])
    database = os.environ["MONGO_INITDB_DATABASE"]
    uri = f"mongodb://{user}:{password}@localhost:27017/{database}?authSource=admin"
    mongo.init_app(app, uri=uri)
| 28.363636
| 195
| 0.785256
|
4a0e20769e0655531c9ce1ec93f8612201fa496d
| 2,267
|
py
|
Python
|
release/stubs.min/Rhino/Geometry/__init___parts/SpaceMorph.py
|
htlcnn/ironpython-stubs
|
780d829e2104b2789d5f4d6f32b0ec9f2930ca03
|
[
"MIT"
] | 182
|
2017-06-27T02:26:15.000Z
|
2022-03-30T18:53:43.000Z
|
release/stubs.min/Rhino/Geometry/__init___parts/SpaceMorph.py
|
htlcnn/ironpython-stubs
|
780d829e2104b2789d5f4d6f32b0ec9f2930ca03
|
[
"MIT"
] | 28
|
2017-06-27T13:38:23.000Z
|
2022-03-15T11:19:44.000Z
|
release/stubs.min/Rhino/Geometry/__init___parts/SpaceMorph.py
|
htlcnn/ironpython-stubs
|
780d829e2104b2789d5f4d6f32b0ec9f2930ca03
|
[
"MIT"
] | 67
|
2017-06-28T09:43:59.000Z
|
2022-03-20T21:17:10.000Z
|
class SpaceMorph(object):
    """ Represents a spacial,Euclidean morph. """
    # NOTE: auto-generated IronPython stub (for Rhino.Geometry.SpaceMorph, per the
    # source path) — method bodies are `pass` placeholders and the properties are
    # lambda-based stand-ins; do not expect runtime behavior from this class.
    @staticmethod
    def IsMorphable(geometry):
        """
        IsMorphable(geometry: GeometryBase) -> bool
        true if the geometry can be morphed by calling SpaceMorph.Morph(geometry)
        """
        pass
    def Morph(self,geometry):
        """
        Morph(self: SpaceMorph,geometry: GeometryBase) -> bool
        Apply the space morph to geometry.
        geometry: Geometry to morph.
        Returns: true on success,false on failure.
        """
        pass
    def MorphPoint(self,point):
        """
        MorphPoint(self: SpaceMorph,point: Point3d) -> Point3d
        Morphs an Euclidean point. This method is abstract.
        point: A point that will be morphed by this function.
        Returns: Resulting morphed point.
        """
        pass
    # Property stubs: getter/setter/deleter are placeholder lambdas from the generator.
    PreserveStructure=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """true if the morph should be done in a way that preserves the structure of the geometry.
    In particular,for NURBS objects,true means that only the control points are moved.
    The PreserveStructure value does not affect the way meshes and points are morphed.
    The default is false.
    Get: PreserveStructure(self: SpaceMorph) -> bool
    Set: PreserveStructure(self: SpaceMorph)=value
    """
    QuickPreview=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """true if the morph should be done as quickly as possible because the result
    is being used for some type of dynamic preview. If QuickPreview is true,
    the tolerance may be ignored.
    The QuickPreview value does not affect the way meshes and points are morphed.
    The default is false.
    Get: QuickPreview(self: SpaceMorph) -> bool
    Set: QuickPreview(self: SpaceMorph)=value
    """
    Tolerance=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """The desired accuracy of the morph. This value is primarily used for deforming
    surfaces and breps. The default is 0.0 and any value <= 0.0 is ignored by
    morphing functions. The Tolerance value does not affect the way meshes and points
    are morphed.
    Get: Tolerance(self: SpaceMorph) -> float
    Set: Tolerance(self: SpaceMorph)=value
    """
| 22.009709
| 92
| 0.691222
|
4a0e238addccb29368fc28c7b5f34130bd2cc862
| 1,678
|
py
|
Python
|
do_Fib.py
|
CraneJen/Python-Study-Srouce-code
|
ff4bbb5025a4bf8d4ebef587f31929ad2ea84f29
|
[
"Apache-2.0"
] | null | null | null |
do_Fib.py
|
CraneJen/Python-Study-Srouce-code
|
ff4bbb5025a4bf8d4ebef587f31929ad2ea84f29
|
[
"Apache-2.0"
] | null | null | null |
do_Fib.py
|
CraneJen/Python-Study-Srouce-code
|
ff4bbb5025a4bf8d4ebef587f31929ad2ea84f29
|
[
"Apache-2.0"
] | null | null | null |
class Fib(object):
    """Iterator yielding Fibonacci numbers no greater than ``max``."""

    def __init__(self, max):
        # (a, b) is the usual sliding pair; ``a`` holds the last emitted value.
        self.a, self.b = 0, 1
        self.max = max

    def __iter__(self):
        # The object is its own iterator.
        return self

    def __next__(self):
        nxt = self.b
        self.a, self.b = self.b, self.a + self.b
        if nxt > self.max:
            raise StopIteration()
        return nxt
class Fib1(object):
    """Index-style Fibonacci access: ``Fib1()[n]`` returns the n-th number (F(0) == 1)."""

    def __getitem__(self, n):
        prev, cur = 1, 1
        while n > 0:
            prev, cur = cur, prev + cur
            n -= 1
        return prev
class Fib2(object):
    """Fibonacci access supporting both integer indexing and slicing.

    ``Fib2()[n]`` returns the n-th number (F(0) == 1); ``Fib2()[i:j:k]``
    returns the sliced list of the first ``j`` numbers.  Slices must have an
    explicit, non-negative ``stop``.
    """

    def __getitem__(self, n):
        if isinstance(n, int):
            prev, cur = 1, 1
            for _ in range(n):
                prev, cur = cur, prev + cur
            return prev
        if isinstance(n, slice):
            # Defaults mirror plain list slicing for missing start/step.
            step = 1 if n.step is None else n.step
            start = 0 if n.start is None else n.start
            values = []
            prev, cur = 1, 1
            for idx in range(n.stop):
                if idx >= start:
                    values.append(prev)
                prev, cur = cur, prev + cur
            # Apply the step after collecting the start:stop window.
            return values[::step]
def fib1(max):
    """Print Fibonacci numbers strictly below *max*, space-separated, no newline."""
    prev, cur = 0, 1
    while cur < max:
        print(cur, end=' ')
        prev, cur = cur, prev + cur
def fib2(max):
    """Return a list of the Fibonacci numbers strictly below *max*."""
    values = []
    prev, cur = 0, 1
    while cur < max:
        values.append(cur)
        prev, cur = cur, prev + cur
    return values
def fib3(max):
    """Print the first *max* Fibonacci numbers, space-separated, no newline."""
    prev, cur = 0, 1
    for _ in range(max):
        print(cur, end=' ')
        prev, cur = cur, prev + cur
# yield
def fib4(max):
    """Lazily yield the first *max* Fibonacci numbers."""
    prev, cur = 0, 1
    for _ in range(max):
        yield cur
        prev, cur = cur, prev + cur
# Demo: materialize and print the first 10 numbers from the generator version.
print([i for i in fib4(10)])
| 18.644444
| 48
| 0.407628
|
4a0e24eb44ba5877791a7d8bdf7e8640e6579496
| 2,203
|
py
|
Python
|
steps/run_psi4_calculations.py
|
jamesclark-Zapata/qe-psi4
|
2bd6aaff8e6d6bf3d6f7a33d420f49a67b10ba2c
|
[
"Apache-2.0"
] | 7
|
2020-04-16T23:27:15.000Z
|
2021-11-17T11:37:52.000Z
|
steps/run_psi4_calculations.py
|
jamesclark-Zapata/qe-psi4
|
2bd6aaff8e6d6bf3d6f7a33d420f49a67b10ba2c
|
[
"Apache-2.0"
] | 13
|
2020-05-12T13:09:15.000Z
|
2021-09-28T22:18:03.000Z
|
steps/run_psi4_calculations.py
|
jamesclark-Zapata/qe-psi4
|
2bd6aaff8e6d6bf3d6f7a33d420f49a67b10ba2c
|
[
"Apache-2.0"
] | 2
|
2021-01-15T23:28:37.000Z
|
2021-07-28T14:32:17.000Z
|
import os, json
from qepsi4 import run_psi4 as _run_psi4
from zquantum.core.openfermion import save_interaction_operator, save_interaction_rdm
from zquantum.core.utils import SCHEMA_VERSION
def run_psi4(
    basis,
    method,
    reference,
    geometry,
    freeze_core=False,
    charge=0,
    multiplicity=1,
    save_hamiltonian=False,
    save_rdms=False,
    n_active_extract="None",
    n_occupied_extract="None",
    freeze_core_extract=False,
    nthreads=1,
    options="None",
    wavefunction="None",
):
    """Run a Psi4 calculation via qepsi4 and write its artifacts to disk.

    The workflow engine passes optional values as the literal string "None";
    those sentinels are normalized to real ``None`` before calling Psi4.

    Files written to the working directory:
      - energycalc-results.json (tagged with the workflow schema version)
      - hamiltonian.json / rdms.json, when requested
      - n_alpha.txt, n_beta.txt, n_mo.txt, n_frozen_core.txt

    NOTE(review): ``nthreads`` and ``wavefunction`` are accepted but not
    forwarded to ``_run_psi4`` here — presumably kept for interface
    compatibility; confirm against the workflow definition.
    """
    # Psi4 needs a scratch directory. makedirs(exist_ok=True) tolerates
    # re-runs in the same container, where os.mkdir would raise FileExistsError.
    os.makedirs("/app/scr", exist_ok=True)
    os.environ["PSI_SCRATCH"] = "/app/scr"

    # Normalize string sentinels coming from the workflow definition.
    if n_active_extract == "None":
        n_active_extract = None
    if n_occupied_extract == "None":
        n_occupied_extract = None
    if options == "None":
        options = None
    elif isinstance(options, str):
        # Options may arrive as a JSON-encoded string.
        options = json.loads(options)
    if wavefunction == "None":
        wavefunction = None

    # ``geometry`` is a path to a JSON artifact; load its contents.
    with open(geometry) as f:
        geometry = json.load(f)

    res = _run_psi4(
        geometry,
        basis=basis,
        multiplicity=multiplicity,
        charge=charge,
        method=method,
        reference=reference,
        freeze_core=freeze_core,
        save_hamiltonian=save_hamiltonian,
        save_rdms=save_rdms,
        options=options,
        n_active_extract=n_active_extract,
        n_occupied_extract=n_occupied_extract,
        freeze_core_extract=freeze_core_extract,
    )

    results = res["results"]
    results["schema"] = SCHEMA_VERSION + "-energy_calc"

    with open("energycalc-results.json", "w") as f:
        f.write(json.dumps(results, indent=2))

    # Optional artifacts, only produced when requested above.
    hamiltonian = res.get("hamiltonian", None)
    if hamiltonian is not None:
        save_interaction_operator(hamiltonian, "hamiltonian.json")
    rdms = res.get("rdms", None)
    if rdms is not None:
        save_interaction_rdm(rdms, "rdms.json")

    # Plain-text scalar outputs consumed by downstream workflow steps.
    with open("n_alpha.txt", "w") as f:
        f.write(str(results["n_alpha"]))
    with open("n_beta.txt", "w") as f:
        f.write(str(results["n_beta"]))
    with open("n_mo.txt", "w") as f:
        f.write(str(results["n_mo"]))
    with open("n_frozen_core.txt", "w") as f:
        f.write(str(results["n_frozen_core"]))
| 26.865854
| 85
| 0.638675
|
4a0e25157fa6b768a5faeec0cc988be43f033d99
| 882
|
py
|
Python
|
test/test_registered_faces_response.py
|
vtpl1/vtpl_api
|
d289c92254deb040de925205c583de69802a1c6b
|
[
"MIT"
] | null | null | null |
test/test_registered_faces_response.py
|
vtpl1/vtpl_api
|
d289c92254deb040de925205c583de69802a1c6b
|
[
"MIT"
] | null | null | null |
test/test_registered_faces_response.py
|
vtpl1/vtpl_api
|
d289c92254deb040de925205c583de69802a1c6b
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Engine api
Engine APIs # noqa: E501
The version of the OpenAPI document: 1.0.4
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import vtpl_api
from vtpl_api.models.registered_faces_response import RegisteredFacesResponse # noqa: E501
from vtpl_api.rest import ApiException
class TestRegisteredFacesResponse(unittest.TestCase):
    """RegisteredFacesResponse unit test stubs"""

    def setUp(self):
        # No per-test fixtures needed yet.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testRegisteredFacesResponse(self):
        """Test RegisteredFacesResponse"""
        # FIXME: construct object with mandatory attributes with example values
        # model = vtpl_api.models.registered_faces_response.RegisteredFacesResponse() # noqa: E501
        pass
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
| 22.05
| 99
| 0.716553
|
4a0e257d34cff78fb524977c887a613a2d78e44d
| 4,666
|
py
|
Python
|
tests/test_bad_batch_request.py
|
oreillymedia/django-batch-requests
|
ca3f126fdf1a9e7abb0d7de205fac2b6633de933
|
[
"MIT"
] | 1
|
2019-03-03T08:46:13.000Z
|
2019-03-03T08:46:13.000Z
|
tests/test_bad_batch_request.py
|
oreillymedia/django-batch-requests
|
ca3f126fdf1a9e7abb0d7de205fac2b6633de933
|
[
"MIT"
] | null | null | null |
tests/test_bad_batch_request.py
|
oreillymedia/django-batch-requests
|
ca3f126fdf1a9e7abb0d7de205fac2b6633de933
|
[
"MIT"
] | 2
|
2019-02-26T03:21:35.000Z
|
2021-08-03T06:17:35.000Z
|
'''
@author: Rahul Tanwani
@summary: Test cases to check the behavior when the batch request is
not constructed properly.
'''
import json
import mock
import pytest
from django.test import TestCase
from tests import ensure_text_content
class TestBadBatchRequest(TestCase):
    '''
    Check the behavior of bad batch request.
    '''

    def _batch_request(self, method, path, data, headers=None):
        '''
        Prepares a batch request dict with url, method, headers and body.
        '''
        # Use None instead of a mutable {} default argument; a shared default
        # dict would be reused (and potentially mutated) across calls.
        if headers is None:
            headers = {}
        return {"url": path, "method": method, "headers": headers, "body": data}

    def test_invalid_http_method(self):
        '''
        Make a batch request with invalid HTTP method.
        '''
        resp = ensure_text_content(
            self.client.post(
                "/api/v1/batch/",
                json.dumps([self._batch_request("select", "/views", "", {})]),
                content_type="application/json"
            )
        )
        self.assertEqual(resp.status_code, 400, "Method validation is broken!")
        self.assertEqual(resp.text.lower(), "invalid request method.", "Method validation is broken!")

    def test_missing_http_method(self):
        '''
        Make a batch request without HTTP method.
        '''
        resp = ensure_text_content(
            self.client.post(
                "/api/v1/batch/",
                json.dumps([{"body": "/views"}]),
                content_type="application/json"
            )
        )
        self.assertEqual(resp.status_code, 400, "Method & URL validation is broken!")
        self.assertEqual(resp.text.lower(), "request definition should have url, method defined.", "Method validation is broken!")

    def test_missing_url(self):
        '''
        Make a batch request without the URL.
        '''
        resp = ensure_text_content(
            self.client.post(
                "/api/v1/batch/",
                json.dumps([{"method": "get"}]),
                content_type="application/json"
            )
        )
        self.assertEqual(resp.status_code, 400, "Method & URL validation is broken!")
        self.assertEqual(resp.text.lower(), "request definition should have url, method defined.",
                         "Method validation is broken!")

    def test_invalid_batch_request(self):
        '''
        Make a batch request without wrapping in the list.
        '''
        resp = ensure_text_content(
            self.client.post(
                "/api/v1/batch/",
                json.dumps({"method": "get", "url": "/views/"}),
                content_type="application/json"
            )
        )
        self.assertEqual(resp.status_code, 400, "Batch requests should always be in list.")
        self.assertEqual(resp.text.lower(), "the body of batch request should always be list!",
                         "List validation is broken!")

    def test_view_that_raises_exception(self):
        '''
        Make a batch request to a view that raises exception.
        '''
        # The batch endpoint itself must return 200 even when individual
        # sub-requests fail; failures surface per-item in the response body.
        with self.settings(DEBUG=True):
            resp = ensure_text_content(
                self.client.post(
                    "/api/v1/batch/",
                    json.dumps([
                        {"method": "get", "url": "/exception/"},
                        {"method": "get", "url": "/rate-limited/"},
                        {"method": "get", "url": "/views/"},
                    ]),
                    content_type="application/json"
                )
            )
        assert resp.status_code == 200
        responses = json.loads(resp.text)

        expected = {
            0: (500, 'exception'),
            1: (429, 'rate limited'),
            2: (200, 'success!'),
        }
        for index, expects in expected.items():
            resp = responses[index]
            exp_status, exp_explanation = expects
            assert resp['status_code'] == exp_status
            assert resp['body'].lower() == exp_explanation

    @pytest.mark.regression
    def test_exception_view_with_deserialization(self):
        """Make batch request to exception endpoint AND try to deserialize the response"""
        with mock.patch('batch_requests.views._settings.DESERIALIZE_RESPONSES') as mock_des:
            mock_des.return_value = True
            resp = ensure_text_content(
                self.client.post(
                    "/api/v1/batch/",
                    json.dumps([{"method": "get", "url": "/exception/"}]),
                    content_type="application/json"
                )
            )
        assert resp.status_code == 200
        responses = json.loads(resp.text)
        assert responses[0]['status_code'] == 500
| 33.811594
| 130
| 0.539863
|
4a0e25b5e90cee73f77a3bf4bf782308d16cc0c6
| 399
|
py
|
Python
|
oggm/core/gcm_climate.py
|
skachuck/oggm
|
b391e6923fb0c5269e10ea260f5199a26d5e1082
|
[
"BSD-3-Clause"
] | 156
|
2015-10-11T16:38:43.000Z
|
2022-03-24T04:19:16.000Z
|
oggm/core/gcm_climate.py
|
skachuck/oggm
|
b391e6923fb0c5269e10ea260f5199a26d5e1082
|
[
"BSD-3-Clause"
] | 953
|
2015-10-11T16:26:14.000Z
|
2022-03-27T23:19:19.000Z
|
oggm/core/gcm_climate.py
|
skachuck/oggm
|
b391e6923fb0c5269e10ea260f5199a26d5e1082
|
[
"BSD-3-Clause"
] | 92
|
2015-10-19T08:53:23.000Z
|
2022-03-28T08:00:17.000Z
|
"""Climate data pre-processing"""
import warnings
warnings.warn('The module `oggm.core.gcm_climate` has moved to '
'oggm.shop.gcm_climate. This compatibility module will be '
'removed in future OGGM versions', FutureWarning)
from oggm.shop.gcm_climate import (process_gcm_data, process_cmip5_data,
process_cesm_data, process_cmip_data)
| 49.875
| 73
| 0.684211
|
4a0e261810ca9faa6cb74e84a1f7e521e0631bb0
| 14,019
|
py
|
Python
|
libraries/categories/models.py
|
chris-lawton/libraries_wagtail
|
60c2484b137bb33763da2e49b191b1a380f3d56f
|
[
"ECL-2.0"
] | null | null | null |
libraries/categories/models.py
|
chris-lawton/libraries_wagtail
|
60c2484b137bb33763da2e49b191b1a380f3d56f
|
[
"ECL-2.0"
] | null | null | null |
libraries/categories/models.py
|
chris-lawton/libraries_wagtail
|
60c2484b137bb33763da2e49b191b1a380f3d56f
|
[
"ECL-2.0"
] | null | null | null |
from django.conf import settings
from django.db import models
from django import forms
from django.shortcuts import redirect, render
from modelcluster.fields import ParentalKey
from wagtail.core.models import Page, Orderable
from wagtail.core.fields import RichTextField, StreamField
from wagtail.admin.edit_handlers import FieldPanel, FieldRowPanel, InlinePanel, StreamFieldPanel
from wagtail.images.edit_handlers import ImageChooserPanel
from wagtail.snippets.edit_handlers import SnippetChooserPanel
from wagtail.search import index
from wagtail.core.blocks import ChoiceBlock, StructBlock, StreamBlock, CharBlock, FieldBlock, RichTextBlock, TextBlock, RawHTMLBlock, URLBlock
from wagtail.images.blocks import ImageChooserBlock
# we don't use this right now but it's here waiting to be added
# to ImageBlock() if need be
class ImageFormatChoiceBlock(FieldBlock):
    """Image alignment choice field (currently unused; kept for future use in ImageBlock)."""

    field = forms.ChoiceField(choices=(
        ('left', 'Wrap left'),
        ('right', 'Wrap right'),
        ('mid', 'Mid width'),
        ('full', 'Full width'),
    ))
class ImageBlock(StructBlock):
    """An image with an optional rich-text caption."""

    image = ImageChooserBlock()
    caption = RichTextBlock(features=settings.RICHTEXT_BASIC, required=False)
    # alignment = ImageFormatChoiceBlock()

    class Meta:
        icon = "image"
        template = "categories/blocks/image.html"
class LinkedImageBlock(StructBlock):
    """An image with an optional caption that links to a URL."""

    image = ImageChooserBlock()
    caption = RichTextBlock(features=settings.RICHTEXT_BASIC, required=False)
    # alignment = ImageFormatChoiceBlock()
    link = URLBlock()

    class Meta:
        icon = "link"
        template = "categories/blocks/linked-image.html"
class PullQuoteBlock(StructBlock):
    """A pull quote with optional attribution (name and position/affiliation)."""

    quote = TextBlock("quote title")
    name = CharBlock(required=False)
    position = CharBlock(required=False, label="Position or affiliation")

    class Meta:
        icon = "openquote"
        template = "categories/blocks/quote.html"
# no need for a template as raw HTML is what we want
class EmbedHTML(RawHTMLBlock):
    """Raw HTML/embed-code block; rendered verbatim, so no template is used."""

    html = RawHTMLBlock(
        "Embed code or raw HTML",
        help_text='Use this sparingly, if possible.',
    )
# two blocks combined in one row
class RowBlock(StreamBlock):
    """Two content blocks rendered side by side in one row."""

    # Which side of the row gets the larger share of the width.
    distribution = ChoiceBlock(
        blank=False,
        choices=(
            ('left', 'left side bigger'),
            ('right', 'right side bigger'),
            ('equal', 'equal size sides'),
        ),
        min_num=1,
        max_num=1,
    )
    paragraph = RichTextBlock(
        features=settings.RICHTEXT_ADVANCED,
        template="categories/blocks/paragraph.html",
        icon="pilcrow",
    )
    image = ImageBlock()
    linked_image = LinkedImageBlock()
    pullquote = PullQuoteBlock()
    # questionable that this should be advanced HTML but we use callouts a lot
    snippet = RichTextBlock(
        features=settings.RICHTEXT_ADVANCED,
        label="Callout",
        template="categories/blocks/snippet.html")

    class Meta:
        help_text = "First child block is given 40% of the row width while the 2nd gets 60%."
        icon = 'form'
        template = "categories/blocks/row.html"
class BaseStreamBlock(StreamBlock):
    """Full set of content blocks available on complex pages (e.g. ServicePage)."""

    subheading = CharBlock(
        icon="title",
        classname="title",
        template="categories/blocks/subheading.html"
    )
    paragraph = RichTextBlock(
        features=settings.RICHTEXT_ADVANCED,
        template="categories/blocks/paragraph.html",
        icon="pilcrow",
    )
    image = ImageBlock()
    linked_image = LinkedImageBlock()
    pullquote = PullQuoteBlock()
    snippet = RichTextBlock(label="Callout", template="categories/blocks/snippet.html")
    html = EmbedHTML(label="Embed code")
    row = RowBlock(max_num=3)
# AboutUsPage has a much simpler template
class AboutUsStreamBlock(StreamBlock):
    """Reduced block set for AboutUsPage, which uses a much simpler template."""

    paragraph = RichTextBlock(
        features=settings.RICHTEXT_ADVANCED,
        icon="pilcrow",
    )
# helper method—for child pages, return their category i.e. parent CategoryPage
# one of: services, collections, about us
def get_category(page):
    """Return the CategoryPage ancestor of *page* (services, collections or about us)."""
    return page.get_ancestors().type(CategoryPage).first()
class CategoryPage(Page):
    """Top-level section page whose children are RowComponents."""

    parent_page_types = ['home.HomePage']
    subpage_types = [
        'categories.RowComponent',
        'categories.AboutUsPage',
    ]

    def get_context(self, request):
        """Add child RowComponent(s) to the template context.

        When previewing a draft RowComponent (signalled by the ``DRAFT`` flag
        that RowComponent.serve_preview sets on request.GET), unpublished
        children are included too.
        """
        # Always build the base context via the parent class. The original
        # DRAFT branch called self.get_context(request) here, which recurses
        # into this same method forever (infinite recursion on draft preview).
        context = super(CategoryPage, self).get_context(request)
        if request.GET.get('DRAFT'):
            rows = self.get_children()
        else:
            rows = self.get_children().live()
        context['rows'] = rows
        return context
# reuses blocks from the BlogPage template
class ServicePage(Page):
    """'Complex text page': rich StreamField content with optional staff card.

    Reuses blocks from the BlogPage template.
    """

    parent_page_types = [
        'categories.RowComponent',
        'categories.ServicePage',
        'categories.AboutUsPage',
        'categories.SpecialCollectionsPage',
    ]
    subpage_types = [
        'categories.ServicePage',
        'categories.AboutUsPage',
        'categories.SpecialCollectionsPage',
    ]

    main_image = models.ForeignKey(
        'wagtailimages.Image',
        null=True,
        blank=True,
        on_delete=models.PROTECT,
        related_name='+',
        help_text='Displays 404px wide on page with a preserved aspect ratio. If this page shows in one of the Services/Collections/About rows, a thumbnail close to 230x115px is generated.'
    )
    staff = models.ForeignKey(
        'staff.StaffMember',
        blank=True,
        help_text='Optional associated staff member',
        null=True,
        on_delete=models.SET_NULL,
        related_name='+',
    )
    display_staff_card = models.BooleanField(default=False, help_text='Display a small "card" showing contact information for the associated staff member.')
    body = StreamField(
        BaseStreamBlock(),
        verbose_name='Page content',
        null=True,
    )
    order = models.IntegerField(
        default=1,
        help_text='Defines the sort order in the parent row (lower numbers go first).',
    )

    # Make the StreamField body full-text searchable.
    search_fields = Page.search_fields + [ index.SearchField('body') ]

    def category(self):
        """Return the enclosing CategoryPage (services/collections/about)."""
        return get_category(self)

    class Meta:
        ordering = ["order", "-last_published_at"]
        verbose_name = 'Complex text page'

    content_panels = Page.content_panels + [
        ImageChooserPanel('main_image'),
        FieldRowPanel(
            (SnippetChooserPanel('staff'), FieldPanel('display_staff_card'),)
        ),
        StreamFieldPanel('body'),
    ]

    promote_panels = Page.promote_panels + [
        FieldPanel('order')
    ]
# does not have a matching template, should never be visited on its own
# but only used as a component of a CategoryPage
# also since the RowComponent is never directly rendered we can't use its
# get_context() method to retrieve child pages, that has to be done in template
class RowComponent(Page):
    """Structural child of CategoryPage; never rendered on its own.

    It has no matching template: it is only used as a component of a
    CategoryPage, so child pages must be fetched in the template rather than
    via this page's own get_context().
    """

    parent_page_types = ['categories.CategoryPage']
    subpage_types = [
        'categories.ServicePage',
        'categories.AboutUsPage',
        'categories.SpecialCollectionsPage',
        'categories.ExternalLink',
        'staff.StaffListPage',
        'hours.HoursPage',
    ]
    summary = RichTextField(features=settings.RICHTEXT_BASIC)

    # do not index for search
    search_fields = []
    # no need for a promote tab since slug & search_desc aren't used
    promote_panels = []
    content_panels = Page.content_panels + [
        FieldPanel('summary'),
    ]

    def category(self):
        return get_category(self)

    # if a row is requested, redirect to its parent CategoryPage instead
    def serve(self, request):
        parent = self.get_parent()
        return redirect(parent.url)

    # rendering drafts is complicated, we need to let the parent know to
    # include draft RowComponents in its context
    def serve_preview(self, request, mode_name):
        parent = self.get_parent()
        # Copy GET so the (immutable) original QueryDict can take the flag.
        request.GET = request.GET.copy()
        request.GET['DRAFT'] = True
        ctx = CategoryPage.get_context(parent, request)
        return render(request, 'categories/category_page.html', context=ctx)
# Another child of RowComponent but with a very different structure & template
class SpecialCollectionsPage(Page):
    """Child of RowComponent with a very different structure and template:
    a list of inline SpecialCollection entries."""

    parent_page_types = [
        'categories.RowComponent',
        'categories.ServicePage',
        'categories.AboutUsPage',
        'categories.SpecialCollectionsPage',
    ]
    subpage_types = [
        'categories.ServicePage',
        'categories.AboutUsPage',
        'categories.SpecialCollectionsPage',
    ]
    order = models.IntegerField(
        default=1,
        help_text='Defines the sort order in the parent row (lower numbers go first).',
    )

    # needs an orderable struct of some sort which contains a title, richtext blurb,
    # link to the external collection, and feature image _at least_
    content_panels = Page.content_panels + [
        InlinePanel('special_collections', label='Special Collection')
    ]

    promote_panels = Page.promote_panels + [
        FieldPanel('order')
    ]

    @property
    def main_image(self):
        """For search results: the first SpecialCollection's image.

        Returns None when the page has no collections yet, instead of
        raising AttributeError on ``None.image``.
        """
        first = self.specific.special_collections.first()
        return first.image if first is not None else None

    def category(self):
        return get_category(self)

    class Meta:
        ordering = ["order", "-last_published_at"]

    # make page searchable by text of child special collections
    search_fields = Page.search_fields + [
        index.RelatedFields('special_collections', [
            index.SearchField('title'),
            index.SearchField('blurb'),
        ]),
    ]
class SpecialCollection(Orderable):
    """One featured collection entry inlined on a SpecialCollectionsPage."""

    page = ParentalKey(SpecialCollectionsPage, related_name='special_collections')
    title = models.CharField(max_length=255)
    blurb = RichTextField(features=settings.RICHTEXT_BASIC)
    # URLField lets this link be either internal or external
    # Per Teri on 2017-08-09: some Spaces on a SpecColl page have no links
    link = models.URLField(blank=True)
    image = models.ForeignKey(
        'wagtailimages.Image',
        # Fixed editor-facing typo: "bst" -> "best".
        help_text='Close to a 2.25-by-1 aspect ratio is best, image is sized to 910x400px at its largest.',
        null=True,
        blank=True,
        on_delete=models.CASCADE,
        related_name='+'
    )

    panels = [
        FieldPanel('title'),
        FieldPanel('blurb'),
        FieldPanel('link'),
        ImageChooserPanel('image'),
    ]
# ServicePage & AboutUsPage are two different templates for the same
# sort of grandchild content (CategoryPage > RowComponent > Service/AboutUsPage)
class AboutUsPage(Page):
    """'Simple text page': same grandchild role as ServicePage but rendered
    with the reduced AboutUsStreamBlock template."""

    parent_page_types = [
        'categories.RowComponent',
        'categories.ServicePage',
        'categories.AboutUsPage',
        'categories.SpecialCollectionsPage',
    ]
    subpage_types = [
        'categories.ServicePage',
        'categories.AboutUsPage',
        'categories.SpecialCollectionsPage',
    ]
    display_staff_card = models.BooleanField(default=False, help_text='Display a small "card" showing contact information for the associated staff member.')
    body = StreamField(
        AboutUsStreamBlock(),
        verbose_name='Page content',
        null=True,
    )
    main_image = models.ForeignKey(
        'wagtailimages.Image',
        null=True,
        blank=True,
        on_delete=models.PROTECT,
        related_name='+',
        help_text='Displays 404px wide on page with a preserved aspect ratio. If this page shows in one of the Services/Collections/About rows, a thumbnail close to 230x115px is generated.',
    )
    staff = models.ForeignKey(
        'staff.StaffMember',
        blank=True,
        help_text='Optional associated staff member',
        null=True,
        on_delete=models.SET_NULL,
        related_name='+',
    )
    order = models.IntegerField(
        default=1,
        help_text='Defines the sort order in the parent row (lower numbers go first).',
    )

    # Make the StreamField body full-text searchable.
    search_fields = Page.search_fields + [ index.SearchField('body') ]

    content_panels = Page.content_panels + [
        ImageChooserPanel('main_image'),
        FieldRowPanel(
            (SnippetChooserPanel('staff'), FieldPanel('display_staff_card'),)
        ),
        StreamFieldPanel('body'),
    ]

    promote_panels = Page.promote_panels + [
        FieldPanel('order')
    ]

    def category(self):
        """Return the enclosing CategoryPage (services/collections/about)."""
        return get_category(self)

    class Meta:
        ordering = ["order", "-last_published_at"]
        verbose_name = 'Simple text page'
class ExternalLink(Page):
    """Row entry that redirects straight to an off-site URL."""

    # only used for linking items in a row to external locations
    parent_page_types = [
        'categories.RowComponent',
    ]
    # external link goes off the site, cannot have children
    subpage_types = []

    link = models.URLField(blank=False)
    staff = models.ForeignKey(
        'staff.StaffMember',
        blank=True,
        help_text='Optional associated staff member',
        null=True,
        on_delete=models.SET_NULL,
        related_name='+',
    )
    main_image = models.ForeignKey(
        'wagtailimages.Image',
        blank=True,
        help_text='If this page shows in one of the Services/Collections/About rows, a thumbnail close to 230x115px is generated.',
        null=True,
        on_delete=models.PROTECT,
        related_name='+',
    )
    order = models.IntegerField(
        default=1,
        help_text='Defines the sort order in the parent row (lower numbers go first).',
    )

    # no need for a promote, search_desc is on content & slug isn't used
    promote_panels = [ FieldPanel('order') ]
    content_panels = Page.content_panels + [
        FieldPanel('link'),
        FieldPanel('search_description'),
        ImageChooserPanel('main_image'),
        SnippetChooserPanel('staff'),
    ]

    # redirect to external URL
    def serve(self, request):
        return redirect(self.link)

    class Meta:
        ordering = ["order", "-last_published_at"]
| 31.933941
| 190
| 0.663742
|
4a0e26b7fa2e426b494eb4c30afc58e83db7dc7b
| 1,140
|
py
|
Python
|
twistedlilypad/packets/request_packet.py
|
flaminscotsman/TwistedLilypad
|
3d43250af2663b33e3fef209a196920beaf696b7
|
[
"MIT"
] | 1
|
2015-01-17T20:01:33.000Z
|
2015-01-17T20:01:33.000Z
|
twistedlilypad/packets/request_packet.py
|
flaminscotsman/TwistedLilypad
|
3d43250af2663b33e3fef209a196920beaf696b7
|
[
"MIT"
] | null | null | null |
twistedlilypad/packets/request_packet.py
|
flaminscotsman/TwistedLilypad
|
3d43250af2663b33e3fef209a196920beaf696b7
|
[
"MIT"
] | null | null | null |
from struct import unpack_from, calcsize, pack
from .abstract_packet import AbstractPacket, AbstractPacketCodec
class PacketRequest(AbstractPacket):
    """A request packet carrying a sequence id, request id and raw payload."""

    opcode = 0x01

    def __init__(self, sequence_id, request_id, payload):
        self.sequenceID = sequence_id
        self.requestID = request_id
        self.payload = payload

    @property
    def payloadSize(self):
        """Length of the raw payload in bytes."""
        return len(self.payload)

    def __eq__(self, other):
        # Compare field-by-field; defer to the other operand for foreign types.
        if not isinstance(other, PacketRequest):
            return NotImplemented
        return (self.sequenceID, self.requestID, self.payload) == \
            (other.sequenceID, other.requestID, other.payload)
class PacketRequestCodec(AbstractPacketCodec):
    """Serializes PacketRequest to/from the '>iBH' header + raw payload wire format."""

    @staticmethod
    def encode(packet):
        assert isinstance(packet, PacketRequest)
        header = pack('>iBH', packet.sequenceID, packet.requestID, packet.payloadSize)
        return header + packet.payload

    @staticmethod
    def decode(payload):
        # Header: int32 sequence id, uint8 request id, uint16 payload size.
        header_fmt = '>iBH'
        sequence_id, request_id, _payload_size = unpack_from(header_fmt, payload)
        body = payload[calcsize(header_fmt):]
        return PacketRequest(sequence_id, request_id, body)
| 30
| 101
| 0.686842
|
4a0e2793aa826c9f3bf2ae0b03422b05b5a71af9
| 540
|
py
|
Python
|
manage.py
|
didid08/flashcms-django
|
471523168964c0124fb0091cccbda4f4b234be87
|
[
"MIT"
] | null | null | null |
manage.py
|
didid08/flashcms-django
|
471523168964c0124fb0091cccbda4f4b234be87
|
[
"MIT"
] | null | null | null |
manage.py
|
didid08/flashcms-django
|
471523168964c0124fb0091cccbda4f4b234be87
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
    # Point Django at the project settings unless the environment already did.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'flashcms.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
| 33.75
| 73
| 0.687037
|
4a0e28ade08bd5335f323778ed98d14b2ab68b28
| 2,551
|
py
|
Python
|
tasksapi/tests/requests_tests/user_queue_permissions_requests_tests.py
|
mwiens91/saltant
|
9e72175a896f5859ada304ad3ae4d84dfc3834db
|
[
"MIT"
] | 3
|
2018-12-08T01:18:29.000Z
|
2018-12-14T23:18:42.000Z
|
tasksapi/tests/requests_tests/user_queue_permissions_requests_tests.py
|
saltant-org/saltant
|
db498a1186fc74221f8214ad1819dd03bf4b08ac
|
[
"MIT"
] | 3
|
2019-05-23T07:43:13.000Z
|
2021-06-10T20:46:53.000Z
|
tasksapi/tests/requests_tests/user_queue_permissions_requests_tests.py
|
saltant-org/saltant
|
db498a1186fc74221f8214ad1819dd03bf4b08ac
|
[
"MIT"
] | 2
|
2019-03-13T22:31:09.000Z
|
2019-05-03T00:18:30.000Z
|
"""Contains requests tests for user queue permissions."""
from rest_framework import status
from rest_framework.test import APITestCase
# Put info about our fixtures data as constants here
NON_ADMIN_USER_AUTH_TOKEN = "02d205bc79d5e8f15f83e249ac227ef0085f953f"
NOT_USERS_PRIVATE_QUEUE_PK = 3
PUBLIC_INACTIVE_QUEUE_PK = 2
CONTAINER_TASK_TYPE_PK = 1
EXECUTABLE_TASK_TYPE_PK = 1
class UserQueuePermissionsRequestsTests(APITestCase):
    """Test user queue permissions."""

    # Fixture provides the non-admin user, queues, and task types referenced
    # by the module-level PK constants.
    fixtures = ["test-fixture.yaml"]

    def setUp(self):
        """Add in user's auth to client."""
        # All requests authenticate as the non-admin fixture user.
        self.client.credentials(
            HTTP_AUTHORIZATION="Token " + NON_ADMIN_USER_AUTH_TOKEN
        )

    def test_posting_to_inactive_queue(self):
        """Test posting a job to an inactive queue."""
        # Both the container and executable task endpoints must reject it.
        post_response_1 = self.client.post(
            "/api/containertaskinstances/",
            dict(
                name="my-task-instance",
                task_type=CONTAINER_TASK_TYPE_PK,
                task_queue=PUBLIC_INACTIVE_QUEUE_PK,
            ),
            format="json",
        )
        post_response_2 = self.client.post(
            "/api/executabletaskinstances/",
            dict(
                name="my-task-instance",
                task_type=EXECUTABLE_TASK_TYPE_PK,
                task_queue=PUBLIC_INACTIVE_QUEUE_PK,
            ),
            format="json",
        )
        self.assertEqual(
            post_response_1.status_code, status.HTTP_400_BAD_REQUEST
        )
        self.assertEqual(
            post_response_2.status_code, status.HTTP_400_BAD_REQUEST
        )

    def test_posting_to_other_users_private_queue(self):
        """Test posting a job to another user's private queue."""
        # Both the container and executable task endpoints must reject it.
        post_response_1 = self.client.post(
            "/api/containertaskinstances/",
            dict(
                name="my-task-instance",
                task_type=CONTAINER_TASK_TYPE_PK,
                task_queue=NOT_USERS_PRIVATE_QUEUE_PK,
            ),
            format="json",
        )
        post_response_2 = self.client.post(
            "/api/executabletaskinstances/",
            dict(
                name="my-task-instance",
                task_type=EXECUTABLE_TASK_TYPE_PK,
                task_queue=NOT_USERS_PRIVATE_QUEUE_PK,
            ),
            format="json",
        )
        self.assertEqual(
            post_response_1.status_code, status.HTTP_400_BAD_REQUEST
        )
        self.assertEqual(
            post_response_2.status_code, status.HTTP_400_BAD_REQUEST
        )
| 31.8875
| 70
| 0.609957
|
4a0e29aa5f3bcf54b6d3c34e13dd38882b5ec3a7
| 3,103
|
py
|
Python
|
achallonge/tournaments.py
|
retke/apychal
|
04ed960610680959c3edab283ea7046bfb15ff53
|
[
"BSD-2-Clause"
] | null | null | null |
achallonge/tournaments.py
|
retke/apychal
|
04ed960610680959c3edab283ea7046bfb15ff53
|
[
"BSD-2-Clause"
] | null | null | null |
achallonge/tournaments.py
|
retke/apychal
|
04ed960610680959c3edab283ea7046bfb15ff53
|
[
"BSD-2-Clause"
] | null | null | null |
from achallonge import api
import asyncio
async def index(**params):
    """Retrieve a set of tournaments created with your account.

    Extra keyword arguments are forwarded as API query parameters.
    """
    return await api.fetch_and_parse("GET", "tournaments", **params)
async def create(name, url, tournament_type="single elimination", **params):
    """Create a new tournament.

    Required fields are merged into the caller's extra params before the POST.
    """
    params.update({
        "name": name,
        "url": url,
        "tournament_type": tournament_type,
    })
    return await api.fetch_and_parse("POST", "tournaments", "tournament", **params)
async def show(tournament, **params):
    """Retrieve a single tournament record created with your account.

    *tournament* is the tournament identifier accepted by the API.
    """
    return await api.fetch_and_parse("GET", f"tournaments/{tournament}", **params)
async def update(tournament, **params):
    """Update a tournament's attributes.

    Unlike the other calls, this fires the request without parsing or
    returning a response body.
    """
    await api.fetch("PUT", f"tournaments/{tournament}", "tournament", **params)
async def destroy(tournament):
    """Deletes a tournament along with all its associated records.

    There is no undo, so use with care!
    """
    await api.fetch("DELETE", f"tournaments/{tournament}")
async def process_check_ins(tournament, **params):
    """This should be invoked after a tournament's
    check-in window closes before the tournament is started.

    1) Marks participants who have not checked in as inactive.
    2) Moves inactive participants to bottom seeds (ordered by original seed).
    3) Transitions the tournament state from 'checking_in' to 'checked_in'
    """
    return await api.fetch_and_parse(
        "POST",
        f"tournaments/{tournament}/process_check_ins",
        **params)
async def abort_check_in(tournament, **params):
    """When your tournament is in a 'checking_in' or 'checked_in' state,
    there's no way to edit the tournament's start time (start_at)
    or check-in duration (check_in_duration).
    You must first abort check-in, then you may edit those attributes.

    1) Makes all participants active and clears their checked_in_at times.
    2) Transitions the tournament state from 'checking_in' or 'checked_in' to 'pending'
    """
    return await api.fetch_and_parse(
        "POST",
        f"tournaments/{tournament}/abort_check_in" ,
        **params)
async def start(tournament, **params):
    """Start a tournament, opening up matches for score reporting.

    The tournament must have at least 2 participants.
    """
    uri = f"tournaments/{tournament}/start"
    return await api.fetch_and_parse("POST", uri, **params)
async def finalize(tournament, **params):
    """Finalize a tournament whose match scores are all submitted,
    rendering its results permanent.
    """
    uri = f"tournaments/{tournament}/finalize"
    return await api.fetch_and_parse("POST", uri, **params)
async def reset(tournament, **params):
    """Reset a tournament, clearing all of its scores and attachments.

    Participants can then be added/removed/edited before starting the
    tournament again.
    """
    uri = f"tournaments/{tournament}/reset"
    return await api.fetch_and_parse("POST", uri, **params)
| 29.552381
| 87
| 0.680309
|
4a0e29c10ef1143a53fe49a820f48f639722633e
| 1,876
|
py
|
Python
|
tests/contract_tests/KT1Sc2dHqCRLQV9gWLmUndQyowVrv12AtcJi/test_sc2dhq_sendFunds.py
|
konchunas/pytezos
|
65576d18bdf1956fae8ea21241b6c43a38921b83
|
[
"MIT"
] | 98
|
2019-02-07T16:33:38.000Z
|
2022-03-31T15:53:41.000Z
|
tests/contract_tests/KT1Sc2dHqCRLQV9gWLmUndQyowVrv12AtcJi/test_sc2dhq_sendFunds.py
|
konchunas/pytezos
|
65576d18bdf1956fae8ea21241b6c43a38921b83
|
[
"MIT"
] | 152
|
2019-05-20T16:38:56.000Z
|
2022-03-30T14:24:38.000Z
|
tests/contract_tests/KT1Sc2dHqCRLQV9gWLmUndQyowVrv12AtcJi/test_sc2dhq_sendFunds.py
|
konchunas/pytezos
|
65576d18bdf1956fae8ea21241b6c43a38921b83
|
[
"MIT"
] | 34
|
2019-07-25T12:03:51.000Z
|
2021-11-11T22:23:38.000Z
|
from unittest import TestCase
from os.path import dirname, join
import json
from pytezos.michelson.program import MichelsonProgram
from pytezos.michelson.types.big_map import big_map_diff_to_lazy_diff
from pytezos.michelson.forge import forge_micheline, unforge_micheline
# NOTE(review): these look like leftovers from the test-case generator template;
# `folder` is never used below and `cls.entrypoint` is overwritten with
# 'sendFunds' in setUpClass — confirm before removing.
folder = 'dexter_usdtz_xtz'
entrypoint = 'removeLiquidity'
class MainnetOperationTestCaseSC2DHQ(TestCase):
    """Replay a recorded mainnet `sendFunds` operation through the codec.

    Checks that operation parameters round-trip through the Michelson
    parameter codec, that the big_map lazy diff merges into storage, and
    that forging then unforging the parameters is lossless.
    """

    @classmethod
    def setUpClass(cls):
        """Load the contract script and the recorded operation from disk."""
        # The empty f-string path components were template leftovers;
        # os.path.join ignores empty parts, so dropping them keeps the path.
        with open(join(dirname(__file__), '__script__.json')) as f:
            script = json.loads(f.read())
        cls.program = MichelsonProgram.match(script['code'])
        with open(join(dirname(__file__), 'sendFunds.json')) as f:
            operation = json.loads(f.read())
        cls.entrypoint = 'sendFunds'
        cls.operation = operation
        # cls.maxDiff = None

    def test_parameters_sc2dhq(self):
        """Parameters must survive a to-python / to-micheline round trip."""
        original_params = self.program.parameter.from_parameters(self.operation['parameters'])
        py_obj = original_params.to_python_object()
        # pprint(py_obj)
        readable_params = self.program.parameter.from_parameters(original_params.to_parameters(mode='readable'))
        self.assertEqual(py_obj, readable_params.to_python_object())
        self.program.parameter.from_python_object(py_obj)

    def test_lazy_storage_sc2dhq(self):
        """Storage merged with the big_map lazy diff must convert to Python."""
        storage = self.program.storage.from_micheline_value(self.operation['storage'])
        lazy_diff = big_map_diff_to_lazy_diff(self.operation['big_map_diff'])
        extended_storage = storage.merge_lazy_diff(lazy_diff)
        py_obj = extended_storage.to_python_object(try_unpack=True, lazy_diff=True)
        # pprint(py_obj)

    def test_parameters_forging(self):
        """Forging then unforging micheline must be the identity."""
        expected = self.operation['parameters'].get('value', {'prim': 'Unit'})
        actual = unforge_micheline(forge_micheline(expected))
        self.assertEqual(expected, actual)
| 39.083333
| 112
| 0.722281
|
4a0e29e5680b3f1ba7e708db12e2ecb892aada81
| 2,189
|
py
|
Python
|
code/models/SegHR_UperNet_OCR_DA.py
|
xueruoyao/FCN-pytorch
|
a5019da3943f47fa4f7baed3640cdbfeae2d677e
|
[
"MIT"
] | 1
|
2021-11-16T12:24:43.000Z
|
2021-11-16T12:24:43.000Z
|
code/models/SegHR_UperNet_OCR_DA.py
|
xueruoyao/FCN-pytorch
|
a5019da3943f47fa4f7baed3640cdbfeae2d677e
|
[
"MIT"
] | null | null | null |
code/models/SegHR_UperNet_OCR_DA.py
|
xueruoyao/FCN-pytorch
|
a5019da3943f47fa4f7baed3640cdbfeae2d677e
|
[
"MIT"
] | null | null | null |
# ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by Ke Sun (sunk@mail.ustc.edu.cn), Jingyi Xie (hsfzxjy@gmail.com)
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import torch.nn as nn
import torch._utils
import torch.nn.functional as F
from code.models.backbone import HighResolutionNet
from code.lufangxiao.module.OCR import OCR_Module
from code.models.decoder.UperNet_Decoder import UperNet_OCR_Decoder
class SegHR_UperNet_OCR_DA(nn.Module):
    """HRNet backbone + UperNet decoder + OCR head.

    `forward` returns both the OCR outputs (upsampled to the input size)
    and the raw backbone features.
    """

    def __init__(self, in_ch, n_classes, backbone='hr-w32'):
        super(SegHR_UperNet_OCR_DA, self).__init__()
        self.backbone = HighResolutionNet(in_ch, backbone=backbone)
        self.filters = self.backbone.get_filters()
        self.decoder = UperNet_OCR_Decoder(self.filters)
        self.ocr = OCR_Module(n_classes, self.decoder.last_output_chs)

    def init_weights(self, pretrained=''):
        """Randomly init conv/BN layers, then optionally load backbone weights."""
        for _, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                nn.init.normal_(module.weight, std=0.001)
            elif isinstance(module, nn.BatchNorm2d):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)
        # A non-empty path must exist; '' (the default) skips pretrained loading.
        if os.path.isfile(pretrained):
            self.backbone.init_weights(pretrained=pretrained)
        elif pretrained:
            raise RuntimeError('No such file {}'.format(pretrained))

    def forward(self, input):
        features = self.backbone(input)
        decoded = self.decoder(features)
        outputs = self.ocr(decoded)
        # Upsample every OCR output back to the spatial size of the input.
        outputs = [
            F.interpolate(o, size=input.shape[2:], mode='bilinear', align_corners=True)
            for o in outputs
        ]
        return outputs, features
if __name__ == "__main__":
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
input = torch.autograd.Variable(torch.randn(1, 3, 1500, 1500)).to(device)
net = SegHR_UperNet_OCR_DA(3, 17).to(device)
net.eval()
out, x = net(input)
for o in out:
print(o.size())
for f in x:
print(f.size())
| 37.741379
| 104
| 0.622202
|
4a0e29ea135496b331260401275cb31a3106b4ce
| 8,949
|
py
|
Python
|
themes/default/base16-black-metal-bathory.config.py
|
pltanton/base16-qutebrowser
|
dff61f51e10906491034e15f6b78440ce06dbc1a
|
[
"MIT"
] | null | null | null |
themes/default/base16-black-metal-bathory.config.py
|
pltanton/base16-qutebrowser
|
dff61f51e10906491034e15f6b78440ce06dbc1a
|
[
"MIT"
] | null | null | null |
themes/default/base16-black-metal-bathory.config.py
|
pltanton/base16-qutebrowser
|
dff61f51e10906491034e15f6b78440ce06dbc1a
|
[
"MIT"
] | null | null | null |
# base16-qutebrowser (https://github.com/theova/base16-qutebrowser)
# Base16 qutebrowser template by theova
# Black Metal (Bathory) scheme by metalelf0 (https://github.com/metalelf0)
base00 = "#000000"
base01 = "#121212"
base02 = "#222222"
base03 = "#333333"
base04 = "#999999"
base05 = "#c1c1c1"
base06 = "#999999"
base07 = "#c1c1c1"
base08 = "#5f8787"
base09 = "#aaaaaa"
base0A = "#e78a53"
base0B = "#fbcb97"
base0C = "#aaaaaa"
base0D = "#888888"
base0E = "#999999"
base0F = "#444444"
# set qutebrowser colors
# Text color of the completion widget. May be a single color to use for
# all columns or a list of three colors, one for each column.
c.colors.completion.fg = base05
# Background color of the completion widget for odd rows.
c.colors.completion.odd.bg = base01
# Background color of the completion widget for even rows.
c.colors.completion.even.bg = base00
# Foreground color of completion widget category headers.
c.colors.completion.category.fg = base0A
# Background color of the completion widget category headers.
c.colors.completion.category.bg = base00
# Top border color of the completion widget category headers.
c.colors.completion.category.border.top = base00
# Bottom border color of the completion widget category headers.
c.colors.completion.category.border.bottom = base00
# Foreground color of the selected completion item.
c.colors.completion.item.selected.fg = base05
# Background color of the selected completion item.
c.colors.completion.item.selected.bg = base02
# Top border color of the selected completion item.
c.colors.completion.item.selected.border.top = base02
# Bottom border color of the selected completion item.
c.colors.completion.item.selected.border.bottom = base02
# Foreground color of the matched text in the selected completion item.
c.colors.completion.item.selected.match.fg = base0B
# Foreground color of the matched text in the completion.
c.colors.completion.match.fg = base0B
# Color of the scrollbar handle in the completion view.
c.colors.completion.scrollbar.fg = base05
# Color of the scrollbar in the completion view.
c.colors.completion.scrollbar.bg = base00
# Background color of disabled items in the context menu.
c.colors.contextmenu.disabled.bg = base01
# Foreground color of disabled items in the context menu.
c.colors.contextmenu.disabled.fg = base04
# Background color of the context menu. If set to null, the Qt default is used.
c.colors.contextmenu.menu.bg = base00
# Foreground color of the context menu. If set to null, the Qt default is used.
c.colors.contextmenu.menu.fg = base05
# Background color of the context menu’s selected item. If set to null, the Qt default is used.
c.colors.contextmenu.selected.bg = base02
#Foreground color of the context menu’s selected item. If set to null, the Qt default is used.
c.colors.contextmenu.selected.fg = base05
# Background color for the download bar.
c.colors.downloads.bar.bg = base00
# Color gradient start for download text.
c.colors.downloads.start.fg = base00
# Color gradient start for download backgrounds.
c.colors.downloads.start.bg = base0D
# Color gradient end for download text.
c.colors.downloads.stop.fg = base00
# Color gradient stop for download backgrounds.
c.colors.downloads.stop.bg = base0C
# Foreground color for downloads with errors.
c.colors.downloads.error.fg = base08
# Font color for hints.
c.colors.hints.fg = base00
# Background color for hints. Note that you can use a `rgba(...)` value
# for transparency.
c.colors.hints.bg = base0A
# Font color for the matched part of hints.
c.colors.hints.match.fg = base05
# Text color for the keyhint widget.
c.colors.keyhint.fg = base05
# Highlight color for keys to complete the current keychain.
c.colors.keyhint.suffix.fg = base05
# Background color of the keyhint widget.
c.colors.keyhint.bg = base00
# Foreground color of an error message.
c.colors.messages.error.fg = base00
# Background color of an error message.
c.colors.messages.error.bg = base08
# Border color of an error message.
c.colors.messages.error.border = base08
# Foreground color of a warning message.
c.colors.messages.warning.fg = base00
# Background color of a warning message.
c.colors.messages.warning.bg = base0E
# Border color of a warning message.
c.colors.messages.warning.border = base0E
# Foreground color of an info message.
c.colors.messages.info.fg = base05
# Background color of an info message.
c.colors.messages.info.bg = base00
# Border color of an info message.
c.colors.messages.info.border = base00
# Foreground color for prompts.
c.colors.prompts.fg = base05
# Border used around UI elements in prompts.
c.colors.prompts.border = base00
# Background color for prompts.
c.colors.prompts.bg = base00
# Background color for the selected item in filename prompts.
c.colors.prompts.selected.bg = base02
# Foreground color of the statusbar.
c.colors.statusbar.normal.fg = base0B
# Background color of the statusbar.
c.colors.statusbar.normal.bg = base00
# Foreground color of the statusbar in insert mode.
c.colors.statusbar.insert.fg = base00
# Background color of the statusbar in insert mode.
c.colors.statusbar.insert.bg = base0D
# Foreground color of the statusbar in passthrough mode.
c.colors.statusbar.passthrough.fg = base00
# Background color of the statusbar in passthrough mode.
c.colors.statusbar.passthrough.bg = base0C
# Foreground color of the statusbar in private browsing mode.
c.colors.statusbar.private.fg = base00
# Background color of the statusbar in private browsing mode.
c.colors.statusbar.private.bg = base01
# Foreground color of the statusbar in command mode.
c.colors.statusbar.command.fg = base05
# Background color of the statusbar in command mode.
c.colors.statusbar.command.bg = base00
# Foreground color of the statusbar in private browsing + command mode.
c.colors.statusbar.command.private.fg = base05
# Background color of the statusbar in private browsing + command mode.
c.colors.statusbar.command.private.bg = base00
# Foreground color of the statusbar in caret mode.
c.colors.statusbar.caret.fg = base00
# Background color of the statusbar in caret mode.
c.colors.statusbar.caret.bg = base0E
# Foreground color of the statusbar in caret mode with a selection.
c.colors.statusbar.caret.selection.fg = base00
# Background color of the statusbar in caret mode with a selection.
c.colors.statusbar.caret.selection.bg = base0D
# Background color of the progress bar.
c.colors.statusbar.progress.bg = base0D
# Default foreground color of the URL in the statusbar.
c.colors.statusbar.url.fg = base05
# Foreground color of the URL in the statusbar on error.
c.colors.statusbar.url.error.fg = base08
# Foreground color of the URL in the statusbar for hovered links.
c.colors.statusbar.url.hover.fg = base05
# Foreground color of the URL in the statusbar on successful load
# (http).
c.colors.statusbar.url.success.http.fg = base0C
# Foreground color of the URL in the statusbar on successful load
# (https).
c.colors.statusbar.url.success.https.fg = base0B
# Foreground color of the URL in the statusbar when there's a warning.
c.colors.statusbar.url.warn.fg = base0E
# Background color of the tab bar.
c.colors.tabs.bar.bg = base00
# Color gradient start for the tab indicator.
c.colors.tabs.indicator.start = base0D
# Color gradient end for the tab indicator.
c.colors.tabs.indicator.stop = base0C
# Color for the tab indicator on errors.
c.colors.tabs.indicator.error = base08
# Foreground color of unselected odd tabs.
c.colors.tabs.odd.fg = base05
# Background color of unselected odd tabs.
c.colors.tabs.odd.bg = base01
# Foreground color of unselected even tabs.
c.colors.tabs.even.fg = base05
# Background color of unselected even tabs.
c.colors.tabs.even.bg = base00
# Background color of pinned unselected even tabs.
c.colors.tabs.pinned.even.bg = base0C
# Foreground color of pinned unselected even tabs.
c.colors.tabs.pinned.even.fg = base07
# Background color of pinned unselected odd tabs.
c.colors.tabs.pinned.odd.bg = base0B
# Foreground color of pinned unselected odd tabs.
c.colors.tabs.pinned.odd.fg = base07
# Background color of pinned selected even tabs.
c.colors.tabs.pinned.selected.even.bg = base02
# Foreground color of pinned selected even tabs.
c.colors.tabs.pinned.selected.even.fg = base05
# Background color of pinned selected odd tabs.
c.colors.tabs.pinned.selected.odd.bg = base02
# Foreground color of pinned selected odd tabs.
c.colors.tabs.pinned.selected.odd.fg = base05
# Foreground color of selected odd tabs.
c.colors.tabs.selected.odd.fg = base05
# Background color of selected odd tabs.
c.colors.tabs.selected.odd.bg = base02
# Foreground color of selected even tabs.
c.colors.tabs.selected.even.fg = base05
# Background color of selected even tabs.
c.colors.tabs.selected.even.bg = base02
# Background color for webpages if unset (or empty to use the theme's
# color).
# c.colors.webpage.bg = base00
| 30.030201
| 95
| 0.771259
|
4a0e2a100d48173034d66cf381396e75cfb3c81e
| 730
|
py
|
Python
|
profiles_api/permissions.py
|
carlosmertens/profile-rest-api
|
ff217242e178c5f47ea162f4f0808326c37d1b5d
|
[
"MIT"
] | null | null | null |
profiles_api/permissions.py
|
carlosmertens/profile-rest-api
|
ff217242e178c5f47ea162f4f0808326c37d1b5d
|
[
"MIT"
] | null | null | null |
profiles_api/permissions.py
|
carlosmertens/profile-rest-api
|
ff217242e178c5f47ea162f4f0808326c37d1b5d
|
[
"MIT"
] | null | null | null |
from rest_framework import permissions
class UpdateOwnProfile(permissions.BasePermission):
    """Object-level permission: only the profile's owner may modify it."""

    def has_object_permission(self, request, view, obj):
        """Allow safe (read-only) methods for everyone; writes only for the owner."""
        return (
            request.method in permissions.SAFE_METHODS
            or obj.id == request.user.id
        )
class UpdateOwnStatus(permissions.BasePermission):
    """Object-level permission: only the status author may modify it."""

    def has_object_permission(self, request, view, obj):
        """Allow safe (read-only) methods for everyone; writes only for the author."""
        return (
            request.method in permissions.SAFE_METHODS
            or obj.user_profile.id == request.user.id
        )
| 29.2
| 56
| 0.684932
|
4a0e2b1659cfb1733187c1ad465725883d0996b8
| 6,536
|
py
|
Python
|
interactiveCycloidal/fusionUtils/__init__.py
|
kizmit99/cycloidal_generator
|
93e1381c21c6ca9fe50165022072fc41706b07d1
|
[
"Unlicense"
] | 89
|
2018-10-28T10:29:06.000Z
|
2022-03-18T13:18:58.000Z
|
fusionUtils/__init__.py
|
colinmsnow/fusion_API_template
|
009f2f5258fc0ffe452796002c31a1b146442c96
|
[
"Unlicense"
] | 5
|
2019-07-25T06:53:05.000Z
|
2021-11-19T01:57:34.000Z
|
fusionUtils/__init__.py
|
colinmsnow/fusion_API_template
|
009f2f5258fc0ffe452796002c31a1b146442c96
|
[
"Unlicense"
] | 24
|
2018-10-28T06:17:07.000Z
|
2022-03-31T00:51:57.000Z
|
""" Utilities to handle the process of accepting inputs from Fusion.
The process is rather complicated and confusing so much of it has
been relocated here to prevent confusion """
import adsk.core
import adsk.fusion
import traceback
import math
class CommandExecuteHandler(adsk.core.CommandEventHandler):
    """Reads the dialog's input values and triggers the object build.

    Registered for both `execute` and `executePreview`, so the object is
    rebuilt on every preview as well as on final commit.
    """

    def __init__(self, app, ui, object_class, input_parameters):
        super().__init__()
        self.object_class = object_class  # object exposing .parameters and .build()
        self.app = app
        self.parameters = input_parameters  # Parameters collection describing the inputs
        self.ui = ui

    def notify(self, args):
        """ Builds the object from the given inputs """
        try:
            units_mgr = self.app.activeProduct.unitsManager
            command = args.firingEvent.sender
            inputs = command.commandInputs
            # Evaluate each input's expression in the units declared for that
            # parameter and stash the numeric result on the target object.
            for current_input in inputs:
                test_parameter = self.parameters.parameter_dict[current_input.id]
                self.object_class.parameters[current_input.id] =\
                    units_mgr.evaluateExpression(current_input.expression, test_parameter.units)
            self.object_class.build(self.app, self.ui)
            # Tell Fusion the computed result/preview is valid.
            args.isValidResult = True
        except:
            if self.ui:
                self.ui.messageBox('Failed:\n{}'.format(traceback.format_exc()))
class CommandDestroyHandler(adsk.core.CommandEventHandler):
    """Terminates the script cleanly once the command dialog is closed."""

    def __init__(self, ui):
        super().__init__()
        self.ui = ui

    def notify(self, args):
        """Shut the script down; releasing globals drops all event handlers."""
        try:
            adsk.terminate()
        except:
            if self.ui:
                self.ui.messageBox('Failed:\n{}'.format(traceback.format_exc()))
class CommandCreatedHandler(adsk.core.CommandCreatedEventHandler):
    """Wires up execute/preview/destroy handlers and builds the input dialog."""

    def __init__(self, app, ui, object_class, input_parameters, handlers):
        super().__init__()
        self.object_class = object_class
        self.app = app
        self.parameters = input_parameters
        self.ui = ui
        # Shared list owned by run(); keeps handlers alive past this call.
        self.handlers = handlers

    def notify(self, args):
        """ Runs other handlers to create the object """
        try:
            cmd = args.command
            cmd.isRepeatable = False
            # Same handler class serves both final execute and live preview.
            onExecute = CommandExecuteHandler(self.app, self.ui, self.object_class, self.parameters)
            cmd.execute.add(onExecute)
            onExecutePreview = CommandExecuteHandler(self.app, self.ui, self.object_class, self.parameters)
            cmd.executePreview.add(onExecutePreview)
            onDestroy = CommandDestroyHandler(self.ui)
            cmd.destroy.add(onDestroy)
            # keep the handler referenced beyond this function
            self.handlers.append(onExecute)
            self.handlers.append(onExecutePreview)
            self.handlers.append(onDestroy)
            # Define one dialog value-input per declared parameter.
            inputs = cmd.commandInputs
            for parameter in self.parameters.parameter_list:
                init_value = adsk.core.ValueInput.createByReal(parameter.default_value)
                inputs.addValueInput(parameter.id, parameter.description, parameter.units, init_value)
        except:
            if self.ui:
                self.ui.messageBox('Failed:\n{}'.format(traceback.format_exc()))
class Parameter:
    """A container for all values needed to create one command input field."""

    def __init__(self, name, units, description, default_value):
        self.id = name                      # command-input id / variable name
        self.units = units                  # Fusion units string, '' for unitless
        self.description = description      # label shown next to the input box
        self.default_value = default_value  # initial value shown before editing


class Parameters:
    """An ordered collection of Parameter objects for input initialization."""

    def __init__(self):
        """Store a list of parameters plus a dictionary of the same objects
        for easy indexing by name as well as iterating in insertion order."""
        self.parameter_list = []
        self.parameter_dict = {}

    def addParameter(self, name, units, description, default_value):
        """Add a parameter to the input box for the module.

        name: the variable name that will hold the value
        units: the units that the value will be converted to, "" for unitless
        description: the text which will appear with the box
        default_value: the initial value that will appear before being edited
        """
        new_param = Parameter(name, units, description, default_value)
        self.parameter_list.append(new_param)
        self.parameter_dict[name] = new_param
def createNewComponent(app):
    """ Create a new component in the active design and return it. """
    # Get the active design.
    product = app.activeProduct
    design = adsk.fusion.Design.cast(product)
    root_comp = design.rootComponent
    all_occs = root_comp.occurrences
    # An identity transform places the new occurrence at the origin.
    new_occ = all_occs.addNewComponent(adsk.core.Matrix3D.create())
    return new_occ.component
def run(parameters, default_name, createdObject):
    """The default entry point run by Fusion: register the command and show it.

    parameters: a Parameters collection describing the dialog input fields
    default_name: used as the command id and in the button/tooltip text
    createdObject: object exposing a `parameters` dict and `build(app, ui)`
    """
    handlers = []
    app = adsk.core.Application.get()
    if app:
        ui = app.userInterface
        try:
            product = app.activeProduct
            design = adsk.fusion.Design.cast(product)
            if not design:
                ui.messageBox('It is not supported in current workspace, please change to MODEL workspace and try again.')
                return

            command_definitions = ui.commandDefinitions
            # Reuse the command definition if it already exists.
            cmd_def = command_definitions.itemById(default_name)
            if not cmd_def:
                # Fixed tooltip text: the original 'Create a' + default_name
                # rendered as e.g. "Create aRotor" (missing space).
                cmd_def = command_definitions.addButtonDefinition(default_name,
                                                                  'Create ' + default_name,
                                                                  'Create a ' + default_name,
                                                                  '')  # Edit last parameter to provide resources
            on_command_created = CommandCreatedHandler(app, ui, createdObject, parameters, handlers)
            cmd_def.commandCreated.add(on_command_created)
            # keep the handler referenced beyond this function
            handlers.append(on_command_created)
            inputs = adsk.core.NamedValues.create()
            cmd_def.execute(inputs)
            # prevent this module from being terminated when the script returns,
            # because we are waiting for event handlers to fire
            adsk.autoTerminate(False)
        except:
            if ui:
                ui.messageBox('Failed:\n{}'.format(traceback.format_exc()))
| 37.563218
| 118
| 0.650245
|
4a0e2be3e28a843dd72955de73e9f94f1ab23ff3
| 3,096
|
py
|
Python
|
losses/loss.py
|
Tuxianeer/generalizationconfusion
|
e00e24d6fdf370f085c1c83b749d2560a0a0d89f
|
[
"MIT"
] | 4
|
2020-07-01T03:31:55.000Z
|
2020-10-16T14:47:08.000Z
|
losses/loss.py
|
Tuxianeer/generalizationconfusion
|
e00e24d6fdf370f085c1c83b749d2560a0a0d89f
|
[
"MIT"
] | null | null | null |
losses/loss.py
|
Tuxianeer/generalizationconfusion
|
e00e24d6fdf370f085c1c83b749d2560a0a0d89f
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
from torch.nn import functional as F
class CrossEntropy():
    """Plain cross-entropy loss; extra arguments exist only for API parity
    with the self-adaptive losses and are ignored."""

    def __init__(self):
        self.crit = nn.CrossEntropyLoss()

    def __call__(self, logits, targets, index, epoch, update_labels = True):
        """Return mean cross-entropy of `logits` against integer `targets`."""
        return self.crit(logits, targets)
class SelfAdaptiveTrainingCE():
    """Self-adaptive training with a (soft-label) cross-entropy objective.

    Maintains an exponential moving average of the model's softmax outputs
    as per-sample soft labels, and re-weights samples by the confidence
    (max probability) of their soft label.
    """

    def __init__(self, labels, device, num_classes=10, momentum=0.9, es=40):
        # initialize soft labels to onehot vectors
        self.soft_labels = torch.zeros(labels.shape[0], num_classes, dtype=torch.float, device=device)
        self.soft_labels[torch.arange(labels.shape[0]), labels] = 1
        self.momentum = momentum  # EMA coefficient for the soft-label update
        self.es = es              # epoch at which self-adaptation starts

    def __call__(self, logits, targets, index, epoch, orig_logits, update_labels, args):
        # Before the start epoch, fall back to plain hard-label cross-entropy.
        if epoch < self.es:
            return F.cross_entropy(logits, targets)
        # obtain prob, then update running avg
        # NOTE(review): `args.aggressive` selects which logits feed the EMA;
        # `orig_logits` presumably come from the unperturbed input — confirm.
        if args.aggressive:
            prob = F.softmax(orig_logits.detach(), dim=1)
        else:
            prob = F.softmax(logits.detach(), dim=1)
        if update_labels:
            self.soft_labels[index] = self.momentum * self.soft_labels[index] + (1 - self.momentum) * prob
        # obtain weights: per-sample confidence, rescaled to mean 1 over the batch
        weights, _ = self.soft_labels[index].max(dim=1)
        weights *= logits.shape[0] / weights.sum()
        # compute cross entropy loss against the soft labels, without reduction
        loss = torch.sum(-F.log_softmax(logits, dim=1) * self.soft_labels[index], dim=1)
        # sample weighted mean
        loss = (loss * weights).mean()
        return loss
class SelfAdaptiveTrainingSCE():
    """Self-adaptive training with a symmetric cross-entropy objective.

    Like SelfAdaptiveTrainingCE, but combines forward CE (alpha term) with
    reverse CE (beta term) against the EMA soft labels.
    """

    def __init__(self, labels, device, num_classes=10, momentum=0.9, es=40, alpha=1, beta=0.3):
        # initialize soft labels to onehot vectors
        self.soft_labels = torch.zeros(labels.shape[0], num_classes, dtype=torch.float, device=device)
        self.soft_labels[torch.arange(labels.shape[0]), labels] = 1
        self.momentum = momentum  # EMA coefficient for the soft-label update
        self.es = es              # epoch at which self-adaptation starts
        self.alpha = alpha        # weight of the forward CE term
        self.beta = beta          # weight of the reverse CE term
        print("alpha = {}, beta = {}".format(alpha, beta))

    def __call__(self, logits, targets, index, epoch, update_labels = True):
        # Before the start epoch, fall back to plain hard-label cross-entropy.
        if epoch < self.es:
            return F.cross_entropy(logits, targets)
        # obtain prob, then update running avg
        prob = F.softmax(logits, dim=1)
        if update_labels:
            self.soft_labels[index] = self.momentum * self.soft_labels[index] + (1 - self.momentum) * prob.detach()
        # obtain weights based largest and second largest prob
        weights, _ = self.soft_labels[index].max(dim=1)
        weights *= logits.shape[0] / weights.sum()
        # use symmetric cross entropy loss, without reduction
        # NOTE(review): torch.log(self.soft_labels[index]) yields -inf for
        # zero entries (e.g. untouched one-hot labels) — presumably masked
        # by the soft labels becoming strictly positive after the EMA update;
        # confirm no NaNs appear right at epoch == es.
        loss = - self.alpha * torch.sum(self.soft_labels[index] * torch.log(prob), dim=-1) \
               - self.beta * torch.sum(prob * torch.log(self.soft_labels[index]), dim=-1)
        # sample weighted mean
        loss = (loss * weights).mean()
        return loss
| 38.7
| 116
| 0.607881
|
4a0e2c7e7f70ae807faaaa3e3a29cfcdcb20cf93
| 3,343
|
py
|
Python
|
napari/layers/shapes/_shapes_models/path.py
|
SaraLatif99/napari
|
b17235ee77d30e58492368a73d7c8d8189397fa4
|
[
"BSD-3-Clause"
] | null | null | null |
napari/layers/shapes/_shapes_models/path.py
|
SaraLatif99/napari
|
b17235ee77d30e58492368a73d7c8d8189397fa4
|
[
"BSD-3-Clause"
] | null | null | null |
napari/layers/shapes/_shapes_models/path.py
|
SaraLatif99/napari
|
b17235ee77d30e58492368a73d7c8d8189397fa4
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
from xml.etree.ElementTree import Element
from .shape import Shape
from .._shapes_utils import create_box
class Path(Shape):
    """Class for a single path, which is a sequence of line segments.

    Parameters
    ----------
    data : np.ndarray
        NxD array of vertices specifying the path.
    edge_width : float
        thickness of lines and edges.
    edge_color : str | tuple
        If string can be any color name recognized by vispy or hex value if
        starting with `#`. If array-like must be 1-dimensional array with 3 or
        4 elements.
    face_color : str | tuple
        If string can be any color name recognized by vispy or hex value if
        starting with `#`. If array-like must be 1-dimensional array with 3 or
        4 elements.
    opacity : float
        Opacity of the shape, must be between 0 and 1.
    z_index : int
        Specifier of z order priority. Shapes with higher z order are displayed
        on top of others.
    dims_order : (D,) list
        Order that the dimensions are to be rendered in.
    """

    def __init__(
        self,
        data,
        *,
        edge_width=1,
        edge_color='black',
        face_color='white',
        opacity=1,
        z_index=0,
        dims_order=None,
        ndisplay=2,
    ):
        super().__init__(
            edge_width=edge_width,
            edge_color=edge_color,
            face_color=face_color,
            opacity=opacity,
            z_index=z_index,
            dims_order=dims_order,
            ndisplay=ndisplay,
        )
        # A path is an open polyline, so it is never filled.
        self._filled = False
        self.data = data
        self.name = 'path'

    @property
    def data(self):
        """np.ndarray: NxD array of vertices.
        """
        return self._data

    @data.setter
    def data(self, data):
        """Set the vertices, validating that at least two are provided."""
        data = np.array(data).astype(float)

        # If dimensionality changed, reset dims_order to the identity order.
        if len(self.dims_order) != data.shape[1]:
            self._dims_order = list(range(data.shape[1]))

        if len(data) < 2:
            raise ValueError(
                f"""Data shape does not match a path. A
                Path expects at least two vertices,
                {len(data)} provided."""
            )

        self._data = data
        self._update_displayed_data()

    def _update_displayed_data(self):
        """Update the data that is to be displayed."""
        # For path connect every all data
        self._set_meshes(self.data_displayed, face=False, closed=False)
        self._box = create_box(self.data_displayed)

        # slice_key records the rounded extent along the non-displayed dims.
        data_not_displayed = self.data[:, self.dims_not_displayed]
        self.slice_key = np.round(
            [
                np.min(data_not_displayed, axis=0),
                np.max(data_not_displayed, axis=0),
            ]
        ).astype('int')

    def to_xml(self):
        """Generates an xml element that defines the shape according to the
        svg specification.

        Returns
        ----------
        element : xml.etree.ElementTree.Element
            xml element specifying the shape according to svg.
        """
        data = self.data[:, self.dims_displayed]
        # SVG points are "x,y" while our vertices are (row, col), hence d[1],d[0].
        points = ' '.join([f'{d[1]},{d[0]}' for d in data])

        props = self.svg_props
        props['fill'] = 'none'
        element = Element('polyline', points=points, **props)

        return element
| 29.324561
| 79
| 0.572839
|
4a0e2d3a05e9112ea060f131251d3cd37c408737
| 9,810
|
py
|
Python
|
app/recipe/tests/test_recipe_api.py
|
ido777/newish
|
298a3d5babf411ba1eb777101eb6e8f70b9e495f
|
[
"MIT"
] | null | null | null |
app/recipe/tests/test_recipe_api.py
|
ido777/newish
|
298a3d5babf411ba1eb777101eb6e8f70b9e495f
|
[
"MIT"
] | null | null | null |
app/recipe/tests/test_recipe_api.py
|
ido777/newish
|
298a3d5babf411ba1eb777101eb6e8f70b9e495f
|
[
"MIT"
] | null | null | null |
import tempfile
import os
from PIL import Image
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Recipe, Tag, Ingredient
from recipe.serializers import RecipeSerializer, RecipeDetailSerializer
RECIPES_URL = reverse('recipe:recipe-list')
def image_upload_url(recipe_id):
    """Build the image-upload URL for the given recipe id."""
    args = [recipe_id]
    return reverse('recipe:recipe-upload-image', args=args)
def detail_url(recipe_id):
    """Build the detail URL for the given recipe id."""
    args = [recipe_id]
    return reverse('recipe:recipe-detail', args=args)
def sample_tag(user, name='Main course'):
    """Create and return a sample tag owned by `user`."""
    tag = Tag.objects.create(user=user, name=name)
    return tag
def sample_ingredient(user, name='Cinnamon'):
    """Create and return a sample ingredient owned by `user`."""
    ingredient = Ingredient.objects.create(user=user, name=name)
    return ingredient
def sample_recipe(user, **params):
    """Create and return a sample recipe; `params` override the defaults."""
    fields = {
        'title': 'Sample recipe',
        'time_minutes': 10,
        'price': 5.00,
    }
    fields.update(params)
    return Recipe.objects.create(user=user, **fields)
class PublicRecipeApiTests(TestCase):
    """Tests for the recipe API without authentication."""

    def setUp(self):
        self.client = APIClient()

    def test_required_auth(self):
        """Unauthenticated requests must be rejected with 401."""
        response = self.client.get(RECIPES_URL)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateRecipeApiTests(TestCase):
    """Test authenticated recipe API access"""
    def setUp(self):
        # Authenticated client + user shared by every test in this class.
        self.client = APIClient()
        self.user = get_user_model().objects.create_user(
            'test@londonappdev.com',
            'testpass'
        )
        self.client.force_authenticate(self.user)
    def test_retrieve_recipes(self):
        """Test retrieving list of recipes"""
        sample_recipe(user=self.user)
        sample_recipe(user=self.user)
        res = self.client.get(RECIPES_URL)
        # Listing is expected to come back newest-first (ordered by -id).
        recipes = Recipe.objects.all().order_by('-id')
        serializer = RecipeSerializer(recipes, many=True)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(res.data, serializer.data)
    def test_recipes_limited_to_user(self):
        """Test retrieving recipes for user"""
        user2 = get_user_model().objects.create_user(
            'other@londonappdev.com',
            'pass'
        )
        sample_recipe(user=user2)
        sample_recipe(user=self.user)
        res = self.client.get(RECIPES_URL)
        # Only the authenticated user's recipe should be returned.
        recipes = Recipe.objects.filter(user=self.user)
        serializer = RecipeSerializer(recipes, many=True)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(len(res.data), 1)
        self.assertEqual(res.data, serializer.data)
    def test_view_recipe_detail(self):
        """Test viewing a recipe detail"""
        recipe = sample_recipe(user=self.user)
        recipe.tags.add(sample_tag(user=self.user))
        recipe.ingredients.add(sample_ingredient(user=self.user))
        url = detail_url(recipe.id)
        res = self.client.get(url)
        # Detail endpoint uses the nested detail serializer.
        serializer = RecipeDetailSerializer(recipe)
        self.assertEqual(res.data, serializer.data)
    def test_create_basic_recipe(self):
        """Test creating recipe"""
        payload = {
            'title': 'Test recipe',
            'time_minutes': 30,
            'price': 10.00,
        }
        res = self.client.post(RECIPES_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_201_CREATED)
        recipe = Recipe.objects.get(id=res.data['id'])
        # Every posted field must be persisted verbatim on the model.
        for key in payload.keys():
            self.assertEqual(payload[key], getattr(recipe, key))
    def test_create_recipe_with_tags(self):
        """Test creating a recipe with tags"""
        tag1 = sample_tag(user=self.user, name='Tag 1')
        tag2 = sample_tag(user=self.user, name='Tag 2')
        payload = {
            'title': 'Test recipe with two tags',
            'tags': [tag1.id, tag2.id],
            'time_minutes': 30,
            'price': 10.00
        }
        res = self.client.post(RECIPES_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_201_CREATED)
        recipe = Recipe.objects.get(id=res.data['id'])
        # Both tags referenced by id in the payload must be attached.
        tags = recipe.tags.all()
        self.assertEqual(tags.count(), 2)
        self.assertIn(tag1, tags)
        self.assertIn(tag2, tags)
    def test_create_recipe_with_ingredients(self):
        """Test creating recipe with ingredients"""
        ingredient1 = sample_ingredient(user=self.user, name='Ingredient 1')
        ingredient2 = sample_ingredient(user=self.user, name='Ingredient 2')
        payload = {
            'title': 'Test recipe with ingredients',
            'ingredients': [ingredient1.id, ingredient2.id],
            'time_minutes': 45,
            'price': 15.00
        }
        res = self.client.post(RECIPES_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_201_CREATED)
        recipe = Recipe.objects.get(id=res.data['id'])
        # Both ingredients referenced by id must be attached.
        ingredients = recipe.ingredients.all()
        self.assertEqual(ingredients.count(), 2)
        self.assertIn(ingredient1, ingredients)
        self.assertIn(ingredient2, ingredients)
    def test_partial_update_recipe(self):
        """Test updating a recipe with patch"""
        recipe = sample_recipe(user=self.user)
        recipe.tags.add(sample_tag(user=self.user))
        new_tag = sample_tag(user=self.user, name='Curry')
        payload = {'title': 'Chicken tikka', 'tags': [new_tag.id]}
        url = detail_url(recipe.id)
        self.client.patch(url, payload)
        # PATCH only replaces the provided fields; refresh to see DB state.
        recipe.refresh_from_db()
        self.assertEqual(recipe.title, payload['title'])
        tags = recipe.tags.all()
        self.assertEqual(len(tags), 1)
        self.assertIn(new_tag, tags)
    def test_full_update_recipe(self):
        """Test updating a recipe with put"""
        recipe = sample_recipe(user=self.user)
        recipe.tags.add(sample_tag(user=self.user))
        payload = {
            'title': 'Spaghetti carbonara',
            'time_minutes': 25,
            'price': 5.00
        }
        url = detail_url(recipe.id)
        self.client.put(url, payload)
        recipe.refresh_from_db()
        self.assertEqual(recipe.title, payload['title'])
        self.assertEqual(recipe.time_minutes, payload['time_minutes'])
        self.assertEqual(recipe.price, payload['price'])
        # PUT replaces the whole object, so the omitted tags are cleared.
        tags = recipe.tags.all()
        self.assertEqual(len(tags), 0)
class RecipeImageUploadTests(TestCase):
    """Test the image upload and list-filtering endpoints of the recipe API."""
    def setUp(self):
        self.client = APIClient()
        self.user = get_user_model().objects.create_user('user', 'testpass')
        self.client.force_authenticate(self.user)
        self.recipe = sample_recipe(user=self.user)
    def tearDown(self):
        # Remove the uploaded image file so test media does not accumulate.
        self.recipe.image.delete()
    def test_upload_image_to_recipe(self):
        """Test uploading an image to recipe"""
        url = image_upload_url(self.recipe.id)
        with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:
            # Build a tiny valid JPEG on the fly and rewind before upload.
            img = Image.new('RGB', (10, 10))
            img.save(ntf, format='JPEG')
            ntf.seek(0)
            res = self.client.post(url, {'image': ntf}, format='multipart')
        self.recipe.refresh_from_db()
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertIn('image', res.data)
        self.assertTrue(os.path.exists(self.recipe.image.path))
    def test_upload_image_bad_request(self):
        """Test uploading an invalid image"""
        url = image_upload_url(self.recipe.id)
        res = self.client.post(url, {'image': 'notimage'}, format='multipart')
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
    # NOTE(review): the two filter tests below exercise list filtering, not
    # image upload -- they arguably belong in PrivateRecipeApiTests.
    def test_filter_recipes_by_tags(self):
        """Test returning recipes with specific tags"""
        recipe1 = sample_recipe(user=self.user, title='Thai vegetable curry')
        recipe2 = sample_recipe(user=self.user, title='Aubergine with tahini')
        tag1 = sample_tag(user=self.user, name='Vegan')
        tag2 = sample_tag(user=self.user, name='Vegetarian')
        recipe1.tags.add(tag1)
        recipe2.tags.add(tag2)
        recipe3 = sample_recipe(user=self.user, title='Fish and chips')
        res = self.client.get(
            RECIPES_URL,
            {'tags': f'{tag1.id},{tag2.id}'}
        )
        # Tagged recipes are returned; the untagged one is filtered out.
        serializer1 = RecipeSerializer(recipe1)
        serializer2 = RecipeSerializer(recipe2)
        serializer3 = RecipeSerializer(recipe3)
        self.assertIn(serializer1.data, res.data)
        self.assertIn(serializer2.data, res.data)
        self.assertNotIn(serializer3.data, res.data)
    def test_filter_recipes_by_ingredients(self):
        """Test returning recipes with specific ingredients"""
        recipe1 = sample_recipe(user=self.user, title='Posh beans on toast')
        recipe2 = sample_recipe(user=self.user, title='Chicken cacciatore')
        ingredient1 = sample_ingredient(user=self.user, name='Feta cheese')
        ingredient2 = sample_ingredient(user=self.user, name='Chicken')
        recipe1.ingredients.add(ingredient1)
        recipe2.ingredients.add(ingredient2)
        recipe3 = sample_recipe(user=self.user, title='Steak and mushrooms')
        res = self.client.get(
            RECIPES_URL,
            {'ingredients': f'{ingredient1.id},{ingredient2.id}'}
        )
        # Recipes with the requested ingredients are returned; others not.
        serializer1 = RecipeSerializer(recipe1)
        serializer2 = RecipeSerializer(recipe2)
        serializer3 = RecipeSerializer(recipe3)
        self.assertIn(serializer1.data, res.data)
        self.assertIn(serializer2.data, res.data)
        self.assertNotIn(serializer3.data, res.data)
| 34.664311
| 78
| 0.646993
|
4a0e2d446e817694fb654a34f09a779fe9820647
| 245
|
py
|
Python
|
benchmarks/DNNF-CIFAR-EQ/properties/global_targeted_diff_8_5.py
|
dlshriver/dnnv-benchmarks
|
84b5bf1e046226d269da1cdbd7a7690fd90d024b
|
[
"MIT"
] | 1
|
2022-03-01T08:59:32.000Z
|
2022-03-01T08:59:32.000Z
|
benchmarks/DNNF-CIFAR-EQ/properties/global_targeted_diff_8_5.py
|
dlshriver/dnnv-benchmarks
|
84b5bf1e046226d269da1cdbd7a7690fd90d024b
|
[
"MIT"
] | null | null | null |
benchmarks/DNNF-CIFAR-EQ/properties/global_targeted_diff_8_5.py
|
dlshriver/dnnv-benchmarks
|
84b5bf1e046226d269da1cdbd7a7690fd90d024b
|
[
"MIT"
] | null | null | null |
# DNNV property specification: a "global targeted difference" constraint
# relating two networks over the whole (normalized) input domain.
from dnnv.properties import *
import numpy as np
N1 = Network("N1")
N2 = Network("N2")
# Target classes for each network (8 for N1, 5 for N2).
class_1 = 8
class_2 = 5
# For every input x in the unit hypercube, the two networks must not
# simultaneously predict class_1 (N1) and class_2 (N2).
# NOTE: `x` is a free symbolic variable introduced by the DNNV DSL, not a
# Python name defined in this file.
Forall(
    x,
    Implies(
        (0 <= x <= 1),
        (np.argmax(N1(x)) != class_1) | (np.argmax(N2(x)) != class_2),
    ),
)
| 14.411765
| 70
| 0.530612
|
4a0e2d74c9deebec4baee42f3c386844da67f6c1
| 3,414
|
py
|
Python
|
src/utils/__init__.py
|
Beirdo/save_game_archiver
|
c2cf5c6f076d6de63fff696577c3d2d2bdee3c06
|
[
"MIT"
] | null | null | null |
src/utils/__init__.py
|
Beirdo/save_game_archiver
|
c2cf5c6f076d6de63fff696577c3d2d2bdee3c06
|
[
"MIT"
] | null | null | null |
src/utils/__init__.py
|
Beirdo/save_game_archiver
|
c2cf5c6f076d6de63fff696577c3d2d2bdee3c06
|
[
"MIT"
] | null | null | null |
import concurrent
import hashlib
import json
import logging
import os
import re
import sys
import time
from concurrent.futures import ALL_COMPLETED
from concurrent.futures.thread import ThreadPoolExecutor
logger = logging.getLogger(__name__)
def numToReadable(value):
    """Format *value* with a binary (1024-based) magnitude prefix.

    e.g. 500 -> "500.00", 800 -> "0.78k", 1048576 -> "1.00M".
    Divides by 1024 until the value drops to a readable size (<= 700)
    or the largest known prefix ("P") is reached.
    """
    prefixes = ["", "k", "M", "G", "T", "P"]
    index = 0
    for (index, prefix) in enumerate(prefixes):
        # Stop once the value is small enough, or when we are already on the
        # last prefix -- the original loop divided once more here, reporting
        # e.g. "0.78P" instead of "800.00P" for values beyond the "P" range.
        if value <= 700.0 or index == len(prefixes) - 1:
            break
        value /= 1024.0
    return "%.2f%s" % (value, prefixes[index])
def generate_sha1sum(filename):
    """Return the hex SHA-1 digest of *filename*, read in 1 MB chunks."""
    chunk_size = 1 * 2**20  # 1MB
    digest = hashlib.sha1()
    with open(filename, "rb") as fh:
        # iter() with a sentinel yields chunks until read() returns b"".
        for chunk in iter(lambda: fh.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()
def generate_exclude_dirs(item):
    """Compile the item's "exclude_dirs" entries into anchored regexes.

    Each excluded directory matches itself and anything below it, using
    the platform path separator (escaped for use inside a regex).
    """
    sep = os.path.sep.replace("\\", "\\\\")
    patterns = set()
    for dir_ in item.get("exclude_dirs", []):
        normalized = dir_.replace("/", sep)
        patterns.add(re.compile(r'^%s([%s].*)?$' % (normalized, sep)))
    return patterns
def generate_manifest_for_file(item):
    """Fill in size and SHA-1 digest for a manifest entry in place.

    Entries without a truthy "filename" are returned untouched.
    """
    target = item.get("filename", None)
    if not target:
        return item
    item["size"] = os.path.getsize(target)
    item["sha1sum"] = generate_sha1sum(target)
    return item
def generate_manifest(source_base, source_dir, exclude_dirs, old_count=-1, threads=8):
    """Walk ``source_base/source_dir`` and build a manifest of its files.

    Returns a dict keyed by absolute filename; each entry holds the
    filename and the archive-relative path, and after fingerprinting also
    the file size and SHA-1 digest (filled in by
    generate_manifest_for_file, which mutates the entries in place).

    exclude_dirs -- compiled regexes (see generate_exclude_dirs) matched
        against each directory path relative to source_dir.
    old_count -- when 0, directory exclusion is skipped entirely
        (presumably "no previous archive exists" -- TODO confirm callers).
    threads -- upper bound on SHA-1 worker threads.
    """
    source = os.path.join(source_base, source_dir)
    source_split = source_base + os.path.sep
    manifest = {}
    subdir_split = source_split + source_dir + os.path.sep
    start_time = time.time()
    logger.info("Generating local manifest to support filtering")
    for (root, dirs, files) in os.walk(source, topdown=True):
        # Directory path relative to source_dir, without trailing separator.
        basedir_name = (root + os.path.sep).split(subdir_split)[1].rstrip(os.path.sep)
        if old_count != 0:
            found = list(filter(lambda x: x.search(basedir_name), exclude_dirs))
            if found:
                continue
        for file_ in files:
            filename = os.path.join(root, file_)
            arcfile = os.path.join(basedir_name, file_)
            manifest[filename] = {
                "filename": filename,
                "arcfile": arcfile,
            }
    file_count = len(manifest)
    end_time = time.time()
    duration = end_time - start_time
    logger.info("Generating file list (%s files) took %.3fs" % (file_count, duration))
    # Leave two cores free for the rest of the system, but never drop below
    # one worker: the original min(threads, os.cpu_count() - 2) is <= 0 on
    # one- or two-core machines (and crashes when os.cpu_count() is None),
    # which makes ThreadPoolExecutor raise ValueError.
    threads = max(1, min(threads, (os.cpu_count() or 1) - 2))
    start_time = time.time()
    logger.info("Starting SHA1 fingerprinting with %s threads", threads)
    with ThreadPoolExecutor(max_workers=threads, thread_name_prefix="Worker-") as executor:
        # Workers mutate their manifest entries in place, so the futures'
        # return values are deliberately not collected.
        futures = {executor.submit(generate_manifest_for_file, item) for item in manifest.values()}
        concurrent.futures.wait(futures, return_when=ALL_COMPLETED)
    end_time = time.time()
    duration = end_time - start_time
    logger.info("SHA1 fingerprinting %s files took %.3fs" % (file_count, duration))
    return manifest
def load_manifest_file(filename):
    """Load a JSON manifest from *filename*; return {} on any failure."""
    logger.info("Reading manifest file: %s" % filename)
    try:
        with open(filename, "r") as fh:
            manifest = json.load(fh)
    except Exception as err:
        logger.error("Error loading manifest file: %s" % err)
        return {}
    return manifest
def write_manifest_file(filename, manifest):
    """Serialize *manifest* to *filename* as indented, key-sorted JSON."""
    logger.info("Writing manifest file: %s" % filename)
    with open(filename, "w") as fh:
        fh.write(json.dumps(manifest, indent=2, sort_keys=True))
| 31.036364
| 99
| 0.636497
|
4a0e2d7ce8ba71ea7f296a9cde809770d0d5563d
| 858
|
py
|
Python
|
ocean_lib/web3_internal/web3_overrides/http_provider.py
|
joshualyguessennd/ocean.py
|
23274698df4aae078d53b12d768c721af16f6e80
|
[
"Apache-2.0"
] | null | null | null |
ocean_lib/web3_internal/web3_overrides/http_provider.py
|
joshualyguessennd/ocean.py
|
23274698df4aae078d53b12d768c721af16f6e80
|
[
"Apache-2.0"
] | 1
|
2021-02-16T18:31:53.000Z
|
2021-02-16T18:31:53.000Z
|
ocean_lib/web3_internal/web3_overrides/http_provider.py
|
joshualyguessennd/ocean.py
|
23274698df4aae078d53b12d768c721af16f6e80
|
[
"Apache-2.0"
] | null | null | null |
from ocean_lib.web3_internal.web3_overrides.request import make_post_request
from web3 import HTTPProvider
class CustomHTTPProvider(HTTPProvider):
    """
    Override requests to control the connection pool to make it blocking.
    """

    def make_request(self, method, params):
        """Encode a JSON-RPC request, POST it, and return the decoded reply."""
        self.logger.debug(
            "Making request HTTP. URI: %s, Method: %s", self.endpoint_uri, method
        )
        encoded = self.encode_rpc_request(method, params)
        raw = make_post_request(
            self.endpoint_uri, encoded, **self.get_request_kwargs()
        )
        decoded = self.decode_rpc_response(raw)
        self.logger.debug(
            "Getting response HTTP. URI: %s, " "Method: %s, Response: %s",
            self.endpoint_uri,
            method,
            decoded,
        )
        return decoded
| 33
| 81
| 0.642191
|
4a0e2dd35fa9be6739ab061c091917586655e146
| 421
|
py
|
Python
|
kits19cnn/io/__init__.py
|
Ramsha04/kits19-2d-reproduce
|
66678f1eda3688d6dc64389e9a80ae0b754a3052
|
[
"Apache-2.0"
] | 7
|
2019-12-19T01:10:09.000Z
|
2021-07-05T07:35:39.000Z
|
kits19cnn/io/__init__.py
|
Ramsha04/kits19-2d-reproduce
|
66678f1eda3688d6dc64389e9a80ae0b754a3052
|
[
"Apache-2.0"
] | 5
|
2019-12-19T23:03:12.000Z
|
2020-02-06T04:18:34.000Z
|
kits19cnn/io/__init__.py
|
Ramsha04/kits19-2d-reproduce
|
66678f1eda3688d6dc64389e9a80ae0b754a3052
|
[
"Apache-2.0"
] | 1
|
2021-03-20T06:28:37.000Z
|
2021-03-20T06:28:37.000Z
|
from .dataset import SliceDataset, PseudoSliceDataset
from .test_dataset import VoxelDataset, TestVoxelDataset
from .preprocess import Preprocessor
from .resample import resample_patient
from .custom_transforms import CenterCrop
from .custom_augmentations import resize_data_and_seg, crop_to_bbox, \
expand_bbox, get_bbox_from_mask, resize_bbox
from .slice_sampler import SliceIDSampler
| 46.777778
| 78
| 0.80285
|
4a0e2f2dcb15381a1049066dd6f6362c837d2102
| 750
|
py
|
Python
|
WideShot.py
|
tyomies123/asteroid-game
|
aa2b198fc6da88d3c988edec52cef372f893ae4d
|
[
"MIT"
] | 1
|
2019-12-17T18:42:20.000Z
|
2019-12-17T18:42:20.000Z
|
WideShot.py
|
tyomies123/asteroid-game
|
aa2b198fc6da88d3c988edec52cef372f893ae4d
|
[
"MIT"
] | null | null | null |
WideShot.py
|
tyomies123/asteroid-game
|
aa2b198fc6da88d3c988edec52cef372f893ae4d
|
[
"MIT"
] | null | null | null |
import pygame
import time
from random import *
from pygame.locals import *
from PowerUp import PowerUp
from PiercingProjectile import PiercingProjectile
#WideShot powerup pickup (increased projectile width)
class WideShot(PowerUp):
    """Power-up pickup that grants ammo for wider projectiles."""

    def __init__(self, width, height, speed, screen, info_x):
        PowerUp.__init__(self, "assets/wide_shot.png", width, height, speed, screen, info_x)

    def function(self, powerup_projectiles):
        """Grant 5 wide-shot rounds, trimming back down to a cap of 10."""
        # Grant five rounds of wide-shot ammo in one go.
        powerup_projectiles.extend(["WideProjectile"] * 5)
        # Drop surplus rounds so the pool never exceeds the cap of 10.
        while len(powerup_projectiles) > 10:
            powerup_projectiles.remove("WideProjectile")
        return powerup_projectiles
| 31.25
| 92
| 0.690667
|
4a0e2f40295f85c3c93e614b14dd1a9644d49af8
| 3,712
|
py
|
Python
|
tests/operators/test_dbt_deps.py
|
tomasfarias/airflow-dbt-python
|
1cffdcafd8841b96ee622a49d9ae8f6716c55831
|
[
"MIT"
] | 37
|
2021-06-15T23:23:28.000Z
|
2022-03-22T08:16:49.000Z
|
tests/operators/test_dbt_deps.py
|
tomasfarias/airflow-dbt-python
|
1cffdcafd8841b96ee622a49d9ae8f6716c55831
|
[
"MIT"
] | 29
|
2021-06-01T21:03:39.000Z
|
2022-03-12T15:09:33.000Z
|
tests/operators/test_dbt_deps.py
|
tomasfarias/airflow-dbt-python
|
1cffdcafd8841b96ee622a49d9ae8f6716c55831
|
[
"MIT"
] | 5
|
2021-08-04T08:48:31.000Z
|
2022-02-07T19:14:56.000Z
|
"""Unit test module for DbtDepsOperator."""
from unittest.mock import patch
import pytest
from airflow_dbt_python.hooks.dbt import DepsTaskConfig
from airflow_dbt_python.operators.dbt import DbtDepsOperator
# Skip S3-dependent tests when the optional amazon extras (and therefore
# DbtS3Hook) are not installed.
condition = False
try:
    from airflow_dbt_python.hooks.s3 import DbtS3Hook
except ImportError:
    condition = True
no_s3_hook = pytest.mark.skipif(
    condition, reason="S3Hook not available, consider installing amazon extras"
)
def test_dbt_deps_mocked_all_args():
    """Test mocked dbt deps call with all arguments."""
    op = DbtDepsOperator(
        task_id="dbt_task",
        project_dir="/path/to/project/",
        profiles_dir="/path/to/profiles/",
        profile="dbt-profile",
        target="dbt-target",
        vars={"target": "override"},
        log_cache_events=True,
    )
    assert op.command == "deps"
    config = op.get_dbt_config()
    # Every constructor argument must be reflected in the generated config.
    assert isinstance(config, DepsTaskConfig) is True
    assert config.project_dir == "/path/to/project/"
    assert config.profiles_dir == "/path/to/profiles/"
    assert config.profile == "dbt-profile"
    assert config.target == "dbt-target"
    # The vars dict is serialized to a JSON string by the operator.
    assert config.vars == '{"target": "override"}'
    assert config.log_cache_events is True
def test_dbt_deps_downloads_dbt_utils(
    profiles_file, dbt_project_file, dbt_packages_dir, packages_file
):
    """Test that a DbtDepsOperator downloads the dbt_utils module."""
    import shutil
    # Ensure modules directory is empty before starting
    dbt_utils_dir = dbt_packages_dir / "dbt_utils"
    shutil.rmtree(dbt_utils_dir, ignore_errors=True)
    assert dbt_utils_dir.exists() is False
    op = DbtDepsOperator(
        task_id="dbt_task",
        project_dir=dbt_project_file.parent,
        profiles_dir=profiles_file.parent,
    )
    modules = dbt_packages_dir.glob("dbt_utils")
    assert len([m for m in modules]) == 0
    op.execute({})
    # After execution the dbt_utils package directory must exist.
    modules = dbt_packages_dir.glob("dbt_utils")
    assert len([m for m in modules]) == 1
@no_s3_hook
def test_dbt_deps_push_to_s3(
    s3_bucket,
    profiles_file,
    dbt_project_file,
    packages_file,
):
    """Test execution of DbtDepsOperator with a push to S3 at the end."""
    hook = DbtS3Hook()
    bucket = hook.get_bucket(s3_bucket)
    # Seed the bucket with the project files the operator needs.
    with open(dbt_project_file) as pf:
        project_content = pf.read()
    bucket.put_object(Key="project/dbt_project.yml", Body=project_content.encode())
    with open(profiles_file) as pf:
        profiles_content = pf.read()
    bucket.put_object(Key="project/profiles.yml", Body=profiles_content.encode())
    with open(packages_file) as pf:
        packages_content = pf.read()
    bucket.put_object(Key="project/packages.yml", Body=packages_content.encode())
    # Ensure we are working with an empty dbt_packages dir in S3.
    keys = hook.list_keys(
        s3_bucket,
        f"s3://{s3_bucket}/project/dbt_packages/",
    )
    if keys is not None and len(keys) > 0:
        hook.delete_objects(
            s3_bucket,
            keys,
        )
    keys = hook.list_keys(
        s3_bucket,
        f"s3://{s3_bucket}/project/dbt_packages/",
    )
    assert keys is None or len(keys) == 0
    op = DbtDepsOperator(
        task_id="dbt_task",
        project_dir=f"s3://{s3_bucket}/project/",
        profiles_dir=f"s3://{s3_bucket}/project/",
        push_dbt_project=True,
    )
    results = op.execute({})
    assert results is None
    keys = hook.list_keys(
        s3_bucket,
        f"s3://{s3_bucket}/project/dbt_packages/",
    )
    # Fixed: the original asserted `len(keys) >= 0`, which is vacuously
    # true; the push must actually have uploaded something.
    assert len(keys) > 0
    # dbt_utils files may be anything, let's just check that at least
    # "dbt_utils" exists as part of the key.
    assert len([k for k in keys if "dbt_utils" in k]) > 0
| 29.935484
| 83
| 0.66945
|
4a0e2f554ee3f484a199e2b460b806f615f4411e
| 5,984
|
py
|
Python
|
src/ZODB/tests/testcrossdatabasereferences.py
|
pretagov/ZODB
|
1fb097b41cae4ca863c8a3664414c9ec0e204393
|
[
"ZPL-2.1"
] | 514
|
2015-01-01T17:20:57.000Z
|
2022-03-29T18:27:04.000Z
|
src/ZODB/tests/testcrossdatabasereferences.py
|
pretagov/ZODB
|
1fb097b41cae4ca863c8a3664414c9ec0e204393
|
[
"ZPL-2.1"
] | 299
|
2015-01-11T15:47:11.000Z
|
2022-03-18T16:37:32.000Z
|
src/ZODB/tests/testcrossdatabasereferences.py
|
pretagov/ZODB
|
1fb097b41cae4ca863c8a3664414c9ec0e204393
|
[
"ZPL-2.1"
] | 87
|
2015-01-10T22:51:45.000Z
|
2021-12-28T09:18:16.000Z
|
##############################################################################
#
# Copyright (c) 2005 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
import doctest
import persistent
import unittest
import ZODB.tests.util
class MyClass(persistent.Persistent):
    """Minimal persistent fixture class used by the doctests below."""
    pass
class MyClass_w_getnewargs(persistent.Persistent):
    """Persistent fixture variant that defines __getnewargs__."""
    def __getnewargs__(self):
        # No constructor arguments are needed to recreate instances.
        return ()
def test_must_use_consistent_connections():
    # Doctest: storing a reference to an object obtained from a *different*
    # connection (to the same database or multidatabase) must raise
    # InvalidObjectReference.  The doctest text below is executable and is
    # left untouched, since doctest matches output verbatim.
    """
    It's important to use consistent connections. References to
    separate connections to the same database or multi-database won't
    work.
    For example, it's tempting to open a second database using the
    database open function, but this doesn't work:
    >>> import ZODB.tests.util, transaction, persistent
    >>> databases = {}
    >>> db1 = ZODB.tests.util.DB(databases=databases, database_name='1')
    >>> db2 = ZODB.tests.util.DB(databases=databases, database_name='2')
    >>> tm = transaction.TransactionManager()
    >>> conn1 = db1.open(transaction_manager=tm)
    >>> p1 = MyClass()
    >>> conn1.root()['p'] = p1
    >>> tm.commit()
    >>> conn2 = db2.open(transaction_manager=tm)
    >>> p2 = MyClass()
    >>> conn2.root()['p'] = p2
    >>> p2.p1 = p1
    >>> tm.commit() # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
    Traceback (most recent call last):
    ...
    InvalidObjectReference:
    ('Attempt to store a reference to an object from a separate connection to
    the same database or multidatabase',
    <Connection at ...>,
    <ZODB.tests.testcrossdatabasereferences.MyClass object at ...>)
    >>> tm.abort()
    Even without multi-databases, a common mistake is to mix objects in
    different connections to the same database.
    >>> conn2 = db1.open(transaction_manager=tm)
    >>> p2 = MyClass()
    >>> conn2.root()['p'] = p2
    >>> p2.p1 = p1
    >>> tm.commit() # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
    Traceback (most recent call last):
    ...
    InvalidObjectReference:
    ('Attempt to store a reference to an object from a separate connection
    to the same database or multidatabase',
    <Connection at ...>,
    <ZODB.tests.testcrossdatabasereferences.MyClass object at ...>)
    >>> tm.abort()
    """
def test_connection_management_doesnt_get_caching_wrong():
    # Doctest: after closing and reopening a multidatabase connection, cached
    # objects in one connection must still resolve cross-database references
    # to the *same* object.  Doctest text is left byte-identical.
    """
    If a connection participates in a multidatabase, then it's
    connections must remain so that references between it's cached
    objects remain sane.
    >>> import ZODB.tests.util, transaction, persistent
    >>> databases = {}
    >>> db1 = ZODB.tests.util.DB(databases=databases, database_name='1')
    >>> db2 = ZODB.tests.util.DB(databases=databases, database_name='2')
    >>> tm = transaction.TransactionManager()
    >>> conn1 = db1.open(transaction_manager=tm)
    >>> conn2 = conn1.get_connection('2')
    >>> z = MyClass()
    >>> conn2.root()['z'] = z
    >>> tm.commit()
    >>> x = MyClass()
    >>> x.z = z
    >>> conn1.root()['x'] = x
    >>> y = MyClass()
    >>> y.z = z
    >>> conn1.root()['y'] = y
    >>> tm.commit()
    >>> conn1.root()['x'].z is conn1.root()['y'].z
    True
    So, we have 2 objects in conn1 that point to the same object in conn2.
    Now, we'll deactivate one, close and repopen the connection, and see
    if we get the same objects:
    >>> x._p_deactivate()
    >>> conn1.close()
    >>> conn1 = db1.open(transaction_manager=tm)
    >>> conn1.root()['x'].z is conn1.root()['y'].z
    True
    >>> db1.close()
    >>> db2.close()
    """
def test_explicit_adding_with_savepoint():
    # Doctest: an object explicitly add()ed to conn1 stays owned by database
    # '1' even if it is also referenced from conn2 after a savepoint.
    """
    >>> import ZODB.tests.util, transaction, persistent
    >>> databases = {}
    >>> db1 = ZODB.tests.util.DB(databases=databases, database_name='1')
    >>> db2 = ZODB.tests.util.DB(databases=databases, database_name='2')
    >>> tm = transaction.TransactionManager()
    >>> conn1 = db1.open(transaction_manager=tm)
    >>> conn2 = conn1.get_connection('2')
    >>> z = MyClass()
    >>> conn1.root()['z'] = z
    >>> conn1.add(z)
    >>> s = tm.savepoint()
    >>> conn2.root()['z'] = z
    >>> tm.commit()
    >>> z._p_jar.db().database_name
    '1'
    >>> db1.close()
    >>> db2.close()
    """
def test_explicit_adding_with_savepoint2():
    # Doctest: same as test_explicit_adding_with_savepoint, but the object is
    # also modified after the savepoint before the commit.
    """
    >>> import ZODB.tests.util, transaction, persistent
    >>> databases = {}
    >>> db1 = ZODB.tests.util.DB(databases=databases, database_name='1')
    >>> db2 = ZODB.tests.util.DB(databases=databases, database_name='2')
    >>> tm = transaction.TransactionManager()
    >>> conn1 = db1.open(transaction_manager=tm)
    >>> conn2 = conn1.get_connection('2')
    >>> z = MyClass()
    >>> conn1.root()['z'] = z
    >>> conn1.add(z)
    >>> s = tm.savepoint()
    >>> conn2.root()['z'] = z
    >>> z.x = 1
    >>> tm.commit()
    >>> z._p_jar.db().database_name
    '1'
    >>> db1.close()
    >>> db2.close()
    """
def tearDownDbs(test):
    """Doctest teardown: close the two databases stored in the test globals."""
    for db_name in ('db1', 'db2'):
        test.globs[db_name].close()
def test_suite():
    # Run the cross-database-references doctest file twice -- once with a
    # plain persistent class and once with one defining __getnewargs__ --
    # plus the doctests embedded in this module.
    return unittest.TestSuite((
        doctest.DocFileSuite(
            '../cross-database-references.rst',
            globs=dict(MyClass=MyClass),
            tearDown=tearDownDbs,
            checker=ZODB.tests.util.checker,
        ),
        doctest.DocFileSuite(
            '../cross-database-references.rst',
            globs=dict(MyClass=MyClass_w_getnewargs),
            tearDown=tearDownDbs,
            checker=ZODB.tests.util.checker,
        ),
        doctest.DocTestSuite(checker=ZODB.tests.util.checker),
    ))
if __name__ == '__main__':
    # Allow running this test module directly from the command line.
    unittest.main(defaultTest='test_suite')
| 29.477833
| 78
| 0.610461
|
4a0e31219b63aa4cd8848ed86130122fc646ee13
| 8,202
|
py
|
Python
|
deep_CCA_model.py
|
zhaoaite/CorrMNN
|
f88a70a199b462e9f3648da3ffdc5ee80a3e5f02
|
[
"MIT"
] | 1
|
2022-02-04T05:28:03.000Z
|
2022-02-04T05:28:03.000Z
|
deep_CCA_model.py
|
zhaoaite/CorrMNN
|
f88a70a199b462e9f3648da3ffdc5ee80a3e5f02
|
[
"MIT"
] | null | null | null |
deep_CCA_model.py
|
zhaoaite/CorrMNN
|
f88a70a199b462e9f3648da3ffdc5ee80a3e5f02
|
[
"MIT"
] | 1
|
2022-03-07T10:16:24.000Z
|
2022-03-07T10:16:24.000Z
|
import math
from keras.layers import Dense
from keras.layers import merge as Merge
from keras.models import Sequential
from keras.optimizers import RMSprop, SGD
from keras.regularizers import l2
from keras import backend as K
import tensorflow as tf
def my_init_sigmoid(shape, dtype=None):
    # Glorot-style uniform initializer for sigmoid layers:
    # 8 * (u - 0.5) spans +/-4, so values are uniform in
    # +/- 4 * sqrt(6 / (fan_in + fan_out)).
    rnd = K.random_uniform(
        shape, 0., 1., dtype)
    # keras-internal helper; private API -- may break across keras versions.
    from keras.initializers import _compute_fans
    fan_in, fan_out = _compute_fans(shape)
    return 8. * (rnd - 0.5) * math.sqrt(6) / math.sqrt(fan_in + fan_out)
def my_init_others(shape, dtype=None):
    # Uniform initializer for non-sigmoid (here: linear output) layers:
    # 2 * (u - 0.5) spans +/-1, so values are uniform in +/- 1 / sqrt(fan_in).
    rnd = K.random_uniform(
        shape, 0., 1., dtype)
    # keras-internal helper; private API -- may break across keras versions.
    from keras.initializers import _compute_fans
    fan_in, fan_out = _compute_fans(shape)
    return 2. * (rnd - 0.5) / math.sqrt(fan_in)
class DeepCCA():
    """Deep CCA model built from two parallel MLP branches (TF1 graph mode).

    Each input view is mapped through its own MLP; ``neg_corr`` holds the
    negative sum of canonical correlations between the two branch outputs
    (minimizing it maximizes correlation, as in Deep CCA, Andrew et al. 2013).
    Two additional MLP heads (``output_view*_class``) are built for
    classification.

    NOTE(review): a commented-out CNN pathway (weights/biases dicts, a
    ``conv_net`` builder, and alternate 256-wide placeholders) was removed as
    dead code; the ``conv2d``/``maxpool2d`` helpers it used are kept below.
    """
    def __init__(self, layer_sizes1,
                 layer_sizes2, layer_sizes3, layer_sizes4, input_size1,
                 input_size2, outdim_size, reg_par, use_all_singular_values):
        # Layer-size lists for the two correlation branches (1, 2) and the
        # two classification heads (3, 4); the last entry of each list is
        # that branch's output width.
        self.layer_sizes1 = layer_sizes1  # e.g. [1024, 1024, 1024, outdim_size]
        self.layer_sizes2 = layer_sizes2  # e.g. [1024, 1024, 1024, outdim_size]
        self.layer_sizes3 = layer_sizes3
        self.layer_sizes4 = layer_sizes4
        self.input_size1 = input_size1
        self.input_size2 = input_size2
        self.outdim_size = outdim_size
        # MLP input placeholders (TF1-style graph mode).
        self.input_view1 = tf.placeholder(tf.float32, [None, input_size1])
        self.input_view2 = tf.placeholder(tf.float32, [None, input_size2])
        # One-hot label placeholders; 52 classes is hard-coded -- TODO confirm
        # against the dataset.
        self.label1 = tf.placeholder("float", [None, 52])
        self.label2 = tf.placeholder("float", [None, 52])
        # Correlation branches (one MLP per view).
        self.output_view1 = self.build_mlp_net(self.input_view1, layer_sizes1, reg_par)
        self.output_view2 = self.build_mlp_net(self.input_view2, layer_sizes2, reg_par)
        # Classification heads.
        self.output_view1_class = self.build_mlp_net(self.input_view1, layer_sizes3, reg_par)
        self.output_view2_class = self.build_mlp_net(self.input_view2, layer_sizes4, reg_par)
        # Loss: negative total canonical correlation between the two branches.
        self.neg_corr, self.value = self.neg_correlation(
            self.output_view1, self.output_view2, use_all_singular_values)

    def conv2d(self, x, W, b, strides=1):
        """Conv2D wrapper, with bias add and relu activation."""
        x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')
        x = tf.nn.bias_add(x, b)
        return tf.nn.relu(x)

    def maxpool2d(self, x, k=2):
        """MaxPool2D wrapper with a k-by-k window and stride."""
        return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1],
                              padding='SAME')

    def build_mlp_net(self, input, layer_sizes, reg_par):
        """Stack Dense layers per *layer_sizes*: sigmoid hidden layers, a
        linear final layer, and l2(reg_par) kernel regularization throughout.
        """
        output = input
        for l_id, ls in enumerate(layer_sizes):
            if l_id == len(layer_sizes) - 1:
                # Final layer: linear activation, smaller-scale initializer.
                activation = None
                kernel_initializer = my_init_others
            else:
                activation = tf.nn.sigmoid
                kernel_initializer = my_init_sigmoid
            output = Dense(ls, activation=activation,
                           kernel_initializer=kernel_initializer,
                           kernel_regularizer=l2(reg_par))(output)
        return output

    def neg_correlation(self, output1, output2, use_all_singular_values):
        """Return (-corr, value): the negative sum of canonical correlations
        between *output1* and *output2*, and the singular values used.
        """
        # Covariance regularizers and an eigenvalue cutoff for stability.
        r1 = 1e-4
        r2 = 1e-4
        eps = 1e-12
        # Work with (features, samples)-shaped matrices.
        H1 = tf.transpose(output1)
        H2 = tf.transpose(output2)
        m = tf.shape(H1)[1]
        # Center each view across the sample dimension.
        H1bar = H1 - (1.0 / tf.cast(m, tf.float32)) * tf.matmul(H1, tf.ones([m, m]))
        H2bar = H2 - (1.0 / tf.cast(m, tf.float32)) * tf.matmul(H2, tf.ones([m, m]))
        # Cross-covariance and (regularized) auto-covariance estimates.
        SigmaHat12 = (1.0 / (tf.cast(m, tf.float32) - 1)) * tf.matmul(H1bar, tf.transpose(H2bar))
        SigmaHat11 = (1.0 / (tf.cast(m, tf.float32) - 1)) * tf.matmul(H1bar, tf.transpose(H1bar)) + r1 * tf.eye(self.outdim_size)
        SigmaHat22 = (1.0 / (tf.cast(m, tf.float32) - 1)) * tf.matmul(H2bar, tf.transpose(H2bar)) + r2 * tf.eye(self.outdim_size)
        # Calculating the root inverse of covariance matrices by using eigen decomposition
        [D1, V1] = tf.linalg.eigh(SigmaHat11)
        [D2, V2] = tf.linalg.eigh(SigmaHat22)
        # Added to increase stability: keep only eigenpairs with value > eps.
        posInd1 = tf.where(tf.greater(D1, eps))
        posInd1 = tf.reshape(posInd1, [-1, tf.shape(posInd1)[0]])[0]
        D1 = tf.gather(D1, posInd1)
        V1 = tf.gather(V1, posInd1)
        posInd2 = tf.where(tf.greater(D2, eps))
        posInd2 = tf.reshape(posInd2, [-1, tf.shape(posInd2)[0]])[0]
        D2 = tf.gather(D2, posInd2)
        V2 = tf.gather(V2, posInd2)
        SigmaHat11RootInv = tf.matmul(tf.matmul(V1, tf.linalg.diag(D1 ** -0.5)), tf.transpose(V1))
        SigmaHat22RootInv = tf.matmul(tf.matmul(V2, tf.linalg.diag(D2 ** -0.5)), tf.transpose(V2))
        print(SigmaHat22RootInv.shape)
        # T = Sigma11^{-1/2} Sigma12 Sigma22^{-1/2}; its singular values are
        # the canonical correlations.
        Tval = tf.matmul(tf.matmul(SigmaHat11RootInv, SigmaHat12), SigmaHat22RootInv)
        value = None
        if use_all_singular_values:
            # all singular values are used to calculate the correlation
            Tval.set_shape([self.outdim_size, self.outdim_size])
            s = tf.linalg.svd(Tval, compute_uv=False)
            value = s
            corr = tf.reduce_sum(s)
        else:
            # just the top outdim_size singular values are used
            [U, V] = tf.linalg.eigh(tf.matmul(tf.transpose(Tval), Tval))
            non_critical_indexes = tf.where(tf.greater(U, eps))
            non_critical_indexes = tf.reshape(non_critical_indexes, [-1, tf.shape(non_critical_indexes)[0]])[0]
            U = tf.gather(U, non_critical_indexes)
            U = tf.gather(U, tf.nn.top_k(U[:, ]).indices)
            value = tf.sqrt(U[0:self.outdim_size])
            corr = tf.reduce_sum(tf.sqrt(U[0:self.outdim_size]))
        return -corr, value
| 44.096774
| 129
| 0.610461
|
4a0e31fa1edd83585dda4b864e1e620a12ed3b5e
| 488
|
py
|
Python
|
hit_me_please/hitters/migrations/0001_initial.py
|
zkan/hit-me-please
|
378bc8c666bc68fd1f84cb31e9208a794e3917c4
|
[
"MIT"
] | null | null | null |
hit_me_please/hitters/migrations/0001_initial.py
|
zkan/hit-me-please
|
378bc8c666bc68fd1f84cb31e9208a794e3917c4
|
[
"MIT"
] | 12
|
2019-12-04T23:05:18.000Z
|
2022-02-10T08:05:37.000Z
|
hit_me_please/hitters/migrations/0001_initial.py
|
jayakornk/hit-me-please-old
|
b6358a8b6bddfe8d29ad796eb61884371308ad6f
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.1 on 2019-06-01 04:10
from django.db import migrations, models
class Migration(migrations.Migration):
    # Initial migration: creates the Hitter table with an auto-increment
    # primary key and an email field (max 300 chars).  Do not edit the
    # operations after this migration has been applied anywhere.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Hitter',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('email', models.EmailField(max_length=300)),
            ],
        ),
    ]
| 22.181818
| 114
| 0.57582
|
4a0e32887c386978aaa626a5551674f26db7ef5c
| 4,316
|
py
|
Python
|
contrib/devtools/logprint-scanner.py
|
IngenuityCoin/Ingenuity
|
475289926e435a9939358c695f4f10d1503bfa0c
|
[
"MIT"
] | 4
|
2018-12-06T23:56:18.000Z
|
2021-03-06T10:15:33.000Z
|
contrib/devtools/logprint-scanner.py
|
IngenuityCoin/Ingenuity
|
475289926e435a9939358c695f4f10d1503bfa0c
|
[
"MIT"
] | null | null | null |
contrib/devtools/logprint-scanner.py
|
IngenuityCoin/Ingenuity
|
475289926e435a9939358c695f4f10d1503bfa0c
|
[
"MIT"
] | 5
|
2018-12-06T23:56:41.000Z
|
2021-05-10T06:35:51.000Z
|
#!/usr/bin/env python
# Copyright (c) 2017-2018 The Ingenuity developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import os, sys
from subprocess import check_output
def countRelevantCommas(line):
    """Count commas at the top level of the first parenthesized group in line.

    Commas nested inside deeper parentheses are ignored, e.g.
    '(a, f(b, c), d)' has two relevant commas.

    Args:
        line: string expected to contain a parenthesized argument list,
            normally starting with '('.

    Returns:
        int, number of top-level commas in the first parenthesized group.
    """
    openParensPosStack = []
    numRelevantCommas = 0
    firstOpenParensIndex = line.find("(")

    # enumerate replaces the original manual charCounter bookkeeping.
    for charCounter, char in enumerate(line):
        if char == '(':
            openParensPosStack.append(charCounter)
        elif char == ')' and openParensPosStack:
            # Guard so unbalanced input cannot raise IndexError on pop().
            openParensPosStack.pop()
        elif (char == "," and openParensPosStack
                and openParensPosStack[-1] == firstOpenParensIndex):
            # Comma directly inside the first '(' group: count it.
            numRelevantCommas += 1
    return numRelevantCommas
if __name__ == "__main__":
    # Locate the repository root so the scan works from any working directory.
    out = check_output(["git", "rev-parse", "--show-toplevel"])
    # check_output returns bytes on Python 3; decode before building the path.
    srcDir = out.decode().rstrip() + "/src/"
    filelist = [os.path.join(dp, f) for dp, dn, filenames in os.walk(srcDir)
                for f in filenames
                if os.path.splitext(f)[1] == '.cpp' or os.path.splitext(f)[1] == '.h']

    incorrectInstanceCounter = 0

    for file in filelist:
        # Context manager guarantees the handle is closed (the original
        # opened without ever calling close()).
        with open(file, "r") as f:
            data = f.read()
        rows = data.split("\n")
        lineCounter = 1
        tempLine = ""
        tempCount = 0
        for row in rows:
            # Collapse multiple lines into one
            tempLine += row

            # Line contains LogPrint or LogPrintf
            if tempLine.find("LogPrint") != -1:
                if tempLine.count("(") == tempLine.count(")"):
                    havePercents = tempLine.count('%') > 0

                    if havePercents:
                        # This line of code has a format specifier that requires checking number of associated arguments
                        # Determine the number of arguments provided, see if that matches the number of format specifiers
                        # Count the number of commas after the format specifier string. Check to see if it matches the number of format specifiers.
                        # Assumes quotes are not escaped in the specifier string and there are no percent signs when specifying the debug level.

                        # First, determine the position of the comma after the format specifier section
                        firstSpecifierIndex = tempLine.find('%')
                        endSpecifierStringIndex = tempLine.find('"', firstSpecifierIndex)
                        commaAfterEndSpecifierStringIndex = tempLine.find(',', endSpecifierStringIndex)

                        # Count the number of commas after the specifier string
                        line = "(" + tempLine[commaAfterEndSpecifierStringIndex:-1]
                        numCommas = countRelevantCommas(line)

                        # Determine number of extra percents after specifier string
                        numExtraPercents = tempLine.count('%', commaAfterEndSpecifierStringIndex)

                        # Subtract extra from total count. This is the number of expected specifiers
                        # ignore %%
                        numPercents = tempLine.count('%') - numExtraPercents - 2 * tempLine.count('%%')

                        if numPercents != numCommas:
                            # print() calls throughout so the script runs on
                            # Python 3; the original mixed py2 print
                            # statements with py3 calls.
                            print("Incorrect number of arguments for LogPrint(f) statement found.")
                            print(str(file) + ":" + str(lineCounter - tempCount))
                            print("Line = " + tempLine)
                            print("numRelevantCommas = " + str(numCommas) + ", numRelevantPercents = " + str(numPercents))
                            print("")
                            incorrectInstanceCounter += 1

                    # Done with this multiline, clear tempLine
                    tempLine = ""
                    tempCount = 0
                else:
                    tempCount += 1
            else:
                # No LogPrint, clear tempLine
                tempLine = ""
                tempCount = 0
            lineCounter += 1

    print("# of incorrect instances: " + str(incorrectInstanceCounter))
    # Non-zero exit status signals CI failure when bad statements exist.
    sys.exit(incorrectInstanceCounter)
| 41.902913
| 167
| 0.565107
|
4a0e33bff8f3545c0bb8820161c2592756f5b7df
| 2,496
|
py
|
Python
|
test/unit/util/test_inflection.py
|
rikeshi/galaxy
|
c536a877e4a9b3d12aa0d00fd4d5e705109a0d0a
|
[
"CC-BY-3.0"
] | 1,085
|
2015-02-18T16:14:38.000Z
|
2022-03-30T23:52:07.000Z
|
test/unit/util/test_inflection.py
|
rikeshi/galaxy
|
c536a877e4a9b3d12aa0d00fd4d5e705109a0d0a
|
[
"CC-BY-3.0"
] | 11,253
|
2015-02-18T17:47:32.000Z
|
2022-03-31T21:47:03.000Z
|
test/unit/util/test_inflection.py
|
rikeshi/galaxy
|
c536a877e4a9b3d12aa0d00fd4d5e705109a0d0a
|
[
"CC-BY-3.0"
] | 1,000
|
2015-02-18T16:18:10.000Z
|
2022-03-29T08:22:56.000Z
|
import pytest
from galaxy.util.inflection import Inflector
SINGULAR_TO_PLURAL = {
"search": "searches",
"switch": "switches",
"fix": "fixes",
"box": "boxes",
"process": "processes",
"address": "addresses",
"case": "cases",
"stack": "stacks",
"wish": "wishes",
"category": "categories",
"query": "queries",
"ability": "abilities",
"agency": "agencies",
"movie": "movies",
"archive": "archives",
"index": "indices",
"wife": "wives",
"half": "halves",
"move": "moves",
"salesperson": "salespeople",
"person": "people",
"spokesman": "spokesmen",
"man": "men",
"woman": "women",
"basis": "bases",
"diagnosis": "diagnoses",
"datum": "data",
"medium": "media",
"analysis": "analyses",
"node_child": "node_children",
"child": "children",
"experience": "experiences",
"day": "days",
"comment": "comments",
"foobar": "foobars",
"newsletter": "newsletters",
"old_news": "old_news",
"news": "news",
"series": "series",
"species": "species",
"subspecies": "subspecies",
"quiz": "quizzes",
"perspective": "perspectives",
"ox": "oxen",
"photo": "photos",
"buffalo": "buffaloes",
"tomato": "tomatoes",
"information": "information",
"misinformation": "misinformation",
"equipment": "equipment",
"bus": "buses",
"status": "statuses",
"mouse": "mice",
"louse": "lice",
"house": "houses",
"octopus": "octopi",
"virus": "viruses",
"alias": "aliases",
"portfolio": "portfolios",
"vertex": "vertices",
"matrix": "matrices",
"axis": "axes",
"testis": "testes",
"crisis": "crises",
"rice": "rice",
"shoe": "shoes",
"horse": "horses",
"prize": "prizes",
"edge": "edges"
}
@pytest.fixture
def inflector():
    """Provide an Inflector instance to the tests below."""
    return Inflector()
# One test case per (singular, plural) pair in SINGULAR_TO_PLURAL.
@pytest.mark.parametrize('test_data', SINGULAR_TO_PLURAL.items())
def test_pluralize_rules(test_data, inflector):
    """Each singular form must pluralize to its expected plural."""
    assert test_data[1] == inflector.pluralize(test_data[0])
# Inverse direction: every plural form must map back to its singular.
@pytest.mark.parametrize('test_data', SINGULAR_TO_PLURAL.items())
def test_singularize_rules(test_data, inflector):
    """Each plural form must singularize to its expected singular."""
    assert test_data[0] == inflector.singularize(test_data[1])
def test_cond_plural(inflector):
    """cond_plural keeps the singular only for a count of exactly 1."""
    cases = [
        (1, 'edge'),
        (-1, 'edges'),
        (0, 'edges'),
        (2, 'edges'),
    ]
    for count, expected in cases:
        assert expected == inflector.cond_plural(count, 'edge')
| 25.212121
| 65
| 0.589343
|
4a0e353ba0f6dfe791445a4de0d1a82463590d23
| 4,863
|
py
|
Python
|
configs/hand/hrnetv2/onehand10k/hrnetv2_w18_onehand10k_256x256.py
|
jcwon0/BlurHPE
|
c97a57e92a8a7f171b0403aee640222a32513562
|
[
"Apache-2.0"
] | null | null | null |
configs/hand/hrnetv2/onehand10k/hrnetv2_w18_onehand10k_256x256.py
|
jcwon0/BlurHPE
|
c97a57e92a8a7f171b0403aee640222a32513562
|
[
"Apache-2.0"
] | null | null | null |
configs/hand/hrnetv2/onehand10k/hrnetv2_w18_onehand10k_256x256.py
|
jcwon0/BlurHPE
|
c97a57e92a8a7f171b0403aee640222a32513562
|
[
"Apache-2.0"
] | null | null | null |
# ---- runtime settings ----
log_level = 'INFO'
load_from = None
resume_from = None
dist_params = dict(backend='nccl')
workflow = [('train', 1)]
checkpoint_config = dict(interval=10)
# Evaluate every 10 epochs; best-checkpoint selection tracks AUC.
evaluation = dict(
    interval=10, metric=['PCK', 'AUC', 'EPE'], key_indicator='AUC')
optimizer = dict(
    type='Adam',
    lr=5e-4,
)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.001,
    step=[170, 200])
total_epochs = 210
log_config = dict(
    interval=10,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
    ])
# 21 keypoint channels (one per hand joint); all 21 are used at inference.
channel_cfg = dict(
    num_output_channels=21,
    dataset_joints=21,
    dataset_channel=[
        [
            0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
            19, 20
        ],
    ],
    inference_channel=[
        0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
        20
    ])
# model settings
model = dict(
    type='TopDown',
    pretrained='open-mmlab://msra/hrnetv2_w18',
    backbone=dict(
        type='HRNet',
        in_channels=3,
        extra=dict(
            stage1=dict(
                num_modules=1,
                num_branches=1,
                block='BOTTLENECK',
                num_blocks=(4, ),
                num_channels=(64, )),
            stage2=dict(
                num_modules=1,
                num_branches=2,
                block='BASIC',
                num_blocks=(4, 4),
                num_channels=(18, 36)),
            stage3=dict(
                num_modules=4,
                num_branches=3,
                block='BASIC',
                num_blocks=(4, 4, 4),
                num_channels=(18, 36, 72)),
            stage4=dict(
                num_modules=3,
                num_branches=4,
                block='BASIC',
                num_blocks=(4, 4, 4, 4),
                num_channels=(18, 36, 72, 144),
                multiscale_output=True),
            upsample=dict(mode='bilinear', align_corners=False))),
    keypoint_head=dict(
        type='TopDownSimpleHead',
        # Concatenate all four HRNet branch resolutions before the head.
        in_channels=[18, 36, 72, 144],
        in_index=(0, 1, 2, 3),
        input_transform='resize_concat',
        out_channels=channel_cfg['num_output_channels'],
        num_deconv_layers=0,
        extra=dict(
            final_conv_kernel=1, num_conv_layers=1, num_conv_kernels=(1, )),
        loss_keypoint=dict(type='JointsMSELoss', use_target_weight=True)),
    train_cfg=dict(),
    test_cfg=dict(
        flip_test=True,
        post_process='default',
        shift_heatmap=True,
        modulate_kernel=11))
data_cfg = dict(
    image_size=[256, 256],
    heatmap_size=[64, 64],
    num_output_channels=channel_cfg['num_output_channels'],
    num_joints=channel_cfg['dataset_joints'],
    dataset_channel=channel_cfg['dataset_channel'],
    inference_channel=channel_cfg['inference_channel'])
# Training pipeline: augmentation plus heatmap target generation.
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='TopDownRandomFlip', flip_prob=0.5),
    dict(
        type='TopDownGetRandomScaleRotation', rot_factor=90, scale_factor=0.3),
    dict(type='TopDownAffine'),
    dict(type='ToTensor'),
    dict(
        type='NormalizeTensor',
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225]),
    dict(type='TopDownGenerateTarget', sigma=2),
    dict(
        type='Collect',
        keys=['img', 'target', 'target_weight'],
        meta_keys=[
            'image_file', 'joints_3d', 'joints_3d_visible', 'center', 'scale',
            'rotation', 'flip_pairs'
        ]),
]
# Validation/test pipeline: no augmentation, no target generation.
val_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='TopDownAffine'),
    dict(type='ToTensor'),
    dict(
        type='NormalizeTensor',
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225]),
    dict(
        type='Collect',
        keys=['img'],
        meta_keys=['image_file', 'center', 'scale', 'rotation', 'flip_pairs']),
]
test_pipeline = val_pipeline
data_root = 'data/onehand10k'
data = dict(
    samples_per_gpu=64,
    workers_per_gpu=2,
    train=dict(
        type='OneHand10KDataset',
        ann_file=f'{data_root}/annotations/onehand10k_train.json',
        img_prefix=f'{data_root}/',
        data_cfg=data_cfg,
        pipeline=train_pipeline),
    val=dict(
        type='OneHand10KDataset',
        ann_file=f'{data_root}/annotations/onehand10k_test.json',
        img_prefix=f'{data_root}/',
        data_cfg=data_cfg,
        pipeline=val_pipeline),
    test=dict(
        type='OneHand10KDataset',
        ann_file=f'{data_root}/annotations/onehand10k_test.json',
        img_prefix=f'{data_root}/',
        data_cfg=data_cfg,
        pipeline=val_pipeline),
)
| 29.834356
| 80
| 0.551306
|
4a0e35a8944737979ef66074b0426471fbca6ed7
| 1,723
|
py
|
Python
|
tests/thread_safety_tests.py
|
jstasiak/coalringbuf
|
c0720fe95121dab18ed084c528c836c7193682bf
|
[
"MIT"
] | 4
|
2017-08-08T17:15:07.000Z
|
2021-12-18T01:55:27.000Z
|
tests/thread_safety_tests.py
|
jstasiak/coalringbuf
|
c0720fe95121dab18ed084c528c836c7193682bf
|
[
"MIT"
] | null | null | null |
tests/thread_safety_tests.py
|
jstasiak/coalringbuf
|
c0720fe95121dab18ed084c528c836c7193682bf
|
[
"MIT"
] | 3
|
2019-12-23T04:37:24.000Z
|
2020-06-27T12:03:38.000Z
|
import os
from threading import Thread
from nose.tools import eq_
from six.moves import xrange
from coalringbuf import CoalescingRingBuffer
POISON_PILL = -1
FIRST_BID = 3
SECOND_BID = 4
FIRST_ASK = 5
SECOND_ASK = 6
NUMBER_OF_INSTRUMENTS = 1 * 10 ** 6 if 'COALRINGBUF_FULL_TEST' in os.environ else 5 * 10 ** 4
def test_should_see_last_prices():
    """Producer publishes two (bid, ask) updates per instrument; the consumer,
    reading concurrently through the coalescing buffer, must always observe
    the final (SECOND_BID, SECOND_ASK) pair for every instrument."""
    # Capacity is 1/5 of the key space, presumably to force coalescing.
    buffer = CoalescingRingBuffer(NUMBER_OF_INSTRUMENTS // 5)
    consumer_snapshots = [None] * NUMBER_OF_INSTRUMENTS
    producer_thread = Thread(target=producer, args=(buffer,))
    consumer_thread = Thread(target=consumer, args=(buffer, consumer_snapshots))
    producer_thread.start()
    consumer_thread.start()
    # NOTE(review): producer_thread is never joined; the consumer exits on
    # the poison pill, which the producer sends last.
    consumer_thread.join()
    for snapshot in consumer_snapshots:
        key, bid, ask = snapshot
        eq_((bid, ask), (SECOND_BID, SECOND_ASK), 'Bid/ask for instrument %r' % (key,))
def producer(buffer):
    """Publish two (bid, ask) updates per instrument, then a poison pill."""
    def put(key, bid, ask):
        # offer() may reject the value; treat a rejection as a hard failure.
        success = buffer.offer(key, (key, bid, ask))
        if not success:
            raise Exception('Adding key %r failed' % (key,))
    for key in xrange(NUMBER_OF_INSTRUMENTS):
        put(key, FIRST_BID, FIRST_ASK)
        put(key, SECOND_BID, SECOND_ASK)
    # Sentinel value telling the consumer to stop.
    put(POISON_PILL, POISON_PILL, POISON_PILL)
def consumer(buffer, consumer_snapshots):
    """Drain the buffer until the poison pill arrives, recording the last
    snapshot seen for each key into consumer_snapshots (indexed by key)."""
    # Single-element list so the nested closure can mutate the flag.
    use_limited_read = [False]
    def fill(bucket):
        # Alternate between a 1-item poll and an unlimited poll,
        # presumably to exercise both read paths of poll().
        buffer.poll(bucket, 1 if use_limited_read[0] else None)
        use_limited_read[0] = not use_limited_read[0]
    while True:
        bucket = []
        fill(bucket)
        for element in bucket:
            if element == (POISON_PILL, POISON_PILL, POISON_PILL):
                return
            else:
                key = element[0]
                consumer_snapshots[key] = element
| 25.716418
| 93
| 0.662217
|
4a0e363041e095845fd66a0704cca6392a88b93c
| 21,224
|
py
|
Python
|
image_recognition_tools.py
|
karlobermeyer/numeric-digit-recognition
|
38bd332f1ab2bb8ad6b548a6957522a74e564031
|
[
"MIT"
] | null | null | null |
image_recognition_tools.py
|
karlobermeyer/numeric-digit-recognition
|
38bd332f1ab2bb8ad6b548a6957522a74e564031
|
[
"MIT"
] | null | null | null |
image_recognition_tools.py
|
karlobermeyer/numeric-digit-recognition
|
38bd332f1ab2bb8ad6b548a6957522a74e564031
|
[
"MIT"
] | 1
|
2021-04-25T08:38:32.000Z
|
2021-04-25T08:38:32.000Z
|
'''Tools for image recognition and classification projects.
Author: Karl J. Obermeyer
'''
# Scientific Computing and Visualization
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import cv2 # Image processing.
# ----- Image Processing -----
def rgb2gray(image, add_degenerate_dimension=False):
    '''Convert an RGB image to grayscale.

    Args:
        image: numpy array, RGB image.
        add_degenerate_dimension: bool, when True the returned grayscale
            image carries a trailing degenerate dimension of size 1.

    Returns:
        numpy array, grayscale image.
    '''
    gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    if add_degenerate_dimension:
        gray = gray[:, :, None]
    return gray
def histogram_equalize_brightness(image):
    '''Improve contrast of an RGB or grayscale image via histogram
    equalization of brightness. Preserves the degenerate dimension of a
    grayscale image, if any.

    Args:
        image: numpy array, RGB or grayscale image (grayscale may carry a
            trailing degenerate dimension of size 1).

    Returns:
        image_out: numpy array, RGB or grayscale image with improved contrast,
            same shape as the input.
    '''
    if len(image.shape) == 3:
        if image.shape[2] == 3:
            # RGB: equalize only the luma (Y) channel so color is untouched.
            image_yuv = cv2.cvtColor(image, cv2.COLOR_RGB2YUV)
            image_yuv[:, :, 0] = cv2.equalizeHist(image_yuv[:, :, 0])
            image_out = cv2.cvtColor(image_yuv, cv2.COLOR_YUV2RGB)
        else: # image.shape[2] == 1 # gray with degenerate dimension
            image_out = cv2.equalizeHist(image[:, :, 0])[:, :, None]
    else: # grayscale
        image_out = cv2.equalizeHist(image)
    return image_out
def test_histogram_equalize_brightness(image):
    '''Visually test histogram brightness equalization on an RGB or
    grayscale image by plotting before/after pairs.

    Args:
        image: numpy array, RGB or grayscale image. Grayscale images are
            allowed to have a degenerate dimension.

    Returns:
        None, just plots images.
    '''
    if len(image.shape) == 2 or image.shape[2] == 1: # grayscale
        # One row: original vs. equalized.
        fig, axes = plt.subplots(
            1, 2, figsize=(6, 5),
            subplot_kw={'xticks': [], 'yticks': []})
        fig.subplots_adjust(hspace=0.7, wspace=0.8)
        axf = axes.flat # Iterator over axes.
        ax = next(axf); ax.imshow(image.squeeze(), cmap='gray')
        ax = next(axf); ax.imshow(
            histogram_equalize_brightness(image.squeeze()), cmap='gray')
    else: # RGB
        # Two rows: RGB pair on top, grayscale pair below.
        image_gray = rgb2gray(image)
        fig, axes = plt.subplots(
            2, 2, figsize=(6, 5),
            subplot_kw={'xticks': [], 'yticks': []})
        fig.subplots_adjust(hspace=0.7, wspace=0.8)
        axf = axes.flat # Iterator over axes.
        ax = next(axf); ax.imshow(image)
        ax = next(axf); ax.imshow(histogram_equalize_brightness(image))
        ax = next(axf); ax.imshow(image_gray, cmap='gray')
        ax = next(axf); ax.imshow(
            histogram_equalize_brightness(image_gray), cmap='gray')
def scale_brightness(image, scale_factor=1.0):
    '''Scale the brightness of an image, clamping the result at 255.

    Args:
        image: numpy array, RGB or grayscale image. Grayscale images are
            allowed to have a degenerate dimension, and that dimension is
            preserved when present in input.
        scale_factor: float, multiplier applied to the brightness channel.

    Returns:
        numpy array (uint8), image with brightness scaled.
    '''
    if len(image.shape) == 3 and image.shape[2] == 3: # RGB
        # Scale only the V (value) channel in HSV space so hue is preserved.
        image_out = cv2.cvtColor(
            image, cv2.COLOR_RGB2HSV).astype(dtype=np.float32)
        image_out[:, :, 2] = scale_factor*image_out[:, :, 2]
        image_out[:, :, 2][image_out[:, :, 2] > 255] = 255 # Upper clamp.
        image_out = image_out.astype(dtype=np.uint8)
        image_out = cv2.cvtColor(image_out, cv2.COLOR_HSV2RGB)
    else: # grayscale
        # Grayscale pixels ARE brightness; scale directly and clamp.
        image_out = scale_factor*image.astype(dtype=np.float32)
        np.clip(image_out, a_min=0.0, a_max=255.0, out=image_out)
        image_out = image_out.astype(dtype=np.uint8)
    return image_out
def normalize(image):
    '''Map pixel values from [0, 255] into [-1.0, 1.0].

    Args:
        image: numpy array with pixel values in the range [0, 255]. RGB,
            grayscale, and grayscale with a degenerate dimension are all
            allowed; the shape is preserved.

    Returns:
        numpy array like the input image but normalized, so dtype is
        float32 instead of uint8.
    '''
    shifted = image.astype(np.float32) - 128.0
    return shifted / np.float32(128.0)
def randomly_perturb(
        image,
        brightness_radius=0.3,
        rotation_radius=30.0,
        translation_radius=3,
        shear_radius=3):
    '''Perturb image in brightness, rotation, translation, and shear. The
    *_radius inputs, w.r.t. the infinity norm, are the radii of intervals on
    which perturbation parameters are uniform randomly sampled.

    Inspired in part by
    https://github.com/vxy10/ImageAugmentation
    OpenCV functions
    http://docs.opencv.org/2.4/modules/imgproc/doc/geometric_transformations.html

    Args:
        image: numpy array, RGB or grayscale image. Grayscale images are
            allowed to have a degenerate dimension, and that dimension is
            preserved when present in input.
        brightness_radius: float, unitless.
        rotation_radius: float, degrees.
        translation_radius: float, pixels.
        shear_radius: float, pixels.

    Returns:
        image_out: numpy array, perturbed version of the image.
    '''
    row_cnt = image.shape[0]
    column_cnt = image.shape[1]
    # Brightness: sampled over the full +/- brightness_radius interval.
    brightness_factor = np.random.uniform(
        low=1.0 - brightness_radius,
        high=1.0 + brightness_radius)
    image_out = scale_brightness(image, scale_factor=brightness_factor)
    # Rotation.
    # NOTE(review): rotation/translation/shear below sample over
    # +/- 0.5*radius, i.e. only half the documented radius, unlike
    # brightness — confirm which interval is intended.
    rotation_angle = np.random.uniform(
        low=-0.5*rotation_radius, high=0.5*rotation_radius)
    M_rotation = cv2.getRotationMatrix2D(
        (column_cnt/2, row_cnt/2), rotation_angle, 1.0)
    # Translation.
    dx = np.random.uniform(
        low=-0.5*translation_radius, high=0.5*translation_radius)
    dy = np.random.uniform(
        low=-0.5*translation_radius, high=0.5*translation_radius)
    M_translation = np.float32([[1.0, 0.0, dx], [0.0, 1.0, dy]])
    # Shear: perturb the corners of a fixed reference triangle and fit the
    # affine map between the original and perturbed triangles.
    points0 = np.float32([[5.0, 5.0], [20.0, 5.0], [5.0, 20.0]])
    point0 = 5.0 + shear_radius*np.random.uniform(low=-0.5, high=0.5)
    point1 = 20 + shear_radius*np.random.uniform(low=-0.5, high=0.5)
    points1 = np.float32([[point0, 5.0], [point1, point0], [5.0, point1]])
    M_shear = cv2.getAffineTransform(points0, points1)
    if len(image_out.shape) == 3 and image_out.shape[2] == 1:
        # Grayscale with degenerate dimension.
        # warpAffine needs 2-D input here; strip the dimension, re-add after.
        image_out = cv2.warpAffine(
            image_out[:, :, 0], M_rotation, (column_cnt, row_cnt))
        image_out = cv2.warpAffine(
            image_out, M_translation, (column_cnt, row_cnt))
        image_out = cv2.warpAffine(
            image_out, M_shear, (column_cnt, row_cnt))[:, :, None]
    else: # RGB and grayscale without degenerate dimension.
        image_out = cv2.warpAffine(
            image_out, M_rotation, (column_cnt, row_cnt))
        image_out = cv2.warpAffine(
            image_out, M_translation, (column_cnt, row_cnt))
        image_out = cv2.warpAffine(
            image_out, M_shear, (column_cnt, row_cnt))
    return image_out
def test_randomly_perturb(
        image,
        brightness_radius=0.3,
        rotation_radius=30.0,
        translation_radius=3,
        shear_radius=3):
    '''Visually test random perturbation by plotting a grid of perturbed
    copies of one image.

    Args:
        image: numpy array, RGB or grayscale image. Grayscale images are
            allowed to have degenerate dimension.
        brightness_radius: float.
        rotation_radius: float.
        translation_radius: float.
        shear_radius: float.

    Returns:
        None, just plots images.
    '''
    # 6x4 grid of independently perturbed samples.
    row_cnt = 6
    column_cnt = 4
    width = 17
    height = 15
    gs = gridspec.GridSpec(row_cnt, column_cnt)
    gs.update(wspace=0.01, hspace=0.02) # set the spacing between axes.
    plt.figure(figsize=(width, height))
    for i in range(row_cnt*column_cnt):
        perturbation = randomly_perturb(
            image,
            brightness_radius,
            rotation_radius,
            translation_radius,
            shear_radius)
        ax1 = plt.subplot(gs[i])
        ax1.set_xticklabels([])
        ax1.set_yticklabels([])
        ax1.set_aspect('equal')
        plt.subplot(row_cnt, column_cnt, i+1)
        if len(image.shape) == 3 and image.shape[2] == 3: # RGB
            plt.imshow(perturbation)
        else: # grayscale
            plt.imshow(perturbation.squeeze(), cmap='gray')
        plt.axis('off')
# ----- Dataset Manipulation -----
def shuffle(X, y):
    '''Return randomly shuffled copies of X and y, shuffled in unison.

    Similar to sklearn.utils.shuffle, but uses the numpy.random seed
    for consistency.

    Args:
        X: numpy array of datapoints, e.g., as X_train, X_validate, or
            X_test.
        y: numpy array of labels aligned row-for-row with X.

    Returns:
        X_shuffled: numpy array, rows of X in a common random order.
        y_shuffled: numpy array, labels of y in the same random order,
            so the X/y pairing is preserved.
    '''
    order = np.arange(len(y))
    np.random.shuffle(order)
    return X[order, :], y[order]
def combine(*args):
    '''Combine image classification datasets.

    Args:
        args: alternating sequence X0, y0, X1, y1, ..., where each X is a
            numpy array of images and each y is a numpy array of class
            labels. Output dtypes are taken from the first pair (X0, y0);
            data from every pair is cast to those dtypes, matching the
            original element-wise-copy behavior.

    Returns:
        X_combined: numpy array of images, concatenated along axis 0.
        y_combined: numpy array of class labels, concatenated along axis 0.
    '''
    assert len(args) % 2 == 0, 'Must be even number of arguments!'
    # np.concatenate replaces the original manual pre-allocation and
    # slice-copy loop: one C-level pass, same result.
    X_combined = np.concatenate(args[0::2], axis=0).astype(args[0].dtype)
    y_combined = np.concatenate(args[1::2], axis=0).astype(args[1].dtype)
    return X_combined, y_combined
def count_by_label(y):
    '''Count the number of occurrences of each label.

    Args:
        y: numpy array of int labels, e.g., as y_train, y_validate,
            or y_test.

    Returns:
        datapoint_cnt_by_label: dict, where keys are class labels and
            values are the number of respective label occurrences as
            unsigned ints.
    '''
    datapoint_cnt_by_label = {}
    for label in y:
        datapoint_cnt_by_label[label] = \
            datapoint_cnt_by_label.get(label, 0) + 1
    return datapoint_cnt_by_label
def sort_by_class(X, y, class_list, datapoint_cnt_by_label=None):
    '''Sort images by class.

    Args:
        X: numpy array of images, e.g., as X_train, X_validate, or
            X_test.
        y: numpy array of int labels, e.g., as y_train, y_validate,
            or y_test.
        class_list: list of class labels.
        datapoint_cnt_by_label: None or dict, where keys are class
            labels and values are the number of respective label
            occurrences as unsigned ints. None => computed
            internally, else precompute using count_by_label(...).

    Returns:
        images_by_label: dict, where keys are class labels and
            values are numpy arrays of images.
    '''
    image_shape = X[0].shape
    images_by_label = {} # All images by label.
    if datapoint_cnt_by_label is None:
        datapoint_cnt_by_label = count_by_label(y)
    # Initialize numpy array for each label (pre-sized to its exact count).
    for label, cnt in datapoint_cnt_by_label.items():
        images_by_label[label] = \
            np.zeros((cnt, *image_shape), dtype=X.dtype)
    # Finish populating output: next_image_ixs tracks the next free slot
    # in each label's array.
    next_image_ixs = { label: 0 for label in class_list }
    for i, label in enumerate(y):
        images_by_label[label][next_image_ixs[label], :] = X[i, :]
        next_image_ixs[label] += 1
    return images_by_label
def balance(
        X, y, class_list, datapoint_cnt_per_class=None, perturb=False):
    '''Create a new dataset with datapoints either pruned or replicated
    such that every class has exactly datapoint_cnt_per_class datapoints.

    Args:
        X: numpy array of images, e.g., as X_train, X_validate, or
            X_test. Assumed shuffled.
        y: numpy array of int labels, e.g., as y_train, y_validate,
            or y_test. Assumed shuffled.
        class_list: list of class labels.
        datapoint_cnt_per_class: None or unsigned, optional number
            of data points to use per label. Default is the number of
            datapoints in the most represented class.
        perturb: bool, whether to perturb (modulate brightness, translate,
            rotate, shear) replicated images.

    Returns:
        X_balanced: numpy array of images, where each class has
            exactly datapoint_cnt_per_class datapoints.
        y_balanced: numpy array of unsigned labels, where each
            class has exactly datapoint_cnt_per_class datapoints.
    '''
    class_cnt = len(class_list)
    image_shape = X[0, :].shape
    datapoint_cnt_by_label = count_by_label(y)
    if datapoint_cnt_per_class is None:
        datapoint_cnt_per_class = max(datapoint_cnt_by_label.values())
    images_by_label = sort_by_class(
        X, y, class_list, datapoint_cnt_by_label=datapoint_cnt_by_label)
    # Initialize output.
    # NOTE(review): output dtype is hard-coded to uint8 even when X has
    # another dtype — confirm this is intended.
    datapoint_cnt = datapoint_cnt_per_class*class_cnt
    X_balanced = np.zeros((datapoint_cnt, *image_shape), dtype=np.uint8)
    y_balanced = np.zeros(datapoint_cnt, dtype=np.uint8)
    # Populate output, cycling through each class's images via modulo so
    # under-represented classes are replicated.
    running_datapoint_cnts = { label: 0 for label in class_list }
    for i in range(datapoint_cnt):
        label = class_list[i % class_cnt]
        image = images_by_label[label][i % datapoint_cnt_by_label[label], :]
        # Originals are copied verbatim; only replicas beyond a class's
        # real count get perturbed (and only when perturb=True).
        if not perturb or \
                running_datapoint_cnts[label] < datapoint_cnt_by_label[label]:
            X_balanced[i, :] = image
        else:
            X_balanced[i, :] = randomly_perturb(image)
        y_balanced[i] = label
        running_datapoint_cnts[label] += 1
    return shuffle(X_balanced, y_balanced)
# ----- Dataset Visualization -----
def truncate_string(s, ubnd, add_ellipses=True):
    '''Return a version of the string that is clamped at the end to
    length `ubnd`.

    Args:
        s: string, string to clamp.
        ubnd: unsigned int, max length of output string.
        add_ellipses: bool, whether to replace the last 3 chars of truncated
            strings with an ellipsis.

    Returns:
        string, clamped version of `s` (length <= ubnd).
    '''
    if len(s) <= ubnd:
        return s
    # Bug fix: the add_ellipses flag was previously ignored and '...' was
    # always appended.
    if add_ellipses:
        return s[:ubnd-3] + '...'
    return s[:ubnd]
def class_histograms(
        y_train, y_validate, y_test,
        class_cnt, suptitle_prefix=''):
    '''Plot histograms of class representation in each dataset.

    Args:
        y_train: numpy array, training labels.
        y_validate: numpy array, validation labels.
        y_test: numpy array, testing labels.
        class_cnt: unsigned, precomputed number of classes.
        suptitle_prefix: string, to describe datasets modified, e.g.,
            by over- or undersampling.

    Returns:
        None
    '''
    bin_cnt = class_cnt
    fig = plt.figure(
        figsize=(13, 10), dpi=80, facecolor='w', edgecolor='k')
    fig.subplots_adjust(hspace=0.4, wspace=0.7, top=0.9)
    fig.suptitle(r'$\ \ \ $ ' + suptitle_prefix +
                 ' Class Counts', fontsize=16, fontweight='bold')

    def _add_histogram(position, labels, title, ylabel=None, xlabel=None):
        # One bar chart per dataset split, stacked vertically.
        ax = fig.add_subplot(3, 1, position)  # [rows|columns|plot]
        # `normed=` was removed from matplotlib's hist(); raw counts are
        # the default behavior, so no replacement kwarg is needed.
        plt.hist(labels, bins=bin_cnt)
        if ylabel:
            plt.ylabel(ylabel, fontsize=14, fontweight='bold')
        if xlabel:
            plt.xlabel(xlabel, fontsize=14, fontweight='bold')
        plt.title(title, fontsize=14, fontweight='bold')
        ax.grid(True)

    _add_histogram(1, y_train, 'Training Set')
    _add_histogram(2, y_validate, 'Validation Set',
                   ylabel='Number of Datapoints in Class')
    _add_histogram(3, y_test, 'Test Set', xlabel='Class Label')
def plot_images(X, y=None, english_labels=None):
    '''Plot all images in a dataset in a 3-column grid.

    Args:
        X: numpy array of images, e.g., as X_train, X_validate, or
            X_test. Must be 4-D (N, H, W, C): the channel count is read
            from X.shape[3].
        y: None or indexable of labels, e.g., as y_train,
            y_validate, or y_test.
        english_labels: None or list of strings, the English names
            for labels in y.

    Returns:
        None
    '''
    # ::CAUTION:: RGB images should have type np.uint8. Images cast
    # as a float type will not display correctly with matplotlib.
    image_cnt = X.shape[0]
    column_cnt = 3
    row_cnt = int(np.ceil(image_cnt/column_cnt))
    height = 2.5*row_cnt
    fig, axes = plt.subplots(
        row_cnt, column_cnt, figsize=(15, height),
        subplot_kw={'xticks': [], 'yticks': []})
    fig.subplots_adjust(hspace=0.7, wspace=0.8)
    axf = axes.flat # Iterator over axes.
    for i in range(X.shape[0]):
        ax = next(axf)
        if X.shape[3] == 3:
            ax.imshow(X[i, :].squeeze())
        else:
            ax.imshow(X[i, :].squeeze(), cmap='gray')
        if y is not None:
            label = y[i]
            if english_labels is not None:
                full_label = str(label) + ': ' + english_labels[label]
            else:
                full_label = str(label)
            # Titles clamped so long labels don't overlap neighbors.
            ax.set_title(truncate_string(full_label, ubnd=25))
    for i in range(row_cnt*column_cnt - image_cnt):
        ax = next(axf)
        ax.set_visible(False) # Hide blank subplot.
def plot_representative_images(
        images_by_label, class_cnt, english_labels=None,
        method='first', image_cnt=None):
    '''Plot one summary image for each class in a dataset.

    Args:
        images_by_label: dict of numpy arrays, e.g., as output by
            sort_by_class(...).
        class_cnt: unsigned, number of image classes.
        english_labels: None or list of strings, the English names
            for labels in y.
        method: string, use as representative
            'first' => first encountered example,
            'mean' => pixelwise mean of first image_cnt images encountered,
            'median' => pixelwise median of first image_cnt images
                encountered.
        image_cnt: None or uint, number of images to use for mean or median,
            if applicable. None uses all images of each class.

    Returns:
        None
    '''
    # ::CAUTION:: RGB images should have type np.uint8. Images cast
    # as a float type will not display correctly with matplotlib.
    image_shape = next(iter(images_by_label.values()))[0].shape
    if len(image_shape) < 3 or image_shape[2] == 1:
        cmap = 'gray'
    else:
        cmap = 'jet'
    # Bug fix: the grid size previously overwrote the `image_cnt`
    # parameter, so 'mean'/'median' always averaged class_cnt images and
    # the None default never took effect. Use a separate local instead.
    plot_cnt = class_cnt
    column_cnt = 3
    row_cnt = int(np.ceil(plot_cnt/column_cnt))
    height = 2.5*row_cnt
    fig, axes = plt.subplots(
        row_cnt, column_cnt, figsize=(15, height),
        subplot_kw={'xticks': [], 'yticks': []})
    fig.subplots_adjust(hspace=0.7, wspace=0.8)
    axf = axes.flat # Iterator over axes.
    for label in sorted(images_by_label):
        images = images_by_label[label]
        # None => use every image of this class for the summary statistic.
        summary_cnt = len(images) if image_cnt is None else image_cnt
        if method == 'mean':
            summary_image = np.mean(
                images[0:summary_cnt], axis=0).astype(images.dtype)
        elif method == 'median':
            summary_image = np.median(
                images[0:summary_cnt], axis=0).astype(images.dtype)
        else: # 'first'
            summary_image = images[0, :]
        ax = next(axf)
        ax.imshow(summary_image.squeeze(), cmap=cmap)
        if english_labels:
            full_label = str(label) + ': ' + english_labels[label]
        else:
            full_label = str(label)
        ax.set_title(truncate_string(full_label, ubnd=25))
    for i in range(row_cnt*column_cnt - plot_cnt):
        ax = next(axf)
        ax.set_visible(False) # Hide blank subplot.
| 34.232258
| 81
| 0.636779
|
4a0e363892bfa8ca28bc9b7fd42b909b4573ac26
| 2,819
|
py
|
Python
|
detectors/medics/cnn2d/models/mmaction/mmaction/models/recognizers/TSN3D.py
|
zhampel/FakeFinder
|
2891a8649acc1dabdef07554d6acb346dd23dbae
|
[
"Apache-2.0"
] | 48
|
2020-06-12T15:46:14.000Z
|
2022-03-19T16:02:05.000Z
|
detectors/medics/cnn2d/models/mmaction/mmaction/models/recognizers/TSN3D.py
|
zhampel/FakeFinder
|
2891a8649acc1dabdef07554d6acb346dd23dbae
|
[
"Apache-2.0"
] | 37
|
2021-03-11T18:44:08.000Z
|
2022-03-30T02:47:53.000Z
|
detectors/medics/cnn2d/models/mmaction/mmaction/models/recognizers/TSN3D.py
|
zhampel/FakeFinder
|
2891a8649acc1dabdef07554d6acb346dd23dbae
|
[
"Apache-2.0"
] | 17
|
2020-06-13T12:25:42.000Z
|
2021-11-22T14:49:26.000Z
|
from .base import BaseRecognizer
from .. import builder
from ..registry import RECOGNIZERS
import torch
@RECOGNIZERS.register_module
class TSN3D(BaseRecognizer):
    """TSN-style recognizer assembled from pluggable sub-modules.

    The forward pass (see ``forward``) is:
    backbone -> spatial_temporal_module -> segmental_consensus -> cls_head.
    Each sub-module is constructed from its config dict via ``builder``.
    """

    def __init__(self,
                 backbone,
                 flownet=None,
                 spatial_temporal_module=None,
                 segmental_consensus=None,
                 cls_head=None,
                 train_cfg=None,
                 test_cfg=None,
                 fcn_testing=False):
        # spatial_temporal_module, segmental_consensus and cls_head are
        # mandatory: passing None raises NotImplementedError. Only flownet
        # is truly optional.
        super(TSN3D, self).__init__()
        self.fcn_testing = fcn_testing
        self.backbone = builder.build_backbone(backbone)
        if flownet is not None:
            self.flownet = builder.build_flownet(flownet)
        if spatial_temporal_module is not None:
            self.spatial_temporal_module = builder.build_spatial_temporal_module(
                spatial_temporal_module)
        else:
            raise NotImplementedError
        if segmental_consensus is not None:
            self.segmental_consensus = builder.build_segmental_consensus(
                segmental_consensus)
        else:
            raise NotImplementedError
        if cls_head is not None:
            self.cls_head = builder.build_head(cls_head)
        else:
            raise NotImplementedError
        self.train_cfg = train_cfg
        self.test_cfg = test_cfg
        self.init_weights()

    @property
    def with_flownet(self):
        # True when the optional flownet sub-module was configured.
        return hasattr(self, 'flownet') and self.flownet is not None

    @property
    def with_spatial_temporal_module(self):
        return hasattr(self, 'spatial_temporal_module') and self.spatial_temporal_module is not None

    @property
    def with_segmental_consensus(self):
        return hasattr(self, 'segmental_consensus') and self.segmental_consensus is not None

    @property
    def with_cls_head(self):
        return hasattr(self, 'cls_head') and self.cls_head is not None

    def init_weights(self):
        """Initialize the weights of every constructed sub-module."""
        super(TSN3D, self).init_weights()
        self.backbone.init_weights()
        if self.with_flownet:
            self.flownet.init_weights()
        if self.with_spatial_temporal_module:
            self.spatial_temporal_module.init_weights()
        if self.with_segmental_consensus:
            self.segmental_consensus.init_weights()
        if self.with_cls_head:
            self.cls_head.init_weights()

    def extract_feat(self, x):
        """Run the backbone on input x and return its feature maps."""
        return self.backbone(x)

    def forward(self, x):
        # NOTE(review): self.flownet may be built in __init__ but is never
        # used here; confirm whether flow input is handled elsewhere.
        x = self.extract_feat(x)
        if self.with_spatial_temporal_module:
            x = self.spatial_temporal_module(x)
        if self.with_segmental_consensus:
            # Insert a segment axis of size 1 before consensus, drop it after.
            x = x.reshape((-1, 1) + x.shape[1:])
            x = self.segmental_consensus(x)
            x = x.squeeze(1)
        if self.with_cls_head:
            x = self.cls_head(x)
        return x
| 29.061856
| 100
| 0.628237
|
4a0e3822f997fa8f57845b561e32c5cd9164e7d6
| 11,668
|
py
|
Python
|
database_delivery_sdk/api/dbtask/list_dbversions_pb2.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | 5
|
2019-07-31T04:11:05.000Z
|
2021-01-07T03:23:20.000Z
|
database_delivery_sdk/api/dbtask/list_dbversions_pb2.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | null | null | null |
database_delivery_sdk/api/dbtask/list_dbversions_pb2.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: list_dbversions.proto
# NOTE(review): machine-generated module — only comments were added in
# review.  To change messages, edit list_dbversions.proto and regenerate;
# never hand-edit the descriptor data below.
import sys
# Py2/Py3 shim: bytes literals pass through on Python 2; on Python 3 they
# are encoded as latin1 so the serialized descriptor round-trips unchanged.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()


# File descriptor carrying the compiled list_dbversions.proto schema.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='list_dbversions.proto',
  package='dbtask',
  syntax='proto3',
  serialized_options=None,
  serialized_pb=_b('\n\x15list_dbversions.proto\x12\x06\x64\x62task\"K\n\x14ListDBVersionRequest\x12\x13\n\x0b\x64\x62ServiceId\x18\x01 \x01(\t\x12\x0c\n\x04page\x18\x02 \x01(\x05\x12\x10\n\x08pageSize\x18\x03 \x01(\x05\"\xf0\x01\n\x15ListDBVersionResponse\x12\x0c\n\x04page\x18\x01 \x01(\x05\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\r\n\x05total\x18\x03 \x01(\x05\x12\x30\n\x04list\x18\x04 \x03(\x0b\x32\".dbtask.ListDBVersionResponse.List\x1au\n\x04List\x12\n\n\x02id\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x62serviceId\x18\x02 \x01(\t\x12\x14\n\x0c\x64\x62instanceId\x18\x03 \x01(\t\x12\x10\n\x08\x64\x62taskId\x18\x04 \x01(\t\x12\x15\n\rchangesetName\x18\x05 \x01(\t\x12\r\n\x05\x63time\x18\x06 \x01(\x03\"}\n\x1cListDBVersionResponseWrapper\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x13\n\x0b\x63odeExplain\x18\x02 \x01(\t\x12\r\n\x05\x65rror\x18\x03 \x01(\t\x12+\n\x04\x64\x61ta\x18\x04 \x01(\x0b\x32\x1d.dbtask.ListDBVersionResponseb\x06proto3')
)


# Descriptor for message dbtask.ListDBVersionRequest
# (fields: dbServiceId, page, pageSize).
_LISTDBVERSIONREQUEST = _descriptor.Descriptor(
  name='ListDBVersionRequest',
  full_name='dbtask.ListDBVersionRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='dbServiceId', full_name='dbtask.ListDBVersionRequest.dbServiceId', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='page', full_name='dbtask.ListDBVersionRequest.page', index=1,
      number=2, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='pageSize', full_name='dbtask.ListDBVersionRequest.pageSize', index=2,
      number=3, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=33,
  serialized_end=108,
)


# Descriptor for the nested message dbtask.ListDBVersionResponse.List
# (one DB version record).
_LISTDBVERSIONRESPONSE_LIST = _descriptor.Descriptor(
  name='List',
  full_name='dbtask.ListDBVersionResponse.List',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='id', full_name='dbtask.ListDBVersionResponse.List.id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='dbserviceId', full_name='dbtask.ListDBVersionResponse.List.dbserviceId', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='dbinstanceId', full_name='dbtask.ListDBVersionResponse.List.dbinstanceId', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='dbtaskId', full_name='dbtask.ListDBVersionResponse.List.dbtaskId', index=3,
      number=4, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='changesetName', full_name='dbtask.ListDBVersionResponse.List.changesetName', index=4,
      number=5, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='ctime', full_name='dbtask.ListDBVersionResponse.List.ctime', index=5,
      number=6, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=234,
  serialized_end=351,
)

# Descriptor for message dbtask.ListDBVersionResponse
# (paginated container: page, page_size, total, repeated list).
_LISTDBVERSIONRESPONSE = _descriptor.Descriptor(
  name='ListDBVersionResponse',
  full_name='dbtask.ListDBVersionResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='page', full_name='dbtask.ListDBVersionResponse.page', index=0,
      number=1, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='page_size', full_name='dbtask.ListDBVersionResponse.page_size', index=1,
      number=2, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='total', full_name='dbtask.ListDBVersionResponse.total', index=2,
      number=3, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='list', full_name='dbtask.ListDBVersionResponse.list', index=3,
      number=4, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[_LISTDBVERSIONRESPONSE_LIST, ],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=111,
  serialized_end=351,
)


# Descriptor for message dbtask.ListDBVersionResponseWrapper
# (standard API envelope: code, codeExplain, error, data).
_LISTDBVERSIONRESPONSEWRAPPER = _descriptor.Descriptor(
  name='ListDBVersionResponseWrapper',
  full_name='dbtask.ListDBVersionResponseWrapper',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='code', full_name='dbtask.ListDBVersionResponseWrapper.code', index=0,
      number=1, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='codeExplain', full_name='dbtask.ListDBVersionResponseWrapper.codeExplain', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='error', full_name='dbtask.ListDBVersionResponseWrapper.error', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='data', full_name='dbtask.ListDBVersionResponseWrapper.data', index=3,
      number=4, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=353,
  serialized_end=478,
)

# Wire up cross-references between descriptors and register them.
_LISTDBVERSIONRESPONSE_LIST.containing_type = _LISTDBVERSIONRESPONSE
_LISTDBVERSIONRESPONSE.fields_by_name['list'].message_type = _LISTDBVERSIONRESPONSE_LIST
_LISTDBVERSIONRESPONSEWRAPPER.fields_by_name['data'].message_type = _LISTDBVERSIONRESPONSE
DESCRIPTOR.message_types_by_name['ListDBVersionRequest'] = _LISTDBVERSIONREQUEST
DESCRIPTOR.message_types_by_name['ListDBVersionResponse'] = _LISTDBVERSIONRESPONSE
DESCRIPTOR.message_types_by_name['ListDBVersionResponseWrapper'] = _LISTDBVERSIONRESPONSEWRAPPER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

# Concrete message classes generated from the descriptors above.
ListDBVersionRequest = _reflection.GeneratedProtocolMessageType('ListDBVersionRequest', (_message.Message,), {
  'DESCRIPTOR' : _LISTDBVERSIONREQUEST,
  '__module__' : 'list_dbversions_pb2'
  # @@protoc_insertion_point(class_scope:dbtask.ListDBVersionRequest)
  })
_sym_db.RegisterMessage(ListDBVersionRequest)

ListDBVersionResponse = _reflection.GeneratedProtocolMessageType('ListDBVersionResponse', (_message.Message,), {

  'List' : _reflection.GeneratedProtocolMessageType('List', (_message.Message,), {
    'DESCRIPTOR' : _LISTDBVERSIONRESPONSE_LIST,
    '__module__' : 'list_dbversions_pb2'
    # @@protoc_insertion_point(class_scope:dbtask.ListDBVersionResponse.List)
    })
  ,
  'DESCRIPTOR' : _LISTDBVERSIONRESPONSE,
  '__module__' : 'list_dbversions_pb2'
  # @@protoc_insertion_point(class_scope:dbtask.ListDBVersionResponse)
  })
_sym_db.RegisterMessage(ListDBVersionResponse)
_sym_db.RegisterMessage(ListDBVersionResponse.List)

ListDBVersionResponseWrapper = _reflection.GeneratedProtocolMessageType('ListDBVersionResponseWrapper', (_message.Message,), {
  'DESCRIPTOR' : _LISTDBVERSIONRESPONSEWRAPPER,
  '__module__' : 'list_dbversions_pb2'
  # @@protoc_insertion_point(class_scope:dbtask.ListDBVersionResponseWrapper)
  })
_sym_db.RegisterMessage(ListDBVersionResponseWrapper)


# @@protoc_insertion_point(module_scope)
| 41.523132
| 950
| 0.7494
|
4a0e394d893ec46ca93d8f8a3789bc83faffad2c
| 6,616
|
py
|
Python
|
src/datadog_api_client/v1/model/widget_layout.py
|
DataDog/datadog-api-client-python
|
de2fc57dbde9acf4b8c8eef94ac29911227a62a2
|
[
"Apache-2.0"
] | 32
|
2021-01-07T15:09:56.000Z
|
2022-01-30T05:49:23.000Z
|
src/datadog_api_client/v1/model/widget_layout.py
|
DataDog/datadog-api-client-python
|
de2fc57dbde9acf4b8c8eef94ac29911227a62a2
|
[
"Apache-2.0"
] | 228
|
2020-09-03T14:03:54.000Z
|
2022-03-31T20:16:12.000Z
|
src/datadog_api_client/v1/model/widget_layout.py
|
DataDog/datadog-api-client-python
|
de2fc57dbde9acf4b8c8eef94ac29911227a62a2
|
[
"Apache-2.0"
] | 12
|
2020-09-15T21:36:03.000Z
|
2022-03-31T17:13:17.000Z
|
# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
from datadog_api_client.v1.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
)
class WidgetLayout(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    # No enum-constrained attributes on this model.
    allowed_values = {}

    # All geometry values must be non-negative integers.
    validations = {
        ("height",): {
            "inclusive_minimum": 0,
        },
        ("width",): {
            "inclusive_minimum": 0,
        },
        ("x",): {
            "inclusive_minimum": 0,
        },
        ("y",): {
            "inclusive_minimum": 0,
        },
    }

    # Reject properties not declared in openapi_types.
    additional_properties_type = None

    _nullable = False

    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        return {
            "height": (int,),  # noqa: E501
            "width": (int,),  # noqa: E501
            "x": (int,),  # noqa: E501
            "y": (int,),  # noqa: E501
            "is_column_break": (bool,),  # noqa: E501
        }

    discriminator = None

    # Python attribute name -> JSON key (identical here).
    attribute_map = {
        "height": "height",  # noqa: E501
        "width": "width",  # noqa: E501
        "x": "x",  # noqa: E501
        "y": "y",  # noqa: E501
        "is_column_break": "is_column_break",  # noqa: E501
    }

    read_only_vars = {}

    _composed_schemas = {}

    @convert_js_args_to_python_args
    def __init__(self, height, width, x, y, *args, **kwargs):  # noqa: E501
        """WidgetLayout - a model defined in OpenAPI

        Args:
            height (int): The height of the widget. Should be a non-negative integer.
            width (int): The width of the widget. Should be a non-negative integer.
            x (int): The position of the widget on the x (horizontal) axis. Should be a non-negative integer.
            y (int): The position of the widget on the y (vertical) axis. Should be a non-negative integer.

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                will be type checked and a TypeError will be
                raised if the wrong type is input.
                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                drill down to the model in received_data
                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                are serialized names, as specified in the OpenAPI document.
                False if the variable names in the input data
                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                deserializing a file_type parameter.
                If passed, type conversion is attempted
                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                classes that we have traveled through so that
                if we see that class again we will not use its
                discriminator again.
                When traveling through a discriminator, the
                composed schema that is
                is traveled through is added to this set.
                For example if Animal has a discriminator
                petType and we pass in "Dog", and the class Dog
                allOf includes Animal, we move through Animal
                once using the discriminator, and pick Dog.
                Then in Dog, we will make an instance of the
                Animal class but this time we won't travel
                through its discriminator because we passed in
                _visited_composed_classes = (Animal,)
            is_column_break (bool): Whether the widget should be the first one on the second column in high density or not. **Note**: Only for the **new dashboard layout** and only one widget in the dashboard should have this property set to `true`.. [optional] # noqa: E501
        """
        super().__init__(kwargs)
        self._check_pos_args(args)

        self.height = height
        self.width = width
        self.x = x
        self.y = y

    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, height, width, x, y, *args, **kwargs):  # noqa: E501
        """Helper creating a new instance from a response."""
        self = super(WidgetLayout, cls)._from_openapi_data(kwargs)
        self._check_pos_args(args)

        self.height = height
        self.width = width
        self.x = x
        self.y = y
        return self
| 40.341463
| 275
| 0.573156
|
4a0e39efca1226fb469bb1208fe9c2fef878b452
| 33,386
|
py
|
Python
|
test/functional/test_framework/test_node.py
|
c1pzo/c1pzo
|
51d395eacfb92bdf43564b01b508fa84b0636a7d
|
[
"MIT"
] | null | null | null |
test/functional/test_framework/test_node.py
|
c1pzo/c1pzo
|
51d395eacfb92bdf43564b01b508fa84b0636a7d
|
[
"MIT"
] | null | null | null |
test/functional/test_framework/test_node.py
|
c1pzo/c1pzo
|
51d395eacfb92bdf43564b01b508fa84b0636a7d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2017-2020 The C1pzo Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Class for c1pzod node under test"""
import contextlib
import decimal
import errno
from enum import Enum
import http.client
import json
import logging
import os
import re
import subprocess
import tempfile
import time
import urllib.parse
import collections
import shlex
import sys
from .authproxy import JSONRPCException
from .descriptors import descsum_create
from .messages import MY_SUBVERSION
from .util import (
MAX_NODES,
append_config,
delete_cookie_file,
get_auth_cookie,
get_rpc_proxy,
rpc_url,
wait_until_helper,
p2p_port,
EncodeDecimal,
)
# Default number of seconds to wait for the c1pzod process to terminate
# before giving up (see TestNode.wait_until_stopped).
C1PZOD_PROC_WAIT_TIMEOUT = 60
class FailedToStartError(Exception):
    """Raised when a node fails to start correctly."""
class ErrorMatch(Enum):
    # Strategies for matching an expected error message.
    # NOTE(review): the consumer is not visible in this chunk — presumably
    # used when asserting node startup/stderr errors; verify against callers.
    FULL_TEXT = 1       # exact full-string comparison
    FULL_REGEX = 2      # regex must match the entire message
    PARTIAL_REGEX = 3   # regex may match any part of the message
class TestNode():
"""A class for representing a c1pzod node under test.
This class contains:
- state about the node (whether it's running, etc)
- a Python subprocess.Popen object representing the running process
- an RPC connection to the node
- one or more P2P connections to the node
To make things easier for the test writer, any unrecognised messages will
be dispatched to the RPC connection."""
def __init__(self, i, datadir, *, chain, rpchost, timewait, timeout_factor,
             c1pzod, c1pzo_cli, coverage_dir, cwd, extra_conf=None,
             extra_args=None, use_cli=False, start_perf=False,
             use_valgrind=False, version=None, descriptors=False):
    """
    Kwargs:
        start_perf (bool): If True, begin profiling the node with `perf` as soon as
            the node starts.
    """
    # Identity and on-disk layout for this node instance.
    self.index = i
    self.p2p_conn_index = 1
    self.datadir = datadir
    self.c1pzoconf = os.path.join(self.datadir, "c1pzo.conf")
    self.stdout_dir = os.path.join(self.datadir, "stdout")
    self.stderr_dir = os.path.join(self.datadir, "stderr")
    self.chain = chain
    self.rpchost = rpchost
    self.rpc_timeout = timewait
    self.binary = c1pzod
    self.coverage_dir = coverage_dir
    self.cwd = cwd
    self.descriptors = descriptors
    if extra_conf is not None:
        append_config(datadir, extra_conf)
    # Most callers will just need to add extra args to the standard list below.
    # For those callers that need more flexibility, they can just set the args property directly.
    # Note that common args are set in the config file (see initialize_datadir)
    self.extra_args = extra_args
    self.version = version
    # Configuration for logging is set as command-line args rather than in the c1pzo.conf file.
    # This means that starting a c1pzod using the temp dir to debug a failed test won't
    # spam debug.log.
    self.args = [
        self.binary,
        "-datadir=" + self.datadir,
        "-logtimemicros",
        "-debug",
        "-debugexclude=libevent",
        "-debugexclude=leveldb",
        "-uacomment=testnode%d" % i,
    ]
    if use_valgrind:
        # Wrap the node command line in valgrind; suppressions file can be
        # overridden through VALGRIND_SUPPRESSIONS_FILE.
        default_suppressions_file = os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            "..", "..", "..", "contrib", "valgrind.supp")
        suppressions_file = os.getenv("VALGRIND_SUPPRESSIONS_FILE",
                                      default_suppressions_file)
        self.args = ["valgrind", "--suppressions={}".format(suppressions_file),
                     "--gen-suppressions=all", "--exit-on-first-error=yes",
                     "--error-exitcode=1", "--quiet"] + self.args

    # -logthreadnames only exists from version 0.19.0 on.
    if self.version_is_at_least(190000):
        self.args.append("-logthreadnames")

    self.cli = TestNodeCLI(c1pzo_cli, self.datadir)
    self.use_cli = use_cli
    self.start_perf = start_perf

    # Runtime state; populated by start()/wait_for_rpc_connection().
    self.running = False
    self.process = None
    self.rpc_connected = False
    self.rpc = None
    self.url = None
    self.log = logging.getLogger('TestFramework.node%d' % i)
    self.cleanup_on_exit = True # Whether to kill the node when this object goes away
    # Cache perf subprocesses here by their data output filename.
    self.perf_subprocesses = {}

    self.p2ps = []
    self.timeout_factor = timeout_factor
# Record type pairing a base58 address with its WIF private key.
AddressKeyPair = collections.namedtuple('AddressKeyPair', ['address', 'key'])
# Fixed, well-known keypairs — one per possible node index (must stay in
# sync with MAX_NODES; see get_deterministic_priv_key).
PRIV_KEYS = [
    # address , privkey
    AddressKeyPair('mjTkW3DjgyZck4KbiRusZsqTgaYTxdSz6z', 'cVpF924EspNh8KjYsfhgY96mmxvT6DgdWiTYMtMjuM74hJaU5psW'),
    AddressKeyPair('msX6jQXvxiNhx3Q62PKeLPrhrqZQdSimTg', 'cUxsWyKyZ9MAQTaAhUQWJmBbSvHMwSmuv59KgxQV7oZQU3PXN3KE'),
    AddressKeyPair('mnonCMyH9TmAsSj3M59DsbH8H63U3RKoFP', 'cTrh7dkEAeJd6b3MRX9bZK8eRmNqVCMH3LSUkE3dSFDyzjU38QxK'),
    AddressKeyPair('mqJupas8Dt2uestQDvV2NH3RU8uZh2dqQR', 'cVuKKa7gbehEQvVq717hYcbE9Dqmq7KEBKqWgWrYBa2CKKrhtRim'),
    AddressKeyPair('msYac7Rvd5ywm6pEmkjyxhbCDKqWsVeYws', 'cQDCBuKcjanpXDpCqacNSjYfxeQj8G6CAtH1Dsk3cXyqLNC4RPuh'),
    AddressKeyPair('n2rnuUnwLgXqf9kk2kjvVm8R5BZK1yxQBi', 'cQakmfPSLSqKHyMFGwAqKHgWUiofJCagVGhiB4KCainaeCSxeyYq'),
    AddressKeyPair('myzuPxRwsf3vvGzEuzPfK9Nf2RfwauwYe6', 'cQMpDLJwA8DBe9NcQbdoSb1BhmFxVjWD5gRyrLZCtpuF9Zi3a9RK'),
    AddressKeyPair('mumwTaMtbxEPUswmLBBN3vM9oGRtGBrys8', 'cSXmRKXVcoouhNNVpcNKFfxsTsToY5pvB9DVsFksF1ENunTzRKsy'),
    AddressKeyPair('mpV7aGShMkJCZgbW7F6iZgrvuPHjZjH9qg', 'cSoXt6tm3pqy43UMabY6eUTmR3eSUYFtB2iNQDGgb3VUnRsQys2k'),
    AddressKeyPair('mq4fBNdckGtvY2mijd9am7DRsbRB4KjUkf', 'cN55daf1HotwBAgAKWVgDcoppmUNDtQSfb7XLutTLeAgVc3u8hik'),
    AddressKeyPair('mpFAHDjX7KregM3rVotdXzQmkbwtbQEnZ6', 'cT7qK7g1wkYEMvKowd2ZrX1E5f6JQ7TM246UfqbCiyF7kZhorpX3'),
    AddressKeyPair('mzRe8QZMfGi58KyWCse2exxEFry2sfF2Y7', 'cPiRWE8KMjTRxH1MWkPerhfoHFn5iHPWVK5aPqjW8NxmdwenFinJ'),
]
def get_deterministic_priv_key(self):
    """Return a deterministic priv key in base58, that only depends on the node's index"""
    # The fixed table must provide exactly one keypair per possible node.
    key_count = len(self.PRIV_KEYS)
    assert key_count == MAX_NODES
    return self.PRIV_KEYS[self.index]
def _node_msg(self, msg: str) -> str:
    """Prefix *msg* with this node's index as a debugging aid."""
    return "[node {}] {}".format(self.index, msg)
def _raise_assertion_error(self, msg: str):
    """Raise an AssertionError whose message identifies this node."""
    tagged = self._node_msg(msg)
    raise AssertionError(tagged)
def __del__(self):
    """Last-resort cleanup: kill a c1pzod process that outlived the test.

    This should only fire on test failure."""
    if not (self.process and self.cleanup_on_exit):
        return
    # The logging machinery may already be torn down during interpreter
    # shutdown, so use plain print() rather than self.log here.
    print(self._node_msg("Cleaning up leftover process"))
    self.process.kill()
def __getattr__(self, name):
    """Dispatch unrecognised attribute lookups to the CLI or RPC proxy."""
    if self.use_cli:
        target = RPCOverloadWrapper(self.cli, True, self.descriptors)
    else:
        assert self.rpc_connected and self.rpc is not None, self._node_msg("Error: no RPC connection")
        target = RPCOverloadWrapper(self.rpc, descriptors=self.descriptors)
    return getattr(target, name)
def start(self, extra_args=None, *, cwd=None, stdout=None, stderr=None, **kwargs):
    """Start the node."""
    if extra_args is None:
        extra_args = self.extra_args

    # Add a new stdout and stderr file each time c1pzod is started
    if stderr is None:
        stderr = tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False)
    if stdout is None:
        stdout = tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False)
    # Keep handles so stop_node() can read back and verify stderr.
    self.stderr = stderr
    self.stdout = stdout

    if cwd is None:
        cwd = self.cwd

    # Delete any existing cookie file -- if such a file exists (eg due to
    # unclean shutdown), it will get overwritten anyway by c1pzod, and
    # potentially interfere with our attempt to authenticate
    delete_cookie_file(self.datadir, self.chain)

    # add environment variable LIBC_FATAL_STDERR_=1 so that libc errors are written to stderr and not the terminal
    subp_env = dict(os.environ, LIBC_FATAL_STDERR_="1")

    self.process = subprocess.Popen(self.args + extra_args, env=subp_env, stdout=stdout, stderr=stderr, cwd=cwd, **kwargs)

    self.running = True
    self.log.debug("c1pzod started, waiting for RPC to come up")

    if self.start_perf:
        self._start_perf()
def wait_for_rpc_connection(self):
    """Sets up an RPC connection to the c1pzod process. Returns False if unable to connect."""
    # Poll at a rate of four times per second
    poll_per_s = 4
    for _ in range(poll_per_s * self.rpc_timeout):
        if self.process.poll() is not None:
            # The node process died during startup — fail immediately
            # instead of polling out the whole timeout.
            raise FailedToStartError(self._node_msg(
                'c1pzod exited with status {} during initialization'.format(self.process.returncode)))
        try:
            rpc = get_rpc_proxy(
                rpc_url(self.datadir, self.index, self.chain, self.rpchost),
                self.index,
                timeout=self.rpc_timeout // 2,  # Shorter timeout to allow for one retry in case of ETIMEDOUT
                coveragedir=self.coverage_dir,
            )
            rpc.getblockcount()
            # If the call to getblockcount() succeeds then the RPC connection is up
            if self.version_is_at_least(190000):
                # getmempoolinfo.loaded is available since commit
                # bb8ae2c (version 0.19.0)
                wait_until_helper(lambda: rpc.getmempoolinfo()['loaded'], timeout_factor=self.timeout_factor)
                # Wait for the node to finish reindex, block import, and
                # loading the mempool. Usually importing happens fast or
                # even "immediate" when the node is started. However, there
                # is no guarantee and sometimes ThreadImport might finish
                # later. This is going to cause intermittent test failures,
                # because generally the tests assume the node is fully
                # ready after being started.
                #
                # For example, the node will reject block messages from p2p
                # when it is still importing with the error "Unexpected
                # block message received"
                #
                # The wait is done here to make tests as robust as possible
                # and prevent racy tests and intermittent failures as much
                # as possible. Some tests might not need this, but the
                # overhead is trivial, and the added guarantees are worth
                # the minimal performance cost.
            self.log.debug("RPC successfully started")
            if self.use_cli:
                # CLI mode dispatches through c1pzo-cli; keep no proxy.
                return
            self.rpc = rpc
            self.rpc_connected = True
            self.url = self.rpc.url
            return
        except JSONRPCException as e:  # Initialization phase
            # -28 RPC in warmup
            # -342 Service unavailable, RPC server started but is shutting down due to error
            if e.error['code'] != -28 and e.error['code'] != -342:
                raise  # unknown JSON RPC exception
        except ConnectionResetError:
            # This might happen when the RPC server is in warmup, but shut down before the call to getblockcount
            # succeeds. Try again to properly raise the FailedToStartError
            pass
        except OSError as e:
            if e.errno == errno.ETIMEDOUT:
                pass  # Treat identical to ConnectionResetError
            elif e.errno == errno.ECONNREFUSED:
                pass  # Port not yet open?
            else:
                raise  # unknown OS error
        except ValueError as e:  # cookie file not found and no rpcuser or rpcpassword; c1pzod is still starting
            if "No RPC credentials" not in str(e):
                raise
        time.sleep(1.0 / poll_per_s)
    self._raise_assertion_error("Unable to connect to c1pzod after {}s".format(self.rpc_timeout))
def wait_for_cookie_credentials(self):
    """Ensures auth cookie credentials can be read, e.g. for testing CLI with -rpcwait before RPC connection is up."""
    self.log.debug("Waiting for cookie credentials")
    # Poll at a rate of four times per second.
    poll_per_s = 4
    attempts = poll_per_s * self.rpc_timeout
    for _ in range(attempts):
        try:
            get_auth_cookie(self.datadir, self.chain)
        except ValueError:
            # Cookie file not found and no rpcuser/rpcpassword configured;
            # c1pzod is still starting — keep polling.
            time.sleep(1.0 / poll_per_s)
            continue
        self.log.debug("Cookie credentials successfully retrieved")
        return
    self._raise_assertion_error("Unable to retrieve cookie credentials after {}s".format(self.rpc_timeout))
def generate(self, nblocks, maxtries=1000000):
    """Mine *nblocks* blocks to this node's deterministic address."""
    self.log.debug("TestNode.generate() dispatches `generate` call to `generatetoaddress`")
    mine_to = self.get_deterministic_priv_key().address
    return self.generatetoaddress(nblocks=nblocks, address=mine_to, maxtries=maxtries)
def get_wallet_rpc(self, wallet_name):
    """Return an RPC handle scoped to the wallet *wallet_name*."""
    if self.use_cli:
        return RPCOverloadWrapper(self.cli("-rpcwallet={}".format(wallet_name)), True, self.descriptors)
    assert self.rpc_connected and self.rpc, self._node_msg("RPC not connected")
    # URL-encode the wallet name so arbitrary names form a valid endpoint.
    wallet_path = "wallet/{}".format(urllib.parse.quote(wallet_name))
    return RPCOverloadWrapper(self.rpc / wallet_path, descriptors=self.descriptors)
def version_is_at_least(self, ver):
    """True when the node's version is at least *ver*.

    A version of None (presumably "current build") always qualifies."""
    if self.version is None:
        return True
    return self.version >= ver
def stop_node(self, expected_stderr='', *, wait=0, wait_until_stopped=True):
    """Stop the node."""
    if not self.running:
        return
    self.log.debug("Stopping node")
    try:
        # Do not use wait argument when testing older nodes, e.g. in feature_backwards_compatibility.py
        if self.version_is_at_least(180000):
            self.stop(wait=wait)
        else:
            self.stop()
    except http.client.CannotSendRequest:
        self.log.exception("Unable to stop node.")

    # If there are any running perf processes, stop them.
    for profile_name in tuple(self.perf_subprocesses.keys()):
        self._stop_perf(profile_name)

    # Check that stderr is as expected
    self.stderr.seek(0)
    stderr = self.stderr.read().decode('utf-8').strip()
    if stderr != expected_stderr:
        raise AssertionError("Unexpected stderr {} != {}".format(stderr, expected_stderr))

    self.stdout.close()
    self.stderr.close()

    # Drop all P2P connections tied to this node.
    del self.p2ps[:]

    if wait_until_stopped:
        self.wait_until_stopped()
def is_node_stopped(self):
    """Report whether the node process has exited.

    Returns True once the process is gone.  On the transition to
    stopped, this also frees self.process and resets the RPC state."""
    if not self.running:
        return True
    exit_code = self.process.poll()
    if exit_code is None:
        return False  # still running
    # The process terminated; a clean shutdown must exit with code 0.
    assert exit_code == 0, self._node_msg(
        "Node returned non-zero exit code (%d) when stopping" % exit_code)
    self.running = False
    self.process = None
    self.rpc_connected = False
    self.rpc = None
    self.log.debug("Node stopped")
    return True
def wait_until_stopped(self, timeout=C1PZOD_PROC_WAIT_TIMEOUT):
    """Block until is_node_stopped() reports the node has exited (scaled by timeout_factor)."""
    wait_until_helper(self.is_node_stopped, timeout=timeout, timeout_factor=self.timeout_factor)
@contextlib.contextmanager
def assert_debug_log(self, expected_msgs, unexpected_msgs=None, timeout=2):
    """Context manager asserting that every string in *expected_msgs* — and none
    in *unexpected_msgs* — is appended to the node's debug.log while the managed
    block runs. Polls for up to *timeout* seconds (scaled by timeout_factor)."""
    if unexpected_msgs is None:
        unexpected_msgs = []
    time_end = time.time() + timeout * self.timeout_factor
    debug_log = os.path.join(self.datadir, self.chain, 'debug.log')
    with open(debug_log, encoding='utf-8') as dl:
        # Remember the current end of the log; only output written after
        # this point is inspected.
        dl.seek(0, 2)
        prev_size = dl.tell()
    yield
    while True:
        found = True
        with open(debug_log, encoding='utf-8') as dl:
            dl.seek(prev_size)
            log = dl.read()
        print_log = " - " + "\n - ".join(log.splitlines())
        for unexpected_msg in unexpected_msgs:
            if re.search(re.escape(unexpected_msg), log, flags=re.MULTILINE):
                self._raise_assertion_error('Unexpected message "{}" partially matches log:\n\n{}\n\n'.format(unexpected_msg, print_log))
        for expected_msg in expected_msgs:
            if re.search(re.escape(expected_msg), log, flags=re.MULTILINE) is None:
                found = False
        if found:
            return
        if time.time() >= time_end:
            break
        # Log lines are written asynchronously by the node; poll briefly.
        time.sleep(0.05)
    self._raise_assertion_error('Expected messages "{}" does not partially match log:\n\n{}\n\n'.format(str(expected_msgs), print_log))
@contextlib.contextmanager
def profile_with_perf(self, profile_name):
    """
    Context manager that allows easy profiling of node activity using `perf`.
    See `test/functional/README.md` for details on perf usage.
    Args:
        profile_name (str): This string will be appended to the
            profile data filename generated by perf.
    """
    subp = self._start_perf(profile_name)
    yield
    # _start_perf returns None when perf cannot be used on this platform;
    # only stop a profiler that was actually started.
    if subp:
        self._stop_perf(profile_name)
def _start_perf(self, profile_name=None):
    """Start a perf process to profile this node.
    Returns the subprocess running perf, or None when perf is unavailable."""
    subp = None

    def test_success(cmd):
        # Probe helper: True when `cmd` exits 0; all output is discarded.
        return subprocess.call(
            # shell=True required for pipe use below
            cmd, shell=True,
            stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL) == 0

    if not sys.platform.startswith('linux'):
        self.log.warning("Can't profile with perf; only available on Linux platforms")
        return None
    if not test_success('which perf'):
        self.log.warning("Can't profile with perf; must install perf-tools")
        return None
    if not test_success('readelf -S {} | grep .debug_str'.format(shlex.quote(self.binary))):
        # Not fatal: profiling still works, the report is just less readable.
        self.log.warning(
            "perf output won't be very useful without debug symbols compiled into c1pzod")
    # delete=False keeps the profile data on disk for the user to inspect.
    output_path = tempfile.NamedTemporaryFile(
        dir=self.datadir,
        prefix="{}.perf.data.".format(profile_name or 'test'),
        delete=False,
    ).name
    cmd = [
        'perf', 'record',
        '-g',  # Record the callgraph.
        '--call-graph', 'dwarf',  # Compatibility for gcc's --fomit-frame-pointer.
        '-F', '101',  # Sampling frequency in Hz.
        '-p', str(self.process.pid),
        '-o', output_path,
    ]
    subp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    self.perf_subprocesses[profile_name] = subp
    return subp
def _stop_perf(self, profile_name):
    """Stop (and pop) a perf subprocess."""
    subp = self.perf_subprocesses.pop(profile_name)
    # Recover the output file path from the recorded command line ('-o <path>').
    output_path = subp.args[subp.args.index('-o') + 1]
    subp.terminate()
    subp.wait(timeout=10)
    stderr = subp.stderr.read().decode()
    if 'Consider tweaking /proc/sys/kernel/perf_event_paranoid' in stderr:
        # Kernel settings prevented sampling; tell the user how to enable it.
        self.log.warning(
            "perf couldn't collect data! Try "
            "'sudo sysctl -w kernel.perf_event_paranoid=-1'")
    else:
        report_cmd = "perf report -i {}".format(output_path)
        self.log.info("See perf output by running '{}'".format(report_cmd))
def assert_start_raises_init_error(self, extra_args=None, expected_msg=None, match=ErrorMatch.FULL_TEXT, *args, **kwargs):
    """Attempt to start the node and expect it to raise an error.

    extra_args: extra arguments to pass through to c1pzod
    expected_msg: regex that stderr should match when c1pzod fails
    match: how expected_msg is compared to stderr (full text, full regex,
        or partial regex).

    Will throw if c1pzod starts without an error.
    Will throw if an expected_msg is provided and it does not match c1pzod's stdout."""
    with tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False) as log_stderr, \
         tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False) as log_stdout:
        try:
            self.start(extra_args, stdout=log_stdout, stderr=log_stderr, *args, **kwargs)
            ret = self.process.wait(timeout=self.rpc_timeout)
            self.log.debug(self._node_msg(f'c1pzod exited with status {ret} during initialization'))
            self.running = False
            self.process = None
            # Check stderr for expected message
            if expected_msg is not None:
                log_stderr.seek(0)
                stderr = log_stderr.read().decode('utf-8').strip()
                if match == ErrorMatch.PARTIAL_REGEX:
                    if re.search(expected_msg, stderr, flags=re.MULTILINE) is None:
                        self._raise_assertion_error(
                            'Expected message "{}" does not partially match stderr:\n"{}"'.format(expected_msg, stderr))
                elif match == ErrorMatch.FULL_REGEX:
                    if re.fullmatch(expected_msg, stderr) is None:
                        self._raise_assertion_error(
                            'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr))
                elif match == ErrorMatch.FULL_TEXT:
                    if expected_msg != stderr:
                        self._raise_assertion_error(
                            'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr))
        except subprocess.TimeoutExpired:
            # The node is still running after rpc_timeout: startup did NOT fail,
            # which is itself the test failure here.
            self.process.kill()
            self.running = False
            self.process = None
            assert_msg = f'c1pzod should have exited within {self.rpc_timeout}s '
            if expected_msg is None:
                assert_msg += "with an error"
            else:
                assert_msg += "with expected error " + expected_msg
            self._raise_assertion_error(assert_msg)
def add_p2p_connection(self, p2p_conn, *, wait_for_verack=True, **kwargs):
    """Add an inbound p2p connection to the node.

    This method adds the p2p connection to the self.p2ps list and also
    returns the connection to the caller.

    Extra kwargs are forwarded to peer_connect(); dstport/dstaddr default
    to this node's p2p port on localhost."""
    if 'dstport' not in kwargs:
        kwargs['dstport'] = p2p_port(self.index)
    if 'dstaddr' not in kwargs:
        kwargs['dstaddr'] = '127.0.0.1'
    p2p_conn.peer_connect(**kwargs, net=self.chain, timeout_factor=self.timeout_factor)()
    self.p2ps.append(p2p_conn)
    p2p_conn.wait_until(lambda: p2p_conn.is_connected, check_connected=False)
    if wait_for_verack:
        # Wait for the node to send us the version and verack
        p2p_conn.wait_for_verack()
        # At this point we have sent our version message and received the version and verack, however the full node
        # has not yet received the verack from us (in reply to their version). So, the connection is not yet fully
        # established (fSuccessfullyConnected).
        #
        # This shouldn't lead to any issues when sending messages, since the verack will be in-flight before the
        # message we send. However, it might lead to races where we are expecting to receive a message. E.g. a
        # transaction that will be added to the mempool as soon as we return here.
        #
        # So syncing here is redundant when we only want to send a message, but the cost is low (a few milliseconds)
        # in comparison to the upside of making tests less fragile and unexpected intermittent errors less likely.
        p2p_conn.sync_with_ping()
    return p2p_conn
def add_outbound_p2p_connection(self, p2p_conn, *, p2p_idx, connection_type="outbound-full-relay", **kwargs):
    """Add an outbound p2p connection from node. Either
    full-relay("outbound-full-relay") or
    block-relay-only("block-relay-only") connection.

    This method adds the p2p connection to the self.p2ps list and returns
    the connection to the caller.

    p2p_idx: index used to derive a unique listening port for this peer."""
    def addconnection_callback(address, port):
        # Invoked once the test peer is listening: instruct the node to
        # dial out to it with the requested connection type.
        self.log.debug("Connecting to %s:%d %s" % (address, port, connection_type))
        self.addconnection('%s:%d' % (address, port), connection_type)
    p2p_conn.peer_accept_connection(connect_cb=addconnection_callback, connect_id=p2p_idx + 1, net=self.chain, timeout_factor=self.timeout_factor, **kwargs)()
    p2p_conn.wait_for_connect()
    self.p2ps.append(p2p_conn)
    p2p_conn.wait_for_verack()
    p2p_conn.sync_with_ping()
    return p2p_conn
def num_test_p2p_connections(self):
    """Return number of test framework p2p connections to the node."""
    # Framework peers are identified by their subversion string; hoist the
    # decode out of the per-peer comparison.
    our_subversion = MY_SUBVERSION.decode("utf-8")
    return sum(1 for peer in self.getpeerinfo() if peer['subver'] == our_subversion)
def disconnect_p2ps(self):
    """Close all p2p connections to the node."""
    for p in self.p2ps:
        p.peer_disconnect()
    del self.p2ps[:]
    # Wait for the node to actually drop the connections, so tests don't
    # race against half-closed sockets.
    wait_until_helper(lambda: self.num_test_p2p_connections() == 0, timeout_factor=self.timeout_factor)
class TestNodeCLIAttr:
    """Callable proxy for one CLI RPC command.

    Produced by TestNodeCLI.__getattr__, so that ``cli.getblockcount(...)``
    dispatches to ``cli.send_cli('getblockcount', ...)``.
    """

    def __init__(self, cli, command):
        self.cli = cli          # owning TestNodeCLI instance
        self.command = command  # RPC command name this proxy represents

    def __call__(self, *args, **kwargs):
        """Run the command immediately via the owning CLI wrapper."""
        dispatch = self.cli.send_cli
        return dispatch(self.command, *args, **kwargs)

    def get_request(self, *args, **kwargs):
        """Return a zero-argument thunk that performs the call when invoked."""
        return lambda: self(*args, **kwargs)
def arg_to_cli(arg):
    """Convert a Python value to the string representation expected by c1pzo-cli."""
    if arg is None:
        return 'null'
    if isinstance(arg, bool):
        # Must come before the str() fallthrough: the CLI wants 'true'/'false'.
        return str(arg).lower()
    if isinstance(arg, (dict, list)):
        return json.dumps(arg, default=EncodeDecimal)
    return str(arg)
class TestNodeCLI():
    """Interface to c1pzo-cli for an individual node"""

    def __init__(self, binary, datadir):
        self.options = []      # extra command-line options prepended to every call
        self.binary = binary   # path to the c1pzo-cli executable
        self.datadir = datadir # node datadir, passed as -datadir on every call
        self.input = None      # optional stdin payload (e.g. for -stdin usage)
        self.log = logging.getLogger('TestFramework.c1pzocli')

    def __call__(self, *options, input=None):
        # TestNodeCLI is callable with c1pzo-cli command-line options
        # Returns a fresh instance so per-call options don't leak into self.
        cli = TestNodeCLI(self.binary, self.datadir)
        cli.options = [str(o) for o in options]
        cli.input = input
        return cli

    def __getattr__(self, command):
        # Unknown attribute access becomes an RPC command dispatcher.
        return TestNodeCLIAttr(self, command)

    def batch(self, requests):
        """Run a sequence of zero-arg request thunks, collecting results/errors per item."""
        results = []
        for request in requests:
            try:
                results.append(dict(result=request()))
            except JSONRPCException as e:
                results.append(dict(error=e))
        return results

    def send_cli(self, command=None, *args, **kwargs):
        """Run c1pzo-cli command. Deserializes returned string as python object."""
        pos_args = [arg_to_cli(arg) for arg in args]
        named_args = [str(key) + "=" + arg_to_cli(value) for (key, value) in kwargs.items()]
        assert not (pos_args and named_args), "Cannot use positional arguments and named arguments in the same c1pzo-cli call"
        p_args = [self.binary, "-datadir=" + self.datadir] + self.options
        if named_args:
            p_args += ["-named"]
        if command is not None:
            p_args += [command]
        p_args += pos_args + named_args
        self.log.debug("Running c1pzo-cli {}".format(p_args[2:]))
        process = subprocess.Popen(p_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
        cli_stdout, cli_stderr = process.communicate(input=self.input)
        returncode = process.poll()
        if returncode:
            # Translate the CLI's "error code/message" stderr format into the
            # same JSONRPCException tests get from the HTTP transport.
            match = re.match(r'error code: ([-0-9]+)\nerror message:\n(.*)', cli_stderr)
            if match:
                code, message = match.groups()
                raise JSONRPCException(dict(code=int(code), message=message))
            # Ignore cli_stdout, raise with cli_stderr
            raise subprocess.CalledProcessError(returncode, self.binary, output=cli_stderr)
        try:
            return json.loads(cli_stdout, parse_float=decimal.Decimal)
        except (json.JSONDecodeError, decimal.InvalidOperation):
            # Not JSON (e.g. plain-text help output): return the raw string.
            return cli_stdout.rstrip("\n")
class RPCOverloadWrapper():
    """Proxy around an RPC connection (or CLI wrapper).

    Unknown attributes are delegated to the wrapped ``rpc`` object. The
    wallet-import RPCs below are overloaded so that legacy-wallet style calls
    are transparently translated into ``importdescriptors`` requests when the
    loaded wallet is a descriptor wallet.
    """

    def __init__(self, rpc, cli=False, descriptors=False):
        self.rpc = rpc                  # underlying RPC connection / CLI
        self.is_cli = cli               # True when commands go through c1pzo-cli
        self.descriptors = descriptors  # default wallet type for createwallet()

    def __getattr__(self, name):
        # Anything not overloaded here falls through to the wrapped connection.
        return getattr(self.rpc, name)

    def _is_legacy_wallet(self):
        """Return True when the currently loaded wallet is not a descriptor wallet."""
        # Absent or falsy 'descriptors' field means a legacy wallet.
        return not self.getwalletinfo().get('descriptors', False)

    def _import_descriptor(self, desc, label, rescan):
        """Import a single descriptor, raising JSONRPCException on failure."""
        req = [{
            'desc': desc,
            'timestamp': 0 if rescan else 'now',
            'label': label if label else ''
        }]
        import_res = self.importdescriptors(req)
        if not import_res[0]['success']:
            raise JSONRPCException(import_res[0]['error'])

    def createwallet(self, wallet_name, disable_private_keys=None, blank=None, passphrase='', avoid_reuse=None, descriptors=None, load_on_startup=None):
        # Default to the wrapper-wide descriptor setting when the caller
        # does not choose a wallet type explicitly.
        if descriptors is None:
            descriptors = self.descriptors
        return self.__getattr__('createwallet')(wallet_name, disable_private_keys, blank, passphrase, avoid_reuse, descriptors, load_on_startup)

    def importprivkey(self, privkey, label=None, rescan=None):
        if self._is_legacy_wallet():
            return self.__getattr__('importprivkey')(privkey, label, rescan)
        self._import_descriptor(descsum_create('combo(' + privkey + ')'), label, rescan)

    def addmultisigaddress(self, nrequired, keys, label=None, address_type=None):
        if self._is_legacy_wallet():
            return self.__getattr__('addmultisigaddress')(nrequired, keys, label, address_type)
        cms = self.createmultisig(nrequired, keys, address_type)
        # Multisig imports always use timestamp 0 (full rescan of the descriptor).
        req = [{
            'desc': cms['descriptor'],
            'timestamp': 0,
            'label': label if label else ''
        }]
        import_res = self.importdescriptors(req)
        if not import_res[0]['success']:
            raise JSONRPCException(import_res[0]['error'])
        return cms

    def importpubkey(self, pubkey, label=None, rescan=None):
        if self._is_legacy_wallet():
            return self.__getattr__('importpubkey')(pubkey, label, rescan)
        self._import_descriptor(descsum_create('combo(' + pubkey + ')'), label, rescan)

    def importaddress(self, address, label=None, rescan=None, p2sh=None):
        if self._is_legacy_wallet():
            return self.__getattr__('importaddress')(address, label, rescan, p2sh)
        # Hex strings are imported as raw() scripts, everything else as addr().
        # Fix: was a bare `except:` that also swallowed unrelated errors from
        # descsum_create; only "not parseable as hex" is expected here.
        try:
            int(address, 16)
            is_hex = True
        except ValueError:
            is_hex = False
        desc = descsum_create(('raw(' if is_hex else 'addr(') + address + ')')
        reqs = [{
            'desc': desc,
            'timestamp': 0 if rescan else 'now',
            'label': label if label else ''
        }]
        if is_hex and p2sh:
            # Also import the P2SH wrapping of the raw script.
            reqs.append({
                'desc': descsum_create('p2sh(raw(' + address + '))'),
                'timestamp': 0 if rescan else 'now',
                'label': label if label else ''
            })
        import_res = self.importdescriptors(reqs)
        for res in import_res:
            if not res['success']:
                raise JSONRPCException(res['error'])
| 44.69344
| 238
| 0.623675
|
4a0e3a0d10bb1ccb886298463bbe12de1e306adb
| 882
|
py
|
Python
|
comfy/moe.py
|
bmintz/python-snippets
|
982861c173bf4bcd5d908514a9e8b1914a580a5d
|
[
"CC0-1.0"
] | 2
|
2018-11-12T10:33:13.000Z
|
2019-02-24T05:01:40.000Z
|
comfy/moe.py
|
LyricLy/python-snippets
|
9d868b7bbccd793ea1dc513f51290963584a1dee
|
[
"CC0-1.0"
] | null | null | null |
comfy/moe.py
|
LyricLy/python-snippets
|
9d868b7bbccd793ea1dc513f51290963584a1dee
|
[
"CC0-1.0"
] | 2
|
2018-11-24T08:16:59.000Z
|
2019-02-24T04:41:30.000Z
|
import aiofiles
import aiohttp
import requests
# Module-level blocking HTTP session, reused across upload_file_sync() calls
# so connections to comfy.moe are pooled.
session = requests.Session()
async def get_session():
    """avoids "ClientSession created outside of coroutine" :^)

    Creates the module-level aiohttp session from inside a coroutine so
    aiohttp does not warn about construction outside the event loop.
    NOTE(review): a fresh session is created on every call and stored in the
    ``aio_session`` global; the caller is responsible for closing it.
    """
    global aio_session
    aio_session = aiohttp.ClientSession()
async def upload_file(selected_file):
    """Upload *selected_file* to comfy.moe asynchronously.

    Returns the parsed JSON response from the upload endpoint.
    """
    await get_session()
    try:
        async with aiofiles.open(selected_file, mode='r') as f:
            payload = {"files[]": await f.read()}
            async with aio_session.post(url="https://comfy.moe/upload.php", data=payload) as r:
                json = await r.json()
                return json
    finally:
        # Fix: the ClientSession created by get_session() was never closed,
        # leaking a connector (and emitting "Unclosed client session"
        # warnings) on every call.
        await aio_session.close()
def upload_file_sync(selected_file):
    """Upload *selected_file* to comfy.moe synchronously.

    Returns the list of uploaded-file URLs on success, or None (after
    printing a message) when the connection fails.
    """
    try:
        with open(selected_file, 'r') as f:
            # Fix: the session.post( call was missing its closing parenthesis,
            # which made this function a syntax error.
            response = session.post(
                url="https://comfy.moe/upload.php",
                files={"files[]": f.read()},
            )
            response = response.json()
            print(response)
            return [file['url'] for file in response['files']]
    except requests.exceptions.ConnectionError:
        print("Upload to https://comfy.moe/ failed")
| 25.941176
| 84
| 0.71542
|
4a0e3b3e1d006637178c64ade7787dbedf35be5a
| 8,239
|
py
|
Python
|
src/tests/test_issue7.py
|
davidshinn/pymdptoolbox
|
7c96789cc80e280437005c12065cf70266c11636
|
[
"BSD-3-Clause"
] | 472
|
2015-01-09T06:30:05.000Z
|
2022-03-29T21:05:36.000Z
|
src/tests/test_issue7.py
|
davidshinn/pymdptoolbox
|
7c96789cc80e280437005c12065cf70266c11636
|
[
"BSD-3-Clause"
] | 30
|
2015-01-01T06:40:14.000Z
|
2021-03-25T11:36:14.000Z
|
src/tests/test_issue7.py
|
davidshinn/pymdptoolbox
|
7c96789cc80e280437005c12065cf70266c11636
|
[
"BSD-3-Clause"
] | 210
|
2015-03-02T07:13:41.000Z
|
2022-03-31T23:42:25.000Z
|
# -*- coding: utf-8 -*-
import numpy as np
import scipy.sparse as sp
import mdptoolbox
class BaseTestIssue7(object):
    """Shared fixture for mdptoolbox issue 7: verify each MDP algorithm accepts
    every dense/sparse combination of the transition (P) and reward (R)
    matrices, computes the expected per-state rewards, and converges to the
    known optimal policy (QLearning is stochastic, so its R/policy are not
    asserted)."""

    # Discount factor used for every algorithm under test.
    discount = 0.9
    # P[a][s, s'] : transition probabilities for each of the 2 actions over 9 states.
    P = [None] * 2
    P[0] = np.array([
        [ 0.  ,  0.  ,  0.  ,  0.64,  0.  ,  0.  ,  0.36,  0.  ,  0.  ],
        [ 0.  ,  0.  ,  0.  ,  0.  ,  0.93,  0.  ,  0.  ,  0.07,  0.  ],
        [ 0.  ,  0.  ,  0.  ,  0.  ,  0.  ,  0.2 ,  0.  ,  0.  ,  0.8 ],
        [ 0.  ,  0.  ,  0.  ,  1.  ,  0.  ,  0.  ,  0.  ,  0.  ,  0.  ],
        [ 0.  ,  0.  ,  0.  ,  0.  ,  1.  ,  0.  ,  0.  ,  0.  ,  0.  ],
        [ 0.  ,  0.  ,  0.  ,  0.  ,  0.  ,  1.  ,  0.  ,  0.  ,  0.  ],
        [ 0.  ,  0.  ,  0.  ,  0.  ,  0.  ,  0.  ,  1.  ,  0.  ,  0.  ],
        [ 0.  ,  0.  ,  0.  ,  0.  ,  0.  ,  0.  ,  0.  ,  1.  ,  0.  ],
        [ 0.  ,  0.  ,  0.  ,  0.  ,  0.  ,  0.  ,  0.  ,  0.  ,  1.  ]
    ])
    P[1] = np.array([
        [ 0.  ,  0.  ,  0.4 ,  0.  ,  0.6 ,  0.  ,  0.  ,  0.  ,  0.  ],
        [ 0.  ,  0.  ,  0.  ,  0.  ,  1.  ,  0.  ,  0.  ,  0.  ,  0.  ],
        [ 0.  ,  0.  ,  1.  ,  0.  ,  0.  ,  0.  ,  0.  ,  0.  ,  0.  ],
        [ 0.  ,  0.  ,  0.  ,  0.  ,  0.87,  0.13,  0.  ,  0.  ,  0.  ],
        [ 0.  ,  0.  ,  0.  ,  0.  ,  1.  ,  0.  ,  0.  ,  0.  ,  0.  ],
        [ 0.  ,  0.  ,  0.  ,  0.  ,  0.  ,  1.  ,  0.  ,  0.  ,  0.  ],
        [ 0.  ,  0.  ,  0.  ,  0.  ,  0.  ,  0.  ,  0.  ,  0.11,  0.89],
        [ 0.  ,  0.  ,  0.  ,  0.  ,  0.  ,  0.  ,  0.  ,  1.  ,  0.  ],
        [ 0.  ,  0.  ,  0.  ,  0.  ,  0.  ,  0.  ,  0.  ,  0.  ,  1.  ]
    ])
    # R[a][s, s'] : rewards; only action 1 transitions into state 4 pay out.
    R = [None] * 2
    R[0] = np.zeros((9, 9))
    R[1] = np.array([
        [ 0.,  0.,  0.,  0.,  1.,  0.,  0.,  0.,  0.],
        [ 0.,  0.,  0.,  0.,  1.,  0.,  0.,  0.,  0.],
        [ 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.],
        [ 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.],
        [ 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.],
        [ 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.],
        [ 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.],
        [ 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.],
        [ 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.]
    ])
    # Expected per-state expected rewards (sum over s' of P*R) per action.
    computed_R = (np.array((0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)),
                  np.array((0.6, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)))
    # Known optimal policy for the above MDP.
    policy = (1, 1, 0, 0, 0, 0, 0, 0, 0)

    def dense_P_dense_R(self, algorithm):
        # Both P and R passed as dense ndarrays; sdp.P/sdp.R must stay dense.
        sdp = algorithm(self.P, self.R, self.discount)
        if algorithm != mdptoolbox.mdp.QLearning:
            assert (sdp.R[0] == self.computed_R[0]).all()
            assert (sdp.R[1] == self.computed_R[1]).all()
        assert not sp.issparse(sdp.P[0])
        assert not sp.issparse(sdp.P[1])
        assert not sp.issparse(sdp.R[0])
        assert not sp.issparse(sdp.R[1])
        sdp.run()
        if algorithm != mdptoolbox.mdp.QLearning:
            assert sdp.policy == self.policy, sdp.policy

    def sparse_P_dense_R(self, algorithm):
        # Sparse transitions, dense rewards.
        P = list(map(sp.csr_matrix, self.P))
        sdp = algorithm(P, self.R, self.discount)
        if algorithm != mdptoolbox.mdp.QLearning:
            assert (sdp.R[0] == self.computed_R[0]).all()
            assert (sdp.R[1] == self.computed_R[1]).all()
        assert sp.issparse(sdp.P[0])
        assert sp.issparse(sdp.P[1])
        assert not sp.issparse(sdp.R[0])
        assert not sp.issparse(sdp.R[1])
        sdp.run()
        if algorithm != mdptoolbox.mdp.QLearning:
            assert sdp.policy == self.policy, sdp.policy

    def dense_P_sparse_R(self, algorithm):
        # Dense transitions, sparse rewards.
        R = list(map(sp.csr_matrix, self.R))
        sdp = algorithm(self.P, R, self.discount)
        if algorithm != mdptoolbox.mdp.QLearning:
            assert (sdp.R[0] == self.computed_R[0]).all()
            assert (sdp.R[1] == self.computed_R[1]).all()
        assert not sp.issparse(sdp.P[0])
        assert not sp.issparse(sdp.P[1])
        #assert sp.issparse(sdp.R[0])
        #assert sp.issparse(sdp.R[1])
        sdp.run()
        if algorithm != mdptoolbox.mdp.QLearning:
            assert sdp.policy == self.policy, sdp.policy

    def sparse_P_sparse_R(self, algorithm):
        # Both matrices sparse.
        P = list(map(sp.csr_matrix, self.P))
        R = list(map(sp.csr_matrix, self.R))
        sdp = algorithm(P, R, self.discount)
        if algorithm != mdptoolbox.mdp.QLearning:
            assert (sdp.R[0] == self.computed_R[0]).all()
            assert (sdp.R[1] == self.computed_R[1]).all()
        assert sp.issparse(sdp.P[0])
        assert sp.issparse(sdp.P[1])
        #assert sp.issparse(sdp.R[0])
        #assert sp.issparse(sdp.R[1])
        sdp.run()
        if algorithm != mdptoolbox.mdp.QLearning:
            assert sdp.policy == self.policy, sdp.policy
# Needs some work before can use, need to pass horizon
#class TestFiniteHorizon(BaseTestIssue7):
#
# def test_dense_P_dense_R(self):
# self.dense_P_dense_R(mdptoolbox.mdp.FiniteHorizon)
#
# def test_sparse_P_dense_R(self):
# self.sparse_P_dense_R(mdptoolbox.mdp.FiniteHorizon)
#
# def test_dense_P_sparse_R(self):
# self.dense_P_sparse_R(mdptoolbox.mdp.FiniteHorizon)
#
# def test_sparse_P_sparse_R(self):
# self.sparse_P_sparse_R(mdptoolbox.mdp.FiniteHorizon)
#class TestLP(BaseTestIssue7):
#
# def test_dense_P_dense_R(self):
# self.dense_P_dense_R(mdptoolbox.mdp.LP)
#
# def test_sparse_P_dense_R(self):
# self.sparse_P_dense_R(mdptoolbox.mdp.LP)
#
# def test_dense_P_sparse_R(self):
# self.dense_P_sparse_R(mdptoolbox.mdp.LP)
#
# def test_sparse_P_sparse_R(self):
# self.sparse_P_sparse_R(mdptoolbox.mdp.LP)
class TestPolicyIteration(BaseTestIssue7):
    """Run the issue-7 dense/sparse matrix checks against PolicyIteration."""

    def test_dense_P_dense_R(self):
        algorithm = mdptoolbox.mdp.PolicyIteration
        self.dense_P_dense_R(algorithm)

    def test_sparse_P_dense_R(self):
        algorithm = mdptoolbox.mdp.PolicyIteration
        self.sparse_P_dense_R(algorithm)

    def test_dense_P_sparse_R(self):
        algorithm = mdptoolbox.mdp.PolicyIteration
        self.dense_P_sparse_R(algorithm)

    def test_sparse_P_sparse_R(self):
        algorithm = mdptoolbox.mdp.PolicyIteration
        self.sparse_P_sparse_R(algorithm)
class TestPolicyIterationModified(BaseTestIssue7):
    """Run the issue-7 dense/sparse matrix checks against PolicyIterationModified."""

    def test_dense_P_dense_R(self):
        algorithm = mdptoolbox.mdp.PolicyIterationModified
        self.dense_P_dense_R(algorithm)

    def test_sparse_P_dense_R(self):
        algorithm = mdptoolbox.mdp.PolicyIterationModified
        self.sparse_P_dense_R(algorithm)

    def test_dense_P_sparse_R(self):
        algorithm = mdptoolbox.mdp.PolicyIterationModified
        self.dense_P_sparse_R(algorithm)

    def test_sparse_P_sparse_R(self):
        algorithm = mdptoolbox.mdp.PolicyIterationModified
        self.sparse_P_sparse_R(algorithm)
class TestQLearning(BaseTestIssue7):
    """Run the issue-7 dense/sparse matrix checks against QLearning.

    The base helpers skip the reward/policy assertions for this algorithm
    since QLearning is stochastic."""

    def test_dense_P_dense_R(self):
        algorithm = mdptoolbox.mdp.QLearning
        self.dense_P_dense_R(algorithm)

    def test_sparse_P_dense_R(self):
        algorithm = mdptoolbox.mdp.QLearning
        self.sparse_P_dense_R(algorithm)

    def test_dense_P_sparse_R(self):
        algorithm = mdptoolbox.mdp.QLearning
        self.dense_P_sparse_R(algorithm)

    def test_sparse_P_sparse_R(self):
        algorithm = mdptoolbox.mdp.QLearning
        self.sparse_P_sparse_R(algorithm)
class TestValueIteration(BaseTestIssue7):
    """Run the issue-7 dense/sparse matrix checks against ValueIteration."""

    def test_dense_P_dense_R(self):
        algorithm = mdptoolbox.mdp.ValueIteration
        self.dense_P_dense_R(algorithm)

    def test_sparse_P_dense_R(self):
        algorithm = mdptoolbox.mdp.ValueIteration
        self.sparse_P_dense_R(algorithm)

    def test_dense_P_sparse_R(self):
        algorithm = mdptoolbox.mdp.ValueIteration
        self.dense_P_sparse_R(algorithm)

    def test_sparse_P_sparse_R(self):
        algorithm = mdptoolbox.mdp.ValueIteration
        self.sparse_P_sparse_R(algorithm)
class TestRelativeValueIteration(BaseTestIssue7):
    """Run the issue-7 dense/sparse matrix checks against RelativeValueIteration."""

    def test_dense_P_dense_R(self):
        algorithm = mdptoolbox.mdp.RelativeValueIteration
        self.dense_P_dense_R(algorithm)

    def test_sparse_P_dense_R(self):
        algorithm = mdptoolbox.mdp.RelativeValueIteration
        self.sparse_P_dense_R(algorithm)

    def test_dense_P_sparse_R(self):
        algorithm = mdptoolbox.mdp.RelativeValueIteration
        self.dense_P_sparse_R(algorithm)

    def test_sparse_P_sparse_R(self):
        algorithm = mdptoolbox.mdp.RelativeValueIteration
        self.sparse_P_sparse_R(algorithm)
class TestValueIterationGS(BaseTestIssue7):
    """Run the issue-7 dense/sparse matrix checks against ValueIterationGS."""

    def test_dense_P_dense_R(self):
        algorithm = mdptoolbox.mdp.ValueIterationGS
        self.dense_P_dense_R(algorithm)

    def test_sparse_P_dense_R(self):
        algorithm = mdptoolbox.mdp.ValueIterationGS
        self.sparse_P_dense_R(algorithm)

    def test_dense_P_sparse_R(self):
        algorithm = mdptoolbox.mdp.ValueIterationGS
        self.dense_P_sparse_R(algorithm)

    def test_sparse_P_sparse_R(self):
        algorithm = mdptoolbox.mdp.ValueIterationGS
        self.sparse_P_sparse_R(algorithm)
| 37.280543
| 74
| 0.557956
|
4a0e3b40a39b117936db038f95cf7d1f15d99cbe
| 1,327
|
py
|
Python
|
bubulle-py/html_report/htmlparser_utils.py
|
Maxim-Costa/Bubulle-Norminette
|
2801d2138ec58f7bb3626dc7c89f75adfdfaa0be
|
[
"MIT"
] | 52
|
2020-10-17T11:07:41.000Z
|
2022-03-22T21:02:37.000Z
|
bubulle-py/html_report/htmlparser_utils.py
|
Maxim-Costa/Bubulle-Norminette
|
2801d2138ec58f7bb3626dc7c89f75adfdfaa0be
|
[
"MIT"
] | 11
|
2020-12-05T22:46:10.000Z
|
2022-03-22T21:17:21.000Z
|
bubulle-py/html_report/htmlparser_utils.py
|
Maxim-Costa/Bubulle-Norminette
|
2801d2138ec58f7bb3626dc7c89f75adfdfaa0be
|
[
"MIT"
] | 7
|
2020-10-18T16:59:57.000Z
|
2021-12-02T20:41:11.000Z
|
#
# Copyright (c) 2020 aureliancnx
#
# MIT LICENSE
#
# This project is part of aureliancnx.
# See https://github.com/aureliancnx/Bubulle-Norminette for further info.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.#
def fill_variable(content, variable, value):
    """Replace every ``{{variable}}`` placeholder in *content* with *value*."""
    placeholder = "{{" + variable + "}}"
    return content.replace(placeholder, value)
| 47.392857
| 80
| 0.767898
|
4a0e3bce079a3465d4aa4f44fd7ff94e452e5cfc
| 1,739
|
py
|
Python
|
zipline/utils/sqlite_utils.py
|
leonarduschen/zipline
|
5e6c9fce7e0f812bd181024ad192ca2976d49667
|
[
"Apache-2.0"
] | 14,525
|
2015-01-01T02:57:52.000Z
|
2022-03-31T18:16:35.000Z
|
zipline/utils/sqlite_utils.py
|
leonarduschen/zipline
|
5e6c9fce7e0f812bd181024ad192ca2976d49667
|
[
"Apache-2.0"
] | 2,146
|
2015-01-01T13:03:44.000Z
|
2022-02-22T03:25:28.000Z
|
zipline/utils/sqlite_utils.py
|
leonarduschen/zipline
|
5e6c9fce7e0f812bd181024ad192ca2976d49667
|
[
"Apache-2.0"
] | 4,517
|
2015-01-01T14:26:47.000Z
|
2022-03-31T14:38:05.000Z
|
# Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import os
import sqlite3
import sqlalchemy as sa
from six.moves import range
from .input_validation import coerce_string
# SQLite limits the number of bound variables per statement; stay just below it.
SQLITE_MAX_VARIABLE_NUMBER = 998


def group_into_chunks(items, chunk_size=SQLITE_MAX_VARIABLE_NUMBER):
    """Split *items* into consecutive lists of at most *chunk_size* elements."""
    items = list(items)
    return [
        items[start:start + chunk_size]
        for start in range(0, len(items), chunk_size)
    ]
def verify_sqlite_path_exists(path):
    """Raise ValueError when *path* names a nonexistent on-disk database."""
    # ':memory:' is sqlite's in-memory database and never exists on disk.
    if path == ':memory:':
        return
    if not os.path.exists(path):
        raise ValueError("SQLite file {!r} doesn't exist.".format(path))
def check_and_create_connection(path, require_exists):
    """Open a sqlite3 connection to *path*, optionally requiring the file to exist."""
    if require_exists:
        verify_sqlite_path_exists(path)
    connection = sqlite3.connect(path)
    return connection
def check_and_create_engine(path, require_exists):
    """Create a SQLAlchemy engine for the sqlite database at *path*,
    optionally requiring the file to exist first."""
    if require_exists:
        verify_sqlite_path_exists(path)
    url = 'sqlite:///' + path
    return sa.create_engine(url)
def coerce_string_to_conn(require_exists):
    """Return a coercer turning string paths into sqlite3 connections."""
    factory = partial(check_and_create_connection, require_exists=require_exists)
    return coerce_string(factory)
def coerce_string_to_eng(require_exists):
    """Return a coercer turning string paths into SQLAlchemy engines."""
    factory = partial(check_and_create_engine, require_exists=require_exists)
    return coerce_string(factory)
| 28.983333
| 75
| 0.753882
|
4a0e3c8a745f0e5ed3a067c05b43160c938326b1
| 3,166
|
py
|
Python
|
kachna/kachna/settings.py
|
miiila/kachna
|
e14821cce84d853986c3b4f10f0fc57cb6549147
|
[
"MIT"
] | null | null | null |
kachna/kachna/settings.py
|
miiila/kachna
|
e14821cce84d853986c3b4f10f0fc57cb6549147
|
[
"MIT"
] | null | null | null |
kachna/kachna/settings.py
|
miiila/kachna
|
e14821cce84d853986c3b4f10f0fc57cb6549147
|
[
"MIT"
] | null | null | null |
"""
Django settings for kachna project.
Generated by 'django-admin startproject' using Django 2.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '!@+&@05q1*wso#jd_6zu54^wue1c59f^5-#(mkjd%gkxvv)@7%'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'guestbook.apps.GuestbookConfig',
'teams.apps.TeamsConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'kachna.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'kachna.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'cs-cz'
TIME_ZONE = 'Europe/Prague'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
| 25.739837
| 91
| 0.695515
|
4a0e3cb6c73dbdcd199f83f3ef7d54afb1169a8b
| 14,202
|
py
|
Python
|
models/fatchord_version.py
|
begeekmyfriend/WaveRNN
|
7e1d4032ae89244945b8eb1216852a48305b4e99
|
[
"MIT"
] | 15
|
2019-06-10T06:36:28.000Z
|
2022-03-31T12:21:11.000Z
|
models/fatchord_version.py
|
begeekmyfriend/WaveRNN
|
7e1d4032ae89244945b8eb1216852a48305b4e99
|
[
"MIT"
] | null | null | null |
models/fatchord_version.py
|
begeekmyfriend/WaveRNN
|
7e1d4032ae89244945b8eb1216852a48305b4e99
|
[
"MIT"
] | 7
|
2019-07-09T09:57:12.000Z
|
2020-12-01T03:49:25.000Z
|
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from utils.distribution import sample_from_discretized_mix_logistic
from utils.display import *
from utils.dsp import *
class ResBlock(nn.Module):
    """1x1-conv residual block: x + BN2(Conv2(ReLU(BN1(Conv1(x)))))."""

    def __init__(self, dims):
        super().__init__()
        # NOTE: submodule creation order is kept as-is so that seeded
        # parameter initialisation matches existing runs/checkpoints.
        self.conv1 = nn.Conv1d(dims, dims, kernel_size=1)
        self.conv2 = nn.Conv1d(dims, dims, kernel_size=1)
        self.batch_norm1 = nn.BatchNorm1d(dims)
        self.batch_norm2 = nn.BatchNorm1d(dims)

    def forward(self, x):
        h = F.relu(self.batch_norm1(self.conv1(x)))
        h = self.batch_norm2(self.conv2(h))
        # Residual connection around the two conv/BN stages.
        return h + x
class MelResNet(nn.Module):
    """Stack of 1x1 ResBlocks over mel features, producing auxiliary conditioning."""

    def __init__(self, res_blocks, in_dims, compute_dims, res_out_dims, pad):
        super().__init__()
        # The input conv consumes `pad` frames of context on each side.
        k_size = pad * 2 + 1
        self.conv_in = nn.Conv1d(in_dims, compute_dims, kernel_size=k_size)
        self.batch_norm = nn.BatchNorm1d(compute_dims)
        self.layers = nn.ModuleList()
        for _ in range(res_blocks):
            self.layers.append(ResBlock(compute_dims))
        self.conv_out = nn.Conv1d(compute_dims, res_out_dims, kernel_size=1)

    def forward(self, x):
        h = F.relu(self.batch_norm(self.conv_in(x)))
        for block in self.layers:
            h = block(h)
        return self.conv_out(h)
class Stretch2d(nn.Module):
    """Nearest-neighbour upsampling: repeat each element x_scale times along
    width and y_scale times along height."""

    def __init__(self, x_scale, y_scale):
        super().__init__()
        self.x_scale = x_scale
        self.y_scale = y_scale

    def forward(self, x):
        b, c, h, w = x.size()
        # Insert singleton axes after the h and w dims, tile them, then
        # collapse back into a (b, c, h*y_scale, w*x_scale) tensor.
        expanded = x.unsqueeze(-1).unsqueeze(3)
        tiled = expanded.repeat(1, 1, 1, self.y_scale, 1, self.x_scale)
        return tiled.view(b, c, h * self.y_scale, w * self.x_scale)
class UpsampleNetwork(nn.Module):
    """Upsample mel frames to sample rate via alternating nearest-neighbour
    stretches and smoothing convolutions, while stretching the MelResNet
    auxiliary features to the same resolution."""

    def __init__(self, feat_dims, upsample_scales, compute_dims,
                 res_blocks, res_out_dims, pad):
        super().__init__()
        # Fix: np.cumproduct is a deprecated alias removed in NumPy 2.0;
        # np.cumprod is the supported spelling with identical semantics.
        total_scale = np.cumprod(upsample_scales)[-1]
        self.indent = pad * total_scale
        self.resnet = MelResNet(res_blocks, feat_dims, compute_dims, res_out_dims, pad)
        self.resnet_stretch = Stretch2d(total_scale, 1)
        self.up_layers = nn.ModuleList()
        for scale in upsample_scales:
            k_size = (1, scale * 2 + 1)
            padding = (0, scale)
            stretch = Stretch2d(scale, 1)
            conv = nn.Conv2d(1, 1, kernel_size=k_size, padding=padding)
            # Initialise as a moving-average (smoothing) filter over the
            # nearest-neighbour-stretched input.
            conv.weight.data.fill_(1. / k_size[1])
            self.up_layers.append(stretch)
            self.up_layers.append(conv)

    def forward(self, m):
        # Auxiliary features: compute at mel rate, then stretch to sample rate.
        aux = self.resnet(m).unsqueeze(1)
        aux = self.resnet_stretch(aux)
        aux = aux.squeeze(1)
        # Mel conditioning: alternate stretch and smoothing conv stages.
        m = m.unsqueeze(1)
        for f in self.up_layers:
            m = f(m)
        # Trim the `pad` frames of context consumed by MelResNet.
        m = m.squeeze(1)[:, :, self.indent:-self.indent]
        return m.transpose(1, 2), aux.transpose(1, 2)
class WaveRNN(nn.Module):
    """WaveRNN vocoder (fatchord version).

    Autoregressively models audio samples conditioned on mel-spectrogram
    frames that ``UpsampleNetwork`` stretches to the audio sample rate.
    ``mode='RAW'`` outputs logits over ``2 ** bits`` sample classes;
    ``mode='MOL'`` outputs 30 discretized mixture-of-logistics parameters.
    """
    def __init__(self, rnn_dims, fc_dims, bits, pad, upsample_factors,
                 feat_dims, compute_dims, res_out_dims, res_blocks,
                 hop_length, sample_rate, pad_val, mode='RAW'):
        super().__init__()
        self.mode = mode
        self.bits = bits
        self.pad = pad
        self.step = 0
        self.pad_val = pad_val
        if self.mode == 'RAW' :
            self.n_classes = 2 ** bits
        elif self.mode == 'MOL' :
            self.n_classes = 30
        else :
            # Bug fix: the exception was previously constructed but never
            # raised, so an invalid mode silently left n_classes undefined
            # (generate() already raises in its matching branch).
            raise RuntimeError("Unknown model mode value - ", self.mode)
        self.rnn_dims = rnn_dims
        # Auxiliary features are split into four equal streams (a1..a4).
        self.aux_dims = res_out_dims // 4
        self.hop_length = hop_length
        self.sample_rate = sample_rate
        self.upsample = UpsampleNetwork(feat_dims, upsample_factors, compute_dims, res_blocks, res_out_dims, pad)
        self.I = nn.Linear(feat_dims + self.aux_dims + 1, rnn_dims)
        self.rnn1 = nn.GRU(rnn_dims, rnn_dims, batch_first=True)
        self.rnn2 = nn.GRU(rnn_dims + self.aux_dims, rnn_dims, batch_first=True)
        self.fc1 = nn.Linear(rnn_dims + self.aux_dims, fc_dims)
        self.fc2 = nn.Linear(fc_dims + self.aux_dims, fc_dims)
        self.fc3 = nn.Linear(fc_dims, self.n_classes)
        self.num_params()
    def forward(self, x, mels) :
        """Teacher-forced training pass.

        x    : (batch, time) previous ground-truth samples.
        mels : (batch, feat_dims, frames) conditioning mel spectrogram.
        Returns (batch, time, n_classes) distribution parameters.
        """
        self.step += 1
        mels, aux = self.upsample(mels)
        # Slice the auxiliary features into four equal channel groups.
        aux_idx = [self.aux_dims * i for i in range(5)]
        a1 = aux[:, :, aux_idx[0]:aux_idx[1]]
        a2 = aux[:, :, aux_idx[1]:aux_idx[2]]
        a3 = aux[:, :, aux_idx[2]:aux_idx[3]]
        a4 = aux[:, :, aux_idx[3]:aux_idx[4]]
        x = torch.cat([x.unsqueeze(-1), mels, a1], dim=2)
        x = self.I(x)
        res = x
        x, _ = self.rnn1(x)
        # Residual connections around both GRUs.
        x = x + res
        res = x
        x = torch.cat([x, a2], dim=2)
        x, _ = self.rnn2(x)
        x = x + res
        x = torch.cat([x, a3], dim=2)
        x = F.relu(self.fc1(x))
        x = torch.cat([x, a4], dim=2)
        x = F.relu(self.fc2(x))
        return self.fc3(x)
    def generate(self, mels, save_path, batched, target, overlap, mu_law):
        """Synthesize a waveform from ``mels`` and write it to ``save_path``.

        Runs the net one sample at a time using GRUCell twins of the training
        GRUs. With ``batched`` the sequence is folded into overlapping chunks
        (see fold_with_overlap) and crossfaded back in xfade_and_unfold.
        NOTE(review): generation assumes a CUDA device and half precision.
        """
        output = []
        # mu-law companding only applies to the categorical (RAW) output.
        mu_law = mu_law if self.mode == 'RAW' else False
        start = time.time()
        rnn1 = self.get_gru_cell(self.rnn1)
        rnn2 = self.get_gru_cell(self.rnn2)
        with torch.no_grad():
            mels = mels.cuda()
            wave_len = mels.size(-1) * self.hop_length
            mels = self.pad_tensor(mels.transpose(1, 2), self.pad, self.pad_val, side='both')
            mels, aux = self.upsample(mels.transpose(1, 2))
            if batched:
                mels = self.fold_with_overlap(mels, target, overlap, self.pad_val)
                aux = self.fold_with_overlap(aux, target, overlap, self.pad_val)
            b_size, seq_len, _ = mels.size()
            h1 = torch.zeros(b_size, self.rnn_dims).half().cuda()
            h2 = torch.zeros(b_size, self.rnn_dims).half().cuda()
            x = torch.zeros(b_size, 1).half().cuda()
            d = self.aux_dims
            aux_split = [aux[:, :, d * i:d * (i + 1)] for i in range(4)]
            for i in range(seq_len):
                m_t = mels[:, i, :]
                a1_t, a2_t, a3_t, a4_t = (a[:, i, :] for a in aux_split)
                x = torch.cat([x, m_t, a1_t], dim=1)
                x = self.I(x)
                h1 = rnn1(x, h1)
                x = x + h1
                inp = torch.cat([x, a2_t], dim=1)
                h2 = rnn2(inp, h2)
                x = x + h2
                x = torch.cat([x, a3_t], dim=1)
                x = F.relu(self.fc1(x))
                x = torch.cat([x, a4_t], dim=1)
                x = F.relu(self.fc2(x))
                logits = self.fc3(x)
                if self.mode == 'MOL':
                    sample = sample_from_discretized_mix_logistic(logits.unsqueeze(0).transpose(1, 2))
                    output.append(sample.view(-1))
                    # x = torch.FloatTensor([[sample]]).cuda()
                    x = sample.half().transpose(0, 1).cuda()
                elif self.mode == 'RAW' :
                    posterior = F.softmax(logits.float(), dim=1)
                    distrib = torch.distributions.Categorical(posterior)
                    # label -> float in [-1, 1]
                    sample = 2 * distrib.sample().float() / (self.n_classes - 1.) - 1.
                    output.append(sample)
                    x = sample.half().unsqueeze(-1)
                else:
                    raise RuntimeError("Unknown model mode value - ", self.mode)
                if i % 100 == 0 : self.gen_display(i, seq_len, b_size, start)
        output = torch.stack(output).transpose(0, 1)
        output = output.cpu().numpy()
        output = output.astype(np.float64)
        if mu_law :
            output = decode_mu_law(output, self.n_classes, False)
        if batched:
            output = self.xfade_and_unfold(output, target, overlap, -1)
        else:
            output = output[0]
        end = time.time()
        print(f'Elapsed {end - start} seconds')
        return save_wav(output[:wave_len], save_path)
    def gen_display(self, i, seq_len, b_size, start):
        """Print a progress bar and the current generation rate in kHz."""
        gen_rate = (i + 1) / (time.time() - start) * b_size / 1000
        pbar = progbar(i, seq_len)
        msg = f'| {pbar} {i*b_size}/{seq_len*b_size} | Batch Size: {b_size} | Gen Rate: {gen_rate:.1f}kHz | '
        stream(msg)
    def get_gru_cell(self, gru):
        """Build a GRUCell sharing the weights of single-layer GRU ``gru``."""
        gru_cell = nn.GRUCell(gru.input_size, gru.hidden_size)
        gru_cell.weight_hh.data = gru.weight_hh_l0.data
        gru_cell.weight_ih.data = gru.weight_ih_l0.data
        gru_cell.bias_hh.data = gru.bias_hh_l0.data
        gru_cell.bias_ih.data = gru.bias_ih_l0.data
        return gru_cell
    def pad_tensor(self, x, pad, pad_val, side='both'):
        """Pad a (B, T, C) tensor along time with ``pad`` frames of ``pad_val``
        on the requested side ('before', 'after' or 'both')."""
        # NB - this is just a quick method i need right now
        # i.e., it won't generalise to other shapes/dims
        b, t, c = x.size()
        total = t + 2 * pad if side == 'both' else t + pad
        padded = torch.zeros(b, total, c).half().fill_(pad_val).cuda()
        if side == 'before' or side == 'both':
            padded[:, pad:pad + t, :] = x
        elif side == 'after':
            padded[:, :t, :] = x
        return padded
    def fold_with_overlap(self, x, target, overlap, pad_val):
        ''' Fold the tensor with overlap for quick batched inference.
        Overlap will be used for crossfading in xfade_and_unfold()
        Args:
            x (tensor)    : Upsampled conditioning features.
                            shape=(1, timesteps, features)
            target (int)  : Target timesteps for each index of batch
            overlap (int) : Timesteps for both xfade and rnn warmup
        Return:
            (tensor) : shape=(num_folds, target + 2 * overlap, features)
        Details:
            x = [[h1, h2, ... hn]]
            Where each h is a vector of conditioning features
            Eg: target=2, overlap=1 with x.size(1)=10
            folded = [[h1, h2, h3, h4],
                      [h4, h5, h6, h7],
                      [h7, h8, h9, h10]]
        '''
        _, total_len, features = x.size()
        # Calculate variables needed
        num_folds = (total_len - overlap) // (target + overlap)
        extended_len = num_folds * (overlap + target) + overlap
        remaining = total_len - extended_len
        # Pad if some time steps poking out
        if remaining != 0:
            num_folds += 1
            padding = target + 2 * overlap - remaining
            x = self.pad_tensor(x, padding, pad_val, side='after')
        folded = torch.zeros(num_folds, target + 2 * overlap, features).half().fill_(pad_val).cuda()
        # Get the values for the folded tensor
        for i in range(num_folds):
            start = i * (target + overlap)
            end = start + target + 2 * overlap
            folded[i] = x[:, start:end, :]
        return folded
    def xfade_and_unfold(self, y, target, overlap, pad_val):
        ''' Applies a crossfade and unfolds into a 1d array.
        Args:
            y (ndarry)    : Batched sequences of audio samples
                            shape=(num_folds, target + 2 * overlap)
                            dtype=np.float64
            overlap (int) : Timesteps for both xfade and rnn warmup
        Return:
            (ndarry) : audio samples in a 1d array
                       shape=(total_len)
                       dtype=np.float64
        Details:
            y = [[seq1],
                 [seq2],
                 [seq3]]
            Apply a gain envelope at both ends of the sequences
            y = [[seq1_in, seq1_target, seq1_out],
                 [seq2_in, seq2_target, seq2_out],
                 [seq3_in, seq3_target, seq3_out]]
            Stagger and add up the groups of samples:
            [seq1_in, seq1_target, (seq1_out + seq2_in), seq2_target, ...]
        '''
        num_folds, length = y.shape
        total_len = num_folds * length
        unfolded = np.zeros(total_len)
        # Equal power crossfade
        window = np.hanning(2 * overlap)
        fade_in = window[:overlap]
        fade_out = window[-overlap:]
        end = total_len
        for i in range(1, num_folds):
            prev = y[i-1]
            curr = y[i]
            if i == 1:
                end = length
                unfolded[:end] += prev
            max_idx = 0
            max_corr = 0
            pattern = prev[-overlap:]
            # slide the curr batch to match with the pattern of previous one
            for j in range(overlap):
                match = curr[j:j + overlap]
                corr = np.sum(pattern * match) / (np.sqrt(np.sum(pattern ** 2)) * np.sqrt(np.sum(match ** 2)))
                if corr > max_corr:
                    max_idx = j
                    max_corr = corr
            # Apply the gain to the overlap samples
            start = end - overlap
            unfolded[start:end] *= fade_out
            end = start + (length - max_idx)
            curr[max_idx:max_idx + overlap] *= fade_in
            unfolded[start:end] += curr[max_idx:]
        return unfolded[:end]
    def get_step(self) :
        """Return the number of training steps taken so far."""
        return self.step
    def checkpoint(self, path) :
        """Save a step-stamped checkpoint under ``path``."""
        k_steps = self.get_step() // 1000
        self.save(f'{path}/checkpoint_{k_steps}k_steps.pyt')
    def log(self, path, msg) :
        """Append ``msg`` as a line to the log file at ``path``."""
        with open(path, 'a') as f:
            print(msg, file=f)
    def restore(self, path):
        """Load weights from ``path``; create a fresh checkpoint if absent."""
        if not os.path.exists(path) :
            print('\nNew WaveRNN Training Session...\n')
            self.save(path)
        else:
            print(f'\nLoading Weights: "{path}"\n')
            self.load(path)
    def load(self, path) :
        """Restore the step counter and (non-strict) model weights."""
        checkpoint = torch.load(path)
        self.step = checkpoint['step']
        self.load_state_dict(checkpoint['model'], strict=False)
    def save(self, path) :
        """Persist the step counter and model weights to ``path``."""
        torch.save({'step': self.step, 'model': self.state_dict()}, path)
    def num_params(self, print_out=True):
        """Return nothing; optionally print the trainable parameter count (M)."""
        parameters = filter(lambda p: p.requires_grad, self.parameters())
        parameters = sum([np.prod(p.size()) for p in parameters]) / 1_000_000
        if print_out :
            print('Trainable Parameters: %.3fM' % parameters)
| 34.304348
| 113
| 0.543093
|
4a0e3cf549af6958b69e37cf6513c54bed912f80
| 609
|
py
|
Python
|
libs/garden/garden.mapview/examples/simple_mbtiles.py
|
kellpossible/Kivy_ATC_Simulator
|
7ebac6c1561e8cf4ed9d50d15a0e057a27247b90
|
[
"MIT"
] | null | null | null |
libs/garden/garden.mapview/examples/simple_mbtiles.py
|
kellpossible/Kivy_ATC_Simulator
|
7ebac6c1561e8cf4ed9d50d15a0e057a27247b90
|
[
"MIT"
] | null | null | null |
libs/garden/garden.mapview/examples/simple_mbtiles.py
|
kellpossible/Kivy_ATC_Simulator
|
7ebac6c1561e8cf4ed9d50d15a0e057a27247b90
|
[
"MIT"
] | null | null | null |
# coding=utf-8
"""
This example demonstrate how to use the MBTilesMapSource provider.
It supports v1.1 version of .mbtiles of Mapbox.
See more at http://mbtiles.org/
It currently require a Kivy version that can load data from a buffer. This
is not the case on every platform at 1.8.1, but we're going to fix it.
"""
import sys
from mapview import MapView
from mapview.mbtsource import MBTilesMapSource
from kivy.base import runTouchApp
# Fail fast with a usage hint instead of a bare IndexError when the
# .mbtiles path is missing from the command line.
if len(sys.argv) < 2:
    sys.exit("Usage: python simple_mbtiles.py <path/to/file.mbtiles>")
source = MBTilesMapSource(sys.argv[1])
# Center the map on the defaults stored in the .mbtiles metadata.
runTouchApp(MapView(
    map_source=source,
    lat=source.default_lat,
    lon=source.default_lon,
    zoom=source.default_zoom))
| 27.681818
| 74
| 0.768473
|
4a0e3e85455577ac8369b3df3b52778263a3f10b
| 1,826
|
py
|
Python
|
sunpy/data/test/__init__.py
|
dodoextinct/sunpy
|
a87664106b67698c260b7f7127f56e49791464cc
|
[
"BSD-2-Clause"
] | null | null | null |
sunpy/data/test/__init__.py
|
dodoextinct/sunpy
|
a87664106b67698c260b7f7127f56e49791464cc
|
[
"BSD-2-Clause"
] | null | null | null |
sunpy/data/test/__init__.py
|
dodoextinct/sunpy
|
a87664106b67698c260b7f7127f56e49791464cc
|
[
"BSD-2-Clause"
] | null | null | null |
"""SunPy test data files"""
import os
import glob
import fnmatch
import re
from astropy.utils.data import get_pkg_data_filename
import sunpy
__all__ = ['rootdir', 'file_list', 'get_test_filepath', 'test_data_filenames']
# Absolute path of the installed sunpy ``data/test`` directory.
rootdir = os.path.join(os.path.dirname(sunpy.__file__), "data", "test")
# Test data files: the glob skips any extension starting with 'p'
# (i.e. .py / .pyc sources living alongside the data).
file_list = glob.glob(os.path.join(rootdir, '*.[!p]*'))
def get_test_filepath(filename, **kwargs):
    """
    Return the full path to a test file in the ``data/test`` directory.
    Parameters
    ----------
    filename : `str`
        The name of the file inside the ``data/test`` directory.
    Return
    ------
    filepath : `str`
        The full path to the file.
    See Also
    --------
    astropy.utils.data.get_pkg_data_filename : Get package data filename
    Notes
    -----
    This is a wrapper around `astropy.utils.data.get_pkg_data_filename` which
    sets the ``package`` kwarg to be ``sunpy.data.test``.
    """
    return get_pkg_data_filename(filename, package="sunpy.data.test", **kwargs)
def test_data_filenames():
    """
    Return a list of all test files in ``data/test`` directory.
    This ignores any ``py``, ``pyc`` and ``__*__`` files in these directories.
    Return
    ------
    get_all_test_filepath : `list`
        The name of all test files in ``data/test`` directory.
    """
    exclude_patterns = ['*.pyc', '*' + os.path.sep + '__*__', '*.py']
    # Translate the glob-style excludes into one alternation regex;
    # r'$.' can never match and acts as a "match nothing" fallback.
    exclude_regex = r'|'.join(fnmatch.translate(p) for p in exclude_patterns) or r'$.'
    collected = []
    for root, _dirs, filenames in os.walk(rootdir):
        for name in filenames:
            full_path = os.path.join(root, name)
            if re.match(exclude_regex, full_path):
                continue
            # Report paths relative to the test-data root.
            collected.append(full_path.replace(rootdir + os.path.sep, ''))
    return collected
| 26.085714
| 79
| 0.63965
|
4a0e3ec173874179426fb08e39e64abf8efb33aa
| 9,642
|
py
|
Python
|
pysnmp/IANA-ITU-ALARM-TC-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 11
|
2021-02-02T16:27:16.000Z
|
2021-08-31T06:22:49.000Z
|
pysnmp/IANA-ITU-ALARM-TC-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 75
|
2021-02-24T17:30:31.000Z
|
2021-12-08T00:01:18.000Z
|
pysnmp/IANA-ITU-ALARM-TC-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 10
|
2019-04-30T05:51:36.000Z
|
2022-02-16T03:33:41.000Z
|
#
# PySNMP MIB module IANA-ITU-ALARM-TC-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/IANA-ITU-ALARM-TC-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 18:51:57 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ValueSizeConstraint, SingleValueConstraint, ConstraintsUnion, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ValueSizeConstraint", "SingleValueConstraint", "ConstraintsUnion", "ValueRangeConstraint")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Unsigned32, ObjectIdentity, IpAddress, Counter64, Integer32, mib_2, iso, Counter32, NotificationType, TimeTicks, MibIdentifier, Bits, MibScalar, MibTable, MibTableRow, MibTableColumn, Gauge32, ModuleIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "Unsigned32", "ObjectIdentity", "IpAddress", "Counter64", "Integer32", "mib-2", "iso", "Counter32", "NotificationType", "TimeTicks", "MibIdentifier", "Bits", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Gauge32", "ModuleIdentity")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
# Identity of the IANA-ITU-ALARM-TC-MIB module itself (OID 1.3.6.1.2.1.119).
# NOTE: ``mibBuilder`` is injected into this module's namespace by the pysnmp
# MIB loader that executes this generated file; it is not defined here.
ianaItuAlarmNumbers = ModuleIdentity((1, 3, 6, 1, 2, 1, 119))
ianaItuAlarmNumbers.setRevisions(('2014-05-22 00:00', '2004-09-09 00:00',))
if mibBuilder.loadTexts: ianaItuAlarmNumbers.setLastUpdated('201405220000Z')
if mibBuilder.loadTexts: ianaItuAlarmNumbers.setOrganization('IANA')
class IANAItuProbableCause(TextualConvention, Integer32):
    """Textual convention enumerating ITU alarm probable-cause codes.

    Value lists are machine-generated by pysmi from the IANA registry
    (derived from ITU-T M.3100, X.733 and X.736); 1024 is 'other'.
    Do not edit the generated constraint/value lines by hand.
    """
    reference = "ITU Recommendation M.3100, 'Generic Network Information Model', 1995 ITU Recommendation X.733, 'Information Technology - Open Systems Interconnection - System Management: Alarm Reporting Function', 1992 ITU Recommendation X.736, 'Information Technology - Open Systems Interconnection - System Management: Security Alarm Reporting Function', 1992"
    status = 'current'
    # Permitted integer values for this TC (the IANA-assigned code points).
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 201, 202, 203, 204, 205, 206, 207, 500, 501, 502, 503, 504, 505, 506, 507, 508, 509, 510, 511, 512, 513, 514, 515, 516, 517, 518, 519, 520, 521, 522, 523, 524, 525, 526, 527, 528, 529, 530, 531, 532, 533, 534, 535, 536, 537, 538, 539, 540, 541, 542, 543, 544, 545, 546, 547, 548, 549, 550, 551, 552, 553, 554, 555, 600, 601, 602, 603, 604, 605, 606, 607, 608, 609, 610, 611, 612, 613, 614, 615, 1024))
    # Human-readable labels for each code point above.
    namedValues = NamedValues(("aIS", 1), ("callSetUpFailure", 2), ("degradedSignal", 3), ("farEndReceiverFailure", 4), ("framingError", 5), ("lossOfFrame", 6), ("lossOfPointer", 7), ("lossOfSignal", 8), ("payloadTypeMismatch", 9), ("transmissionError", 10), ("remoteAlarmInterface", 11), ("excessiveBER", 12), ("pathTraceMismatch", 13), ("unavailable", 14), ("signalLabelMismatch", 15), ("lossOfMultiFrame", 16), ("receiveFailure", 17), ("transmitFailure", 18), ("modulationFailure", 19), ("demodulationFailure", 20), ("broadcastChannelFailure", 21), ("connectionEstablishmentError", 22), ("invalidMessageReceived", 23), ("localNodeTransmissionError", 24), ("remoteNodeTransmissionError", 25), ("routingFailure", 26), ("backplaneFailure", 51), ("dataSetProblem", 52), ("equipmentIdentifierDuplication", 53), ("externalIFDeviceProblem", 54), ("lineCardProblem", 55), ("multiplexerProblem", 56), ("nEIdentifierDuplication", 57), ("powerProblem", 58), ("processorProblem", 59), ("protectionPathFailure", 60), ("receiverFailure", 61), ("replaceableUnitMissing", 62), ("replaceableUnitTypeMismatch", 63), ("synchronizationSourceMismatch", 64), ("terminalProblem", 65), ("timingProblem", 66), ("transmitterFailure", 67), ("trunkCardProblem", 68), ("replaceableUnitProblem", 69), ("realTimeClockFailure", 70), ("antennaFailure", 71), ("batteryChargingFailure", 72), ("diskFailure", 73), ("frequencyHoppingFailure", 74), ("iODeviceError", 75), ("lossOfSynchronisation", 76), ("lossOfRedundancy", 77), ("powerSupplyFailure", 78), ("signalQualityEvaluationFailure", 79), ("tranceiverFailure", 80), ("protectionMechanismFailure", 81), ("protectingResourceFailure", 82), ("airCompressorFailure", 101), ("airConditioningFailure", 102), ("airDryerFailure", 103), ("batteryDischarging", 104), ("batteryFailure", 105), ("commercialPowerFailure", 106), ("coolingFanFailure", 107), ("engineFailure", 108), ("fireDetectorFailure", 109), ("fuseFailure", 110), ("generatorFailure", 111), ("lowBatteryThreshold", 112), 
    ("pumpFailure", 113), ("rectifierFailure", 114), ("rectifierHighVoltage", 115), ("rectifierLowFVoltage", 116), ("ventilationsSystemFailure", 117), ("enclosureDoorOpen", 118), ("explosiveGas", 119), ("fire", 120), ("flood", 121), ("highHumidity", 122), ("highTemperature", 123), ("highWind", 124), ("iceBuildUp", 125), ("intrusionDetection", 126), ("lowFuel", 127), ("lowHumidity", 128), ("lowCablePressure", 129), ("lowTemperatue", 130), ("lowWater", 131), ("smoke", 132), ("toxicGas", 133), ("coolingSystemFailure", 134), ("externalEquipmentFailure", 135), ("externalPointFailure", 136), ("storageCapacityProblem", 151), ("memoryMismatch", 152), ("corruptData", 153), ("outOfCPUCycles", 154), ("sfwrEnvironmentProblem", 155), ("sfwrDownloadFailure", 156), ("lossOfRealTimel", 157), ("applicationSubsystemFailure", 158), ("configurationOrCustomisationError", 159), ("databaseInconsistency", 160), ("fileError", 161), ("outOfMemory", 162), ("softwareError", 163), ("timeoutExpired", 164), ("underlayingResourceUnavailable", 165), ("versionMismatch", 166), ("bandwidthReduced", 201), ("congestion", 202), ("excessiveErrorRate", 203), ("excessiveResponseTime", 204), ("excessiveRetransmissionRate", 205), ("reducedLoggingCapability", 206), ("systemResourcesOverload", 207), ("adapterError", 500), ("applicationSubsystemFailture", 501), ("bandwidthReducedX733", 502), ("callEstablishmentError", 503), ("communicationsProtocolError", 504), ("communicationsSubsystemFailure", 505), ("configurationOrCustomizationError", 506), ("congestionX733", 507), ("coruptData", 508), ("cpuCyclesLimitExceeded", 509), ("dataSetOrModemError", 510), ("degradedSignalX733", 511), ("dteDceInterfaceError", 512), ("enclosureDoorOpenX733", 513), ("equipmentMalfunction", 514), ("excessiveVibration", 515), ("fileErrorX733", 516), ("fireDetected", 517), ("framingErrorX733", 518), ("heatingVentCoolingSystemProblem", 519), ("humidityUnacceptable", 520), ("inputOutputDeviceError", 521), ("inputDeviceError", 522), 
    ("lanError", 523), ("leakDetected", 524), ("localNodeTransmissionErrorX733", 525), ("lossOfFrameX733", 526), ("lossOfSignalX733", 527), ("materialSupplyExhausted", 528), ("multiplexerProblemX733", 529), ("outOfMemoryX733", 530), ("ouputDeviceError", 531), ("performanceDegraded", 532), ("powerProblems", 533), ("pressureUnacceptable", 534), ("processorProblems", 535), ("pumpFailureX733", 536), ("queueSizeExceeded", 537), ("receiveFailureX733", 538), ("receiverFailureX733", 539), ("remoteNodeTransmissionErrorX733", 540), ("resourceAtOrNearingCapacity", 541), ("responseTimeExecessive", 542), ("retransmissionRateExcessive", 543), ("softwareErrorX733", 544), ("softwareProgramAbnormallyTerminated", 545), ("softwareProgramError", 546), ("storageCapacityProblemX733", 547), ("temperatureUnacceptable", 548), ("thresholdCrossed", 549), ("timingProblemX733", 550), ("toxicLeakDetected", 551), ("transmitFailureX733", 552), ("transmiterFailure", 553), ("underlyingResourceUnavailable", 554), ("versionMismatchX733", 555), ("authenticationFailure", 600), ("breachOfConfidentiality", 601), ("cableTamper", 602), ("delayedInformation", 603), ("denialOfService", 604), ("duplicateInformation", 605), ("informationMissing", 606), ("informationModificationDetected", 607), ("informationOutOfSequence", 608), ("keyExpired", 609), ("nonRepudiationFailure", 610), ("outOfHoursActivity", 611), ("outOfService", 612), ("proceduralError", 613), ("unauthorizedAccessAttempt", 614), ("unexpectedInformation", 615), ("other", 1024))
class IANAItuEventType(TextualConvention, Integer32):
    """Textual convention enumerating ITU alarm event types (X.736).

    Generated by pysmi; value lists mirror the IANA registry.
    """
    reference = "ITU Recommendation X.736, 'Information Technology - Open Systems Interconnection - System Management: Security Alarm Reporting Function', 1992"
    status = 'current'
    # Permitted integer values for this TC.
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11))
    # Human-readable labels for each code point above.
    namedValues = NamedValues(("other", 1), ("communicationsAlarm", 2), ("qualityOfServiceAlarm", 3), ("processingErrorAlarm", 4), ("equipmentAlarm", 5), ("environmentalAlarm", 6), ("integrityViolation", 7), ("operationalViolation", 8), ("physicalViolation", 9), ("securityServiceOrMechanismViolation", 10), ("timeDomainViolation", 11))
# Register the module identity and both textual conventions with the loading
# MIB builder so other MIB modules can importSymbols() them.
mibBuilder.exportSymbols("IANA-ITU-ALARM-TC-MIB", PYSNMP_MODULE_ID=ianaItuAlarmNumbers, ianaItuAlarmNumbers=ianaItuAlarmNumbers, IANAItuProbableCause=IANAItuProbableCause, IANAItuEventType=IANAItuEventType)
| 311.032258
| 5,501
| 0.729517
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.