hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7956230e122ce165e4fc3263ada0d9d4bf36b35e | 647 | py | Python | alembic/versions/20220323-2155_.py | marutoraman/fastapi-awesome-template | 65613e2227e02633491f582625d9611af9235975 | [
"MIT"
] | null | null | null | alembic/versions/20220323-2155_.py | marutoraman/fastapi-awesome-template | 65613e2227e02633491f582625d9611af9235975 | [
"MIT"
] | null | null | null | alembic/versions/20220323-2155_.py | marutoraman/fastapi-awesome-template | 65613e2227e02633491f582625d9611af9235975 | [
"MIT"
] | null | null | null | """empty message
Revision ID: 7083cebf60fd
Revises: 6d4151b10c3e
Create Date: 2022-03-23 21:55:10.092971
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "7083cebf60fd"  # unique id of this migration script
down_revision = "6d4151b10c3e"  # migration this one applies on top of
branch_labels = None  # no named branches for this revision
depends_on = None  # no cross-branch dependencies
def upgrade():
    """Add a nullable ``scopes`` text column to the ``users`` table."""
    scopes_column = sa.Column("scopes", sa.Text(), nullable=True)
    op.add_column("users", scopes_column)
def downgrade():
    """Revert :func:`upgrade` by dropping ``users.scopes``."""
    op.drop_column("users", "scopes")
| 22.310345 | 73 | 0.68779 |
7956238d431c151ecac1a097c871f29a5c0365c0 | 2,130 | py | Python | day-08/part-2/coco.py | evqna/adventofcode-2020 | 526bb9c87057d02bda4de9647932a0e25bdb3a5b | [
"MIT"
] | 12 | 2020-11-30T19:22:18.000Z | 2021-06-21T05:55:58.000Z | day-08/part-2/coco.py | evqna/adventofcode-2020 | 526bb9c87057d02bda4de9647932a0e25bdb3a5b | [
"MIT"
] | 13 | 2020-11-30T17:27:22.000Z | 2020-12-22T17:43:13.000Z | day-08/part-2/coco.py | evqna/adventofcode-2020 | 526bb9c87057d02bda4de9647932a0e25bdb3a5b | [
"MIT"
] | 3 | 2020-12-01T08:49:40.000Z | 2022-03-26T21:47:38.000Z | from tool.runners.python import SubmissionPy
from collections import defaultdict
class CocoSubmission(SubmissionPy):
    def run(self, s):
        """
        :param s: input in string format
        :return: solution flag
        """
        program = [line.split() for line in s.strip().split("\n")]
        size = len(program)

        # Reverse edges: predecessors[j] lists every index whose single
        # unmodified step lands on j (acc/nop fall through, jmp jumps).
        predecessors = defaultdict(list)
        for idx, (op, arg) in enumerate(program):
            target = idx + int(arg) if op == "jmp" else idx + 1
            predecessors[target].append(idx)

        # Flood backwards from the final instruction to mark every index
        # from which the end is reachable without changing the program.
        reaches_end = [False] * size
        seen = set()
        stack = [size - 1]  # start from the last instruction
        while stack:
            node = stack.pop()
            if node in seen:
                continue
            seen.add(node)
            reaches_end[node] = True
            stack.extend(predecessors[node])

        # Execute the program, flipping a single jmp<->nop the first time
        # the flipped step would land on an index known to reach the end.
        acc = 0
        pc = 0
        patched = False
        while pc < size:
            op, arg = program[pc]
            step = int(arg)
            if op == "acc":
                acc += step
                pc += 1
            elif op == "jmp":
                if not patched and reaches_end[pc + 1]:
                    # treat this jmp as a nop
                    pc += 1
                    patched = True
                else:
                    pc += step
            else:  # nop
                if not patched and reaches_end[pc + step]:
                    # treat this nop as a jmp
                    pc += step
                    patched = True
                else:
                    pc += 1
        return acc
| 33.28125 | 75 | 0.497183 |
79562577f470e44e4b17a1946d3daa193e636202 | 319,768 | py | Python | nova/compute/manager.py | bopopescu/nova-19 | b898897e812333b1b93758e90e7d471c69a84c86 | [
"Apache-2.0"
] | null | null | null | nova/compute/manager.py | bopopescu/nova-19 | b898897e812333b1b93758e90e7d471c69a84c86 | [
"Apache-2.0"
] | null | null | null | nova/compute/manager.py | bopopescu/nova-19 | b898897e812333b1b93758e90e7d471c69a84c86 | [
"Apache-2.0"
] | 1 | 2020-07-24T01:18:46.000Z | 2020-07-24T01:18:46.000Z | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Handles all processes relating to instances (guest vms).
The :py:class:`ComputeManager` class is a :py:class:`nova.manager.Manager` that
handles RPC calls relating to creating instances. It is responsible for
building a disk image, launching it via the underlying virtualization driver,
responding to calls to check its state, attaching persistent storage, and
terminating it.
"""
import base64
import contextlib
import functools
import inspect
import socket
import sys
import time
import traceback
import uuid
from cinderclient import exceptions as cinder_exception
import eventlet.event
from eventlet import greenthread
import eventlet.semaphore
import eventlet.timeout
from keystoneauth1 import exceptions as keystone_exception
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from oslo_service import loopingcall
from oslo_service import periodic_task
from oslo_utils import excutils
from oslo_utils import strutils
from oslo_utils import timeutils
import six
from six.moves import range
from nova import block_device
from nova.cells import rpcapi as cells_rpcapi
from nova.cloudpipe import pipelib
from nova import compute
from nova.compute import build_results
from nova.compute import claims
from nova.compute import power_state
from nova.compute import resource_tracker
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova import conductor
import nova.conf
from nova import consoleauth
import nova.context
from nova import exception
from nova import hooks
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LI
from nova.i18n import _LW
from nova import image
from nova.image import glance
from nova import manager
from nova import network
from nova.network import base_api as base_net_api
from nova.network import model as network_model
from nova.network.security_group import openstack_driver
from nova import objects
from nova.objects import base as obj_base
from nova.objects import instance as obj_instance
from nova.objects import migrate_data as migrate_data_obj
from nova import paths
from nova import rpc
from nova import safe_utils
from nova.scheduler import client as scheduler_client
from nova import utils
from nova.virt import block_device as driver_block_device
from nova.virt import configdrive
from nova.virt import driver
from nova.virt import event as virtevent
from nova.virt import storage_users
from nova.virt import virtapi
from nova import volume
from nova.volume import encryptors
# Option definitions for the compute service; every group below is
# registered on the global CONF object at import time (see bottom of
# this section).

# Host-level compute behaviour: console proxying, instance storage paths,
# usage auditing, and build/migration concurrency limits.
compute_opts = [
    cfg.StrOpt('console_host',
               default=socket.gethostname(),
               help='Console proxy host to use to connect '
                    'to instances on this host.'),
    cfg.StrOpt('default_access_ip_network_name',
               help='Name of network to use to set access IPs for instances'),
    cfg.BoolOpt('defer_iptables_apply',
                default=False,
                help='Whether to batch up the application of IPTables rules'
                     ' during a host restart and apply all at the end of the'
                     ' init phase'),
    cfg.StrOpt('instances_path',
               default=paths.state_path_def('instances'),
               help='Where instances are stored on disk'),
    cfg.BoolOpt('instance_usage_audit',
                default=False,
                help="Generate periodic compute.instance.exists"
                     " notifications"),
    cfg.IntOpt('live_migration_retry_count',
               default=30,
               help="Number of 1 second retries needed in live_migration"),
    cfg.BoolOpt('resume_guests_state_on_host_boot',
                default=False,
                help='Whether to start guests that were running before the '
                     'host rebooted'),
    cfg.IntOpt('network_allocate_retries',
               default=0,
               help="Number of times to retry network allocation on failures"),
    cfg.IntOpt('max_concurrent_builds',
               default=10,
               help='Maximum number of instance builds to run concurrently'),
    cfg.IntOpt('max_concurrent_live_migrations',
               default=1,
               help='Maximum number of live migrations to run concurrently. '
                    'This limit is enforced to avoid outbound live migrations '
                    'overwhelming the host/network and causing failures. It '
                    'is not recommended that you change this unless you are '
                    'very sure that doing so is safe and stable in your '
                    'environment.'),
    cfg.IntOpt('block_device_allocate_retries',
               default=60,
               help='Number of times to retry block device '
                    'allocation on failures.\n'
                    'Starting with Liberty, Cinder can use image volume '
                    'cache. This may help with block device allocation '
                    'performance. Look at the cinder '
                    'image_volume_cache_enabled configuration option.')
]
# Periodic-task intervals, in seconds.  Per the help text: 0 generally
# means "run at the default periodic rate" and -1 disables the task.
interval_opts = [
    cfg.IntOpt('bandwidth_poll_interval',
               default=600,
               help='Interval to pull network bandwidth usage info. Not '
                    'supported on all hypervisors. Set to -1 to disable. '
                    'Setting this to 0 will run at the default rate.'),
    cfg.IntOpt('sync_power_state_interval',
               default=600,
               help='Interval to sync power states between the database and '
                    'the hypervisor. Set to -1 to disable. '
                    'Setting this to 0 will run at the default rate.'),
    cfg.IntOpt("heal_instance_info_cache_interval",
               default=60,
               help="Number of seconds between instance network information "
                    "cache updates"),
    cfg.IntOpt('reclaim_instance_interval',
               min=0,
               default=0,
               help='Interval in seconds for reclaiming deleted instances. '
                    'It takes effect only when value is greater than 0.'),
    cfg.IntOpt('volume_usage_poll_interval',
               default=0,
               help='Interval in seconds for gathering volume usages'),
    cfg.IntOpt('shelved_poll_interval',
               default=3600,
               help='Interval in seconds for polling shelved instances to '
                    'offload. Set to -1 to disable.'
                    'Setting this to 0 will run at the default rate.'),
    cfg.IntOpt('shelved_offload_time',
               default=0,
               help='Time in seconds before a shelved instance is eligible '
                    'for removing from a host. -1 never offload, 0 offload '
                    'immediately when shelved'),
    cfg.IntOpt('instance_delete_interval',
               default=300,
               help='Interval in seconds for retrying failed instance file '
                    'deletes. Set to -1 to disable. '
                    'Setting this to 0 will run at the default rate.'),
    cfg.IntOpt('block_device_allocate_retries_interval',
               default=3,
               help='Waiting time interval (seconds) between block'
                    ' device allocation retries on failures'),
    cfg.IntOpt('scheduler_instance_sync_interval',
               default=120,
               help='Waiting time interval (seconds) between sending the '
                    'scheduler a list of current instance UUIDs to verify '
                    'that its view of instances is in sync with nova. If the '
                    'CONF option `scheduler_tracks_instance_changes` is '
                    'False, changing this option will have no effect.'),
    cfg.IntOpt('update_resources_interval',
               default=0,
               help='Interval in seconds for updating compute resources. A '
                    'number less than 0 means to disable the task completely. '
                    'Leaving this at the default of 0 will cause this to run '
                    'at the default periodic interval. Setting it to any '
                    'positive value will cause it to run at approximately '
                    'that number of seconds.'),
]
# Timeouts (seconds) for automatic state transitions; 0 disables each.
timeout_opts = [
    cfg.IntOpt("reboot_timeout",
               default=0,
               help="Automatically hard reboot an instance if it has been "
                    "stuck in a rebooting state longer than N seconds. "
                    "Set to 0 to disable."),
    cfg.IntOpt("instance_build_timeout",
               default=0,
               help="Amount of time in seconds an instance can be in BUILD "
                    "before going into ERROR status. "
                    "Set to 0 to disable."),
    cfg.IntOpt("rescue_timeout",
               default=0,
               help="Automatically unrescue an instance after N seconds. "
                    "Set to 0 to disable."),
    cfg.IntOpt("resize_confirm_window",
               default=0,
               help="Automatically confirm resizes and cold migrations "
                    "after N seconds. Set to 0 to disable."),
    cfg.IntOpt("shutdown_timeout",
               default=60,
               help="Total amount of time to wait in seconds for an instance "
                    "to perform a clean shutdown."),
]
# Handling of instances still running on the hypervisor but already
# deleted from the database.
running_deleted_opts = [
    cfg.StrOpt("running_deleted_instance_action",
               default="reap",
               choices=('noop', 'log', 'shutdown', 'reap'),
               help="Action to take if a running deleted instance is detected."
                    "Set to 'noop' to take no action."),
    cfg.IntOpt("running_deleted_instance_poll_interval",
               default=1800,
               help="Number of seconds to wait between runs of the cleanup "
                    "task."),
    cfg.IntOpt("running_deleted_instance_timeout",
               default=0,
               help="Number of seconds after being deleted when a running "
                    "instance should be considered eligible for cleanup."),
]
instance_cleaning_opts = [
    cfg.IntOpt('maximum_instance_delete_attempts',
               default=5,
               help='The number of times to attempt to reap an instance\'s '
                    'files.'),
]
CONF = nova.conf.CONF
CONF.register_opts(compute_opts)
CONF.register_opts(interval_opts)
CONF.register_opts(timeout_opts)
CONF.register_opts(running_deleted_opts)
CONF.register_opts(instance_cleaning_opts)
# Options defined in other modules that this module reads via CONF.
CONF.import_opt('console_topic', 'nova.console.rpcapi')
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('enabled', 'nova.spice', group='spice')
CONF.import_opt('image_cache_manager_interval', 'nova.virt.imagecache')
CONF.import_opt('enabled', 'nova.rdp', group='rdp')
CONF.import_opt('html5_proxy_base_url', 'nova.rdp', group='rdp')
CONF.import_opt('enabled', 'nova.mks', group='mks')
CONF.import_opt('mksproxy_base_url', 'nova.mks', group='mks')
CONF.import_opt('destroy_after_evacuate', 'nova.utils', group='workarounds')
CONF.import_opt('scheduler_tracks_instance_changes',
                'nova.scheduler.host_manager')
LOG = logging.getLogger(__name__)
# Notifier and exception-wrapping helpers pre-bound to the 'compute' service.
get_notifier = functools.partial(rpc.get_notifier, service='compute')
wrap_exception = functools.partial(exception.wrap_exception,
                                   get_notifier=get_notifier)
@utils.expects_func_args('migration')
def errors_out_migration(function):
    """Decorator to error out migration on failure.

    If the decorated method raises, the ``migration`` argument's status is
    set to 'error' and saved (as admin) before the original exception is
    re-raised.  The status is only changed when the migration is currently
    'migrating' or 'post-migrating', except for InstanceNotFound, which
    errors out the migration regardless of its current status.
    """
    @functools.wraps(function)
    def decorated_function(self, context, *args, **kwargs):
        try:
            return function(self, context, *args, **kwargs)
        except Exception as ex:
            with excutils.save_and_reraise_exception():
                # Resolve the 'migration' argument whether it was passed
                # positionally or by keyword.
                wrapped_func = safe_utils.get_wrapped_function(function)
                keyed_args = inspect.getcallargs(wrapped_func, self, context,
                                                 *args, **kwargs)
                migration = keyed_args['migration']
                # NOTE(rajesht): If InstanceNotFound error is thrown from
                # decorated function, migration status should be set to
                # 'error', without checking current migration status.
                if not isinstance(ex, exception.InstanceNotFound):
                    status = migration.status
                    if status not in ['migrating', 'post-migrating']:
                        return
                migration.status = 'error'
                try:
                    with migration.obj_as_admin():
                        migration.save()
                except Exception:
                    # Best effort: the original exception is what matters.
                    LOG.debug('Error setting migration status '
                              'for instance %s.',
                              migration.instance_uuid, exc_info=True)
    return decorated_function
@utils.expects_func_args('instance')
def reverts_task_state(function):
    """Decorator to revert task_state on failure.

    On any unexpected exception, the decorated method's ``instance`` gets
    its task_state set back to None before the exception is re-raised.
    UnexpectedTaskStateError is re-raised without reverting, since it means
    the current task was preempted by another one.
    """
    @functools.wraps(function)
    def decorated_function(self, context, *args, **kwargs):
        try:
            return function(self, context, *args, **kwargs)
        except exception.UnexpectedTaskStateError as e:
            # Note(maoy): unexpected task state means the current
            # task is preempted. Do not clear task state in this
            # case.
            with excutils.save_and_reraise_exception():
                LOG.info(_LI("Task possibly preempted: %s"),
                         e.format_message())
        except Exception:
            with excutils.save_and_reraise_exception():
                wrapped_func = safe_utils.get_wrapped_function(function)
                keyed_args = inspect.getcallargs(wrapped_func, self, context,
                                                 *args, **kwargs)
                # NOTE(mriedem): 'instance' must be in keyed_args because we
                # have utils.expects_func_args('instance') decorating this
                # method.
                instance = keyed_args['instance']
                original_task_state = instance.task_state
                try:
                    self._instance_update(context, instance, task_state=None)
                    LOG.info(_LI("Successfully reverted task state from %s on "
                                 "failure for instance."), original_task_state,
                             instance=instance)
                except exception.InstanceNotFound:
                    # We might delete an instance that failed to build shortly
                    # after it errored out this is an expected case and we
                    # should not trace on it.
                    pass
                except Exception as e:
                    msg = _LW("Failed to revert task state for instance. "
                              "Error: %s")
                    LOG.warning(msg, e, instance=instance)
    return decorated_function
@utils.expects_func_args('instance')
def wrap_instance_fault(function):
    """Wraps a method to catch exceptions related to instances.
    This decorator wraps a method to catch any exceptions having to do with
    an instance that may get thrown. It then logs an instance fault in the db.

    InstanceNotFound is re-raised untouched (no fault is recorded for an
    instance that no longer exists); every other exception is recorded as
    an instance fault and then re-raised.
    """
    @functools.wraps(function)
    def decorated_function(self, context, *args, **kwargs):
        try:
            return function(self, context, *args, **kwargs)
        except exception.InstanceNotFound:
            raise
        except Exception as e:
            # NOTE(gtt): If argument 'instance' is in args rather than kwargs,
            # we will get a KeyError exception which will cover up the real
            # exception. So, we update kwargs with the values from args first.
            # then, we can get 'instance' from kwargs easily.
            kwargs.update(dict(zip(function.__code__.co_varnames[2:], args)))
            with excutils.save_and_reraise_exception():
                compute_utils.add_instance_fault_from_exc(context,
                        kwargs['instance'], e, sys.exc_info())
    return decorated_function
@utils.expects_func_args('instance')
def wrap_instance_event(function):
    """Wraps a method to log the event taken on the instance, and result.
    This decorator wraps a method to log the start and result of an event, as
    part of an action taken on an instance.

    The recorded event is named 'compute_<decorated function name>' and is
    keyed by the instance's uuid via compute_utils.EventReporter.
    """
    @functools.wraps(function)
    def decorated_function(self, context, *args, **kwargs):
        # Resolve the 'instance' argument whether passed positionally or
        # by keyword, so we can extract its uuid for the event record.
        wrapped_func = safe_utils.get_wrapped_function(function)
        keyed_args = inspect.getcallargs(wrapped_func, self, context, *args,
                                         **kwargs)
        instance_uuid = keyed_args['instance']['uuid']
        event_name = 'compute_{0}'.format(function.__name__)
        with compute_utils.EventReporter(context, event_name, instance_uuid):
            return function(self, context, *args, **kwargs)
    return decorated_function
@utils.expects_func_args('image_id', 'instance')
def delete_image_on_error(function):
    """Used for snapshot related method to ensure the image created in
    compute.api is deleted when an error occurs.

    On any exception the image identified by ``image_id`` is deleted
    (best effort — deletion failures are logged, not raised) and the
    original exception is re-raised.
    """
    @functools.wraps(function)
    def decorated_function(self, context, image_id, instance,
                           *args, **kwargs):
        try:
            return function(self, context, image_id, instance,
                            *args, **kwargs)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.debug("Cleaning up image %s", image_id,
                          exc_info=True, instance=instance)
                try:
                    self.image_api.delete(context, image_id)
                except Exception:
                    LOG.exception(_LE("Error while trying to clean up "
                                      "image %s"), image_id,
                                  instance=instance)
    return decorated_function
# TODO(danms): Remove me after Icehouse
# NOTE(mikal): if the method being decorated has more than one decorator, then
# put this one first. Otherwise the various exception handling decorators do
# not function correctly.
def object_compat(function):
    """Wraps a method that expects a new-world instance
    This provides compatibility for callers passing old-style dict
    instances.

    Both the 'instance' and 'migration' arguments are converted to their
    objects counterparts if they arrive as plain dicts; object arguments
    pass through unchanged.
    """
    @functools.wraps(function)
    def decorated_function(self, context, *args, **kwargs):
        def _load_instance(instance_or_dict):
            if isinstance(instance_or_dict, dict):
                # try to get metadata and system_metadata for most cases but
                # only attempt to load those if the db instance already has
                # those fields joined
                metas = [meta for meta in ('metadata', 'system_metadata')
                         if meta in instance_or_dict]
                instance = objects.Instance._from_db_object(
                    context, objects.Instance(), instance_or_dict,
                    expected_attrs=metas)
                instance._context = context
                return instance
            return instance_or_dict
        try:
            kwargs['instance'] = _load_instance(kwargs['instance'])
        except KeyError:
            # 'instance' was passed positionally: it is the first arg
            # after context.
            args = (_load_instance(args[0]),) + args[1:]
        migration = kwargs.get('migration')
        if isinstance(migration, dict):
            migration = objects.Migration._from_db_object(
                    context.elevated(), objects.Migration(),
                    migration)
            kwargs['migration'] = migration
        return function(self, context, *args, **kwargs)
    return decorated_function
class InstanceEvents(object):
    """Tracks external events that in-flight operations are waiting on.

    ``_events`` maps instance uuid -> {event_name: eventlet.event.Event}.
    It is set to None during shutdown (see cancel_all_events) to refuse
    any new registrations.
    """
    def __init__(self):
        # {instance_uuid: {event_name: eventlet.event.Event}}
        self._events = {}
    @staticmethod
    def _lock_name(instance):
        # Name of the per-instance lock guarding the nested event dict.
        return '%s-%s' % (instance.uuid, 'events')
    def prepare_for_instance_event(self, instance, event_name):
        """Prepare to receive an event for an instance.
        This will register an event for the given instance that we will
        wait on later. This should be called before initiating whatever
        action will trigger the event. The resulting eventlet.event.Event
        object should be wait()'d on to ensure completion.
        :param instance: the instance for which the event will be generated
        :param event_name: the name of the event we're expecting
        :returns: an event object that should be wait()'d on
        """
        if self._events is None:
            # NOTE(danms): We really should have a more specific error
            # here, but this is what we use for our default error case
            raise exception.NovaException('In shutdown, no new events '
                                          'can be scheduled')
        @utils.synchronized(self._lock_name(instance))
        def _create_or_get_event():
            # Reuse an already-registered Event for this name if present.
            instance_events = self._events.setdefault(instance.uuid, {})
            return instance_events.setdefault(event_name,
                                              eventlet.event.Event())
        LOG.debug('Preparing to wait for external event %(event)s',
                  {'event': event_name}, instance=instance)
        return _create_or_get_event()
    def pop_instance_event(self, instance, event):
        """Remove a pending event from the wait list.
        This will remove a pending event from the wait list so that it
        can be used to signal the waiters to wake up.
        :param instance: the instance for which the event was generated
        :param event: the nova.objects.external_event.InstanceExternalEvent
                      that describes the event
        :returns: the eventlet.event.Event object on which the waiters
                  are blocked
        """
        # Sentinels distinguish "nothing registered at all" from
        # "registered, but no event under this key".
        no_events_sentinel = object()
        no_matching_event_sentinel = object()
        @utils.synchronized(self._lock_name(instance))
        def _pop_event():
            if not self._events:
                LOG.debug('Unexpected attempt to pop events during shutdown',
                          instance=instance)
                return no_events_sentinel
            events = self._events.get(instance.uuid)
            if not events:
                return no_events_sentinel
            _event = events.pop(event.key, None)
            if not events:
                # Drop the instance entry once its last event is gone.
                del self._events[instance.uuid]
            if _event is None:
                return no_matching_event_sentinel
            return _event
        result = _pop_event()
        if result is no_events_sentinel:
            LOG.debug('No waiting events found dispatching %(event)s',
                      {'event': event.key},
                      instance=instance)
            return None
        elif result is no_matching_event_sentinel:
            LOG.debug('No event matching %(event)s in %(events)s',
                      {'event': event.key,
                       'events': self._events.get(instance.uuid, {}).keys()},
                      instance=instance)
            return None
        else:
            return result
    def clear_events_for_instance(self, instance):
        """Remove all pending events for an instance.
        This will remove all events currently pending for an instance
        and return them (indexed by event name).
        :param instance: the instance for which events should be purged
        :returns: a dictionary of {event_name: eventlet.event.Event}
        """
        @utils.synchronized(self._lock_name(instance))
        def _clear_events():
            if self._events is None:
                LOG.debug('Unexpected attempt to clear events during shutdown',
                          instance=instance)
                return dict()
            return self._events.pop(instance.uuid, {})
        return _clear_events()
    def cancel_all_events(self):
        """Fire every pending event with status 'failed' and block new ones.

        Used on shutdown: each waiter is woken with a failed event so it
        does not block forever, and ``_events`` is set to None so further
        registrations raise.
        """
        if self._events is None:
            LOG.debug('Unexpected attempt to cancel events during shutdown.')
            return
        our_events = self._events
        # NOTE(danms): Block new events
        self._events = None
        for instance_uuid, events in our_events.items():
            for event_name, eventlet_event in events.items():
                LOG.debug('Canceling in-flight event %(event)s for '
                          'instance %(instance_uuid)s',
                          {'event': event_name,
                           'instance_uuid': instance_uuid})
                # Event names are '<name>-<tag>'; split from the right so
                # names containing '-' survive.
                name, tag = event_name.rsplit('-', 1)
                event = objects.InstanceExternalEvent(
                    instance_uuid=instance_uuid,
                    name=name, status='failed',
                    tag=tag, data={})
                eventlet_event.send(event)
class ComputeVirtAPI(virtapi.VirtAPI):
    """VirtAPI implementation backed by the compute manager.

    Gives virt drivers a narrow handle for waiting on external instance
    events without exposing the whole ComputeManager.
    """
    def __init__(self, compute):
        super(ComputeVirtAPI, self).__init__()
        # The owning ComputeManager, used to reach its InstanceEvents.
        self._compute = compute
    def _default_error_callback(self, event_name, instance):
        # Default failure policy for wait_for_instance_event: raise.
        raise exception.NovaException(_('Instance event failed'))
    @contextlib.contextmanager
    def wait_for_instance_event(self, instance, event_names, deadline=300,
                                error_callback=None):
        """Plan to wait for some events, run some code, then wait.
        This context manager will first create plans to wait for the
        provided event_names, yield, and then wait for all the scheduled
        events to complete.
        Note that this uses an eventlet.timeout.Timeout to bound the
        operation, so callers should be prepared to catch that
        failure and handle that situation appropriately.
        If the event is not received by the specified timeout deadline,
        eventlet.timeout.Timeout is raised.
        If the event is received but did not have a 'completed'
        status, a NovaException is raised. If an error_callback is
        provided, instead of raising an exception as detailed above
        for the failure case, the callback will be called with the
        event_name and instance, and can return True to continue
        waiting for the rest of the events, False to stop processing,
        or raise an exception which will bubble up to the waiter.
        :param instance: The instance for which an event is expected
        :param event_names: A list of event names. Each element can be a
                            string event name or tuple of strings to
                            indicate (name, tag).
        :param deadline: Maximum number of seconds we should wait for all
                         of the specified events to arrive.
        :param error_callback: A function to be called if an event arrives
        """
        if error_callback is None:
            error_callback = self._default_error_callback
        events = {}
        for event_name in event_names:
            if isinstance(event_name, tuple):
                name, tag = event_name
                event_name = objects.InstanceExternalEvent.make_key(
                    name, tag)
            try:
                events[event_name] = (
                    self._compute.instance_events.prepare_for_instance_event(
                        instance, event_name))
            except exception.NovaException:
                error_callback(event_name, instance)
                # NOTE(danms): Don't wait for any of the events. They
                # should all be canceled and fired immediately below,
                # but don't stick around if not.
                deadline = 0
        yield
        with eventlet.timeout.Timeout(deadline):
            for event_name, event in events.items():
                actual_event = event.wait()
                if actual_event.status == 'completed':
                    continue
                # Non-completed event: let the callback decide whether to
                # keep waiting (True), stop (False), or raise.
                decision = error_callback(event_name, instance)
                if decision is False:
                    break
class ComputeManager(manager.Manager):
    """Manages the running instances from creation to destruction."""
    # RPC API version this manager implements.
    target = messaging.Target(version='4.11')
    # How long to wait in seconds before re-issuing a shutdown
    # signal to an instance during power off. The overall
    # time to wait is set by CONF.shutdown_timeout.
    SHUTDOWN_RETRY_INTERVAL = 10
    def __init__(self, compute_driver=None, *args, **kwargs):
        """Load configuration options and connect to the hypervisor.

        :param compute_driver: optional virt driver name; passed through to
            driver.load_compute_driver (None loads the configured default).
        """
        self.virtapi = ComputeVirtAPI(self)
        self.network_api = network.API()
        self.volume_api = volume.API()
        self.image_api = image.API()
        self._last_host_check = 0
        self._last_bw_usage_poll = 0
        self._bw_usage_supported = True
        self._last_bw_usage_cell_update = 0
        self.compute_api = compute.API()
        self.compute_rpcapi = compute_rpcapi.ComputeAPI()
        self.conductor_api = conductor.API()
        self.compute_task_api = conductor.ComputeTaskAPI()
        self.is_neutron_security_groups = (
            openstack_driver.is_neutron_security_groups())
        self.consoleauth_rpcapi = consoleauth.rpcapi.ConsoleAuthAPI()
        self.cells_rpcapi = cells_rpcapi.CellsAPI()
        self.scheduler_client = scheduler_client.SchedulerClient()
        # Per-nodename ResourceTracker cache (see _get_resource_tracker).
        self._resource_tracker_dict = {}
        self.instance_events = InstanceEvents()
        self._sync_power_pool = eventlet.GreenPool()
        self._syncs_in_progress = {}
        self.send_instance_updates = CONF.scheduler_tracks_instance_changes
        # Semaphores throttling concurrent builds and outbound live
        # migrations; a non-limiting config value means unlimited.
        if CONF.max_concurrent_builds != 0:
            self._build_semaphore = eventlet.semaphore.Semaphore(
                CONF.max_concurrent_builds)
        else:
            self._build_semaphore = compute_utils.UnlimitedSemaphore()
        if max(CONF.max_concurrent_live_migrations, 0) != 0:
            self._live_migration_semaphore = eventlet.semaphore.Semaphore(
                CONF.max_concurrent_live_migrations)
        else:
            self._live_migration_semaphore = compute_utils.UnlimitedSemaphore()
        super(ComputeManager, self).__init__(service_name="compute",
                                             *args, **kwargs)
        # NOTE(russellb) Load the driver last. It may call back into the
        # compute manager via the virtapi, so we want it to be fully
        # initialized before that happens.
        self.driver = driver.load_compute_driver(self.virtapi, compute_driver)
        self.use_legacy_block_device_info = \
            self.driver.need_legacy_block_device_info
    def reset(self):
        """Reload the compute RPC API, re-negotiating its version pin."""
        LOG.info(_LI('Reloading compute RPC API'))
        # Clearing LAST_VERSION forces the new client to re-determine the
        # RPC version cap.
        compute_rpcapi.LAST_VERSION = None
        self.compute_rpcapi = compute_rpcapi.ComputeAPI()
    def _get_resource_tracker(self, nodename):
        """Return the (cached) ResourceTracker for ``nodename``.

        :raises: NovaException if the driver does not manage that node.
        """
        rt = self._resource_tracker_dict.get(nodename)
        if not rt:
            if not self.driver.node_is_available(nodename):
                raise exception.NovaException(
                    _("%s is not a valid node managed by this "
                      "compute host.") % nodename)
            rt = resource_tracker.ResourceTracker(self.host,
                                                  self.driver,
                                                  nodename)
            self._resource_tracker_dict[nodename] = rt
        return rt
    def _update_resource_tracker(self, context, instance):
        """Let the resource tracker know that an instance has changed state."""
        # Only track usage for instances that belong to this host and whose
        # node the driver still reports as available.
        if (instance.host == self.host and
                self.driver.node_is_available(instance.node)):
            rt = self._get_resource_tracker(instance.node)
            rt.update_usage(context, instance)
def _instance_update(self, context, instance, **kwargs):
"""Update an instance in the database using kwargs as value."""
for k, v in kwargs.items():
setattr(instance, k, v)
instance.save()
self._update_resource_tracker(context, instance)
    def _nil_out_instance_obj_host_and_node(self, instance):
        """Clear the instance's host/node fields without persisting them."""
        # NOTE(jwcroppe): We don't do instance.save() here for performance
        # reasons; a call to this is expected to be immediately followed by
        # another call that does instance.save(), thus avoiding two writes
        # to the database layer.
        instance.host = None
        instance.node = None
    def _set_instance_obj_error_state(self, context, instance,
                                      clean_task_state=False):
        """Put the instance into ERROR vm_state and save it.

        :param clean_task_state: also reset task_state to None.
        """
        try:
            instance.vm_state = vm_states.ERROR
            if clean_task_state:
                instance.task_state = None
            instance.save()
        except exception.InstanceNotFound:
            # Concurrent deletion is fine; nothing left to mark as errored.
            LOG.debug('Instance has been destroyed from under us while '
                      'trying to set it to ERROR', instance=instance)
    def _get_instances_on_driver(self, context, filters=None):
        """Return a list of instance records for the instances found
        on the hypervisor which satisfy the specified filters. If filters=None
        return a list of instance records for all the instances found on the
        hypervisor.
        """
        if not filters:
            filters = {}
        try:
            # Preferred path: filter the DB query by the uuids the driver
            # reports.
            driver_uuids = self.driver.list_instance_uuids()
            if len(driver_uuids) == 0:
                # Short circuit, don't waste a DB call
                return objects.InstanceList()
            filters['uuid'] = driver_uuids
            local_instances = objects.InstanceList.get_by_filters(
                context, filters, use_slave=True)
            return local_instances
        except NotImplementedError:
            pass
        # The driver doesn't support uuids listing, so we'll have
        # to brute force.
        driver_instances = self.driver.list_instances()
        instances = objects.InstanceList.get_by_filters(context, filters,
                                                        use_slave=True)
        # Intersect DB instances with driver instances by name.
        name_map = {instance.name: instance for instance in instances}
        local_instances = []
        for driver_instance in driver_instances:
            instance = name_map.get(driver_instance)
            if not instance:
                continue
            local_instances.append(instance)
        return local_instances
    def _destroy_evacuated_instances(self, context):
        """Destroys evacuated instances.

        While nova-compute was down, the instances running on it could be
        evacuated to another host. Check that the instances reported
        by the driver are still associated with this host. If they are
        not, destroy them, with the exception of instances which are in
        the MIGRATING, RESIZE_MIGRATING, RESIZE_MIGRATED, RESIZE_FINISH
        task state or RESIZED vm state.
        """
        # Find migration records for evacuations that moved instances
        # off this host.
        filters = {
            'source_compute': self.host,
            'status': ['accepted', 'done'],
            'migration_type': 'evacuation',
        }
        evacuations = objects.MigrationList.get_by_filters(context, filters)
        if not evacuations:
            return
        # Index by instance uuid for the lookup below.
        evacuations = {mig.instance_uuid: mig for mig in evacuations}
        filters = {'deleted': False}
        local_instances = self._get_instances_on_driver(context, filters)
        evacuated = [inst for inst in local_instances
                     if inst.uuid in evacuations]
        for instance in evacuated:
            migration = evacuations[instance.uuid]
            LOG.info(_LI('Deleting instance as it has been evacuated from '
                         'this host'), instance=instance)
            try:
                network_info = self.network_api.get_instance_nw_info(
                    context, instance)
                bdi = self._get_instance_block_device_info(context,
                                                           instance)
                # Only destroy local disks when the instance storage is
                # not shared with the evacuation target.
                destroy_disks = not (self._is_instance_storage_shared(
                    context, instance))
            except exception.InstanceNotFound:
                network_info = network_model.NetworkInfo()
                bdi = {}
                LOG.info(_LI('Instance has been marked deleted already, '
                             'removing it from the hypervisor.'),
                         instance=instance)
                # always destroy disks if the instance was deleted
                destroy_disks = True
            self.driver.destroy(context, instance,
                                network_info,
                                bdi, destroy_disks)
            # Record that this host has finished its part of the
            # evacuation.
            migration.status = 'completed'
            migration.save()
    def _is_instance_storage_shared(self, context, instance, host=None):
        """Check whether the instance files live on storage shared with *host*.

        Defaults to True (shared) when the check fails unexpectedly, and
        False when the driver does not implement the local shared-storage
        check at all.
        """
        shared_storage = True
        data = None
        try:
            # The driver produces a token describing local storage which
            # the remote compute then checks for accessibility.
            data = self.driver.check_instance_shared_storage_local(context,
                                                       instance)
            if data:
                shared_storage = (self.compute_rpcapi.
                                  check_instance_shared_storage(context,
                                  instance, data, host=host))
        except NotImplementedError:
            LOG.debug('Hypervisor driver does not support '
                      'instance shared storage check, '
                      'assuming it\'s not on shared storage',
                      instance=instance)
            shared_storage = False
        except Exception:
            LOG.exception(_LE('Failed to check if instance shared'),
                      instance=instance)
        finally:
            # Always clean up whatever the local check created, even on
            # failure.
            if data:
                self.driver.check_instance_shared_storage_cleanup(context,
                                                                  data)
        return shared_storage
def _complete_partial_deletion(self, context, instance):
"""Complete deletion for instances in DELETED status but not marked as
deleted in the DB
"""
system_meta = instance.system_metadata
instance.destroy()
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
quotas = objects.Quotas(context=context)
project_id, user_id = objects.quotas.ids_from_instance(context,
instance)
quotas.reserve(project_id=project_id, user_id=user_id, instances=-1,
cores=-instance.vcpus, ram=-instance.memory_mb)
self._complete_deletion(context,
instance,
bdms,
quotas,
system_meta)
def _complete_deletion(self, context, instance, bdms,
quotas, system_meta):
if quotas:
quotas.commit()
# ensure block device mappings are not leaked
for bdm in bdms:
bdm.destroy()
self._update_resource_tracker(context, instance)
self._notify_about_instance_usage(context, instance, "delete.end",
system_metadata=system_meta)
self._clean_instance_console_tokens(context, instance)
self._delete_scheduler_instance_info(context, instance.uuid)
def _create_reservations(self, context, instance, project_id, user_id):
vcpus = instance.vcpus
mem_mb = instance.memory_mb
quotas = objects.Quotas(context=context)
quotas.reserve(project_id=project_id,
user_id=user_id,
instances=-1,
cores=-vcpus,
ram=-mem_mb)
return quotas
    def _init_instance(self, context, instance):
        '''Initialize this instance during service init.

        Startup recovery entry point, run once per instance owned by
        this host: depending on the vm_state/task_state/power_state the
        instance was left in before the restart, it finishes interrupted
        deletions, retries reboots/stops/starts, rolls back crashed
        migrations, and optionally resumes guests after a host reboot.
        '''
        # NOTE(danms): If the instance appears to not be owned by this
        # host, it may have been evacuated away, but skipped by the
        # evacuation cleanup code due to configuration. Thus, if that
        # is a possibility, don't touch the instance in any way, but
        # log the concern. This will help avoid potential issues on
        # startup due to misconfiguration.
        if instance.host != self.host:
            LOG.warning(_LW('Instance %(uuid)s appears to not be owned '
                            'by this host, but by %(host)s. Startup '
                            'processing is being skipped.'),
                        {'uuid': instance.uuid,
                         'host': instance.host})
            return
        # Instances that are shut down, or in an error state can not be
        # initialized and are not attempted to be recovered. The exception
        # to this are instances that are in RESIZE_MIGRATING or DELETING,
        # which are dealt with further down.
        if (instance.vm_state == vm_states.SOFT_DELETED or
            (instance.vm_state == vm_states.ERROR and
            instance.task_state not in
            (task_states.RESIZE_MIGRATING, task_states.DELETING))):
            LOG.debug("Instance is in %s state.",
                      instance.vm_state, instance=instance)
            return
        if instance.vm_state == vm_states.DELETED:
            try:
                self._complete_partial_deletion(context, instance)
            except Exception:
                # we don't want that an exception blocks the init_host
                msg = _LE('Failed to complete a deletion')
                LOG.exception(msg, instance=instance)
            return
        if (instance.vm_state == vm_states.BUILDING or
            instance.task_state in [task_states.SCHEDULING,
                                    task_states.BLOCK_DEVICE_MAPPING,
                                    task_states.NETWORKING,
                                    task_states.SPAWNING]):
            # NOTE(dave-mcnally) compute stopped before instance was fully
            # spawned so set to ERROR state. This is safe to do as the state
            # may be set by the api but the host is not so if we get here the
            # instance has already been scheduled to this particular host.
            LOG.debug("Instance failed to spawn correctly, "
                      "setting to ERROR state", instance=instance)
            instance.task_state = None
            instance.vm_state = vm_states.ERROR
            instance.save()
            return
        if (instance.vm_state in [vm_states.ACTIVE, vm_states.STOPPED] and
            instance.task_state in [task_states.REBUILDING,
                                    task_states.REBUILD_BLOCK_DEVICE_MAPPING,
                                    task_states.REBUILD_SPAWNING]):
            # NOTE(jichenjc) compute stopped before instance was fully
            # spawned so set to ERROR state. This is consistent to BUILD
            LOG.debug("Instance failed to rebuild correctly, "
                      "setting to ERROR state", instance=instance)
            instance.task_state = None
            instance.vm_state = vm_states.ERROR
            instance.save()
            return
        # Interrupted snapshots: clean up and clear the task state.
        if (instance.vm_state != vm_states.ERROR and
            instance.task_state in [task_states.IMAGE_SNAPSHOT_PENDING,
                                    task_states.IMAGE_PENDING_UPLOAD,
                                    task_states.IMAGE_UPLOADING,
                                    task_states.IMAGE_SNAPSHOT]):
            LOG.debug("Instance in transitional state %s at start-up "
                      "clearing task state",
                      instance.task_state, instance=instance)
            try:
                self._post_interrupted_snapshot_cleanup(context, instance)
            except Exception:
                # we don't want that an exception blocks the init_host
                msg = _LE('Failed to cleanup snapshot.')
                LOG.exception(msg, instance=instance)
            instance.task_state = None
            instance.save()
        if (instance.vm_state != vm_states.ERROR and
            instance.task_state in [task_states.RESIZE_PREP]):
            LOG.debug("Instance in transitional state %s at start-up "
                      "clearing task state",
                      instance['task_state'], instance=instance)
            instance.task_state = None
            instance.save()
        # Restart a deletion that was interrupted by the service going
        # down.
        if instance.task_state == task_states.DELETING:
            try:
                LOG.info(_LI('Service started deleting the instance during '
                             'the previous run, but did not finish. Restarting'
                             ' the deletion now.'), instance=instance)
                instance.obj_load_attr('metadata')
                instance.obj_load_attr('system_metadata')
                bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
                    context, instance.uuid)
                project_id, user_id = objects.quotas.ids_from_instance(
                    context, instance)
                quotas = self._create_reservations(context, instance,
                                                   project_id, user_id)
                self._delete_instance(context, instance, bdms, quotas)
            except Exception:
                # we don't want that an exception blocks the init_host
                msg = _LE('Failed to complete a deletion')
                LOG.exception(msg, instance=instance)
                self._set_instance_obj_error_state(context, instance)
            return
        try_reboot, reboot_type = self._retry_reboot(context, instance)
        current_power_state = self._get_power_state(context, instance)
        if try_reboot:
            LOG.debug("Instance in transitional state (%(task_state)s) at "
                      "start-up and power state is (%(power_state)s), "
                      "triggering reboot",
                      {'task_state': instance.task_state,
                       'power_state': current_power_state},
                      instance=instance)
            # NOTE(mikal): if the instance was doing a soft reboot that got as
            # far as shutting down the instance but not as far as starting it
            # again, then we've just become a hard reboot. That means the
            # task state for the instance needs to change so that we're in one
            # of the expected task states for a hard reboot.
            soft_types = [task_states.REBOOT_STARTED,
                          task_states.REBOOT_PENDING,
                          task_states.REBOOTING]
            if instance.task_state in soft_types and reboot_type == 'HARD':
                instance.task_state = task_states.REBOOT_PENDING_HARD
                instance.save()
            self.reboot_instance(context, instance, block_device_info=None,
                                 reboot_type=reboot_type)
            return
        elif (current_power_state == power_state.RUNNING and
              instance.task_state in [task_states.REBOOT_STARTED,
                                      task_states.REBOOT_STARTED_HARD,
                                      task_states.PAUSING,
                                      task_states.UNPAUSING]):
            LOG.warning(_LW("Instance in transitional state "
                            "(%(task_state)s) at start-up and power state "
                            "is (%(power_state)s), clearing task state"),
                        {'task_state': instance.task_state,
                         'power_state': current_power_state},
                        instance=instance)
            instance.task_state = None
            instance.vm_state = vm_states.ACTIVE
            instance.save()
        elif (current_power_state == power_state.PAUSED and
              instance.task_state == task_states.UNPAUSING):
            LOG.warning(_LW("Instance in transitional state "
                            "(%(task_state)s) at start-up and power state "
                            "is (%(power_state)s), clearing task state "
                            "and unpausing the instance"),
                        {'task_state': instance.task_state,
                         'power_state': current_power_state},
                        instance=instance)
            try:
                self.unpause_instance(context, instance)
            except NotImplementedError:
                # Some virt driver didn't support pause and unpause
                pass
            except Exception:
                LOG.exception(_LE('Failed to unpause instance'),
                              instance=instance)
            return
        if instance.task_state == task_states.POWERING_OFF:
            try:
                LOG.debug("Instance in transitional state %s at start-up "
                          "retrying stop request",
                          instance.task_state, instance=instance)
                self.stop_instance(context, instance, True)
            except Exception:
                # we don't want that an exception blocks the init_host
                msg = _LE('Failed to stop instance')
                LOG.exception(msg, instance=instance)
            return
        if instance.task_state == task_states.POWERING_ON:
            try:
                LOG.debug("Instance in transitional state %s at start-up "
                          "retrying start request",
                          instance.task_state, instance=instance)
                self.start_instance(context, instance)
            except Exception:
                # we don't want that an exception blocks the init_host
                msg = _LE('Failed to start instance')
                LOG.exception(msg, instance=instance)
            return
        net_info = compute_utils.get_nw_info_for_instance(instance)
        try:
            self.driver.plug_vifs(instance, net_info)
        except NotImplementedError as e:
            LOG.debug(e, instance=instance)
        except exception.VirtualInterfacePlugException:
            # we don't want an exception to block the init_host
            LOG.exception(_LE("Vifs plug failed"), instance=instance)
            self._set_instance_obj_error_state(context, instance)
            return
        if instance.task_state == task_states.RESIZE_MIGRATING:
            # We crashed during resize/migration, so roll back for safety
            try:
                # NOTE(mriedem): check old_vm_state for STOPPED here, if it's
                # not in system_metadata we default to True for backwards
                # compatibility
                power_on = (instance.system_metadata.get('old_vm_state') !=
                            vm_states.STOPPED)
                block_dev_info = self._get_instance_block_device_info(context,
                                                                      instance)
                self.driver.finish_revert_migration(context,
                    instance, net_info, block_dev_info, power_on)
            except Exception:
                LOG.exception(_LE('Failed to revert crashed migration'),
                              instance=instance)
            finally:
                LOG.info(_LI('Instance found in migrating state during '
                             'startup. Resetting task_state'),
                         instance=instance)
                instance.task_state = None
                instance.save()
        if instance.task_state == task_states.MIGRATING:
            # Live migration did not complete, but instance is on this
            # host, so reset the state.
            instance.task_state = None
            instance.save(expected_task_state=[task_states.MIGRATING])
        db_state = instance.power_state
        drv_state = self._get_power_state(context, instance)
        expect_running = (db_state == power_state.RUNNING and
                          drv_state != db_state)
        LOG.debug('Current state is %(drv_state)s, state in DB is '
                  '%(db_state)s.',
                  {'drv_state': drv_state, 'db_state': db_state},
                  instance=instance)
        if expect_running and CONF.resume_guests_state_on_host_boot:
            LOG.info(_LI('Rebooting instance after nova-compute restart.'),
                     instance=instance)
            block_device_info = \
                self._get_instance_block_device_info(context, instance)
            try:
                self.driver.resume_state_on_host_boot(
                    context, instance, net_info, block_device_info)
            except NotImplementedError:
                LOG.warning(_LW('Hypervisor driver does not support '
                                'resume guests'), instance=instance)
            except Exception:
                # NOTE(vish): The instance failed to resume, so we set the
                # instance to error and attempt to continue.
                LOG.warning(_LW('Failed to resume instance'),
                            instance=instance)
                self._set_instance_obj_error_state(context, instance)
        elif drv_state == power_state.RUNNING:
            # VMwareAPI drivers will raise an exception
            try:
                self.driver.ensure_filtering_rules_for_instance(
                    instance, net_info)
            except NotImplementedError:
                LOG.debug('Hypervisor driver does not support '
                          'firewall rules', instance=instance)
def _retry_reboot(self, context, instance):
current_power_state = self._get_power_state(context, instance)
current_task_state = instance.task_state
retry_reboot = False
reboot_type = compute_utils.get_reboot_type(current_task_state,
current_power_state)
pending_soft = (current_task_state == task_states.REBOOT_PENDING and
instance.vm_state in vm_states.ALLOW_SOFT_REBOOT)
pending_hard = (current_task_state == task_states.REBOOT_PENDING_HARD
and instance.vm_state in vm_states.ALLOW_HARD_REBOOT)
started_not_running = (current_task_state in
[task_states.REBOOT_STARTED,
task_states.REBOOT_STARTED_HARD] and
current_power_state != power_state.RUNNING)
if pending_soft or pending_hard or started_not_running:
retry_reboot = True
return retry_reboot, reboot_type
def handle_lifecycle_event(self, event):
LOG.info(_LI("VM %(state)s (Lifecycle Event)"),
{'state': event.get_name()},
instance_uuid=event.get_instance_uuid())
context = nova.context.get_admin_context(read_deleted='yes')
instance = objects.Instance.get_by_uuid(context,
event.get_instance_uuid(),
expected_attrs=[])
vm_power_state = None
if event.get_transition() == virtevent.EVENT_LIFECYCLE_STOPPED:
vm_power_state = power_state.SHUTDOWN
elif event.get_transition() == virtevent.EVENT_LIFECYCLE_STARTED:
vm_power_state = power_state.RUNNING
elif event.get_transition() == virtevent.EVENT_LIFECYCLE_PAUSED:
vm_power_state = power_state.PAUSED
elif event.get_transition() == virtevent.EVENT_LIFECYCLE_RESUMED:
vm_power_state = power_state.RUNNING
elif event.get_transition() == virtevent.EVENT_LIFECYCLE_SUSPENDED:
vm_power_state = power_state.SUSPENDED
else:
LOG.warning(_LW("Unexpected power state %d"),
event.get_transition())
# Note(lpetrut): The event may be delayed, thus not reflecting
# the current instance power state. In that case, ignore the event.
current_power_state = self._get_power_state(context, instance)
if current_power_state == vm_power_state:
LOG.debug('Synchronizing instance power state after lifecycle '
'event "%(event)s"; current vm_state: %(vm_state)s, '
'current task_state: %(task_state)s, current DB '
'power_state: %(db_power_state)s, VM power_state: '
'%(vm_power_state)s',
{'event': event.get_name(),
'vm_state': instance.vm_state,
'task_state': instance.task_state,
'db_power_state': instance.power_state,
'vm_power_state': vm_power_state},
instance_uuid=instance.uuid)
self._sync_instance_power_state(context,
instance,
vm_power_state)
def handle_events(self, event):
if isinstance(event, virtevent.LifecycleEvent):
try:
self.handle_lifecycle_event(event)
except exception.InstanceNotFound:
LOG.debug("Event %s arrived for non-existent instance. The "
"instance was probably deleted.", event)
else:
LOG.debug("Ignoring event %s", event)
def init_virt_events(self):
if CONF.workarounds.handle_virt_lifecycle_events:
self.driver.register_event_listener(self.handle_events)
else:
# NOTE(mriedem): If the _sync_power_states periodic task is
# disabled we should emit a warning in the logs.
if CONF.sync_power_state_interval < 0:
LOG.warning(_LW('Instance lifecycle events from the compute '
'driver have been disabled. Note that lifecycle '
'changes to an instance outside of the compute '
'service will not be synchronized '
'automatically since the _sync_power_states '
'periodic task is also disabled.'))
else:
LOG.info(_LI('Instance lifecycle events from the compute '
'driver have been disabled. Note that lifecycle '
'changes to an instance outside of the compute '
'service will only be synchronized by the '
'_sync_power_states periodic task.'))
    def init_host(self):
        """Initialization for a standalone compute service.

        Initializes the virt driver, cleans up instances evacuated while
        the service was down, and recovers every remaining instance on
        this host via _init_instance.
        """
        self.driver.init_host(host=self.host)
        context = nova.context.get_admin_context()
        instances = objects.InstanceList.get_by_host(
            context, self.host, expected_attrs=['info_cache', 'metadata'])
        # Batch iptables updates during startup so each instance init
        # does not trigger its own apply.
        if CONF.defer_iptables_apply:
            self.driver.filter_defer_apply_on()
        self.init_virt_events()
        try:
            # checking that instance was not already evacuated to other host
            self._destroy_evacuated_instances(context)
            for instance in instances:
                self._init_instance(context, instance)
        finally:
            if CONF.defer_iptables_apply:
                self.driver.filter_defer_apply_off()
            # Push the full instance list to the scheduler even when
            # instance init failed partway through.
            self._update_scheduler_instance_info(context, instances)
    def cleanup_host(self):
        """Tear down compute service state on shutdown.

        Deregisters the event listener, cancels any outstanding
        instance-event waits, and lets the virt driver clean up.
        """
        self.driver.register_event_listener(None)
        self.instance_events.cancel_all_events()
        self.driver.cleanup_host(host=self.host)
    def pre_start_hook(self):
        """After the service is initialized, but before we fully bring
        the service up by listening on RPC queues, make sure to update
        our available resources (and indirectly our available nodes).
        """
        self.update_available_resource(nova.context.get_admin_context())
def _get_power_state(self, context, instance):
"""Retrieve the power state for the given instance."""
LOG.debug('Checking state', instance=instance)
try:
return self.driver.get_info(instance).state
except exception.InstanceNotFound:
return power_state.NOSTATE
def get_console_topic(self, context):
"""Retrieves the console host for a project on this host.
Currently this is just set in the flags for each compute host.
"""
# TODO(mdragon): perhaps make this variable by console_type?
return '%s.%s' % (CONF.console_topic, CONF.console_host)
    @wrap_exception()
    def get_console_pool_info(self, context, console_type):
        """Pass-through to the virt driver's console pool lookup."""
        return self.driver.get_console_pool_info(console_type)
    # NOTE(hanlind): This and the virt method it calls can be removed in
    # version 5.0 of the RPC API
    @wrap_exception()
    def refresh_security_group_rules(self, context, security_group_id):
        """Tell the virtualization driver to refresh security group rules.

        Passes straight through to the virtualization driver.
        """
        return self.driver.refresh_security_group_rules(security_group_id)
    @object_compat
    @wrap_exception()
    def refresh_instance_security_rules(self, context, instance):
        """Tell the virtualization driver to refresh security rules for
        an instance.

        Passes straight through to the virtualization driver.

        Synchronise the call because we may still be in the middle of
        creating the instance.
        """
        @utils.synchronized(instance.uuid)
        def _sync_refresh():
            try:
                return self.driver.refresh_instance_security_rules(instance)
            except NotImplementedError:
                # Not all drivers implement security rule refresh; treat
                # it as a no-op in that case.
                LOG.debug('Hypervisor driver does not support '
                          'security groups.', instance=instance)
        return _sync_refresh()
def _await_block_device_map_created(self, context, vol_id):
# TODO(yamahata): creating volume simultaneously
# reduces creation time?
# TODO(yamahata): eliminate dumb polling
start = time.time()
retries = CONF.block_device_allocate_retries
if retries < 0:
LOG.warning(_LW("Treating negative config value (%(retries)s) for "
"'block_device_retries' as 0."),
{'retries': retries})
# (1) treat negative config value as 0
# (2) the configured value is 0, one attempt should be made
# (3) the configured value is > 0, then the total number attempts
# is (retries + 1)
attempts = 1
if retries >= 1:
attempts = retries + 1
for attempt in range(1, attempts + 1):
volume = self.volume_api.get(context, vol_id)
volume_status = volume['status']
if volume_status not in ['creating', 'downloading']:
if volume_status == 'available':
return attempt
LOG.warning(_LW("Volume id: %(vol_id)s finished being "
"created but its status is %(vol_status)s."),
{'vol_id': vol_id,
'vol_status': volume_status})
break
greenthread.sleep(CONF.block_device_allocate_retries_interval)
raise exception.VolumeNotCreated(volume_id=vol_id,
seconds=int(time.time() - start),
attempts=attempt,
volume_status=volume_status)
def _decode_files(self, injected_files):
"""Base64 decode the list of files to inject."""
if not injected_files:
return []
def _decode(f):
path, contents = f
try:
decoded = base64.b64decode(contents)
return path, decoded
except TypeError:
raise exception.Base64Exception(path=path)
return [_decode(f) for f in injected_files]
    def _validate_instance_group_policy(self, context, instance,
                                        filter_properties):
        """Re-check the instance's (anti-)affinity group policy on this host.

        :raises exception.RescheduledException: when placing the
            instance here would violate the group policy.
        """
        # NOTE(russellb) Instance group policy is enforced by the scheduler.
        # However, there is a race condition with the enforcement of
        # the policy. Since more than one instance may be scheduled at the
        # same time, it's possible that more than one instance with an
        # anti-affinity policy may end up here. It's also possible that
        # multiple instances with an affinity policy could end up on different
        # hosts. This is a validation step to make sure that starting the
        # instance here doesn't violate the policy.
        scheduler_hints = filter_properties.get('scheduler_hints') or {}
        group_hint = scheduler_hints.get('group')
        if not group_hint:
            return
        # Serialize per group so concurrent builds of the same group on
        # this host see each other's placement.
        @utils.synchronized(group_hint)
        def _do_validation(context, instance, group_hint):
            group = objects.InstanceGroup.get_by_hint(context, group_hint)
            if 'anti-affinity' in group.policies:
                group_hosts = group.get_hosts(exclude=[instance.uuid])
                if self.host in group_hosts:
                    msg = _("Anti-affinity instance group policy "
                            "was violated.")
                    raise exception.RescheduledException(
                            instance_uuid=instance.uuid,
                            reason=msg)
            elif 'affinity' in group.policies:
                group_hosts = group.get_hosts(exclude=[instance.uuid])
                if group_hosts and self.host not in group_hosts:
                    msg = _("Affinity instance group policy was violated.")
                    raise exception.RescheduledException(
                            instance_uuid=instance.uuid,
                            reason=msg)
        _do_validation(context, instance, group_hint)
def _log_original_error(self, exc_info, instance_uuid):
LOG.error(_LE('Error: %s'), exc_info[1], instance_uuid=instance_uuid,
exc_info=exc_info)
    def _reschedule(self, context, request_spec, filter_properties,
                    instance, reschedule_method, method_args, task_state,
                    exc_info=None):
        """Attempt to re-schedule a compute operation.

        Returns True when the reschedule was kicked off, or None when
        retry information or a request spec is missing (in which case no
        reschedule is possible).
        """
        instance_uuid = instance.uuid
        retry = filter_properties.get('retry')
        if not retry:
            # no retry information, do not reschedule.
            LOG.debug("Retry info not present, will not reschedule",
                      instance_uuid=instance_uuid)
            return
        if not request_spec:
            LOG.debug("No request spec, will not reschedule",
                      instance_uuid=instance_uuid)
            return
        LOG.debug("Re-scheduling %(method)s: attempt %(num)d",
                  {'method': reschedule_method.__name__,
                   'num': retry['num_attempts']}, instance_uuid=instance_uuid)
        # reset the task state:
        self._instance_update(context, instance, task_state=task_state)
        if exc_info:
            # stringify to avoid circular ref problem in json serialization:
            retry['exc'] = traceback.format_exception_only(exc_info[0],
                                                           exc_info[1])
        reschedule_method(context, *method_args)
        return True
    @periodic_task.periodic_task
    def _check_instance_build_time(self, context):
        """Ensure that instances are not stuck in build."""
        timeout = CONF.instance_build_timeout
        # A timeout of 0 disables this check entirely.
        if timeout == 0:
            return
        filters = {'vm_state': vm_states.BUILDING,
                   'host': self.host}
        building_insts = objects.InstanceList.get_by_filters(context,
                           filters, expected_attrs=[], use_slave=True)
        for instance in building_insts:
            if timeutils.is_older_than(instance.created_at, timeout):
                self._set_instance_obj_error_state(context, instance)
                LOG.warning(_LW("Instance build timed out. Set to error "
                                "state."), instance=instance)
def _check_instance_exists(self, context, instance):
"""Ensure an instance with the same name is not already present."""
if self.driver.instance_exists(instance):
raise exception.InstanceExists(name=instance.name)
    def _allocate_network_async(self, context, instance, requested_networks,
                                macs, security_groups, is_vpn, dhcp_options):
        """Method used to allocate networks in the background.

        Broken out for testing.

        Retries the allocation up to network_allocate_retries times with
        exponential backoff (capped at 30 seconds) before re-raising the
        last failure.
        """
        LOG.debug("Allocating IP information in the background.",
                  instance=instance)
        retries = CONF.network_allocate_retries
        if retries < 0:
            LOG.warning(_LW("Treating negative config value (%(retries)s) for "
                            "'network_allocate_retries' as 0."),
                        {'retries': retries})
            retries = 0
        attempts = retries + 1
        retry_time = 1
        bind_host_id = self.driver.network_binding_host_id(context, instance)
        for attempt in range(1, attempts + 1):
            try:
                nwinfo = self.network_api.allocate_for_instance(
                        context, instance, vpn=is_vpn,
                        requested_networks=requested_networks,
                        macs=macs,
                        security_groups=security_groups,
                        dhcp_options=dhcp_options,
                        bind_host_id=bind_host_id)
                LOG.debug('Instance network_info: |%s|', nwinfo,
                          instance=instance)
                instance.system_metadata['network_allocated'] = 'True'
                # NOTE(JoshNang) do not save the instance here, as it can cause
                # races. The caller shares a reference to instance and waits
                # for this async greenthread to finish before calling
                # instance.save().
                return nwinfo
            except Exception:
                exc_info = sys.exc_info()
                log_info = {'attempt': attempt,
                            'attempts': attempts}
                if attempt == attempts:
                    # Out of retries: re-raise the original exception.
                    LOG.exception(_LE('Instance failed network setup '
                                      'after %(attempts)d attempt(s)'),
                                  log_info)
                    six.reraise(*exc_info)
                LOG.warning(_LW('Instance failed network setup '
                                '(attempt %(attempt)d of %(attempts)d)'),
                            log_info, instance=instance)
                # Exponential backoff between attempts, capped at 30s.
                time.sleep(retry_time)
                retry_time *= 2
                if retry_time > 30:
                    retry_time = 30
        # Not reached.
    def _build_networks_for_instance(self, context, instance,
                                     requested_networks, security_groups):
        """Set up networking for the instance, reusing a prior allocation
        when rescheduling, otherwise starting an async allocation.
        """
        # If we're here from a reschedule the network may already be allocated.
        if strutils.bool_from_string(
                instance.system_metadata.get('network_allocated', 'False')):
            # NOTE(alex_xu): The network_allocated is True means the network
            # resource already allocated at previous scheduling, and the
            # network setup is cleanup at previous. After rescheduling, the
            # network resource need setup on the new host.
            self.network_api.setup_instance_network_on_host(
                context, instance, instance.host)
            return self.network_api.get_instance_nw_info(context, instance)

        if not self.is_neutron_security_groups:
            # nova-network handles security groups itself.
            security_groups = []

        macs = self.driver.macs_for_instance(instance)
        dhcp_options = self.driver.dhcp_options_for_instance(instance)
        network_info = self._allocate_network(context, instance,
                requested_networks, macs, security_groups, dhcp_options)
        return network_info
    def _allocate_network(self, context, instance, requested_networks, macs,
                          security_groups, dhcp_options):
        """Start network allocation asynchronously. Return an instance
        of NetworkInfoAsyncWrapper that can be used to retrieve the
        allocated networks when the operation has finished.
        """
        # NOTE(comstud): Since we're allocating networks asynchronously,
        # this task state has little meaning, as we won't be in this
        # state for very long.
        instance.vm_state = vm_states.BUILDING
        instance.task_state = task_states.NETWORKING
        # expected_task_state=[None] guards against a concurrent state
        # change (e.g. a delete racing with the build).
        instance.save(expected_task_state=[None])
        self._update_resource_tracker(context, instance)

        is_vpn = pipelib.is_vpn_image(instance.image_ref)
        return network_model.NetworkInfoAsyncWrapper(
                self._allocate_network_async, context, instance,
                requested_networks, macs, security_groups, is_vpn,
                dhcp_options)
def _default_root_device_name(self, instance, image_meta, root_bdm):
try:
return self.driver.default_root_device_name(instance,
image_meta,
root_bdm)
except NotImplementedError:
return compute_utils.get_next_device_name(instance, [])
def _default_device_names_for_instance(self, instance,
root_device_name,
*block_device_lists):
try:
self.driver.default_device_names_for_instance(instance,
root_device_name,
*block_device_lists)
except NotImplementedError:
compute_utils.default_device_names_for_instance(
instance, root_device_name, *block_device_lists)
def _get_device_name_for_instance(self, instance, bdms, block_device_obj):
# NOTE(ndipanov): Copy obj to avoid changing the original
block_device_obj = block_device_obj.obj_clone()
try:
return self.driver.get_device_name_for_instance(
instance, bdms, block_device_obj)
except NotImplementedError:
return compute_utils.get_device_name_for_instance(
instance, bdms, block_device_obj.get("device_name"))
    def _default_block_device_names(self, context, instance,
                                    image_meta, block_devices):
        """Verify that all the devices have the device_name set. If not,
        provide a default name.

        It also ensures that there is a root_device_name and is set to the
        first block device in the boot sequence (boot_index=0).
        """
        root_bdm = block_device.get_root_bdm(block_devices)
        if not root_bdm:
            return

        # Get the root_device_name from the root BDM or the instance
        root_device_name = None
        update_root_bdm = False

        if root_bdm.device_name:
            # Prefer the name already on the root BDM and mirror it onto
            # the instance.
            root_device_name = root_bdm.device_name
            instance.root_device_name = root_device_name
        elif instance.root_device_name:
            # Fall back to the instance's name and copy it to the BDM.
            root_device_name = instance.root_device_name
            root_bdm.device_name = root_device_name
            update_root_bdm = True
        else:
            # Neither has one; ask the driver (or compute_utils) for a
            # default and set it on both.
            root_device_name = self._default_root_device_name(instance,
                                                              image_meta,
                                                              root_bdm)

            instance.root_device_name = root_device_name
            root_bdm.device_name = root_device_name
            update_root_bdm = True

        if update_root_bdm:
            root_bdm.save()

        ephemerals = list(filter(block_device.new_format_is_ephemeral,
                            block_devices))
        swap = list(filter(block_device.new_format_is_swap,
                      block_devices))
        block_device_mapping = list(filter(
              driver_block_device.is_block_device_mapping, block_devices))

        self._default_device_names_for_instance(instance,
                                                root_device_name,
                                                ephemerals,
                                                swap,
                                                block_device_mapping)
def _block_device_info_to_legacy(self, block_device_info):
"""Convert BDI to the old format for drivers that need it."""
if self.use_legacy_block_device_info:
ephemerals = driver_block_device.legacy_block_devices(
driver.block_device_info_get_ephemerals(block_device_info))
mapping = driver_block_device.legacy_block_devices(
driver.block_device_info_get_mapping(block_device_info))
swap = block_device_info['swap']
if swap:
swap = swap.legacy()
block_device_info.update({
'ephemerals': ephemerals,
'swap': swap,
'block_device_mapping': mapping})
def _check_dev_name(self, bdms, instance):
bdms_no_device_name = [x for x in bdms if x.device_name is None]
for bdm in bdms_no_device_name:
device_name = self._get_device_name_for_instance(instance,
bdms,
bdm)
values = {'device_name': device_name}
bdm.update(values)
    def _prep_block_device(self, context, instance, bdms,
                           do_check_attach=True):
        """Set up the block device for an instance with error logging.

        Returns the driver block_device_info structure (possibly
        converted to legacy format for older drivers).

        :raises exception.VolumeLimitExceeded: when volume creation hits
            the volume quota.
        :raises exception.InvalidBDM: for any other block device failure.
        """
        try:
            self._check_dev_name(bdms, instance)
            block_device_info = driver.get_block_device_info(instance, bdms)
            mapping = driver.block_device_info_get_mapping(block_device_info)
            driver_block_device.attach_block_devices(
                mapping, context, instance, self.volume_api, self.driver,
                do_check_attach=do_check_attach,
                wait_func=self._await_block_device_map_created)

            self._block_device_info_to_legacy(block_device_info)

            return block_device_info

        except exception.OverQuota:
            # Translate the quota failure into the API-visible exception.
            msg = _LW('Failed to create block device for instance due to '
                      'being over volume resource quota')
            LOG.warning(msg, instance=instance)
            raise exception.VolumeLimitExceeded()

        except Exception:
            LOG.exception(_LE('Instance failed block device setup'),
                          instance=instance)
            raise exception.InvalidBDM()
def _update_instance_after_spawn(self, context, instance):
instance.power_state = self._get_power_state(context, instance)
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
instance.launched_at = timeutils.utcnow()
configdrive.update_instance(instance)
def _update_scheduler_instance_info(self, context, instance):
"""Sends an InstanceList with created or updated Instance objects to
the Scheduler client.
In the case of init_host, the value passed will already be an
InstanceList. Other calls will send individual Instance objects that
have been created or resized. In this case, we create an InstanceList
object containing that Instance.
"""
if not self.send_instance_updates:
return
if isinstance(instance, obj_instance.Instance):
instance = objects.InstanceList(objects=[instance])
context = context.elevated()
self.scheduler_client.update_instance_info(context, self.host,
instance)
def _delete_scheduler_instance_info(self, context, instance_uuid):
"""Sends the uuid of the deleted Instance to the Scheduler client."""
if not self.send_instance_updates:
return
context = context.elevated()
self.scheduler_client.delete_instance_info(context, self.host,
instance_uuid)
@periodic_task.periodic_task(spacing=CONF.scheduler_instance_sync_interval)
def _sync_scheduler_instance_info(self, context):
if not self.send_instance_updates:
return
context = context.elevated()
instances = objects.InstanceList.get_by_host(context, self.host,
expected_attrs=[],
use_slave=True)
uuids = [instance.uuid for instance in instances]
self.scheduler_client.sync_instance_info(context, self.host, uuids)
    def _notify_about_instance_usage(self, context, instance, event_suffix,
                                     network_info=None, system_metadata=None,
                                     extra_usage_info=None, fault=None):
        # Thin wrapper: emit an instance-usage notification (event_suffix is
        # e.g. 'create.start' / 'shutdown.end') via this manager's notifier.
        compute_utils.notify_about_instance_usage(
            self.notifier, context, instance, event_suffix,
            network_info=network_info,
            system_metadata=system_metadata,
            extra_usage_info=extra_usage_info, fault=fault)
def _deallocate_network(self, context, instance,
requested_networks=None):
LOG.debug('Deallocating network for instance', instance=instance)
with timeutils.StopWatch() as timer:
self.network_api.deallocate_for_instance(
context, instance, requested_networks=requested_networks)
# nova-network does an rpc call so we're OK tracking time spent here
LOG.info(_LI('Took %0.2f seconds to deallocate network for instance.'),
timer.elapsed(), instance=instance)
    def _get_instance_block_device_info(self, context, instance,
                                        refresh_conn_info=False,
                                        bdms=None):
        """Transform block devices to the driver block_device format.

        :param refresh_conn_info: when True, refresh every mapping's
            connection_info via the volume API; when False, mappings
            whose connection_info is unset are filtered out instead.
        :param bdms: optional BlockDeviceMappingList; looked up from the
            database by instance uuid when not supplied.
        :returns: block_device_info dict (legacy format if the driver
            requires it).
        """
        if not bdms:
            bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
                context, instance.uuid)
        block_device_info = driver.get_block_device_info(instance, bdms)
        if not refresh_conn_info:
            # if the block_device_mapping has no value in connection_info
            # (returned as None), don't include in the mapping
            block_device_info['block_device_mapping'] = [
                bdm for bdm in driver.block_device_info_get_mapping(
                    block_device_info)
                if bdm.get('connection_info')]
        else:
            driver_block_device.refresh_conn_infos(
                driver.block_device_info_get_mapping(block_device_info),
                context, instance, self.volume_api, self.driver)
        self._block_device_info_to_legacy(block_device_info)
        return block_device_info
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_fault
    def build_and_run_instance(self, context, instance, image, request_spec,
                               filter_properties, admin_password=None,
                               injected_files=None, requested_networks=None,
                               security_groups=None, block_device_mapping=None,
                               node=None, limits=None):
        """RPC entry point for building an instance on this host.

        Returns immediately: the real work runs in a spawned thread,
        serialized per instance uuid and throttled by the build
        semaphore so RPC workers are not tied up for the (potentially
        long) duration of the build.
        """

        @utils.synchronized(instance.uuid)
        def _locked_do_build_and_run_instance(*args, **kwargs):
            # NOTE(danms): We grab the semaphore with the instance uuid
            # locked because we could wait in line to build this instance
            # for a while and we want to make sure that nothing else tries
            # to do anything with this instance while we wait.
            with self._build_semaphore:
                self._do_build_and_run_instance(*args, **kwargs)

        # NOTE(danms): We spawn here to return the RPC worker thread back to
        # the pool. Since what follows could take a really long time, we don't
        # want to tie up RPC workers.
        utils.spawn_n(_locked_do_build_and_run_instance,
                      context, instance, image, request_spec,
                      filter_properties, admin_password, injected_files,
                      requested_networks, security_groups,
                      block_device_mapping, node, limits)
    @hooks.add_hook('build_instance')
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def _do_build_and_run_instance(self, context, instance, image,
            request_spec, filter_properties, admin_password, injected_files,
            requested_networks, security_groups, block_device_mapping,
            node=None, limits=None):
        """Drive one build attempt and map the outcome to a build_results
        code: ACTIVE on success, RESCHEDULED when the build is handed
        back to conductor for another scheduling pass, FAILED otherwise.
        """
        try:
            LOG.debug('Starting instance...', context=context,
                      instance=instance)
            instance.vm_state = vm_states.BUILDING
            instance.task_state = None
            instance.save(expected_task_state=
                    (task_states.SCHEDULING, None))
        except exception.InstanceNotFound:
            msg = 'Instance disappeared before build.'
            LOG.debug(msg, instance=instance)
            return build_results.FAILED
        except exception.UnexpectedTaskStateError as e:
            LOG.debug(e.format_message(), instance=instance)
            return build_results.FAILED

        # b64 decode the files to inject:
        decoded_files = self._decode_files(injected_files)

        if limits is None:
            limits = {}

        if node is None:
            node = self.driver.get_available_nodes(refresh=True)[0]
            LOG.debug('No node specified, defaulting to %s', node,
                      instance=instance)

        try:
            with timeutils.StopWatch() as timer:
                self._build_and_run_instance(context, instance, image,
                        decoded_files, admin_password, requested_networks,
                        security_groups, block_device_mapping, node, limits,
                        filter_properties)
            LOG.info(_LI('Took %0.2f seconds to build instance.'),
                     timer.elapsed(), instance=instance)
            return build_results.ACTIVE
        except exception.RescheduledException as e:
            # Reschedulable failure: retry on another host if retry info
            # is available; otherwise fail the instance permanently.
            retry = filter_properties.get('retry')
            if not retry:
                # no retry information, do not reschedule.
                LOG.debug("Retry info not present, will not reschedule",
                    instance=instance)
                self._cleanup_allocated_networks(context, instance,
                    requested_networks)
                compute_utils.add_instance_fault_from_exc(context,
                        instance, e, sys.exc_info(),
                        fault_message=e.kwargs['reason'])
                self._nil_out_instance_obj_host_and_node(instance)
                self._set_instance_obj_error_state(context, instance,
                                                   clean_task_state=True)
                return build_results.FAILED
            LOG.debug(e.format_message(), instance=instance)
            # This will be used for logging the exception
            retry['exc'] = traceback.format_exception(*sys.exc_info())
            # This will be used for setting the instance fault message
            retry['exc_reason'] = e.kwargs['reason']
            # NOTE(comstud): Deallocate networks if the driver wants
            # us to do so.
            # NOTE(vladikr): SR-IOV ports should be deallocated to
            # allow new sriov pci devices to be allocated on a new host.
            # Otherwise, if devices with pci addresses are already allocated
            # on the destination host, the instance will fail to spawn.
            # info_cache.network_info should be present at this stage.
            if (self.driver.deallocate_networks_on_reschedule(instance) or
                    self.deallocate_sriov_ports_on_reschedule(instance)):
                self._cleanup_allocated_networks(context, instance,
                        requested_networks)
            else:
                # NOTE(alex_xu): Network already allocated and we don't
                # want to deallocate them before rescheduling. But we need
                # to cleanup those network resources setup on this host before
                # rescheduling.
                self.network_api.cleanup_instance_network_on_host(
                    context, instance, self.host)

            self._nil_out_instance_obj_host_and_node(instance)
            instance.task_state = task_states.SCHEDULING
            instance.save()

            self.compute_task_api.build_instances(context, [instance],
                    image, filter_properties, admin_password,
                    injected_files, requested_networks, security_groups,
                    block_device_mapping)
            return build_results.RESCHEDULED
        except (exception.InstanceNotFound,
                exception.UnexpectedDeletingTaskStateError):
            msg = 'Instance disappeared during build.'
            LOG.debug(msg, instance=instance)
            self._cleanup_allocated_networks(context, instance,
                    requested_networks)
            return build_results.FAILED
        except exception.BuildAbortException as e:
            # Non-reschedulable failure: tear everything down and fail.
            LOG.exception(e.format_message(), instance=instance)
            self._cleanup_allocated_networks(context, instance,
                    requested_networks)
            self._cleanup_volumes(context, instance.uuid,
                    block_device_mapping, raise_exc=False)
            compute_utils.add_instance_fault_from_exc(context, instance,
                    e, sys.exc_info())
            self._nil_out_instance_obj_host_and_node(instance)
            self._set_instance_obj_error_state(context, instance,
                                               clean_task_state=True)
            return build_results.FAILED
        except Exception as e:
            # Should not reach here.
            msg = _LE('Unexpected build failure, not rescheduling build.')
            LOG.exception(msg, instance=instance)
            self._cleanup_allocated_networks(context, instance,
                    requested_networks)
            self._cleanup_volumes(context, instance.uuid,
                    block_device_mapping, raise_exc=False)
            compute_utils.add_instance_fault_from_exc(context, instance,
                    e, sys.exc_info())
            self._nil_out_instance_obj_host_and_node(instance)
            self._set_instance_obj_error_state(context, instance,
                                               clean_task_state=True)
            return build_results.FAILED
def deallocate_sriov_ports_on_reschedule(self, instance):
"""Determine if networks are needed to be deallocated before reschedule
Check the cached network info for any assigned SR-IOV ports.
SR-IOV ports should be deallocated prior to rescheduling
in order to allow new sriov pci devices to be allocated on a new host.
"""
info_cache = instance.info_cache
def _has_sriov_port(vif):
return vif['vnic_type'] in network_model.VNIC_TYPES_SRIOV
if (info_cache and info_cache.network_info):
for vif in info_cache.network_info:
if _has_sriov_port(vif):
return True
return False
    def _build_and_run_instance(self, context, instance, image, injected_files,
            admin_password, requested_networks, security_groups,
            block_device_mapping, node, limits, filter_properties):
        """Claim resources, build networks/volumes and spawn the instance.

        Raises RescheduledException for failures that may succeed on a
        different host (resource claim failures, unexpected spawn
        errors) and BuildAbortException for failures that should not be
        retried (network allocation problems, bad flavor/image).
        """
        image_name = image.get('name')
        self._notify_about_instance_usage(context, instance, 'create.start',
                extra_usage_info={'image_name': image_name})
        try:
            rt = self._get_resource_tracker(node)
            with rt.instance_claim(context, instance, limits):
                # NOTE(russellb) It's important that this validation be done
                # *after* the resource tracker instance claim, as that is where
                # the host is set on the instance.
                self._validate_instance_group_policy(context, instance,
                        filter_properties)
                image_meta = objects.ImageMeta.from_dict(image)
                with self._build_resources(context, instance,
                        requested_networks, security_groups, image_meta,
                        block_device_mapping) as resources:
                    instance.vm_state = vm_states.BUILDING
                    instance.task_state = task_states.SPAWNING
                    # NOTE(JoshNang) This also saves the changes to the
                    # instance from _allocate_network_async, as they aren't
                    # saved in that function to prevent races.
                    instance.save(expected_task_state=
                            task_states.BLOCK_DEVICE_MAPPING)
                    block_device_info = resources['block_device_info']
                    network_info = resources['network_info']
                    LOG.debug('Start spawning the instance on the hypervisor.',
                              instance=instance)
                    with timeutils.StopWatch() as timer:
                        self.driver.spawn(context, instance, image_meta,
                                          injected_files, admin_password,
                                          network_info=network_info,
                                          block_device_info=block_device_info)
                    LOG.info(_LI('Took %0.2f seconds to spawn the instance on '
                                 'the hypervisor.'), timer.elapsed(),
                             instance=instance)
        except (exception.InstanceNotFound,
                exception.UnexpectedDeletingTaskStateError) as e:
            with excutils.save_and_reraise_exception():
                self._notify_about_instance_usage(context, instance,
                    'create.end', fault=e)
        except exception.ComputeResourcesUnavailable as e:
            # Claim failed on this host; let the caller reschedule.
            LOG.debug(e.format_message(), instance=instance)
            self._notify_about_instance_usage(context, instance,
                    'create.error', fault=e)
            raise exception.RescheduledException(
                    instance_uuid=instance.uuid, reason=e.format_message())
        except exception.BuildAbortException as e:
            with excutils.save_and_reraise_exception():
                LOG.debug(e.format_message(), instance=instance)
                self._notify_about_instance_usage(context, instance,
                    'create.error', fault=e)
        except (exception.FixedIpLimitExceeded,
                exception.NoMoreNetworks, exception.NoMoreFixedIps) as e:
            LOG.warning(_LW('No more network or fixed IP to be allocated'),
                        instance=instance)
            self._notify_about_instance_usage(context, instance,
                    'create.error', fault=e)
            msg = _('Failed to allocate the network(s) with error %s, '
                    'not rescheduling.') % e.format_message()
            raise exception.BuildAbortException(instance_uuid=instance.uuid,
                    reason=msg)
        except (exception.VirtualInterfaceCreateException,
                exception.VirtualInterfaceMacAddressException) as e:
            LOG.exception(_LE('Failed to allocate network(s)'),
                          instance=instance)
            self._notify_about_instance_usage(context, instance,
                    'create.error', fault=e)
            msg = _('Failed to allocate the network(s), not rescheduling.')
            raise exception.BuildAbortException(instance_uuid=instance.uuid,
                    reason=msg)
        except (exception.FlavorDiskTooSmall,
                exception.FlavorMemoryTooSmall,
                exception.ImageNotActive,
                exception.ImageUnacceptable,
                exception.InvalidDiskInfo) as e:
            self._notify_about_instance_usage(context, instance,
                    'create.error', fault=e)
            raise exception.BuildAbortException(instance_uuid=instance.uuid,
                    reason=e.format_message())
        except Exception as e:
            self._notify_about_instance_usage(context, instance,
                    'create.error', fault=e)
            raise exception.RescheduledException(
                    instance_uuid=instance.uuid, reason=six.text_type(e))

        # NOTE(alaski): This is only useful during reschedules, remove it now.
        instance.system_metadata.pop('network_allocated', None)

        # If CONF.default_access_ip_network_name is set, grab the
        # corresponding network and set the access ip values accordingly.
        network_name = CONF.default_access_ip_network_name
        if (network_name and not instance.access_ip_v4 and
                not instance.access_ip_v6):
            # Note that when there are multiple ips to choose from, an
            # arbitrary one will be chosen.
            for vif in network_info:
                if vif['network']['label'] == network_name:
                    for ip in vif.fixed_ips():
                        if not instance.access_ip_v4 and ip['version'] == 4:
                            instance.access_ip_v4 = ip['address']
                        if not instance.access_ip_v6 and ip['version'] == 6:
                            instance.access_ip_v6 = ip['address']
                    break

        self._update_instance_after_spawn(context, instance)

        try:
            instance.save(expected_task_state=task_states.SPAWNING)
        except (exception.InstanceNotFound,
                exception.UnexpectedDeletingTaskStateError) as e:
            with excutils.save_and_reraise_exception():
                self._notify_about_instance_usage(context, instance,
                    'create.end', fault=e)

        self._update_scheduler_instance_info(context, instance)
        self._notify_about_instance_usage(context, instance, 'create.end',
                extra_usage_info={'message': _('Success')},
                network_info=network_info)
@contextlib.contextmanager
def _build_resources(self, context, instance, requested_networks,
security_groups, image_meta, block_device_mapping):
resources = {}
network_info = None
try:
LOG.debug('Start building networks asynchronously for instance.',
instance=instance)
network_info = self._build_networks_for_instance(context, instance,
requested_networks, security_groups)
resources['network_info'] = network_info
except (exception.InstanceNotFound,
exception.UnexpectedDeletingTaskStateError):
raise
except exception.UnexpectedTaskStateError as e:
raise exception.BuildAbortException(instance_uuid=instance.uuid,
reason=e.format_message())
except Exception:
# Because this allocation is async any failures are likely to occur
# when the driver accesses network_info during spawn().
LOG.exception(_LE('Failed to allocate network(s)'),
instance=instance)
msg = _('Failed to allocate the network(s), not rescheduling.')
raise exception.BuildAbortException(instance_uuid=instance.uuid,
reason=msg)
try:
# Verify that all the BDMs have a device_name set and assign a
# default to the ones missing it with the help of the driver.
self._default_block_device_names(context, instance, image_meta,
block_device_mapping)
LOG.debug('Start building block device mappings for instance.',
instance=instance)
instance.vm_state = vm_states.BUILDING
instance.task_state = task_states.BLOCK_DEVICE_MAPPING
instance.save()
block_device_info = self._prep_block_device(context, instance,
block_device_mapping)
resources['block_device_info'] = block_device_info
except (exception.InstanceNotFound,
exception.UnexpectedDeletingTaskStateError):
with excutils.save_and_reraise_exception():
# Make sure the async call finishes
if network_info is not None:
network_info.wait(do_raise=False)
except (exception.UnexpectedTaskStateError,
exception.VolumeLimitExceeded,
exception.InvalidBDM) as e:
# Make sure the async call finishes
if network_info is not None:
network_info.wait(do_raise=False)
raise exception.BuildAbortException(instance_uuid=instance.uuid,
reason=e.format_message())
except Exception:
LOG.exception(_LE('Failure prepping block device'),
instance=instance)
# Make sure the async call finishes
if network_info is not None:
network_info.wait(do_raise=False)
msg = _('Failure prepping block device.')
raise exception.BuildAbortException(instance_uuid=instance.uuid,
reason=msg)
try:
yield resources
except Exception as exc:
with excutils.save_and_reraise_exception() as ctxt:
if not isinstance(exc, (exception.InstanceNotFound,
exception.UnexpectedDeletingTaskStateError)):
LOG.exception(_LE('Instance failed to spawn'),
instance=instance)
# Make sure the async call finishes
if network_info is not None:
network_info.wait(do_raise=False)
# if network_info is empty we're likely here because of
# network allocation failure. Since nothing can be reused on
# rescheduling it's better to deallocate network to eliminate
# the chance of orphaned ports in neutron
deallocate_networks = False if network_info else True
try:
self._shutdown_instance(context, instance,
block_device_mapping, requested_networks,
try_deallocate_networks=deallocate_networks)
except Exception as exc2:
ctxt.reraise = False
LOG.warning(_LW('Could not clean up failed build,'
' not rescheduling. Error: %s'),
six.text_type(exc2))
raise exception.BuildAbortException(
instance_uuid=instance.uuid,
reason=six.text_type(exc))
def _cleanup_allocated_networks(self, context, instance,
requested_networks):
try:
self._deallocate_network(context, instance, requested_networks)
except Exception:
msg = _LE('Failed to deallocate networks')
LOG.exception(msg, instance=instance)
return
instance.system_metadata['network_allocated'] = 'False'
try:
instance.save()
except exception.InstanceNotFound:
# NOTE(alaski): It's possible that we're cleaning up the networks
# because the instance was deleted. If that's the case then this
# exception will be raised by instance.save()
pass
    def _try_deallocate_network(self, context, instance,
                                requested_networks=None):
        """Deallocate networking; on failure flag the instance as ERROR.

        The original exception is re-raised after logging and after the
        instance has been put into the error state.
        """
        try:
            # tear down allocated network structure
            self._deallocate_network(context, instance, requested_networks)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE('Failed to deallocate network for instance.'),
                          instance=instance)
                self._set_instance_obj_error_state(context, instance)
def _get_power_off_values(self, context, instance, clean_shutdown):
"""Get the timing configuration for powering down this instance."""
if clean_shutdown:
timeout = compute_utils.get_value_from_system_metadata(instance,
key='image_os_shutdown_timeout', type=int,
default=CONF.shutdown_timeout)
retry_interval = self.SHUTDOWN_RETRY_INTERVAL
else:
timeout = 0
retry_interval = 0
return timeout, retry_interval
    def _power_off_instance(self, context, instance, clean_shutdown=True):
        """Power off an instance on this host.

        :param clean_shutdown: when True the guest gets the configured
            shutdown timeout and retry interval before being forced off;
            when False the power-off is immediate (timeout 0).
        """
        timeout, retry_interval = self._get_power_off_values(context,
                instance, clean_shutdown)
        self.driver.power_off(instance, timeout, retry_interval)
    def _shutdown_instance(self, context, instance,
                           bdms, requested_networks=None, notify=True,
                           try_deallocate_networks=True):
        """Shutdown an instance on this host.

        Teardown order matters: the driver destroy runs first (so an
        InstancePowerOffFailure keeps the IP allocated), then network
        deallocation, then cinder connection/detach cleanup per volume
        BDM (each volume failure is logged and skipped, not raised).

        :param:context: security context
        :param:instance: a nova.objects.Instance object
        :param:bdms: the block devices for the instance to be torn
                     down
        :param:requested_networks: the networks on which the instance
                                   has ports
        :param:notify: true if a final usage notification should be
                       emitted
        :param:try_deallocate_networks: false if we should avoid
                                        trying to teardown networking
        """
        context = context.elevated()
        LOG.info(_LI('Terminating instance'),
                 context=context, instance=instance)

        if notify:
            self._notify_about_instance_usage(context, instance,
                                              "shutdown.start")

        network_info = compute_utils.get_nw_info_for_instance(instance)

        # NOTE(vish) get bdms before destroying the instance
        vol_bdms = [bdm for bdm in bdms if bdm.is_volume]
        block_device_info = self._get_instance_block_device_info(
            context, instance, bdms=bdms)

        # NOTE(melwitt): attempt driver destroy before releasing ip, may
        #                want to keep ip allocated for certain failures
        timer = timeutils.StopWatch()
        try:
            LOG.debug('Start destroying the instance on the hypervisor.',
                      instance=instance)
            timer.start()
            self.driver.destroy(context, instance, network_info,
                                block_device_info)
            LOG.info(_LI('Took %0.2f seconds to destroy the instance on the '
                         'hypervisor.'), timer.elapsed(), instance=instance)
        except exception.InstancePowerOffFailure:
            # if the instance can't power off, don't release the ip
            with excutils.save_and_reraise_exception():
                pass
        except Exception:
            with excutils.save_and_reraise_exception():
                # deallocate ip and fail without proceeding to
                # volume api calls, preserving current behavior
                if try_deallocate_networks:
                    self._try_deallocate_network(context, instance,
                                                 requested_networks)

        if try_deallocate_networks:
            self._try_deallocate_network(context, instance, requested_networks)

        timer.restart()
        for bdm in vol_bdms:
            try:
                # NOTE(vish): actual driver detach done in driver.destroy, so
                #             just tell cinder that we are done with it.
                connector = self.driver.get_volume_connector(instance)
                self.volume_api.terminate_connection(context,
                                                     bdm.volume_id,
                                                     connector)
                self.volume_api.detach(context, bdm.volume_id, instance.uuid)
            except exception.DiskNotFound as exc:
                LOG.debug('Ignoring DiskNotFound: %s', exc,
                          instance=instance)
            except exception.VolumeNotFound as exc:
                LOG.debug('Ignoring VolumeNotFound: %s', exc,
                          instance=instance)
            except (cinder_exception.EndpointNotFound,
                    keystone_exception.EndpointNotFound) as exc:
                LOG.warning(_LW('Ignoring EndpointNotFound for '
                                'volume %(volume_id)s: %(exc)s'),
                            {'exc': exc, 'volume_id': bdm.volume_id},
                            instance=instance)
            except cinder_exception.ClientException as exc:
                LOG.warning(_LW('Ignoring unknown cinder exception for '
                                'volume %(volume_id)s: %(exc)s'),
                            {'exc': exc, 'volume_id': bdm.volume_id},
                            instance=instance)
            except Exception as exc:
                LOG.warning(_LW('Ignoring unknown exception for '
                                'volume %(volume_id)s: %(exc)s'),
                            {'exc': exc, 'volume_id': bdm.volume_id},
                            instance=instance)
        if vol_bdms:
            LOG.info(_LI('Took %(time).2f seconds to detach %(num)s volumes '
                         'for instance.'),
                     {'time': timer.elapsed(), 'num': len(vol_bdms)},
                     instance=instance)

        if notify:
            self._notify_about_instance_usage(context, instance,
                                              "shutdown.end")
def _cleanup_volumes(self, context, instance_uuid, bdms, raise_exc=True):
exc_info = None
for bdm in bdms:
LOG.debug("terminating bdm %s", bdm,
instance_uuid=instance_uuid)
if bdm.volume_id and bdm.delete_on_termination:
try:
self.volume_api.delete(context, bdm.volume_id)
except Exception as exc:
exc_info = sys.exc_info()
LOG.warning(_LW('Failed to delete volume: %(volume_id)s '
'due to %(exc)s'),
{'volume_id': bdm.volume_id, 'exc': exc})
if exc_info is not None and raise_exc:
six.reraise(exc_info[0], exc_info[1], exc_info[2])
    @hooks.add_hook("delete_instance")
    def _delete_instance(self, context, instance, bdms, quotas):
        """Delete an instance on this host. Commit or rollback quotas
        as necessary.

        SOFT_DELETED instances already had their quotas decremented, so
        the reservation is rolled back up front; on any later failure
        the reservation is also rolled back before re-raising, and the
        commit happens in _complete_deletion on success.

        :param context: nova request context
        :param instance: nova.objects.instance.Instance object
        :param bdms: nova.objects.block_device.BlockDeviceMappingList object
        :param quotas: nova.objects.quotas.Quotas object
        """
        was_soft_deleted = instance.vm_state == vm_states.SOFT_DELETED
        if was_soft_deleted:
            # Instances in SOFT_DELETED vm_state have already had quotas
            # decremented.
            try:
                quotas.rollback()
            except Exception:
                pass

        try:
            events = self.instance_events.clear_events_for_instance(instance)
            if events:
                LOG.debug('Events pending at deletion: %(events)s',
                          {'events': ','.join(events.keys())},
                          instance=instance)
            self._notify_about_instance_usage(context, instance,
                                              "delete.start")
            self._shutdown_instance(context, instance, bdms)
            # NOTE(dims): instance.info_cache.delete() should be called after
            # _shutdown_instance in the compute manager as shutdown calls
            # deallocate_for_instance so the info_cache is still needed
            # at this point.
            if instance.info_cache is not None:
                instance.info_cache.delete()
            else:
                # NOTE(yoshimatsu): Avoid AttributeError if instance.info_cache
                # is None. When the root cause that instance.info_cache becomes
                # None is fixed, the log level should be reconsidered.
                LOG.warning(_LW("Info cache for instance could not be found. "
                                "Ignore."), instance=instance)

            # NOTE(vish): We have already deleted the instance, so we have
            #             to ignore problems cleaning up the volumes. It
            #             would be nice to let the user know somehow that
            #             the volume deletion failed, but it is not
            #             acceptable to have an instance that can not be
            #             deleted. Perhaps this could be reworked in the
            #             future to set an instance fault the first time
            #             and to only ignore the failure if the instance
            #             is already in ERROR.
            self._cleanup_volumes(context, instance.uuid, bdms,
                    raise_exc=False)
            # if a delete task succeeded, always update vm state and task
            # state without expecting task state to be DELETING
            instance.vm_state = vm_states.DELETED
            instance.task_state = None
            instance.power_state = power_state.NOSTATE
            instance.terminated_at = timeutils.utcnow()
            instance.save()
            system_meta = instance.system_metadata
            instance.destroy()
        except Exception:
            with excutils.save_and_reraise_exception():
                quotas.rollback()

        self._complete_deletion(context,
                                instance,
                                bdms,
                                quotas,
                                system_meta)
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def terminate_instance(self, context, instance, bdms, reservations):
        """Terminate an instance on this host.

        :param bdms: BlockDeviceMappingList passed over RPC; may be
            stale and is refreshed from the database when a volume BDM
            is missing its volume_id.
        :param reservations: quota reservations to commit/rollback.
        """
        quotas = objects.Quotas.from_reservations(context,
                                                  reservations,
                                                  instance=instance)

        @utils.synchronized(instance.uuid)
        def do_terminate_instance(instance, bdms):
            # NOTE(mriedem): If we are deleting the instance while it was
            # booting from volume, we could be racing with a database update of
            # the BDM volume_id. Since the compute API passes the BDMs over RPC
            # to compute here, the BDMs may be stale at this point. So check
            # for any volume BDMs that don't have volume_id set and if we
            # detect that, we need to refresh the BDM list before proceeding.
            # TODO(mriedem): Move this into _delete_instance and make the bdms
            # parameter optional.
            for bdm in list(bdms):
                if bdm.is_volume and not bdm.volume_id:
                    LOG.debug('There are potentially stale BDMs during '
                              'delete, refreshing the BlockDeviceMappingList.',
                              instance=instance)
                    bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
                        context, instance.uuid)
                    break
            try:
                self._delete_instance(context, instance, bdms, quotas)
            except exception.InstanceNotFound:
                LOG.info(_LI("Instance disappeared during terminate"),
                         instance=instance)
            except Exception:
                # As we're trying to delete always go to Error if something
                # goes wrong that _delete_instance can't handle.
                with excutils.save_and_reraise_exception():
                    LOG.exception(_LE('Setting instance vm_state to ERROR'),
                                  instance=instance)
                    self._set_instance_obj_error_state(context, instance)

        do_terminate_instance(instance, bdms)
    # NOTE(johannes): This is probably better named power_off_instance
    # so it matches the driver method, but because of other issues, we
    # can't use that name in grizzly.
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def stop_instance(self, context, instance, clean_shutdown):
        """Stopping an instance on this host.

        Serialized per instance uuid. Moves the instance to STOPPED and
        clears task_state; tolerates a None task_state when the VM is
        already powered off on the hypervisor (teardown races).
        """

        @utils.synchronized(instance.uuid)
        def do_stop_instance():
            current_power_state = self._get_power_state(context, instance)
            LOG.debug('Stopping instance; current vm_state: %(vm_state)s, '
                      'current task_state: %(task_state)s, current DB '
                      'power_state: %(db_power_state)s, current VM '
                      'power_state: %(current_power_state)s',
                      {'vm_state': instance.vm_state,
                       'task_state': instance.task_state,
                       'db_power_state': instance.power_state,
                       'current_power_state': current_power_state},
                      instance_uuid=instance.uuid)

            # NOTE(mriedem): If the instance is already powered off, we are
            # possibly tearing down and racing with other operations, so we can
            # expect the task_state to be None if something else updates the
            # instance and we're not locking it.
            expected_task_state = [task_states.POWERING_OFF]
            # The list of power states is from _sync_instance_power_state.
            if current_power_state in (power_state.NOSTATE,
                                       power_state.SHUTDOWN,
                                       power_state.CRASHED):
                LOG.info(_LI('Instance is already powered off in the '
                             'hypervisor when stop is called.'),
                         instance=instance)
                expected_task_state.append(None)

            self._notify_about_instance_usage(context, instance,
                                              "power_off.start")
            self._power_off_instance(context, instance, clean_shutdown)
            instance.power_state = self._get_power_state(context, instance)
            instance.vm_state = vm_states.STOPPED
            instance.task_state = None
            instance.save(expected_task_state=expected_task_state)
            self._notify_about_instance_usage(context, instance,
                                              "power_off.end")

        do_stop_instance()
def _power_on(self, context, instance):
network_info = self.network_api.get_instance_nw_info(context, instance)
block_device_info = self._get_instance_block_device_info(context,
instance)
self.driver.power_on(context, instance,
network_info,
block_device_info)
    def _delete_snapshot_of_shelved_instance(self, context, instance,
                                             snapshot_id):
        """Delete snapshot of shelved instance.

        Best-effort: missing or unauthorized images are logged as
        warnings and any other failure is logged; nothing is raised.
        """
        try:
            self.image_api.delete(context, snapshot_id)
        except (exception.ImageNotFound,
                exception.ImageNotAuthorized) as exc:
            LOG.warning(_LW("Failed to delete snapshot "
                            "from shelved instance (%s)."),
                        exc.format_message(), instance=instance)
        except Exception:
            LOG.exception(_LE("Something wrong happened when trying to "
                              "delete snapshot from shelved instance."),
                          instance=instance)
    # NOTE(johannes): This is probably better named power_on_instance
    # so it matches the driver method, but because of other issues, we
    # can't use that name in grizzly.
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def start_instance(self, context, instance):
        """Starting an instance on this host.

        Powers the instance on, moves it to ACTIVE, and — if the
        instance was previously shelved — removes the leftover shelve
        snapshot image and shelve-related system metadata.
        """
        self._notify_about_instance_usage(context, instance, "power_on.start")
        self._power_on(context, instance)
        instance.power_state = self._get_power_state(context, instance)
        instance.vm_state = vm_states.ACTIVE
        instance.task_state = None

        # Delete an image(VM snapshot) for a shelved instance
        snapshot_id = instance.system_metadata.get('shelved_image_id')
        if snapshot_id:
            self._delete_snapshot_of_shelved_instance(context, instance,
                                                      snapshot_id)

        # Delete system_metadata for a shelved instance
        compute_utils.remove_shelved_keys_from_system_metadata(instance)

        instance.save(expected_task_state=task_states.POWERING_ON)
        self._notify_about_instance_usage(context, instance, "power_on.end")
    @messaging.expected_exceptions(NotImplementedError,
                                   exception.TriggerCrashDumpNotSupported,
                                   exception.InstanceNotRunning)
    @wrap_exception()
    @wrap_instance_event
    @wrap_instance_fault
    def trigger_crash_dump(self, context, instance):
        """Trigger crash dump in an instance.

        Delegates entirely to the driver; NotImplementedError and the
        listed exceptions propagate to the RPC caller as expected errors.
        """
        self._notify_about_instance_usage(context, instance,
                                          "trigger_crash_dump.start")

        # This method does not change task_state and power_state because the
        # effect of a trigger depends on user's configuration.
        self.driver.trigger_crash_dump(instance)

        self._notify_about_instance_usage(context, instance,
                                          "trigger_crash_dump.end")
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def soft_delete_instance(self, context, instance, reservations):
        """Soft delete an instance on this host.

        On success the quota reservation is committed (the resources are
        considered freed); on failure it is rolled back and the original
        exception is re-raised.
        """
        quotas = objects.Quotas.from_reservations(context,
                                                  reservations,
                                                  instance=instance)
        try:
            self._notify_about_instance_usage(context, instance,
                                              "soft_delete.start")
            try:
                self.driver.soft_delete(instance)
            except NotImplementedError:
                # Fallback to just powering off the instance if the
                # hypervisor doesn't implement the soft_delete method
                self.driver.power_off(instance)
            instance.power_state = self._get_power_state(context, instance)
            instance.vm_state = vm_states.SOFT_DELETED
            instance.task_state = None
            instance.save(expected_task_state=[task_states.SOFT_DELETING])
        except Exception:
            with excutils.save_and_reraise_exception():
                quotas.rollback()
        quotas.commit()
        self._notify_about_instance_usage(context, instance, "soft_delete.end")
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def restore_instance(self, context, instance):
        """Restore a soft-deleted instance on this host.

        Brings the guest back via the driver's restore (or by powering it
        on) and returns the instance to the ACTIVE vm_state.
        """
        self._notify_about_instance_usage(context, instance, "restore.start")
        try:
            self.driver.restore(instance)
        except NotImplementedError:
            # Fallback to just powering on the instance if the hypervisor
            # doesn't implement the restore method
            self._power_on(context, instance)
        instance.power_state = self._get_power_state(context, instance)
        instance.vm_state = vm_states.ACTIVE
        instance.task_state = None
        instance.save(expected_task_state=task_states.RESTORING)
        self._notify_about_instance_usage(context, instance, "restore.end")
@staticmethod
def _set_migration_status(migration, status):
"""Set the status, and guard against a None being passed in.
This is useful as some of the compute RPC calls will not pass
a migration object in older versions. The check can be removed when
we move past 4.x major version of the RPC API.
"""
if migration:
migration.status = status
migration.save()
    def _rebuild_default_impl(self, context, instance, image_meta,
                              injected_files, admin_password, bdms,
                              detach_block_devices, attach_block_devices,
                              network_info=None,
                              recreate=False, block_device_info=None,
                              preserve_ephemeral=False):
        """Default rebuild path used when the driver provides no rebuild().

        Tears down the existing guest (skipped on an evacuate/recreate,
        where the source guest is not on this host), re-attaches block
        devices, and spawns a fresh guest from ``image_meta``, advancing
        the instance task_state at each step.

        :raises: exception.PreserveEphemeralNotSupported when
            ``preserve_ephemeral`` is requested, which this path cannot do.
        """
        if preserve_ephemeral:
            # The default code path does not support preserving ephemeral
            # partitions.
            raise exception.PreserveEphemeralNotSupported()
        if recreate:
            # Evacuate: nothing local to power off or destroy.
            detach_block_devices(context, bdms)
        else:
            self._power_off_instance(context, instance, clean_shutdown=True)
            detach_block_devices(context, bdms)
            self.driver.destroy(context, instance,
                                network_info=network_info,
                                block_device_info=block_device_info)
        instance.task_state = task_states.REBUILD_BLOCK_DEVICE_MAPPING
        instance.save(expected_task_state=[task_states.REBUILDING])
        new_block_device_info = attach_block_devices(context, instance, bdms)
        instance.task_state = task_states.REBUILD_SPAWNING
        instance.save(
            expected_task_state=[task_states.REBUILD_BLOCK_DEVICE_MAPPING])
        # Spawn under the mutated migration context so any per-migration
        # resource changes are visible to the driver.
        with instance.mutated_migration_context():
            self.driver.spawn(context, instance, image_meta, injected_files,
                              admin_password, network_info=network_info,
                              block_device_info=new_block_device_info)
    @messaging.expected_exceptions(exception.PreserveEphemeralNotSupported)
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def rebuild_instance(self, context, instance, orig_image_ref, image_ref,
                         injected_files, new_pass, orig_sys_metadata,
                         bdms, recreate, on_shared_storage=None,
                         preserve_ephemeral=False, migration=None,
                         scheduled_node=None, limits=None):
        """Destroy and re-make this instance.

        A 'rebuild' effectively purges all existing data from the system and
        remakes the VM with given 'metadata' and 'personalities'.

        :param context: `nova.RequestContext` object
        :param instance: Instance object
        :param orig_image_ref: Original image_ref before rebuild
        :param image_ref: New image_ref for rebuild
        :param injected_files: Files to inject
        :param new_pass: password to set on rebuilt instance
        :param orig_sys_metadata: instance system metadata from pre-rebuild
        :param bdms: block-device-mappings to use for rebuild
        :param recreate: True if the instance is being recreated (e.g. the
            hypervisor it was on failed) - cleanup of old state will be
            skipped.
        :param on_shared_storage: True if instance files on shared storage.
                                  If not provided then information from the
                                  driver will be used to decide if the instance
                                  files are available or not on the target host
        :param preserve_ephemeral: True if the default ephemeral storage
                                   partition must be preserved on rebuild
        :param migration: a Migration object if one was created for this
                          rebuild operation (if it's a part of evacuate)
        :param scheduled_node: A node of the host chosen by the scheduler. If a
                               host was specified by the user, this will be
                               None
        :param limits: Overcommit limits set by the scheduler. If a host was
                       specified by the user, this will be None
        """
        context = context.elevated()
        LOG.info(_LI("Rebuilding instance"), context=context,
                 instance=instance)
        # Only claim resources when the scheduler picked the node; a
        # user-forced host bypasses claims entirely (NopClaim).
        if scheduled_node is not None:
            rt = self._get_resource_tracker(scheduled_node)
            rebuild_claim = rt.rebuild_claim
        else:
            rebuild_claim = claims.NopClaim
        image_meta = {}
        if image_ref:
            image_meta = self.image_api.get(context, image_ref)
        # NOTE(mriedem): On a recreate (evacuate), we need to update
        # the instance's host and node properties to reflect it's
        # destination node for the recreate.
        if not scheduled_node:
            try:
                compute_node = self._get_compute_info(context, self.host)
                scheduled_node = compute_node.hypervisor_hostname
            except exception.ComputeHostNotFound:
                LOG.exception(_LE('Failed to get compute_info for %s'),
                              self.host)
        with self._error_out_instance_on_exception(context, instance):
            try:
                claim_ctxt = rebuild_claim(
                    context, instance, limits=limits, image_meta=image_meta,
                    migration=migration)
                self._do_rebuild_instance_with_claim(
                    claim_ctxt, context, instance, orig_image_ref,
                    image_ref, injected_files, new_pass, orig_sys_metadata,
                    bdms, recreate, on_shared_storage, preserve_ephemeral)
            except exception.ComputeResourcesUnavailable as e:
                LOG.debug("Could not rebuild instance on this host, not "
                          "enough resources available.", instance=instance)
                # NOTE(ndipanov): We just abort the build for now and leave a
                # migration record for potential cleanup later
                self._set_migration_status(migration, 'failed')
                self._notify_about_instance_usage(context, instance,
                        'rebuild.error', fault=e)
                raise exception.BuildAbortException(
                    instance_uuid=instance.uuid, reason=e.format_message())
            except (exception.InstanceNotFound,
                    exception.UnexpectedDeletingTaskStateError) as e:
                # Instance vanished mid-rebuild: mark the migration failed
                # but do not re-raise - there is nothing left to error out.
                LOG.debug('Instance was deleted while rebuilding',
                          instance=instance)
                self._set_migration_status(migration, 'failed')
                self._notify_about_instance_usage(context, instance,
                        'rebuild.error', fault=e)
            except Exception as e:
                self._set_migration_status(migration, 'failed')
                self._notify_about_instance_usage(context, instance,
                        'rebuild.error', fault=e)
                raise
            else:
                instance.apply_migration_context()
                # NOTE (ndipanov): This save will now update the host and node
                # attributes making sure that next RT pass is consistent since
                # it will be based on the instance and not the migration DB
                # entry.
                instance.host = self.host
                instance.node = scheduled_node
                instance.save()
                instance.drop_migration_context()
                # NOTE (ndipanov): Mark the migration as done only after we
                # mark the instance as belonging to this host.
                self._set_migration_status(migration, 'done')
    def _do_rebuild_instance_with_claim(self, claim_context, *args, **kwargs):
        """Helper to avoid deep nesting in the top-level method."""
        # Hold the resource claim open for the duration of the rebuild; it
        # is finalized/released when the context manager exits.
        with claim_context:
            self._do_rebuild_instance(*args, **kwargs)
@staticmethod
def _get_image_name(image_meta):
if image_meta.obj_attr_is_set("name"):
return image_meta.name
else:
return ''
def _do_rebuild_instance(self, context, instance, orig_image_ref,
image_ref, injected_files, new_pass,
orig_sys_metadata, bdms, recreate,
on_shared_storage, preserve_ephemeral):
orig_vm_state = instance.vm_state
if recreate:
if not self.driver.capabilities["supports_recreate"]:
raise exception.InstanceRecreateNotSupported
self._check_instance_exists(context, instance)
if on_shared_storage is None:
LOG.debug('on_shared_storage is not provided, using driver'
'information to decide if the instance needs to'
'be recreated')
on_shared_storage = self.driver.instance_on_disk(instance)
elif (on_shared_storage !=
self.driver.instance_on_disk(instance)):
# To cover case when admin expects that instance files are
# on shared storage, but not accessible and vice versa
raise exception.InvalidSharedStorage(
_("Invalid state of instance files on shared"
" storage"))
if on_shared_storage:
LOG.info(_LI('disk on shared storage, recreating using'
' existing disk'))
else:
image_ref = orig_image_ref = instance.image_ref
LOG.info(_LI("disk not on shared storage, rebuilding from:"
" '%s'"), str(image_ref))
if image_ref:
image_meta = objects.ImageMeta.from_image_ref(
context, self.image_api, image_ref)
else:
image_meta = instance.image_meta
# This instance.exists message should contain the original
# image_ref, not the new one. Since the DB has been updated
# to point to the new one... we have to override it.
# TODO(jaypipes): Move generate_image_url() into the nova.image.api
orig_image_ref_url = glance.generate_image_url(orig_image_ref)
extra_usage_info = {'image_ref_url': orig_image_ref_url}
compute_utils.notify_usage_exists(
self.notifier, context, instance,
current_period=True, system_metadata=orig_sys_metadata,
extra_usage_info=extra_usage_info)
# This message should contain the new image_ref
extra_usage_info = {'image_name': self._get_image_name(image_meta)}
self._notify_about_instance_usage(context, instance,
"rebuild.start", extra_usage_info=extra_usage_info)
instance.power_state = self._get_power_state(context, instance)
instance.task_state = task_states.REBUILDING
instance.save(expected_task_state=[task_states.REBUILDING])
if recreate:
# Needed for nova-network, does nothing for neutron
self.network_api.setup_networks_on_host(
context, instance, self.host)
# For nova-network this is needed to move floating IPs
# For neutron this updates the host in the port binding
# TODO(cfriesen): this network_api call and the one above
# are so similar, we should really try to unify them.
self.network_api.setup_instance_network_on_host(
context, instance, self.host)
network_info = compute_utils.get_nw_info_for_instance(instance)
if bdms is None:
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
block_device_info = \
self._get_instance_block_device_info(
context, instance, bdms=bdms)
def detach_block_devices(context, bdms):
for bdm in bdms:
if bdm.is_volume:
self._detach_volume(context, bdm.volume_id, instance,
destroy_bdm=False)
files = self._decode_files(injected_files)
kwargs = dict(
context=context,
instance=instance,
image_meta=image_meta,
injected_files=files,
admin_password=new_pass,
bdms=bdms,
detach_block_devices=detach_block_devices,
attach_block_devices=self._prep_block_device,
block_device_info=block_device_info,
network_info=network_info,
preserve_ephemeral=preserve_ephemeral,
recreate=recreate)
try:
with instance.mutated_migration_context():
self.driver.rebuild(**kwargs)
except NotImplementedError:
# NOTE(rpodolyaka): driver doesn't provide specialized version
# of rebuild, fall back to the default implementation
self._rebuild_default_impl(**kwargs)
self._update_instance_after_spawn(context, instance)
instance.save(expected_task_state=[task_states.REBUILD_SPAWNING])
if orig_vm_state == vm_states.STOPPED:
LOG.info(_LI("bringing vm to original state: '%s'"),
orig_vm_state, instance=instance)
instance.vm_state = vm_states.ACTIVE
instance.task_state = task_states.POWERING_OFF
instance.progress = 0
instance.save()
self.stop_instance(context, instance, False)
self._update_scheduler_instance_info(context, instance)
self._notify_about_instance_usage(
context, instance, "rebuild.end",
network_info=network_info,
extra_usage_info=extra_usage_info)
def _handle_bad_volumes_detached(self, context, instance, bad_devices,
block_device_info):
"""Handle cases where the virt-layer had to detach non-working volumes
in order to complete an operation.
"""
for bdm in block_device_info['block_device_mapping']:
if bdm.get('mount_device') in bad_devices:
try:
volume_id = bdm['connection_info']['data']['volume_id']
except KeyError:
continue
# NOTE(sirp): ideally we'd just call
# `compute_api.detach_volume` here but since that hits the
# DB directly, that's off limits from within the
# compute-manager.
#
# API-detach
LOG.info(_LI("Detaching from volume api: %s"), volume_id)
volume = self.volume_api.get(context, volume_id)
self.volume_api.check_detach(context, volume)
self.volume_api.begin_detaching(context, volume_id)
# Manager-detach
self.detach_volume(context, volume_id, instance)
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def reboot_instance(self, context, instance, block_device_info,
                        reboot_type):
        """Reboot an instance on this host.

        :param context: security context (elevated below)
        :param instance: nova.objects.instance.Instance object
        :param block_device_info: ignored; recomputed from the DB below
        :param reboot_type: "SOFT" for a guest-cooperative reboot, anything
            else is treated as a hard reboot
        """
        # acknowledge the request made it to the manager
        if reboot_type == "SOFT":
            instance.task_state = task_states.REBOOT_PENDING
            expected_states = (task_states.REBOOTING,
                               task_states.REBOOT_PENDING,
                               task_states.REBOOT_STARTED)
        else:
            instance.task_state = task_states.REBOOT_PENDING_HARD
            expected_states = (task_states.REBOOTING_HARD,
                               task_states.REBOOT_PENDING_HARD,
                               task_states.REBOOT_STARTED_HARD)
        context = context.elevated()
        LOG.info(_LI("Rebooting instance"), context=context, instance=instance)
        block_device_info = self._get_instance_block_device_info(context,
                                                                 instance)
        network_info = self.network_api.get_instance_nw_info(context, instance)
        self._notify_about_instance_usage(context, instance, "reboot.start")
        instance.power_state = self._get_power_state(context, instance)
        instance.save(expected_task_state=expected_states)
        # Warn (but still attempt the reboot) when the guest is not running.
        if instance.power_state != power_state.RUNNING:
            state = instance.power_state
            running = power_state.RUNNING
            LOG.warning(_LW('trying to reboot a non-running instance:'
                            ' (state: %(state)s expected: %(running)s)'),
                        {'state': state, 'running': running},
                        context=context, instance=instance)

        def bad_volumes_callback(bad_devices):
            # Invoked by the driver when it had to detach broken volumes.
            self._handle_bad_volumes_detached(
                    context, instance, bad_devices, block_device_info)

        try:
            # Don't change it out of rescue mode
            if instance.vm_state == vm_states.RESCUED:
                new_vm_state = vm_states.RESCUED
            else:
                new_vm_state = vm_states.ACTIVE
            new_power_state = None
            if reboot_type == "SOFT":
                instance.task_state = task_states.REBOOT_STARTED
                expected_state = task_states.REBOOT_PENDING
            else:
                instance.task_state = task_states.REBOOT_STARTED_HARD
                expected_state = task_states.REBOOT_PENDING_HARD
            instance.save(expected_task_state=expected_state)
            self.driver.reboot(context, instance,
                               network_info,
                               reboot_type,
                               block_device_info=block_device_info,
                               bad_volumes_callback=bad_volumes_callback)
        except Exception as error:
            with excutils.save_and_reraise_exception() as ctxt:
                exc_info = sys.exc_info()
                # if the reboot failed but the VM is running don't
                # put it into an error state
                new_power_state = self._get_power_state(context, instance)
                if new_power_state == power_state.RUNNING:
                    LOG.warning(_LW('Reboot failed but instance is running'),
                                context=context, instance=instance)
                    compute_utils.add_instance_fault_from_exc(context,
                            instance, error, exc_info)
                    self._notify_about_instance_usage(context, instance,
                            'reboot.error', fault=error)
                    # Guest survived: record the fault but swallow the error.
                    ctxt.reraise = False
                else:
                    LOG.error(_LE('Cannot reboot instance: %s'), error,
                              context=context, instance=instance)
                    self._set_instance_obj_error_state(context, instance)
        if not new_power_state:
            new_power_state = self._get_power_state(context, instance)
        try:
            instance.power_state = new_power_state
            instance.vm_state = new_vm_state
            instance.task_state = None
            instance.save()
        except exception.InstanceNotFound:
            LOG.warning(_LW("Instance disappeared during reboot"),
                        context=context, instance=instance)
        self._notify_about_instance_usage(context, instance, "reboot.end")
    @delete_image_on_error
    def _do_snapshot_instance(self, context, image_id, instance, rotation):
        # NOTE: ``rotation`` is accepted but unused here - backups are
        # rotated by the caller (backup_instance) after the snapshot.
        self._snapshot_instance(context, image_id, instance,
                                task_states.IMAGE_BACKUP)
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_fault
    def backup_instance(self, context, image_id, instance, backup_type,
                        rotation):
        """Backup an instance on this host.

        Snapshots the instance into the pre-created image ``image_id`` and
        then rotates out the oldest backups beyond ``rotation``.

        :param backup_type: daily | weekly
        :param rotation: int representing how many backups to keep around
        """
        self._do_snapshot_instance(context, image_id, instance, rotation)
        self._rotate_backups(context, instance, backup_type, rotation)
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_fault
    @delete_image_on_error
    def snapshot_instance(self, context, image_id, instance):
        """Snapshot an instance on this host.

        :param context: security context
        :param instance: a nova.objects.instance.Instance object
        :param image_id: glance.db.sqlalchemy.models.Image.Id
        """
        # NOTE(dave-mcnally) the task state will already be set by the api
        # but if the compute manager has crashed/been restarted prior to the
        # request getting here the task state may have been cleared so we set
        # it again and things continue normally
        try:
            instance.task_state = task_states.IMAGE_SNAPSHOT
            instance.save(
                        expected_task_state=task_states.IMAGE_SNAPSHOT_PENDING)
        except exception.InstanceNotFound:
            # possibility instance no longer exists, no point in continuing
            LOG.debug("Instance not found, could not set state %s "
                      "for instance.",
                      task_states.IMAGE_SNAPSHOT, instance=instance)
            return
        except exception.UnexpectedDeletingTaskStateError:
            LOG.debug("Instance being deleted, snapshot cannot continue",
                      instance=instance)
            return
        self._snapshot_instance(context, image_id, instance,
                                task_states.IMAGE_SNAPSHOT)
def _snapshot_instance(self, context, image_id, instance,
expected_task_state):
context = context.elevated()
instance.power_state = self._get_power_state(context, instance)
try:
instance.save()
LOG.info(_LI('instance snapshotting'), context=context,
instance=instance)
if instance.power_state != power_state.RUNNING:
state = instance.power_state
running = power_state.RUNNING
LOG.warning(_LW('trying to snapshot a non-running instance: '
'(state: %(state)s expected: %(running)s)'),
{'state': state, 'running': running},
instance=instance)
self._notify_about_instance_usage(
context, instance, "snapshot.start")
def update_task_state(task_state,
expected_state=expected_task_state):
instance.task_state = task_state
instance.save(expected_task_state=expected_state)
self.driver.snapshot(context, instance, image_id,
update_task_state)
instance.task_state = None
instance.save(expected_task_state=task_states.IMAGE_UPLOADING)
self._notify_about_instance_usage(context, instance,
"snapshot.end")
except (exception.InstanceNotFound,
exception.UnexpectedDeletingTaskStateError):
# the instance got deleted during the snapshot
# Quickly bail out of here
msg = 'Instance disappeared during snapshot'
LOG.debug(msg, instance=instance)
try:
image_service = glance.get_default_image_service()
image = image_service.show(context, image_id)
if image['status'] != 'active':
image_service.delete(context, image_id)
except Exception:
LOG.warning(_LW("Error while trying to clean up image %s"),
image_id, instance=instance)
except exception.ImageNotFound:
instance.task_state = None
instance.save()
msg = _LW("Image not found during snapshot")
LOG.warn(msg, instance=instance)
    def _post_interrupted_snapshot_cleanup(self, context, instance):
        # Give the driver a chance to clean up any state left behind by a
        # snapshot that was interrupted part-way through.
        self.driver.post_interrupted_snapshot_cleanup(context, instance)
    @messaging.expected_exceptions(NotImplementedError)
    @wrap_exception()
    def volume_snapshot_create(self, context, instance, volume_id,
                               create_info):
        """Delegate volume snapshot creation to the virt driver."""
        self.driver.volume_snapshot_create(context, instance, volume_id,
                                           create_info)
    @messaging.expected_exceptions(NotImplementedError)
    @wrap_exception()
    def volume_snapshot_delete(self, context, instance, volume_id,
                               snapshot_id, delete_info):
        """Delegate volume snapshot deletion to the virt driver."""
        self.driver.volume_snapshot_delete(context, instance, volume_id,
                                           snapshot_id, delete_info)
@wrap_instance_fault
def _rotate_backups(self, context, instance, backup_type, rotation):
"""Delete excess backups associated to an instance.
Instances are allowed a fixed number of backups (the rotation number);
this method deletes the oldest backups that exceed the rotation
threshold.
:param context: security context
:param instance: Instance dict
:param backup_type: a user-defined type, like "daily" or "weekly" etc.
:param rotation: int representing how many backups to keep around;
None if rotation shouldn't be used (as in the case of snapshots)
"""
filters = {'property-image_type': 'backup',
'property-backup_type': backup_type,
'property-instance_uuid': instance.uuid}
images = self.image_api.get_all(context, filters=filters,
sort_key='created_at', sort_dir='desc')
num_images = len(images)
LOG.debug("Found %(num_images)d images (rotation: %(rotation)d)",
{'num_images': num_images, 'rotation': rotation},
instance=instance)
if num_images > rotation:
# NOTE(sirp): this deletes all backups that exceed the rotation
# limit
excess = len(images) - rotation
LOG.debug("Rotating out %d backups", excess,
instance=instance)
for i in range(excess):
image = images.pop()
image_id = image['id']
LOG.debug("Deleting image %s", image_id,
instance=instance)
self.image_api.delete(context, image_id)
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def set_admin_password(self, context, instance, new_pass):
        """Set the root/admin password for an instance on this host.

        This is generally only called by API password resets after an
        image has been built.

        @param context: Nova auth context.
        @param instance: Nova instance object.
        @param new_pass: The admin password for the instance (a random one
            is generated when None).
        """
        context = context.elevated()
        if new_pass is None:
            # Generate a random password
            new_pass = utils.generate_password()
        current_power_state = self._get_power_state(context, instance)
        expected_state = power_state.RUNNING
        # The password can only be set on a running guest; bail out early
        # (clearing the task_state so the instance is usable again).
        if current_power_state != expected_state:
            instance.task_state = None
            instance.save(expected_task_state=task_states.UPDATING_PASSWORD)
            _msg = _('instance %s is not running') % instance.uuid
            raise exception.InstancePasswordSetFailed(
                instance=instance.uuid, reason=_msg)
        try:
            self.driver.set_admin_password(instance, new_pass)
            LOG.info(_LI("Root password set"), instance=instance)
            instance.task_state = None
            instance.save(
                expected_task_state=task_states.UPDATING_PASSWORD)
        except NotImplementedError:
            LOG.warning(_LW('set_admin_password is not implemented '
                            'by this driver or guest instance.'),
                        instance=instance)
            instance.task_state = None
            instance.save(
                expected_task_state=task_states.UPDATING_PASSWORD)
            raise NotImplementedError(_('set_admin_password is not '
                                        'implemented by this driver or guest '
                                        'instance.'))
        except exception.UnexpectedTaskStateError:
            # interrupted by another (most likely delete) task
            # do not retry
            raise
        except Exception:
            # Catch all here because this could be anything.
            LOG.exception(_LE('set_admin_password failed'),
                          instance=instance)
            self._set_instance_obj_error_state(context, instance)
            # We create a new exception here so that we won't
            # potentially reveal password information to the
            # API caller.  The real exception is logged above
            _msg = _('error setting admin password')
            raise exception.InstancePasswordSetFailed(
                instance=instance.uuid, reason=_msg)
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def inject_file(self, context, path, file_contents, instance):
"""Write a file to the specified path in an instance on this host."""
# NOTE(russellb) Remove this method, as well as the underlying virt
# driver methods, when the compute rpc interface is bumped to 4.x
# as it is no longer used.
context = context.elevated()
current_power_state = self._get_power_state(context, instance)
expected_state = power_state.RUNNING
if current_power_state != expected_state:
LOG.warning(_LW('trying to inject a file into a non-running '
'(state: %(current_state)s expected: '
'%(expected_state)s)'),
{'current_state': current_power_state,
'expected_state': expected_state},
instance=instance)
LOG.info(_LI('injecting file to %s'), path,
instance=instance)
self.driver.inject_file(instance, path, file_contents)
def _get_rescue_image(self, context, instance, rescue_image_ref=None):
"""Determine what image should be used to boot the rescue VM."""
# 1. If rescue_image_ref is passed in, use that for rescue.
# 2. Else, use the base image associated with instance's current image.
# The idea here is to provide the customer with a rescue
# environment which they are familiar with.
# So, if they built their instance off of a Debian image,
# their rescue VM will also be Debian.
# 3. As a last resort, use instance's current image.
if not rescue_image_ref:
system_meta = utils.instance_sys_meta(instance)
rescue_image_ref = system_meta.get('image_base_image_ref')
if not rescue_image_ref:
LOG.warning(_LW('Unable to find a different image to use for '
'rescue VM, using instance\'s current image'),
instance=instance)
rescue_image_ref = instance.image_ref
return objects.ImageMeta.from_image_ref(
context, self.image_api, rescue_image_ref)
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def rescue_instance(self, context, instance, rescue_password,
                        rescue_image_ref, clean_shutdown):
        """Put an instance into rescue mode.

        Powers the guest off and asks the driver to rescue it using a
        suitable rescue image; on success the instance moves to the
        RESCUED vm_state.

        :param rescue_password: admin password for the rescue VM; a random
            one is generated when not supplied
        :param rescue_image_ref: image to boot the rescue VM from; when not
            given one is selected by _get_rescue_image
        :param clean_shutdown: attempt a graceful power-off before rescue
        """
        context = context.elevated()
        LOG.info(_LI('Rescuing'), context=context, instance=instance)
        admin_password = (rescue_password if rescue_password else
                          utils.generate_password())
        network_info = self.network_api.get_instance_nw_info(context, instance)
        rescue_image_meta = self._get_rescue_image(context, instance,
                                                   rescue_image_ref)
        extra_usage_info = {'rescue_image_name':
                            self._get_image_name(rescue_image_meta)}
        self._notify_about_instance_usage(context, instance,
                "rescue.start", extra_usage_info=extra_usage_info,
                network_info=network_info)
        try:
            self._power_off_instance(context, instance, clean_shutdown)
            self.driver.rescue(context, instance,
                               network_info,
                               rescue_image_meta, admin_password)
        except Exception as e:
            LOG.exception(_LE("Error trying to Rescue Instance"),
                          instance=instance)
            self._set_instance_obj_error_state(context, instance)
            raise exception.InstanceNotRescuable(
                instance_id=instance.uuid,
                reason=_("Driver Error: %s") % e)
        compute_utils.notify_usage_exists(self.notifier, context, instance,
                                          current_period=True)
        instance.vm_state = vm_states.RESCUED
        instance.task_state = None
        instance.power_state = self._get_power_state(context, instance)
        instance.launched_at = timeutils.utcnow()
        instance.save(expected_task_state=task_states.RESCUING)
        self._notify_about_instance_usage(context, instance,
                "rescue.end", extra_usage_info=extra_usage_info,
                network_info=network_info)
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def unrescue_instance(self, context, instance):
        """Take an instance out of rescue mode, returning it to ACTIVE."""
        context = context.elevated()
        LOG.info(_LI('Unrescuing'), context=context, instance=instance)
        network_info = self.network_api.get_instance_nw_info(context, instance)
        self._notify_about_instance_usage(context, instance,
                "unrescue.start", network_info=network_info)
        # Any driver failure below puts the instance into ERROR state.
        with self._error_out_instance_on_exception(context, instance):
            self.driver.unrescue(instance,
                                 network_info)
        instance.vm_state = vm_states.ACTIVE
        instance.task_state = None
        instance.power_state = self._get_power_state(context, instance)
        instance.save(expected_task_state=task_states.UNRESCUING)
        self._notify_about_instance_usage(context,
                                          instance,
                                          "unrescue.end",
                                          network_info=network_info)
    @wrap_exception()
    @wrap_instance_fault
    def change_instance_metadata(self, context, diff, instance):
        """Update the metadata published to the instance.

        :param diff: the metadata changes to apply, passed through to the
            virt driver unchanged
        """
        LOG.debug("Changing instance metadata according to %r",
                  diff, instance=instance)
        self.driver.change_instance_metadata(context, instance, diff)
    @wrap_exception()
    @wrap_instance_event
    @wrap_instance_fault
    def confirm_resize(self, context, instance, reservations, migration):
        """Confirm a resize/migration, destroying the source guest.

        The work is serialized per instance uuid and re-reads both the
        migration and the instance from the DB so a confirmation racing
        with another confirm (or with an instance delete) is a no-op.
        """
        quotas = objects.Quotas.from_reservations(context,
                                                  reservations,
                                                  instance=instance)

        @utils.synchronized(instance.uuid)
        def do_confirm_resize(context, instance, migration_id):
            # NOTE(wangpan): Get the migration status from db, if it has been
            # confirmed, we do nothing and return here
            LOG.debug("Going to confirm migration %s", migration_id,
                      context=context, instance=instance)
            try:
                # TODO(russellb) Why are we sending the migration object just
                # to turn around and look it up from the db again?
                migration = objects.Migration.get_by_id(
                        context.elevated(), migration_id)
            except exception.MigrationNotFound:
                LOG.error(_LE("Migration %s is not found during confirmation"),
                          migration_id, context=context, instance=instance)
                quotas.rollback()
                return
            if migration.status == 'confirmed':
                LOG.info(_LI("Migration %s is already confirmed"),
                         migration_id, context=context, instance=instance)
                quotas.rollback()
                return
            elif migration.status not in ('finished', 'confirming'):
                LOG.warning(_LW("Unexpected confirmation status '%(status)s' "
                                "of migration %(id)s, exit confirmation "
                                "process"),
                            {"status": migration.status, "id": migration_id},
                            context=context, instance=instance)
                quotas.rollback()
                return
            # NOTE(wangpan): Get the instance from db, if it has been
            # deleted, we do nothing and return here
            expected_attrs = ['metadata', 'system_metadata', 'flavor']
            try:
                instance = objects.Instance.get_by_uuid(
                        context, instance.uuid,
                        expected_attrs=expected_attrs)
            except exception.InstanceNotFound:
                LOG.info(_LI("Instance is not found during confirmation"),
                         context=context, instance=instance)
                quotas.rollback()
                return
            self._confirm_resize(context, instance, quotas,
                                 migration=migration)

        do_confirm_resize(context, instance, migration.id)
    def _confirm_resize(self, context, instance, quotas,
                        migration=None):
        """Destroys the source instance.

        Clears the stashed pre-resize flavor/vm_state, tears down networking
        and the guest on the source host, drops the move claim, and commits
        the quota reservations.
        """
        self._notify_about_instance_usage(context, instance,
                                          "resize.confirm.start")
        with self._error_out_instance_on_exception(context, instance,
                                                   quotas=quotas):
            # NOTE(danms): delete stashed migration information
            old_instance_type = instance.old_flavor
            instance.old_flavor = None
            instance.new_flavor = None
            instance.system_metadata.pop('old_vm_state', None)
            instance.save()
            # NOTE(tr3buchet): tear down networks on source host
            self.network_api.setup_networks_on_host(context, instance,
                               migration.source_compute, teardown=True)
            network_info = self.network_api.get_instance_nw_info(context,
                                                                 instance)
            self.driver.confirm_migration(migration, instance,
                                          network_info)
            migration.status = 'confirmed'
            with migration.obj_as_admin():
                migration.save()
            # Release the resources that were claimed on the source node.
            rt = self._get_resource_tracker(migration.source_node)
            rt.drop_move_claim(context, instance, old_instance_type)
            # NOTE(mriedem): The old_vm_state could be STOPPED but the user
            # might have manually powered up the instance to confirm the
            # resize/migrate, so we need to check the current power state
            # on the instance and set the vm_state appropriately. We default
            # to ACTIVE because if the power state is not SHUTDOWN, we
            # assume _sync_instance_power_state will clean it up.
            p_state = instance.power_state
            vm_state = None
            if p_state == power_state.SHUTDOWN:
                vm_state = vm_states.STOPPED
                LOG.debug("Resized/migrated instance is powered off. "
                          "Setting vm_state to '%s'.", vm_state,
                          instance=instance)
            else:
                vm_state = vm_states.ACTIVE
            instance.vm_state = vm_state
            instance.task_state = None
            instance.save(expected_task_state=[None, task_states.DELETING])
            self._notify_about_instance_usage(
                context, instance, "resize.confirm.end",
                network_info=network_info)
            quotas.commit()
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @errors_out_migration
    @wrap_instance_fault
    def revert_resize(self, context, instance, migration, reservations):
        """Destroys the new instance on the destination machine.

        Reverts the model changes, and powers on the old instance on the
        source machine.
        """
        quotas = objects.Quotas.from_reservations(context,
                                                  reservations,
                                                  instance=instance)
        # NOTE(comstud): A revert_resize is essentially a resize back to
        # the old size, so we need to send a usage event here.
        compute_utils.notify_usage_exists(self.notifier, context, instance,
                                          current_period=True)
        with self._error_out_instance_on_exception(context, instance,
                                                   quotas=quotas):
            # NOTE(tr3buchet): tear down networks on destination host
            self.network_api.setup_networks_on_host(context, instance,
                                                    teardown=True)
            migration_p = obj_base.obj_to_primitive(migration)
            self.network_api.migrate_instance_start(context,
                                                    instance,
                                                    migration_p)
            network_info = self.network_api.get_instance_nw_info(context,
                                                                 instance)
            bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
                    context, instance.uuid)
            block_device_info = self._get_instance_block_device_info(
                                context, instance, bdms=bdms)
            # Keep the disks when the source host can still see them over
            # shared storage; destroy them otherwise.
            destroy_disks = not self._is_instance_storage_shared(
                context, instance, host=migration.source_compute)
            self.driver.destroy(context, instance, network_info,
                                block_device_info, destroy_disks)
            self._terminate_volume_connections(context, instance, bdms)
            migration.status = 'reverted'
            with migration.obj_as_admin():
                migration.save()
            # NOTE(ndipanov): We need to do this here because dropping the
            # claim means we lose the migration_context data. We really should
            # fix this by moving the drop_move_claim call to the
            # finish_revert_resize method as this is racy (revert is dropped,
            # but instance resources will be tracked with the new flavor until
            # it gets rolled back in finish_revert_resize, which is
            # potentially wrong for a period of time).
            instance.revert_migration_context()
            instance.save()
            rt = self._get_resource_tracker(instance.node)
            rt.drop_move_claim(context, instance)
            # Hand the second half of the revert off to the source compute.
            self.compute_rpcapi.finish_revert_resize(context, instance,
                    migration, migration.source_compute,
                    quotas.reservations)
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @errors_out_migration
    @wrap_instance_fault
    def finish_revert_resize(self, context, instance, reservations, migration):
        """Finishes the second half of reverting a resize.

        Bring the original source instance state back (active/shutoff) and
        revert the resized attributes in the database.

        :param context: request context
        :param instance: Instance object being reverted to its old flavor
        :param reservations: quota reservations, committed once the revert
            completes successfully
        :param migration: Migration object describing the resize being
            reverted
        """
        quotas = objects.Quotas.from_reservations(context,
                                                  reservations,
                                                  instance=instance)
        # Any unhandled failure below errors out the instance and rolls the
        # quota reservations back.
        with self._error_out_instance_on_exception(context, instance,
                                                   quotas=quotas):
            network_info = self.network_api.get_instance_nw_info(context,
                                                                 instance)
            self._notify_about_instance_usage(
                    context, instance, "resize.revert.start")
            # NOTE(mriedem): delete stashed old_vm_state information; we
            # default to ACTIVE for backwards compatibility if old_vm_state
            # is not set
            old_vm_state = instance.system_metadata.pop('old_vm_state',
                                                        vm_states.ACTIVE)
            # Restore the pre-resize flavor on the instance record and point
            # the instance back at the source host/node.
            self._set_instance_info(instance, instance.old_flavor)
            instance.old_flavor = None
            instance.new_flavor = None
            instance.host = migration.source_compute
            instance.node = migration.source_node
            instance.save()
            migration.dest_compute = migration.source_compute
            with migration.obj_as_admin():
                migration.save()
            self.network_api.setup_networks_on_host(context, instance,
                                                    migration.source_compute)
            block_device_info = self._get_instance_block_device_info(
                    context, instance, refresh_conn_info=True)
            # Only power the guest back on if it was running before the
            # resize started.
            power_on = old_vm_state != vm_states.STOPPED
            self.driver.finish_revert_migration(context, instance,
                                       network_info,
                                       block_device_info, power_on)
            instance.launched_at = timeutils.utcnow()
            instance.save(expected_task_state=task_states.RESIZE_REVERTING)
            migration_p = obj_base.obj_to_primitive(migration)
            self.network_api.migrate_instance_finish(context,
                                                     instance,
                                                     migration_p)
            # if the original vm state was STOPPED, set it back to STOPPED
            LOG.info(_LI("Updating instance to original state: '%s'"),
                     old_vm_state, instance=instance)
            if power_on:
                instance.vm_state = vm_states.ACTIVE
                instance.task_state = None
                instance.save()
            else:
                instance.task_state = task_states.POWERING_OFF
                instance.save()
                self.stop_instance(context, instance=instance,
                                   clean_shutdown=True)
            self._notify_about_instance_usage(
                    context, instance, "resize.revert.end")
            quotas.commit()
    def _prep_resize(self, context, image, instance, instance_type,
                     quotas, request_spec, filter_properties, node,
                     clean_shutdown=True):
        """Validate and claim resources for a resize, then cast the actual
        resize to the source compute.

        :param image: image metadata for the instance
        :param instance_type: target Flavor object
        :param quotas: Quotas object holding the resize reservations
        :param node: compute node on this host to claim resources on
        :param clean_shutdown: give the GuestOS a chance to stop cleanly
        :raises MigrationError: if the instance has no source host
        :raises UnableToMigrateToSelf: if a same-host migrate is requested
            but the driver does not support it
        """
        if not filter_properties:
            filter_properties = {}
        if not instance.host:
            self._set_instance_obj_error_state(context, instance)
            msg = _('Instance has no source host')
            raise exception.MigrationError(reason=msg)
        same_host = instance.host == self.host
        # if the flavor IDs match, it's migrate; otherwise resize
        if same_host and instance_type.id == instance['instance_type_id']:
            # check driver whether support migrate to same host
            if not self.driver.capabilities['supports_migrate_to_same_host']:
                raise exception.UnableToMigrateToSelf(
                    instance_id=instance.uuid, host=self.host)
        # NOTE(danms): Stash the new instance_type to avoid having to
        # look it up in the database later
        instance.new_flavor = instance_type
        # NOTE(mriedem): Stash the old vm_state so we can set the
        # resized/reverted instance back to the same state later.
        vm_state = instance.vm_state
        LOG.debug('Stashing vm_state: %s', vm_state, instance=instance)
        instance.system_metadata['old_vm_state'] = vm_state
        instance.save()
        limits = filter_properties.get('limits', {})
        rt = self._get_resource_tracker(node)
        # The resize claim reserves resources on this node and creates the
        # migration record; resize_instance is only cast once the claim
        # succeeds.
        with rt.resize_claim(context, instance, instance_type,
                             image_meta=image, limits=limits) as claim:
            LOG.info(_LI('Migrating'), context=context, instance=instance)
            self.compute_rpcapi.resize_instance(
                context, instance, claim.migration, image,
                instance_type, quotas.reservations,
                clean_shutdown)
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def prep_resize(self, context, image, instance, instance_type,
                    reservations, request_spec, filter_properties, node,
                    clean_shutdown):
        """Initiates the process of moving a running instance to another host.

        Possibly changes the RAM and disk size in the process.

        :param image: image metadata for the instance
        :param instance_type: target flavor (object or primitive dict)
        :param reservations: quota reservations for the resize
        :param request_spec: request spec dict, used when rescheduling
        :param filter_properties: scheduler filter properties dict
        :param node: target compute node; defaults to the driver's first
            available node when None
        :param clean_shutdown: give the GuestOS a chance to stop cleanly
        """
        if node is None:
            node = self.driver.get_available_nodes(refresh=True)[0]
            LOG.debug("No node specified, defaulting to %s", node,
                      instance=instance)
        # NOTE(melwitt): Remove this in version 5.0 of the RPC API
        # Code downstream may expect extra_specs to be populated since it
        # is receiving an object, so lookup the flavor to ensure this.
        if not isinstance(instance_type, objects.Flavor):
            instance_type = objects.Flavor.get_by_id(context,
                                                     instance_type['id'])
        quotas = objects.Quotas.from_reservations(context,
                                                  reservations,
                                                  instance=instance)
        with self._error_out_instance_on_exception(context, instance,
                                                   quotas=quotas):
            compute_utils.notify_usage_exists(self.notifier, context, instance,
                                              current_period=True)
            self._notify_about_instance_usage(
                    context, instance, "resize.prep.start")
            try:
                self._prep_resize(context, image, instance,
                                  instance_type, quotas,
                                  request_spec, filter_properties,
                                  node, clean_shutdown)
            # NOTE(dgenin): This is thrown in LibvirtDriver when the
            # instance to be migrated is backed by LVM.
            # Remove when LVM migration is implemented.
            except exception.MigrationPreCheckError:
                raise
            except Exception:
                # try to re-schedule the resize elsewhere:
                exc_info = sys.exc_info()
                self._reschedule_resize_or_reraise(context, image, instance,
                        exc_info, instance_type, quotas, request_spec,
                        filter_properties)
            finally:
                # Always emit the prep.end notification, even on reschedule
                # or failure.
                extra_usage_info = dict(
                        new_instance_type=instance_type.name,
                        new_instance_type_id=instance_type.id)
                self._notify_about_instance_usage(
                    context, instance, "resize.prep.end",
                    extra_usage_info=extra_usage_info)
    def _reschedule_resize_or_reraise(self, context, image, instance, exc_info,
            instance_type, quotas, request_spec, filter_properties):
        """Try to re-schedule the resize or re-raise the original error to
        error out the instance.

        :param exc_info: ``sys.exc_info()`` tuple of the original failure
        :param instance_type: target flavor of the failed resize
        :param quotas: Quotas object whose reservations are forwarded to the
            rescheduled resize
        """
        if not request_spec:
            request_spec = {}
        if not filter_properties:
            filter_properties = {}
        rescheduled = False
        instance_uuid = instance.uuid
        try:
            reschedule_method = self.compute_task_api.resize_instance
            scheduler_hint = dict(filter_properties=filter_properties)
            method_args = (instance, None, scheduler_hint, instance_type,
                           quotas.reservations)
            task_state = task_states.RESIZE_PREP
            rescheduled = self._reschedule(context, request_spec,
                    filter_properties, instance, reschedule_method,
                    method_args, task_state, exc_info)
        except Exception as error:
            # Rescheduling itself failed; record this new fault and fall
            # through to re-raise the original error below.
            rescheduled = False
            LOG.exception(_LE("Error trying to reschedule"),
                          instance_uuid=instance_uuid)
            compute_utils.add_instance_fault_from_exc(context,
                    instance, error,
                    exc_info=sys.exc_info())
            self._notify_about_instance_usage(context, instance,
                    'resize.error', fault=error)
        if rescheduled:
            # Record the original error; the resize will be retried on
            # another host.
            self._log_original_error(exc_info, instance_uuid)
            compute_utils.add_instance_fault_from_exc(context,
                    instance, exc_info[1], exc_info=exc_info)
            self._notify_about_instance_usage(context, instance,
                    'resize.error', fault=exc_info[1])
        else:
            # not re-scheduling
            six.reraise(*exc_info)
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @errors_out_migration
    @wrap_instance_fault
    def resize_instance(self, context, instance, image,
                        reservations, migration, instance_type,
                        clean_shutdown):
        """Starts the migration of a running instance to another host.

        Powers the guest off, migrates its disk to the destination host and
        then casts finish_resize to the destination compute.

        :param image: image metadata for the instance
        :param reservations: quota reservations for the resize
        :param migration: Migration object for this resize
        :param instance_type: target flavor; may be missing or a primitive
            from older RPC callers, in which case it is looked up below
        :param clean_shutdown: give the GuestOS a chance to stop cleanly
        """
        quotas = objects.Quotas.from_reservations(context,
                                                  reservations,
                                                  instance=instance)
        with self._error_out_instance_on_exception(context, instance,
                                                   quotas=quotas):
            # TODO(chaochin) Remove this until v5 RPC API
            # Code downstream may expect extra_specs to be populated since it
            # is receiving an object, so lookup the flavor to ensure this.
            if (not instance_type or
                not isinstance(instance_type, objects.Flavor)):
                instance_type = objects.Flavor.get_by_id(
                    context, migration['new_instance_type_id'])
            network_info = self.network_api.get_instance_nw_info(context,
                                                                 instance)
            migration.status = 'migrating'
            with migration.obj_as_admin():
                migration.save()
            instance.task_state = task_states.RESIZE_MIGRATING
            instance.save(expected_task_state=task_states.RESIZE_PREP)
            self._notify_about_instance_usage(
                context, instance, "resize.start", network_info=network_info)
            bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
                    context, instance.uuid)
            block_device_info = self._get_instance_block_device_info(
                                context, instance, bdms=bdms)
            # Power off (optionally waiting for a clean guest shutdown) and
            # copy the disk over to the destination host.
            timeout, retry_interval = self._get_power_off_values(context,
                                            instance, clean_shutdown)
            disk_info = self.driver.migrate_disk_and_power_off(
                    context, instance, migration.dest_host,
                    instance_type, network_info,
                    block_device_info,
                    timeout, retry_interval)
            self._terminate_volume_connections(context, instance, bdms)
            migration_p = obj_base.obj_to_primitive(migration)
            self.network_api.migrate_instance_start(context,
                                                    instance,
                                                    migration_p)
            migration.status = 'post-migrating'
            with migration.obj_as_admin():
                migration.save()
            # Point the instance at the destination before handing off.
            instance.host = migration.dest_compute
            instance.node = migration.dest_node
            instance.task_state = task_states.RESIZE_MIGRATED
            instance.save(expected_task_state=task_states.RESIZE_MIGRATING)
            self.compute_rpcapi.finish_resize(context, instance,
                    migration, image, disk_info,
                    migration.dest_compute, reservations=quotas.reservations)
            self._notify_about_instance_usage(context, instance, "resize.end",
                                              network_info=network_info)
            self.instance_events.clear_events_for_instance(instance)
def _terminate_volume_connections(self, context, instance, bdms):
connector = self.driver.get_volume_connector(instance)
for bdm in bdms:
if bdm.is_volume:
self.volume_api.terminate_connection(context, bdm.volume_id,
connector)
@staticmethod
def _set_instance_info(instance, instance_type):
instance.instance_type_id = instance_type.id
instance.memory_mb = instance_type.memory_mb
instance.vcpus = instance_type.vcpus
instance.root_gb = instance_type.root_gb
instance.ephemeral_gb = instance_type.ephemeral_gb
instance.flavor = instance_type
    def _finish_resize(self, context, instance, migration, disk_info,
                       image_meta):
        """Apply the new flavor, set up networking, and boot the migrated
        guest on this (destination) host.

        :param migration: Migration record for this resize
        :param disk_info: disk info produced by the source host's
            migrate_disk_and_power_off()
        :param image_meta: ImageMeta object for the instance's image
        """
        resize_instance = False
        old_instance_type_id = migration['old_instance_type_id']
        new_instance_type_id = migration['new_instance_type_id']
        old_instance_type = instance.get_flavor()
        # NOTE(mriedem): Get the old_vm_state so we know if we should
        # power on the instance. If old_vm_state is not set we need to default
        # to ACTIVE for backwards compatibility
        old_vm_state = instance.system_metadata.get('old_vm_state',
                                                    vm_states.ACTIVE)
        instance.old_flavor = old_instance_type
        if old_instance_type_id != new_instance_type_id:
            instance_type = instance.get_flavor('new')
            self._set_instance_info(instance, instance_type)
            # Only ask the driver to resize the disk when a disk-sizing
            # attribute actually changed.
            for key in ('root_gb', 'swap', 'ephemeral_gb'):
                if old_instance_type[key] != instance_type[key]:
                    resize_instance = True
                    break
        instance.apply_migration_context()
        # NOTE(tr3buchet): setup networks on destination host
        self.network_api.setup_networks_on_host(context, instance,
                                                migration['dest_compute'])
        migration_p = obj_base.obj_to_primitive(migration)
        self.network_api.migrate_instance_finish(context,
                                                 instance,
                                                 migration_p)
        network_info = self.network_api.get_instance_nw_info(context, instance)
        instance.task_state = task_states.RESIZE_FINISH
        instance.save(expected_task_state=task_states.RESIZE_MIGRATED)
        self._notify_about_instance_usage(
            context, instance, "finish_resize.start",
            network_info=network_info)
        block_device_info = self._get_instance_block_device_info(
                            context, instance, refresh_conn_info=True)
        # NOTE(mriedem): If the original vm_state was STOPPED, we don't
        # automatically power on the instance after it's migrated
        power_on = old_vm_state != vm_states.STOPPED
        try:
            self.driver.finish_migration(context, migration, instance,
                                         disk_info,
                                         network_info,
                                         image_meta, resize_instance,
                                         block_device_info, power_on)
        except Exception:
            with excutils.save_and_reraise_exception():
                # Roll the flavor info back so the instance record matches
                # what is actually still running.
                if old_instance_type_id != new_instance_type_id:
                    self._set_instance_info(instance,
                                            old_instance_type)
        migration.status = 'finished'
        with migration.obj_as_admin():
            migration.save()
        instance.vm_state = vm_states.RESIZED
        instance.task_state = None
        instance.launched_at = timeutils.utcnow()
        instance.save(expected_task_state=task_states.RESIZE_FINISH)
        self._update_scheduler_instance_info(context, instance)
        self._notify_about_instance_usage(
            context, instance, "finish_resize.end",
            network_info=network_info)
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @errors_out_migration
    @wrap_instance_fault
    def finish_resize(self, context, disk_info, image, instance,
                      reservations, migration):
        """Completes the migration process.

        Sets up the newly transferred disk and turns on the instance at its
        new host machine.

        :param disk_info: disk info from the source host
        :param image: image dict for the instance
        :param reservations: quota reservations; committed on success,
            rolled back on failure
        :param migration: Migration record for this resize
        """
        quotas = objects.Quotas.from_reservations(context,
                                                  reservations,
                                                  instance=instance)
        try:
            image_meta = objects.ImageMeta.from_dict(image)
            self._finish_resize(context, instance, migration,
                                disk_info, image_meta)
            quotas.commit()
        except Exception:
            LOG.exception(_LE('Setting instance vm_state to ERROR'),
                          instance=instance)
            with excutils.save_and_reraise_exception():
                try:
                    quotas.rollback()
                except Exception:
                    # Best-effort rollback: log and continue so the instance
                    # still gets errored out with the original exception.
                    LOG.exception(_LE("Failed to rollback quota for failed "
                                      "finish_resize"),
                                  instance=instance)
                self._set_instance_obj_error_state(context, instance)
    @wrap_exception()
    @wrap_instance_fault
    def add_fixed_ip_to_instance(self, context, network_id, instance):
        """Calls network_api to add new fixed_ip to instance
        then injects the new network info and resets instance networking.

        :param network_id: network to allocate the new fixed IP from
        :param instance: Instance object to receive the new fixed IP
        """
        self._notify_about_instance_usage(
                context, instance, "create_ip.start")
        network_info = self.network_api.add_fixed_ip_to_instance(context,
                                                                 instance,
                                                                 network_id)
        self._inject_network_info(context, instance, network_info)
        self.reset_network(context, instance)
        # NOTE(russellb) We just want to bump updated_at. See bug 1143466.
        instance.updated_at = timeutils.utcnow()
        instance.save()
        self._notify_about_instance_usage(
            context, instance, "create_ip.end", network_info=network_info)
    @wrap_exception()
    @wrap_instance_fault
    def remove_fixed_ip_from_instance(self, context, address, instance):
        """Calls network_api to remove existing fixed_ip from instance
        by injecting the altered network info and resetting
        instance networking.

        :param address: the fixed IP address to remove
        :param instance: Instance object losing the fixed IP
        """
        self._notify_about_instance_usage(
                context, instance, "delete_ip.start")
        network_info = self.network_api.remove_fixed_ip_from_instance(context,
                                                                      instance,
                                                                      address)
        self._inject_network_info(context, instance, network_info)
        self.reset_network(context, instance)
        # NOTE(russellb) We just want to bump updated_at. See bug 1143466.
        instance.updated_at = timeutils.utcnow()
        instance.save()
        self._notify_about_instance_usage(
            context, instance, "delete_ip.end", network_info=network_info)
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def pause_instance(self, context, instance):
        """Pause an instance on this host.

        Pauses the guest via the driver, refreshes power state, moves the
        instance to PAUSED and emits pause.start/pause.end notifications.
        """
        context = context.elevated()
        LOG.info(_LI('Pausing'), context=context, instance=instance)
        self._notify_about_instance_usage(context, instance, 'pause.start')
        self.driver.pause(instance)
        instance.power_state = self._get_power_state(context, instance)
        instance.vm_state = vm_states.PAUSED
        instance.task_state = None
        instance.save(expected_task_state=task_states.PAUSING)
        self._notify_about_instance_usage(context, instance, 'pause.end')
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def unpause_instance(self, context, instance):
        """Unpause a paused instance on this host.

        Unpauses the guest via the driver, refreshes power state, moves the
        instance back to ACTIVE and emits unpause.start/unpause.end
        notifications.
        """
        context = context.elevated()
        LOG.info(_LI('Unpausing'), context=context, instance=instance)
        self._notify_about_instance_usage(context, instance, 'unpause.start')
        self.driver.unpause(instance)
        instance.power_state = self._get_power_state(context, instance)
        instance.vm_state = vm_states.ACTIVE
        instance.task_state = None
        instance.save(expected_task_state=task_states.UNPAUSING)
        self._notify_about_instance_usage(context, instance, 'unpause.end')
@wrap_exception()
def host_power_action(self, context, action):
"""Reboots, shuts down or powers up the host."""
return self.driver.host_power_action(action)
@wrap_exception()
def host_maintenance_mode(self, context, host, mode):
"""Start/Stop host maintenance window. On start, it triggers
guest VMs evacuation.
"""
return self.driver.host_maintenance_mode(host, mode)
@wrap_exception()
def set_host_enabled(self, context, enabled):
"""Sets the specified host's ability to accept new instances."""
return self.driver.set_host_enabled(enabled)
@wrap_exception()
def get_host_uptime(self, context):
"""Returns the result of calling "uptime" on the target host."""
return self.driver.get_host_uptime()
@wrap_exception()
@wrap_instance_fault
def get_diagnostics(self, context, instance):
"""Retrieve diagnostics for an instance on this host."""
current_power_state = self._get_power_state(context, instance)
if current_power_state == power_state.RUNNING:
LOG.info(_LI("Retrieving diagnostics"), context=context,
instance=instance)
return self.driver.get_diagnostics(instance)
else:
raise exception.InstanceInvalidState(
attr='power_state',
instance_uuid=instance.uuid,
state=instance.power_state,
method='get_diagnostics')
@object_compat
@wrap_exception()
@wrap_instance_fault
def get_instance_diagnostics(self, context, instance):
"""Retrieve diagnostics for an instance on this host."""
current_power_state = self._get_power_state(context, instance)
if current_power_state == power_state.RUNNING:
LOG.info(_LI("Retrieving diagnostics"), context=context,
instance=instance)
diags = self.driver.get_instance_diagnostics(instance)
return diags.serialize()
else:
raise exception.InstanceInvalidState(
attr='power_state',
instance_uuid=instance.uuid,
state=instance.power_state,
method='get_diagnostics')
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def suspend_instance(self, context, instance):
        """Suspend the given instance.

        The current vm_state is stashed in system_metadata so
        resume_instance() can restore it afterwards.
        """
        context = context.elevated()
        # Store the old state
        instance.system_metadata['old_vm_state'] = instance.vm_state
        self._notify_about_instance_usage(context, instance, 'suspend.start')
        with self._error_out_instance_on_exception(context, instance,
             instance_state=instance.vm_state):
            self.driver.suspend(context, instance)
        instance.power_state = self._get_power_state(context, instance)
        instance.vm_state = vm_states.SUSPENDED
        instance.task_state = None
        instance.save(expected_task_state=task_states.SUSPENDING)
        self._notify_about_instance_usage(context, instance, 'suspend.end')
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def resume_instance(self, context, instance):
        """Resume the given suspended instance.

        Restores the vm_state stashed by suspend_instance(), defaulting to
        ACTIVE for backwards compatibility when no old state was stored.
        """
        context = context.elevated()
        LOG.info(_LI('Resuming'), context=context, instance=instance)
        self._notify_about_instance_usage(context, instance, 'resume.start')
        network_info = self.network_api.get_instance_nw_info(context, instance)
        block_device_info = self._get_instance_block_device_info(
                            context, instance)
        with self._error_out_instance_on_exception(context, instance,
             instance_state=instance.vm_state):
            self.driver.resume(context, instance, network_info,
                               block_device_info)
        instance.power_state = self._get_power_state(context, instance)
        # We default to the ACTIVE state for backwards compatibility
        instance.vm_state = instance.system_metadata.pop('old_vm_state',
                                                         vm_states.ACTIVE)
        instance.task_state = None
        instance.save(expected_task_state=task_states.RESUMING)
        self._notify_about_instance_usage(context, instance, 'resume.end')
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def shelve_instance(self, context, instance, image_id,
                        clean_shutdown):
        """Shelve an instance.

        This should be used when you want to take a snapshot of the instance.
        It also adds system_metadata that can be used by a periodic task to
        offload the shelved instance after a period of time.

        :param context: request context
        :param instance: an Instance object
        :param image_id: an image id to snapshot to.
        :param clean_shutdown: give the GuestOS a chance to stop
        """
        compute_utils.notify_usage_exists(self.notifier, context, instance,
                                          current_period=True)
        self._notify_about_instance_usage(context, instance, 'shelve.start')

        def update_task_state(task_state, expected_state=task_states.SHELVING):
            # Map the generic snapshot task states onto their SHELVING_*
            # equivalents so the instance shows as shelving rather than
            # snapshotting while the image uploads.
            shelving_state_map = {
                    task_states.IMAGE_PENDING_UPLOAD:
                        task_states.SHELVING_IMAGE_PENDING_UPLOAD,
                    task_states.IMAGE_UPLOADING:
                        task_states.SHELVING_IMAGE_UPLOADING,
                    task_states.SHELVING: task_states.SHELVING}
            task_state = shelving_state_map[task_state]
            expected_state = shelving_state_map[expected_state]
            instance.task_state = task_state
            instance.save(expected_task_state=expected_state)
        self._power_off_instance(context, instance, clean_shutdown)
        self.driver.snapshot(context, instance, image_id, update_task_state)
        # Stash where/when/to-what we shelved so unshelve and the periodic
        # offload task can find it later.
        instance.system_metadata['shelved_at'] = timeutils.utcnow().isoformat()
        instance.system_metadata['shelved_image_id'] = image_id
        instance.system_metadata['shelved_host'] = self.host
        instance.vm_state = vm_states.SHELVED
        instance.task_state = None
        if CONF.shelved_offload_time == 0:
            instance.task_state = task_states.SHELVING_OFFLOADING
        instance.power_state = self._get_power_state(context, instance)
        instance.save(expected_task_state=[
                task_states.SHELVING,
                task_states.SHELVING_IMAGE_UPLOADING])
        self._notify_about_instance_usage(context, instance, 'shelve.end')
        # Offload immediately when no offload delay is configured.
        if CONF.shelved_offload_time == 0:
            self.shelve_offload_instance(context, instance,
                                         clean_shutdown=False)
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_fault
    def shelve_offload_instance(self, context, instance, clean_shutdown):
        """Remove a shelved instance from the hypervisor.

        This frees up those resources for use by other instances, but may lead
        to slower unshelve times for this instance. This method is used by
        volume backed instances since restoring them doesn't involve the
        potentially large download of an image.

        :param context: request context
        :param instance: nova.objects.instance.Instance
        :param clean_shutdown: give the GuestOS a chance to stop
        """
        self._notify_about_instance_usage(context, instance,
                'shelve_offload.start')
        self._power_off_instance(context, instance, clean_shutdown)
        current_power_state = self._get_power_state(context, instance)
        # Release the network and block device resources held on this host,
        # then destroy the guest.
        self.network_api.cleanup_instance_network_on_host(context, instance,
                                                          instance.host)
        network_info = self.network_api.get_instance_nw_info(context, instance)
        block_device_info = self._get_instance_block_device_info(context,
                                                                 instance)
        self.driver.destroy(context, instance, network_info,
                            block_device_info)
        instance.power_state = current_power_state
        # The instance is no longer bound to any host until it is unshelved.
        instance.host = None
        instance.node = None
        instance.vm_state = vm_states.SHELVED_OFFLOADED
        instance.task_state = None
        instance.save(expected_task_state=[task_states.SHELVING,
                                           task_states.SHELVING_OFFLOADING])
        # NOTE(ndipanov): This frees the resources with the resource_tracker
        self._update_resource_tracker(context, instance)
        self._delete_scheduler_instance_info(context, instance.uuid)
        self._notify_about_instance_usage(context, instance,
                'shelve_offload.end')
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def unshelve_instance(self, context, instance, image,
filter_properties, node):
"""Unshelve the instance.
:param context: request context
:param instance: a nova.objects.instance.Instance object
:param image: an image to build from. If None we assume a
volume backed instance.
:param filter_properties: dict containing limits, retry info etc.
:param node: target compute node
"""
if filter_properties is None:
filter_properties = {}
@utils.synchronized(instance.uuid)
def do_unshelve_instance():
self._unshelve_instance(context, instance, image,
filter_properties, node)
do_unshelve_instance()
def _unshelve_instance_key_scrub(self, instance):
"""Remove data from the instance that may cause side effects."""
cleaned_keys = dict(
key_data=instance.key_data,
auto_disk_config=instance.auto_disk_config)
instance.key_data = None
instance.auto_disk_config = False
return cleaned_keys
def _unshelve_instance_key_restore(self, instance, keys):
"""Restore previously scrubbed keys before saving the instance."""
instance.update(keys)
    def _unshelve_instance(self, context, instance, image, filter_properties,
                           node):
        """Claim resources and spawn the shelved instance back on this host.

        :param image: snapshot image dict taken at shelve time, or None for
            a volume-backed instance
        :param filter_properties: dict whose 'limits' are used for the
            resource claim
        :param node: compute node to claim on; defaults to the driver's
            first available node
        """
        self._notify_about_instance_usage(context, instance, 'unshelve.start')
        instance.task_state = task_states.SPAWNING
        instance.save()
        bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
            context, instance.uuid)
        block_device_info = self._prep_block_device(context, instance, bdms,
                                                    do_check_attach=False)
        # Temporarily blank fields that could cause side effects during
        # spawn; restored after a successful spawn below.
        scrubbed_keys = self._unshelve_instance_key_scrub(instance)
        if node is None:
            node = self.driver.get_available_nodes()[0]
            LOG.debug('No node specified, defaulting to %s', node,
                      instance=instance)
        rt = self._get_resource_tracker(node)
        limits = filter_properties.get('limits', {})
        shelved_image_ref = instance.image_ref
        if image:
            instance.image_ref = image['id']
            image_meta = objects.ImageMeta.from_dict(image)
        else:
            # Volume-backed: rebuild the image meta from the stashed
            # system_metadata instead of a snapshot image.
            image_meta = objects.ImageMeta.from_dict(
                utils.get_image_from_system_metadata(
                    instance.system_metadata))
        self.network_api.setup_instance_network_on_host(context, instance,
                                                        self.host)
        network_info = self.network_api.get_instance_nw_info(context, instance)
        try:
            with rt.instance_claim(context, instance, limits):
                self.driver.spawn(context, instance, image_meta,
                                  injected_files=[],
                                  admin_password=None,
                                  network_info=network_info,
                                  block_device_info=block_device_info)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE('Instance failed to spawn'),
                              instance=instance)
                # Restore the original image ref and clean up the snapshot
                # taken at shelve time.
                if image:
                    instance.image_ref = shelved_image_ref
                    self._delete_snapshot_of_shelved_instance(context,
                                                              instance,
                                                              image['id'])
        self._unshelve_instance_key_restore(instance, scrubbed_keys)
        self._update_instance_after_spawn(context, instance)
        # Delete system_metadata for a shelved instance
        compute_utils.remove_shelved_keys_from_system_metadata(instance)
        instance.save(expected_task_state=task_states.SPAWNING)
        self._update_scheduler_instance_info(context, instance)
        self._notify_about_instance_usage(context, instance, 'unshelve.end')
@messaging.expected_exceptions(NotImplementedError)
@wrap_instance_fault
def reset_network(self, context, instance):
"""Reset networking on the given instance."""
LOG.debug('Reset network', context=context, instance=instance)
self.driver.reset_network(instance)
def _inject_network_info(self, context, instance, network_info):
"""Inject network info for the given instance."""
LOG.debug('Inject network info', context=context, instance=instance)
LOG.debug('network_info to inject: |%s|', network_info,
instance=instance)
self.driver.inject_network_info(instance,
network_info)
@wrap_instance_fault
def inject_network_info(self, context, instance):
"""Inject network info, but don't return the info."""
network_info = self.network_api.get_instance_nw_info(context, instance)
self._inject_network_info(context, instance, network_info)
@messaging.expected_exceptions(NotImplementedError,
exception.ConsoleNotAvailable,
exception.InstanceNotFound)
@wrap_exception()
@wrap_instance_fault
def get_console_output(self, context, instance, tail_length):
"""Send the console output for the given instance."""
context = context.elevated()
LOG.info(_LI("Get console output"), context=context,
instance=instance)
output = self.driver.get_console_output(context, instance)
if type(output) is six.text_type:
# the console output will be bytes.
output = six.b(output)
if tail_length is not None:
output = self._tail_log(output, tail_length)
return output.decode('utf-8', 'replace').encode('ascii', 'replace')
def _tail_log(self, log, length):
try:
length = int(length)
except ValueError:
length = 0
if length == 0:
return b''
else:
return b'\n'.join(log.split(b'\n')[-int(length):])
@messaging.expected_exceptions(exception.ConsoleTypeInvalid,
exception.InstanceNotReady,
exception.InstanceNotFound,
exception.ConsoleTypeUnavailable,
NotImplementedError)
@wrap_exception()
@wrap_instance_fault
def get_vnc_console(self, context, console_type, instance):
"""Return connection information for a vnc console."""
context = context.elevated()
LOG.debug("Getting vnc console", instance=instance)
token = str(uuid.uuid4())
if not CONF.vnc.enabled:
raise exception.ConsoleTypeUnavailable(console_type=console_type)
if console_type == 'novnc':
# For essex, novncproxy_base_url must include the full path
# including the html file (like http://myhost/vnc_auto.html)
access_url = '%s?token=%s' % (CONF.vnc.novncproxy_base_url, token)
elif console_type == 'xvpvnc':
access_url = '%s?token=%s' % (CONF.vnc.xvpvncproxy_base_url, token)
else:
raise exception.ConsoleTypeInvalid(console_type=console_type)
try:
# Retrieve connect info from driver, and then decorate with our
# access info token
console = self.driver.get_vnc_console(context, instance)
connect_info = console.get_connection_info(token, access_url)
except exception.InstanceNotFound:
if instance.vm_state != vm_states.BUILDING:
raise
raise exception.InstanceNotReady(instance_id=instance.uuid)
return connect_info
@messaging.expected_exceptions(exception.ConsoleTypeInvalid,
exception.InstanceNotReady,
exception.InstanceNotFound,
exception.ConsoleTypeUnavailable,
NotImplementedError)
@wrap_exception()
@wrap_instance_fault
def get_spice_console(self, context, console_type, instance):
"""Return connection information for a spice console."""
context = context.elevated()
LOG.debug("Getting spice console", instance=instance)
token = str(uuid.uuid4())
if not CONF.spice.enabled:
raise exception.ConsoleTypeUnavailable(console_type=console_type)
if console_type == 'spice-html5':
# For essex, spicehtml5proxy_base_url must include the full path
# including the html file (like http://myhost/spice_auto.html)
access_url = '%s?token=%s' % (CONF.spice.html5proxy_base_url,
token)
else:
raise exception.ConsoleTypeInvalid(console_type=console_type)
try:
# Retrieve connect info from driver, and then decorate with our
# access info token
console = self.driver.get_spice_console(context, instance)
connect_info = console.get_connection_info(token, access_url)
except exception.InstanceNotFound:
if instance.vm_state != vm_states.BUILDING:
raise
raise exception.InstanceNotReady(instance_id=instance.uuid)
return connect_info
@messaging.expected_exceptions(exception.ConsoleTypeInvalid,
exception.InstanceNotReady,
exception.InstanceNotFound,
exception.ConsoleTypeUnavailable,
NotImplementedError)
@wrap_exception()
@wrap_instance_fault
def get_rdp_console(self, context, console_type, instance):
"""Return connection information for a RDP console."""
context = context.elevated()
LOG.debug("Getting RDP console", instance=instance)
token = str(uuid.uuid4())
if not CONF.rdp.enabled:
raise exception.ConsoleTypeUnavailable(console_type=console_type)
if console_type == 'rdp-html5':
access_url = '%s?token=%s' % (CONF.rdp.html5_proxy_base_url,
token)
else:
raise exception.ConsoleTypeInvalid(console_type=console_type)
try:
# Retrieve connect info from driver, and then decorate with our
# access info token
console = self.driver.get_rdp_console(context, instance)
connect_info = console.get_connection_info(token, access_url)
except exception.InstanceNotFound:
if instance.vm_state != vm_states.BUILDING:
raise
raise exception.InstanceNotReady(instance_id=instance.uuid)
return connect_info
@messaging.expected_exceptions(exception.ConsoleTypeInvalid,
exception.InstanceNotReady,
exception.InstanceNotFound,
exception.ConsoleTypeUnavailable,
NotImplementedError)
@wrap_exception()
@wrap_instance_fault
def get_mks_console(self, context, console_type, instance):
"""Return connection information for a MKS console."""
context = context.elevated()
LOG.debug("Getting MKS console", instance=instance)
token = str(uuid.uuid4())
if not CONF.mks.enabled:
raise exception.ConsoleTypeUnavailable(console_type=console_type)
if console_type == 'webmks':
access_url = '%s?token=%s' % (CONF.mks.mksproxy_base_url,
token)
else:
raise exception.ConsoleTypeInvalid(console_type=console_type)
try:
# Retrieve connect info from driver, and then decorate with our
# access info token
console = self.driver.get_mks_console(context, instance)
connect_info = console.get_connection_info(token, access_url)
except exception.InstanceNotFound:
if instance.vm_state != vm_states.BUILDING:
raise
raise exception.InstanceNotReady(instance_id=instance.uuid)
return connect_info
@messaging.expected_exceptions(
exception.ConsoleTypeInvalid,
exception.InstanceNotReady,
exception.InstanceNotFound,
exception.ConsoleTypeUnavailable,
exception.SocketPortRangeExhaustedException,
exception.ImageSerialPortNumberInvalid,
exception.ImageSerialPortNumberExceedFlavorValue,
NotImplementedError)
@wrap_exception()
@wrap_instance_fault
def get_serial_console(self, context, console_type, instance):
"""Returns connection information for a serial console."""
LOG.debug("Getting serial console", instance=instance)
if not CONF.serial_console.enabled:
raise exception.ConsoleTypeUnavailable(console_type=console_type)
context = context.elevated()
token = str(uuid.uuid4())
access_url = '%s?token=%s' % (CONF.serial_console.base_url, token)
try:
# Retrieve connect info from driver, and then decorate with our
# access info token
console = self.driver.get_serial_console(context, instance)
connect_info = console.get_connection_info(token, access_url)
except exception.InstanceNotFound:
if instance.vm_state != vm_states.BUILDING:
raise
raise exception.InstanceNotReady(instance_id=instance.uuid)
return connect_info
@messaging.expected_exceptions(exception.ConsoleTypeInvalid,
exception.InstanceNotReady,
exception.InstanceNotFound)
@wrap_exception()
@wrap_instance_fault
def validate_console_port(self, ctxt, instance, port, console_type):
if console_type == "spice-html5":
console_info = self.driver.get_spice_console(ctxt, instance)
elif console_type == "rdp-html5":
console_info = self.driver.get_rdp_console(ctxt, instance)
elif console_type == "serial":
console_info = self.driver.get_serial_console(ctxt, instance)
elif console_type == "webmks":
console_info = self.driver.get_mks_console(ctxt, instance)
else:
console_info = self.driver.get_vnc_console(ctxt, instance)
return console_info.port == port
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def reserve_block_device_name(self, context, instance, device,
volume_id, disk_bus, device_type):
@utils.synchronized(instance.uuid)
def do_reserve():
bdms = (
objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid))
# NOTE(ndipanov): We need to explicitly set all the fields on the
# object so that obj_load_attr does not fail
new_bdm = objects.BlockDeviceMapping(
context=context,
source_type='volume', destination_type='volume',
instance_uuid=instance.uuid, boot_index=None,
volume_id=volume_id,
device_name=device, guest_format=None,
disk_bus=disk_bus, device_type=device_type)
new_bdm.device_name = self._get_device_name_for_instance(
instance, bdms, new_bdm)
# NOTE(vish): create bdm here to avoid race condition
new_bdm.create()
return new_bdm
return do_reserve()
@wrap_exception()
@wrap_instance_fault
def attach_volume(self, context, instance, bdm):
"""Attach a volume to an instance."""
driver_bdm = driver_block_device.convert_volume(bdm)
@utils.synchronized(instance.uuid)
def do_attach_volume(context, instance, driver_bdm):
try:
return self._attach_volume(context, instance, driver_bdm)
except Exception:
with excutils.save_and_reraise_exception():
bdm.destroy()
do_attach_volume(context, instance, driver_bdm)
    def _attach_volume(self, context, instance, bdm):
        """Attach a single volume to *instance* through the virt driver.

        :param context: security context (elevated before use)
        :param instance: the Instance object to attach to
        :param bdm: a driver block device object supporting attach()
        """
        context = context.elevated()
        LOG.info(_LI('Attaching volume %(volume_id)s to %(mountpoint)s'),
                 {'volume_id': bdm.volume_id,
                  'mountpoint': bdm['mount_device']},
                 context=context, instance=instance)
        try:
            # do_driver_attach=True: actually plug the volume into the
            # hypervisor, not only record the attachment.
            bdm.attach(context, instance, self.volume_api, self.driver,
                       do_check_attach=False, do_driver_attach=True)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("Failed to attach %(volume_id)s "
                                  "at %(mountpoint)s"),
                              {'volume_id': bdm.volume_id,
                               'mountpoint': bdm['mount_device']},
                              context=context, instance=instance)
                # Give the reservation back so the volume does not stay
                # stuck in 'attaching' on the volume service side.
                self.volume_api.unreserve_volume(context, bdm.volume_id)
        info = {'volume_id': bdm.volume_id}
        self._notify_about_instance_usage(
            context, instance, "volume.attach", extra_usage_info=info)
    def _driver_detach_volume(self, context, instance, bdm):
        """Do the actual driver detach using block device mapping.

        Returns the connection info loaded from the BDM so callers can
        terminate the volume connection afterwards.
        """
        mp = bdm.device_name
        volume_id = bdm.volume_id
        LOG.info(_LI('Detach volume %(volume_id)s from mountpoint %(mp)s'),
                 {'volume_id': volume_id, 'mp': mp},
                 context=context, instance=instance)
        connection_info = jsonutils.loads(bdm.connection_info)
        # NOTE(vish): We currently don't use the serial when disconnecting,
        #             but added for completeness in case we ever do.
        if connection_info and 'serial' not in connection_info:
            connection_info['serial'] = volume_id
        try:
            if not self.driver.instance_exists(instance):
                LOG.warning(_LW('Detaching volume from unknown instance'),
                            context=context, instance=instance)
            encryption = encryptors.get_encryption_metadata(
                context, self.volume_api, volume_id, connection_info)
            self.driver.detach_volume(connection_info,
                                      instance,
                                      mp,
                                      encryption=encryption)
        except exception.DiskNotFound as err:
            # The disk is already gone from the hypervisor's point of view;
            # treat the detach as done.
            LOG.warning(_LW('Ignoring DiskNotFound exception while detaching '
                            'volume %(volume_id)s from %(mp)s: %(err)s'),
                        {'volume_id': volume_id, 'mp': mp, 'err': err},
                        instance=instance)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE('Failed to detach volume %(volume_id)s '
                                  'from %(mp)s'),
                              {'volume_id': volume_id, 'mp': mp},
                              context=context, instance=instance)
                # Roll the volume status back from 'detaching'.
                self.volume_api.roll_detaching(context, volume_id)
        return connection_info
    def _detach_volume(self, context, volume_id, instance, destroy_bdm=True,
                       attachment_id=None):
        """Detach a volume from an instance.
        :param context: security context
        :param volume_id: the volume id
        :param instance: the Instance object to detach the volume from
        :param destroy_bdm: if True, the corresponding BDM entry will be marked
                            as deleted. Disabling this is useful for operations
                            like rebuild, when we don't want to destroy BDM
        :param attachment_id: volume attachment handed through to the volume
                              API detach call; may be None
        """
        bdm = objects.BlockDeviceMapping.get_by_volume_and_instance(
                context, volume_id, instance.uuid)
        # Flush one last set of volume usage stats before the device goes
        # away, if usage polling is enabled.
        if CONF.volume_usage_poll_interval > 0:
            vol_stats = []
            mp = bdm.device_name
            # Handle bootable volumes which will not contain /dev/
            if '/dev/' in mp:
                mp = mp[5:]
            try:
                vol_stats = self.driver.block_stats(instance, mp)
            except NotImplementedError:
                # Not every driver can report block stats; skip silently.
                pass
            if vol_stats:
                LOG.debug("Updating volume usage cache with totals",
                          instance=instance)
                rd_req, rd_bytes, wr_req, wr_bytes, flush_ops = vol_stats
                vol_usage = objects.VolumeUsage(context)
                vol_usage.volume_id = volume_id
                vol_usage.instance_uuid = instance.uuid
                vol_usage.project_id = instance.project_id
                vol_usage.user_id = instance.user_id
                vol_usage.availability_zone = instance.availability_zone
                vol_usage.curr_reads = rd_req
                vol_usage.curr_read_bytes = rd_bytes
                vol_usage.curr_writes = wr_req
                vol_usage.curr_write_bytes = wr_bytes
                vol_usage.save(update_totals=True)
                self.notifier.info(context, 'volume.usage',
                                   compute_utils.usage_volume_info(vol_usage))
        connection_info = self._driver_detach_volume(context, instance, bdm)
        connector = self.driver.get_volume_connector(instance)
        if connection_info and not destroy_bdm and (
           connector.get('host') != instance.host):
            # If the volume is attached to another host (evacuate) then
            # this connector is for the wrong host. Use the connector that
            # was stored in connection_info instead (if we have one, and it
            # is for the expected host).
            stashed_connector = connection_info.get('connector')
            if not stashed_connector:
                # Volume was attached before we began stashing connectors
                LOG.warning(_LW("Host mismatch detected, but stashed "
                                "volume connector not found. Instance host is "
                                "%(ihost)s, but volume connector host is "
                                "%(chost)s."),
                            {'ihost': instance.host,
                             'chost': connector.get('host')})
            elif stashed_connector.get('host') != instance.host:
                # Unexpected error. The stashed connector is also not matching
                # the needed instance host.
                LOG.error(_LE("Host mismatch detected in stashed volume "
                              "connector. Will use local volume connector. "
                              "Instance host is %(ihost)s. Local volume "
                              "connector host is %(chost)s. Stashed volume "
                              "connector host is %(schost)s."),
                          {'ihost': instance.host,
                           'chost': connector.get('host'),
                           'schost': stashed_connector.get('host')})
            else:
                # Fix found. Use stashed connector.
                LOG.debug("Host mismatch detected. Found usable stashed "
                          "volume connector. Instance host is %(ihost)s. "
                          "Local volume connector host was %(chost)s. "
                          "Stashed volume connector host is %(schost)s.",
                          {'ihost': instance.host,
                           'chost': connector.get('host'),
                           'schost': stashed_connector.get('host')})
                connector = stashed_connector
        self.volume_api.terminate_connection(context, volume_id, connector)
        if destroy_bdm:
            bdm.destroy()
        info = dict(volume_id=volume_id)
        self._notify_about_instance_usage(
            context, instance, "volume.detach", extra_usage_info=info)
        self.volume_api.detach(context.elevated(), volume_id, instance.uuid,
                               attachment_id)
@wrap_exception()
@wrap_instance_fault
def detach_volume(self, context, volume_id, instance, attachment_id=None):
"""Detach a volume from an instance."""
self._detach_volume(context, volume_id, instance,
attachment_id=attachment_id)
def _init_volume_connection(self, context, new_volume_id,
old_volume_id, connector, instance, bdm):
new_cinfo = self.volume_api.initialize_connection(context,
new_volume_id,
connector)
old_cinfo = jsonutils.loads(bdm['connection_info'])
if old_cinfo and 'serial' not in old_cinfo:
old_cinfo['serial'] = old_volume_id
new_cinfo['serial'] = old_cinfo['serial']
return (old_cinfo, new_cinfo)
def _swap_volume(self, context, instance, bdm, connector,
old_volume_id, new_volume_id, resize_to):
mountpoint = bdm['device_name']
failed = False
new_cinfo = None
try:
old_cinfo, new_cinfo = self._init_volume_connection(context,
new_volume_id,
old_volume_id,
connector,
instance,
bdm)
LOG.debug("swap_volume: Calling driver volume swap with "
"connection infos: new: %(new_cinfo)s; "
"old: %(old_cinfo)s",
{'new_cinfo': new_cinfo, 'old_cinfo': old_cinfo},
contex=context, instance=instance)
self.driver.swap_volume(old_cinfo, new_cinfo, instance, mountpoint,
resize_to)
except Exception:
failed = True
with excutils.save_and_reraise_exception():
if new_cinfo:
msg = _LE("Failed to swap volume %(old_volume_id)s "
"for %(new_volume_id)s")
LOG.exception(msg, {'old_volume_id': old_volume_id,
'new_volume_id': new_volume_id},
context=context,
instance=instance)
else:
msg = _LE("Failed to connect to volume %(volume_id)s "
"with volume at %(mountpoint)s")
LOG.exception(msg, {'volume_id': new_volume_id,
'mountpoint': bdm['device_name']},
context=context,
instance=instance)
self.volume_api.roll_detaching(context, old_volume_id)
self.volume_api.unreserve_volume(context, new_volume_id)
finally:
conn_volume = new_volume_id if failed else old_volume_id
if new_cinfo:
LOG.debug("swap_volume: calling Cinder terminate_connection "
"for %(volume)s", {'volume': conn_volume},
context=context, instance=instance)
self.volume_api.terminate_connection(context,
conn_volume,
connector)
# If Cinder initiated the swap, it will keep
# the original ID
comp_ret = self.volume_api.migrate_volume_completion(
context,
old_volume_id,
new_volume_id,
error=failed)
LOG.debug("swap_volume: Cinder migrate_volume_completion "
"returned: %(comp_ret)s", {'comp_ret': comp_ret},
context=context, instance=instance)
return (comp_ret, new_cinfo)
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_fault
    def swap_volume(self, context, old_volume_id, new_volume_id, instance):
        """Swap volume for an instance.

        Performs the driver-level swap via _swap_volume and then rewrites
        the BDM record to describe the new volume.
        """
        context = context.elevated()
        bdm = objects.BlockDeviceMapping.get_by_volume_and_instance(
                context, old_volume_id, instance.uuid)
        connector = self.driver.get_volume_connector(instance)
        resize_to = 0
        old_vol_size = self.volume_api.get(context, old_volume_id)['size']
        new_vol_size = self.volume_api.get(context, new_volume_id)['size']
        # Only grow, never shrink: resize_to stays 0 unless the new volume
        # is strictly larger than the old one.
        if new_vol_size > old_vol_size:
            resize_to = new_vol_size
        LOG.info(_LI('Swapping volume %(old_volume)s for %(new_volume)s'),
                 {'old_volume': old_volume_id, 'new_volume': new_volume_id},
                 context=context, instance=instance)
        comp_ret, new_cinfo = self._swap_volume(context, instance,
                                                bdm,
                                                connector,
                                                old_volume_id,
                                                new_volume_id,
                                                resize_to)
        # If Cinder drove the swap it keeps the original volume id; record
        # whichever id migrate_volume_completion told us to save.
        save_volume_id = comp_ret['save_volume_id']
        # Update bdm
        values = {
            'connection_info': jsonutils.dumps(new_cinfo),
            'delete_on_termination': False,
            'source_type': 'volume',
            'destination_type': 'volume',
            'snapshot_id': None,
            'volume_id': save_volume_id,
            'no_device': None}
        if resize_to:
            values['volume_size'] = resize_to
        LOG.debug("swap_volume: Updating volume %(volume_id)s BDM record with "
                  "%(updates)s", {'volume_id': bdm.volume_id,
                                  'updates': values},
                  context=context, instance=instance)
        bdm.update(values)
        bdm.save()
@wrap_exception()
def remove_volume_connection(self, context, volume_id, instance):
"""Remove a volume connection using the volume api."""
# NOTE(vish): We don't want to actually mark the volume
# detached, or delete the bdm, just remove the
# connection from this host.
try:
bdm = objects.BlockDeviceMapping.get_by_volume_and_instance(
context, volume_id, instance.uuid)
self._driver_detach_volume(context, instance, bdm)
connector = self.driver.get_volume_connector(instance)
self.volume_api.terminate_connection(context, volume_id, connector)
except exception.NotFound:
pass
@wrap_exception()
@wrap_instance_fault
def attach_interface(self, context, instance, network_id, port_id,
requested_ip):
"""Use hotplug to add an network adapter to an instance."""
bind_host_id = self.driver.network_binding_host_id(context, instance)
network_info = self.network_api.allocate_port_for_instance(
context, instance, port_id, network_id, requested_ip,
bind_host_id=bind_host_id)
if len(network_info) != 1:
LOG.error(_LE('allocate_port_for_instance returned %(ports)s '
'ports'), {'ports': len(network_info)})
raise exception.InterfaceAttachFailed(
instance_uuid=instance.uuid)
image_meta = objects.ImageMeta.from_instance(instance)
try:
self.driver.attach_interface(instance, image_meta, network_info[0])
except exception.NovaException as ex:
port_id = network_info[0].get('id')
LOG.warn(_LW("attach interface failed , try to deallocate "
"port %(port_id)s, reason: %(msg)s"),
{'port_id': port_id, 'msg': ex},
instance=instance)
try:
self.network_api.deallocate_port_for_instance(
context, instance, port_id)
except Exception:
LOG.warn(_LW("deallocate port %(port_id)s failed"),
{'port_id': port_id}, instance=instance)
raise exception.InterfaceAttachFailed(
instance_uuid=instance.uuid)
return network_info[0]
    @wrap_exception()
    @wrap_instance_fault
    def detach_interface(self, context, instance, port_id):
        """Detach an network adapter from an instance.

        :param port_id: id of the port to detach; must be present in the
            instance's cached network info
        :raises exception.PortNotFound: if the port is not attached
        :raises exception.InterfaceDetachFailed: if the driver detach fails
        """
        network_info = instance.info_cache.network_info
        condemned = None
        # Locate the VIF for the requested port in the cached network info.
        for vif in network_info:
            if vif['id'] == port_id:
                condemned = vif
                break
        if condemned is None:
            raise exception.PortNotFound(_("Port %s is not "
                                           "attached") % port_id)
        try:
            self.driver.detach_interface(instance, condemned)
        except exception.NovaException as ex:
            LOG.warning(_LW("Detach interface failed, port_id=%(port_id)s,"
                            " reason: %(msg)s"),
                        {'port_id': port_id, 'msg': ex}, instance=instance)
            raise exception.InterfaceDetachFailed(instance_uuid=instance.uuid)
        else:
            try:
                # Only deallocate the port once the hypervisor-side detach
                # has succeeded.
                self.network_api.deallocate_port_for_instance(
                    context, instance, port_id)
            except Exception as ex:
                with excutils.save_and_reraise_exception():
                    # Since this is a cast operation, log the failure for
                    # triage.
                    LOG.warning(_LW('Failed to deallocate port %(port_id)s '
                                    'for instance. Error: %(error)s'),
                                {'port_id': port_id, 'error': ex},
                                instance=instance)
def _get_compute_info(self, context, host):
return objects.ComputeNode.get_first_node_by_host_for_old_compat(
context, host)
@wrap_exception()
def check_instance_shared_storage(self, ctxt, instance, data):
"""Check if the instance files are shared
:param ctxt: security context
:param instance: dict of instance data
:param data: result of driver.check_instance_shared_storage_local
Returns True if instance disks located on shared storage and
False otherwise.
"""
return self.driver.check_instance_shared_storage_remote(ctxt, data)
@wrap_exception()
@wrap_instance_event
@wrap_instance_fault
def check_can_live_migrate_destination(self, ctxt, instance,
block_migration, disk_over_commit):
"""Check if it is possible to execute live migration.
This runs checks on the destination host, and then calls
back to the source host to check the results.
:param context: security context
:param instance: dict of instance data
:param block_migration: if true, prepare for block migration
if None, calculate it in driver
:param disk_over_commit: if true, allow disk over commit
if None, ignore disk usage checking
:returns: a dict containing migration info
"""
return self._do_check_can_live_migrate_destination(ctxt, instance,
block_migration,
disk_over_commit)
    def _do_check_can_live_migrate_destination(self, ctxt, instance,
                                               block_migration,
                                               disk_over_commit):
        """Run the destination-side live migration checks.

        Calls the driver check on this (destination) host, then asks the
        source host over RPC to validate against the returned data, always
        cleaning up any destination-side artifacts afterwards.

        :returns: migration info produced by the source-side check
        """
        src_compute_info = obj_base.obj_to_primitive(
            self._get_compute_info(ctxt, instance.host))
        dst_compute_info = obj_base.obj_to_primitive(
            self._get_compute_info(ctxt, CONF.host))
        dest_check_data = self.driver.check_can_live_migrate_destination(ctxt,
            instance, src_compute_info, dst_compute_info,
            block_migration, disk_over_commit)
        LOG.debug('destination check data is %s', dest_check_data)
        try:
            migrate_data = self.compute_rpcapi.\
                check_can_live_migrate_source(ctxt, instance,
                                              dest_check_data)
        finally:
            # Always undo whatever the destination check created, even when
            # the source-side check failed.
            self.driver.check_can_live_migrate_destination_cleanup(ctxt,
                    dest_check_data)
        return migrate_data
    @wrap_exception()
    @wrap_instance_event
    @wrap_instance_fault
    def check_can_live_migrate_source(self, ctxt, instance, dest_check_data):
        """Check if it is possible to execute live migration.

        This checks if the live migration can succeed, based on the
        results from check_can_live_migrate_destination.

        :param ctxt: security context
        :param instance: dict of instance data
        :param dest_check_data: result of check_can_live_migrate_destination
        :returns: a dict containing migration info
        """
        is_volume_backed = self.compute_api.is_volume_backed_instance(ctxt,
                                                                      instance)
        # Older callers send a legacy dict; convert it to the versioned
        # object form and remember to convert the result back below.
        got_migrate_data_object = isinstance(dest_check_data,
                                             migrate_data_obj.LiveMigrateData)
        if not got_migrate_data_object:
            dest_check_data = \
                migrate_data_obj.LiveMigrateData.detect_implementation(
                    dest_check_data)
        dest_check_data.is_volume_backed = is_volume_backed
        block_device_info = self._get_instance_block_device_info(
                            ctxt, instance, refresh_conn_info=True)
        result = self.driver.check_can_live_migrate_source(ctxt, instance,
                                                           dest_check_data,
                                                           block_device_info)
        if not got_migrate_data_object:
            result = result.to_legacy_dict()
        LOG.debug('source check data is %s', result)
        return result
@wrap_exception()
@wrap_instance_event
@wrap_instance_fault
def pre_live_migration(self, context, instance, block_migration, disk,
migrate_data):
"""Preparations for live migration at dest host.
:param context: security context
:param instance: dict of instance data
:param block_migration: if true, prepare for block migration
:param migrate_data: if not None, it is a dict which holds data
required for live migration without shared
storage.
"""
LOG.debug('pre_live_migration data is %s', migrate_data)
got_migrate_data_object = isinstance(migrate_data,
migrate_data_obj.LiveMigrateData)
if not got_migrate_data_object:
migrate_data = \
migrate_data_obj.LiveMigrateData.detect_implementation(
migrate_data)
block_device_info = self._get_instance_block_device_info(
context, instance, refresh_conn_info=True)
network_info = self.network_api.get_instance_nw_info(context, instance)
self._notify_about_instance_usage(
context, instance, "live_migration.pre.start",
network_info=network_info)
migrate_data = self.driver.pre_live_migration(context,
instance,
block_device_info,
network_info,
disk,
migrate_data)
LOG.debug('driver pre_live_migration data is %s' % migrate_data)
# NOTE(tr3buchet): setup networks on destination host
self.network_api.setup_networks_on_host(context, instance,
self.host)
# Creating filters to hypervisors and firewalls.
# An example is that nova-instance-instance-xxx,
# which is written to libvirt.xml(Check "virsh nwfilter-list")
# This nwfilter is necessary on the destination host.
# In addition, this method is creating filtering rule
# onto destination host.
self.driver.ensure_filtering_rules_for_instance(instance,
network_info)
self._notify_about_instance_usage(
context, instance, "live_migration.pre.end",
network_info=network_info)
if not got_migrate_data_object and migrate_data:
migrate_data = migrate_data.to_legacy_dict(
pre_migration_result=True)
migrate_data = migrate_data['pre_live_migration_result']
LOG.debug('pre_live_migration result data is %s', migrate_data)
return migrate_data
    def _do_live_migration(self, context, dest, instance, block_migration,
                           migration, migrate_data):
        """Run one live migration: pre-checks, driver call, status updates.

        :param dest: destination host
        :param migration: Migration object tracked through status updates
        :param migrate_data: legacy dict or LiveMigrateData object
        """
        # NOTE(danms): We should enhance the RT to account for migrations
        # and use the status field to denote when the accounting has been
        # done on source/destination. For now, this is just here for status
        # reporting
        self._set_migration_status(migration, 'preparing')
        got_migrate_data_object = isinstance(migrate_data,
                                             migrate_data_obj.LiveMigrateData)
        if not got_migrate_data_object:
            migrate_data = \
                migrate_data_obj.LiveMigrateData.detect_implementation(
                    migrate_data)
        try:
            if ('block_migration' in migrate_data and
                    migrate_data.block_migration):
                # Block migration needs the source disk layout so the
                # destination can pre-create matching disks.
                block_device_info = self._get_instance_block_device_info(
                    context, instance)
                disk = self.driver.get_instance_disk_info(
                    instance, block_device_info=block_device_info)
            else:
                disk = None
            migrate_data = self.compute_rpcapi.pre_live_migration(
                context, instance,
                block_migration, disk, dest, migrate_data)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE('Pre live migration failed at %s'),
                              dest, instance=instance)
                self._set_migration_status(migration, 'failed')
                # Roll back anything pre_live_migration set up on the
                # destination host before re-raising.
                self._rollback_live_migration(context, instance, dest,
                                              block_migration, migrate_data)
        self._set_migration_status(migration, 'running')
        if migrate_data:
            migrate_data.migration = migration
        LOG.debug('live_migration data is %s', migrate_data)
        try:
            # The driver performs the migration and invokes the post /
            # rollback callbacks when it finishes.
            self.driver.live_migration(context, instance, dest,
                                       self._post_live_migration,
                                       self._rollback_live_migration,
                                       block_migration, migrate_data)
        except Exception:
            # Executing live migration
            # live_migration might raises exceptions, but
            # nothing must be recovered in this version.
            LOG.exception(_LE('Live migration failed.'), instance=instance)
            with excutils.save_and_reraise_exception():
                self._set_migration_status(migration, 'failed')
@wrap_exception()
@wrap_instance_event
@wrap_instance_fault
def live_migration(self, context, dest, instance, block_migration,
migration, migrate_data):
"""Executing live migration.
:param context: security context
:param dest: destination host
:param instance: a nova.objects.instance.Instance object
:param block_migration: if true, prepare for block migration
:param migration: an nova.objects.Migration object
:param migrate_data: implementation specific params
"""
self._set_migration_status(migration, 'queued')
def dispatch_live_migration(*args, **kwargs):
with self._live_migration_semaphore:
self._do_live_migration(*args, **kwargs)
# NOTE(danms): We spawn here to return the RPC worker thread back to
# the pool. Since what follows could take a really long time, we don't
# want to tie up RPC workers.
utils.spawn_n(dispatch_live_migration,
context, dest, instance,
block_migration, migration,
migrate_data)
@wrap_exception()
@wrap_instance_event
@wrap_instance_fault
def live_migration_force_complete(self, context, instance, migration_id):
"""Force live migration to complete.
:param context: Security context
:param instance: The instance that is being migrated
:param migration_id: ID of ongoing migration
"""
migration = objects.Migration.get_by_id(context, migration_id)
if migration.status != 'running':
raise exception.InvalidMigrationState(migration_id=migration_id,
instance_uuid=instance.uuid,
state=migration.status,
method='force complete')
self._notify_about_instance_usage(
context, instance, 'live.migration.force.complete.start')
self.driver.live_migration_force_complete(instance)
self._notify_about_instance_usage(
context, instance, 'live.migration.force.complete.end')
@wrap_exception()
@wrap_instance_event
@wrap_instance_fault
def live_migration_abort(self, context, instance, migration_id):
"""Abort an in-progress live migration.
:param context: Security context
:param instance: The instance that is being migrated
:param migration_id: ID of in-progress live migration
"""
migration = objects.Migration.get_by_id(context, migration_id)
if migration.status != 'running':
raise exception.InvalidMigrationState(migration_id=migration_id,
instance_uuid=instance.uuid,
state=migration.status,
method='abort live migration')
self._notify_about_instance_usage(
context, instance, 'live.migration.abort.start')
self.driver.live_migration_abort(instance)
self._notify_about_instance_usage(
context, instance, 'live.migration.abort.end')
def _live_migration_cleanup_flags(self, migrate_data):
"""Determine whether disks or instance path need to be cleaned up after
live migration (at source on success, at destination on rollback)
Block migration needs empty image at destination host before migration
starts, so if any failure occurs, any empty images has to be deleted.
Also Volume backed live migration w/o shared storage needs to delete
newly created instance-xxx dir on the destination as a part of its
rollback process
:param migrate_data: implementation specific data
:returns: (bool, bool) -- do_cleanup, destroy_disks
"""
# NOTE(pkoniszewski): block migration specific params are set inside
# migrate_data objects for drivers that expose block live migration
# information (i.e. Libvirt and Xenapi). For other drivers cleanup is
# not needed.
is_shared_block_storage = True
is_shared_instance_path = True
if isinstance(migrate_data, migrate_data_obj.LibvirtLiveMigrateData):
is_shared_block_storage = migrate_data.is_shared_block_storage
is_shared_instance_path = migrate_data.is_shared_instance_path
elif isinstance(migrate_data, migrate_data_obj.XenapiLiveMigrateData):
is_shared_block_storage = not migrate_data.block_migration
is_shared_instance_path = not migrate_data.block_migration
# No instance booting at source host, but instance dir
# must be deleted for preparing next block migration
# must be deleted for preparing next live migration w/o shared storage
do_cleanup = not is_shared_instance_path
destroy_disks = not is_shared_block_storage
return (do_cleanup, destroy_disks)
    @wrap_exception()
    @wrap_instance_fault
    def _post_live_migration(self, ctxt, instance,
                             dest, block_migration=False, migrate_data=None):
        """Post operations for live migration.

        This method is called from live_migration
        and mainly updating database record.

        :param ctxt: security context
        :param instance: instance dict
        :param dest: destination host
        :param block_migration: if true, prepare for block migration
        :param migrate_data: if not None, it is a dict which has data
        required for live migration without shared storage
        """
        LOG.info(_LI('_post_live_migration() is started..'),
                 instance=instance)
        bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
                ctxt, instance.uuid)
        # Cleanup source host post live-migration
        block_device_info = self._get_instance_block_device_info(
                            ctxt, instance, bdms=bdms)
        self.driver.post_live_migration(ctxt, instance, block_device_info,
                                        migrate_data)
        # Detaching volumes.
        connector = self.driver.get_volume_connector(instance)
        for bdm in bdms:
            # NOTE(vish): We don't want to actually mark the volume
            #             detached, or delete the bdm, just remove the
            #             connection from this host.
            # remove the volume connection without detaching from hypervisor
            # because the instance is not running anymore on the current host
            if bdm.is_volume:
                self.volume_api.terminate_connection(ctxt, bdm.volume_id,
                                                     connector)
        # Releasing vlan.
        # (not necessary in current implementation?)
        network_info = self.network_api.get_instance_nw_info(ctxt, instance)
        self._notify_about_instance_usage(ctxt, instance,
                                          "live_migration._post.start",
                                          network_info=network_info)
        # Releasing security group ingress rule.
        LOG.debug('Calling driver.unfilter_instance from _post_live_migration',
                  instance=instance)
        self.driver.unfilter_instance(instance,
                                      network_info)
        migration = {'source_compute': self.host,
                     'dest_compute': dest, }
        self.network_api.migrate_instance_start(ctxt,
                                                instance,
                                                migration)
        destroy_vifs = False
        try:
            self.driver.post_live_migration_at_source(ctxt, instance,
                                                      network_info)
        except NotImplementedError as ex:
            LOG.debug(ex, instance=instance)
            # For all hypervisors other than libvirt, there is a possibility
            # they are unplugging networks from source node in the cleanup
            # method
            destroy_vifs = True
        # Define domain at destination host, without doing it,
        # pause/suspend/terminate do not work.
        self.compute_rpcapi.post_live_migration_at_destination(ctxt,
                instance, block_migration, dest)
        do_cleanup, destroy_disks = self._live_migration_cleanup_flags(
                migrate_data)
        if do_cleanup:
            LOG.debug('Calling driver.cleanup from _post_live_migration',
                      instance=instance)
            self.driver.cleanup(ctxt, instance, network_info,
                                destroy_disks=destroy_disks,
                                migrate_data=migrate_data,
                                destroy_vifs=destroy_vifs)
        self.instance_events.clear_events_for_instance(instance)
        # NOTE(timello): make sure we update available resources on source
        # host even before next periodic task.
        self.update_available_resource(ctxt)
        self._update_scheduler_instance_info(ctxt, instance)
        self._notify_about_instance_usage(ctxt, instance,
                                          "live_migration._post.end",
                                          network_info=network_info)
        LOG.info(_LI('Migrating instance to %s finished successfully.'),
                 dest, instance=instance)
        LOG.info(_LI("You may see the error \"libvirt: QEMU error: "
                     "Domain not found: no domain with matching name.\" "
                     "This error can be safely ignored."),
                 instance=instance)
        # Console tokens pointed at the source host are now stale.
        self._clean_instance_console_tokens(ctxt, instance)
        if migrate_data and migrate_data.obj_attr_is_set('migration'):
            migrate_data.migration.status = 'completed'
            migrate_data.migration.save()
def _consoles_enabled(self):
"""Returns whether a console is enable."""
return (CONF.vnc.enabled or CONF.spice.enabled or
CONF.rdp.enabled or CONF.serial_console.enabled or
CONF.mks.enabled)
def _clean_instance_console_tokens(self, ctxt, instance):
"""Clean console tokens stored for an instance."""
if self._consoles_enabled():
if CONF.cells.enable:
self.cells_rpcapi.consoleauth_delete_tokens(
ctxt, instance.uuid)
else:
self.consoleauth_rpcapi.delete_tokens_for_instance(
ctxt, instance.uuid)
    @wrap_exception()
    @wrap_instance_event
    @wrap_instance_fault
    def post_live_migration_at_destination(self, context, instance,
                                           block_migration):
        """Post operations for live migration .

        :param context: security context
        :param instance: Instance dict
        :param block_migration: if true, prepare for block migration
        """
        LOG.info(_LI('Post operation of migration started'),
                 instance=instance)
        # NOTE(tr3buchet): setup networks on destination host
        #                  this is called a second time because
        #                  multi_host does not create the bridge in
        #                  plug_vifs
        self.network_api.setup_networks_on_host(context, instance,
                                                         self.host)
        migration = {'source_compute': instance.host,
                     'dest_compute': self.host, }
        self.network_api.migrate_instance_finish(context,
                                                 instance,
                                                 migration)
        network_info = self.network_api.get_instance_nw_info(context, instance)
        self._notify_about_instance_usage(
                     context, instance, "live_migration.post.dest.start",
                     network_info=network_info)
        block_device_info = self._get_instance_block_device_info(context,
                                                                 instance)
        try:
            self.driver.post_live_migration_at_destination(
                context, instance, network_info, block_migration,
                block_device_info)
        except Exception:
            with excutils.save_and_reraise_exception():
                instance.vm_state = vm_states.ERROR
                LOG.error(_LE('Unexpected error during post live migration at '
                              'destination host.'), instance=instance)
        finally:
            # Restore instance state and update host
            current_power_state = self._get_power_state(context, instance)
            node_name = None
            prev_host = instance.host
            try:
                compute_node = self._get_compute_info(context, self.host)
                node_name = compute_node.hypervisor_hostname
            except exception.ComputeHostNotFound:
                # Keep going: instance.node simply stays None.
                LOG.exception(_LE('Failed to get compute_info for %s'),
                              self.host)
            finally:
                # The instance now lives on this host, even if the driver's
                # post-migration step above failed.
                instance.host = self.host
                instance.power_state = current_power_state
                instance.task_state = None
                instance.node = node_name
                instance.progress = 0
                instance.save(expected_task_state=task_states.MIGRATING)
        # NOTE(tr3buchet): tear down networks on source host
        self.network_api.setup_networks_on_host(context, instance,
                                                prev_host, teardown=True)
        # NOTE(vish): this is necessary to update dhcp
        self.network_api.setup_networks_on_host(context, instance, self.host)
        self._notify_about_instance_usage(
                     context, instance, "live_migration.post.dest.end",
                     network_info=network_info)
    @wrap_exception()
    @wrap_instance_fault
    def _rollback_live_migration(self, context, instance,
                                 dest, block_migration, migrate_data=None,
                                 migration_status='error'):
        """Recovers Instance/volume state from migrating -> running.

        Runs on the live-migration *source* host after a failure: makes the
        instance usable again locally and asks the destination to undo its
        pre-migration setup.

        :param context: security context
        :param instance: nova.objects.instance.Instance object
        :param dest:
            This method is called from live migration src host.
            This param specifies destination host.
        :param block_migration: if true, prepare for block migration
        :param migrate_data:
            if not none, contains implementation specific data.
        :param migration_status:
            Contains the status we want to set for the migration object
        """
        # Drop the MIGRATING task state so the instance becomes operable.
        instance.task_state = None
        instance.progress = 0
        instance.save(expected_task_state=[task_states.MIGRATING])
        # Pull the migration record out of migrate_data (legacy dict or
        # versioned object form) and normalize dicts to LiveMigrateData.
        if isinstance(migrate_data, dict):
            migration = migrate_data.pop('migration', None)
            migrate_data = \
                migrate_data_obj.LiveMigrateData.detect_implementation(
                    migrate_data)
        elif (isinstance(migrate_data, migrate_data_obj.LiveMigrateData) and
              migrate_data.obj_attr_is_set('migration')):
            migration = migrate_data.migration
        else:
            migration = None
        # NOTE(tr3buchet): setup networks on source host (really it's re-setup)
        self.network_api.setup_networks_on_host(context, instance, self.host)
        bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
                context, instance.uuid)
        for bdm in bdms:
            if bdm.is_volume:
                # Tear down the destination-side volume connections created
                # during pre_live_migration.
                self.compute_rpcapi.remove_volume_connection(
                        context, bdm.volume_id, instance, dest)
        self._notify_about_instance_usage(context, instance,
                                          "live_migration._rollback.start")
        do_cleanup, destroy_disks = self._live_migration_cleanup_flags(
                migrate_data)
        if do_cleanup:
            # Ask the destination compute to clean up its half of the
            # aborted migration.
            self.compute_rpcapi.rollback_live_migration_at_destination(
                    context, instance, dest, destroy_disks=destroy_disks,
                    migrate_data=migrate_data)
        self._notify_about_instance_usage(context, instance,
                                          "live_migration._rollback.end")
        self._set_migration_status(migration, migration_status)
    @wrap_exception()
    @wrap_instance_event
    @wrap_instance_fault
    def rollback_live_migration_at_destination(self, context, instance,
                                               destroy_disks,
                                               migrate_data):
        """Cleaning up image directory that is created pre_live_migration.

        Runs on the *destination* host to undo pre_live_migration work
        after a live migration has failed.

        :param context: security context
        :param instance: a nova.objects.instance.Instance object sent over rpc
        :param destroy_disks: whether instance disks on this host should be
            destroyed during rollback
        :param migrate_data: implementation-specific migration data (legacy
            dict or LiveMigrateData object)
        """
        network_info = self.network_api.get_instance_nw_info(context, instance)
        self._notify_about_instance_usage(
            context, instance, "live_migration.rollback.dest.start",
            network_info=network_info)
        try:
            # NOTE(tr3buchet): tear down networks on destination host
            self.network_api.setup_networks_on_host(context, instance,
                                                    self.host, teardown=True)
        except Exception:
            with excutils.save_and_reraise_exception():
                # NOTE(tdurakov): even if teardown networks fails driver
                # should try to rollback live migration on destination.
                LOG.exception(
                    _LE('An error occurred while deallocating network.'),
                    instance=instance)
        finally:
            # always run this even if setup_networks_on_host fails
            # NOTE(vish): The mapping is passed in so the driver can disconnect
            # from remote volumes if necessary
            block_device_info = self._get_instance_block_device_info(context,
                                                                     instance)
            # Normalize legacy dict migrate_data into a versioned object
            # before handing it to the driver.
            if isinstance(migrate_data, dict):
                migrate_data = \
                    migrate_data_obj.LiveMigrateData.detect_implementation(
                        migrate_data)
            self.driver.rollback_live_migration_at_destination(
                context, instance, network_info, block_device_info,
                destroy_disks=destroy_disks, migrate_data=migrate_data)
        self._notify_about_instance_usage(
            context, instance, "live_migration.rollback.dest.end",
            network_info=network_info)
    @periodic_task.periodic_task(
        spacing=CONF.heal_instance_info_cache_interval)
    def _heal_instance_info_cache(self, context):
        """Called periodically.  On every call, try to update the
        info_cache's network information for another instance by
        calling to the network manager.

        This is implemented by keeping a cache of uuids of instances
        that live on this host.  On each call, we pop one off of a
        list, pull the DB record, and try the call to the network API.
        If anything errors don't fail, as it's possible the instance
        has been deleted, etc.
        """
        heal_interval = CONF.heal_instance_info_cache_interval
        if not heal_interval:
            # Feature disabled via config.
            return
        # Work queue persisted on self across periodic runs; one instance
        # is healed per run.
        instance_uuids = getattr(self, '_instance_uuids_to_heal', [])
        instance = None
        LOG.debug('Starting heal instance info cache')
        if not instance_uuids:
            # The list of instances to heal is empty so rebuild it
            LOG.debug('Rebuilding the list of instances to heal')
            db_instances = objects.InstanceList.get_by_host(
                context, self.host, expected_attrs=[], use_slave=True)
            for inst in db_instances:
                # We don't want to refresh the cache for instances
                # which are building or deleting so don't put them
                # in the list. If they are building they will get
                # added to the list next time we build it.
                if (inst.vm_state == vm_states.BUILDING):
                    LOG.debug('Skipping network cache update for instance '
                              'because it is Building.', instance=inst)
                    continue
                if (inst.task_state == task_states.DELETING):
                    LOG.debug('Skipping network cache update for instance '
                              'because it is being deleted.', instance=inst)
                    continue
                if not instance:
                    # Save the first one we find so we don't
                    # have to get it again
                    instance = inst
                else:
                    instance_uuids.append(inst['uuid'])
            self._instance_uuids_to_heal = instance_uuids
        else:
            # Find the next valid instance on the list
            while instance_uuids:
                try:
                    inst = objects.Instance.get_by_uuid(
                            context, instance_uuids.pop(0),
                            expected_attrs=['system_metadata', 'info_cache',
                                            'flavor'],
                            use_slave=True)
                except exception.InstanceNotFound:
                    # Instance is gone.  Try to grab another.
                    continue
                # Check the instance hasn't been migrated
                if inst.host != self.host:
                    LOG.debug('Skipping network cache update for instance '
                              'because it has been migrated to another '
                              'host.', instance=inst)
                # Check the instance isn't being deleting
                elif inst.task_state == task_states.DELETING:
                    LOG.debug('Skipping network cache update for instance '
                              'because it is being deleted.', instance=inst)
                else:
                    instance = inst
                    break
        if instance:
            # We have an instance now to refresh
            try:
                # Call to network API to get instance info.. this will
                # force an update to the instance's info_cache
                self.network_api.get_instance_nw_info(context, instance)
                LOG.debug('Updated the network info_cache for instance',
                          instance=instance)
            except exception.InstanceNotFound:
                # Instance is gone.
                LOG.debug('Instance no longer exists. Unable to refresh',
                          instance=instance)
                return
            except exception.InstanceInfoCacheNotFound:
                # InstanceInfoCache is gone.
                LOG.debug('InstanceInfoCache no longer exists. '
                          'Unable to refresh', instance=instance)
            except Exception:
                LOG.error(_LE('An error occurred while refreshing the network '
                              'cache.'), instance=instance, exc_info=True)
        else:
            LOG.debug("Didn't find any instances for network info cache "
                      "update.")
@periodic_task.periodic_task
def _poll_rebooting_instances(self, context):
if CONF.reboot_timeout > 0:
filters = {'task_state':
[task_states.REBOOTING,
task_states.REBOOT_STARTED,
task_states.REBOOT_PENDING],
'host': self.host}
rebooting = objects.InstanceList.get_by_filters(
context, filters, expected_attrs=[], use_slave=True)
to_poll = []
for instance in rebooting:
if timeutils.is_older_than(instance.updated_at,
CONF.reboot_timeout):
to_poll.append(instance)
self.driver.poll_rebooting_instances(CONF.reboot_timeout, to_poll)
@periodic_task.periodic_task
def _poll_rescued_instances(self, context):
if CONF.rescue_timeout > 0:
filters = {'vm_state': vm_states.RESCUED,
'host': self.host}
rescued_instances = objects.InstanceList.get_by_filters(
context, filters, expected_attrs=["system_metadata"],
use_slave=True)
to_unrescue = []
for instance in rescued_instances:
if timeutils.is_older_than(instance.launched_at,
CONF.rescue_timeout):
to_unrescue.append(instance)
for instance in to_unrescue:
self.compute_api.unrescue(context, instance)
    @periodic_task.periodic_task
    def _poll_unconfirmed_resizes(self, context):
        """Auto-confirm resizes left unconfirmed past the confirm window.

        No-op when CONF.resize_confirm_window is 0 (feature disabled).
        Migrations whose instance is missing or in an unexpected state are
        marked as errored instead of being confirmed; instances mid-delete
        or still finishing a resize are skipped to avoid known races.
        """
        if CONF.resize_confirm_window == 0:
            return
        migrations = objects.MigrationList.get_unconfirmed_by_dest_compute(
                context, CONF.resize_confirm_window, self.host,
                use_slave=True)
        migrations_info = dict(migration_count=len(migrations),
                confirm_window=CONF.resize_confirm_window)
        if migrations_info["migration_count"] > 0:
            LOG.info(_LI("Found %(migration_count)d unconfirmed migrations "
                         "older than %(confirm_window)d seconds"),
                     migrations_info)
        def _set_migration_to_error(migration, reason, **kwargs):
            # Helper: persist an 'error' status on the migration record.
            LOG.warning(_LW("Setting migration %(migration_id)s to error: "
                            "%(reason)s"),
                        {'migration_id': migration['id'], 'reason': reason},
                        **kwargs)
            migration.status = 'error'
            with migration.obj_as_admin():
                migration.save()
        for migration in migrations:
            instance_uuid = migration.instance_uuid
            LOG.info(_LI("Automatically confirming migration "
                         "%(migration_id)s for instance %(instance_uuid)s"),
                     {'migration_id': migration.id,
                      'instance_uuid': instance_uuid})
            expected_attrs = ['metadata', 'system_metadata']
            try:
                instance = objects.Instance.get_by_uuid(context,
                            instance_uuid, expected_attrs=expected_attrs,
                            use_slave=True)
            except exception.InstanceNotFound:
                reason = (_("Instance %s not found") %
                          instance_uuid)
                _set_migration_to_error(migration, reason)
                continue
            if instance.vm_state == vm_states.ERROR:
                reason = _("In ERROR state")
                _set_migration_to_error(migration, reason,
                                        instance=instance)
                continue
            # race condition: The instance in DELETING state should not be
            # set the migration state to error, otherwise the instance in
            # to be deleted which is in RESIZED state
            # will not be able to confirm resize
            if instance.task_state in [task_states.DELETING,
                                       task_states.SOFT_DELETING]:
                msg = ("Instance being deleted or soft deleted during resize "
                       "confirmation. Skipping.")
                LOG.debug(msg, instance=instance)
                continue
            # race condition: This condition is hit when this method is
            # called between the save of the migration record with a status of
            # finished and the save of the instance object with a state of
            # RESIZED. The migration record should not be set to error.
            if instance.task_state == task_states.RESIZE_FINISH:
                msg = ("Instance still resizing during resize "
                       "confirmation. Skipping.")
                LOG.debug(msg, instance=instance)
                continue
            vm_state = instance.vm_state
            task_state = instance.task_state
            if vm_state != vm_states.RESIZED or task_state is not None:
                reason = (_("In states %(vm_state)s/%(task_state)s, not "
                            "RESIZED/None") %
                          {'vm_state': vm_state,
                           'task_state': task_state})
                _set_migration_to_error(migration, reason,
                                        instance=instance)
                continue
            try:
                self.compute_api.confirm_resize(context, instance,
                                                migration=migration)
            except Exception as e:
                # Best effort: leave the migration for the next poll cycle.
                LOG.info(_LI("Error auto-confirming resize: %s. "
                             "Will retry later."),
                         e, instance=instance)
@periodic_task.periodic_task(spacing=CONF.shelved_poll_interval)
def _poll_shelved_instances(self, context):
if CONF.shelved_offload_time <= 0:
return
filters = {'vm_state': vm_states.SHELVED,
'task_state': None,
'host': self.host}
shelved_instances = objects.InstanceList.get_by_filters(
context, filters=filters, expected_attrs=['system_metadata'],
use_slave=True)
to_gc = []
for instance in shelved_instances:
sys_meta = instance.system_metadata
shelved_at = timeutils.parse_strtime(sys_meta['shelved_at'])
if timeutils.is_older_than(shelved_at, CONF.shelved_offload_time):
to_gc.append(instance)
for instance in to_gc:
try:
instance.task_state = task_states.SHELVING_OFFLOADING
instance.save(expected_task_state=(None,))
self.shelve_offload_instance(context, instance,
clean_shutdown=False)
except Exception:
LOG.exception(_LE('Periodic task failed to offload instance.'),
instance=instance)
    @periodic_task.periodic_task
    def _instance_usage_audit(self, context):
        """Emit 'exists' usage notifications for the last audit period.

        Runs at most once per audit period per host — an existing TaskLog
        record for the period acts as the run marker. Per-instance failures
        are counted and logged but do not abort the audit.
        """
        if not CONF.instance_usage_audit:
            return
        begin, end = utils.last_completed_audit_period()
        if objects.TaskLog.get(context, 'instance_usage_audit', begin, end,
                               self.host):
            # This period was already audited (or is being audited) here.
            return
        instances = objects.InstanceList.get_active_by_window_joined(
            context, begin, end, host=self.host,
            expected_attrs=['system_metadata', 'info_cache', 'metadata',
                            'flavor'],
            use_slave=True)
        num_instances = len(instances)
        errors = 0
        successes = 0
        LOG.info(_LI("Running instance usage audit for"
                     " host %(host)s from %(begin_time)s to "
                     "%(end_time)s. %(number_instances)s"
                     " instances."),
                 {'host': self.host,
                  'begin_time': begin,
                  'end_time': end,
                  'number_instances': num_instances})
        start_time = time.time()
        # Record the audit run so concurrent/future runs skip this period.
        task_log = objects.TaskLog(context)
        task_log.task_name = 'instance_usage_audit'
        task_log.period_beginning = begin
        task_log.period_ending = end
        task_log.host = self.host
        task_log.task_items = num_instances
        task_log.message = 'Instance usage audit started...'
        task_log.begin_task()
        for instance in instances:
            try:
                compute_utils.notify_usage_exists(
                    self.notifier, context, instance,
                    ignore_missing_network_data=False)
                successes += 1
            except Exception:
                LOG.exception(_LE('Failed to generate usage '
                                  'audit for instance '
                                  'on host %s'), self.host,
                              instance=instance)
                errors += 1
        task_log.errors = errors
        task_log.message = (
            'Instance usage audit ran for host %s, %s instances in %s seconds.'
            % (self.host, num_instances, time.time() - start_time))
        task_log.end_task()
    @periodic_task.periodic_task(spacing=CONF.bandwidth_poll_interval)
    def _poll_bandwidth_usage(self, context):
        """Periodically refresh the per-instance/MAC bandwidth usage cache.

        Reads cumulative counters from the virt driver, handles counter
        rollover, and persists usage for the current audit period. If the
        driver does not implement bandwidth polling this disables itself
        for the life of the process.
        """
        if not self._bw_usage_supported:
            # A previous poll found the driver can't do this; skip fast.
            return
        prev_time, start_time = utils.last_completed_audit_period()
        curr_time = time.time()
        if (curr_time - self._last_bw_usage_poll >
                CONF.bandwidth_poll_interval):
            self._last_bw_usage_poll = curr_time
            LOG.info(_LI("Updating bandwidth usage cache"))
            # Throttle how often updated usage is pushed up to cells.
            cells_update_interval = CONF.cells.bandwidth_update_interval
            if (cells_update_interval > 0 and
                    curr_time - self._last_bw_usage_cell_update >
                    cells_update_interval):
                self._last_bw_usage_cell_update = curr_time
                update_cells = True
            else:
                update_cells = False
            instances = objects.InstanceList.get_by_host(context,
                                                         self.host,
                                                         use_slave=True)
            try:
                bw_counters = self.driver.get_all_bw_counters(instances)
            except NotImplementedError:
                # NOTE(mdragon): Not all hypervisors have bandwidth polling
                # implemented yet.  If they don't it doesn't break anything,
                # they just don't get the info in the usage events.
                # NOTE(PhilDay): Record that its not supported so we can
                # skip fast on future calls rather than waste effort getting
                # the list of instances.
                LOG.info(_LI("Bandwidth usage not supported by "
                             "hypervisor."))
                self._bw_usage_supported = False
                return
            refreshed = timeutils.utcnow()
            for bw_ctr in bw_counters:
                # Allow switching of greenthreads between queries.
                greenthread.sleep(0)
                bw_in = 0
                bw_out = 0
                last_ctr_in = None
                last_ctr_out = None
                # Look for an existing usage row for this audit period.
                usage = objects.BandwidthUsage.get_by_instance_uuid_and_mac(
                    context, bw_ctr['uuid'], bw_ctr['mac_address'],
                    start_period=start_time, use_slave=True)
                if usage:
                    bw_in = usage.bw_in
                    bw_out = usage.bw_out
                    last_ctr_in = usage.last_ctr_in
                    last_ctr_out = usage.last_ctr_out
                else:
                    # Fall back to the previous period just to seed the
                    # last-seen counter values.
                    usage = (objects.BandwidthUsage.
                             get_by_instance_uuid_and_mac(
                        context, bw_ctr['uuid'], bw_ctr['mac_address'],
                        start_period=prev_time, use_slave=True))
                    if usage:
                        last_ctr_in = usage.last_ctr_in
                        last_ctr_out = usage.last_ctr_out
                if last_ctr_in is not None:
                    if bw_ctr['bw_in'] < last_ctr_in:
                        # counter rollover
                        bw_in += bw_ctr['bw_in']
                    else:
                        bw_in += (bw_ctr['bw_in'] - last_ctr_in)
                if last_ctr_out is not None:
                    if bw_ctr['bw_out'] < last_ctr_out:
                        # counter rollover
                        bw_out += bw_ctr['bw_out']
                    else:
                        bw_out += (bw_ctr['bw_out'] - last_ctr_out)
                objects.BandwidthUsage(context=context).create(
                                                    bw_ctr['uuid'],
                                                    bw_ctr['mac_address'],
                                                    bw_in,
                                                    bw_out,
                                                    bw_ctr['bw_in'],
                                                    bw_ctr['bw_out'],
                                                    start_period=start_time,
                                                    last_refreshed=refreshed,
                                                    update_cells=update_cells)
def _get_host_volume_bdms(self, context, use_slave=False):
"""Return all block device mappings on a compute host."""
compute_host_bdms = []
instances = objects.InstanceList.get_by_host(context, self.host,
use_slave=use_slave)
for instance in instances:
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid, use_slave=use_slave)
instance_bdms = [bdm for bdm in bdms if bdm.is_volume]
compute_host_bdms.append(dict(instance=instance,
instance_bdms=instance_bdms))
return compute_host_bdms
def _update_volume_usage_cache(self, context, vol_usages):
"""Updates the volume usage cache table with a list of stats."""
for usage in vol_usages:
# Allow switching of greenthreads between queries.
greenthread.sleep(0)
vol_usage = objects.VolumeUsage(context)
vol_usage.volume_id = usage['volume']
vol_usage.instance_uuid = usage['instance'].uuid
vol_usage.project_id = usage['instance'].project_id
vol_usage.user_id = usage['instance'].user_id
vol_usage.availability_zone = usage['instance'].availability_zone
vol_usage.curr_reads = usage['rd_req']
vol_usage.curr_read_bytes = usage['rd_bytes']
vol_usage.curr_writes = usage['wr_req']
vol_usage.curr_write_bytes = usage['wr_bytes']
vol_usage.save()
self.notifier.info(context, 'volume.usage',
compute_utils.usage_volume_info(vol_usage))
@periodic_task.periodic_task(spacing=CONF.volume_usage_poll_interval)
def _poll_volume_usage(self, context):
if CONF.volume_usage_poll_interval == 0:
return
compute_host_bdms = self._get_host_volume_bdms(context,
use_slave=True)
if not compute_host_bdms:
return
LOG.debug("Updating volume usage cache")
try:
vol_usages = self.driver.get_all_volume_usage(context,
compute_host_bdms)
except NotImplementedError:
return
self._update_volume_usage_cache(context, vol_usages)
    @periodic_task.periodic_task(spacing=CONF.sync_power_state_interval,
                                 run_immediately=True)
    def _sync_power_states(self, context):
        """Align power states between the database and the hypervisor.

        To sync power state data we make a DB call to get the number of
        virtual machines known by the hypervisor and if the number matches the
        number of virtual machines known by the database, we proceed in a lazy
        loop, one database record at a time, checking if the hypervisor has the
        same power state as is in the database.
        """
        db_instances = objects.InstanceList.get_by_host(context, self.host,
                                                        expected_attrs=[],
                                                        use_slave=True)
        num_vm_instances = self.driver.get_num_instances()
        num_db_instances = len(db_instances)
        if num_vm_instances != num_db_instances:
            LOG.warning(_LW("While synchronizing instance power states, found "
                            "%(num_db_instances)s instances in the database "
                            "and %(num_vm_instances)s instances on the "
                            "hypervisor."),
                        {'num_db_instances': num_db_instances,
                         'num_vm_instances': num_vm_instances})
        def _sync(db_instance):
            # Runs in the sync greenthread pool; one per instance.
            # NOTE(melwitt): This must be synchronized as we query state from
            # two separate sources, the driver and the database.
            # They are set (in stop_instance) and read, in sync.
            @utils.synchronized(db_instance.uuid)
            def query_driver_power_state_and_sync():
                self._query_driver_power_state_and_sync(context, db_instance)
            try:
                query_driver_power_state_and_sync()
            except Exception:
                LOG.exception(_LE("Periodic sync_power_state task had an "
                                  "error while processing an instance."),
                              instance=db_instance)
            # Clear the in-progress marker so the next periodic run can
            # re-sync this instance.
            self._syncs_in_progress.pop(db_instance.uuid)
        for db_instance in db_instances:
            # process syncs asynchronously - don't want instance locking to
            # block entire periodic task thread
            uuid = db_instance.uuid
            if uuid in self._syncs_in_progress:
                LOG.debug('Sync already in progress for %s' % uuid)
            else:
                LOG.debug('Triggering sync for uuid %s' % uuid)
                self._syncs_in_progress[uuid] = True
                self._sync_power_pool.spawn_n(_sync, db_instance)
def _query_driver_power_state_and_sync(self, context, db_instance):
if db_instance.task_state is not None:
LOG.info(_LI("During sync_power_state the instance has a "
"pending task (%(task)s). Skip."),
{'task': db_instance.task_state}, instance=db_instance)
return
# No pending tasks. Now try to figure out the real vm_power_state.
try:
vm_instance = self.driver.get_info(db_instance)
vm_power_state = vm_instance.state
except exception.InstanceNotFound:
vm_power_state = power_state.NOSTATE
# Note(maoy): the above get_info call might take a long time,
# for example, because of a broken libvirt driver.
try:
self._sync_instance_power_state(context,
db_instance,
vm_power_state,
use_slave=True)
except exception.InstanceNotFound:
# NOTE(hanlind): If the instance gets deleted during sync,
# silently ignore.
pass
    def _sync_instance_power_state(self, context, db_instance, vm_power_state,
                                   use_slave=False):
        """Align instance power state between the database and hypervisor.

        If the instance is not found on the hypervisor, but is in the database,
        then a stop() API will be called on the instance.

        :param context: security context
        :param db_instance: nova.objects.instance.Instance to reconcile
        :param vm_power_state: power state just read from the hypervisor
        :param use_slave: whether the DB refresh may use a slave connection
        """
        # We re-query the DB to get the latest instance info to minimize
        # (not eliminate) race condition.
        db_instance.refresh(use_slave=use_slave)
        db_power_state = db_instance.power_state
        vm_state = db_instance.vm_state
        if self.host != db_instance.host:
            # on the sending end of nova-compute _sync_power_state
            # may have yielded to the greenthread performing a live
            # migration; this in turn has changed the resident-host
            # for the VM; However, the instance is still active, it
            # is just in the process of migrating to another host.
            # This implies that the compute source must relinquish
            # control to the compute destination.
            LOG.info(_LI("During the sync_power process the "
                         "instance has moved from "
                         "host %(src)s to host %(dst)s"),
                     {'src': db_instance.host,
                      'dst': self.host},
                     instance=db_instance)
            return
        elif db_instance.task_state is not None:
            # on the receiving end of nova-compute, it could happen
            # that the DB instance already report the new resident
            # but the actual VM has not showed up on the hypervisor
            # yet. In this case, let's allow the loop to continue
            # and run the state sync in a later round
            LOG.info(_LI("During sync_power_state the instance has a "
                         "pending task (%(task)s). Skip."),
                     {'task': db_instance.task_state},
                     instance=db_instance)
            return
        orig_db_power_state = db_power_state
        if vm_power_state != db_power_state:
            LOG.info(_LI('During _sync_instance_power_state the DB '
                         'power_state (%(db_power_state)s) does not match '
                         'the vm_power_state from the hypervisor '
                         '(%(vm_power_state)s). Updating power_state in the '
                         'DB to match the hypervisor.'),
                     {'db_power_state': db_power_state,
                      'vm_power_state': vm_power_state},
                     instance=db_instance)
            # power_state is always updated from hypervisor to db
            db_instance.power_state = vm_power_state
            db_instance.save()
            db_power_state = vm_power_state
        # Note(maoy): Now resolve the discrepancy between vm_state and
        # vm_power_state. We go through all possible vm_states.
        if vm_state in (vm_states.BUILDING,
                        vm_states.RESCUED,
                        vm_states.RESIZED,
                        vm_states.SUSPENDED,
                        vm_states.ERROR):
            # TODO(maoy): we ignore these vm_state for now.
            pass
        elif vm_state == vm_states.ACTIVE:
            # The only rational power state should be RUNNING
            if vm_power_state in (power_state.SHUTDOWN,
                                  power_state.CRASHED):
                LOG.warning(_LW("Instance shutdown by itself. Calling the "
                                "stop API. Current vm_state: %(vm_state)s, "
                                "current task_state: %(task_state)s, "
                                "original DB power_state: %(db_power_state)s, "
                                "current VM power_state: %(vm_power_state)s"),
                            {'vm_state': vm_state,
                             'task_state': db_instance.task_state,
                             'db_power_state': orig_db_power_state,
                             'vm_power_state': vm_power_state},
                            instance=db_instance)
                try:
                    # Note(maoy): here we call the API instead of
                    # brutally updating the vm_state in the database
                    # to allow all the hooks and checks to be performed.
                    if db_instance.shutdown_terminate:
                        self.compute_api.delete(context, db_instance)
                    else:
                        self.compute_api.stop(context, db_instance)
                except Exception:
                    # Note(maoy): there is no need to propagate the error
                    # because the same power_state will be retrieved next
                    # time and retried.
                    # For example, there might be another task scheduled.
                    LOG.exception(_LE("error during stop() in "
                                      "sync_power_state."),
                                  instance=db_instance)
            elif vm_power_state == power_state.SUSPENDED:
                LOG.warning(_LW("Instance is suspended unexpectedly. Calling "
                                "the stop API."), instance=db_instance)
                try:
                    self.compute_api.stop(context, db_instance)
                except Exception:
                    LOG.exception(_LE("error during stop() in "
                                      "sync_power_state."),
                                  instance=db_instance)
            elif vm_power_state == power_state.PAUSED:
                # Note(maoy): a VM may get into the paused state not only
                # because the user request via API calls, but also
                # due to (temporary) external instrumentations.
                # Before the virt layer can reliably report the reason,
                # we simply ignore the state discrepancy. In many cases,
                # the VM state will go back to running after the external
                # instrumentation is done. See bug 1097806 for details.
                LOG.warning(_LW("Instance is paused unexpectedly. Ignore."),
                            instance=db_instance)
            elif vm_power_state == power_state.NOSTATE:
                # Occasionally, depending on the status of the hypervisor,
                # which could be restarting for example, an instance may
                # not be found. Therefore just log the condition.
                LOG.warning(_LW("Instance is unexpectedly not found. Ignore."),
                            instance=db_instance)
        elif vm_state == vm_states.STOPPED:
            if vm_power_state not in (power_state.NOSTATE,
                                      power_state.SHUTDOWN,
                                      power_state.CRASHED):
                LOG.warning(_LW("Instance is not stopped. Calling "
                                "the stop API. Current vm_state: %(vm_state)s,"
                                " current task_state: %(task_state)s, "
                                "original DB power_state: %(db_power_state)s, "
                                "current VM power_state: %(vm_power_state)s"),
                            {'vm_state': vm_state,
                             'task_state': db_instance.task_state,
                             'db_power_state': orig_db_power_state,
                             'vm_power_state': vm_power_state},
                            instance=db_instance)
                try:
                    # NOTE(russellb) Force the stop, because normally the
                    # compute API would not allow an attempt to stop a stopped
                    # instance.
                    self.compute_api.force_stop(context, db_instance)
                except Exception:
                    LOG.exception(_LE("error during stop() in "
                                      "sync_power_state."),
                                  instance=db_instance)
        elif vm_state == vm_states.PAUSED:
            if vm_power_state in (power_state.SHUTDOWN,
                                  power_state.CRASHED):
                LOG.warning(_LW("Paused instance shutdown by itself. Calling "
                                "the stop API."), instance=db_instance)
                try:
                    self.compute_api.force_stop(context, db_instance)
                except Exception:
                    LOG.exception(_LE("error during stop() in "
                                      "sync_power_state."),
                                  instance=db_instance)
        elif vm_state in (vm_states.SOFT_DELETED,
                          vm_states.DELETED):
            if vm_power_state not in (power_state.NOSTATE,
                                      power_state.SHUTDOWN):
                # Note(maoy): this should be taken care of periodically in
                # _cleanup_running_deleted_instances().
                LOG.warning(_LW("Instance is not (soft-)deleted."),
                            instance=db_instance)
    @periodic_task.periodic_task
    def _reclaim_queued_deletes(self, context):
        """Reclaim instances that are queued for deletion.

        Hard-deletes SOFT_DELETED instances on this host once they have
        been soft-deleted longer than CONF.reclaim_instance_interval.
        Per-instance failures are logged and do not stop the sweep.
        """
        interval = CONF.reclaim_instance_interval
        if interval <= 0:
            LOG.debug("CONF.reclaim_instance_interval <= 0, skipping...")
            return
        # TODO(comstud, jichenjc): Dummy quota object for now See bug 1296414.
        # The only case that the quota might be inconsistent is
        # the compute node died between set instance state to SOFT_DELETED
        # and quota commit to DB. When compute node starts again
        # it will have no idea the reservation is committed or not or even
        # expired, since it's a rare case, so marked as todo.
        quotas = objects.Quotas.from_reservations(context, None)
        filters = {'vm_state': vm_states.SOFT_DELETED,
                   'task_state': None,
                   'host': self.host}
        instances = objects.InstanceList.get_by_filters(
            context, filters,
            expected_attrs=objects.instance.INSTANCE_DEFAULT_FIELDS,
            use_slave=True)
        for instance in instances:
            if self._deleted_old_enough(instance, interval):
                bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
                        context, instance.uuid)
                LOG.info(_LI('Reclaiming deleted instance'), instance=instance)
                try:
                    self._delete_instance(context, instance, bdms, quotas)
                except Exception as e:
                    LOG.warning(_LW("Periodic reclaim failed to delete "
                                    "instance: %s"),
                                e, instance=instance)
    @periodic_task.periodic_task(spacing=CONF.update_resources_interval)
    def update_available_resource(self, context):
        """See driver.get_available_resource()

        Periodic process that keeps that the compute host's understanding of
        resource availability and usage in sync with the underlying hypervisor.

        Also rebuilds the per-node resource tracker cache and destroys
        compute node records no longer reported by the driver.

        :param context: security context
        """
        new_resource_tracker_dict = {}
        compute_nodes_in_db = self._get_compute_nodes_in_db(context,
                                                            use_slave=True)
        nodenames = set(self.driver.get_available_nodes())
        for nodename in nodenames:
            rt = self._get_resource_tracker(nodename)
            try:
                rt.update_available_resource(context)
            except exception.ComputeHostNotFound:
                # NOTE(comstud): We can get to this case if a node was
                # marked 'deleted' in the DB and then re-added with a
                # different auto-increment id. The cached resource
                # tracker tried to update a deleted record and failed.
                # Don't add this resource tracker to the new dict, so
                # that this will resolve itself on the next run.
                LOG.info(_LI("Compute node '%s' not found in "
                             "update_available_resource."), nodename)
                continue
            except Exception:
                LOG.exception(_LE("Error updating resources for node "
                                  "%(node)s."), {'node': nodename})
            new_resource_tracker_dict[nodename] = rt
        # NOTE(comstud): Replace the RT cache before looping through
        # compute nodes to delete below, as we can end up doing greenthread
        # switches there. Best to have everyone using the newest cache
        # ASAP.
        self._resource_tracker_dict = new_resource_tracker_dict
        # Delete orphan compute node not reported by driver but still in db
        for cn in compute_nodes_in_db:
            if cn.hypervisor_hostname not in nodenames:
                LOG.info(_LI("Deleting orphan compute node %s"), cn.id)
                cn.destroy()
def _get_compute_nodes_in_db(self, context, use_slave=False):
try:
return objects.ComputeNodeList.get_all_by_host(context, self.host,
use_slave=use_slave)
except exception.NotFound:
LOG.error(_LE("No compute node record for host %s"), self.host)
return []
@periodic_task.periodic_task(
spacing=CONF.running_deleted_instance_poll_interval)
def _cleanup_running_deleted_instances(self, context):
"""Cleanup any instances which are erroneously still running after
having been deleted.
Valid actions to take are:
1. noop - do nothing
2. log - log which instances are erroneously running
3. reap - shutdown and cleanup any erroneously running instances
4. shutdown - power off *and disable* any erroneously running
instances
The use-case for this cleanup task is: for various reasons, it may be
possible for the database to show an instance as deleted but for that
instance to still be running on a host machine (see bug
https://bugs.launchpad.net/nova/+bug/911366).
This cleanup task is a cross-hypervisor utility for finding these
zombied instances and either logging the discrepancy (likely what you
should do in production), or automatically reaping the instances (more
appropriate for dev environments).
"""
action = CONF.running_deleted_instance_action
if action == "noop":
return
# NOTE(sirp): admin contexts don't ordinarily return deleted records
with utils.temporary_mutation(context, read_deleted="yes"):
for instance in self._running_deleted_instances(context):
if action == "log":
LOG.warning(_LW("Detected instance with name label "
"'%s' which is marked as "
"DELETED but still present on host."),
instance.name, instance=instance)
elif action == 'shutdown':
LOG.info(_LI("Powering off instance with name label "
"'%s' which is marked as "
"DELETED but still present on host."),
instance.name, instance=instance)
try:
try:
# disable starting the instance
self.driver.set_bootable(instance, False)
except NotImplementedError:
LOG.debug("set_bootable is not implemented "
"for the current driver")
# and power it off
self.driver.power_off(instance)
except Exception:
msg = _LW("Failed to power off instance")
LOG.warn(msg, instance=instance, exc_info=True)
elif action == 'reap':
LOG.info(_LI("Destroying instance with name label "
"'%s' which is marked as "
"DELETED but still present on host."),
instance.name, instance=instance)
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid, use_slave=True)
self.instance_events.clear_events_for_instance(instance)
try:
self._shutdown_instance(context, instance, bdms,
notify=False)
self._cleanup_volumes(context, instance.uuid, bdms)
except Exception as e:
LOG.warning(_LW("Periodic cleanup failed to delete "
"instance: %s"),
e, instance=instance)
else:
raise Exception(_("Unrecognized value '%s'"
" for CONF.running_deleted_"
"instance_action") % action)
def _running_deleted_instances(self, context):
"""Returns a list of instances nova thinks is deleted,
but the hypervisor thinks is still running.
"""
timeout = CONF.running_deleted_instance_timeout
filters = {'deleted': True,
'soft_deleted': False,
'host': self.host}
instances = self._get_instances_on_driver(context, filters)
return [i for i in instances if self._deleted_old_enough(i, timeout)]
def _deleted_old_enough(self, instance, timeout):
deleted_at = instance.deleted_at
if deleted_at:
deleted_at = deleted_at.replace(tzinfo=None)
return (not deleted_at or timeutils.is_older_than(deleted_at, timeout))
    @contextlib.contextmanager
    def _error_out_instance_on_exception(self, context, instance,
                                         quotas=None,
                                         instance_state=vm_states.ACTIVE):
        """Context manager that repairs instance state if the body fails.

        Three failure classes are handled:

        * ``NotImplementedError``: roll back *quotas*, restore the
          instance to *instance_state*, then re-raise.
        * ``InstanceFaultRollback``: roll back *quotas*, force the
          instance back to ACTIVE, then raise the wrapped inner exception
          so callers see the original cause.
        * any other ``Exception``: roll back *quotas*, set the instance
          to ERROR, then re-raise.
        """
        instance_uuid = instance.uuid
        try:
            yield
        except NotImplementedError as error:
            with excutils.save_and_reraise_exception():
                if quotas:
                    quotas.rollback()
                LOG.info(_LI("Setting instance back to %(state)s after: "
                             "%(error)s"),
                         {'state': instance_state, 'error': error},
                         instance_uuid=instance_uuid)
                self._instance_update(context, instance,
                                      vm_state=instance_state,
                                      task_state=None)
        except exception.InstanceFaultRollback as error:
            if quotas:
                quotas.rollback()
            LOG.info(_LI("Setting instance back to ACTIVE after: %s"),
                     error, instance_uuid=instance_uuid)
            self._instance_update(context, instance,
                                  vm_state=vm_states.ACTIVE,
                                  task_state=None)
            # Surface the original failure, not the rollback wrapper.
            raise error.inner_exception
        except Exception:
            LOG.exception(_LE('Setting instance vm_state to ERROR'),
                          instance_uuid=instance_uuid)
            with excutils.save_and_reraise_exception():
                if quotas:
                    quotas.rollback()
                self._set_instance_obj_error_state(context, instance)
    @wrap_exception()
    def add_aggregate_host(self, context, aggregate, host, slave_info):
        """Notify hypervisor of change (for hypervisor pools)."""
        try:
            self.driver.add_to_aggregate(context, aggregate, host,
                                         slave_info=slave_info)
        except NotImplementedError:
            # Not every virt driver models hypervisor pools; nothing to do.
            LOG.debug('Hypervisor driver does not support '
                      'add_aggregate_host')
        except exception.AggregateError:
            # The driver-side add failed: undo the database-side change by
            # deleting the host from the aggregate again, then re-raise.
            with excutils.save_and_reraise_exception():
                self.driver.undo_aggregate_operation(
                    context,
                    aggregate.delete_host,
                    aggregate, host)
    @wrap_exception()
    def remove_aggregate_host(self, context, host, slave_info, aggregate):
        """Removes a host from a physical hypervisor pool."""
        try:
            self.driver.remove_from_aggregate(context, aggregate, host,
                                              slave_info=slave_info)
        except NotImplementedError:
            # Not every virt driver models hypervisor pools; nothing to do.
            LOG.debug('Hypervisor driver does not support '
                      'remove_aggregate_host')
        except (exception.AggregateError,
                exception.InvalidAggregateAction) as e:
            # Undo the database-side removal by re-adding the host; the
            # last argument tells the undo helper whether the aggregate
            # should also be flagged as errored (only for AggregateError).
            with excutils.save_and_reraise_exception():
                self.driver.undo_aggregate_operation(
                    context,
                    aggregate.add_host,
                    aggregate, host,
                    isinstance(e, exception.AggregateError))
def _process_instance_event(self, instance, event):
_event = self.instance_events.pop_instance_event(instance, event)
if _event:
LOG.debug('Processing event %(event)s',
{'event': event.key}, instance=instance)
_event.send(event)
    def _process_instance_vif_deleted_event(self, context, instance,
                                            deleted_vif_id):
        """Detach a port that Neutron deleted behind our back.

        If an attached port is deleted by neutron, it needs to
        be detached from the instance, and the info cache needs to be
        updated to drop the stale VIF entry.
        """
        network_info = instance.info_cache.network_info
        for index, vif in enumerate(network_info):
            if vif['id'] == deleted_vif_id:
                LOG.info(_LI('Neutron deleted interface %(intf)s; '
                             'detaching it from the instance and '
                             'deleting it from the info cache'),
                         {'intf': vif['id']},
                         instance=instance)
                # Remove the VIF from the cached list, then persist the
                # trimmed cache before touching the hypervisor.
                del network_info[index]
                base_net_api.update_instance_cache_with_nw_info(
                    self.network_api, context,
                    instance,
                    nw_info=network_info)
                try:
                    self.driver.detach_interface(instance, vif)
                except exception.NovaException as ex:
                    # Best-effort: the cache is already updated; a failed
                    # hypervisor detach is logged but not fatal.
                    LOG.warning(_LW("Detach interface failed, "
                                    "port_id=%(port_id)s, reason: %(msg)s"),
                                {'port_id': deleted_vif_id, 'msg': ex},
                                instance=instance)
                # At most one VIF matches the deleted port id.
                break
    @wrap_exception()
    def external_instance_event(self, context, instances, events):
        """Dispatch externally-generated (e.g. Neutron) events.

        Each event is matched to its instance by uuid and either handled
        directly here or routed to a waiting thread.
        """
        # NOTE(danms): Some event types are handled by the manager, such
        # as when we're asked to update the instance's info_cache. If it's
        # not one of those, look for some thread(s) waiting for the event and
        # unblock them if so.
        for event in events:
            # The caller guarantees a matching instance is in the list.
            instance = [inst for inst in instances
                        if inst.uuid == event.instance_uuid][0]
            LOG.debug('Received event %(event)s',
                      {'event': event.key},
                      instance=instance)
            if event.name == 'network-changed':
                try:
                    # Refreshing nw_info also refreshes the info cache.
                    self.network_api.get_instance_nw_info(context, instance)
                except exception.NotFound as e:
                    LOG.info(_LI('Failed to process external instance event '
                                 '%(event)s due to: %(error)s'),
                             {'event': event.key, 'error': six.text_type(e)},
                             instance=instance)
            elif event.name == 'network-vif-deleted':
                self._process_instance_vif_deleted_event(context,
                                                         instance,
                                                         event.tag)
            else:
                # Everything else just wakes up any registered waiter.
                self._process_instance_event(instance, event)
    @periodic_task.periodic_task(spacing=CONF.image_cache_manager_interval,
                                 external_process_ok=True)
    def _run_image_cache_manager_pass(self, context):
        """Run a single pass of the image cache manager."""
        if not self.driver.capabilities["has_imagecache"]:
            return
        # Determine what other nodes use this storage
        storage_users.register_storage_use(CONF.instances_path, CONF.host)
        nodes = storage_users.get_storage_users(CONF.instances_path)
        # Filter all_instances to only include those nodes which share this
        # storage path.
        # TODO(mikal): this should be further refactored so that the cache
        # cleanup code doesn't know what those instances are, just a remote
        # count, and then this logic should be pushed up the stack.
        filters = {'deleted': False,
                   'soft_deleted': True,
                   'host': nodes}
        filtered_instances = objects.InstanceList.get_by_filters(context,
                                 filters, expected_attrs=[], use_slave=True)
        self.driver.manage_image_cache(context, filtered_instances)
    @periodic_task.periodic_task(spacing=CONF.instance_delete_interval)
    def _run_pending_deletes(self, context):
        """Retry any pending instance file deletes."""
        LOG.debug('Cleaning up deleted instances')
        filters = {'deleted': True,
                   'soft_deleted': False,
                   'host': CONF.host,
                   'cleaned': False}
        attrs = ['info_cache', 'security_groups', 'system_metadata']
        # read_deleted='yes' is required: admin contexts do not return
        # deleted rows by default.
        with utils.temporary_mutation(context, read_deleted='yes'):
            instances = objects.InstanceList.get_by_filters(
                context, filters, expected_attrs=attrs, use_slave=True)
        LOG.debug('There are %d instances to clean', len(instances))
        for instance in instances:
            # Attempt counter lives in system_metadata, stored as a string.
            attempts = int(instance.system_metadata.get('clean_attempts', '0'))
            LOG.debug('Instance has had %(attempts)s of %(max)s '
                      'cleanup attempts',
                      {'attempts': attempts,
                       'max': CONF.maximum_instance_delete_attempts},
                      instance=instance)
            if attempts < CONF.maximum_instance_delete_attempts:
                success = self.driver.delete_instance_files(instance)
                # Count the attempt even on failure so we eventually give up.
                instance.system_metadata['clean_attempts'] = str(attempts + 1)
                if success:
                    instance.cleaned = True
                with utils.temporary_mutation(context, read_deleted='yes'):
                    instance.save()
    @periodic_task.periodic_task(spacing=CONF.instance_delete_interval)
    def _cleanup_incomplete_migrations(self, context):
        """Delete instance files on failed resize/revert-resize operation

        During resize/revert-resize operation, if that instance gets deleted
        in-between then instance files might remain either on source or
        destination compute node because of race condition.
        """
        LOG.debug('Cleaning up deleted instances with incomplete migration ')
        migration_filters = {'host': CONF.host,
                             'status': 'error'}
        migrations = objects.MigrationList.get_by_filters(context,
                                                          migration_filters)
        if not migrations:
            return
        inst_uuid_from_migrations = set([migration.instance_uuid for migration
                                         in migrations])
        inst_filters = {'deleted': True, 'soft_deleted': False,
                        'uuid': inst_uuid_from_migrations}
        attrs = ['info_cache', 'security_groups', 'system_metadata']
        # Deleted instances are hidden from admin contexts by default.
        with utils.temporary_mutation(context, read_deleted='yes'):
            instances = objects.InstanceList.get_by_filters(
                context, inst_filters, expected_attrs=attrs, use_slave=True)
        for instance in instances:
            # Only act when this host is NOT the instance's current home,
            # i.e. we are the leftover side of the interrupted migration.
            if instance.host != CONF.host:
                for migration in migrations:
                    if instance.uuid == migration.instance_uuid:
                        # Delete instance files if not cleanup properly either
                        # from the source or destination compute nodes when
                        # the instance is deleted during resizing.
                        self.driver.delete_instance_files(instance)
                        try:
                            migration.status = 'failed'
                            with migration.obj_as_admin():
                                migration.save()
                        except exception.MigrationNotFound:
                            LOG.warning(_LW("Migration %s is not found."),
                                        migration.id, context=context,
                                        instance=instance)
                        break
    @messaging.expected_exceptions(exception.InstanceQuiesceNotSupported,
                                   exception.QemuGuestAgentNotEnabled,
                                   exception.NovaException,
                                   NotImplementedError)
    @wrap_exception()
    def quiesce_instance(self, context, instance):
        """Quiesce an instance on this host."""
        context = context.elevated()
        image_meta = objects.ImageMeta.from_instance(instance)
        self.driver.quiesce(context, instance, image_meta)
    def _wait_for_snapshots_completion(self, context, mapping):
        """Block until every snapshot in *mapping* leaves 'creating' state.

        Each snapshot-type mapping entry is polled every 0.5 seconds;
        entries are waited on one at a time, in order.
        """
        for mapping_dict in mapping:
            if mapping_dict.get('source_type') == 'snapshot':
                def _wait_snapshot():
                    snapshot = self.volume_api.get_snapshot(
                        context, mapping_dict['snapshot_id'])
                    if snapshot.get('status') != 'creating':
                        raise loopingcall.LoopingCallDone()
                # wait() returns once _wait_snapshot raises LoopingCallDone,
                # so the closure over mapping_dict is safe here.
                timer = loopingcall.FixedIntervalLoopingCall(_wait_snapshot)
                timer.start(interval=0.5).wait()
    @messaging.expected_exceptions(exception.InstanceQuiesceNotSupported,
                                   exception.QemuGuestAgentNotEnabled,
                                   exception.NovaException,
                                   NotImplementedError)
    @wrap_exception()
    def unquiesce_instance(self, context, instance, mapping=None):
        """Unquiesce an instance on this host.

        If snapshots' image mapping is provided, it waits until snapshots are
        completed before unquiescing.
        """
        context = context.elevated()
        if mapping:
            try:
                self._wait_for_snapshots_completion(context, mapping)
            except Exception as error:
                # Best-effort: unquiesce anyway so the guest is not left
                # frozen just because the snapshot wait failed.
                LOG.exception(_LE("Exception while waiting completion of "
                                  "volume snapshots: %s"),
                              error, instance=instance)
        image_meta = objects.ImageMeta.from_instance(instance)
        self.driver.unquiesce(context, instance, image_meta)
| 46.579461 | 79 | 0.585384 |
7956267377cdac6ae1e58d6a1b16eb87ce218177 | 7,396 | py | Python | scripts/visualize_policy_upper.py | ManUtdMoon/safety-starter-agents | d94f087f6eccb92833c6fc4b0705cb6607c334d7 | [
"MIT"
] | null | null | null | scripts/visualize_policy_upper.py | ManUtdMoon/safety-starter-agents | d94f087f6eccb92833c6fc4b0705cb6607c334d7 | [
"MIT"
] | null | null | null | scripts/visualize_policy_upper.py | ManUtdMoon/safety-starter-agents | d94f087f6eccb92833c6fc4b0705cb6607c334d7 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import time
import numpy as np
from safe_rl.utils.load_utils import load_policy, load_feasibiltiy
from safe_rl.utils.logx import EpochLogger
import matplotlib.pyplot as plt
import os.path as osp
import json
import gym
plt.rcParams.update({'font.size': 16})
def visualize_region(env_name, args, get_feasibility, get_action, bound):
    """Plot the learned feasible region over a 2-D state grid and overlay
    rollout trajectories started near the region boundary.

    :param env_name: gym environment id, e.g. 'UpperTriangle-v0'
    :param args: parsed CLI namespace; args.fpath is the output directory
    :param get_feasibility: maps a batch of obs to (mu, feasibility value)
    :param get_action: maps a single obs to an action
    :param bound: (d_min, d_max, v_min, v_max) plotting bounds
    """
    # generate batch obses
    d = np.linspace(bound[0], bound[1], 10)
    v = np.linspace(bound[2], bound[3], 10)
    # cmaplist = ['springgreen'] * 3 + ['crimson'] * 87
    # cmap1 = ListedColormap(cmaplist)
    D, V = np.meshgrid(d, v)
    flatten_d = np.reshape(D, [-1, ])
    flatten_v = np.reshape(V, [-1, ])
    env = gym.make(env_name)
    env_name = env_name.split("-")[0]
    if env_name == 'Air3d':
        # Air3d has a third state component; fixed at pi for the slice.
        x3 = np.pi * np.ones_like(flatten_d)
        obs = np.stack([flatten_d, flatten_v, x3], 1)
    else:
        obs = np.stack([flatten_d, flatten_v], 1)
    flatten_mu, flatten_fea_v = get_feasibility(obs)
    flatten_cs = np.multiply(flatten_fea_v, flatten_mu)
    fea_v = flatten_fea_v.reshape(D.shape)
    # idx[i] counts negative (feasible) cells per row; used below to pick
    # rollout start points just inside the boundary.
    check = fea_v < 0.0
    idx = np.sum(check, axis=1)
    print("idx", idx)
    plot_items = ['cstr'] # 'cs', 'mu',
    data_dict = {'cs': flatten_cs, 'mu': flatten_mu, 'cstr': flatten_fea_v}
    # if baseline:
    #     grid, target_values = hj_baseline()
    #     grid1, target_values1 = hj_baseline(timet=10.0)
    def plot_region(data_reshape, name):
        # Draw the filled region, its zero-level boundary, and one rollout
        # per grid column, then save the figure under args.fpath.
        fig = plt.figure(figsize=[5, 6])
        ax = plt.axes([0.1, 0.2, 0.8, 0.75])
        data_reshape += 0.15 * np.where(data_reshape == 0,
                                        np.zeros_like(data_reshape),
                                        np.ones_like(data_reshape))
        ct1 = ax.contourf(D, V, data_reshape, cmap='Accent') # 50
        plt.colorbar(ct1)
        ct1.collections[0].set_label('Learned Boundary')
        ax.contour(D, V, data_reshape, levels=0,
                   colors="green",
                   linewidths=3)
        # if baseline:
        #     ct2 = ax.contour(grid.coordinate_vectors[0],
        #                      grid.coordinate_vectors[1],
        #                      target_values.T,
        #                      levels=0,
        #                      colors="grey",
        #                      linewidths=3)
        #
        #     # data = np.load('/home/mahaitong/PycharmProjects/toyota_exp_train (copy)/baseline/init_feasible_f1.npy')
        #     # data2 = np.load('/home/mahaitong/PycharmProjects/toyota_exp_train (copy)/baseline/init_feasible_f0.4.npy')
        #     # ds = np.linspace(bound[0], bound[1], 100)
        #     # vs = np.linspace(bound[2], bound[3], 100)
        #     # Ds, Vs = np.meshgrid(ds, vs)
        #     # ct3 = ax.contour(Ds,
        #     #                  Vs,
        #     #                  data.T,
        #     #                  levels=0,
        #     #                  colors="cornflowerblue",
        #     #                  linewidths=3)
        #     # ct2 = ax.contour(Ds,
        #     #                  Vs,
        #     #                  data2.T,
        #     #                  levels=0,
        #     #                  colors="orange",
        #     #                  linewidths=3)
        #     # ct2.collections[0].set_label('HJ-Reachability Boundary')
        for i in range(10):
            print("{} point traj".format(i))
            # Start just inside the feasible boundary for column i.
            # NOTE(review): idx[i]-2 assumes at least two feasible cells
            # per row — TODO confirm for other bounds.
            obs = [d[i], v[idx[i]-2]]
            obs = np.array(obs)
            done = False
            obses = []
            obses.append(obs)
            env.reset(state=obs)
            while not done:
                a = get_action(obs)
                obs, r, done, info = env.step(a)
                obses.append(obs)
            obses = np.array(obses)
            ax.plot(obses[:, 0], obses[:, 1])
        name_2d = name + '_plus_traj.jpg'
        ax.set_xlabel(r'$x_1$')
        ax.set_ylabel(r'$x_2$')
        # rect1 = plt.Rectangle((0, 0), 1, 1, fc=ct1.collections[0].get_facecolor()[0], ec='green', linewidth=3)
        # rect2 = plt.Rectangle((0, 0), 1, 1, fill=False, ec='grey', linewidth=3)
        # rect3 = plt.Rectangle((0, 0), 1, 1, fill=False, ec='orange', linewidth=3)
        # rect4 = plt.Rectangle((0, 0), 1, 1, fill=False, ec='cornflowerblue', linewidth=3)
        ax = plt.axes([0.05, 0.02, 0.9, 0.16])
        plt.axis('off')
        # ax.legend((rect1,rect2, rect3, rect4), ('Feasible region', 'HJ avoid set', 'Energy-based','MPC-feasiblity')
        #           , loc='lower center',ncol=2, fontsize=15)
        # plt.title('Feasible Region of Double Integrator')
        plt.tight_layout(pad=0.5)
        plt.savefig(osp.join(args.fpath, name_2d))
    for plot_item in plot_items:
        data = data_dict.get(plot_item)
        data_reshape = data.reshape(D.shape)
        plot_region(data_reshape, plot_item)
def run_policy(env, get_action, max_ep_len=None, num_episodes=100, render=True):
    """Roll out *get_action* in *env* for *num_episodes* episodes,
    logging per-episode return, cost, and length via EpochLogger.
    """
    assert env is not None, \
        "Environment not found!\n\n It looks like the environment wasn't saved, " + \
        "and we can't run the agent in it. :("
    logger = EpochLogger()
    o, r, d, ep_ret, ep_cost, ep_len, n = env.reset(), 0, False, 0, 0, 0, 0
    while n < num_episodes:
        if render:
            env.render()
            time.sleep(1e-3)
        a = get_action(o)
        # Clip to the valid action range before stepping.
        a = np.clip(a, env.action_space.low, env.action_space.high)
        o, r, d, info = env.step(a)
        ep_ret += r
        ep_cost += info.get('cost', 0)
        ep_len += 1
        if d or (ep_len == max_ep_len):
            logger.store(EpRet=ep_ret, EpCost=ep_cost, EpLen=ep_len)
            print('Episode %d \t EpRet %.3f \t EpCost %.3f \t EpLen %d'%(n, ep_ret, ep_cost, ep_len))
            o, r, d, ep_ret, ep_cost, ep_len = env.reset(), 0, False, 0, 0, 0
            n += 1
    logger.log_tabular('EpRet', with_min_and_max=True)
    logger.log_tabular('EpCost', with_min_and_max=True)
    logger.log_tabular('EpLen', average_only=True)
    logger.dump_tabular()
if __name__ == '__main__':
    import argparse
    from custom_env_utils import register_custom_env
    # Register the custom gym environments before any gym.make() call.
    register_custom_env()
    parser = argparse.ArgumentParser()
    parser.add_argument('--fpath', type=str, default='/home/mahaitong/PycharmProjects/safety-starter-agents/data/2021-12-19_ppo_dual_ascent_UpperTriangle-v0/2021-12-19_16-42-04-ppo_dual_ascent_UpperTriangle-v0_s0')
    parser.add_argument('--len', '-l', type=int, default=None)
    parser.add_argument('--episodes', '-n', type=int, default=5)
    parser.add_argument('--norender', '-nr', action='store_true', default=False)
    parser.add_argument('--itr', '-i', type=int, default=-1)
    parser.add_argument('--deterministic', '-d', action='store_true')
    args = parser.parse_args()
    # Load both the policy and the feasibility network from the same run
    # directory; --itr -1 means "use the latest saved iteration".
    env, get_action, sess = load_policy(args.fpath,
                                        args.itr if args.itr >=0 else 'last',
                                        args.deterministic)
    env, get_feasibility_indicator, sess = load_feasibiltiy(args.fpath,
                                        args.itr if args.itr >=0 else 'last',
                                        args.deterministic)
    # collect_obs(env, args)
    # run_policy(env, get_action, args.len, args.episodes, not(args.norender))
    # NOTE(review): bound is (d_min, d_max, v_min, v_max) with v_max=-5.,
    # giving a descending v grid — presumably intentional; confirm.
    visualize_region("UpperTriangle-v0", args, get_feasibility_indicator, get_action, bound=(-5., 5., 0., -5.),)
    # get_pic(env, args)
795627b835227b9f121f0cee7a1c2959702d259b | 13,508 | py | Python | docs/conf.py | philipnbbc/PyAV | 6f9a1561f43e0cedc10c0ee33cd30bded7d34dc0 | [
"BSD-3-Clause"
] | 538 | 2020-05-01T00:55:03.000Z | 2022-03-31T03:06:17.000Z | docs/conf.py | philipnbbc/PyAV | 6f9a1561f43e0cedc10c0ee33cd30bded7d34dc0 | [
"BSD-3-Clause"
] | 301 | 2020-04-30T20:24:37.000Z | 2022-03-31T21:26:59.000Z | docs/conf.py | philipnbbc/PyAV | 6f9a1561f43e0cedc10c0ee33cd30bded7d34dc0 | [
"BSD-3-Clause"
] | 96 | 2020-05-01T23:56:50.000Z | 2022-03-28T22:14:38.000Z | # -*- coding: utf-8 -*-
#
# PyAV documentation build configuration file, created by
# sphinx-quickstart on Fri Dec 7 22:13:16 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from docutils import nodes
import logging
import math
import os
import re
import sys
import sys
import xml.etree.ElementTree as etree
import sphinx
from sphinx import addnodes
from sphinx.util.docutils import SphinxDirective
logging.basicConfig()
if sphinx.version_info < (1, 8):
print("Sphinx {} is too old; we require >= 1.8.".format(sphinx.__version__), file=sys.stderr)
exit(1)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'sphinx.ext.extlinks',
'sphinx.ext.doctest',
# We used to use doxylink, but we found its caching behaviour annoying, and
# so made a minimally viable version of our own.
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'PyAV'
copyright = u'2017, Mike Boers'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = open('../VERSION.txt').read().strip()
# The short X.Y version.
version = release.split('-')[0]
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'pyav'
html_theme_path = [os.path.abspath(os.path.join(__file__, '..', '_themes'))]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '_static/logo-250.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '_static/favicon.png'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
doctest_global_setup = '''
import errno
import os
import av
from av.datasets import fate, fate as fate_suite, curated
from tests import common
from tests.common import sandboxed as _sandboxed
def sandboxed(*args, **kwargs):
kwargs['timed'] = True
return _sandboxed('docs', *args, **kwargs)
_cwd = os.getcwd()
here = sandboxed('__cwd__')
try:
os.makedirs(here)
except OSError as e:
if e.errno != errno.EEXIST:
raise
os.chdir(here)
video_path = curated('pexels/time-lapse-video-of-night-sky-857195.mp4')
'''
doctest_global_cleanup = '''
os.chdir(_cwd)
'''
doctest_test_doctest_blocks = ''
extlinks = {
'ffstruct': ('http://ffmpeg.org/doxygen/trunk/struct%s.html', 'struct '),
'issue': ('https://github.com/PyAV-Org/PyAV/issues/%s', '#'),
'pr': ('https://github.com/PyAV-Org/PyAV/pull/%s', '#'),
'gh-user': ('https://github.com/%s', '@'),
}
intersphinx_mapping = {
'https://docs.python.org/3': None,
}
autodoc_member_order = 'bysource'
autodoc_default_options = {
'undoc-members': True,
'show-inheritance': True,
}
todo_include_todos = True
class PyInclude(SphinxDirective):
    """Directive that executes its body as Python and splices whatever the
    code passes to ``write(...)`` back into the document as reST source.
    """
    has_content = True
    def run(self):
        source = '\n'.join(self.content)
        output = []
        def write(*content, sep=' ', end='\n'):
            # print()-like helper exposed to the executed snippet.
            output.append(sep.join(map(str, content)) + end)
        namespace = dict(write=write)
        # NOTE: exec of trusted, in-repo documentation code only.
        exec(compile(source, '<docs>', 'exec'), namespace, namespace)
        output = ''.join(output).splitlines()
        # Feed the generated lines back to the reST parser.
        self.state_machine.insert_input(output, 'blah')
        return [] #[nodes.literal('hello', repr(content))]
def load_entrypoint(name):
    """Resolve a ``module:attr.path`` (or ``module.attr``) string to the
    object it names, importing the module as needed.
    """
    pieces = name.split(':')
    if len(pieces) == 1:
        # No colon given: the last dotted segment is the attribute.
        pieces = name.rsplit('.', 1)
    mod_name, attr_path = pieces
    attr_names = attr_path.split('.')
    try:
        target = __import__(mod_name, fromlist=['.'])
    except ImportError as e:
        print('Error while importing.', (name, mod_name, attr_names, e))
        raise
    for attr_name in attr_names:
        target = getattr(target, attr_name)
    return target
class EnumTable(SphinxDirective):
    """Directive rendering a PyAV enum as a reference table.

    The required argument is an entrypoint string for the enum; the
    optional ``:class:`` option names a class whose properties wrap
    enum items (adding an "Attribute" column to the table).
    """
    required_arguments = 1
    option_spec = {
        'class': lambda x: x,
    }
    def run(self):
        cls_ep = self.options.get('class')
        cls = load_entrypoint(cls_ep) if cls_ep else None
        enum = load_entrypoint(self.arguments[0])
        # Map enum item -> property name on cls (via the _enum_item marker
        # that PyAV attaches to wrapping properties).
        properties = {}
        if cls is not None:
            for name, value in vars(cls).items():
                if isinstance(value, property):
                    try:
                        item = value._enum_item
                    except AttributeError:
                        pass
                    else:
                        if isinstance(item, enum):
                            properties[item] = name
        # Extra leading column only when a wrapping class was given.
        colwidths = [15, 15, 5, 65] if cls else [15, 5, 75]
        ncols = len(colwidths)
        table = nodes.table()
        tgroup = nodes.tgroup(cols=ncols)
        table += tgroup
        for width in colwidths:
            tgroup += nodes.colspec(colwidth=width)
        thead = nodes.thead()
        tgroup += thead
        tbody = nodes.tbody()
        tgroup += tbody
        def makerow(*texts):
            # Build one table row, skipping None cells entirely.
            row = nodes.row()
            for text in texts:
                if text is None:
                    continue
                row += nodes.entry('', nodes.paragraph('', str(text)))
            return row
        thead += makerow(
            '{} Attribute'.format(cls.__name__) if cls else None,
            '{} Name'.format(enum.__name__),
            'Flag Value',
            'Meaning in FFmpeg',
        )
        # Emit one row per enum item, case-insensitively de-duplicated.
        seen = set()
        for name, item in enum._by_name.items():
            if name.lower() in seen:
                continue
            seen.add(name.lower())
            try:
                attr = properties[item]
            except KeyError:
                # With a class given, only items wrapped by a property
                # are shown; without one, every item is shown.
                if cls:
                    continue
                attr = None
            value = '0x{:X}'.format(item.value)
            doc = item.__doc__ or '-'
            tbody += makerow(
                attr,
                name,
                value,
                doc,
            )
        return [table]
# Map role name -> (Doxygen tagfile path, doc base URL); consumed by
# setup() below to register :ffmpeg:`...` style roles.
doxylink = {}
ffmpeg_tagfile = os.path.abspath(os.path.join(__file__, '..', '_build', 'doxygen', 'tagfile.xml'))
if not os.path.exists(ffmpeg_tagfile):
    # The tagfile is produced by a separate Doxygen run; fail fast if absent.
    print("ERROR: Missing FFmpeg tagfile.")
    exit(1)
doxylink['ffmpeg'] = (ffmpeg_tagfile, 'https://ffmpeg.org/doxygen/trunk/')
def doxylink_create_handler(app, file_name, url_base):
    """Parse a Doxygen tagfile once and return a reST role handler that
    links names (functions, structs, struct members) into *url_base*.
    """
    print("Finding all names in Doxygen tagfile", file_name)
    doc = etree.parse(file_name)
    root = doc.getroot()
    parent_map = {} # ElementTree doesn't give us access to parents.
    # urls maps kind -> {qualified name -> relative URL}.
    urls = {}
    for node in root.findall('.//name/..'):
        for child in node:
            parent_map[child] = node
        kind = node.attrib['kind']
        if kind not in ('function', 'struct', 'variable'):
            continue
        name = node.find('name').text
        if kind not in ('function', ):
            # Qualify members as Parent.name so lookups are unambiguous.
            parent = parent_map.get(node)
            parent_name = parent.find('name') if parent else None
            if parent_name is not None:
                name = '{}.{}'.format(parent_name.text, name)
        filenode = node.find('filename')
        if filenode is not None:
            url = filenode.text
        else:
            # No page of its own: link to the anchor within its file.
            url = '{}#{}'.format(
                node.find('anchorfile').text,
                node.find('anchor').text,
            )
        urls.setdefault(kind, {})[name] = url
    def get_url(name):
        # These are all the kinds that seem to exist.
        for kind in (
            'function',
            'struct',
            'variable', # These are struct members.
            # 'class',
            # 'define',
            # 'enumeration',
            # 'enumvalue',
            # 'file',
            # 'group',
            # 'page',
            # 'typedef',
            # 'union',
        ):
            try:
                return urls[kind][name]
            except KeyError:
                pass
    def _doxylink_handler(name, rawtext, text, lineno, inliner, options={}, content=[]):
        # Support the "Title <actual.name>" reST role convention.
        m = re.match(r'^(.+?)(?:<(.+?)>)?$', text)
        title, name = m.groups()
        name = name or title
        url = get_url(name)
        if not url:
            # Hard failure keeps broken references out of the docs.
            print("ERROR: Could not find", name)
            exit(1)
        node = addnodes.literal_strong(title, title)
        if url:
            url = url_base + url
            node = nodes.reference(
                '', '', node, refuri=url
            )
        return [node], []
    return _doxylink_handler
def setup(app):
    """Sphinx extension entry point: register styles, directives and roles.

    :param app: the Sphinx application object.
    """
    # add_stylesheet() was deprecated in Sphinx 1.8 (this file's enforced
    # minimum) and removed in 4.0; add_css_file() is its replacement.
    app.add_css_file('custom.css')
    app.add_directive('flagtable', EnumTable)
    app.add_directive('enumtable', EnumTable)
    app.add_directive('pyinclude', PyInclude)
    # PYAV_SKIP_DOXYLINK disables the Doxygen-backed roles (useful when the
    # tagfile is unavailable); the roles then render nothing.
    skip = os.environ.get('PYAV_SKIP_DOXYLINK')
    for role, (filename, url_base) in doxylink.items():
        if skip:
            app.add_role(role, lambda *args: ([], []))
        else:
            app.add_role(role, doxylink_create_handler(app, filename, url_base))
| 27.288889 | 98 | 0.623927 |
7956287ddae85a8c07c28d9a76a7ae268241c698 | 1,399 | py | Python | python/setup.py | Zelenyy/phd-code | d5b8bfefd2418a915dde89f7da2cb6683f438556 | [
"MIT"
] | null | null | null | python/setup.py | Zelenyy/phd-code | d5b8bfefd2418a915dde89f7da2cb6683f438556 | [
"MIT"
] | null | null | null | python/setup.py | Zelenyy/phd-code | d5b8bfefd2418a915dde89f7da2cb6683f438556 | [
"MIT"
] | null | null | null | #!/usr/bin/python
"""
"""
import os
import setuptools
# Use the README as the PyPI long description.
with open(os.path.join(os.path.dirname(__file__), "README.md"), "r") as fh:
    long_description = fh.read()
# Minimum supported versions for the scientific stack dependencies.
NUMPY_MIN_VERSION = '1.8.2'
SCIPY_MIN_VERSION = '1.3.1'
# PANDAS_MIN_VERSION = ''
MATPLOTLIB_MIN_VERSION = '3.1.1'
PYTABLES_MIN_VERSION = '3.5.1'
setuptools.setup(
    name="phd",
    version="0.0.1",
    author="Mikhail Zelenyi",
    author_email="mihail.zelenyy@phystech.edu",
    url='http://npm.mipt.ru/',
    description="Python scripts for my phd thesis",
    license="MIT License",
    long_description=long_description,
    long_description_content_type="text/markdown",
    keywords="phd",
    packages=setuptools.find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
    classifiers=(
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ),
    # project_urls={
    #     "Bug Tracker": "",
    #     "Documentation": "",
    #     "Source Code": "",
    # },
    install_requires=[
        'numpy>={0}'.format(NUMPY_MIN_VERSION),
        'scipy>={0}'.format(SCIPY_MIN_VERSION),
        # 'pandas>={0}'.format(PANDAS_MIN_VERSION),
        'matplotlib>={0}'.format(MATPLOTLIB_MIN_VERSION),
        'tables>={0}'.format(PYTABLES_MIN_VERSION),
        'dataforge',
        'tabulate'
    ],
    test_suite='tests'
)
795628a8e736dac1ba01982255ba9e29e3a04194 | 1,359 | py | Python | django_test/django_test/urls.py | MachineLearningIsEasy/python_lesson_26 | 24a219862c5e0577a804e7365cf441f45846f9ae | [
"MIT"
] | null | null | null | django_test/django_test/urls.py | MachineLearningIsEasy/python_lesson_26 | 24a219862c5e0577a804e7365cf441f45846f9ae | [
"MIT"
] | null | null | null | django_test/django_test/urls.py | MachineLearningIsEasy/python_lesson_26 | 24a219862c5e0577a804e7365cf441f45846f9ae | [
"MIT"
] | null | null | null | """django_test URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django_test import views
from django.conf.urls.static import static
# Top-level URL routes; app-specific routes are delegated via include().
urlpatterns = [
    path('', views.home_page, name='home'),
    path('admin/', admin.site.urls),
    path('articles/', include('articles.urls')),
    path('users/', include('users.urls')),
]

# Serve user-uploaded media through Django itself in development only;
# in production the web server should serve MEDIA_URL directly.
# (Was `settings.DEBUG is True`: identity comparison on a config value is
# fragile — truthiness matches the second DEBUG check below.)
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)

# Mount django-debug-toolbar in development; imported lazily so the
# package is only required when DEBUG is on.
if settings.DEBUG:
    import debug_toolbar
    urlpatterns = [
        path('__debug__/', include(debug_toolbar.urls)),
        # For django versions before 2.0:
        # url(r'^__debug__/', include(debug_toolbar.urls)),
    ] + urlpatterns
| 33.146341 | 80 | 0.702723 |
7956295d2bfc881175d2942d2c3086ec24c06062 | 4,322 | py | Python | certbot/certbot/plugins/storage.py | radek-sprta/certbot | a6772043d6631341b525c4d69b47d6ef2d8b5d02 | [
"Apache-2.0"
] | 1 | 2020-01-23T05:57:42.000Z | 2020-01-23T05:57:42.000Z | certbot/certbot/plugins/storage.py | radek-sprta/certbot | a6772043d6631341b525c4d69b47d6ef2d8b5d02 | [
"Apache-2.0"
] | null | null | null | certbot/certbot/plugins/storage.py | radek-sprta/certbot | a6772043d6631341b525c4d69b47d6ef2d8b5d02 | [
"Apache-2.0"
] | null | null | null | """Plugin storage class."""
import json
import logging
from acme.magic_typing import Any # pylint: disable=unused-import, no-name-in-module
from acme.magic_typing import Dict # pylint: disable=unused-import, no-name-in-module
from certbot import errors
from certbot.compat import filesystem
from certbot.compat import os
logger = logging.getLogger(__name__)
class PluginStorage(object):
    """Class implementing storage functionality for plugins.

    Values are namespaced per plugin under ``classkey`` and persisted as
    JSON in ``<config_dir>/.pluginstorage.json``.  Loading is lazy: the
    file is only read on the first :meth:`put` or :meth:`fetch`.
    """

    def __init__(self, config, classkey):
        """Initializes PluginStorage object storing required configuration
        options.

        :param .configuration.NamespaceConfig config: Configuration object
        :param str classkey: class name to use as root key in storage file

        """
        self._config = config
        self._classkey = classkey
        self._initialized = False
        self._data = None
        self._storagepath = None

    def _initialize_storage(self):
        """Initializes PluginStorage data and reads current state from the disk
        if the storage json exists."""

        self._storagepath = os.path.join(self._config.config_dir, ".pluginstorage.json")
        self._load()
        self._initialized = True

    def _load(self):
        """Reads PluginStorage content from the disk to a dict structure

        :raises .errors.PluginStorageError: when unable to open or read the file

        """
        data = {}  # type: Dict[str, Any]
        filedata = ""
        try:
            with open(self._storagepath, 'r') as fh:
                filedata = fh.read()
        except IOError as e:
            errmsg = "Could not read PluginStorage data file: {0} : {1}".format(
                self._storagepath, str(e))
            if os.path.isfile(self._storagepath):
                # Only error out if file exists, but cannot be read
                logger.error(errmsg)
                raise errors.PluginStorageError(errmsg)
        try:
            data = json.loads(filedata)
        except ValueError:
            if not filedata:
                # A missing or empty storage file is normal on first use;
                # start with an empty dict.
                logger.debug("Plugin storage file %s was empty, no values loaded",
                             self._storagepath)
            else:
                errmsg = "PluginStorage file {0} is corrupted.".format(
                    self._storagepath)
                logger.error(errmsg)
                raise errors.PluginStorageError(errmsg)
        self._data = data

    def save(self):
        """Saves PluginStorage content to disk

        :raises .errors.PluginStorageError: when unable to serialize the data
            or write it to the filesystem

        """
        if not self._initialized:
            errmsg = "Unable to save, no values have been added to PluginStorage."
            logger.error(errmsg)
            raise errors.PluginStorageError(errmsg)

        try:
            serialized = json.dumps(self._data)
        except TypeError as e:
            errmsg = "Could not serialize PluginStorage data: {0}".format(
                str(e))
            logger.error(errmsg)
            raise errors.PluginStorageError(errmsg)
        try:
            # Owner-only permissions (0600): the storage may hold sensitive
            # plugin state.
            with os.fdopen(filesystem.open(
                    self._storagepath,
                    os.O_WRONLY | os.O_CREAT | os.O_TRUNC,
                    0o600), 'w') as fh:
                fh.write(serialized)
        except IOError as e:
            errmsg = "Could not write PluginStorage data to file {0} : {1}".format(
                self._storagepath, str(e))
            logger.error(errmsg)
            raise errors.PluginStorageError(errmsg)

    def put(self, key, value):
        """Put configuration value to PluginStorage

        :param str key: Key to store the value to
        :param value: Data to store

        """
        if not self._initialized:
            self._initialize_storage()

        # PEP 8 membership test (the original spelled this
        # `if not self._classkey in self._data.keys()`).
        if self._classkey not in self._data:
            self._data[self._classkey] = {}
        self._data[self._classkey][key] = value

    def fetch(self, key):
        """Get configuration value from PluginStorage

        :param str key: Key to get value from the storage

        :raises KeyError: If the key doesn't exist in the storage

        """
        if not self._initialized:
            self._initialize_storage()

        return self._data[self._classkey][key]
| 34.854839 | 88 | 0.600648 |
79562a3bf731559738201484e63946bd639a313d | 140 | py | Python | Food/apps.py | PopaGabriel/Food-Ecommerce-Site | 7f7cde94939f02f13df5afa865cddc72981481e2 | [
"MIT"
] | 1 | 2021-08-12T08:46:56.000Z | 2021-08-12T08:46:56.000Z | Food/apps.py | PopaGabriel/Food-Ecommerce-Site | 7f7cde94939f02f13df5afa865cddc72981481e2 | [
"MIT"
] | null | null | null | Food/apps.py | PopaGabriel/Food-Ecommerce-Site | 7f7cde94939f02f13df5afa865cddc72981481e2 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class FoodConfig(AppConfig):
    """Django application configuration for the ``Food`` app."""

    # 64-bit auto-incrementing primary keys for models in this app.
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'Food'
| 20 | 56 | 0.75 |
79562a4653dc7daf335f2dd82954a5d4ff9ddc38 | 2,299 | py | Python | test/functional/p2p_pos_fakestake.py | braveheart12/DogeCash | 5138cbe1adb8a82f014b8706255e47407408d04f | [
"MIT"
] | null | null | null | test/functional/p2p_pos_fakestake.py | braveheart12/DogeCash | 5138cbe1adb8a82f014b8706255e47407408d04f | [
"MIT"
] | 2 | 2019-05-02T13:30:58.000Z | 2019-05-06T11:15:36.000Z | test/functional/p2p_pos_fakestake.py | braveheart12/DogeCash | 5138cbe1adb8a82f014b8706255e47407408d04f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2019 The DogeCash Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Covers the scenario of a PoS block where the coinstake input prevout is already spent.
'''
from time import sleep
from fake_stake.base_test import dogecash_FakeStakeTest
class PoSFakeStake(dogecash_FakeStakeTest):
    """Functional test: spam PoS blocks whose coinstake prevout is already spent.

    Mines rewards, amplifies them into many utxos, spends them, then sends
    "fake stake" blocks built on those spent prevouts; the test fails if any
    spam block is accepted by the node.
    """

    def run_test(self):
        self.description = "Covers the scenario of a PoS block where the coinstake input prevout is already spent."
        self.init_test()

        INITAL_MINED_BLOCKS = 150   # First mined blocks (rewards collected to spend)
        MORE_MINED_BLOCKS = 100     # Blocks mined after spending
        STAKE_AMPL_ROUNDS = 2       # Rounds of stake amplification
        self.NUM_BLOCKS = 3         # Number of spammed blocks

        # 1) Starting mining blocks
        self.log.info("Mining %d blocks.." % INITAL_MINED_BLOCKS)
        self.node.generate(INITAL_MINED_BLOCKS)

        # 2) Collect the possible prevouts
        self.log.info("Collecting all unspent coins which we generated from mining...")

        # 3) Create 10 addresses - Do the stake amplification
        self.log.info("Performing the stake amplification (%d rounds)..." % STAKE_AMPL_ROUNDS)
        utxo_list = self.node.listunspent()
        address_list = []
        for i in range(10):
            address_list.append(self.node.getnewaddress())
        utxo_list = self.stake_amplification(utxo_list, STAKE_AMPL_ROUNDS, address_list)
        self.log.info("Done. Utxo list has %d elements." % len(utxo_list))
        sleep(2)

        # 4) Start mining again so that spent prevouts get confirmted in a block.
        self.log.info("Mining %d more blocks..." % MORE_MINED_BLOCKS)
        self.node.generate(MORE_MINED_BLOCKS)
        sleep(2)

        # 5) Create "Fake Stake" blocks and send them
        self.log.info("Creating Fake stake blocks")
        err_msgs = self.test_spam("Main", utxo_list)
        # Any collected error message means at least one spam block got through.
        # (PEP 8: test emptiness by truthiness, not `not len(x) == 0`.)
        if err_msgs:
            self.log.error("result: " + " | ".join(err_msgs))
            raise AssertionError("TEST FAILED")

        self.log.info("%s PASSED" % self.__class__.__name__)
# Entry point: allow running this functional test directly.
if __name__ == '__main__':
    PoSFakeStake().main()
| 38.966102 | 115 | 0.671596 |
79562ae454248e427a661ec32b3ec6a49eb050d8 | 5,119 | py | Python | Algoritm_of_Gamp.py | Quimica-supramolecular/scripts_graficas | 2db5352bd1e8080de7f71db172c1f85d053f379c | [
"MIT"
] | null | null | null | Algoritm_of_Gamp.py | Quimica-supramolecular/scripts_graficas | 2db5352bd1e8080de7f71db172c1f85d053f379c | [
"MIT"
] | null | null | null | Algoritm_of_Gamp.py | Quimica-supramolecular/scripts_graficas | 2db5352bd1e8080de7f71db172c1f85d053f379c | [
"MIT"
] | null | null | null | #Algoritm of Gamp
from autograd.differential_operators import jacobian
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.optimize import minimize
from autograd import elementwise_grad as egrad
from autograd import grad
# This function open window for search local file .xlxs and .xlsx in directory and return filename
def open_file():
    """Show a file-open dialog and return the path the user selected."""
    import tkinter as tk
    from tkinter import filedialog

    window = tk.Tk()
    window.withdraw()  # hide the empty root window; only the dialog is shown
    window.filename = filedialog.askopenfilename(
        initialdir="/",
        title="Select file",
        filetypes=(("xlsx files", "*.xlsx"), ("all files", "*.*")),
    )
    return window.filename
# This function read data from file and return dataframe
# Ask the user to pick the titration workbook (.xlsx) before anything else.
filename = open_file()
def read_file(filename, sheet_name, index_col):
    """Load one worksheet of an Excel workbook into a DataFrame.

    The first row is used as the header; ``index_col`` selects the index
    column (or ``None`` for a default RangeIndex).
    """
    return pd.read_excel(filename, sheet_name=sheet_name, header=0,
                         index_col=index_col)
# Load the titration spectra and the concentration table from the workbook.
df_spec = read_file(filename,"datos_titulacion", 0)
df_conc = read_file(filename,"conc", None)
C_T = df_conc#[:,0:-1]
# Column 0 is total host [H], column 1 total guest [G] per titration point
# (see the "[H_libre]" / "[G], M" axis labels below).
G = C_T.iloc[:,1]
H = C_T.iloc[:,0]
nc = len(C_T)    # number of titration points
nw = len(df_spec)    # number of wavelengths
# SVD of the data matrix; the log-eigenvalue plot helps the user choose
# how many factors (EV) to retain.
u, s, v = np.linalg.svd(df_spec, full_matrices=False)
plt.plot(range(0, nc), np.log10(s), "o")
plt.ylabel("log(EV)")
plt.xlabel("# de autovalores")
plt.show()
EV = int(input("¿Cuantos autovalores incluirá en el cálculo?: ", ))
# Rank-EV reconstruction of the data matrix.
Y = u[:,0:EV] @ np.diag(s[0:EV:]) @ v[0:EV:]
# =============================================================================
# plt.plot(range(0, nw), u[:,0:EV])
# plt.ylabel("Matriz U de SVD")
# plt.xlabel("# de autovalores")
# plt.show()
# =============================================================================
# Fixed evolving factor analysis (EFA): forward ([:i]) and backward ([i:])
# eigenvalue scans over growing/shrinking windows of the data.
L = range(1,(nc + 1), 1)
L2 = range(0, nc, 1)
X = []
for i in L:
    uj, sj, vj = np.linalg.svd(df_spec.T.iloc[:i,:], full_matrices=False)
    X.append(sj**2)
ev_s = pd.DataFrame(X)
ev_s0 = np.array(ev_s)
X2 = []
for i in L2:
    ui, si, vi = np.linalg.svd(df_spec.T.iloc[i:,:], full_matrices=False)
    X2.append(si**2)
ev_s1 = pd.DataFrame(X2)
ev_s10 = np.array(ev_s1)
plt.figure()
plt.plot(G, np.log10(ev_s0), "k-o")
plt.plot(G, np.log10(ev_s10), "b:o")
plt.ylabel("log(EV)")
plt.xlabel("[G], M")
plt.show()
# Combine the forward and backward scans, keeping the smaller value of the
# two per factor (classic EFA window combination).
C1 = (ev_s.iloc[:,0:EV]).fillna(0)
C2 = (ev_s1.iloc[:,0:EV]).fillna(0)
EFA0 = []
for i in range(0, EV):
    EFA1 = np.array([C1.iloc[:,i], C2.iloc[:,-1-i]])
    EFA2 = np.min(EFA1, 0)
    EFA0.append(EFA2)
EFA = np.array(EFA0)
plt.plot(G, EFA.T, ":o")
plt.show()
print("\n Escriba la tolerancia para separar los autovalores del ruido. Para obtener el valor por default escriba 0", end="")
tolerancia = float(input("¿Cual es el nivel de tolerancia deseada?: ", ))
if tolerancia == 0:
    tolerancia = 0.25
# Threshold the log-scaled EFA profiles against the tolerance and rescale
# to concentration units, using the largest total host value as ceiling.
EFA = np.log10(abs(EFA / EFA[EFA != 0].min())) - tolerancia
c_e = EFA / EFA.max()
c_e[c_e < 0] = 0
C = c_e * max(H)
plt.plot(G, C.T, ":o")
plt.ylabel("[H_libre], M")
plt.xlabel("[G], M")
plt.show()
# One association constant per retained factor beyond the free species.
n_K = EV - 1
if n_K == 1:
    k_e = float(input("Indique un valor estimado para la constante de asociación: ",))
else:
    k_e = []
    for i in range(n_K):
        print("K" + str(i+1) + ":", end="")
        # NOTE(review): the loop index `i` is overwritten by the input value
        # here; harmless, but an independent name would be clearer.
        i = float(input("Indique un valor estimado para esta constante de asociación: ",))
        k_e.append(i)
k_p = np.array(k_e)
k_u = np.array([1, 1])
# Least-squares spectra estimate and the augmented concentration table.
A = (np.linalg.pinv(C.T) @ Y.T)
C_t = np.concatenate((C_T, C.T), axis=1)
model = np.array([[1, 0, 1, 1],[0,1,1,2]])
def el_finito(fun, x):
    """Complex-step derivative of ``fun`` at ``x``, one component at a time.

    Perturbs each coordinate by an imaginary step of 1e-20 and returns the
    array of imag(fun)/step values (accurate gradient without subtraction
    cancellation).
    """
    delta = 1e-20
    derivs = []
    for k in range(len(x)):
        perturb = np.zeros(len(x), dtype=complex)
        perturb[k] = complex(0, delta)
        derivs.append(np.imag(fun(x + perturb)) / delta)
    return np.array(derivs)
def conteo(lista, u_lista):
    """Count how many elements of ``lista`` compare equal to ``u_lista``."""
    return sum(1 for ele in lista if ele == u_lista)
def multiply(L):
    """Running (cumulative) product of ``L``, returned as a numpy array."""
    running = []
    for item in L:
        running.append(item if not running else item * running[-1])
    return np.array(running)
def fun_obj(g, h):
    """Element-wise residual |H - h| + |G - g| against the module totals H, G."""
    return abs(H - h) + abs(G - g)
def g_free(C, args = (k_e, G)):
    """Free-guest concentration profile: total G minus guest bound in complexes.

    ``C`` columns are assumed to be [free host, HG, HG2, ...], so column m
    binds m guests -- TODO confirm against c_complex().
    ``args`` is an unused def-time default capturing module globals.
    """
    g_f = []
    for m in range(1, len(k_e)+1):
        g_st = m * C[:,m]
        # NOTE(review): this doubles g_st in place instead of accumulating
        # across loop iterations -- looks suspicious; verify the intent.
        g_st += g_st
        g_f.append(g_st.T)
    g_f = G - np.sum(np.array(g_f), axis=0)
    return np.array(g_f)
def c_complex(K, C, args = (H, G)):
    """Rebuild the species matrix [H_free, HG, HG2, ...] from stepwise constants K.

    ``args`` is an unused def-time default capturing module globals.
    """
    g = g_free(C)
    hg_p = []
    for z in range(1, len(K)+1):
        # multiply(K[0:z])[-1] is the cumulative product K1*...*Kz, i.e. the
        # overall formation constant of the z-th complex.
        hg = multiply(K[0:z])[-1] * g**z * C[:,0]
        # NOTE(review): 'hg += hg' doubles the term (same pattern as in
        # g_free) rather than accumulating -- verify the intent.
        hg += hg
        hg_p.append(hg.T)
    hg_p = np.array(hg_p)
    h_f = np.array(H - np.sum(hg_p, axis=0))
    c = np.column_stack((h_f, hg_p.T))
    C = np.array(c)
    return C
def loss(K, C):
    """Objective for fitting association constants K.

    :param K: stepwise association constants.
    :param C: current concentration matrix (columns: free host, complexes).
    :returns: tuple ``(f, c_0)`` -- the element-wise residual from fun_obj
        and the concentration matrix rebuilt by c_complex.
    """
    g = g_free(C)
    c_0 = c_complex(K, C)
    h = np.sum(c_0, axis=1)  # total host summed over species per point
    f = fun_obj(g, h)
    # Dropped the original dead local rebinding (C = c_0); it never escaped.
    return f, c_0
# Single evaluation of the objective with the user-supplied initial constants;
# c_0 is the rebuilt concentration matrix, f the per-point residual.
f, c_0 = loss(k_e, C.T)
#C_cal = c_complex(k_e, C.T, H, G)
plt.plot(G, c_0, ":o")
plt.show()
| 25.216749 | 126 | 0.542684 |
79562b1752c3e67509544863eaaa19db6472c364 | 1,495 | py | Python | grizzly/steps/setup.py | boffman/grizzly | eabe7b8f6cd7098914a1473928135c1e05758af7 | [
"MIT"
] | null | null | null | grizzly/steps/setup.py | boffman/grizzly | eabe7b8f6cd7098914a1473928135c1e05758af7 | [
"MIT"
] | null | null | null | grizzly/steps/setup.py | boffman/grizzly | eabe7b8f6cd7098914a1473928135c1e05758af7 | [
"MIT"
] | null | null | null | from typing import cast
from os import environ
from behave import given, then # pylint: disable=no-name-in-module
from behave.runner import Context
from ..context import GrizzlyContext
@then(u'ask for value of variable "{name}"')
@given(u'ask for value of variable "{name}"')
def step_setup_variable_value_ask(context: Context, name: str) -> None:
'''This step is used to indicate for `grizzly-cli` that it should ask for an initial value for the variable.
It will then inject the value into the locust runtime environment, and in this step read it and insert it
into the locust context which grizzly will use to setup locust.
If `grizzly-cli` is not used, one has to manually set the environment variable, which requires a prefix of
`TESTDATA_VARIABLE_` and the suffix should match the variable name in question.
Use this step for variables that should have different initial values for each run of the feature.
```gherkin
And ask for value for variable "AtomicIntegerIncrementer.messageID"
```
Args:
name (str): variable name used in templates
'''
grizzly = cast(GrizzlyContext, context.grizzly)
value = environ.get(f'TESTDATA_VARIABLE_{name}', None)
assert value is not None, f'variable "{name}" does not have a value'
assert name not in grizzly.state.variables, f'variable "{name}" has already been set'
try:
grizzly.state.variables[name] = value
except ValueError as e:
assert 0, str(e)
| 38.333333 | 112 | 0.724415 |
79562b8ef217325096ccb9e539997fbc94135be6 | 1,525 | py | Python | pages/migrations/0003_auto_20170621_1537.py | quis/government-form-explorer | 57073bff91c8e72e63217b7b0e42491076f021fb | [
"MIT"
] | null | null | null | pages/migrations/0003_auto_20170621_1537.py | quis/government-form-explorer | 57073bff91c8e72e63217b7b0e42491076f021fb | [
"MIT"
] | 1 | 2018-06-14T13:59:07.000Z | 2018-06-14T13:59:07.000Z | pages/migrations/0003_auto_20170621_1537.py | quis/government-form-explorer | 57073bff91c8e72e63217b7b0e42491076f021fb | [
"MIT"
] | 6 | 2018-06-13T09:50:01.000Z | 2021-04-10T19:42:32.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-06-21 15:37
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import taggit.managers
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
('taggit', '0002_auto_20150616_2121'),
('pages', '0002_attachment_tags'),
]
operations = [
migrations.CreateModel(
name='GenericStringTaggedItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('object_id', models.CharField(db_index=True, max_length=256, verbose_name='Object id')),
('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='pages_genericstringtaggeditem_tagged_items', to='contenttypes.ContentType', verbose_name='Content type')),
('tag', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='pages_genericstringtaggeditem_items', to='taggit.Tag')),
],
options={
'abstract': False,
},
),
migrations.AlterField(
model_name='attachment',
name='tags',
field=taggit.managers.TaggableManager(help_text='A comma-separated list of tags.', through='pages.GenericStringTaggedItem', to='taggit.Tag', verbose_name='Tags'),
),
]
| 41.216216 | 216 | 0.651148 |
79562c1d901bf9362a9330a5245aa9ccfeb8f9df | 5,003 | py | Python | covid_tracker/crud.py | KahfiA/covid-tracker-backend | a1f627dc7e20d8b23aff7762b55e1e44553e98f2 | [
"Apache-2.0"
] | null | null | null | covid_tracker/crud.py | KahfiA/covid-tracker-backend | a1f627dc7e20d8b23aff7762b55e1e44553e98f2 | [
"Apache-2.0"
] | null | null | null | covid_tracker/crud.py | KahfiA/covid-tracker-backend | a1f627dc7e20d8b23aff7762b55e1e44553e98f2 | [
"Apache-2.0"
] | null | null | null | from datetime import date, datetime
from sqlalchemy import sql
from sqlalchemy.orm import Session
import math
from geopy import distance
from . import models, schemas
def get_user(db: Session, user_id: int):
    """Fetch a single user by primary key, or None if absent."""
    query = db.query(models.User).filter(models.User.user_id == user_id)
    return query.first()
def get_users(db: Session, skip: int = 0, limit: int = 100):
    """Return a paginated list of users (``skip`` offset, ``limit`` page size)."""
    return (
        db.query(models.User)
        .offset(skip)
        .limit(limit)
        .all()
    )
def create_user(db: Session, user: schemas.UserCreate):
    """Insert a new user and return the persisted row.

    NOTE(review): the password is stored as provided (apparently plaintext).
    """
    record = models.User(
        username=user.username,
        password=user.password,
        level=user.level,
    )
    db.add(record)
    db.commit()
    db.refresh(record)
    return record
def edit_user(db: Session, user_id: int, user: schemas.UserCreate):
    """Overwrite an existing user's fields and return the refreshed row."""
    record = db.query(models.User).filter(models.User.user_id == user_id).first()
    record.username = user.username
    record.password = user.password
    record.level = user.level
    db.add(record)
    db.commit()
    db.refresh(record)
    return record
def delete_user(db: Session, user_id: int):
    """Delete a user by primary key and return the removed row."""
    record = db.query(models.User).filter(models.User.user_id == user_id).first()
    db.delete(record)
    db.commit()
    return record
####################
def get_person(db: Session, person_id: int):
    """Fetch a single person by primary key, or None if absent."""
    query = db.query(models.Person).filter(models.Person.person_id == person_id)
    return query.first()
def get_persons(db: Session, skip: int = 0, limit: int = 100):
    """Return a paginated list of persons (``skip`` offset, ``limit`` page size)."""
    return (
        db.query(models.Person)
        .offset(skip)
        .limit(limit)
        .all()
    )
def create_person(db: Session, person: schemas.PersonCreate):
    """Insert a new person and return the persisted row."""
    record = models.Person(
        name=person.name,
        age=person.age,
        gender=person.gender,
        condition=person.condition,
    )
    db.add(record)
    db.commit()
    db.refresh(record)
    return record
def edit_person(db: Session, person_id: int, person: schemas.PersonCreate):
    """Overwrite an existing person's fields and return the refreshed row."""
    record = db.query(models.Person).filter(models.Person.person_id == person_id).first()
    record.name = person.name
    record.age = person.age
    record.gender = person.gender
    record.condition = person.condition
    db.add(record)
    db.commit()
    db.refresh(record)
    return record
def delete_person(db: Session, person_id: int):
    """Delete a person by primary key and return the removed row."""
    record = db.query(models.Person).filter(models.Person.person_id == person_id).first()
    db.delete(record)
    db.commit()
    return record
####################
def create_position(db: Session, person_id: int, position: schemas.PositionCreate):
    """Insert a new position fix for a person and return the persisted row.

    Fix: the original signature used ``position = schemas.PositionCreate``
    (defaulting the parameter to the *class object*) where a type annotation
    was clearly intended; calling without ``position`` would have crashed in
    the body. All real callers that pass a PositionCreate are unaffected.
    """
    db_position = models.Position(person_id=person_id, long=position.long,
                                  lat=position.lat, date=position.date)
    db.add(db_position)
    db.commit()
    db.refresh(db_position)
    return db_position
def get_positions(db: Session, person_id: int):
    """All recorded position fixes for one person."""
    owner = db.query(models.Person).filter(models.Person.person_id == person_id).first()
    return owner.positions.all()
def get_position(db: Session, person_id: int, position_id: int):
    """One specific position fix belonging to a person, or None."""
    owner = db.query(models.Person).filter(models.Person.person_id == person_id).first()
    return owner.positions.filter(models.Position.position_id == position_id).first()
def delete_position(db: Session, person_id: int, position_id: int):
    """Delete one of a person's position fixes and return the removed row."""
    owner = db.query(models.Person).filter(models.Person.person_id == person_id).first()
    record = owner.positions.filter(models.Position.position_id == position_id).first()
    db.delete(record)
    db.commit()
    return record
####################
def check(pos: models.Position, pos_other: models.Position):
    """True when two position fixes count as a contact event.

    A contact requires all of:
      * the fixes are at most 5 minutes apart
        (NOTE(review): the time delta is *signed*, so any pos_other newer
        than pos also passes -- an abs() may have been intended; verify),
      * the fixes are within 100 m of each other (geodesic distance),
      * at least one of the two people has condition 'sakit' (sick).

    Rewritten with short-circuiting guards instead of bitwise `&` chains,
    so the comparatively expensive geodesic distance is only computed when
    the time window already matches.
    """
    if (pos.date - pos_other.date).total_seconds() > (60 * 5):
        return False
    if distance.distance((pos.lat, pos.long), (pos_other.lat, pos_other.long)).m > 100:
        return False
    return (pos_other.person.condition == models.KondisiEnum.sakit
            or pos.person.condition == models.KondisiEnum.sakit)
def get_traces(db: Session, person_id: int):
    """Contact-trace one person against every position of everyone else.

    Returns a list of PositionTraced entries, one per (own fix, other fix)
    pair that check() classifies as a contact. The scan is a full pairwise
    product of the person's fixes against all other people's fixes.
    """
    positions = db.query(models.Person).filter(models.Person.person_id == person_id).first().positions.all()
    person_others = db.query(models.Person).filter(models.Person.person_id != person_id).all()
    # Flatten every other person's fixes into one list before the scan.
    positions_others = []
    for person in person_others:
        positions_others.extend(person.positions.all())
    ret = []
    for pos in positions:
        for pos_other in positions_others:
            if(check(pos, pos_other)):
                ret.append(schemas.PositionTraced(
                    date = pos.date,
                    position_id = pos.position_id,
                    long = pos.long,
                    lat = pos.lat,
                    contacted_position_id = pos_other.position_id,
                    contacted_long = pos_other.long,
                    contacted_lat = pos_other.lat,
                    contacted_person_id = pos_other.person_id,
                    contact = True
                ))
    return ret
79562e0caf2adf1297b50ad386ea4edc18205a22 | 2,345 | py | Python | main.py | ahnorr/Naive-Resume-Matching | 22e0bdc638e7bfae4d2771782a291f7bab0c99a6 | [
"Apache-2.0"
] | null | null | null | main.py | ahnorr/Naive-Resume-Matching | 22e0bdc638e7bfae4d2771782a291f7bab0c99a6 | [
"Apache-2.0"
] | null | null | null | main.py | ahnorr/Naive-Resume-Matching | 22e0bdc638e7bfae4d2771782a291f7bab0c99a6 | [
"Apache-2.0"
] | null | null | null | from tf_idf import do_tfidf
import argparse
import re
import os
import Similar
import pandas as pd
from flask import Flask, request, send_from_directory
app = Flask(__name__)
# Resume files are served from the directory containing this script.
app.config["PATH"] = os.path.dirname(os.path.realpath(__file__))
@app.route('/extract', methods=["POST"])
def hello():
    """Rank stored resumes against a posted job description (JD).

    Expects a JSON body with exactly the keys ["JD", "Count", "return_pdf"]
    (order-sensitive: the check compares list(request.json.keys())).
    Returns {name: {"score": ..., "url": ...}} for the top Count resumes.
    """
    if list(request.json.keys()) != ["JD", "Count", "return_pdf"]:
        return {"Error" : "Invalid or all parameters not present"}
    JD = request.json["JD"]
    # Collapse runs of whitespace/newlines into single spaces.
    JD = re.sub(r"\n+|\s+", " ", JD)
    tf_JD = do_tfidf(JD.split(" "))
    skills = pd.read_csv("skills.csv", index_col=False)["skill_name"].tolist()
    req_skills = []
    # Hyphenated skill names ("machine-learning") become space separated.
    skills = [" ".join(re.split("-", skill)) for skill in skills]
    JD = re.sub(r",|\.", " ", JD)
    for skill in skills:
        # skill = f" {skill} "
        # 'com' and 'edi' are excluded explicitly (substring false positives).
        if skill.lower() in JD.lower() and skill.lower() not in ["com", "edi"]:
            req_skills.append(skill)
    req_skills = list(set(req_skills))
    print(req_skills)
    if len(req_skills) < 5: return {"Error" : "JD too vague"}
    def find_score(x, req_skills):
        # Fraction of the required skills found in resume text x.
        # NOTE(review): req_skill keeps its original case while x is
        # lower-cased, so mixed-case skill names can never match -- verify.
        x = re.sub(r",|\.", " ", x)
        val = 0
        for req_skill in req_skills:
            if req_skill in x.lower():
                val += 1
        return val/len(req_skills)
    df = pd.read_csv("Resume_PDF.csv", index_col=False)
    df["Score"] = df["TFIDF"].apply(lambda x : Similar.match(x, tf_JD))
    df["Score2"] = df["Text"].apply(lambda x : find_score(x, req_skills))
    # Blend: skill coverage (scaled x600) dominates the TF-IDF similarity.
    df["Score2"] = df.apply(lambda x : (((x["Score2"]*100*6)+x["Score"])+ 100)/8, axis=1)
    df2 = df.nlargest(request.json["Count"], "Score2")
    df2 = df2.sort_values(by=["Score2"], ascending=False)
    df2["Score"] = df2["Score2"]
    df2.drop(columns=["Score2"], inplace=True)
    df2.to_csv("TEST.csv")  # debug dump of the ranked shortlist
    df2 = df2.sort_values(by=["Score"], ascending=False)
    out = {}
    # Base URL with 'extract' stripped -> used to build per-file download links.
    val = re.sub("extract", "", str(request.base_url))
    for i, j in zip(df2["Name"].tolist(), df2["Score"].tolist()):
        out[i] = {"score" : j, "url" : f"{val}/{i}"}
    # print(val)
    return out
@app.route('/<path:filename>')
def download_file(filename):
    """Serve one of the stored resume files as an attachment download."""
    base_dir = app.config['PATH']
    print(os.path.join(base_dir, filename))
    return send_from_directory(base_dir, filename, as_attachment=True)
# Development server entry point: listen on all interfaces, port 8080.
if __name__ == "__main__":
    app.run(host ='0.0.0.0', port = 8080, debug = True)
| 37.222222 | 89 | 0.596588 |
79562f15e89fd1f08a3e980a7b78c693fac14971 | 23,323 | py | Python | upconvert/writer/t/geda_t.py | EasyPCB/schematic-file-converter | 0d575171e2aa19a35ca15d5e39c716287d885ec4 | [
"Apache-2.0"
] | 1 | 2020-09-05T03:41:18.000Z | 2020-09-05T03:41:18.000Z | upconvert/writer/t/geda_t.py | EasyPCB/schematic-file-converter | 0d575171e2aa19a35ca15d5e39c716287d885ec4 | [
"Apache-2.0"
] | null | null | null | upconvert/writer/t/geda_t.py | EasyPCB/schematic-file-converter | 0d575171e2aa19a35ca15d5e39c716287d885ec4 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# encoding: utf-8
#pylint: disable=R0904
""" The geda writer test class """
# upconvert.py - A universal hardware design file format converter using
# Format: upverter.com/resources/open-json-format/
# Development: github.com/upverter/schematic-file-converter
#
# Copyright 2011 Upverter, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import StringIO
import shutil
from upconvert.core.net import NetPoint
from upconvert.core import shape
from upconvert.core import components
from upconvert.core.design import Design
import upconvert.parser.geda
from upconvert.writer.geda import GEDA
from upconvert.parser.openjson import JSON
class GEDAWriterTestCase(unittest.TestCase):
    """ The tests of the geda writer """
    # pylint: disable=W0212

    def setUp(self):
        # Fresh writer and parser per test so no state leaks between tests.
        self.geda_writer = GEDA()
        self.oj_parser = JSON()
class GEDAWriterTests(GEDAWriterTestCase):
""" The tests of the geda writer """
# pylint: disable=W0212
def test_converter_methods(self):
""" Test if converter methods are available for all known
shapes in the core.
"""
shape_types = [
'line',
'bezier',
'label',
'rectangle',
'rounded_rectangle',
'circle',
'polygon',
]
for typ in shape_types:
self.assertTrue(hasattr(self.geda_writer, "_convert_"+typ))
def test_create_project_files(self):
""" Test creating project files in the directory derived from the
output filename. Should try to create *gafrc* file and *symbol*
directory.
"""
geda_filename = '/tmp/test_geda.sch'
self.geda_writer.create_project_files(geda_filename)
self.assertEquals(
self.geda_writer.project_dirs['project'],
'/tmp'
)
self.assertEquals(
self.geda_writer.project_dirs['symbol'],
'/tmp/symbols-test_geda.sch'
)
def test_write_schematic_file(self):
""" Reads the gEDA *simple_example* file into a design using the
gEDA parser, writes the result to a gEDA file and reads it into
a new design. Both designs are then compared regarding their
respective components, instances and nets. """
# pylint: disable=R0914
# pylint: disable=R0915
sym_dir = '/tmp/sym'
if os.path.exists('/tmp/converted.sch'):
os.remove('/tmp/converted.sch')
if os.path.exists(sym_dir):
shutil.rmtree(sym_dir)
geda_parser = upconvert.parser.geda.GEDA(
symbol_dirs=['test/geda/simple_example/symbols']
)
geda_parser.set_offset(shape.Point(0, 0))
simple_design = geda_parser.parse(
'test/geda/simple_example/simple_example.sch'
)
geda_writer = GEDA()
geda_writer.write(simple_design, '/tmp/converted.sch')
converted_design = geda_parser.parse(
'/tmp/converted.sch'
)
## parse design again to make sure it is a clean slate
geda_parser = upconvert.parser.geda.GEDA(
symbol_dirs=['test/geda/simple_example/symbols']
)
geda_parser.set_offset(shape.Point(0, 0))
simple_design = geda_parser.parse(
'test/geda/simple_example/simple_example.sch'
)
##compare nets
self.assertEquals(
sorted([(net.net_id, len(net.points)) for net in simple_design.nets]),
sorted([(net.net_id, len(net.points)) for net in converted_design.nets])
)
snets = dict([(net.net_id, net) for net in simple_design.nets])
cnets = dict([(net.net_id, net) for net in converted_design.nets])
for snet_id, snet in snets.items():
cnet = cnets[snet_id]
spoints = dict([(pt.point_id, pt) for pt in snet.points.values()])
cpoints = dict([(pt.point_id, pt) for pt in cnet.points.values()])
self.assertEqual(sorted(spoints.keys()), sorted(cpoints.keys()))
for spoint_id, spoint in spoints.items():
cpoint = cpoints[spoint_id]
self.assertEquals(spoint.x, cpoint.x)
self.assertEquals(spoint.y, cpoint.y)
## compare component library
self.assertEqual(
sorted(simple_design.components.components.keys()),
sorted(converted_design.components.components.keys())
)
for lib_id in simple_design.components.components:
scomponent = simple_design.components.components[lib_id]
ccomponent = converted_design.components.components[lib_id]
self.assertEquals(scomponent.name, ccomponent.name)
self.assertEquals(scomponent.attributes, ccomponent.attributes)
self.assertEquals(len(scomponent.symbols), 1)
self.assertEquals(
len(scomponent.symbols),
len(ccomponent.symbols)
)
self.assertEquals(len(scomponent.symbols[0].bodies), 1)
self.assertEquals(
len(scomponent.symbols[0].bodies),
len(ccomponent.symbols[0].bodies)
)
sbody = scomponent.symbols[0].bodies[0]
cbody = ccomponent.symbols[0].bodies[0]
## skip the unassigned shapes component as it adds an additional
## text from the basename when importing GEDA
if scomponent.name != 'UNASSIGNED_SHAPES':
self.assertEquals(len(sbody.shapes), len(cbody.shapes))
self.assertEquals(len(sbody.pins), len(cbody.pins))
for spin, cpin in zip(sbody.pins, cbody.pins):
self.assertEquals(spin.p1.x, cpin.p1.x)
self.assertEquals(spin.p1.x, cpin.p1.x)
self.assertEquals(spin.p2.y, cpin.p2.y)
self.assertEquals(spin.p2.y, cpin.p2.y)
if spin.label is None:
self.assertEquals(cpin.label, None)
else:
self.assertEquals(spin.label.text, cpin.label.text)
for sshape, cshape in zip(sbody.shapes, cbody.shapes):
self.assertEquals(sshape.type, cshape.type)
## compare component instances
scomp_instances = dict([(comp.instance_id, comp) for comp in simple_design.component_instances])
ccomp_instances = dict([(comp.instance_id, comp) for comp in converted_design.component_instances])
for instance_id in scomp_instances:
sinst = scomp_instances[instance_id]
cinst = ccomp_instances[instance_id]
self.assertEquals(sinst.instance_id, cinst.instance_id)
self.assertEquals(sinst.library_id, cinst.library_id)
self.assertEquals(sinst.symbol_index, cinst.symbol_index)
self.assertEquals(
sinst.symbol_attributes[0].x,
cinst.symbol_attributes[0].x
)
self.assertEquals(
sinst.symbol_attributes[0].y,
cinst.symbol_attributes[0].y
)
self.assertEquals(
sinst.symbol_attributes[0].rotation,
cinst.symbol_attributes[0].rotation
)
    def test_write_component_to_file(self):
        """ Tests writing a component to a symbol file.

        One .sym file per symbol of the component is expected, named
        '<symbolname>-<index>.sym', and each gets an entry in the writer's
        component_library keyed by (library_id, symbol_index).
        """
        sym_dir = '/tmp/sym'
        # Start from an empty symbol output directory.
        if os.path.exists(sym_dir):
            shutil.rmtree(sym_dir)
        os.mkdir(sym_dir)
        self.geda_writer.set_offset(shape.Point(-500, -500))
        self.geda_writer.component_library = dict()
        self.geda_writer.project_dirs['symbol'] = sym_dir
        simple_design = self.oj_parser.parse('test/openjson/simple.upv')
        library_id = '0000000000000001'
        component = simple_design.components.components[library_id]
        self.geda_writer.write_component_to_file(library_id, component)
        component_library = self.geda_writer.component_library
        self.assertEquals(len(component_library), 4)
        self.assertEquals(
            component_library,
            {
                (library_id, 0): 'Flag_1-0.sym',
                (library_id, 1): 'Flag_2-1.sym',
                (library_id, 2): 'GND-2.sym',
                (library_id, 3): 'VCC-3.sym'
            }
        )
        # The same four files must actually exist on disk.
        self.assertEquals(
            sorted(os.listdir(sym_dir)),
            ['Flag_1-0.sym', 'Flag_2-1.sym', 'GND-2.sym', 'VCC-3.sym']
        )
    def test_write_component_to_file_symbol_dirs(self):
        """ Tests writing a component to a symbol file with symbol dirs.

        Components whose symbols are already available in the configured
        symbol_dirs are only referenced by name in component_library (no
        '-<index>' suffix) and are not written out -- hence the empty
        output directory at the end.
        """
        sym_dir = '/tmp/sym'
        if os.path.exists(sym_dir):
            shutil.rmtree(sym_dir)
        os.mkdir(sym_dir)
        self.geda_writer = GEDA(
            symbol_dirs=['test/geda/simple_example/symbols'])
        self.geda_writer.component_library = dict()
        self.geda_writer.project_dirs['symbol'] = sym_dir
        geda_parser = upconvert.parser.geda.GEDA(
            symbol_dirs=['test/geda/simple_example/symbols']
        )
        converted_design = geda_parser.parse(
            'test/geda/simple_example/simple_example.sch'
        )
        library_id = 'opamp'
        component = converted_design.components.components[library_id]
        self.geda_writer.write_component_to_file(library_id, component)
        component_library = self.geda_writer.component_library
        self.assertEquals(len(component_library), 1)
        self.assertEquals(
            component_library,
            {
                (library_id, 0): 'opamp.sym',
            }
        )
        # Writing a second known component only adds another mapping entry.
        library_id = 'capacitor-1'
        component = converted_design.components.components[library_id]
        self.geda_writer.write_component_to_file(library_id, component)
        component_library = self.geda_writer.component_library
        self.assertEquals(len(component_library), 2)
        self.assertEquals(
            component_library,
            {
                ('opamp', 0): 'opamp.sym',
                (library_id, 0): 'capacitor-1.sym',
            }
        )
        # Nothing was written to disk for symbols found in symbol_dirs.
        self.assertEquals(sorted(os.listdir(sym_dir)), [])
def test_generate_net_commands(self):
""" Tests creating commands for nets that can then be
written to the schematic file.
"""
design = self.oj_parser.parse('test/geda/nets_exported.upv')
self.geda_writer.set_offset(design.bounds()[0])
commands = self.geda_writer.generate_net_commands(design.nets)
self.assertTrue(len(commands) > 0)
segment_count = 0
for command in commands:
if command.startswith('N '):
segment_count += 1
self.assertEquals(segment_count, 21)
env_count = 0
for command in commands:
if command.startswith('{'):
env_count += 1
self.assertEquals(env_count, 4)
commands += ['v 20110115 2\n']
geda_parser = upconvert.parser.geda.GEDA()
geda_parser.unassigned_body = components.SBody()
new_design = geda_parser.parse_schematic(
StringIO.StringIO('\n'.join(commands))
)
self.assertEquals(len(design.nets), len(new_design.nets))
def test_create_component(self):
""" Tests creating components from various gEDA commands. """
component = self.geda_writer._create_component(0, 0, 'test-1.sym')
self.assertEquals(
component,
['C 0 0 0 0 0 test-1.sym']
)
def test_create_attribute(self):
""" Tests creating attribute commands. """
attribute = self.geda_writer._create_attribute(
'_private_attr', 'U1',
0, 0
)
self.assertEquals(
attribute,
['T 0 0 5 10 0 1 0 0 1', 'private_attr=U1']
)
attribute = self.geda_writer._create_attribute(
'attr', 'U1',
0, 0,
style_size=25
)
self.assertEquals(
attribute,
['T 0 0 5 25 1 1 0 0 1', 'attr=U1']
)
def test_create_text(self):
""" Tests creating text commands. """
text = self.geda_writer._create_text('some text', 0, 0)
self.assertEquals(len(text), 2)
self.assertEquals(
text,
['T 0 0 9 10 1 1 0 0 1', 'some text']
)
text = self.geda_writer._create_text(
"some text\nmulti line\ntext",
0, 0, style_size=25, style_color=5, visibility=0,
alignment='right',
)
self.assertEquals(len(text), 4)
self.assertEquals(
text,
['T 0 0 5 25 0 1 0 4 3', "some text", "multi line", "text"]
)
def test_create_pin(self):
""" Tests creating pin commands. """
pin = components.Pin('E', (0, 0), (0, 30))
command = self.geda_writer._create_pin(1, pin)
self.assertEquals(
command,
[
'P 0 300 0 0 1 0 0',
'{',
'T 100 400 5 10 0 1 0 0 1',
'pinseq=1',
'T 100 500 5 10 0 1 0 0 1',
'pinnumber=E',
'}'
]
)
label = shape.Label(10, 0, 'p1', align='left', rotation=0.5)
pin = components.Pin('E', (0, 0), (0, 30), label=label)
command = self.geda_writer._create_pin(1, pin)
self.assertEquals(
command,
[
'P 0 300 0 0 1 0 0',
'{',
'T 100 0 5 10 1 1 270 0 1',
'pinlabel=p1',
'T 100 400 5 10 0 1 0 0 1',
'pinseq=1',
'T 100 500 5 10 0 1 0 0 1',
'pinnumber=E',
'}'
]
)
def test_convert_arc(self):
""" Tests converting Arc objects to arc commands."""
arc = shape.Arc(0, 0, 0.0, 0.7, 30)
command = self.geda_writer._convert_arc(arc)
self.assertEquals(
command,
['A 0 0 300 0 235 3 10 0 0 -1 -1']
)
arc = shape.Arc(200, 400, 1.0, 0.5, 10)
command = self.geda_writer._convert_arc(arc)
self.assertEquals(
command,
['A 2000 4000 100 180 90 3 10 0 0 -1 -1']
)
arc = shape.Arc(200, 400, 0.2, 0.1, 10)
command = self.geda_writer._convert_arc(arc)
self.assertEquals(
command,
['A 2000 4000 100 324 18 3 10 0 0 -1 -1']
)
def test_convert_circle(self):
""" Tests converting Circle objects to circle commands."""
circle = shape.Circle(0, 0, 300)
command = self.geda_writer._convert_circle(circle)
self.assertEquals(
command,
['V 0 0 3000 3 10 0 0 -1 -1 0 -1 -1 -1 -1 -1']
)
circle = shape.Circle(10, 30, 10)
command = self.geda_writer._convert_circle(circle)
self.assertEquals(
command,
['V 100 300 100 3 10 0 0 -1 -1 0 -1 -1 -1 -1 -1']
)
def test_convert_rectangle(self):
""" Tests converting Rectancle and RoundedRectangle
objects to box commands.
"""
rect = shape.Rectangle(0, 0, 40, 50)
command = self.geda_writer._convert_rectangle(rect)
self.assertEquals(
command,
['B 0 -500 400 500 3 10 0 0 -1 -1 0 -1 -1 -1 -1 -1']
)
rect = shape.Rectangle(100, 50, 150, 30)
command = self.geda_writer._convert_rectangle(rect)
self.assertEquals(
command,
['B 1000 200 1500 300 3 10 0 0 -1 -1 0 -1 -1 -1 -1 -1']
)
rect = shape.RoundedRectangle(0, 0, 40, 50, 0.5)
command = self.geda_writer._convert_rounded_rectangle(rect)
self.assertEquals(
command,
['B 0 -500 400 500 3 10 0 0 -1 -1 0 -1 -1 -1 -1 -1']
)
rect = shape.RoundedRectangle(100, 50, 150, 30, 0.1)
command = self.geda_writer._convert_rounded_rectangle(rect)
self.assertEquals(
command,
['B 1000 200 1500 300 3 10 0 0 -1 -1 0 -1 -1 -1 -1 -1']
)
def test_convert_line(self):
""" Tests converting Line objects to line commands. """
line = shape.Line((0, 0), (0, 50))
command = self.geda_writer._convert_line(line)
self.assertEquals(
command,
['L 0 0 0 500 3 10 0 0 -1 -1']
)
line = shape.Line((20, 40), (-20, 40))
command = self.geda_writer._convert_line(line)
self.assertEquals(
command,
['L 200 400 -200 400 3 10 0 0 -1 -1']
)
line = shape.Line((20, 40), (-30, 50))
command = self.geda_writer._convert_line(line)
self.assertEquals(
command,
['L 200 400 -300 500 3 10 0 0 -1 -1']
)
def test_convert_label(self):
""" Tests converting Lable objects to label commands. """
label = shape.Label(0, 0, 'test label', align='center', rotation=0.0)
command = self.geda_writer._convert_label(label)
self.assertEquals(
command,
[
'T 0 0 9 10 1 1 0 3 1',
'test label'
]
)
label = shape.Label(0, 0, 'test label', align='left', rotation=0.5)
command = self.geda_writer._convert_label(label)
self.assertEquals(
command,
[
'T 0 0 9 10 1 1 270 0 1',
'test label'
]
)
def test_create_segment(self):
""" Tests creating segment commands from NetPoint objects. """
np1 = NetPoint('0a0', 0, 0)
np2 = NetPoint('0a10', 0, 10)
self.assertEquals(
self.geda_writer._create_segment(np1, np2),
['N 0 0 0 100 4']
)
np1 = NetPoint('100a40', 100, 40)
np2 = NetPoint('50a40', 50, 40)
attrs = {'netname': 'test_net'}
self.assertEquals(
self.geda_writer._create_segment(np1, np2, attributes=attrs),
[
'N 1000 400 500 400 4',
'{',
'T 1100 500 5 10 1 1 0 0 1',
'netname=test_net',
'}',
]
)
def test_convert_polygon(self):
""" Tests converting Polygon objects to path commands."""
polygon = shape.Polygon()
polygon.add_point((0, 0))
polygon.add_point((100, 200))
polygon.add_point((150, 200))
polygon.add_point((200, 100))
self.assertEquals(
self.geda_writer._convert_polygon(polygon),
[
'H 3 10 0 0 -1 -1 0 -1 -1 -1 -1 -1 5',
'M 0,0',
'L 1000,2000',
'L 1500,2000',
'L 2000,1000',
'z'
]
)
def test_convert_bezier(self):
""" Tests converting BezierCurve objects to path commands. """
curve = shape.BezierCurve((9, -10), (11, -10), (3, -12), (17, -12))
self.assertEquals(
self.geda_writer._convert_bezier(curve),
[
'H 3 10 0 0 -1 -1 0 -1 -1 -1 -1 -1 2',
'M 30,-120',
'C 90,-100 110,-100 170,-120',
]
)
def test_create_path(self):
""" Test creating path commands from SBody objects. """
shapes = [
shape.Line((10, 10), (50, 10)),
shape.BezierCurve((70, 10), (80, 30), (50, 10), (80, 40)),
shape.BezierCurve((80, 50), (70, 70), (80, 40), (50, 70)),
shape.Line((50, 70), (10, 70)),
]
self.assertEquals(
self.geda_writer._create_path(shapes),
[
'H 3 10 0 0 -1 -1 0 -1 -1 -1 -1 -1 5',
'M 100,100',
'L 500,100',
'C 700,100 800,300 800,400',
'C 800,500 700,700 500,700',
'L 100,700',
]
)
shapes.append(shape.Line((10, 70), (10, 10)))
self.assertEquals(
self.geda_writer._create_path(shapes),
[
'H 3 10 0 0 -1 -1 0 -1 -1 -1 -1 -1 6',
'M 100,100',
'L 500,100',
'C 700,100 800,300 800,400',
'C 800,500 700,700 500,700',
'L 100,700',
'z',
]
)
def test_is_valid_path(self):
""" Tests if SBody objects contain valid paths."""
shapes = [
shape.Line((10, 10), (50, 10)), #L 500,100
shape.BezierCurve((70, 10), (80, 30), (50, 10), (80, 40)), #C 700,100 800,300 800,400
shape.BezierCurve((80, 50), (70, 70), (80, 40), (50, 70)), #C 800,500 700,700 500,700
shape.Line((50, 70), (10, 70)), #L 100,700
]
body = components.SBody()
body.shapes = shapes
self.assertTrue(self.geda_writer.is_valid_path(body))
body.add_shape(shape.Line((10, 70), (10, 10)))
self.assertTrue(self.geda_writer.is_valid_path(body))
shapes = [
shape.Line((10, 10), (50, 10)), #L 500,100
shape.BezierCurve((70, 10), (80, 30), (50, 10), (80, 40)), #C 700,100 800,300 800,400
shape.Line((50, 70), (10, 70)), #L 100,700
]
body.shapes = shapes
self.assertFalse(self.geda_writer.is_valid_path(body))
body.add_shape(shape.Circle(0, 0, 10))
self.assertFalse(self.geda_writer.is_valid_path(body))
def test_conv_angle(self):
""" Test conversion of angles from pi radians to degrees. """
angle_samples = [
# angle, steps, expected result
(0.0, 1, 0),
(0.0, 10.0, 0),
(0.5, 90, 270),
(0.8, 1, 216),
(0.8, 90, 270),
(1.5, 1, 90),
(1.5, 90, 90),
]
for angle, steps, expected in angle_samples:
self.assertEquals(
self.geda_writer.conv_angle(angle, steps),
expected
)
class GEDAWriteTopLevelShapeTests(GEDAWriterTestCase):
    """ Tests generating gEDA commands for shapes/pins attached directly
        to a design (not to a component).
    """

    def test_generating_geda_commands_for_toplevel_shapes(self):
        design = Design()
        design.shapes = [
            shape.Line((0, 0), (0, 50)),
            shape.Circle(0, 0, 300),
        ]
        design.pins = [
            components.Pin('E', (0, 0), (0, 30)),
            components.Pin('E', (0, 0), (0, 30)),
        ]
        commands = self.geda_writer.generate_body_commands(design)
        ## default pins require 6 commands, shapes require 1 command
        # assertEquals is a deprecated alias; use assertEqual.
        self.assertEqual(len(commands), 2*6 + 2*1)
| 33.035411 | 107 | 0.550658 |
79562fb42fc1642730179f0234e3051e3fcca5e5 | 11,284 | py | Python | xgcm/gridops.py | yz3062/xgcm | d3555b43e494c0641d2b3e7865d819cb02eebaed | [
"MIT"
] | 1 | 2020-05-22T14:48:16.000Z | 2020-05-22T14:48:16.000Z | xgcm/gridops.py | yz3062/xgcm | d3555b43e494c0641d2b3e7865d819cb02eebaed | [
"MIT"
] | 4 | 2021-07-26T15:10:42.000Z | 2021-09-13T15:06:17.000Z | xgcm/gridops.py | andersy005/xgcm | 95f4f33d72d2add00136e27f6b3bedecb97d4d77 | [
"MIT"
] | null | null | null | # python 3 compatiblity
import numpy as np
import xarray as xr
# make some functions for taking divergence
from dask import array as da
def _append_to_name(array, append):
try:
return array.name + "_" + append
except TypeError:
return append
class GCMDataset(object):
    """Representation of GCM (General Circulation Model) output data, numerical
    grid information, and operations related to finite-volume analysis.
    """

    # without these variables in the dataset, can't function
    needed_vars = [
        "Z",
        "Zp1",
        "Zl",
        "Zu",
        "X",
        "Xp1",
        "Y",
        "Yp1",
        "XC",
        "YC",
        "XG",
        "YG",
        "drF",
        "drC",
        "dxC",
        "dxG",
        "dyC",
        "dyG",
    ]

    def __init__(self, ds):
        """Initialize GCM object.

        Parameters
        ----------
        ds : xr.Dataset

        Raises
        ------
        KeyError
            If any variable listed in ``needed_vars`` is missing from ``ds``.
        """
        # check that needed variables are present
        for v in self.needed_vars:
            if v not in ds:
                raise KeyError("Needed variable %s not found in dataset" % v)
        self.ds = ds

    # silly functions
    def _get_coords_from_dims(self, dims, replace=None):
        """Utility function for quickly fetching coordinates from parent
        dataset, optionally renaming dimensions via the ``replace`` mapping.
        """
        dims = list(dims)
        if replace:
            for k in replace:
                dims[dims.index(k)] = replace[k]
        return {dim: self.ds[dim] for dim in dims}, dims

    def _get_hfac_for_array(self, array):
        """Figure out the correct hfac given array dimensions.

        Returns ``None`` when no matching hFac variable is available.
        """
        hfac = None
        if "X" in array.dims and "Y" in array.dims and "HFacC" in self.ds:
            hfac = self.ds.HFacC
        if "Xp1" in array.dims and "Y" in array.dims and "HFacW" in self.ds:
            hfac = self.ds.HFacW
        # BUGFIX: this branch previously tested for "HFacW" in self.ds while
        # assigning HFacS, so the south-face hFac could be selected when
        # absent (AttributeError) or skipped when present without HFacW.
        if "X" in array.dims and "Yp1" in array.dims and "HFacS" in self.ds:
            hfac = self.ds.HFacS
        return hfac

    # Vertical Differences, Derivatives, and Interpolation
    def pad_zl_to_zp1(self, array, fill_value=0.0, zlname="Zl", zp1name="Zp1"):
        """Pad an array located at zl points such that it is located at
        zp1 points. An additional fill value is required for the bottom point.

        Parameters
        ----------
        array : xr.DataArray
            The array to pad. Must have the coordinate zl.
        fill_value : number, optional
            The value to be used at the bottom point.
        zlname : str, optional
            The variable name for the zl point
        zp1name : str, optional
            The variable name for the zp1 point

        Returns
        -------
        padded : xr.DataArray
            Padded array with vertical coordinate zp1.
        """
        coords, dims = self._get_coords_from_dims(array.dims)
        zdim = dims.index(zlname)
        # shape of the new array to concat at the bottom
        shape = list(array.shape)
        shape[zdim] = 1
        # replace Zl with the bottom level
        coords[zlname] = np.atleast_1d(self.ds[zp1name][-1].data)
        # an array of fill values at the bottom;
        # need different behavior for numpy vs dask
        if array.chunks:
            chunks = list(array.data.chunks)
            chunks[zdim] = (1,)
            zarr = fill_value * da.ones(shape, dtype=array.dtype, chunks=chunks)
            zeros = xr.DataArray(zarr, coords, dims).chunk()
        else:
            zarr = np.zeros(shape, array.dtype)
            zeros = xr.DataArray(zarr, coords, dims)
        newarray = xr.concat([array, zeros], dim=zlname).rename({zlname: zp1name})
        if newarray.chunks:
            # this assumes that there was only one chunk in the vertical to
            # begin with; how can we do that better?
            return newarray.chunk({zp1name: len(newarray[zp1name])})
        else:
            return newarray

    def diff_zp1_to_z(self, array, zname="Z", zp1name="Zp1"):
        """Take the vertical difference of an array located at zp1 points,
        resulting in a new array at z points.

        Parameters
        ----------
        array : xr.DataArray
            The array to difference. Must have the coordinate zp1.
        zname : str, optional
            The variable name for the z point
        zp1name : str, optional
            The variable name for the zp1 point

        Returns
        -------
        diff : xr.DataArray
            A new array with vertical coordinate z.
        """
        a_up = array.isel(**{zp1name: slice(None, -1)})
        a_dn = array.isel(**{zp1name: slice(1, None)})
        a_diff = a_up.data - a_dn.data
        # dimensions and coords of new array
        coords, dims = self._get_coords_from_dims(array.dims, replace={zp1name: zname})
        return xr.DataArray(
            a_diff, coords, dims, name=_append_to_name(array, "diff_zp1_to_z")
        )

    def diff_zl_to_z(self, array, fill_value=0.0):
        """Take the vertical difference of an array located at zl points,
        resulting in a new array at z points. A fill value is required to
        provide the bottom boundary condition for ``array``.

        Parameters
        ----------
        array : xr.DataArray
            The array to difference. Must have the coordinate zl.
        fill_value : number, optional
            The value to be used at the bottom point. The default (0) is the
            appropriate choice for vertical fluxes.

        Returns
        -------
        diff : xr.DataArray
            A new array with vertical coordinate z.
        """
        array_zp1 = self.pad_zl_to_zp1(array, fill_value)
        array_diff = self.diff_zp1_to_z(array_zp1)
        return array_diff.rename(_append_to_name(array, "_diff_zl_to_z"))

    def diff_z_to_zp1(self, array):
        """Take the vertical difference of an array located at z points,
        resulting in a new array at zp1 points, but missing the upper and
        lower point.

        Parameters
        ----------
        array : xr.DataArray
            The array to difference. Must have the coordinate z.

        Returns
        -------
        diff : xr.DataArray
            A new array with vertical coordinate zp1.
        """
        a_up = array.isel(Z=slice(None, -1))
        a_dn = array.isel(Z=slice(1, None))
        a_diff = a_up.data - a_dn.data
        # dimensions and coords of new array
        coords, dims = self._get_coords_from_dims(array.dims, replace={"Z": "Zp1"})
        # trim vertical: interior points only
        coords["Zp1"] = coords["Zp1"][1:-1]
        return xr.DataArray(
            a_diff, coords, dims, name=_append_to_name(array, "diff_z_to_zp1")
        )

    def derivative_zp1_to_z(self, array):
        """Take the vertical derivative of an array located at zp1 points,
        resulting in a new array at z points.

        Parameters
        ----------
        array : xr.DataArray
            The array to differentiate. Must have the coordinate zp1.

        Returns
        -------
        deriv : xr.DataArray
            A new array with vertical coordinate z.
        """
        a_diff = self.diff_zp1_to_z(array)
        dz = self.ds.drF
        return a_diff / dz

    def derivative_zl_to_z(self, array, fill_value=0.0):
        """Take the vertical derivative of an array located at zl points,
        resulting in a new array at z points. A fill value is required to
        provide the bottom boundary condition for ``array``.

        Parameters
        ----------
        array : xr.DataArray
            The array to differentiate. Must have the coordinate zl.
        fill_value : number, optional
            The assumed value at the bottom point. The default (0) is the
            appropriate choice for vertical fluxes.

        Returns
        -------
        deriv : xr.DataArray
            A new array with vertical coordinate z.
        """
        a_diff = self.diff_zl_to_z(array, fill_value)
        dz = self.ds.drF
        return a_diff / dz

    def derivative_z_to_zp1(self, array):
        """Take the vertical derivative of an array located at z points,
        resulting in a new array at zp1 points, but missing the upper and
        lower point.

        Parameters
        ----------
        array : xr.DataArray
            The array to differentiate. Must have the coordinate z.

        Returns
        -------
        diff : xr.DataArray
            A new array with vertical coordinate zp1.
        """
        a_diff = self.diff_z_to_zp1(array)
        # interior drC points match the trimmed zp1 coordinate
        dz = self.ds.drC[1:-1]
        return a_diff / dz

    # Vertical Integrals
    # if the array to integrate is 1D or 2D, don't multiply by hFac
    # but what if it is 3D, how do we decide what to do?
    # what if points are missing? xray should take care of that
    # how do we pick which hFac to use? look at dims
    def integrate_z(self, array, average=False):
        """Integrate ``array`` in vertical dimension, accounting for vertical
        grid geometry.

        Parameters
        ----------
        array : xr.DataArray
            The array to integrate. Must have the dimension Z.
        average : bool, optional
            If ``True``, return an average instead of an integral.

        Returns
        -------
        integral : xr.DataArray
            The vertical integral of ``array``.
        """
        if "Z" not in array.dims:
            raise ValueError("Can only integrate arrays on Z grid")
        dz = self.ds.drF
        # look at horizontal dimensions and try to find an hfac
        hfac = self._get_hfac_for_array(array)
        if hfac is not None:
            # broadcast hfac against dz; BUGFIX: use an out-of-place multiply
            # so the parent dataset's drF variable is not mutated in place.
            dz = dz * hfac
        a_int = (array * dz).sum(dim="Z")
        if average:
            return a_int / dz.sum(dim="Z")
        else:
            return a_int

    # Horizontal Differences, Derivatives, and Interpolation
    # doesn't actually need parent ds
    # this could go in xray
    def roll(self, array, n, dim):
        """Clone of numpy.roll for xr.DataArrays."""
        left = array.isel(**{dim: slice(None, -n)})
        right = array.isel(**{dim: slice(-n, None)})
        return xr.concat([right, left], dim=dim)

    def diff_xp1_to_x(self, array):
        """Difference DataArray ``array`` in the x direction.
        Assumes that ``array`` is located at the xp1 point."""
        left = array
        right = self.roll(array, -1, "Xp1")
        if array.chunks:
            right = right.chunk(array.chunks)
        diff = right.data - left.data
        coords, dims = self._get_coords_from_dims(array.dims, replace={"Xp1": "X"})
        return xr.DataArray(diff, coords, dims).rename(
            _append_to_name(array, "diff_xp1_to_x")
        )

    def diff_yp1_to_y(self, array):
        """Difference DataArray ``array`` in the y direction.
        Assumes that ``array`` is located at the yp1 point."""
        left = array
        right = self.roll(array, -1, "Yp1")
        if array.chunks:
            right = right.chunk(array.chunks)
        diff = right.data - left.data
        coords, dims = self._get_coords_from_dims(array.dims, replace={"Yp1": "Y"})
        return xr.DataArray(diff, coords, dims).rename(
            _append_to_name(array, "_diff_yp1_to_y")
        )
| 33.987952 | 87 | 0.579848 |
79562fb4cecb215ce0c350e1f8572d999345b21f | 24,284 | py | Python | NodeGraphQt/qgraphics/node_base.py | MaGrosseGit/NodeGraphQt | cc79d0c1285290b485d2a2376a0cbec786b5af2f | [
"MIT"
] | null | null | null | NodeGraphQt/qgraphics/node_base.py | MaGrosseGit/NodeGraphQt | cc79d0c1285290b485d2a2376a0cbec786b5af2f | [
"MIT"
] | null | null | null | NodeGraphQt/qgraphics/node_base.py | MaGrosseGit/NodeGraphQt | cc79d0c1285290b485d2a2376a0cbec786b5af2f | [
"MIT"
] | null | null | null | #!/usr/bin/python
from .. import QtGui, QtCore, QtWidgets
from ..constants import (IN_PORT, OUT_PORT,
NODE_WIDTH, NODE_HEIGHT,
NODE_ICON_SIZE, ICON_NODE_BASE,
NODE_SEL_COLOR, NODE_SEL_BORDER_COLOR,
PORT_FALLOFF, Z_VAL_NODE, Z_VAL_NODE_WIDGET,
ITEM_CACHE_MODE)
from ..errors import NodeWidgetError
from .node_abstract import AbstractNodeItem
from .port import PortItem
class XDisabledItem(QtWidgets.QGraphicsItem):
    """
    Node disabled overlay item: draws a large "X" (with optional text)
    over the parent node when the node is disabled. Hidden by default.
    Args:
        parent (NodeItem): the parent node item.
        text (str): disable overlay text.
    """
    def __init__(self, parent=None, text=None):
        super(XDisabledItem, self).__init__(parent)
        # sit above the node's embedded widgets.
        self.setZValue(Z_VAL_NODE_WIDGET + 2)
        # only shown when the node is disabled (see NodeItem.disabled setter).
        self.setVisible(False)
        # RGBA used for the cross and the translucent backdrop.
        self.color = (0, 0, 0, 255)
        self.text = text
    def boundingRect(self):
        # overlay covers exactly the parent node's area.
        return self.parentItem().boundingRect()
    def paint(self, painter, option, widget):
        """
        Draws the overlay disabled X item on top of a node item.
        Args:
            painter (QtGui.QPainter): painter used for drawing the item.
            option (QtGui.QStyleOptionGraphicsItem):
                used to describe the parameters needed to draw.
            widget (QtWidgets.QWidget): not used.
        """
        painter.save()
        # expand slightly beyond the node so the cross overhangs the edges.
        margin = 20
        rect = self.boundingRect()
        dis_rect = QtCore.QRectF(rect.left() - (margin / 2),
                                 rect.top() - (margin / 2),
                                 rect.width() + margin,
                                 rect.height() + margin)
        # 1) thick dark cross.
        pen = QtGui.QPen(QtGui.QColor(*self.color), 8)
        pen.setCapStyle(QtCore.Qt.RoundCap)
        painter.setPen(pen)
        painter.drawLine(dis_rect.topLeft(), dis_rect.bottomRight())
        painter.drawLine(dis_rect.topRight(), dis_rect.bottomLeft())
        # 2) translucent backdrop over the whole node.
        bg_color = QtGui.QColor(*self.color)
        bg_color.setAlpha(100)
        bg_margin = -0.5
        bg_rect = QtCore.QRectF(dis_rect.left() - (bg_margin / 2),
                                dis_rect.top() - (bg_margin / 2),
                                dis_rect.width() + bg_margin,
                                dis_rect.height() + bg_margin)
        painter.setPen(QtGui.QPen(QtGui.QColor(0, 0, 0, 0)))
        painter.setBrush(bg_color)
        painter.drawRoundedRect(bg_rect, 5, 5)
        # 3) thin red cross drawn over the backdrop.
        pen = QtGui.QPen(QtGui.QColor(155, 0, 0, 255), 0.7)
        painter.setPen(pen)
        painter.drawLine(dis_rect.topLeft(), dis_rect.bottomRight())
        painter.drawLine(dis_rect.topRight(), dis_rect.bottomLeft())
        # 4) red dots on the four corners of the cross.
        point_size = 4.0
        point_pos = (dis_rect.topLeft(), dis_rect.topRight(),
                     dis_rect.bottomLeft(), dis_rect.bottomRight())
        painter.setBrush(QtGui.QColor(255, 0, 0, 255))
        for p in point_pos:
            p.setX(p.x() - (point_size / 2))
            p.setY(p.y() - (point_size / 2))
            point_rect = QtCore.QRectF(
                p, QtCore.QSizeF(point_size, point_size))
            painter.drawEllipse(point_rect)
        # 5) optional centered text badge (e.g. "DISABLED").
        if self.text:
            font = painter.font()
            font.setPointSize(10)
            painter.setFont(font)
            font_metrics = QtGui.QFontMetrics(font)
            font_width = font_metrics.width(self.text)
            font_height = font_metrics.height()
            txt_w = font_width * 1.25
            txt_h = font_height * 2.25
            text_bg_rect = QtCore.QRectF((rect.width() / 2) - (txt_w / 2),
                                         (rect.height() / 2) - (txt_h / 2),
                                         txt_w, txt_h)
            painter.setPen(QtGui.QPen(QtGui.QColor(255, 0, 0), 0.5))
            painter.setBrush(QtGui.QColor(*self.color))
            painter.drawRoundedRect(text_bg_rect, 2, 2)
            text_rect = QtCore.QRectF((rect.width() / 2) - (font_width / 2),
                                      (rect.height() / 2) - (font_height / 2),
                                      txt_w * 2, font_height * 2)
            painter.setPen(QtGui.QPen(QtGui.QColor(255, 0, 0), 1))
            painter.drawText(text_rect, self.text)
        painter.restore()
class NodeItem(AbstractNodeItem):
"""
Base Node item.
Args:
name (str): name displayed on the node.
parent (QtWidgets.QGraphicsItem): parent item.
"""
    def __init__(self, name='node', parent=None):
        super(NodeItem, self).__init__(name, parent)
        # default icon, scaled down if taller than NODE_ICON_SIZE.
        pixmap = QtGui.QPixmap(ICON_NODE_BASE)
        if pixmap.size().height() > NODE_ICON_SIZE:
            pixmap = pixmap.scaledToHeight(NODE_ICON_SIZE,
                                           QtCore.Qt.SmoothTransformation)
        self._properties['icon'] = ICON_NODE_BASE
        self._icon_item = QtWidgets.QGraphicsPixmapItem(pixmap, self)
        self._icon_item.setTransformationMode(QtCore.Qt.SmoothTransformation)
        # node title text item.
        self._text_item = QtWidgets.QGraphicsTextItem(self.name, self)
        # overlay shown when the node is disabled.
        self._x_item = XDisabledItem(self, 'DISABLED')
        # maps: PortItem -> its QGraphicsTextItem label.
        self._input_items = {}
        self._output_items = {}
        # embedded node widgets keyed by name.
        self._widgets = {}
        # low-detail rendering state (see set_proxy_mode / auto_switch_mode).
        self._proxy_mode = False
        # on-screen width (px) below which proxy mode kicks in.
        # NOTE(review): attribute name misspells "proxy"; kept as-is since
        # auto_switch_mode references this exact name.
        self._porxy_mode_threshold = 70
    def paint(self, painter, option, widget):
        """
        Draws the node base not the ports.
        Args:
            painter (QtGui.QPainter): painter used for drawing the item.
            option (QtGui.QStyleOptionGraphicsItem):
                used to describe the parameters needed to draw.
            widget (QtWidgets.QWidget): not used.
        """
        # may toggle proxy (low-detail) mode based on on-screen size.
        self.auto_switch_mode()
        painter.save()
        bg_border = 1.0
        rect = QtCore.QRectF(0.5 - (bg_border / 2),
                             0.5 - (bg_border / 2),
                             self._width + bg_border,
                             self._height + bg_border)
        radius = 2
        border_color = QtGui.QColor(*self.border_color)
        path = QtGui.QPainterPath()
        path.addRoundedRect(rect, radius, radius)
        # base background fill.
        rect = self.boundingRect()
        bg_color = QtGui.QColor(*self.color)
        painter.setBrush(bg_color)
        painter.setPen(QtCore.Qt.NoPen)
        painter.drawRoundedRect(rect, radius, radius)
        # selection tint drawn over the base fill.
        if self.selected and NODE_SEL_COLOR:
            painter.setBrush(QtGui.QColor(*NODE_SEL_COLOR))
            painter.drawRoundedRect(rect, radius, radius)
        # darkened strip behind the title label.
        label_rect = QtCore.QRectF(rect.left() + (radius / 2),
                                   rect.top() + (radius / 2),
                                   self._width - (radius / 1.25),
                                   28)
        path = QtGui.QPainterPath()
        path.addRoundedRect(label_rect, radius / 1.5, radius / 1.5)
        painter.setBrush(QtGui.QColor(0, 0, 0, 50))
        painter.fillPath(path, painter.brush())
        # outline; thicker + recolored when selected.
        border_width = 0.8
        if self.selected and NODE_SEL_BORDER_COLOR:
            border_width = 1.2
            border_color = QtGui.QColor(*NODE_SEL_BORDER_COLOR)
        border_rect = QtCore.QRectF(rect.left() - (border_width / 2),
                                    rect.top() - (border_width / 2),
                                    rect.width() + border_width,
                                    rect.height() + border_width)
        pen = QtGui.QPen(border_color, border_width)
        # cosmetic pen keeps the border thin when zoomed out.
        pen.setCosmetic(self.viewer().get_zoom() < 0.0)
        path = QtGui.QPainterPath()
        path.addRoundedRect(border_rect, radius, radius)
        painter.setBrush(QtCore.Qt.NoBrush)
        painter.setPen(pen)
        painter.drawPath(path)
        painter.restore()
def mousePressEvent(self, event):
if event.button() == QtCore.Qt.LeftButton:
start = PortItem().boundingRect().width() - PORT_FALLOFF
end = self.boundingRect().width() - start
x_pos = event.pos().x()
if not start <= x_pos <= end:
event.ignore()
super(NodeItem, self).mousePressEvent(event)
def mouseReleaseEvent(self, event):
if event.modifiers() == QtCore.Qt.AltModifier:
event.ignore()
return
super(NodeItem, self).mouseReleaseEvent(event)
def mouseDoubleClickEvent(self, event):
viewer = self.viewer()
if viewer:
viewer.node_double_clicked.emit(self.id)
super(NodeItem, self).mouseDoubleClickEvent(event)
    def itemChange(self, change, value):
        # react to selection changes: update connected pipe colors and raise
        # the node's stacking order.
        if change == self.ItemSelectedChange and self.scene():
            self.reset_pipes()
            # `value` is the pending selection state (Qt calls this before
            # the change is applied).
            if value:
                self.hightlight_pipes()
            self.setZValue(Z_VAL_NODE)
            # NOTE(review): self.selected still holds the pre-change state
            # here, so a newly-selected node gets the raised Z value.
            if not self.selected:
                self.setZValue(Z_VAL_NODE + 1)
        return super(NodeItem, self).itemChange(change, value)
def _tooltip_disable(self, state):
tooltip = '<b>{}</b>'.format(self.name)
if state:
tooltip += ' <font color="red"><b>(DISABLED)</b></font>'
tooltip += '<br/>{}<br/>'.format(self.type_)
self.setToolTip(tooltip)
def _set_base_size(self, add_w=0.0, add_h=0.0):
"""
setup initial base size.
Args:
add_w (float): additional width.
add_h (float): additional height.
"""
self._width = NODE_WIDTH
self._height = NODE_HEIGHT
width, height = self.calc_size(add_w, add_h)
if width > self._width:
self._width = width
if height > self._height:
self._height = height
def _set_text_color(self, color):
"""
set text color.
Args:
color (tuple): color value in (r, g, b, a).
"""
text_color = QtGui.QColor(*color)
for port, text in self._input_items.items():
text.setDefaultTextColor(text_color)
for port, text in self._output_items.items():
text.setDefaultTextColor(text_color)
self._text_item.setDefaultTextColor(text_color)
def activate_pipes(self):
"""
active pipe color.
"""
ports = self.inputs + self.outputs
for port in ports:
for pipe in port.connected_pipes:
pipe.activate()
def hightlight_pipes(self):
"""
highlight pipe color.
"""
ports = self.inputs + self.outputs
for port in ports:
for pipe in port.connected_pipes:
pipe.highlight()
def reset_pipes(self):
"""
reset the pipe color.
"""
ports = self.inputs + self.outputs
for port in ports:
for pipe in port.connected_pipes:
pipe.reset()
    def calc_size(self, add_w=0.0, add_h=0.0):
        """
        calculate minimum node size from the label, embedded widgets and
        visible ports.
        Args:
            add_w (float): additional width.
            add_h (float): additional height.
        Returns:
            tuple: (width, height) minimum size in scene units.
        """
        # start from the title text size.
        width = self._text_item.boundingRect().width()
        height = self._text_item.boundingRect().height()
        # widest embedded widget can dominate the width.
        if self._widgets:
            wid_width = max([
                w.boundingRect().width() for w in self._widgets.values()
            ])
            if width < wid_width:
                width = wid_width
        port_height = 0.0
        # widest input port + label column.
        if self._input_items:
            input_widths = []
            for port, text in self._input_items.items():
                input_width = port.boundingRect().width() - PORT_FALLOFF
                if text.isVisible():
                    input_width += text.boundingRect().width() / 1.5
                input_widths.append(input_width)
            width += max(input_widths)
            # port_height keeps the value from the last iterated port.
            port_height = port.boundingRect().height()
        # widest output port + label column.
        if self._output_items:
            output_widths = []
            for port, text in self._output_items.items():
                output_width = port.boundingRect().width()
                if text.isVisible():
                    output_width += text.boundingRect().width() / 1.5
                output_widths.append(output_width)
            width += max(output_widths)
            port_height = port.boundingRect().height()
        # height grows with the longer of the two visible port columns.
        in_count = len([p for p in self.inputs if p.isVisible()])
        out_count = len([p for p in self.outputs if p.isVisible()])
        height += port_height * max([in_count, out_count])
        # stacked widget heights (plus one average row of padding) can
        # dominate the height.
        if self._widgets:
            wid_height = 0.0
            for w in self._widgets.values():
                wid_height += w.boundingRect().height()
            wid_height += wid_height / len(self._widgets.values())
            if wid_height > height:
                height = wid_height
        width += add_w
        height += add_h
        return width, height
def arrange_icon(self, h_offset=0.0, v_offset=0.0):
"""
Arrange node icon to the default top left of the node.
Args:
v_offset (float): vertical offset.
h_offset (float): horizontal offset.
"""
x = 2.0 + h_offset
y = 2.0 + v_offset
self._icon_item.setPos(x, y)
def arrange_label(self, h_offset=0.0, v_offset=0.0):
"""
Arrange node label to the default top center of the node.
Args:
v_offset (float): vertical offset.
h_offset (float): horizontal offset.
"""
text_rect = self._text_item.boundingRect()
text_x = (self._width / 2) - (text_rect.width() / 2)
text_x += h_offset
text_y = 1.0 + v_offset
self._text_item.setPos(text_x, text_y)
def arrange_widgets(self, v_offset=0.0):
"""
Arrange node widgets to the default center of the node.
Args:
v_offset (float): vertical offset.
"""
if not self._widgets:
return
wid_heights = sum(
[w.boundingRect().height() for w in self._widgets.values()])
pos_y = self._height / 2
pos_y -= wid_heights / 2
pos_y += v_offset
for widget in self._widgets.values():
rect = widget.boundingRect()
pos_x = (self._width / 2) - (rect.width() / 2)
widget.setPos(pos_x, pos_y)
pos_y += rect.height()
    def arrange_ports(self, v_offset=0.0):
        """
        Arrange input, output ports in the node layout: inputs stacked on
        the left edge, outputs on the right, each with its text label.
        Args:
            v_offset (float): port vertical offset.
        """
        width = self._width
        txt_offset = PORT_FALLOFF - 2
        spacing = 1
        # adjust input position
        inputs = [p for p in self.inputs if p.isVisible()]
        if inputs:
            port_width = inputs[0].boundingRect().width()
            port_height = inputs[0].boundingRect().height()
            # center ports on the left edge (half overhangs the node).
            port_x = (port_width / 2) * -1
            port_y = v_offset
            for port in inputs:
                port.setPos(port_x, port_y)
                port_y += port_height + spacing
        # adjust input text position
        for port, text in self._input_items.items():
            if port.isVisible():
                txt_x = port.boundingRect().width() / 2 - txt_offset
                text.setPos(txt_x, port.y() - 1.5)
        # adjust output position
        outputs = [p for p in self.outputs if p.isVisible()]
        if outputs:
            port_width = outputs[0].boundingRect().width()
            port_height = outputs[0].boundingRect().height()
            # center ports on the right edge.
            port_x = width - (port_width / 2)
            port_y = v_offset
            for port in outputs:
                port.setPos(port_x, port_y)
                port_y += port_height + spacing
        # adjust output text position (right-aligned against the port).
        for port, text in self._output_items.items():
            if port.isVisible():
                txt_width = text.boundingRect().width() - txt_offset
                txt_x = port.x() - txt_width
                text.setPos(txt_x, port.y() - 1.5)
def offset_label(self, x=0.0, y=0.0):
"""
offset the label in the node layout.
Args:
x (float): horizontal x offset
y (float): vertical y offset
"""
icon_x = self._text_item.pos().x() + x
icon_y = self._text_item.pos().y() + y
self._text_item.setPos(icon_x, icon_y)
    def draw_node(self):
        """
        Draw the node item in the scene: size the node to its contents,
        then lay out label, icon, ports and widgets.
        """
        height = self._text_item.boundingRect().height()
        # setup initial base size.
        self._set_base_size(add_w=0.0, add_h=height)
        # set text color when node is initialized.
        self._set_text_color(self.text_color)
        # set the tooltip
        self._tooltip_disable(self.disabled)
        # --- setup node layout ---
        # arrange label text
        self.arrange_label(h_offset=0.0, v_offset=0.0)
        # arrange icon
        self.arrange_icon(h_offset=0.0, v_offset=0.0)
        # arrange input and output ports.
        self.arrange_ports(v_offset=height + (height / 2))
        # arrange node widgets
        self.arrange_widgets(v_offset=height / 2)
        # schedule a repaint.
        self.update()
def post_init(self, viewer=None, pos=None):
"""
Called after node has been added into the scene.
Adjust the node layout and form after the node has been added.
Args:
viewer (NodeGraphQt.widgets.viewer.NodeViewer): not used
pos (tuple): cursor position.
"""
self.draw_node()
# set initial node position.
if pos:
self.xy_pos = pos
def auto_switch_mode(self):
    """Toggle proxy (simplified) display mode based on the node's on-screen width."""
    # ItemCoordinateCache already rasterizes the item; skip proxy switching.
    if ITEM_CACHE_MODE is QtWidgets.QGraphicsItem.ItemCoordinateCache:
        return
    rect = self.sceneBoundingRect()
    l = self.viewer().mapToGlobal(self.viewer().mapFromScene(rect.topLeft()))
    r = self.viewer().mapToGlobal(self.viewer().mapFromScene(rect.topRight()))
    # width is the node width in screen (pixel) coordinates.
    width = r.x() - l.x()
    # NOTE(review): "_porxy_mode_threshold" looks misspelled ("proxy") but the
    # attribute is defined elsewhere in the class, so the name is kept as-is.
    self.set_proxy_mode(width < self._porxy_mode_threshold)
def set_proxy_mode(self, mode):
    """
    Enable/disable the proxy (simplified) display mode.

    When *mode* is True, embedded widgets, ports, port labels, connected
    pipes, the name text and the icon are all hidden.
    """
    # no-op when the mode is unchanged.
    if mode is self._proxy_mode:
        return
    self._proxy_mode = mode
    visible = not mode

    # embedded node widgets.
    for w in self._widgets.values():
        w.widget.setVisible(visible)

    # input ports, their labels and connected pipes.
    for port, text in self._input_items.items():
        port.setVisible(visible)
        text.setVisible(visible)
        for pipe in port.connected_pipes:
            pipe.setVisible(visible)

    # output ports, their labels and connected pipes.
    for port, text in self._output_items.items():
        port.setVisible(visible)
        text.setVisible(visible)
        for pipe in port.connected_pipes:
            pipe.setVisible(visible)

    self._text_item.setVisible(visible)
    self._icon_item.setVisible(visible)
@property
def icon(self):
    """str: icon path stored in the node properties dict."""
    return self._properties['icon']
@icon.setter
def icon(self, path=None):
    """Set the node icon; a falsy *path* falls back to the default icon."""
    self._properties['icon'] = path
    path = path or ICON_NODE_BASE
    pixmap = QtGui.QPixmap(path)
    # scale oversized icons down to the standard node icon height.
    if pixmap.size().height() > NODE_ICON_SIZE:
        pixmap = pixmap.scaledToHeight(NODE_ICON_SIZE,
                                       QtCore.Qt.SmoothTransformation)
    self._icon_item.setPixmap(pixmap)
    # re-run the layout when the node is already in a scene.
    if self.scene():
        self.post_init()
    self.update()
@AbstractNodeItem.width.setter
def width(self, width=0.0):
    """Set the node width, never going below the calculated minimum width."""
    w, h = self.calc_size()
    width = width if width > w else w
    AbstractNodeItem.width.fset(self, width)
@AbstractNodeItem.height.setter
def height(self, height=0.0):
    """Set the node height, clamped to the calculated minimum (at least 70)."""
    min_w, min_h = self.calc_size()
    # minimum node height is 70 or the calculated content height.
    floor_h = max(min_h, 70)
    AbstractNodeItem.height.fset(self, max(height, floor_h))
@AbstractNodeItem.disabled.setter
def disabled(self, state=False):
    """Disable/enable the node: grey out embedded widgets and update tooltip."""
    AbstractNodeItem.disabled.fset(self, state)
    for n, w in self._widgets.items():
        w.widget.setDisabled(state)
    self._tooltip_disable(state)
    # show the disabled overlay item while the node is disabled.
    self._x_item.setVisible(state)
@AbstractNodeItem.selected.setter
def selected(self, selected=False):
    """Select the node and highlight its connected pipes."""
    AbstractNodeItem.selected.fset(self, selected)
    if selected:
        # (pipe API method is spelled "hightlight"; kept as defined elsewhere)
        self.hightlight_pipes()
@AbstractNodeItem.name.setter
def name(self, name=''):
    """Rename the node and refresh the displayed label / layout."""
    AbstractNodeItem.name.fset(self, name)
    self._text_item.setPlainText(name)
    # re-draw only when the node is already part of a scene.
    if self.scene():
        self.draw_node()
    self.update()
@AbstractNodeItem.color.setter
def color(self, color=(100, 100, 100, 255)):
    """Set the node RGBA background color and repaint."""
    AbstractNodeItem.color.fset(self, color)
    if self.scene():
        self.scene().update()
    self.update()
@AbstractNodeItem.text_color.setter
def text_color(self, color=(100, 100, 100, 255)):
    """Set the node RGBA text color and repaint."""
    AbstractNodeItem.text_color.fset(self, color)
    self._set_text_color(color)
    self.update()
@property
def inputs(self):
    """
    Returns:
        list[PortItem]: input port graphic items.
    """
    # the dict keys are the port items; insertion order is preserved.
    return [port for port in self._input_items]
@property
def outputs(self):
    """
    Returns:
        list[PortItem]: output port graphic items.
    """
    # the dict keys are the port items; insertion order is preserved.
    return [port for port in self._output_items]
def add_input(self, name='input', multi_port=False, display_name=True):
    """
    Args:
        name (str): name for the port.
        multi_port (bool): allow multiple connections.
        display_name (bool): display the port name.

    Returns:
        PortItem: input item widget
    """
    port = PortItem(self)
    port.name = name
    port.port_type = IN_PORT
    port.multi_connection = multi_port
    port.display_name = display_name
    # label item drawn next to the port.
    text = QtWidgets.QGraphicsTextItem(port.name, self)
    text.font().setPointSize(8)
    text.setFont(text.font())
    text.setVisible(display_name)
    text.setCacheMode(ITEM_CACHE_MODE)
    self._input_items[port] = text
    # re-run the layout when the node is already in a scene.
    if self.scene():
        self.post_init()
    return port
def add_output(self, name='output', multi_port=False, display_name=True):
    """
    Args:
        name (str): name for the port.
        multi_port (bool): allow multiple connections.
        display_name (bool): display the port name.

    Returns:
        PortItem: output item widget
    """
    port = PortItem(self)
    port.name = name
    port.port_type = OUT_PORT
    port.multi_connection = multi_port
    port.display_name = display_name
    # label item drawn next to the port.
    text = QtWidgets.QGraphicsTextItem(port.name, self)
    text.font().setPointSize(8)
    text.setFont(text.font())
    text.setVisible(display_name)
    text.setCacheMode(ITEM_CACHE_MODE)
    self._output_items[port] = text
    # re-run the layout when the node is already in a scene.
    if self.scene():
        self.post_init()
    return port
def get_input_text_item(self, port_item):
    """
    Args:
        port_item (PortItem): input port item.

    Returns:
        QGraphicsTextItem: graphic item used for the port text.
    """
    return self._input_items[port_item]
def get_output_text_item(self, port_item):
    """
    Args:
        port_item (PortItem): output port item.

    Returns:
        QGraphicsTextItem: graphic item used for the port text.
    """
    return self._output_items[port_item]
@property
def widgets(self):
    """dict: shallow copy of the embedded node-widget mapping."""
    return dict(self._widgets)
def add_widget(self, widget):
    """Embed a node widget wrapper, keyed by its ``name`` attribute."""
    self._widgets[widget.name] = widget
def get_widget(self, name):
    """
    Return the embedded node widget registered under *name*.

    Raises:
        NodeWidgetError: when no (truthy) widget is stored for *name*.
    """
    widget = self._widgets.get(name)
    if not widget:
        raise NodeWidgetError('node has no widget "{}"'.format(name))
    return widget
def has_widget(self, name):
    """Return True when a widget named *name* is embedded in this node."""
    # membership test on the dict itself; the ``.keys()`` call was redundant.
    return name in self._widgets
def delete(self):
    """Delete all of the node's ports, then the node itself."""
    # iterate the dict keys directly; the text values were unused in the
    # original ``.items()`` loops.
    for port in self._input_items:
        port.delete()
    for port in self._output_items:
        port.delete()
    super(NodeItem, self).delete()
def from_dict(self, node_dict):
    """
    Restore the node state from a serialized dict.

    NOTE: pops the 'widgets' entry from *node_dict* (mutates the argument).
    Widget values are only applied to already-registered widget names;
    unknown names are silently ignored.
    """
    super(NodeItem, self).from_dict(node_dict)
    widgets = node_dict.pop('widgets', {})
    for name, value in widgets.items():
        if self._widgets.get(name):
            self._widgets[name].value = value
| 34.154712 | 82 | 0.565722 |
79563210ee66e8a3ef51b30587c4e4ccef13f529 | 2,985 | py | Python | src/marapp_metrics/helpers/earthengine.py | natgeosociety/marapp-metrics | f3883452fbbe4101c7a6a6481316fe78246c8be2 | [
"Apache-2.0"
] | null | null | null | src/marapp_metrics/helpers/earthengine.py | natgeosociety/marapp-metrics | f3883452fbbe4101c7a6a6481316fe78246c8be2 | [
"Apache-2.0"
] | 4 | 2020-11-25T12:03:26.000Z | 2022-01-22T13:58:02.000Z | src/marapp_metrics/helpers/earthengine.py | natgeosociety/marapp-metrics | f3883452fbbe4101c7a6a6481316fe78246c8be2 | [
"Apache-2.0"
] | 1 | 2021-02-23T18:58:10.000Z | 2021-02-23T18:58:10.000Z | """
Copyright 2018-2020 National Geographic Society
Use of this software does not constitute endorsement by National Geographic
Society (NGS). The NGS name and NGS logo may not be used for any purpose without
written permission from NGS.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use
this file except in compliance with the License. You may obtain a copy of the
License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed
under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import os
import ee
GOOGLE_SERVICE_ACCOUNT = os.environ.get("GOOGLE_SERVICE_ACCOUNT")
def initialize_google_ee():
    """Initialize the EE library.

    Uses service-account key material from the GOOGLE_SERVICE_ACCOUNT
    environment variable when present, otherwise falls back to the
    default ``ee.Initialize()`` credential flow.
    """
    if GOOGLE_SERVICE_ACCOUNT:
        credentials = ee.ServiceAccountCredentials(
            None, key_data=GOOGLE_SERVICE_ACCOUNT
        )
        ee.Initialize(credentials)
    else:
        ee.Initialize()
def map_function(image, scale, reducers, keep_geom, best_effort, max_pixels, band=True):
    """Build a per-feature function that reduces *image* over each feature geometry.

    Args:
        image: ee.Image to reduce.
        scale: nominal scale (meters) passed to ``reduceRegion``.
        reducers: dict mapping a result key to an ee.Reducer.
        keep_geom: when False the feature geometry is dropped from the result.
        best_effort: forwarded to ``reduceRegion`` as ``bestEffort``.
        max_pixels: forwarded to ``reduceRegion`` as ``maxPixels``.
        band: when True, extract the single value under each key from the
            reduceRegion result instead of storing the whole dictionary.

    Returns:
        A closure taking an ee.Feature and returning it with the reduction
        results set as properties (one per key in *reducers*).
    """
    def reducer_wrapper(feat):
        geom = feat.geometry()
        for key, reducer in reducers.items():
            result = image.reduceRegion(
                reducer=reducer,
                geometry=geom,
                scale=scale,
                maxPixels=max_pixels,
                bestEffort=best_effort,
                crs="EPSG:4326",
            )
            if not keep_geom:
                feat = feat.setGeometry(None)
            if band:
                result = result.get(key)
            feat = feat.set({key: result})
        return feat

    return reducer_wrapper
def simple_mask_function(im, mask_im, **kwargs):
    """
    Applies a simple mask onto im with a single QA value from mask_im.

    Supported comparison keywords: gt, gte, lt, lte, eq, eq_or, range.
    If several keywords are passed, the last one processed wins; when no
    keyword matches, None is returned (unchanged original behaviour).
    """
    # single-value comparisons dispatch straight to the mask image method.
    unary_ops = {
        "gt": mask_im.gt,
        "gte": mask_im.gte,
        "lt": mask_im.lt,
        "lte": mask_im.lte,
        "eq": mask_im.eq,
    }
    mask = None
    for key, value in kwargs.items():
        name = str(key)
        if name in unary_ops:
            mask = unary_ops[name](value)
        elif name == "eq_or":
            pair = list(value)
            mask = mask_im.eq(pair[0]).Or(mask_im.eq(pair[1]))
        elif name == "range":
            bounds = list(value)
            mask = mask_im.gte(bounds[0]).And(mask_im.lt(bounds[1]))
    if mask is None:
        return None
    return im.updateMask(mask)
def filter_fires(im):
    """
    Earth engine QA filter for fires.

    Keeps pixels whose BurnDate is a valid day-of-year (1..366) and whose
    QA value is at most 4.
    """
    burn_dates = im.select("BurnDate")
    # valid burn dates are strictly between 0 and 367 (i.e. day-of-year).
    valid_dates = burn_dates.gt(0).And(burn_dates.lt(367))
    valid_qa = im.select("QA").lte(4)
    # keep QA values 1-4 (5 is detection over agricultural areas)
    mask = valid_dates.And(valid_qa)
    return im.updateMask(mask)
| 29.85 | 88 | 0.610385 |
7956331882cd36cb0f3459af23461abe7158a399 | 21,835 | py | Python | pyevolve_graph.py | UJICompSci/ga-team-rocket | 7aecf21fb1258a954399d339d918b782b3d5b192 | [
"MIT"
] | 1 | 2017-02-08T16:54:59.000Z | 2017-02-08T16:54:59.000Z | pyevolve_graph.py | UJICompSci/ga-team-rocket | 7aecf21fb1258a954399d339d918b782b3d5b192 | [
"MIT"
] | null | null | null | pyevolve_graph.py | UJICompSci/ga-team-rocket | 7aecf21fb1258a954399d339d918b782b3d5b192 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# This code is part of Pyevolve.
# It requires matplotlib v.0.98.5.0+
from optparse import OptionParser
from optparse import OptionGroup
def graph_pop_heatmap_raw(pop, minimize, colormap="jet", filesave=None):
    """Render a heat map of raw scores (population x generation).

    NOTE(review): Python 2 module; relies on module-level ``pylab``,
    ``matplotlib`` imports done in ``__main__``.  ``minimize`` is accepted
    but unused here - kept for a uniform signature across graph functions.
    """
    pylab.imshow(pop, aspect="auto", interpolation="gaussian", cmap=matplotlib.cm.__dict__[colormap])
    pylab.title("Plot of pop. raw scores along the generations")
    pylab.xlabel('Population')
    pylab.ylabel('Generations')
    pylab.grid(True)
    pylab.colorbar()

    # save to file when a filename is given, otherwise show interactively.
    if filesave:
        pylab.savefig(filesave)
        print "Graph saved to %s file !" % (filesave,)
    else:
        pylab.show()
def graph_pop_heatmap_fitness(pop, minimize, colormap="jet", filesave=None):
    """Render a heat map of fitness scores (population x generation).

    NOTE(review): ``minimize`` is accepted but unused here - kept for a
    uniform signature across graph functions.
    """
    pylab.imshow(pop, aspect="equal", interpolation="gaussian", cmap=matplotlib.cm.__dict__[colormap])
    pylab.title("Plot of pop. fitness scores along the generations")
    pylab.xlabel('Population')
    pylab.ylabel('Generations')
    pylab.grid(True)
    pylab.colorbar()

    # save to file when a filename is given, otherwise show interactively.
    if filesave:
        pylab.savefig(filesave)
        print "Graph saved to %s file !" % (filesave,)
    else:
        pylab.show()
def graph_diff_raw(pop, minimize, filesave=None):
    """Plot per-generation (max - min) spread for raw and fitness scores.

    Two stacked subplots with the maximum spread annotated in each.
    NOTE(review): reads the module-global ``options`` (parsed in __main__)
    for the plot title; ``minimize`` is unused here.
    """
    x = []
    diff_raw_y = []
    diff_fit_y = []

    for it in pop:
        x.append(it["generation"])
        diff_raw_y.append(it["rawMax"] - it["rawMin"])
        diff_fit_y.append(it["fitMax"] - it["fitMin"])

    pylab.figure()
    pylab.subplot(211)

    pylab.plot(x, diff_raw_y, "g", label="Raw difference", linewidth=1.2)
    pylab.fill_between(x, diff_raw_y, color="g", alpha=0.1)

    diff_raw_max = max(diff_raw_y)
    gen_max_raw = x[diff_raw_y.index(diff_raw_max)]

    pylab.annotate("Maximum (%.2f)" % (diff_raw_max,), xy=(gen_max_raw, diff_raw_max), xycoords='data',
                   xytext=(-150, -20), textcoords='offset points',
                   arrowprops=dict(arrowstyle="->",
                                   connectionstyle="arc"),
                   )

    pylab.xlabel("Generation (#)")
    pylab.ylabel("Raw difference")
    pylab.title("Plot of evolution identified by '%s'" % (options.identify))

    pylab.grid(True)
    pylab.legend(prop=FontProperties(size="smaller"))

    pylab.subplot(212)

    pylab.plot(x, diff_fit_y, "b", label="Fitness difference", linewidth=1.2)
    pylab.fill_between(x, diff_fit_y, color="b", alpha=0.1)

    diff_fit_max = max(diff_fit_y)
    gen_max_fit = x[diff_fit_y.index(diff_fit_max)]

    pylab.annotate("Maximum (%.2f)" % (diff_fit_max,), xy=(gen_max_fit, diff_fit_max), xycoords='data',
                   xytext=(-150, -20), textcoords='offset points',
                   arrowprops=dict(arrowstyle="->",
                                   connectionstyle="arc"),
                   )

    pylab.xlabel("Generation (#)")
    pylab.ylabel("Fitness difference")
    pylab.grid(True)
    pylab.legend(prop=FontProperties(size="smaller"))

    if filesave:
        pylab.savefig(filesave)
        print "Graph saved to %s file !" % (filesave,)
    else:
        pylab.show()
def graph_maxmin_raw(pop, minimize, filesave=None):
    """Plot max/min/avg/std-dev raw scores per generation with annotations.

    In minimize mode the best score is the minimum of the per-generation
    minima; otherwise the maximum of the maxima.
    NOTE(review): reads the module-global ``options`` for the plot title.
    """
    x = []
    max_y = []
    min_y = []
    std_dev_y = []
    avg_y = []

    for it in pop:
        x.append(it["generation"])
        max_y.append(it["rawMax"])
        min_y.append(it["rawMin"])
        std_dev_y.append(it["rawDev"])
        avg_y.append(it["rawAve"])

    pylab.figure()
    pylab.plot(x, max_y, "g", label="Max raw", linewidth=1.2)
    pylab.plot(x, min_y, "r", label="Min raw", linewidth=1.2)
    pylab.plot(x, avg_y, "b", label="Avg raw", linewidth=1.2)
    pylab.plot(x, std_dev_y, "k", label="Std Dev raw", linewidth=1.2)

    pylab.fill_between(x, min_y, max_y, color="g", alpha=0.1, label="Diff max/min")

    # best score and its generation (direction depends on minimize mode).
    if minimize: raw_max = min(min_y)
    else: raw_max = max(max_y)

    if minimize: gen_max = x[min_y.index(raw_max)]
    else: gen_max = x[max_y.index(raw_max)]

    min_std = min(std_dev_y)
    gen_min_std = x[std_dev_y.index(min_std)]

    max_std = max(std_dev_y)
    gen_max_std = x[std_dev_y.index(max_std)]

    if minimize: annot_label = "Minimum (%.2f)" % (raw_max,)
    else: annot_label = "Maximum (%.2f)" % (raw_max,)

    pylab.annotate(annot_label, xy=(gen_max, raw_max), xycoords='data',
                   xytext=(8, 15), textcoords='offset points',
                   arrowprops=dict(arrowstyle="->",
                                   connectionstyle="arc"),
                   )

    pylab.annotate("Min StdDev (%.2f)" % (min_std,), xy=(gen_min_std, min_std), xycoords='data',
                   xytext=(8, 15), textcoords='offset points',
                   arrowprops=dict(arrowstyle="->",
                                   connectionstyle="arc"),
                   )

    pylab.annotate("Max StdDev (%.2f)" % (max_std,), xy=(gen_max_std, max_std), xycoords='data',
                   xytext=(8, 15), textcoords='offset points',
                   arrowprops=dict(arrowstyle="->",
                                   connectionstyle="arc"),
                   )

    pylab.xlabel("Generation (#)")
    pylab.ylabel("Raw score")
    pylab.title("Plot of evolution identified by '%s' (raw scores)" % (options.identify))

    pylab.grid(True)
    pylab.legend(prop=FontProperties(size="smaller"))

    if filesave:
        pylab.savefig(filesave)
        print "Graph saved to %s file !" % (filesave,)
    else:
        pylab.show()
def graph_maxmin_fitness(pop, minimize, filesave=None):
    """Plot max/min/avg fitness scores per generation with a best-score annotation.

    NOTE(review): reads the module-global ``options`` for the plot title.
    """
    x = []
    max_y = []
    min_y = []
    avg_y = []

    for it in pop:
        x.append(it["generation"])
        max_y.append(it["fitMax"])
        min_y.append(it["fitMin"])
        avg_y.append(it["fitAve"])

    pylab.figure()
    pylab.plot(x, max_y, "g", label="Max fitness")
    pylab.plot(x, min_y, "r", label="Min fitness")
    pylab.plot(x, avg_y, "b", label="Avg fitness")

    pylab.fill_between(x, min_y, max_y, color="g", alpha=0.1, label="Diff max/min")

    # best score and its generation (direction depends on minimize mode).
    if minimize: raw_max = min(min_y)
    else: raw_max = max(max_y)

    if minimize: gen_max = x[min_y.index(raw_max)]
    else: gen_max = x[max_y.index(raw_max)]

    if minimize: annot_label = "Minimum (%.2f)" % (raw_max,)
    else: annot_label = "Maximum (%.2f)" % (raw_max,)

    pylab.annotate(annot_label, xy=(gen_max, raw_max), xycoords='data',
                   xytext=(8, 15), textcoords='offset points',
                   arrowprops=dict(arrowstyle="->",
                                   connectionstyle="arc"),
                   )

    pylab.xlabel("Generation (#)")
    pylab.ylabel("Fitness score")
    pylab.title("Plot of evolution identified by '%s' (fitness scores)" % (options.identify))
    pylab.grid(True)
    pylab.legend(prop=FontProperties(size="smaller"))

    if filesave:
        pylab.savefig(filesave)
        print "Graph saved to %s file !" % (filesave,)
    else:
        pylab.show()
def graph_errorbars_raw(pop, minimize, filesave=None):
    """Error-bar plot of raw scores: average with bars spanning min..max.

    NOTE(review): reads the module-global ``options`` for the plot title;
    ``minimize`` is unused here.
    """
    x = []
    y = []
    yerr_max = []
    yerr_min = []

    for it in pop:
        x.append(it["generation"])
        y.append(it["rawAve"])
        # asymmetric error: distance from the average up to max / down to min.
        ymax = it["rawMax"] - it["rawAve"]
        ymin = it["rawAve"] - it["rawMin"]

        yerr_max.append(ymax)
        yerr_min.append(ymin)

    pylab.figure()
    pylab.errorbar(x, y, [yerr_min, yerr_max], ecolor="g")
    pylab.xlabel('Generation (#)')
    pylab.ylabel('Raw score Min/Avg/Max')
    pylab.title("Plot of evolution identified by '%s' (raw scores)" % (options.identify))
    pylab.grid(True)

    if filesave:
        pylab.savefig(filesave)
        print "Graph saved to %s file !" % (filesave,)
    else:
        pylab.show()
def graph_errorbars_fitness(pop, minimize, filesave=None):
    """Error-bar plot of fitness scores: average with bars spanning min..max.

    NOTE(review): reads the module-global ``options`` for the plot title;
    ``minimize`` is unused here.
    """
    x = []
    y = []
    yerr_max = []
    yerr_min = []

    for it in pop:
        x.append(it["generation"])
        y.append(it["fitAve"])
        # asymmetric error: distance from the average up to max / down to min.
        ymax = it["fitMax"] - it["fitAve"]
        ymin = it["fitAve"] - it["fitMin"]

        yerr_max.append(ymax)
        yerr_min.append(ymin)

    pylab.figure()
    pylab.errorbar(x, y, [yerr_min, yerr_max], ecolor="g")
    pylab.xlabel('Generation (#)')
    pylab.ylabel('Fitness score Min/Avg/Max')
    pylab.title("Plot of evolution identified by '%s' (fitness scores)" % (options.identify))
    pylab.grid(True)

    if filesave:
        pylab.savefig(filesave)
        print "Graph saved to %s file !" % (filesave,)
    else:
        pylab.show()
def graph_compare_raw(pop, minimize, id_list, filesave=None):
    """Overlay raw min/max curves of several evolutions for comparison.

    Args:
        pop: list of per-evolution statistics lists.
        minimize: when True the min curve is labeled/emphasized, else the max.
        id_list: evolution identifiers, parallel to *pop* (max 6 supported,
            limited by the color list).
    """
    colors_list = ["g", "b", "r", "k", "m", "y"]
    index = 0

    pylab.figure()

    for it_out in pop:
        x = []
        max_y = []
        min_y = []

        for it in it_out:
            x.append(it["generation"])
            max_y.append(it["rawMax"])
            min_y.append(it["rawMin"])

        # emphasize the curve that carries the best score for the mode.
        if minimize:
            pylab.plot(x, max_y, colors_list[index], linewidth=0.05)
            pylab.plot(x, min_y, colors_list[index], label="Raw min (%s)" % (id_list[index],), linewidth=1.3)
        else:
            pylab.plot(x, max_y, colors_list[index], label="Raw max (%s)" % (id_list[index],), linewidth=1.3)
            pylab.plot(x, min_y, colors_list[index], linewidth=0.05)

        pylab.fill_between(x, min_y, max_y, color=colors_list[index], alpha=0.06,)

        index += 1

    pylab.xlabel("Generation (#)")
    pylab.ylabel("Raw score")
    pylab.title("Plot of evolution identified by '%s' (raw scores)" % ('many',))
    pylab.grid(True)

    pylab.legend(prop=FontProperties(size="smaller"))

    if filesave:
        pylab.savefig(filesave)
        print "Graph saved to %s file !" % (filesave,)
    else:
        pylab.show()
def graph_compare_fitness(pop, minimize, id_list, filesave=None):
    """Overlay fitness min/max curves of several evolutions for comparison.

    Args:
        pop: list of per-evolution statistics lists.
        minimize: when True the min curve is labeled/emphasized, else the max.
        id_list: evolution identifiers, parallel to *pop* (max 6 supported,
            limited by the color list).
    """
    colors_list = ["g", "b", "r", "k", "m", "y"]
    index = 0

    pylab.figure()

    for it_out in pop:
        x = []
        max_y = []
        min_y = []

        for it in it_out:
            x.append(it["generation"])
            max_y.append(it["fitMax"])
            min_y.append(it["fitMin"])

        # emphasize the curve that carries the best score for the mode.
        if minimize:
            pylab.plot(x, max_y, colors_list[index], linewidth=0.05)
            pylab.plot(x, min_y, colors_list[index], label="Fitness min (%s)" % (id_list[index],), linewidth=1.3)
        else:
            pylab.plot(x, max_y, colors_list[index], label="Fitness max (%s)" % (id_list[index],), linewidth=1.3)
            pylab.plot(x, min_y, colors_list[index], linewidth=0.05)

        pylab.fill_between(x, min_y, max_y, color=colors_list[index], alpha=0.06,)

        index += 1

    pylab.xlabel("Generation (#)")
    pylab.ylabel("Fitness score")
    pylab.title("Plot of evolution identified by '%s' (fitness scores)" % ('many',))
    pylab.grid(True)

    pylab.legend(prop=FontProperties(size="smaller"))

    if filesave:
        pylab.savefig(filesave)
        print "Graph saved to %s file !" % (filesave,)
    else:
        pylab.show()
if __name__ == "__main__":
    # --- CLI entry point (Python 2): parse options, load data from the
    # --- SQLite statistics DB, then dispatch to the selected graph function.
    from pyevolve import __version__ as pyevolve_version
    from pyevolve import __author__ as pyevolve_author

    popGraph = False

    print "Pyevolve %s - Graph Plot Tool" % (pyevolve_version,)
    print "By %s\n" % (pyevolve_author,)
    parser = OptionParser()

    # general options ------------------------------------------------------
    parser.add_option("-f", "--file", dest="dbfile",
                      help="Database file to read (default is 'pyevolve.db').", metavar="FILENAME", default="pyevolve.db")

    parser.add_option("-i", "--identify", dest="identify",
                      help="The identify of evolution.", metavar="IDENTIFY")

    parser.add_option("-o", "--outfile", dest="outfile",
                      help="""Write the graph image to a file (don't use extension, just the filename, default is png format, but you can change using --extension (-e) parameter).""",
                      metavar="OUTFILE")

    parser.add_option("-e", "--extension", dest="extension",
                      help="""Graph image file format. Supported options (formats) are: emf, eps, pdf, png, ps, raw, rgba, svg, svgz. Default is 'png'.""",
                      metavar="EXTENSION", default="png")

    parser.add_option("-g", "--genrange", dest="genrange",
                      help="""This is the generation range of the graph, ex: 1:30 (interval between 1 and 30).""",
                      metavar="GENRANGE")

    parser.add_option("-l", "--lindrange", dest="lindrange",
                      help="""This is the individual range of the graph, ex: 1:30 (individuals between 1 and 30), only applies to heatmaps.""",
                      metavar="LINDRANGE")

    parser.add_option("-c", "--colormap", dest="colormap",
                      help="""Sets the Color Map for the graph types 8 and 9. Some options are: summer, bone, gray, hot, jet, cooper, spectral. The default is 'jet'.""",
                      metavar="COLORMAP", default="jet")

    parser.add_option("-m", "--minimize", action="store_true",
                      help="Sets the 'Minimize' mode, default is the Maximize mode. This option makes sense if you are minimizing your evaluation function.", dest="minimize")

    # graph-type switches (-0 .. -9) ---------------------------------------
    group = OptionGroup(parser, "Graph types", "This is the supported graph types")

    group.add_option("-0", action="store_true", help="Write all graphs to files. Graph types: 1, 2, 3, 4 and 5.", dest="all_graphs")

    group.add_option("-1", action="store_true", help="Error bars graph (raw scores).", dest="errorbars_raw")
    group.add_option("-2", action="store_true", help="Error bars graph (fitness scores).", dest="errorbars_fitness")
    group.add_option("-3", action="store_true", help="Max/min/avg/std. dev. graph (raw scores).", dest="maxmin_raw")
    group.add_option("-4", action="store_true", help="Max/min/avg graph (fitness scores).", dest="maxmin_fitness")
    group.add_option("-5", action="store_true", help="Raw and Fitness min/max difference graph.", dest="diff_raw")
    group.add_option("-6", action="store_true", help="Compare best raw score of two or more evolutions (you must specify the identify comma-separed list with --identify (-i) parameter, like 'one, two, three'), the maximum is 6 items.", dest="compare_raw")
    group.add_option("-7", action="store_true", help="Compare best fitness score of two or more evolutions (you must specify the identify comma-separed list with --identify (-i) parameter, like 'one, two, three'), the maximum is 6 items.", dest="compare_fitness")
    group.add_option("-8", action="store_true", help="Show a heat map of population raw score distribution between generations.", dest="pop_heatmap_raw")
    group.add_option("-9", action="store_true", help="Show a heat map of population fitness score distribution between generations.", dest="pop_heatmap_fitness")

    parser.add_option_group(group)

    (options, args) = parser.parse_args()

    # at least one graph type must be chosen when an identify is given.
    if options.identify and (not options.errorbars_raw
                             and not options.errorbars_fitness
                             and not options.maxmin_raw
                             and not options.maxmin_fitness
                             and not options.diff_raw
                             and not options.all_graphs
                             and not options.compare_raw
                             and not options.pop_heatmap_raw
                             and not options.pop_heatmap_fitness
                             and not options.compare_fitness):
        parser.error("You must choose one graph type !")

    if (not options.identify) or (not options.dbfile):
        parser.print_help()
        exit()

    print "Loading modules...."

    import os.path
    if not os.path.exists(options.dbfile):
        print "Database file '%s' not found !" % (options.dbfile, )
        exit()

    # heavy imports are deferred until after option validation.
    import pylab
    from matplotlib.font_manager import FontProperties
    import matplotlib.cm
    import sqlite3
    import math
    import os

    print "Loading database and creating graph..."

    identify_list = options.identify.split(",")
    identify_list = map(str.strip, identify_list)

    pop = None

    # --- heat-map data: raw/fitness values per individual per generation ---
    if options.pop_heatmap_raw or options.pop_heatmap_fitness:
        conn = sqlite3.connect(options.dbfile)
        conn.row_factory = sqlite3.Row
        c = conn.cursor()

        if options.genrange:
            genrange = options.genrange.split(":")
            ret = c.execute("select distinct generation from population where identify = ? and generation between ? and ?", (options.identify, genrange[0], genrange[1]))
        else:
            ret = c.execute("select distinct generation from population where identify = ?", (options.identify,))

        generations = ret.fetchall()
        if len(generations) <= 0:
            print "No generation data found for the identify '%s' !" % (options.identify,)
            exit()

        pop = []
        for gen in generations:
            pop_tmp = []

            if options.lindrange:
                individual_range = options.lindrange.split(":")
                ret = c.execute("""
                                select *  from population
                                where identify = ?
                                and generation = ?
                                and individual between ? and ?
                                """, (options.identify, gen[0], individual_range[0], individual_range[1]))
            else:
                ret = c.execute("""
                                select *  from population
                                where identify = ?
                                and generation = ?
                                """, (options.identify, gen[0]))

            ret_fetch = ret.fetchall()
            for it in ret_fetch:
                if options.pop_heatmap_raw:
                    pop_tmp.append(it["raw"])
                else:
                    pop_tmp.append(it["fitness"])
            pop.append(pop_tmp)

        ret.close()
        conn.close()

        if len(pop) <= 0:
            print "No statistic data found for the identify '%s' !" % (options.identify,)
            exit()

        print "%d generations found !" % (len(pop),)
        popGraph = True

    # --- statistics data: single-evolution graphs -------------------------
    if len(identify_list) == 1 and not popGraph:
        if options.compare_raw or options.compare_fitness:
            parser.error("You can't use this graph type with only one identify !")

        conn = sqlite3.connect(options.dbfile)
        conn.row_factory = sqlite3.Row
        c = conn.cursor()

        if options.genrange:
            genrange = options.genrange.split(":")
            ret = c.execute("select * from statistics where identify = ? and generation between ? and ?", (options.identify, genrange[0], genrange[1]))
        else:
            ret = c.execute("select * from statistics where identify = ?", (options.identify,))

        pop = ret.fetchall()
        ret.close()
        conn.close()

        if len(pop) <= 0:
            print "No statistic data found for the identify '%s' !" % (options.identify,)
            exit()

        print "%d generations found !" % (len(pop),)

    # --- statistics data: multi-evolution comparison graphs ---------------
    elif len(identify_list) > 1 and not popGraph:
        pop = []

        if (not options.compare_raw) and (not options.compare_fitness):
            parser.error("You can't use many ids with this graph type !")

        conn = sqlite3.connect(options.dbfile)
        conn.row_factory = sqlite3.Row
        c = conn.cursor()

        for item in identify_list:
            if options.genrange:
                genrange = options.genrange.split(":")
                ret = c.execute("select * from statistics where identify = ? and generation between ? and ?", (item, genrange[0], genrange[1]))
            else:
                ret = c.execute("select * from statistics where identify = ?", (item,))
            fetchall = ret.fetchall()
            if len(fetchall) > 0:
                pop.append(fetchall)

        ret.close()
        conn.close()

        if len(pop) <= 0:
            print "No statistic data found for the identify list '%s' !" % (options.identify,)
            exit()

        print "%d identify found !" % (len(pop),)

    # --- dispatch to the selected graph function(s) ------------------------
    if options.errorbars_raw:
        if options.outfile: graph_errorbars_raw(pop, options.minimize, options.outfile + "." + options.extension)
        else: graph_errorbars_raw(pop, options.minimize)

    if options.errorbars_fitness:
        if options.outfile: graph_errorbars_fitness(pop, options.minimize, options.outfile + "." + options.extension)
        else: graph_errorbars_fitness(pop, options.minimize)

    if options.maxmin_raw:
        if options.outfile: graph_maxmin_raw(pop, options.minimize, options.outfile + "." + options.extension)
        else: graph_maxmin_raw(pop, options.minimize)

    if options.maxmin_fitness:
        if options.outfile: graph_maxmin_fitness(pop, options.minimize, options.outfile + "." + options.extension)
        else: graph_maxmin_fitness(pop, options.minimize)

    if options.diff_raw:
        if options.outfile: graph_diff_raw(pop, options.minimize, options.outfile + "." + options.extension)
        else: graph_diff_raw(pop, options.minimize)

    if options.all_graphs:
        all_graph_functions = [graph_errorbars_raw, graph_errorbars_fitness, graph_maxmin_raw,
                               graph_maxmin_fitness, graph_diff_raw]
        if options.outfile:
            parser.error("You can't specify one file to all graphs !")
        # one image file per graph, written into a per-identify directory.
        dirname = "graphs_" + options.identify
        if not os.path.isdir(dirname):
            os.mkdir(dirname)
        for graph in all_graph_functions:
            filename = dirname + "/"
            filename += options.identify + "_" + graph.__name__[6:]
            filename += "." + options.extension
            graph(pop, options.minimize, filename)
        print "\n\tDone ! The graphs was saved in the directory '%s'" % (dirname)

    if options.compare_raw:
        if options.outfile: graph_compare_raw(pop, options.minimize, identify_list, options.outfile + "." + options.extension)
        else: graph_compare_raw(pop, options.minimize, identify_list )

    if options.compare_fitness:
        if options.outfile: graph_compare_fitness(pop, options.minimize, identify_list, options.outfile + "." + options.extension)
        else: graph_compare_fitness(pop, options.minimize, identify_list )

    if options.pop_heatmap_raw:
        if options.outfile: graph_pop_heatmap_raw(pop, options.minimize, options.colormap, options.outfile + "." + options.extension)
        else: graph_pop_heatmap_raw(pop, options.minimize, options.colormap)

    if options.pop_heatmap_fitness:
        if options.outfile: graph_pop_heatmap_fitness(pop, options.minimize, options.colormap, options.outfile + "." + options.extension)
        else: graph_pop_heatmap_fitness(pop, options.minimize, options.colormap)
795633d04d3efe0c39e141a64ffe0b7945aa5db9 | 2,404 | py | Python | monai/deploy/packager/package_command.py | gigony/test_readthedocs | e8c820ecda15aa59b50c1fe7674cbef00e24553d | [
"Apache-2.0"
] | null | null | null | monai/deploy/packager/package_command.py | gigony/test_readthedocs | e8c820ecda15aa59b50c1fe7674cbef00e24553d | [
"Apache-2.0"
] | null | null | null | monai/deploy/packager/package_command.py | gigony/test_readthedocs | e8c820ecda15aa59b50c1fe7674cbef00e24553d | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from argparse import ArgumentParser, Namespace, _SubParsersAction
from typing import List
from monai.deploy.packager import util as packager_util
def create_package_parser(subparser: _SubParsersAction, command: str, parents: List[ArgumentParser]) -> ArgumentParser:
    """Register the ``package`` sub-command and all of its CLI options.

    Args:
        subparser: sub-parser registry to attach the command to.
        command: name under which the sub-command is exposed.
        parents: parsers whose arguments this parser inherits.

    Returns:
        The newly created argument parser for the command.
    """
    pkg_parser = subparser.add_parser(
        command, formatter_class=argparse.ArgumentDefaultsHelpFormatter, parents=parents, add_help=False
    )

    # positional argument first, then the optional flags in their original order.
    pkg_parser.add_argument("application", type=str, help="MONAI application path")

    option_specs = [
        (("--tag", "-t"), dict(required=True, type=str, help="MONAI application package tag")),
        (("--base", "-b"), dict(type=str, help="Base Application Image")),
        (("--input-dir", "-i"), dict(type=str, help="Directory mounted in container for Application Input")),
        (("--models-dir",), dict(type=str, help="Directory mounted in container for Models Path")),
        (("--model", "-m"), dict(type=str, help="Optional Path to directory containing all application models")),
        (("--no-cache", "-n"), dict(action="store_true", help="Packager will not use cache when building image")),
        (("--output-dir", "-o"), dict(type=str, help="Directory mounted in container for Application Output")),
        (("--working-dir", "-w"), dict(type=str, help="Directory mounted in container for Application")),
        (
            ("--requirements", "-r"),
            dict(type=str, help="Optional Path to requirements.txt containing package dependencies of application"),
        ),
        (("--timeout",), dict(type=str, help="Timeout")),
        (("--version",), dict(type=str, help="Version of the Application")),
    ]
    for flags, options in option_specs:
        pkg_parser.add_argument(*flags, **options)

    return pkg_parser
def execute_package_command(args: Namespace):
    """Entry point for the ``package`` sub-command: delegate to the packager util."""
    packager_util.package_application(args)
| 51.148936 | 120 | 0.742097 |
795635899080ccaa0682e15ee4999100923b4ed5 | 8,865 | py | Python | geomstats/_backend/numpy/__init__.py | mvesin/geomstats | 5bed571a57fb41c7f071db07d4f67dd623fc4f94 | [
"MIT"
] | null | null | null | geomstats/_backend/numpy/__init__.py | mvesin/geomstats | 5bed571a57fb41c7f071db07d4f67dd623fc4f94 | [
"MIT"
] | null | null | null | geomstats/_backend/numpy/__init__.py | mvesin/geomstats | 5bed571a57fb41c7f071db07d4f67dd623fc4f94 | [
"MIT"
] | 1 | 2021-03-14T06:54:09.000Z | 2021-03-14T06:54:09.000Z | """Numpy based computation backend."""
import autograd # NOQA
import autograd.numpy as np
from autograd.numpy import ( # NOQA
abs,
all,
allclose,
amax,
amin,
any,
arange,
arccos,
arccosh,
arcsin,
arctan2,
arctanh,
argmax,
argmin,
array,
broadcast_arrays,
ceil,
clip,
concatenate,
cos,
cosh,
cross,
cumprod,
cumsum,
diagonal,
divide,
dot,
dtype,
einsum,
empty,
empty_like,
equal,
exp,
expand_dims,
eye,
flip,
float32,
float64,
floor,
greater,
hsplit,
hstack,
int32,
int64,
isclose,
isnan,
less,
less_equal,
linspace,
log,
logical_and,
logical_or,
matmul,
maximum,
mean,
meshgrid,
mod,
ones,
ones_like,
outer,
power,
repeat,
reshape,
shape,
sign,
sin,
sinh,
split,
sqrt,
squeeze,
stack,
std,
sum,
tan,
tanh,
tile,
trace,
transpose,
triu_indices,
tril_indices,
searchsorted,
tril,
uint8,
vstack,
where,
zeros,
zeros_like
)
from autograd.scipy.special import polygamma # NOQA
from scipy.sparse import coo_matrix
from . import linalg # NOQA
from . import random # NOQA
from .common import to_ndarray # NOQA
from ..constants import np_atol, np_rtol
# Width ordering of supported dtypes (narrowest -> widest); the integer rank
# is used by convert_to_wider_dtype to pick a common dtype for a list of arrays.
DTYPES = {
    dtype('int32'): 0,
    dtype('int64'): 1,
    dtype('float32'): 2,
    dtype('float64'): 3}


# Default absolute/relative tolerances, re-exported from the shared constants.
atol = np_atol
rtol = np_rtol
def to_numpy(x):
    """Return *x* unchanged (this backend already operates on numpy arrays)."""
    return x
def convert_to_wider_dtype(tensor_list):
    """Cast every array in *tensor_list* to the widest dtype present.

    The width ordering is defined by the module-level DTYPES table
    (int32 < int64 < float32 < float64).

    NOTE(review): relies on a ``cast`` helper not defined in this excerpt -
    presumably provided elsewhere in the backend module.
    """
    dtype_list = [DTYPES[x.dtype] for x in tensor_list]
    wider_dtype_index = max(dtype_list)

    wider_dtype = list(DTYPES.keys())[wider_dtype_index]

    tensor_list = [cast(x, dtype=wider_dtype) for x in tensor_list]
    return tensor_list
def flatten(x):
    """Return a flat (1-D) copy of ``x`` in row-major order."""
    flat_copy = x.flatten()
    return flat_copy
def get_mask_i_float(i, n):
    """Create a float 1-D indicator array: one at index ``i``, zero elsewhere.

    Parameters
    ----------
    i : int
        Index of the non-zero element.
    n : int
        Length of the created array.

    Returns
    -------
    mask_i_float : array-like, shape=[n,]
        float32 array of zeros except for a one at index ``i``.
    """
    positions = arange(n)
    target = cast(array([i]), int32)[0]
    indicator = equal(positions, target)
    return cast(indicator, float32)
def _is_boolean(x):
if isinstance(x, bool):
return True
if isinstance(x, (tuple, list)):
return _is_boolean(x[0])
if isinstance(x, np.ndarray):
return x.dtype == bool
return False
def _is_iterable(x):
    """Return True if ``x`` is a list, a tuple, or a non-scalar ndarray."""
    if isinstance(x, np.ndarray):
        # 0-d arrays are scalars, not iterables, for indexing purposes.
        return ndim(x) > 0
    return isinstance(x, (list, tuple))
def assignment(x, values, indices, axis=0):
    """Assign values at given indices of an array.

    Parameters
    ----------
    x: array-like, shape=[dim]
        Initial array.
    values: {float, list(float)}
        Value or list of values to be assigned.
    indices: {int, tuple, list(int), list(tuple)}
        Single int or tuple, or list of ints or tuples of indices where value
        is assigned.
        If the length of the tuples is shorter than ndim(x), values are
        assigned to each copy along axis.
    axis: int, optional
        Axis along which values are assigned, if vectorized.

    Returns
    -------
    x_new : array-like, shape=[dim]
        Copy of x with the values assigned at the given indices.

    Notes
    -----
    If a single value is provided, it is assigned at all the indices.
    If a list is given, it must have the same length as indices.
    """
    x_new = copy(x)
    # Vectorized mode: the index tuples address fewer axes than x has, so the
    # assignment is broadcast along `axis` of every addressed slice.
    use_vectorization = hasattr(indices, '__len__') and len(indices) < ndim(x)
    if _is_boolean(indices):
        # Boolean masks go straight to numpy's masked assignment.
        x_new[indices] = values
        return x_new
    # A list of point-tuples [(r0, c0), (r1, c1), ...] must be transposed into
    # numpy's per-axis form ((r0, r1, ...), (c0, c1, ...)).
    zip_indices = _is_iterable(indices) and _is_iterable(indices[0])
    len_indices = len(indices) if _is_iterable(indices) else 1
    if zip_indices:
        indices = tuple(zip(*indices))
    if not use_vectorization:
        if not zip_indices:
            # Recompute after the (possible) transposition above.
            len_indices = len(indices) if _is_iterable(indices) else 1
        len_values = len(values) if _is_iterable(values) else 1
        if len_values > 1 and len_values != len_indices:
            raise ValueError('Either one value or as many values as indices')
        x_new[indices] = values
    else:
        # Insert a full slice at `axis` so the same assignment applies to
        # every entry along that axis.
        indices = tuple(
            list(indices[:axis]) + [slice(None)] + list(indices[axis:]))
        x_new[indices] = values
    return x_new
def assignment_by_sum(x, values, indices, axis=0):
    """Add values at given indices of an array.

    Parameters
    ----------
    x : array-like, shape=[dim]
        Initial array.
    values : {float, list(float)}
        Value or list of values to be assigned.
    indices : {int, tuple, list(int), list(tuple)}
        Single int or tuple, or list of ints or tuples of indices where value
        is assigned.
        If the length of the tuples is shorter than ndim(x), values are
        assigned to each copy along axis.
    axis: int, optional
        Axis along which values are assigned, if vectorized.

    Returns
    -------
    x_new : array-like, shape=[dim]
        Copy of x with the values added at the given indices.

    Notes
    -----
    If a single value is provided, it is added at all the indices.
    If a list is given, it must have the same length as indices.
    """
    x_new = copy(x)
    # Vectorized mode: index tuples address fewer axes than x has, so the sum
    # is applied along `axis` of every addressed slice.
    use_vectorization = hasattr(indices, '__len__') and len(indices) < ndim(x)
    if _is_boolean(indices):
        # Boolean masks go straight to numpy's masked in-place addition.
        x_new[indices] += values
        return x_new
    # Transpose a list of point-tuples into numpy's per-axis index form.
    zip_indices = _is_iterable(indices) and _is_iterable(indices[0])
    if zip_indices:
        indices = tuple(zip(*indices))
    if not use_vectorization:
        len_indices = len(indices) if _is_iterable(indices) else 1
        len_values = len(values) if _is_iterable(values) else 1
        if len_values > 1 and len_values != len_indices:
            raise ValueError('Either one value or as many values as indices')
        x_new[indices] += values
    else:
        # Insert a full slice at `axis` so the addition applies to every
        # entry along that axis.
        indices = tuple(
            list(indices[:axis]) + [slice(None)] + list(indices[axis:]))
        x_new[indices] += values
    return x_new
def get_slice(x, indices):
    """Return a slice of an array, following Numpy's indexing convention.

    Parameters
    ----------
    x : array-like, shape=[dim]
        Initial array.
    indices : iterable(iterable(int))
        Indices kept along each axis, grouped by axis (Numpy style).

    Returns
    -------
    slice : array-like
        Slice of x given by indices.

    Examples
    --------
    >>> a = np.array(range(30)).reshape(3,10)
    >>> get_slice(a, ((0, 2), (8, 9)))
    array([8, 29])
    """
    return x[indices]
def vectorize(x, pyfunc, multiple_args=False, signature=None, **kwargs):
    """Apply ``pyfunc`` elementwise to ``x`` via numpy.vectorize.

    When ``multiple_args`` is True, ``x`` is unpacked as positional args.
    """
    vectorized = np.vectorize(pyfunc, signature=signature)
    return vectorized(*x) if multiple_args else vectorized(x)
def cast(x, dtype):
    """Return a copy of the array ``x`` converted to ``dtype``."""
    converted = x.astype(dtype)
    return converted
def set_diag(x, new_diag):
    """Set the diagonal along the last two axes, in place.

    Parameters
    ----------
    x : array-like, shape=[..., m, n]
        Array whose last-two-axes diagonal is overwritten.
    new_diag : array-like, shape=[min(m, n)]
        Values to set on the diagonal.

    Returns
    -------
    x : array-like
        The same array, mutated in place (also returned for convenience).

    Notes
    -----
    This mimics tensorflow.linalg.set_diag(x, new_diag), when new_diag is a
    1-D array, but modifies x instead of creating a copy.
    """
    arr_shape = x.shape
    # Index with a single range of length min(m, n) so rectangular matrices
    # work like tf.linalg.set_diag; two ranges of different lengths would
    # raise an IndexError on broadcasting.
    diag_len = min(arr_shape[-2], arr_shape[-1])
    x[..., range(diag_len), range(diag_len)] = new_diag
    return x
def ndim(x):
    """Return the number of dimensions of the array ``x``."""
    dimension_count = x.ndim
    return dimension_count
def copy(x):
    """Return an independent copy of the array ``x``."""
    duplicate = x.copy()
    return duplicate
def array_from_sparse(indices, data, target_shape):
    """Create a dense array of given shape from sparse (index, value) pairs.

    Entries not listed in ``indices`` are zero.

    Parameters
    ----------
    indices : iterable(tuple(int))
        Index of each element which will be assigned a specific value.
    data : iterable(scalar)
        Value associated at each index.
    target_shape : tuple(int)
        Shape of the output array.

    Returns
    -------
    a : array, shape=target_shape
        Array of zeros with specified values assigned to specified indices.
    """
    per_axis_indices = list(zip(*indices))
    sparse = coo_matrix((data, per_axis_indices), target_shape)
    return array(sparse.todense())
def erf(x):
    """Approximate the error function elementwise.

    Uses a closed-form sign/sqrt/exp approximation rather than a series
    expansion, so it stays differentiable under autograd.
    """
    coeff = 8.0 / (3.0 * np.pi) * (np.pi - 3.0) / (4.0 - np.pi)
    x_sq = x * x
    exponent = -x_sq * (4 / np.pi + coeff * x_sq) / (1 + coeff * x_sq)
    return np.sign(x) * np.sqrt(1 - np.exp(exponent))
def triu_to_vec(x, k=0):
    """Extract the upper-triangular part (diagonal offset ``k``) of the last
    two axes of ``x`` as a flat vector, row by row."""
    size = x.shape[-1]
    row_idx, col_idx = triu_indices(size, k=k)
    return x[..., row_idx, col_idx]
| 23.514589 | 78 | 0.604512 |
7956371aa42f8963443b27b5ee00bc92efc12b63 | 2,350 | py | Python | eth/vm/opcode_values.py | dbfreem/py-evm | 02a1f6f38884b1f7a89640c2095ea5b0f20687c3 | [
"MIT"
] | 1,641 | 2017-11-24T04:24:22.000Z | 2022-03-31T14:59:30.000Z | eth/vm/opcode_values.py | UniqueMR/py-evm | 026ee20f8d9b70d7c1b6a4fb9484d5489d425e54 | [
"MIT"
] | 1,347 | 2017-11-23T10:37:36.000Z | 2022-03-20T16:31:44.000Z | eth/vm/opcode_values.py | UniqueMR/py-evm | 026ee20f8d9b70d7c1b6a4fb9484d5489d425e54 | [
"MIT"
] | 567 | 2017-11-22T18:03:27.000Z | 2022-03-28T17:49:08.000Z | #
# EVM opcode byte values, grouped by the categories used in the Yellow Paper.
# Stop and Arithmetic
#
STOP = 0x00
ADD = 0x01
MUL = 0x02
SUB = 0x03
DIV = 0x04
SDIV = 0x05
MOD = 0x06
SMOD = 0x07
ADDMOD = 0x08
MULMOD = 0x09
EXP = 0x0a
SIGNEXTEND = 0x0b
#
# Comparison and Bitwise Logic
#
LT = 0x10
GT = 0x11
SLT = 0x12
SGT = 0x13
EQ = 0x14
ISZERO = 0x15
AND = 0x16
OR = 0x17
XOR = 0x18
NOT = 0x19
BYTE = 0x1a
# Bitwise shifting instructions, introduced by EIP-145 (Constantinople).
SHL = 0x1b
SHR = 0x1c
SAR = 0x1d
#
# Sha3
#
SHA3 = 0x20
#
# Environment Information
#
ADDRESS = 0x30
BALANCE = 0x31
ORIGIN = 0x32
CALLER = 0x33
CALLVALUE = 0x34
CALLDATALOAD = 0x35
CALLDATASIZE = 0x36
CALLDATACOPY = 0x37
CODESIZE = 0x38
CODECOPY = 0x39
GASPRICE = 0x3a
EXTCODESIZE = 0x3b
EXTCODECOPY = 0x3c
RETURNDATASIZE = 0x3d
RETURNDATACOPY = 0x3e
EXTCODEHASH = 0x3f
# These opcodes seem to belong in the environment block, but we are out of opcode space in 0x3*
CHAINID = 0x46  # EIP-1344 (Istanbul)
SELFBALANCE = 0x47  # EIP-1884 (Istanbul)
BASEFEE = 0x48  # EIP-3198 (London)
#
# Block Information
#
BLOCKHASH = 0x40
COINBASE = 0x41
TIMESTAMP = 0x42
NUMBER = 0x43
DIFFICULTY = 0x44
GASLIMIT = 0x45
#
# Stack, Memory, Storage and Flow Operations
#
POP = 0x50
MLOAD = 0x51
MSTORE = 0x52
MSTORE8 = 0x53
SLOAD = 0x54
SSTORE = 0x55
JUMP = 0x56
JUMPI = 0x57
PC = 0x58
MSIZE = 0x59
GAS = 0x5a
JUMPDEST = 0x5b
#
# Push Operations (PUSH1..PUSH32 push 1..32 immediate bytes onto the stack)
#
PUSH1 = 0x60
PUSH2 = 0x61
PUSH3 = 0x62
PUSH4 = 0x63
PUSH5 = 0x64
PUSH6 = 0x65
PUSH7 = 0x66
PUSH8 = 0x67
PUSH9 = 0x68
PUSH10 = 0x69
PUSH11 = 0x6a
PUSH12 = 0x6b
PUSH13 = 0x6c
PUSH14 = 0x6d
PUSH15 = 0x6e
PUSH16 = 0x6f
PUSH17 = 0x70
PUSH18 = 0x71
PUSH19 = 0x72
PUSH20 = 0x73
PUSH21 = 0x74
PUSH22 = 0x75
PUSH23 = 0x76
PUSH24 = 0x77
PUSH25 = 0x78
PUSH26 = 0x79
PUSH27 = 0x7a
PUSH28 = 0x7b
PUSH29 = 0x7c
PUSH30 = 0x7d
PUSH31 = 0x7e
PUSH32 = 0x7f
#
# Duplicate Operations (DUPn duplicates the n-th stack item)
#
DUP1 = 0x80
DUP2 = 0x81
DUP3 = 0x82
DUP4 = 0x83
DUP5 = 0x84
DUP6 = 0x85
DUP7 = 0x86
DUP8 = 0x87
DUP9 = 0x88
DUP10 = 0x89
DUP11 = 0x8a
DUP12 = 0x8b
DUP13 = 0x8c
DUP14 = 0x8d
DUP15 = 0x8e
DUP16 = 0x8f
#
# Exchange Operations (SWAPn swaps the top with the (n+1)-th stack item)
#
SWAP1 = 0x90
SWAP2 = 0x91
SWAP3 = 0x92
SWAP4 = 0x93
SWAP5 = 0x94
SWAP6 = 0x95
SWAP7 = 0x96
SWAP8 = 0x97
SWAP9 = 0x98
SWAP10 = 0x99
SWAP11 = 0x9a
SWAP12 = 0x9b
SWAP13 = 0x9c
SWAP14 = 0x9d
SWAP15 = 0x9e
SWAP16 = 0x9f
#
# Logging (LOGn emits a log entry with n topics)
#
LOG0 = 0xa0
LOG1 = 0xa1
LOG2 = 0xa2
LOG3 = 0xa3
LOG4 = 0xa4
#
# System
#
CREATE = 0xf0
CALL = 0xf1
CALLCODE = 0xf2
RETURN = 0xf3
DELEGATECALL = 0xf4
CREATE2 = 0xf5
STATICCALL = 0xfa
REVERT = 0xfd
SELFDESTRUCT = 0xff
79563780668a445c573bea74278acc1f8fb7803e | 12,528 | py | Python | bokeh/io/export.py | daledali/bokeh | c4f0debe7bd230d7e1aa8500716e8e997c04f528 | [
"BSD-3-Clause"
] | 1 | 2020-01-19T03:17:18.000Z | 2020-01-19T03:17:18.000Z | bokeh/io/export.py | daledali/bokeh | c4f0debe7bd230d7e1aa8500716e8e997c04f528 | [
"BSD-3-Clause"
] | 1 | 2021-05-12T10:14:45.000Z | 2021-05-12T10:14:45.000Z | bokeh/io/export.py | daledali/bokeh | c4f0debe7bd230d7e1aa8500716e8e997c04f528 | [
"BSD-3-Clause"
] | 1 | 2020-01-21T12:03:58.000Z | 2020-01-21T12:03:58.000Z | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
'''
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import io
import os
import warnings
from os.path import abspath
from tempfile import mkstemp
# External imports
from PIL import Image
# Bokeh imports
from ..embed import file_html
from ..resources import INLINE_LEGACY
from .util import default_filename
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
# Public API of this module (including webdriver helpers re-exported below).
__all__ = (
    'create_webdriver',
    'export_png',
    'export_svgs',
    'get_layout_html',
    'get_screenshot_as_png',
    'get_svgs',
    'terminate_webdriver',
    'webdriver_control',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
def export_png(obj, filename=None, height=None, width=None, webdriver=None, timeout=5):
    ''' Save a PNG screenshot of a ``LayoutDOM`` object or document.

    If ``filename`` is not given, it is derived from the script name
    (e.g. ``/foo/myplot.py`` will create ``/foo/myplot.png``).

    Args:
        obj (LayoutDOM or Document) : a Layout (Row/Column), Plot or Widget
            object or Document to export.

        filename (str, optional) : filename to save document under (default: None)
            If None, infer from the filename.

        height (int) : desired height; only honored when obj is a Plot instance.

        width (int) : desired width; only honored when obj is a Plot instance.

        webdriver (selenium.webdriver) : a selenium webdriver instance used
            to render the image.

        timeout (int) : maximum time (in seconds) to wait for Bokeh to
            initialize (default: 5).

    Returns:
        filename (str) : absolute path of the saved file.

    To obtain the image in memory instead of on disk, use the lower-level
    :func:`~bokeh.io.export.get_screenshot_as_png` function.

    .. warning::
        Responsive sizing_modes may generate layouts with unexpected size and
        aspect ratios. It is recommended to use the default ``fixed`` sizing mode.

    '''
    screenshot = get_screenshot_as_png(
        obj, height=height, width=width, driver=webdriver, timeout=timeout)

    if filename is None:
        filename = default_filename("png")

    if screenshot.width == 0 or screenshot.height == 0:
        raise ValueError("unable to save an empty image")

    screenshot.save(filename)

    return abspath(filename)
def export_svgs(obj, filename=None, height=None, width=None, webdriver=None, timeout=5):
    ''' Export the SVG-enabled plots within a layout. Each plot will result
    in a distinct SVG file.

    If the filename is not given, it is derived from the script name
    (e.g. ``/foo/myplot.py`` will create ``/foo/myplot.svg``)

    Args:
        obj (LayoutDOM object) : a Layout (Row/Column), Plot or Widget object to display

        filename (str, optional) : filename to save document under (default: None)
            If None, infer from the filename.

        height (int) : the desired height of the exported layout obj only if
            it's a Plot instance. Otherwise the height kwarg is ignored.

        width (int) : the desired width of the exported layout obj only if
            it's a Plot instance. Otherwise the width kwarg is ignored.

        webdriver (selenium.webdriver) : a selenium webdriver instance to use
            to export the image.

        timeout (int) : the maximum amount of time (in seconds) to wait for
            Bokeh to initialize (default: 5) (Added in 1.1.1).

    Returns:
        filenames (list(str)) : the list of filenames where the SVGs files are
        saved.

    .. warning::
        Responsive sizing_modes may generate layouts with unexpected size and
        aspect ratios. It is recommended to use the default ``fixed`` sizing mode.

    '''
    svgs = get_svgs(obj, height=height, width=width, driver=webdriver, timeout=timeout)

    if len(svgs) == 0:
        log.warning("No SVG Plots were found.")
        return

    if filename is None:
        filename = default_filename("svg")

    # Derive numbered filenames from the ORIGINAL name so the i-th plot is
    # saved as "name_<i>.svg". The previous implementation mutated `filename`
    # on every iteration, compounding the suffixes into names like
    # "name_1_2_3.svg" for the fourth plot.
    idx = filename.find(".svg")
    if idx == -1:
        base, ext = filename, ""
    else:
        base, ext = filename[:idx], filename[idx:]

    filenames = []
    for i, svg in enumerate(svgs):
        fname = filename if i == 0 else "{}_{}{}".format(base, i, ext)
        with io.open(fname, mode="w", encoding="utf-8") as f:
            f.write(svg)
        filenames.append(fname)

    return filenames
# this is part of the API for this module
from .webdriver import terminate_webdriver ; terminate_webdriver
from .webdriver import webdriver_control ; webdriver_control
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
def create_webdriver():
    ''' Create a new webdriver.

    Returns:
        selenium.webdriver : a freshly created webdriver instance.

    .. note ::
        Here for compatibility. Prefer methods on the webdriver_control
        object.

    '''
    return webdriver_control.create()
def get_screenshot_as_png(obj, driver=None, timeout=5, **kwargs):
    ''' Get a screenshot of a ``LayoutDOM`` object.

    Args:
        obj (LayoutDOM or Document) : a Layout (Row/Column), Plot or Widget
            object or Document to export.

        driver (selenium.webdriver) : a selenium webdriver instance to use
            to export the image.

        timeout (int) : the maximum amount of time to wait for initialization.
            It will be used as a timeout for loading Bokeh, then when waiting for
            the layout to be rendered.

    Returns:
        cropped_image (PIL.Image.Image) : a pillow image loaded from PNG.

    .. warning::
        Responsive sizing_modes may generate layouts with unexpected size and
        aspect ratios. It is recommended to use the default ``fixed`` sizing mode.

    '''
    with _tmp_html() as tmp:
        # Render the layout to a standalone HTML file and point the browser at it.
        html = get_layout_html(obj, **kwargs)
        with io.open(tmp.path, mode="w", encoding="utf-8") as file:
            file.write(html)
        web_driver = driver if driver is not None else webdriver_control.get()
        web_driver.get("file:///" + tmp.path)
        web_driver.maximize_window()
        ## resize for PhantomJS compat
        web_driver.execute_script("document.body.style.width = '100%';")
        # Screenshot is only meaningful after Bokeh signals the render is done.
        wait_until_render_complete(web_driver, timeout)
        png = web_driver.get_screenshot_as_png()
        # Bounding rect of the rendered root element, used to crop the
        # full-window screenshot down to just the plot area.
        b_rect = web_driver.execute_script(_BOUNDING_RECT_SCRIPT)
    image = Image.open(io.BytesIO(png))
    cropped_image = _crop_image(image, **b_rect)
    return cropped_image
def get_svgs(obj, driver=None, timeout=5, **kwargs) -> list:
    ''' Return the serialized SVG sources of the SVG-backed plots in ``obj``.

    Args:
        obj (LayoutDOM or Document) : the object to render.
        driver (selenium.webdriver, optional) : webdriver to use; a shared
            one is obtained from ``webdriver_control`` when not given.
        timeout (int) : maximum time (in seconds) to wait for Bokeh to load
            and render.

    Returns:
        list(str) : one serialized SVG string per SVG plot found.
    '''
    with _tmp_html() as tmp:
        # Render to a temp HTML file, load it, then pull the SVG sources
        # out of the DOM once rendering has finished.
        html = get_layout_html(obj, **kwargs)
        with io.open(tmp.path, mode="w", encoding="utf-8") as file:
            file.write(html)
        web_driver = driver if driver is not None else webdriver_control.get()
        web_driver.get("file:///" + tmp.path)
        wait_until_render_complete(web_driver, timeout)
        svgs = web_driver.execute_script(_SVG_SCRIPT)
    return svgs
def get_layout_html(obj, resources=INLINE_LEGACY, **kwargs):
    ''' Render ``obj`` to standalone HTML, optionally resizing a Plot first.

    Any ``height``/``width`` kwargs are applied only when ``obj`` is a Plot;
    the original dimensions are restored afterwards.
    '''
    resize = False
    if kwargs.get('height') is not None or kwargs.get('width') is not None:
        # Defer this import, it is expensive
        from ..models.plots import Plot
        if isinstance(obj, Plot):
            resize = True
            old_height = obj.plot_height
            old_width = obj.plot_width
            obj.plot_height = kwargs.get('height', old_height)
            obj.plot_width = kwargs.get('width', old_width)
        else:
            warnings.warn("Export method called with height or width kwargs on a non-Plot layout. The size values will be ignored.")

    try:
        html = file_html(obj, resources, title="", suppress_callback_warning=True, _always_new=True)
    finally:
        # Always restore the original dimensions, even if rendering failed.
        if resize:
            obj.plot_height = old_height
            obj.plot_width = old_width

    return html
def wait_until_render_complete(driver, timeout):
    ''' Block until the Bokeh document loaded in ``driver`` finishes rendering.

    First waits (up to ``timeout`` seconds) for the Bokeh library and a
    document to appear, then installs a render-complete flag and waits for
    it. Browser console warnings/errors are forwarded to the module logger.

    Raises:
        RuntimeError: if Bokeh itself never loads within ``timeout``.
    '''
    from selenium.webdriver.support.ui import WebDriverWait
    from selenium.common.exceptions import TimeoutException

    def is_bokeh_loaded(driver):
        return driver.execute_script('''
            return typeof Bokeh !== "undefined" && Bokeh.documents != null && Bokeh.documents.length != 0
        ''')

    try:
        WebDriverWait(driver, timeout, poll_frequency=0.1).until(is_bokeh_loaded)
    except TimeoutException as e:
        _log_console(driver)
        raise RuntimeError('Bokeh was not loaded in time. Something may have gone wrong.') from e

    # Installs window._bokeh_render_complete, flipped by the 'idle' signal.
    driver.execute_script(_WAIT_SCRIPT)

    def is_bokeh_render_complete(driver):
        return driver.execute_script('return window._bokeh_render_complete;')

    try:
        WebDriverWait(driver, timeout, poll_frequency=0.1).until(is_bokeh_render_complete)
    except TimeoutException:
        # Non-fatal: proceed with whatever has rendered, but warn the user.
        log.warning("The webdriver raised a TimeoutException while waiting for "
                    "a 'bokeh:idle' event to signify that the layout has rendered. "
                    "Something may have gone wrong.")
    finally:
        _log_console(driver)
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
def _log_console(driver):
    ''' Forward any WARNING/ERROR/SEVERE browser console entries to the
    module logger, prefixed by a summary line. '''
    severe_levels = {'WARNING', 'ERROR', 'SEVERE'}
    entries = driver.get_log('browser')
    messages = [
        entry.get("message")
        for entry in entries
        if entry.get('level') in severe_levels
    ]
    if messages:
        log.warning("There were browser warnings and/or errors that may have affected your export")
        for message in messages:
            log.warning(message)
_BOUNDING_RECT_SCRIPT = """
return document.getElementsByClassName('bk-root')[0].children[0].getBoundingClientRect()
"""
_SVG_SCRIPT = """
var serialized_svgs = [];
var svgs = document.getElementsByClassName('bk-root')[0].getElementsByTagName("svg");
for (var i = 0; i < svgs.length; i++) {
var source = (new XMLSerializer()).serializeToString(svgs[i]);
serialized_svgs.push(source);
};
return serialized_svgs
"""
_WAIT_SCRIPT = """
// add private window prop to check that render is complete
window._bokeh_render_complete = false;
function done() {
window._bokeh_render_complete = true;
}
var doc = window.Bokeh.documents[0];
if (doc.is_idle)
done();
else
doc.idle.connect(done);
"""
def _crop_image(image, left=0, top=0, right=0, bottom=0, **kwargs):
    ''' Return ``image`` cropped to the given box; extra kwargs (e.g. other
    DOMRect fields) are accepted and ignored. '''
    box = (left, top, right, bottom)
    return image.crop(box)
class _TempFile(object):
_closed = False
def __init__(self, prefix="tmp", suffix=""):
self.fd, self.path = mkstemp(prefix=prefix, suffix=suffix)
def __enter__(self):
return self
def __exit__(self, exc, value, tb):
self.close()
def __del__(self):
self.close()
def close(self):
if self._closed:
return
try:
os.close(self.fd)
except (OSError, IOError):
pass
finally:
self.fd = None
try:
os.unlink(self.path)
except (OSError, IOError):
pass
finally:
self.path = None
self._closed = True
def _tmp_html():
    # Self-deleting temporary .html file used to hand rendered output to the browser.
    return _TempFile(prefix="bokeh", suffix=".html")
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| 31.877863 | 132 | 0.587324 |
79563789ff9cd5a2c587eb145faba96ff3283ada | 3,132 | py | Python | 5.3 - Data Collection/forum/forum_scraping/forum_scraping/settings.py | dearden/thesis_flat_earth | 2604a140ccce266d81c7d383e6e66d166e0d67f3 | [
"MIT"
] | null | null | null | 5.3 - Data Collection/forum/forum_scraping/forum_scraping/settings.py | dearden/thesis_flat_earth | 2604a140ccce266d81c7d383e6e66d166e0d67f3 | [
"MIT"
] | null | null | null | 5.3 - Data Collection/forum/forum_scraping/forum_scraping/settings.py | dearden/thesis_flat_earth | 2604a140ccce266d81c7d383e6e66d166e0d67f3 | [
"MIT"
] | null | null | null | # Scrapy settings for forum_scraping project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'forum_scraping'
SPIDER_MODULES = ['forum_scraping.spiders']
NEWSPIDER_MODULE = 'forum_scraping.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'forum_scraping (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'forum_scraping.middlewares.ForumScrapingSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'forum_scraping.middlewares.ForumScrapingDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'forum_scraping.pipelines.ForumScrapingPipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
AUTOTHROTTLE_ENABLED = True
# The initial download delay
AUTOTHROTTLE_START_DELAY = 1
# The maximum download delay to be set in case of high latencies
AUTOTHROTTLE_MAX_DELAY = 5
# The average number of requests Scrapy should be sending in parallel to
# each remote server
AUTOTHROTTLE_TARGET_CONCURRENCY = 8.0
# Enable showing throttling stats for every response received:
AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| 35.191011 | 102 | 0.783844 |
79563790a2f3809ab65037b146e4c35d89ac7d24 | 2,148 | py | Python | devicedata/device/tests/test_views.py | sreeks87/devicedata | 530032e949828d5949319c8889183c362133684e | [
"Apache-2.0"
] | null | null | null | devicedata/device/tests/test_views.py | sreeks87/devicedata | 530032e949828d5949319c8889183c362133684e | [
"Apache-2.0"
] | null | null | null | devicedata/device/tests/test_views.py | sreeks87/devicedata | 530032e949828d5949319c8889183c362133684e | [
"Apache-2.0"
] | null | null | null | from django.test import TestCase
from django.urls import reverse
from device.models import Device
class DeviceAPIViewTest(TestCase):
    """Endpoint tests for the device API exposed at the 'deviceapi' URL."""

    @classmethod
    def setUpTestData(cls):
        # Seed one reading so the GET endpoint has data to return.
        Device.objects.create(device_id="1dae7442-a8ac-47d8-83f9-70672cdb0694",customer_id="1dae7442-a8ac-47d8-83f9-70672cdb0694",timestamp="2021-11-20T14:25:00Z",reading="158.323")

    def test_get(self):
        """GET returns 200 with the seeded data."""
        data = self.client.get(reverse('deviceapi'))
        self.assertEqual(data.status_code, 200)

    def test_post(self):
        """A well-formed reading payload is accepted with 201."""
        data = self.client.post(reverse('deviceapi'), [{
            "device_id": "1dae7442-a8ac-47d8-83f9-70672cdb0694",
            "customer_id": "5866c3e9-89cf-4955-b7cc-b39762af5d0c",
            "timestamp": "2021-11-20T14:25:00Z",
            "reading": "56.32"
        }], follow=True, content_type="application/json")
        self.assertEqual(data.status_code, 201)

    def test_post400(self):
        """Empty device_id / malformed customer_id is rejected with 400."""
        data = self.client.post(reverse('deviceapi'), [{
            "device_id": "",
            "customer_id": "wewqfwfwf",
            "timestamp": "2021-11-20T14:25:00Z",
            "reading": "56.32"
        }], follow=True, content_type="application/json")
        self.assertEqual(data.status_code, 400)

    def test_post400_reading(self):
        """Empty reading is rejected with 400."""
        data = self.client.post(reverse('deviceapi'), [{
            "device_id": "1dae7442-a8ac-47d8-83f9-70672cdb0694",
            "customer_id": "1dae7442-a8ac-47d8-83f9-70672cdb0694",
            "timestamp": "2021-11-20T14:25:00Z",
            "reading": ""
        }], follow=True, content_type="application/json")
        self.assertEqual(data.status_code, 400)

    def test_post400_timestamp(self):
        """Empty timestamp is rejected with 400.

        This method was previously also named ``test_post400_reading``,
        silently shadowing the empty-reading test above so it never ran.
        """
        data = self.client.post(reverse('deviceapi'), [{
            "device_id": "1dae7442-a8ac-47d8-83f9-70672cdb0694",
            "customer_id": "1dae7442-a8ac-47d8-83f9-70672cdb0694",
            "timestamp": "",
            "reading": "53.65"
        }], follow=True, content_type="application/json")
        self.assertEqual(data.status_code, 400)
795637beaced1953cda120e9a2eed7d31928fcd1 | 1,001 | py | Python | contrib/Phidgets/PhidgetLibrary.py | nmeier/simscript | 87a2beb2694b754156730de92159d2f4425bd057 | [
"MIT"
] | 10 | 2015-03-16T14:28:36.000Z | 2021-12-21T02:04:14.000Z | libs/python-phidget/build/lib.linux-i686-2.7/Phidgets/PhidgetLibrary.py | trevstanhope/cv-drive | 03a59a3debee7b9d3df45b5277cb621bed291554 | [
"MIT"
] | 4 | 2016-10-17T14:17:39.000Z | 2021-09-18T07:09:55.000Z | libs/python-phidget/build/lib.linux-i686-2.7/Phidgets/PhidgetLibrary.py | trevstanhope/cv-drive | 03a59a3debee7b9d3df45b5277cb621bed291554 | [
"MIT"
] | 7 | 2016-10-05T03:25:00.000Z | 2021-07-30T07:52:09.000Z | """Copyright 2010 Phidgets Inc.
This work is licensed under the Creative Commons Attribution 2.5 Canada License.
To view a copy of this license, visit http://creativecommons.org/licenses/by/2.5/ca/
"""
__author__ = 'Adam Stelmack'
__version__ = '2.1.8'
__date__ = 'May 17 2010'
import threading
from ctypes import *
import sys
class PhidgetLibrary:
    """Lazily loads and caches the Phidget21 native library for this platform."""

    __dll = None  # cached ctypes library handle, shared process-wide

    @staticmethod
    def getDll():
        """Return the Phidget21 native library, loading it on first use.

        Raises:
            RuntimeError: if the current platform is not supported.
        """
        if PhidgetLibrary.__dll is None:
            if sys.platform == 'win32':
                PhidgetLibrary.__dll = windll.LoadLibrary("phidget21.dll")
            elif sys.platform == 'darwin':
                PhidgetLibrary.__dll = cdll.LoadLibrary("/Library/Frameworks/Phidget21.framework/Versions/Current/Phidget21")
            elif sys.platform.startswith('linux'):
                # Python 2 reports 'linux2', Python 3 reports 'linux';
                # the prefix test covers both.
                PhidgetLibrary.__dll = cdll.LoadLibrary("libphidget21.so.0")
            else:
                raise RuntimeError("Platform not supported")

        return PhidgetLibrary.__dll
795637f2ac8ee05e7a72fea28477b820bb7f5fc0 | 53,851 | py | Python | python/ccxt/binance.py | OliverNChalk/ccxt | fcf55e88f3523d2969f905cbed3b4deec1433a5e | [
"MIT"
] | null | null | null | python/ccxt/binance.py | OliverNChalk/ccxt | fcf55e88f3523d2969f905cbed3b4deec1433a5e | [
"MIT"
] | null | null | null | python/ccxt/binance.py | OliverNChalk/ccxt | fcf55e88f3523d2969f905cbed3b4deec1433a5e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
import math
import json
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import InvalidNonce
from ccxt.base.decimal_to_precision import ROUND
class binance (Exchange):
    def describe(self):
        """Return Binance's static exchange description (capabilities, URLs,
        API endpoint lists, fee schedule, exchange-specific options, and the
        error-code -> exception mapping), merged over the base Exchange
        description via ``deep_extend``."""
        return self.deep_extend(super(binance, self).describe(), {
            'id': 'binance',
            'name': 'Binance',
            'countries': ['JP', 'MT'],  # Japan, Malta
            'rateLimit': 500,
            'certified': True,
            # new metainfo interface
            'has': {
                'fetchDepositAddress': True,
                'CORS': False,
                'fetchBidsAsks': True,
                'fetchTickers': True,
                'fetchOHLCV': True,
                'fetchMyTrades': True,
                'fetchOrder': True,
                'fetchOrders': True,
                'fetchOpenOrders': True,
                'fetchClosedOrders': True,
                'withdraw': True,
                'fetchFundingFees': True,
                'fetchDeposits': True,
                'fetchWithdrawals': True,
                'fetchTransactions': False,
            },
            'timeframes': {
                '1m': '1m',
                '3m': '3m',
                '5m': '5m',
                '15m': '15m',
                '30m': '30m',
                '1h': '1h',
                '2h': '2h',
                '4h': '4h',
                '6h': '6h',
                '8h': '8h',
                '12h': '12h',
                '1d': '1d',
                '3d': '3d',
                '1w': '1w',
                '1M': '1M',
            },
            'urls': {
                'logo': 'https://user-images.githubusercontent.com/1294454/29604020-d5483cdc-87ee-11e7-94c7-d1a8d9169293.jpg',
                'api': {
                    'web': 'https://www.binance.com',
                    'wapi': 'https://api.binance.com/wapi/v3',
                    'sapi': 'https://api.binance.com/sapi/v1',
                    'public': 'https://api.binance.com/api/v1',
                    'private': 'https://api.binance.com/api/v3',
                    'v3': 'https://api.binance.com/api/v3',
                    'v1': 'https://api.binance.com/api/v1',
                },
                'www': 'https://www.binance.com',
                'referral': 'https://www.binance.com/?ref=10205187',
                'doc': [
                    'https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md',
                    'https://github.com/binance-exchange/binance-official-api-docs/blob/master/wapi-api.md',
                ],
                'fees': 'https://www.binance.com/en/fee/schedule',
            },
            # Endpoint names grouped by API family and HTTP verb; ccxt
            # generates methods like publicGetDepth from these entries.
            'api': {
                'web': {
                    'get': [
                        'exchange/public/product',
                        'assetWithdraw/getAllAsset.html',
                    ],
                },
                'sapi': {
                    'get': [
                        'asset/assetDividend',
                    ],
                    'post': [
                        'asset/dust',
                    ],
                },
                'wapi': {
                    'post': [
                        'withdraw',
                        'sub-account/transfer',
                    ],
                    'get': [
                        'depositHistory',
                        'withdrawHistory',
                        'depositAddress',
                        'accountStatus',
                        'systemStatus',
                        'apiTradingStatus',
                        'userAssetDribbletLog',
                        'tradeFee',
                        'assetDetail',
                        'sub-account/list',
                        'sub-account/transfer/history',
                        'sub-account/assets',
                    ],
                },
                'v3': {
                    'get': [
                        'ticker/price',
                        'ticker/bookTicker',
                    ],
                },
                'public': {
                    'get': [
                        'ping',
                        'time',
                        'depth',
                        'trades',
                        'aggTrades',
                        'historicalTrades',
                        'klines',
                        'ticker/24hr',
                        'ticker/allPrices',
                        'ticker/allBookTickers',
                        'ticker/price',
                        'ticker/bookTicker',
                        'exchangeInfo',
                    ],
                    'put': ['userDataStream'],
                    'post': ['userDataStream'],
                    'delete': ['userDataStream'],
                },
                'private': {
                    'get': [
                        'order',
                        'openOrders',
                        'allOrders',
                        'account',
                        'myTrades',
                    ],
                    'post': [
                        'order',
                        'order/test',
                    ],
                    'delete': [
                        'order',
                    ],
                },
            },
            'fees': {
                'trading': {
                    'tierBased': False,
                    'percentage': True,
                    'taker': 0.001,
                    'maker': 0.001,
                },
            },
            'commonCurrencies': {
                'BCC': 'BCC',  # kept for backward-compatibility https://github.com/ccxt/ccxt/issues/4848
                'YOYO': 'YOYOW',
            },
            # exchange-specific options
            'options': {
                'fetchTradesMethod': 'publicGetAggTrades',
                'fetchTickersMethod': 'publicGetTicker24hr',
                'defaultTimeInForce': 'GTC',  # 'GTC' = Good To Cancel(default), 'IOC' = Immediate Or Cancel
                'defaultLimitOrderType': 'limit',  # or 'limit_maker'
                'hasAlreadyAuthenticatedSuccessfully': False,
                'warnOnFetchOpenOrdersWithoutSymbol': True,
                'recvWindow': 5 * 1000,  # 5 sec, binance default
                'timeDifference': 0,  # the difference between system clock and Binance clock
                'adjustForTimeDifference': False,  # controls the adjustment logic upon instantiation
                'parseOrderToPrecision': False,  # force amounts and costs in parseOrder to precision
                'newOrderRespType': {
                    'market': 'FULL',  # 'ACK' for order id, 'RESULT' for full order or 'FULL' for order with fills
                    'limit': 'RESULT',  # we change it from 'ACK' by default to 'RESULT'
                },
            },
            # Map of Binance error messages / numeric codes to ccxt exceptions.
            'exceptions': {
                'API key does not exist': AuthenticationError,
                'Order would trigger immediately.': InvalidOrder,
                'Account has insufficient balance for requested action.': InsufficientFunds,
                'Rest API trading is not enabled.': ExchangeNotAvailable,
                '-1000': ExchangeNotAvailable,  # {"code":-1000,"msg":"An unknown error occured while processing the request."}
                '-1013': InvalidOrder,  # createOrder -> 'invalid quantity'/'invalid price'/MIN_NOTIONAL
                '-1021': InvalidNonce,  # 'your time is ahead of server'
                '-1022': AuthenticationError,  # {"code":-1022,"msg":"Signature for self request is not valid."}
                '-1100': InvalidOrder,  # createOrder(symbol, 1, asdf) -> 'Illegal characters found in parameter 'price'
                '-1104': ExchangeError,  # Not all sent parameters were read, read 8 parameters but was sent 9
                '-1128': ExchangeError,  # {"code":-1128,"msg":"Combination of optional parameters invalid."}
                '-2010': ExchangeError,  # generic error code for createOrder -> 'Account has insufficient balance for requested action.', {"code":-2010,"msg":"Rest API trading is not enabled."}, etc...
                '-2011': OrderNotFound,  # cancelOrder(1, 'BTC/USDT') -> 'UNKNOWN_ORDER'
                '-2013': OrderNotFound,  # fetchOrder(1, 'BTC/USDT') -> 'Order does not exist'
                '-2014': AuthenticationError,  # {"code":-2014, "msg": "API-key format invalid."}
                '-2015': AuthenticationError,  # "Invalid API-key, IP, or permissions for action."
            },
        })
def nonce(self):
    """Return the request nonce: the current millisecond timestamp shifted by
    the cached local/server clock offset (see load_time_difference)."""
    offset = self.options['timeDifference']
    return self.milliseconds() - offset
def load_time_difference(self):
    """Measure the difference between the local clock and the exchange server
    clock (in ms), cache it in options['timeDifference'], and return it."""
    server_response = self.publicGetTime()
    local_now = self.milliseconds()
    difference = int(local_now - server_response['serverTime'])
    self.options['timeDifference'] = difference
    return difference
def fetch_markets(self, params={}):
    """Fetch all symbols from GET /api/v1/exchangeInfo and convert them into
    unified ccxt market structures (id, symbol, precision, limits, active).

    :param dict params: extra parameters merged into the request
    :returns list: one market dict per tradable symbol
    """
    response = self.publicGetExchangeInfo(params)
    if self.options['adjustForTimeDifference']:
        # resynchronize the local/server clock offset used for signed requests
        self.load_time_difference()
    markets = self.safe_value(response, 'symbols')
    result = []
    for i in range(0, len(markets)):
        market = markets[i]
        id = self.safe_string(market, 'symbol')
        # "123456" is a "test symbol/market"
        if id == '123456':
            continue
        baseId = market['baseAsset']
        quoteId = market['quoteAsset']
        base = self.safe_currency_code(baseId)
        quote = self.safe_currency_code(quoteId)
        symbol = base + '/' + quote
        # key the exchange's filter list by filterType for the lookups below
        filters = self.index_by(market['filters'], 'filterType')
        precision = {
            'base': market['baseAssetPrecision'],
            'quote': market['quotePrecision'],
            'amount': market['baseAssetPrecision'],
            'price': market['quotePrecision'],
        }
        status = self.safe_string(market, 'status')
        active = (status == 'TRADING')
        entry = {
            'id': id,
            'symbol': symbol,
            'base': base,
            'quote': quote,
            'baseId': baseId,
            'quoteId': quoteId,
            'info': market,
            'active': active,
            'precision': precision,
            'limits': {
                'amount': {
                    # smallest representable amount at the advertised precision
                    'min': math.pow(10, -precision['amount']),
                    'max': None,
                },
                'price': {
                    'min': None,
                    'max': None,
                },
                'cost': {
                    # NOTE(review): -1 * log10(precision) looks suspicious as a
                    # min-cost default - confirm against upstream before relying on it
                    'min': -1 * math.log10(precision['amount']),
                    'max': None,
                },
            },
        }
        if 'PRICE_FILTER' in filters:
            filter = filters['PRICE_FILTER']
            # PRICE_FILTER reports zero values for maxPrice
            # since they updated filter types in November 2018
            # https://github.com/ccxt/ccxt/issues/4286
            # therefore limits['price']['max'] doesn't have any meaningful value except None
            entry['limits']['price'] = {
                'min': self.safe_float(filter, 'minPrice'),
                'max': None,
            }
            maxPrice = self.safe_float(filter, 'maxPrice')
            if (maxPrice is not None) and (maxPrice > 0):
                entry['limits']['price']['max'] = maxPrice
            entry['precision']['price'] = self.precision_from_string(filter['tickSize'])
        if 'LOT_SIZE' in filters:
            filter = self.safe_value(filters, 'LOT_SIZE', {})
            stepSize = self.safe_string(filter, 'stepSize')
            entry['precision']['amount'] = self.precision_from_string(stepSize)
            entry['limits']['amount'] = {
                'min': self.safe_float(filter, 'minQty'),
                'max': self.safe_float(filter, 'maxQty'),
            }
        if 'MIN_NOTIONAL' in filters:
            entry['limits']['cost']['min'] = self.safe_float(filters['MIN_NOTIONAL'], 'minNotional')
        result.append(entry)
    return result
def calculate_fee(self, symbol, type, side, amount, price, takerOrMaker='taker', params={}):
    """Estimate the trading fee for an order.

    Sells pay the fee in the quote currency (rate * amount * price);
    buys pay it in the base currency (rate * amount).
    """
    market = self.markets[symbol]
    rate = market[takerOrMaker]
    fee_cost = amount * rate
    if side == 'sell':
        currency_key = 'quote'
        digits = market['precision']['price']
        fee_cost *= price
    else:
        currency_key = 'base'
        digits = market['precision']['amount']
    rounded = self.decimal_to_precision(fee_cost, ROUND, digits, self.precisionMode)
    return {
        'type': takerOrMaker,
        'currency': market[currency_key],
        'rate': rate,
        'cost': float(rounded),
    }
def fetch_balance(self, params={}):
    """Fetch account balances from GET /api/v3/account and return the unified
    balance structure ('free' = available, 'used' = locked in orders)."""
    self.load_markets()
    response = self.privateGetAccount(params)
    result = {'info': response}
    for balance in self.safe_value(response, 'balances', []):
        code = self.safe_currency_code(balance['asset'])
        account = self.account()
        account['free'] = self.safe_float(balance, 'free')
        account['used'] = self.safe_float(balance, 'locked')
        result[code] = account
    return self.parse_balance(result)
def fetch_order_book(self, symbol, limit=None, params={}):
    """Fetch the order book for a symbol via GET /api/v1/depth."""
    self.load_markets()
    request = {'symbol': self.market(symbol)['id']}
    if limit is not None:
        request['limit'] = limit  # default = maximum = 100
    response = self.publicGetDepth(self.extend(request, params))
    book = self.parse_order_book(response)
    # expose the exchange-side sequence number so callers can resync streams
    book['nonce'] = self.safe_integer(response, 'lastUpdateId')
    return book
def parse_ticker(self, ticker, market=None):
    """Convert a raw Binance 24hr ticker into the unified ccxt ticker dict."""
    timestamp = self.safe_integer(ticker, 'closeTime')
    last_price = self.safe_float(ticker, 'lastPrice')
    return {
        'symbol': self.find_symbol(self.safe_string(ticker, 'symbol'), market),
        'timestamp': timestamp,
        'datetime': self.iso8601(timestamp),
        'high': self.safe_float(ticker, 'highPrice'),
        'low': self.safe_float(ticker, 'lowPrice'),
        'bid': self.safe_float(ticker, 'bidPrice'),
        'bidVolume': self.safe_float(ticker, 'bidQty'),
        'ask': self.safe_float(ticker, 'askPrice'),
        'askVolume': self.safe_float(ticker, 'askQty'),
        'vwap': self.safe_float(ticker, 'weightedAvgPrice'),
        'open': self.safe_float(ticker, 'openPrice'),
        # Binance reports only the last price; unified 'close' mirrors it
        'close': last_price,
        'last': last_price,
        'previousClose': self.safe_float(ticker, 'prevClosePrice'),  # previous day close
        'change': self.safe_float(ticker, 'priceChange'),
        'percentage': self.safe_float(ticker, 'priceChangePercent'),
        'average': None,
        'baseVolume': self.safe_float(ticker, 'volume'),
        'quoteVolume': self.safe_float(ticker, 'quoteVolume'),
        'info': ticker,
    }
def fetch_status(self, params={}):
    """Fetch the exchange system status from wapi GET /systemStatus.html and
    cache it on self.status.

    Binance returns {"status": 0} when the system is normal and a non-zero
    status during maintenance.

    Bug fix: the previous expression `status == 'ok' if 0 else 'maintenance'`
    parsed as `(status == 'ok') if 0 else 'maintenance'`, so the literal falsy
    `0` condition made it evaluate to 'maintenance' unconditionally. The
    intended mapping is 0 -> 'ok', anything else -> 'maintenance'.

    :param dict params: extra parameters merged into the request (now forwarded,
        consistent with the other fetchers; backward-compatible)
    :returns dict: the cached self.status structure
    """
    response = self.wapiGetSystemStatus(params)
    status = self.safe_value(response, 'status')
    if status is not None:
        self.status = self.extend(self.status, {
            'status': 'ok' if (status == 0) else 'maintenance',
            'updated': self.milliseconds(),
        })
    return self.status
def fetch_ticker(self, symbol, params={}):
    """Fetch the 24hr ticker for one symbol via GET /api/v1/ticker/24hr."""
    self.load_markets()
    market = self.market(symbol)
    request = self.extend({'symbol': market['id']}, params)
    return self.parse_ticker(self.publicGetTicker24hr(request), market)
def parse_tickers(self, rawTickers, symbols=None):
    """Parse a list of raw tickers, optionally filtering to the given symbols."""
    parsed = [self.parse_ticker(raw) for raw in rawTickers]
    return self.filter_by_array(parsed, 'symbol', symbols)
def fetch_bids_asks(self, symbols=None, params={}):
    """Fetch best bid/ask for all symbols via GET /api/v3/ticker/bookTicker."""
    self.load_markets()
    raw = self.publicGetTickerBookTicker(params)
    return self.parse_tickers(raw, symbols)
def fetch_tickers(self, symbols=None, params={}):
    """Fetch 24hr tickers for all symbols; the endpoint is selected by
    options['fetchTickersMethod'] (dynamic dispatch by method name)."""
    self.load_markets()
    fetcher = getattr(self, self.options['fetchTickersMethod'])
    return self.parse_tickers(fetcher(params), symbols)
def parse_ohlcv(self, ohlcv, market=None, timeframe='1m', since=None, limit=None):
    """Convert a raw kline row [t, o, h, l, c, v, ...] into
    [timestamp, open, high, low, close, volume] with float values."""
    timestamp = ohlcv[0]
    return [timestamp] + [float(value) for value in ohlcv[1:6]]
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
    """Fetch candlesticks via GET /api/v1/klines."""
    self.load_markets()
    market = self.market(symbol)
    request = {
        'symbol': market['id'],
        'interval': self.timeframes[timeframe],
    }
    if since is not None:
        request['startTime'] = since
    if limit is not None:
        request['limit'] = limit  # default == max == 500
    response = self.publicGetKlines(self.extend(request, params))
    return self.parse_ohlcvs(response, market, timeframe, since, limit)
def parse_trade(self, trade, market=None):
    """Parse a raw Binance trade (aggregate, public, or private format) into
    the unified ccxt trade structure. Dust-conversion rows are delegated to
    parse_dust_trade.
    """
    if 'isDustTrade' in trade:
        return self.parse_dust_trade(trade, market)
    #
    # aggregate trades
    # https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#compressedaggregate-trades-list
    #
    #     {
    #         "a": 26129,         # Aggregate tradeId
    #         "p": "0.01633102",  # Price
    #         "q": "4.70443515",  # Quantity
    #         "f": 27781,         # First tradeId
    #         "l": 27781,         # Last tradeId
    #         "T": 1498793709153, # Timestamp
    #         "m": True,          # Was the buyer the maker?
    #         "M": True           # Was the trade the best price match?
    #     }
    #
    # recent public trades and old public trades
    # https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#recent-trades-list
    # https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#old-trade-lookup-market_data
    #
    #     {
    #         "id": 28457,
    #         "price": "4.00000100",
    #         "qty": "12.00000000",
    #         "time": 1499865549590,
    #         "isBuyerMaker": True,
    #         "isBestMatch": True
    #     }
    #
    # private trades
    # https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#account-trade-list-user_data
    #
    #     {
    #         "symbol": "BNBBTC",
    #         "id": 28457,
    #         "orderId": 100234,
    #         "price": "4.00000100",
    #         "qty": "12.00000000",
    #         "commission": "10.10000000",
    #         "commissionAsset": "BNB",
    #         "time": 1499865549590,
    #         "isBuyer": True,
    #         "isMaker": False,
    #         "isBestMatch": True
    #     }
    #
    # the *_2 helpers try the short (aggregate) key first, then the long key
    timestamp = self.safe_integer_2(trade, 'T', 'time')
    price = self.safe_float_2(trade, 'p', 'price')
    amount = self.safe_float_2(trade, 'q', 'qty')
    id = self.safe_string_2(trade, 'a', 'id')
    side = None
    orderId = self.safe_string(trade, 'orderId')
    if 'm' in trade:
        # buyer-is-maker implies the taker sold ("self" is a JS->Py transpile
        # artifact for "this")
        side = 'sell' if trade['m'] else 'buy'  # self is reversed intentionally
    elif 'isBuyerMaker' in trade:
        side = 'sell' if trade['isBuyerMaker'] else 'buy'
    else:
        if 'isBuyer' in trade:
            side = 'buy' if (trade['isBuyer']) else 'sell'  # self is a True side
    fee = None
    if 'commission' in trade:
        fee = {
            'cost': self.safe_float(trade, 'commission'),
            'currency': self.safe_currency_code(self.safe_string(trade, 'commissionAsset')),
        }
    takerOrMaker = None
    if 'isMaker' in trade:
        takerOrMaker = 'maker' if trade['isMaker'] else 'taker'
    symbol = None
    if market is None:
        marketId = self.safe_string(trade, 'symbol')
        market = self.safe_value(self.markets_by_id, marketId)
    if market is not None:
        symbol = market['symbol']
    return {
        'info': trade,
        'timestamp': timestamp,
        'datetime': self.iso8601(timestamp),
        'symbol': symbol,
        'id': id,
        'order': orderId,
        'type': None,
        'takerOrMaker': takerOrMaker,
        'side': side,
        # NOTE(review): price * amount raises TypeError when either is
        # missing from the raw trade - confirm all callers guarantee both
        'cost': price * amount,
        'fee': fee,
        'price': price,
        'amount': amount,
    }
def fetch_trades(self, symbol, since=None, limit=None, params={}):
    """Fetch public trades for a symbol. The endpoint is chosen by
    options['fetchTradesMethod'] (aggregate trades by default config,
    otherwise plain trades); aggregate requests get a 1-hour window when
    'since' is given.
    """
    self.load_markets()
    market = self.market(symbol)
    request = {
        'symbol': market['id'],
        # 'fromId': 123,    # ID to get aggregate trades from INCLUSIVE.
        # 'startTime': 456, # Timestamp in ms to get aggregate trades from INCLUSIVE.
        # 'endTime': 789,   # Timestamp in ms to get aggregate trades until INCLUSIVE.
        # 'limit': 500,     # default = 500, maximum = 1000
    }
    if self.options['fetchTradesMethod'] == 'publicGetAggTrades':
        if since is not None:
            request['startTime'] = since
            # aggTrades requires a bounded window: cap it at one hour past 'since'
            request['endTime'] = self.sum(since, 3600000)
    if limit is not None:
        request['limit'] = limit  # default = 500, maximum = 1000
    #
    # Caveats:
    # - default limit(500) applies only if no other parameters set, trades up
    #   to the maximum limit may be returned to satisfy other parameters
    # - if both limit and time window is set and time window contains more
    #   trades than the limit then the last trades from the window are returned
    # - 'tradeId' accepted and returned by self method is "aggregate" trade id
    #   which is different from actual trade id
    # - setting both fromId and time window results in error
    method = self.safe_value(self.options, 'fetchTradesMethod', 'publicGetTrades')
    response = getattr(self, method)(self.extend(request, params))
    #
    # aggregate trades
    #
    #     [
    #         {
    #             "a": 26129,         # Aggregate tradeId
    #             "p": "0.01633102",  # Price
    #             "q": "4.70443515",  # Quantity
    #             "f": 27781,         # First tradeId
    #             "l": 27781,         # Last tradeId
    #             "T": 1498793709153, # Timestamp
    #             "m": True,          # Was the buyer the maker?
    #             "M": True           # Was the trade the best price match?
    #         }
    #     ]
    #
    # recent public trades and historical public trades
    #
    #     [
    #         {
    #             "id": 28457,
    #             "price": "4.00000100",
    #             "qty": "12.00000000",
    #             "time": 1499865549590,
    #             "isBuyerMaker": True,
    #             "isBestMatch": True
    #         }
    #     ]
    #
    return self.parse_trades(response, market, since, limit)
def parse_order_status(self, status):
    """Map a Binance order status string to the unified ccxt status;
    unrecognized values pass through unchanged."""
    mapping = {
        'NEW': 'open',
        'PARTIALLY_FILLED': 'open',
        'FILLED': 'closed',
        'CANCELED': 'canceled',
        'PENDING_CANCEL': 'canceling',  # currently unused
        'REJECTED': 'rejected',
        'EXPIRED': 'expired',
    }
    return self.safe_string(mapping, status, status)
def parse_order(self, order, market=None):
    """Parse a raw Binance order into the unified ccxt order structure.

    Handles both query responses ('time') and create responses
    ('transactTime'), derives remaining/cost/average where possible, and
    aggregates fee/cost from the 'fills' array when present.
    """
    status = self.parse_order_status(self.safe_string(order, 'status'))
    symbol = self.find_symbol(self.safe_string(order, 'symbol'), market)
    timestamp = None
    if 'time' in order:
        timestamp = self.safe_integer(order, 'time')
    elif 'transactTime' in order:
        timestamp = self.safe_integer(order, 'transactTime')
    price = self.safe_float(order, 'price')
    amount = self.safe_float(order, 'origQty')
    filled = self.safe_float(order, 'executedQty')
    remaining = None
    # Binance spells it 'cummulativeQuoteQty' (sic) - total quote spent/received
    cost = self.safe_float(order, 'cummulativeQuoteQty')
    if filled is not None:
        if amount is not None:
            remaining = amount - filled
            if self.options['parseOrderToPrecision']:
                remaining = float(self.amount_to_precision(symbol, remaining))
            # clamp: precision rounding can produce a tiny negative remainder
            remaining = max(remaining, 0.0)
        if price is not None:
            if cost is None:
                cost = price * filled
    id = self.safe_string(order, 'orderId')
    type = self.safe_string_lower(order, 'type')
    if type == 'market':
        # market orders report price 0.0; recover the average execution price
        if price == 0.0:
            if (cost is not None) and (filled is not None):
                if (cost > 0) and (filled > 0):
                    price = cost / filled
    side = self.safe_string_lower(order, 'side')
    fee = None
    trades = None
    fills = self.safe_value(order, 'fills')
    if fills is not None:
        trades = self.parse_trades(fills, market)
        numTrades = len(trades)
        if numTrades > 0:
            # re-derive cost and total fee by summing over the fills;
            # assumes all fills share one fee currency (true for a single order)
            cost = trades[0]['cost']
            fee = {
                'cost': trades[0]['fee']['cost'],
                'currency': trades[0]['fee']['currency'],
            }
            for i in range(1, len(trades)):
                cost = self.sum(cost, trades[i]['cost'])
                fee['cost'] = self.sum(fee['cost'], trades[i]['fee']['cost'])
    average = None
    if cost is not None:
        if filled:
            average = cost / filled
        if self.options['parseOrderToPrecision']:
            cost = float(self.cost_to_precision(symbol, cost))
    return {
        'info': order,
        'id': id,
        'timestamp': timestamp,
        'datetime': self.iso8601(timestamp),
        'lastTradeTimestamp': None,
        'symbol': symbol,
        'type': type,
        'side': side,
        'price': price,
        'amount': amount,
        'cost': cost,
        'average': average,
        'filled': filled,
        'remaining': remaining,
        'status': status,
        'fee': fee,
        'trades': trades,
    }
def create_order(self, symbol, type, side, amount, price=None, params={}):
    """Place an order via POST /api/v3/order (or .../order/test when
    params['test'] is truthy).

    Required extras depend on the order type: LIMIT and *_LIMIT need a price
    and timeInForce; STOP_LOSS*/TAKE_PROFIT* need params['stopPrice'];
    LIMIT_MAKER needs a price only.

    :raises InvalidOrder: when a required price or stopPrice is missing
    """
    self.load_markets()
    market = self.market(symbol)
    # the next 5 lines are added to support for testing orders
    method = 'privatePostOrder'
    test = self.safe_value(params, 'test', False)
    if test:
        method += 'Test'
        params = self.omit(params, 'test')
    uppercaseType = type.upper()
    # per-type response verbosity, configurable via options['newOrderRespType']
    newOrderRespType = self.safe_value(self.options['newOrderRespType'], type, 'RESULT')
    request = {
        'symbol': market['id'],
        'quantity': self.amount_to_precision(symbol, amount),
        'type': uppercaseType,
        'side': side.upper(),
        'newOrderRespType': newOrderRespType,  # 'ACK' for order id, 'RESULT' for full order or 'FULL' for order with fills
    }
    timeInForceIsRequired = False
    priceIsRequired = False
    stopPriceIsRequired = False
    if uppercaseType == 'LIMIT':
        priceIsRequired = True
        timeInForceIsRequired = True
    elif (uppercaseType == 'STOP_LOSS') or (uppercaseType == 'TAKE_PROFIT'):
        stopPriceIsRequired = True
    elif (uppercaseType == 'STOP_LOSS_LIMIT') or (uppercaseType == 'TAKE_PROFIT_LIMIT'):
        stopPriceIsRequired = True
        priceIsRequired = True
        timeInForceIsRequired = True
    elif uppercaseType == 'LIMIT_MAKER':
        priceIsRequired = True
    if priceIsRequired:
        if price is None:
            raise InvalidOrder(self.id + ' createOrder method requires a price argument for a ' + type + ' order')
        request['price'] = self.price_to_precision(symbol, price)
    if timeInForceIsRequired:
        request['timeInForce'] = self.options['defaultTimeInForce']  # 'GTC' = Good To Cancel(default), 'IOC' = Immediate Or Cancel
    if stopPriceIsRequired:
        stopPrice = self.safe_float(params, 'stopPrice')
        if stopPrice is None:
            raise InvalidOrder(self.id + ' createOrder method requires a stopPrice extra param for a ' + type + ' order')
        else:
            params = self.omit(params, 'stopPrice')
            request['stopPrice'] = self.price_to_precision(symbol, stopPrice)
    response = getattr(self, method)(self.extend(request, params))
    return self.parse_order(response, market)
def fetch_order(self, id, symbol=None, params={}):
    """Fetch a single order by id (or by params['origClientOrderId']).
    Binance requires the symbol, so it is mandatory here."""
    if symbol is None:
        raise ArgumentsRequired(self.id + ' fetchOrder requires a symbol argument')
    self.load_markets()
    market = self.market(symbol)
    request = {'symbol': market['id']}
    client_order_id = self.safe_value(params, 'origClientOrderId')
    if client_order_id is not None:
        request['origClientOrderId'] = client_order_id
    else:
        request['orderId'] = int(id)
    response = self.privateGetOrder(self.extend(request, params))
    return self.parse_order(response, market)
def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
    """Fetch all orders (open, filled, canceled) for one symbol via
    GET /api/v3/allOrders; Binance requires the symbol.

    Each row carries symbol, orderId, clientOrderId, price, origQty,
    executedQty, cummulativeQuoteQty, status, timeInForce, type, side,
    stopPrice, icebergQty, time, updateTime, isWorking.
    """
    if symbol is None:
        raise ArgumentsRequired(self.id + ' fetchOrders requires a symbol argument')
    self.load_markets()
    market = self.market(symbol)
    request = {'symbol': market['id']}
    if since is not None:
        request['startTime'] = since
    if limit is not None:
        request['limit'] = limit
    response = self.privateGetAllOrders(self.extend(request, params))
    return self.parse_orders(response, market, since, limit)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
    """Fetch open orders via GET /api/v3/openOrders.

    Calling it without a symbol is severely rate-limited by Binance, so by
    default this raises a warning unless
    options['warnOnFetchOpenOrdersWithoutSymbol'] is set to False.
    """
    self.load_markets()
    market = None
    request = {}
    if symbol is not None:
        market = self.market(symbol)
        request['symbol'] = market['id']
    elif self.options['warnOnFetchOpenOrdersWithoutSymbol']:
        fetchOpenOrdersRateLimit = int(len(self.symbols) / 2)
        raise ExchangeError(self.id + ' fetchOpenOrders WARNING: fetching open orders without specifying a symbol is rate-limited to one call per ' + str(fetchOpenOrdersRateLimit) + ' seconds. Do not call self method frequently to avoid ban. Set ' + self.id + '.options["warnOnFetchOpenOrdersWithoutSymbol"] = False to suppress self warning message.')
    response = self.privateGetOpenOrders(self.extend(request, params))
    return self.parse_orders(response, market, since, limit)
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
    """Fetch closed orders by pulling all orders and filtering client-side
    (Binance has no dedicated closed-orders endpoint)."""
    all_orders = self.fetch_orders(symbol, since, limit, params)
    return self.filter_by(all_orders, 'status', 'closed')
def cancel_order(self, id, symbol=None, params={}):
    """Cancel an open order by numeric id; Binance requires the symbol too."""
    if symbol is None:
        raise ArgumentsRequired(self.id + ' cancelOrder requires a symbol argument')
    self.load_markets()
    request = self.extend({
        'symbol': self.market(symbol)['id'],
        'orderId': int(id),
        # 'origClientOrderId' could be sent instead of the numeric id
    }, params)
    return self.parse_order(self.privateDeleteOrder(request))
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
    """Fetch the account's own trades for one symbol via GET /api/v3/myTrades.

    Rows carry symbol, id, orderId, price, qty, commission, commissionAsset,
    time, isBuyer, isMaker, isBestMatch.
    """
    if symbol is None:
        raise ArgumentsRequired(self.id + ' fetchMyTrades requires a symbol argument')
    self.load_markets()
    market = self.market(symbol)
    request = {'symbol': market['id']}
    if limit is not None:
        request['limit'] = limit
    response = self.privateGetMyTrades(self.extend(request, params))
    return self.parse_trades(response, market, since, limit)
def fetch_my_dust_trades(self, symbol=None, since=None, limit=None, params={}):
    """Fetch the account's dust-conversion history (small leftovers converted
    to BNB) from the wapi Dust Log and return it as unified trades.
    """
    #
    # Bianance provides an opportunity to trade insignificant(i.e. non-tradable and non-withdrawable)
    # token leftovers(of any asset) into `BNB` coin which in turn can be used to pay trading fees with it.
    # The corresponding trades history is called the `Dust Log` and can be requested via the following end-point:
    # https://github.com/binance-exchange/binance-official-api-docs/blob/master/wapi-api.md#dustlog-user_data
    #
    self.load_markets()
    response = self.wapiGetUserAssetDribbletLog(params)
    # {success:    True,
    #   results: {total:    1,
    #               rows: [{     transfered_total: "1.06468458",
    #                         service_charge_total: "0.02172826",
    #                                      tran_id: 2701371634,
    #                                         logs: [{              tranId:  2701371634,
    #                                                   serviceChargeAmount: "0.00012819",
    #                                                                   uid: "35103861",
    #                                                                amount: "0.8012",
    #                                                           operateTime: "2018-10-07 17:56:07",
    #                                                      transferedAmount: "0.00628141",
    #                                                             fromAsset: "ADA"}],
    #                                 operate_time: "2018-10-07 17:56:06"}]}}
    results = self.safe_value(response, 'results', {})
    rows = self.safe_value(results, 'rows', [])
    # flatten the per-conversion 'logs' and tag each entry so that
    # parse_trade routes it to parse_dust_trade
    data = []
    for i in range(0, len(rows)):
        logs = rows[i]['logs']
        for j in range(0, len(logs)):
            logs[j]['isDustTrade'] = True
            data.append(logs[j])
    trades = self.parse_trades(data, None, since, limit)
    # NOTE(review): since/limit are applied both inside parse_trades and
    # again here - appears redundant but harmless; confirm against upstream
    return self.filter_by_since_limit(trades, since, limit)
def parse_dust_trade(self, trade, market=None):
    """Parse one Dust Log entry into a unified trade.

    If 'BNB/<fromAsset>' is a listed market the conversion is modeled as a
    BNB buy (traded asset is the quote); otherwise as a '<fromAsset>/BNB'
    sell. The fee is added back on top of the earned amount/cost (see the
    warning below).
    """
    # {              tranId:  2701371634,
    #   serviceChargeAmount: "0.00012819",
    #                   uid: "35103861",
    #                amount: "0.8012",
    #           operateTime: "2018-10-07 17:56:07",
    #      transferedAmount: "0.00628141",
    #             fromAsset: "ADA"},
    orderId = self.safe_string(trade, 'tranId')
    timestamp = self.parse8601(self.safe_string(trade, 'operateTime'))
    tradedCurrency = self.safe_currency_code(self.safe_string(trade, 'fromAsset'))
    earnedCurrency = self.currency('BNB')['code']
    applicantSymbol = earnedCurrency + '/' + tradedCurrency
    tradedCurrencyIsQuote = False
    if applicantSymbol in self.markets:
        tradedCurrencyIsQuote = True
    #
    # Warning
    # Binance dust trade `fee` is already excluded from the `BNB` earning reported in the `Dust Log`.
    # So the parser should either set the `fee.cost` to `0` or add it on top of the earned
    # BNB `amount`(or `cost` depending on the trade `side`). The second of the above options
    # is much more illustrative and therefore preferable.
    #
    fee = {
        'currency': earnedCurrency,
        'cost': self.safe_float(trade, 'serviceChargeAmount'),
    }
    symbol = None
    amount = None
    cost = None
    side = None
    if tradedCurrencyIsQuote:
        # buying BNB with the dust asset as quote currency
        symbol = applicantSymbol
        amount = self.sum(self.safe_float(trade, 'transferedAmount'), fee['cost'])
        cost = self.safe_float(trade, 'amount')
        side = 'buy'
    else:
        # selling the dust asset for BNB
        symbol = tradedCurrency + '/' + earnedCurrency
        amount = self.safe_float(trade, 'amount')
        cost = self.sum(self.safe_float(trade, 'transferedAmount'), fee['cost'])
        side = 'sell'
    price = None
    if cost is not None:
        if amount:
            price = cost / amount
    id = None
    type = None
    takerOrMaker = None
    return {
        'id': id,
        'timestamp': timestamp,
        'datetime': self.iso8601(timestamp),
        'symbol': symbol,
        'order': orderId,
        'type': type,
        'takerOrMaker': takerOrMaker,
        'side': side,
        'amount': amount,
        'price': price,
        'cost': cost,
        'fee': fee,
        'info': trade,
    }
def fetch_deposits(self, code=None, since=None, limit=None, params={}):
    """Fetch the deposit history via wapi GET /depositHistory.html.

    Response shape: {success: True, depositList: [{insertTime, amount,
    address, addressTag, txId, asset, status}]}
    """
    self.load_markets()
    request = {}
    currency = None
    if code is not None:
        currency = self.currency(code)
        request['asset'] = currency['id']
    if since is not None:
        request['startTime'] = since
    response = self.wapiGetDepositHistory(self.extend(request, params))
    return self.parseTransactions(response['depositList'], currency, since, limit)
def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
    """Fetch the withdrawal history via wapi GET /withdrawHistory.html.

    Response shape: {success: True, withdrawList: [{amount, address,
    successTime, addressTag, txId, id, asset, applyTime, status}]}
    """
    self.load_markets()
    request = {}
    currency = None
    if code is not None:
        currency = self.currency(code)
        request['asset'] = currency['id']
    if since is not None:
        request['startTime'] = since
    response = self.wapiGetWithdrawHistory(self.extend(request, params))
    return self.parseTransactions(response['withdrawList'], currency, since, limit)
def parse_transaction_status_by_type(self, status, type=None):
    """Map a numeric Binance transaction status to a unified status.

    Deposit and withdrawal codes overlap but mean different things ('1' is
    'ok' for deposits, 'canceled' for withdrawals), so the mapping is keyed
    by transaction type. Unknown codes pass through unchanged; a None type
    returns the status as-is.
    """
    if type is None:
        return status
    statuses = {
        'deposit': {
            '0': 'pending',
            '1': 'ok',
        },
        'withdrawal': {
            '0': 'pending',   # Email Sent
            '1': 'canceled',  # Cancelled(different from 1 = ok in deposits)
            '2': 'pending',   # Awaiting Approval
            '3': 'failed',    # Rejected
            '4': 'pending',   # Processing
            '5': 'failed',    # Failure
            '6': 'ok',        # Completed
        },
    }
    table = statuses[type]
    return table.get(status, status)
def parse_transaction(self, transaction, currency=None):
    """Parse a raw wapi deposit or withdrawal row into the unified ccxt
    transaction structure. The direction is inferred from which timestamp
    field is present: deposits carry 'insertTime', withdrawals 'applyTime'.
    """
    #
    # fetchDeposits
    #      {insertTime:  1517425007000,
    #            amount:  0.3,
    #           address: "0x0123456789abcdef",
    #        addressTag: "",
    #              txId: "0x0123456789abcdef",
    #             asset: "ETH",
    #            status:  1}
    #
    # fetchWithdrawals
    #
    #       {     amount:  14,
    #            address: "0x0123456789abcdef...",
    #        successTime:  1514489710000,
    #         addressTag: "",
    #               txId: "0x0123456789abcdef...",
    #                 id: "0123456789abcdef...",
    #              asset: "ETH",
    #          applyTime:  1514488724000,
    #             status:  6}
    #
    id = self.safe_string(transaction, 'id')
    address = self.safe_string(transaction, 'address')
    tag = self.safe_string(transaction, 'addressTag')  # set but unused
    if tag is not None:
        # normalize the empty-string tag Binance sends to None
        if len(tag) < 1:
            tag = None
    txid = self.safe_value(transaction, 'txId')
    currencyId = self.safe_string(transaction, 'asset')
    code = self.safe_currency_code(currencyId, currency)
    timestamp = None
    insertTime = self.safe_integer(transaction, 'insertTime')
    applyTime = self.safe_integer(transaction, 'applyTime')
    type = self.safe_string(transaction, 'type')
    if type is None:
        # infer direction: only deposits have insertTime, only withdrawals applyTime
        if (insertTime is not None) and (applyTime is None):
            type = 'deposit'
            timestamp = insertTime
        elif (insertTime is None) and (applyTime is not None):
            type = 'withdrawal'
            timestamp = applyTime
    status = self.parse_transaction_status_by_type(self.safe_string(transaction, 'status'), type)
    amount = self.safe_float(transaction, 'amount')
    return {
        'info': transaction,
        'id': id,
        'txid': txid,
        'timestamp': timestamp,
        'datetime': self.iso8601(timestamp),
        'address': address,
        'tag': tag,
        'type': type,
        'amount': amount,
        'currency': code,
        'status': status,
        'updated': None,
        'fee': None,
    }
def fetch_deposit_address(self, code, params={}):
    """Fetch (not create) the deposit address for a currency via wapi
    GET /depositAddress.html; raises if the exchange reports no success."""
    self.load_markets()
    currency = self.currency(code)
    request = self.extend({'asset': currency['id']}, params)
    response = self.wapiGetDepositAddress(request)
    if not self.safe_value(response, 'success'):
        raise InvalidAddress(self.id + ' fetchDepositAddress returned an empty response – create the deposit address in the user settings first.')
    address = self.safe_string(response, 'address')
    tag = self.safe_string(response, 'addressTag')
    self.check_address(address)
    return {
        'currency': code,
        'address': self.check_address(address),
        'tag': tag,
        'info': response,
    }
def fetch_funding_fees(self, codes=None, params={}):
    """Fetch per-currency withdrawal fees from wapi GET /assetDetail.html.

    Response shape:
        {"success": True,
         "assetDetail": {"SKY": {"minWithdrawAmount": "0.02000000",
                                 "depositStatus": True,
                                 "withdrawFee": 0.01,
                                 "withdrawStatus": True}, ...}}

    Deposits are free on Binance, hence the empty 'deposit' map.
    """
    response = self.wapiGetAssetDetail(params)
    detail = self.safe_value(response, 'assetDetail', {})
    withdraw_fees = {}
    for currency_id in list(detail.keys()):
        code = self.safe_currency_code(currency_id)
        withdraw_fees[code] = self.safe_float(detail[currency_id], 'withdrawFee')
    return {
        'withdraw': withdraw_fees,
        'deposit': {},
        'info': response,
    }
def withdraw(self, code, amount, address, tag=None, params={}):
    """Request a withdrawal via wapi POST /withdraw.html."""
    self.check_address(address)
    self.load_markets()
    currency = self.currency(code)
    request = {
        'asset': currency['id'],
        'address': address,
        'amount': float(amount),
        # Binance requires a non-empty 'name' label; reuse a prefix of the address
        'name': address[0:20],
    }
    if tag is not None:
        request['addressTag'] = tag
    response = self.wapiPostWithdraw(self.extend(request, params))
    return {
        'info': response,
        'id': self.safe_string(response, 'id'),
    }
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
    """Build the final URL, headers, and body for a request.

    - wapi paths get an '.html' suffix
    - historicalTrades and userDataStream need only the API key header
    - private/sapi/wapi (except systemStatus) requests get a timestamp,
      recvWindow, and an HMAC signature over the urlencoded query
    """
    url = self.urls['api'][api]
    url += '/' + path
    if api == 'wapi':
        url += '.html'
    userDataStream = (path == 'userDataStream')
    if path == 'historicalTrades':
        # MARKET_DATA endpoint: needs the key but no signature
        headers = {
            'X-MBX-APIKEY': self.apiKey,
        }
    elif userDataStream:
        # v1 special case for userDataStream
        body = self.urlencode(params)
        headers = {
            'X-MBX-APIKEY': self.apiKey,
            'Content-Type': 'application/x-www-form-urlencoded',
        }
    if (api == 'private') or (api == 'sapi') or (api == 'wapi' and path != 'systemStatus'):
        self.check_required_credentials()
        # signed request: timestamp + recvWindow + params, HMAC'd with the secret;
        # the signature must be appended AFTER the signed query string
        query = self.urlencode(self.extend({
            'timestamp': self.nonce(),
            'recvWindow': self.options['recvWindow'],
        }, params))
        signature = self.hmac(self.encode(query), self.encode(self.secret))
        query += '&' + 'signature=' + signature
        headers = {
            'X-MBX-APIKEY': self.apiKey,
        }
        # GET/DELETE (and all wapi) carry the query in the URL, others in the body
        if (method == 'GET') or (method == 'DELETE') or (api == 'wapi'):
            url += '?' + query
        else:
            body = query
            headers['Content-Type'] = 'application/x-www-form-urlencoded'
    else:
        # userDataStream endpoints are public, but POST, PUT, DELETE
        # therefore they don't accept URL query arguments
        # https://github.com/ccxt/ccxt/issues/5224
        if not userDataStream:
            if params:
                url += '?' + self.urlencode(params)
    return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
    """Inspect an HTTP response and raise the appropriate ccxt exception.

    Dispatch order: HTTP 418/429 rate-limit bans first, then legacy
    substring checks on the body, then the 'msg'/'code' fields of a JSON
    error payload against self.exceptions, and finally a generic failure
    when a wapi response reports success == False.
    """
    if (code == 418) or (code == 429):
        # 429 = rate limit exceeded, 418 = IP auto-banned for repeated 429s
        raise DDoSProtection(self.id + ' ' + str(code) + ' ' + reason + ' ' + body)
    # error response in a form: {"code": -1013, "msg": "Invalid quantity."}
    # following block cointains legacy checks against message patterns in "msg" property
    # will switch "code" checks eventually, when we know all of them
    if code >= 400:
        if body.find('Price * QTY is zero or less') >= 0:
            raise InvalidOrder(self.id + ' order cost = amount * price is zero or less ' + body)
        if body.find('LOT_SIZE') >= 0:
            raise InvalidOrder(self.id + ' order amount should be evenly divisible by lot size ' + body)
        if body.find('PRICE_FILTER') >= 0:
            raise InvalidOrder(self.id + ' order price is invalid, i.e. exceeds allowed price precision, exceeds min price or max price limits or is invalid float value in general, use self.price_to_precision(symbol, amount) ' + body)
    if len(body) > 0:
        if body[0] == '{':
            # check success value for wapi endpoints
            # response in format {'msg': 'The coin does not exist.', 'success': True/false}
            success = self.safe_value(response, 'success', True)
            if not success:
                message = self.safe_string(response, 'msg')
                parsedMessage = None
                if message is not None:
                    # wapi sometimes nests a JSON error object inside 'msg'
                    try:
                        parsedMessage = json.loads(message)
                    except Exception as e:
                        # do nothing
                        parsedMessage = None
                    if parsedMessage is not None:
                        response = parsedMessage
            exceptions = self.exceptions
            message = self.safe_string(response, 'msg')
            if message in exceptions:
                ExceptionClass = exceptions[message]
                raise ExceptionClass(self.id + ' ' + message)
            # checks against error codes
            error = self.safe_string(response, 'code')
            if error is not None:
                if error in exceptions:
                    # a workaround for {"code":-2015,"msg":"Invalid API-key, IP, or permissions for action."}
                    # despite that their message is very confusing, it is raised by Binance
                    # on a temporary ban(the API key is valid, but disabled for a while)
                    if (error == '-2015') and self.options['hasAlreadyAuthenticatedSuccessfully']:
                        raise DDoSProtection(self.id + ' temporary banned: ' + body)
                    raise exceptions[error](self.id + ' ' + body)
                else:
                    raise ExchangeError(self.id + ' ' + body)
            if not success:
                raise ExchangeError(self.id + ' ' + body)
def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
    """Perform a request, recording the first successful authenticated call.

    The flag is consumed by handle_errors to tell a temporary ban
    (error -2015 after previous success) apart from bad credentials.
    """
    response = self.fetch2(path, api, method, params, headers, body)
    if api in ('private', 'wapi'):
        self.options['hasAlreadyAuthenticatedSuccessfully'] = True
    return response
| 43.674777 | 355 | 0.494828 |
7956387fe8ade1233a9c477bcf327e74e50f7435 | 1,502 | py | Python | software/serial_read_versions.py | ccimponeriu1996/Haasoscope | 350f8766017e3a97b276ff5982e6519400b3a8c7 | [
"MIT"
] | 89 | 2017-12-04T06:40:47.000Z | 2022-03-21T23:40:32.000Z | software/serial_read_versions.py | ccimponeriu1996/Haasoscope | 350f8766017e3a97b276ff5982e6519400b3a8c7 | [
"MIT"
] | 49 | 2018-05-01T21:01:07.000Z | 2022-02-11T17:10:59.000Z | software/serial_read_versions.py | ccimponeriu1996/Haasoscope | 350f8766017e3a97b276ff5982e6519400b3a8c7 | [
"MIT"
] | 32 | 2018-05-08T17:11:15.000Z | 2022-02-20T08:20:28.000Z | from serial import Serial
from struct import unpack
import time
serialtimeout=1.0
ser=Serial("COM5",1500000,timeout=serialtimeout)
waitlittle = .1 #seconds
#This is a _minimal_ set of example commands needed to send to a board to initialize it properly
ser.write(chr(0)); ser.write(chr(20)) #set board id to 0
time.sleep(waitlittle)
ser.write(chr(135)); ser.write(chr(0)); ser.write(chr(100)); #serialdelaytimerwait of 100
time.sleep(waitlittle)
oldtime=time.time()
boa=0 # board to get ID from
ser.write(chr(30+boa)) #make the next board active (serial_passthrough 0)
ser.write(chr(142)) #request the unique ID
rslt = ser.read(8)
byte_array = unpack('%dB'%len(rslt),rslt) #Convert serial data to array of numbers
uniqueID = ''.join(format(x, '02x') for x in byte_array)
print "got uniqueID",uniqueID,"for board",boa," in",round((time.time()-oldtime)*1000.,2),"ms"
oldtime=time.time()
boa=0 # board to get firmware version from
ser.write(chr(30+boa)) #make the next board active (serial_passthrough 0)
ser.write(chr(147)) #request the firmware version byte
ser.timeout=0.1; rslt = ser.read(1); ser.timeout=serialtimeout # reduce the serial timeout temporarily, since the old firmware versions will return nothing for command 147
byte_array = unpack('%dB'%len(rslt),rslt)
firmwareversion=0
if len(byte_array)>0: firmwareversion=byte_array[0]
print "got firmwareversion",firmwareversion,"for board",boa,"in",round((time.time()-oldtime)*1000.,2),"ms"
ser.close()
| 41.722222 | 172 | 0.73502 |
795638979a9dc6a1784c87b7d29f6bca2eed9cbc | 6,265 | py | Python | mumax3c/tests/test_util.py | ubermag/mumaxc | 73098ada436cb4e16699f33068a8391a679b49e9 | [
"BSD-3-Clause"
] | null | null | null | mumax3c/tests/test_util.py | ubermag/mumaxc | 73098ada436cb4e16699f33068a8391a679b49e9 | [
"BSD-3-Clause"
] | null | null | null | mumax3c/tests/test_util.py | ubermag/mumaxc | 73098ada436cb4e16699f33068a8391a679b49e9 | [
"BSD-3-Clause"
] | null | null | null | import discretisedfield as df
import micromagneticmodel as mm
import numpy as np
import pytest
import mumax3c as mc
def test_mumax3_regions__no_subregion():
mesh = df.Mesh(p1=(0, 0, 0), p2=(2, 2, 2), cell=(1, 1, 1))
system = mm.System(name="test")
system.m = df.Field(mesh, dim=3, value=(0, 0, 1), norm=1)
subregion_values, subregions_dict = mc.scripts.util._identify_subregions(system)
assert np.allclose(subregion_values, 0)
assert len(subregions_dict) == 1
mc.scripts.mumax3_regions(system)
subregions = df.Field.fromfile("mumax3_regions.omf")
assert np.allclose(subregions.array, 0.0)
assert hasattr(system, "region_relator")
assert system.region_relator == {"": [0]}
def test_mumax3_regions__two_subregions():
subregions = {
"r1": df.Region(p1=(0, 0, 0), p2=(2, 2, 1)),
"r2": df.Region(p1=(0, 0, 1), p2=(2, 2, 2)),
}
mesh = df.Mesh(p1=(0, 0, 0), p2=(2, 2, 2), cell=(1, 1, 1), subregions=subregions)
system = mm.System(name="test")
system.m = df.Field(mesh, dim=3, value=(0, 0, 1), norm=1)
subregion_values, subregions_dict = mc.scripts.util._identify_subregions(system)
assert subregion_values[0, 0, 0, 0] == 1
assert subregion_values[1, 1, 1, 0] == 2
assert len(subregions_dict) == 3
assert subregions_dict == {0: "", 1: "r1", 2: "r2"}
mc.scripts.util.mumax3_regions(system)
subregions = df.Field.fromfile("mumax3_regions.omf")
assert np.allclose(np.unique(subregions.array), [0.0, 1.0])
assert hasattr(system, "region_relator")
assert system.region_relator == {"": [], "r1": [0], "r2": [1]}
def test_mumax3_regions__two_subregions_gap_ms():
subregions = {
"r1": df.Region(p1=(0, 0, 0), p2=(2, 2, 1)),
"r2": df.Region(p1=(0, 0, 2), p2=(2, 2, 3)),
}
mesh = df.Mesh(p1=(0, 0, 0), p2=(2, 2, 3), cell=(1, 1, 1), subregions=subregions)
system = mm.System(name="test")
def ms_fun(pos):
x, _, _ = pos
return x
system.m = df.Field(mesh, dim=3, value=(0, 0, 1), norm=ms_fun)
subregion_values, subregions_dict = mc.scripts.util._identify_subregions(system)
assert subregion_values[0, 0, 0, 0] == 1
assert subregion_values[1, 1, 1, 0] == 0
assert subregion_values[1, 1, 2, 0] == 2
assert len(subregions_dict) == 3
assert subregions_dict == {0: "", 1: "r1", 2: "r2"}
mc.scripts.util.mumax3_regions(system)
subregions = df.Field.fromfile("mumax3_regions.omf")
assert np.allclose(np.unique(subregions.array), [0.0, 1.0, 2.0, 3.0, 4.0, 5.0])
assert hasattr(system, "region_relator")
assert system.region_relator == {"": [0, 1], "r1": [2, 3], "r2": [4, 5]}
def ms_fun(pos):
x, _, _ = pos
if x < 1:
return 0
return 1
system.m = df.Field(mesh, dim=3, value=(0, 0, 1), norm=ms_fun)
mc.scripts.util.mumax3_regions(system)
subregions = df.Field.fromfile("mumax3_regions.omf")
assert np.allclose(np.unique(subregions.array), [0, 1, 2, 255])
assert hasattr(system, "region_relator")
assert system.region_relator == {"": [0], "r1": [1], "r2": [2]}
def test_identify_subregions__two_overlaping_subregions():
subregions = {
"r1": df.Region(p1=(0, 0, 0), p2=(2, 2, 1)),
"r2": df.Region(p1=(0, 0, 0), p2=(2, 2, 2)),
}
mesh = df.Mesh(p1=(0, 0, 0), p2=(2, 2, 2), cell=(1, 1, 1), subregions=subregions)
system = mm.System(name="test")
system.m = df.Field(mesh, dim=3, value=(0, 0, 1), norm=1)
subregion_values, subregions_dict = mc.scripts.util._identify_subregions(system)
assert subregion_values[0, 0, 0, 0] == 1
assert subregion_values[1, 1, 1, 0] == 2
assert len(subregions_dict) == 3
mc.scripts.util.mumax3_regions(system)
subregions = df.Field.fromfile("mumax3_regions.omf")
assert np.allclose(np.unique(subregions.array), [0, 1])
assert hasattr(system, "region_relator")
assert system.region_relator == {"": [], "r1": [0], "r2": [1]}
subregions = {
"r1": df.Region(p1=(0, 0, 0), p2=(2, 2, 2)),
"r2": df.Region(p1=(0, 0, 0), p2=(2, 2, 1)),
}
mesh = df.Mesh(p1=(0, 0, 0), p2=(2, 2, 2), cell=(1, 1, 1), subregions=subregions)
system = mm.System(name="test")
system.m = df.Field(mesh, dim=3, value=(0, 0, 1), norm=1)
subregion_values, subregions_dict = mc.scripts.util._identify_subregions(system)
assert subregion_values[0, 0, 0, 0] == 1
assert subregion_values[1, 1, 1, 0] == 1
assert len(subregions_dict) == 3
mc.scripts.util.mumax3_regions(system)
subregions = df.Field.fromfile("mumax3_regions.omf")
assert np.allclose(np.unique(subregions.array), [0])
assert hasattr(system, "region_relator")
assert system.region_relator == {"": [], "r1": [0], "r2": []}
def test_identify_subregions__three_subregions():
subregions = {
"r1": df.Region(p1=(0, 0, 0), p2=(2, 2, 1)),
"r2": df.Region(p1=(0, 0, 1), p2=(2, 2, 2)),
"r3": df.Region(p1=(0, 0, 2), p2=(2, 2, 3)),
}
mesh = df.Mesh(p1=(0, 0, 0), p2=(2, 2, 3), cell=(1, 1, 1), subregions=subregions)
system = mm.System(name="test")
system.m = df.Field(mesh, dim=3, value=(0, 0, 1), norm=1)
subregion_values, subregions_dict = mc.scripts.util._identify_subregions(system)
assert subregion_values[0, 0, 0, 0] == 1
assert subregion_values[1, 1, 1, 0] == 2
assert subregion_values[1, 1, 2, 0] == 3
assert len(subregions_dict) == 4
mc.scripts.util.mumax3_regions(system)
subregions = df.Field.fromfile("mumax3_regions.omf")
assert np.allclose(np.unique(subregions.array), [0, 1, 2])
assert hasattr(system, "region_relator")
assert system.region_relator == {"": [], "r1": [0], "r2": [1], "r3": [2]}
def test_mumax3_regions__too_many_ms():
subregions = {
"r1": df.Region(p1=(0, 0, 0), p2=(200, 2, 1)),
"r2": df.Region(p1=(0, 0, 2), p2=(200, 2, 3)),
}
mesh = df.Mesh(p1=(0, 0, 0), p2=(200, 2, 3), cell=(1, 1, 1), subregions=subregions)
system = mm.System(name="test")
def ms_fun(pos):
x, y, z = pos
return x
system.m = df.Field(mesh, dim=3, value=(0, 0, 1), norm=ms_fun)
with pytest.raises(ValueError):
mc.scripts.mumax3_regions(system)
| 37.51497 | 87 | 0.611652 |
795638d7a09d835e1d98729a288090c94bc2de69 | 2,504 | py | Python | ecosys3D/diagnostics/cfl_monitor.py | cbrockw/ecosys3D | 5e3b125688f571d1a0a2dce4789d7de0831e91c9 | [
"MIT"
] | null | null | null | ecosys3D/diagnostics/cfl_monitor.py | cbrockw/ecosys3D | 5e3b125688f571d1a0a2dce4789d7de0831e91c9 | [
"MIT"
] | null | null | null | ecosys3D/diagnostics/cfl_monitor.py | cbrockw/ecosys3D | 5e3b125688f571d1a0a2dce4789d7de0831e91c9 | [
"MIT"
] | null | null | null | from loguru import logger
from .diagnostic import ecoDiagnostic
from .. import eco_method
from ..distributed import global_max
class CFLMonitor(ecoDiagnostic):
"""Diagnostic monitoring the maximum CFL number of the solution to detect
instabilities.
Writes output to stdout (no binary output).
"""
name = 'cfl_monitor' #:
output_frequency = None # :Frequency (in seconds) in which output is written.
def initialize(self, vs):
pass
def diagnose(self, vs):
pass
@eco_method
def output(self, vs):
"""
check for CFL violation
"""
cfl = global_max(vs, max(
np.max(np.abs(vs.u[2:-2, 2:-2, :, vs.tau]) * vs.maskU[2:-2, 2:-2, :]
/ (vs.cost[np.newaxis, 2:-2, np.newaxis] * vs.dxt[2:-2, np.newaxis, np.newaxis])
* vs.dt_tracer),
np.max(np.abs(vs.v[2:-2, 2:-2, :, vs.tau]) * vs.maskV[2:-2, 2:-2, :]
/ vs.dyt[np.newaxis, 2:-2, np.newaxis] * vs.dt_tracer)
))
wcfl = global_max(vs, np.max(
np.abs(vs.w[2:-2, 2:-2, :, vs.tau]) * vs.maskW[2:-2, 2:-2, :]
/ vs.dzt[np.newaxis, np.newaxis, :] * vs.dt_tracer
))
if np.isnan(cfl) or np.isnan(wcfl):
raise RuntimeError('CFL number is NaN at iteration {}'.format(vs.itt))
logger.warning(' Maximal hor. CFL number = {}'.format(float(cfl)))
logger.warning(' Maximal ver. CFL number = {}'.format(float(wcfl)))
if vs.enable_eke or vs.enable_tke or vs.enable_idemix:
cfl = global_max(vs, max(
np.max(np.abs(vs.u_wgrid[2:-2, 2:-2, :]) * vs.maskU[2:-2, 2:-2, :]
/ (vs.cost[np.newaxis, 2:-2, np.newaxis] * vs.dxt[2:-2, np.newaxis, np.newaxis])
* vs.dt_tracer),
np.max(np.abs(vs.v_wgrid[2:-2, 2:-2, :]) * vs.maskV[2:-2, 2:-2, :]
/ vs.dyt[np.newaxis, 2:-2, np.newaxis] * vs.dt_tracer)
))
wcfl = global_max(vs, np.max(
np.abs(vs.w_wgrid[2:-2, 2:-2, :]) * vs.maskW[2:-2, 2:-2, :]
/ vs.dzt[np.newaxis, np.newaxis, :] * vs.dt_tracer
))
logger.warning(' Maximal hor. CFL number on w grid = {}'.format(float(cfl)))
logger.warning(' Maximal ver. CFL number on w grid = {}'.format(float(wcfl)))
def read_restart(self, vs, infile):
pass
def write_restart(self, vs, outfile):
pass
| 37.939394 | 103 | 0.529153 |
79563acc36538e02e3d35089a07c6d5a72793a61 | 4,248 | py | Python | sparseconvnet/submanifoldConvolution.py | THU-luvision/Occuseg | 163e1fba6f5d9afd4ee2a4202118bc81d8f7c5e4 | [
"BSD-3-Clause"
] | 1 | 2022-03-29T18:26:11.000Z | 2022-03-29T18:26:11.000Z | sparseconvnet/submanifoldConvolution.py | THU-luvision/Occuseg | 163e1fba6f5d9afd4ee2a4202118bc81d8f7c5e4 | [
"BSD-3-Clause"
] | null | null | null | sparseconvnet/submanifoldConvolution.py | THU-luvision/Occuseg | 163e1fba6f5d9afd4ee2a4202118bc81d8f7c5e4 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# 'SubmanifoldConvolution == SubmanifoldConvolution'
import sparseconvnet
import sparseconvnet.SCN
from torch.autograd import Function
from torch.nn import Module, Parameter
from .utils import *
from .sparseConvNetTensor import SparseConvNetTensor
# import gperftools_wrapped as gperf
class SubmanifoldConvolution(Module):
def __init__(self, dimension, nIn, nOut, filter_size, bias, dilated_rate = 1):
Module.__init__(self)
self.dimension = dimension
self.nIn = nIn
self.nOut = nOut
self.filter_size = toLongTensor(dimension, filter_size)
self.filter_volume = self.filter_size.prod().item()
std = (2.0 / nIn / self.filter_volume)**0.5
self.weight = Parameter(torch.Tensor(
self.filter_volume, nIn, nOut
).normal_(0, std))
self.dilated_rate = dilated_rate
if bias:
self.bias = Parameter(torch.Tensor(nOut).zero_())
self.filename_count = 0
def forward(self, input):
assert input.features.nelement() == 0 or input.features.size(1) == self.nIn, (self.nIn, self.nOut, input)
output = SparseConvNetTensor()
output.metadata = input.metadata
output.spatial_size = input.spatial_size
# gperf.ProfStart(b'gperf_results/gperftools_output_%d.prof' % self.filename_count)
output.features = SubmanifoldConvolutionFunction.apply(
input.features,
self.weight,
optionalTensor(self, 'bias'),
input.metadata,
input.spatial_size,
self.dimension,
self.filter_size,
self.dilated_rate )
return output
def __repr__(self):
s = 'SubmanifoldConvolution ' + \
str(self.nIn) + '->' + str(self.nOut) + ' C'
if self.filter_size.max() == self.filter_size.min():
s = s + str(self.filter_size[0].item())
else:
s = s + '(' + str(self.filter_size[0].item())
for i in self.filter_size[1:]:
s = s + ',' + str(i.item())
s = s + ')'
return s
def input_spatial_size(self, out_size):
return out_size
class ValidConvolution(SubmanifoldConvolution):
pass
class SubmanifoldConvolutionFunction(Function):
@staticmethod
def forward(
ctx,
input_features,
weight,
bias,
input_metadata,
spatial_size,
dimension,
filter_size,
dilated_rate = 1):
ctx.input_metadata = input_metadata
ctx.dimension = dimension
ctx.dilated_rate = dilated_rate
output_features = input_features.new()
ctx.save_for_backward(
input_features,
spatial_size,
weight,
bias,
filter_size)
sparseconvnet.forward_pass_multiplyAdd_count +=\
sparseconvnet.SCN.SubmanifoldConvolution_updateOutput(
spatial_size,
filter_size,
input_metadata,
input_features,
output_features,
weight,
bias,
dilated_rate)
sparseconvnet.forward_pass_hidden_states += output_features.nelement()
return output_features
@staticmethod
def backward(ctx, grad_output):
input_features, spatial_size, weight, bias, filter_size = ctx.saved_tensors
grad_input = grad_output.new()
grad_weight = torch.zeros_like(weight)
grad_bias = torch.zeros_like(bias)
sparseconvnet.SCN.SubmanifoldConvolution_backward(
spatial_size,
filter_size,
ctx.input_metadata,
input_features,
grad_input,
grad_output.contiguous(),
weight,
grad_weight,
grad_bias,
ctx.dilated_rate)
del ctx.input_metadata
return grad_input, grad_weight, optionalTensorReturn(grad_bias), None, None, None, None, None
| 32.930233 | 113 | 0.607109 |
79563b8ad2b5999fa6b0fc730048c2fd71a7951d | 2,243 | py | Python | OpenCV/GettingStarted/join_images.py | nipundavid/PythonPlaybook | cb35a6587cebf3febcdbd5dd30f5e69ab89f9a93 | [
"MIT"
] | null | null | null | OpenCV/GettingStarted/join_images.py | nipundavid/PythonPlaybook | cb35a6587cebf3febcdbd5dd30f5e69ab89f9a93 | [
"MIT"
] | null | null | null | OpenCV/GettingStarted/join_images.py | nipundavid/PythonPlaybook | cb35a6587cebf3febcdbd5dd30f5e69ab89f9a93 | [
"MIT"
] | null | null | null | import cv2
import numpy as np
'''Used to stack images, if we are working with multiple images,
it's ticky to manage so many windows so we put them in one image'''
img = cv2.imread("Resources/test_image.png")
hor = np.hstack((img, img))
cv2.imshow("Horizontal Stack", hor)
ver = np.vstack((img, img))
cv2.imshow("Vertical Stack", ver)
'''CAVEAT: All the images should have same channel, otherwise it will not work,
So use the below func in case images has different channels'''
def stackImages(scale, imgArray):
rows = len(imgArray)
cols = len(imgArray[0])
rowsAvailable = isinstance(imgArray[0], list)
width = imgArray[0][0].shape[1]
height = imgArray[0][0].shape[0]
if rowsAvailable:
for x in range(0, rows):
for y in range(0, cols):
if imgArray[x][y].shape[:2] == imgArray[0][0].shape[:2]:
imgArray[x][y] = cv2.resize(imgArray[x][y], (0, 0), None, scale, scale)
else:
imgArray[x][y] = cv2.resize(imgArray[x][y], (imgArray[0][0].shape[1], imgArray[0][0].shape[0]),
None, scale, scale)
if len(imgArray[x][y].shape) == 2: imgArray[x][y] = cv2.cvtColor(imgArray[x][y], cv2.COLOR_GRAY2BGR)
imageBlank = np.zeros((height, width, 3), np.uint8)
hor = [imageBlank] * rows
hor_con = [imageBlank] * rows
for x in range(0, rows):
hor[x] = np.hstack(imgArray[x])
ver = np.vstack(hor)
else:
for x in range(0, rows):
if imgArray[x].shape[:2] == imgArray[0].shape[:2]:
imgArray[x] = cv2.resize(imgArray[x], (0, 0), None, scale, scale)
else:
imgArray[x] = cv2.resize(imgArray[x], (imgArray[0].shape[1], imgArray[0].shape[0]), None, scale, scale)
if len(imgArray[x].shape) == 2: imgArray[x] = cv2.cvtColor(imgArray[x], cv2.COLOR_GRAY2BGR)
hor = np.hstack(imgArray)
ver = hor
return ver
imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
imgStack = stackImages(0.5, ([img, imgGray, img], [imgGray, img, img]))
cv2.imshow("Stack With Func", imgStack)
cv2.waitKey(0)
| 39.350877 | 120 | 0.572893 |
79563bc2b75be98e2b9d8e5a4a164d76264c65e0 | 2,406 | py | Python | examples/experimental/scala-parallel-recommendation-entitymap/data/import_eventserver.py | saikiran40cs/PredictionIO | ba987b60ba0daeeacdb12214ecf4652fe13fb96c | [
"Apache-2.0"
] | 21 | 2016-03-29T19:05:10.000Z | 2020-07-29T04:28:30.000Z | examples/experimental/scala-parallel-recommendation-entitymap/data/import_eventserver.py | saikiran40cs/PredictionIO | ba987b60ba0daeeacdb12214ecf4652fe13fb96c | [
"Apache-2.0"
] | 19 | 2016-03-17T16:10:14.000Z | 2018-07-29T15:16:06.000Z | examples/experimental/scala-parallel-recommendation-entitymap/data/import_eventserver.py | saikiran40cs/PredictionIO | ba987b60ba0daeeacdb12214ecf4652fe13fb96c | [
"Apache-2.0"
] | 24 | 2016-06-06T19:40:56.000Z | 2020-04-11T01:23:14.000Z | """
Import sample data for recommendation engine
"""
import predictionio
import argparse
import random
SEED = 3
def import_events(client):
random.seed(SEED)
count = 0
print "Importing data..."
# generate 10 users, with user uid1,2,....,10
# with some random attributes
user_ids = [ ("uid"+str(i)) for i in range(1, 11)]
for user_id in user_ids:
print "Set user", user_id
client.create_event(
event="$set",
entity_type="user",
entity_id=user_id,
properties={
"attr0" : float(random.randint(0, 4)),
"attr1" : random.randint(10, 14),
"attr2" : random.randint(20, 24)
}
)
count += 1
# generate 50 items, with iid1,2,....,50
# with some randome attributes
item_ids = [ ("iid"+str(i)) for i in range(1, 51)]
for item_id in item_ids:
print "Set item", item_id
client.create_event(
event="$set",
entity_type="item",
entity_id=item_id,
properties={
"attrA" : random.choice(["something1", "something2", "valueX"]),
"attrB" : random.randint(10, 30),
"attrC" : random.choice([True, False])
}
)
count += 1
# each user randomly rate or buy 10 items
for user_id in user_ids:
for viewed_item in random.sample(item_ids, 10):
if (random.randint(0, 1) == 1):
print "User", user_id ,"rates item", viewed_item
client.create_event(
event="rate",
entity_type="user",
entity_id=user_id,
target_entity_type="item",
target_entity_id=item_id,
properties= { "rating" : float(random.randint(1, 6)) }
)
else:
print "User", user_id ,"buys item", viewed_item
client.create_event(
event="buy",
entity_type="user",
entity_id=user_id,
target_entity_type="item",
target_entity_id=item_id
)
count += 1
print "%s events are imported." % count
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Import sample data for recommendation engine")
parser.add_argument('--access_key', default='invald_access_key')
parser.add_argument('--url', default="http://localhost:7070")
args = parser.parse_args()
print args
client = predictionio.EventClient(
access_key=args.access_key,
url=args.url,
threads=5,
qsize=500)
import_events(client)
| 26.43956 | 72 | 0.612219 |
79563d4d1bb3bd8ef6b31221c48f3ec5c3bcdd1c | 503 | py | Python | load/__init__.py | KenzaxTazi/Himalayan-Water-Security | edaadf1502ca92c2e196c6f2eded067bb93a360e | [
"MIT"
] | 4 | 2020-12-30T18:33:31.000Z | 2021-04-28T07:53:35.000Z | load/__init__.py | KenzaxTazi/Himalayan-Water-Security | edaadf1502ca92c2e196c6f2eded067bb93a360e | [
"MIT"
] | null | null | null | load/__init__.py | KenzaxTazi/Himalayan-Water-Security | edaadf1502ca92c2e196c6f2eded067bb93a360e | [
"MIT"
] | 1 | 2021-03-15T16:12:55.000Z | 2021-03-15T16:12:55.000Z | """
Datasets should all have the same format so they can be easily used together.
In particular, they should be exported from the submodule:
- as a xarray DataArray or saved asnetcdf file format
- with 'lon' as the longitude variable name in °E (float)
- with 'lat' as the latitude variable name in °N (float)
- with 'time' for time variable name in years with monthly resolution taken in
the middle of each month (float)
- with 'tp' for the variable name for total precipitation in mm/day (float)
"""
| 41.916667 | 78 | 0.753479 |
79563e09594ed7fc8568c61ac8eed1d2447c771f | 2,765 | py | Python | apps/principal/urls.py | jeffessongomes/frwsaude | 14f821d6a4e551e410b56eee90b87bddbb049e58 | [
"MIT"
] | 1 | 2021-07-03T02:02:45.000Z | 2021-07-03T02:02:45.000Z | apps/principal/urls.py | jeffessongomes/frwsaude | 14f821d6a4e551e410b56eee90b87bddbb049e58 | [
"MIT"
] | null | null | null | apps/principal/urls.py | jeffessongomes/frwsaude | 14f821d6a4e551e410b56eee90b87bddbb049e58 | [
"MIT"
] | null | null | null | from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name="index"),
path('add_doubt', views.add_doubt, name="add_doubt"),
path('edit_doubt/<int:pk>', views.edit_doubt, name="edit_doubt"),
path('list_doubt', views.list_doubt, name="list_doubt"),
path('delete_doubt/<int:pk>', views.delete_doubt, name="delete_doubt"),
path('add_client', views.add_client, name="add_client"),
path('edit_client/<int:pk>', views.edit_client, name="edit_client"),
path('list_client', views.list_client, name="list_client"),
path('delete_client/<int:pk>', views.delete_client, name="delete_client"),
path('add_product', views.add_product, name="add_product"),
path('edit_product/<int:pk>', views.edit_product, name="edit_product"),
path('list_product', views.list_product, name="list_product"),
path('delete_product/<int:pk>', views.delete_product, name="delete_product"),
path('add_ingredients', views.add_ingredients, name="add_ingredients"),
path('edit_ingredients/<int:pk>', views.edit_ingredients, name="edit_ingredients"),
path('list_ingredients', views.list_ingredients, name="list_ingredients"),
path('delete_ingredients/<int:pk>', views.delete_ingredients, name="delete_ingredients"),
path('add_case', views.add_case, name="add_case"),
path('edit_case/<int:pk>', views.edit_case, name="edit_case"),
path('list_case', views.list_case, name="list_case"),
path('delete_case/<int:pk>', views.delete_case, name="delete_case"),
path('add_how', views.add_how, name="add_how"),
path('edit_how/<int:pk>', views.edit_how, name="edit_how"),
path('list_how', views.list_how, name="list_how"),
path('delete_how/<int:pk>', views.delete_how, name="delete_how"),
path('edit_initial', views.edit_initial, name="edit_initial"),
path('edit_value', views.edit_value, name="edit_value"),
path('edit_how_text', views.edit_how_text, name="edit_how_text"),
path('edit_video_description', views.edit_video_description, name='edit_video_description'),
path('login', views.do_login, name="login"),
path('logout', views.do_logout, name="logout"),
path('forget_password', views.forget_password, name='forget_password'),
path('confirme_password/<int:pk>/<str:token>', views.confirme_password, name='confirme_password'),
path('change_password', views.change_password, name='change_password'),
path('add_product_prize', views.add_product_prize, name="add_product_prize"),
path('edit_product_prize/<int:pk>', views.edit_product_prize, name="edit_product_prize"),
path('list_product_prize', views.list_product_prize, name="list_product_prize"),
path('delete_product_prize/<int:pk>', views.delete_product_prize, name="delete_product_prize"),
] | 47.672414 | 101 | 0.729476 |
79563e2ab03832dbe7becd2d249ffc853f8ddf11 | 432 | py | Python | language_features/itertools/accumulate_intro.py | PrasadHonrao/python-samples | faa48aa3eaf2d67b8cef0114e1f6ef08e2c1300a | [
"MIT"
] | 3 | 2018-08-20T13:00:01.000Z | 2021-09-18T04:19:46.000Z | language_features/itertools/accumulate_intro.py | PrasadHonrao/python-samples | faa48aa3eaf2d67b8cef0114e1f6ef08e2c1300a | [
"MIT"
] | 1 | 2021-06-25T20:25:02.000Z | 2021-08-19T22:44:31.000Z | language_features/itertools/accumulate_intro.py | PrasadHonrao/python-samples | faa48aa3eaf2d67b8cef0114e1f6ef08e2c1300a | [
"MIT"
] | 1 | 2021-09-18T23:51:20.000Z | 2021-09-18T23:51:20.000Z | # accumulate is a generator function that returns accumulated sums of the elements of an iterable.
# accumulate(range(5)) = [0, 1, 3, 6, 10]
# accumulate(range(5), operator.mul) = [0, 1, 2, 6, 24]
# accumulate(range(5), operator.mul, 1) = [0, 1, 2, 6, 24]
from itertools import accumulate
a = [1, 2, 3, 4]
acc = accumulate(a)
print(a)
print(list(acc))
import operator
acc = accumulate(a, operator.mul)
print(a)
print(list(acc)) | 24 | 98 | 0.678241 |
795640a04a699f1a74fe376d654785cc17e4f3cb | 2,141 | py | Python | reddit_comments/reddit/redditapi.py | martinlecs/reddit_comments | 8a212e81f595be5767e9ff6bf27291081e9a5d7e | [
"MIT"
] | null | null | null | reddit_comments/reddit/redditapi.py | martinlecs/reddit_comments | 8a212e81f595be5767e9ff6bf27291081e9a5d7e | [
"MIT"
] | null | null | null | reddit_comments/reddit/redditapi.py | martinlecs/reddit_comments | 8a212e81f595be5767e9ff6bf27291081e9a5d7e | [
"MIT"
] | null | null | null | import praw
import yaml
import os
root_dir = os.path.dirname(os.path.dirname(__file__))
class NoPostsInSubredditError(Exception):
pass
class InvalidLimitError(Exception):
pass
class RedditAPI:
def __init__(self, subreddit):
self.__reddit = praw.Reddit(**self._getConfig())
self.subreddit = subreddit
def is_read_only(self):
return self.__reddit.read_only
def get_hottest_posts(self, limit=10):
"""
:param subreddit: String
Name of subreddit
:param limit: Int
Number of posts to retrieve
:return: Submission
Generator containing Submission objects
"""
if limit < 0 or not isinstance(limit, int):
raise InvalidLimitError("Limit must be of type Int and >= 0 ")
try:
submissions = self.__reddit.subreddit(self.subreddit).hot(limit=limit)
except TypeError:
raise
return submissions
def get_comment_stream(self, pause_after):
"""
Yield new comments as they become available.
Comments are yielded oldest first. Up to 100 historical comments will initially be returned.
:pause_after:
An integer representing the number of requests that result in no new items before this function yields None
:return:
Generator function containing Comment objects
"""
return self.__reddit.subreddit(self.subreddit).stream.comments(pause_after=pause_after)
def get_subreddits(self):
"""
Gets list of all Subreddits on Reddit
:return: list
"""
#TODO: Have to scrape this information and update it using streams every now and then
subreddits = []
return subreddits
@staticmethod
def _getConfig():
with open(os.path.join(root_dir, 'config.yaml'), 'r') as f:
return yaml.load(f)
if __name__ == "__main__":
r = RedditAPI('anime')
latest_comments = r.get_comment_stream()
for comment in latest_comments:
if comment is None:
break
print(comment.body)
| 27.101266 | 119 | 0.634283 |
795640f7f23de69c0c13ed0ee2f98525a8b9efa3 | 62 | py | Python | tests/__init__.py | practicalci/practci | 47d0600918762373992da0ba067dbb84a3e4d633 | [
"BSD-3-Clause"
] | null | null | null | tests/__init__.py | practicalci/practci | 47d0600918762373992da0ba067dbb84a3e4d633 | [
"BSD-3-Clause"
] | null | null | null | tests/__init__.py | practicalci/practci | 47d0600918762373992da0ba067dbb84a3e4d633 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""Unit test package for practci."""
| 15.5 | 36 | 0.564516 |
7956410279cd4a54ef4311c377d8b2265eff4874 | 716 | py | Python | setup.py | osteele/pyfsa | 58a44106d3e3918a17a5a106584d1a91636f9d52 | [
"Artistic-1.0-Perl"
] | 7 | 2015-11-25T10:52:43.000Z | 2018-09-11T21:35:25.000Z | setup.py | osteele/pyfsa | 58a44106d3e3918a17a5a106584d1a91636f9d52 | [
"Artistic-1.0-Perl"
] | null | null | null | setup.py | osteele/pyfsa | 58a44106d3e3918a17a5a106584d1a91636f9d52 | [
"Artistic-1.0-Perl"
] | 7 | 2015-12-23T05:22:20.000Z | 2021-07-13T19:17:32.000Z | from distutils.core import setup
setup(name="FSA",
version="1.0",
description="FSA utilities",
long_description=
"""This package contains functions for manipulating Finite-State Automata (FSAs). It includes functions for minimizing and determinizing FSAs, computing FSA intersections and unions, compiling a (non-POSIX) regular expression into an FSA, and compiling a set of regular expression productions into a chart parser.""",
author="Oliver Steele",
author_email="steele@osteele.com",
url="http://osteele.com/software/pyfsa/",
py_modules=["FSA", "NumFSAUtils", "FSChartParser", "reCompiler"],
data_files=["README.txt", "LICENSE.txt"]
)
| 51.142857 | 325 | 0.695531 |
7956414eb93d34184b88049cce5b472a79859365 | 3,832 | py | Python | tools/c7n_mailer/c7n_mailer/azure/sendgrid_delivery.py | CliffJumper/cloud-custodian | 47d2f0aa990d2179c8f6764ac53c12720069ddcb | [
"Apache-2.0"
] | null | null | null | tools/c7n_mailer/c7n_mailer/azure/sendgrid_delivery.py | CliffJumper/cloud-custodian | 47d2f0aa990d2179c8f6764ac53c12720069ddcb | [
"Apache-2.0"
] | null | null | null | tools/c7n_mailer/c7n_mailer/azure/sendgrid_delivery.py | CliffJumper/cloud-custodian | 47d2f0aa990d2179c8f6764ac53c12720069ddcb | [
"Apache-2.0"
] | 1 | 2019-11-06T16:54:06.000Z | 2019-11-06T16:54:06.000Z | # Copyright 2018 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sendgrid
import six
from python_http_client import exceptions
from sendgrid.helpers.mail import Email, Content, Mail
from c7n_mailer.utils import (
get_message_subject, get_rendered_jinja)
class SendGridDelivery(object):
def __init__(self, config, logger):
self.config = config
self.logger = logger
self.sendgrid_client = sendgrid.SendGridAPIClient(apikey=self.config['sendgrid_api_key'])
def get_to_addrs_sendgrid_messages_map(self, queue_message):
targets = queue_message['action']['to']
# eg: { ('milton@initech.com', 'peter@initech.com'): [resource1, resource2, etc] }
to_addrs_to_resources_map = {tuple(sorted(set(targets))): queue_message['resources']}
to_addrs_to_content_map = {}
for to_addrs, resources in six.iteritems(to_addrs_to_resources_map):
to_addrs_to_content_map[to_addrs] = self.get_message_content(
queue_message,
resources,
list(to_addrs)
)
# eg: { ('milton@initech.com', 'peter@initech.com'): message }
return to_addrs_to_content_map
def get_message_content(self, queue_message, resources, to_addrs):
return get_rendered_jinja(
to_addrs, queue_message, resources, self.logger, 'template', 'default')
def sendgrid_handler(self, queue_message, to_addrs_to_email_messages_map):
self.logger.info("Sending account:%s policy:%s %s:%s email:%s to %s" % (
queue_message.get('account', ''),
queue_message['policy']['name'],
queue_message['policy']['resource'],
str(len(queue_message['resources'])),
queue_message['action'].get('template', 'default'),
to_addrs_to_email_messages_map))
from_email = Email(self.config['from_address'])
subject = get_message_subject(queue_message)
email_format = queue_message['action'].get('template_format', None)
if not email_format:
email_format = queue_message['action'].get(
'template', 'default').endswith('html') and 'html' or 'plain'
for email_to_addrs, email_content in six.iteritems(to_addrs_to_email_messages_map):
for to_address in email_to_addrs:
to_email = Email(to_address)
content = Content("text/" + email_format, email_content)
mail = Mail(from_email, subject, to_email, content)
try:
self.sendgrid_client.client.mail.send.post(request_body=mail.get())
except (exceptions.UnauthorizedError, exceptions.BadRequestsError) as e:
self.logger.warning(
"\n**Error \nPolicy:%s \nAccount:%s \nSending to:%s \n\nRequest body:"
"\n%s\n\nRequest headers:\n%s\n\n mailer.yml: %s" % (
queue_message['policy'],
queue_message.get('account', ''),
email_to_addrs,
e.body,
e.headers,
self.config
)
)
return False
return True
| 43.545455 | 97 | 0.622651 |
795641d1339ed1a1e9393718ec4865384ba0d0b1 | 864 | py | Python | lib/model/nms/nms_cpu.py | juwangvsu/faster-rcnn | 2df9e6475284ab47de9cfcb90ff63d6a81ce8559 | [
"MIT"
] | 681 | 2019-04-10T08:24:25.000Z | 2022-03-31T20:15:05.000Z | lib/model/nms/nms_cpu.py | juwangvsu/faster-rcnn | 2df9e6475284ab47de9cfcb90ff63d6a81ce8559 | [
"MIT"
] | 83 | 2019-04-12T11:19:08.000Z | 2022-03-16T07:02:52.000Z | lib/model/nms/nms_cpu.py | juwangvsu/faster-rcnn | 2df9e6475284ab47de9cfcb90ff63d6a81ce8559 | [
"MIT"
] | 182 | 2019-04-10T09:06:08.000Z | 2022-03-15T02:18:52.000Z | from __future__ import absolute_import
import numpy as np
import torch
def nms_cpu(dets, thresh):
    """Greedy non-maximum suppression on the CPU.

    Args:
        dets: (N, 5) torch tensor of rows [x1, y1, x2, y2, score].
        thresh: IoU threshold; a box overlapping an already-kept box by
            more than this is suppressed.

    Returns:
        torch.IntTensor of indices into ``dets`` for the kept boxes,
        ordered by descending score.
    """
    dets = dets.numpy()
    x1 = dets[:, 0]
    y1 = dets[:, 1]
    x2 = dets[:, 2]
    y2 = dets[:, 3]
    scores = dets[:, 4]

    # "+ 1" follows the inclusive pixel-coordinate convention.
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = scores.argsort()[::-1]  # candidate indices, best score first

    keep = []
    while order.size > 0:
        i = order.item(0)
        keep.append(i)
        # Intersection of box i with every remaining candidate: max of the
        # top-left corners, min of the bottom-right corners.
        # Bug fix: the original used np.maximum for xx2/yy2, which
        # overestimates the intersection and suppresses disjoint boxes.
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])

        w = np.maximum(0.0, xx2 - xx1 + 1)
        h = np.maximum(0.0, yy2 - yy1 + 1)
        inter = w * h
        ovr = inter / (areas[i] + areas[order[1:]] - inter)

        # Keep only candidates whose IoU with box i is within the threshold.
        inds = np.where(ovr <= thresh)[0]
        order = order[inds + 1]

    return torch.IntTensor(keep)
| 23.351351 | 59 | 0.503472 |
795642a2f44c8b401998c9a91fb9f3000c04d2fa | 5,623 | py | Python | list_tree_and_table/treeview/tree-in-model-view.py | shuge/Qt-Python-Binding-Examples | efe40c8af6c3e0805a5a7c3d053b8c8bf893a803 | [
"BSD-3-Clause"
] | 179 | 2015-01-08T10:21:28.000Z | 2020-03-24T07:03:04.000Z | table_view_and_list_view/treeview/tree-in-model-view.py | bisratyalew/Qt-Python-Sample-Examples | 45d69e01adefc854ac25627110611ca27b334a55 | [
"MIT"
] | 1 | 2019-12-23T17:14:37.000Z | 2020-01-09T16:45:58.000Z | table_view_and_list_view/treeview/tree-in-model-view.py | bisratyalew/Qt-Python-Sample-Examples | 45d69e01adefc854ac25627110611ca27b334a55 | [
"MIT"
] | 57 | 2015-01-05T09:34:15.000Z | 2019-11-18T06:12:08.000Z | #!/usr/bin/env python
import sys
#try:
# from PySide import QtGui, QtCore
#except ImportError:
# from PySide import QtGui, QtCore
from PySide import QtGui, QtCore
FIRST_COLUMN = 0  # the tree's only column; used when building parent indexes
class UserPresence:
    """Presence status codes.

    NOTE(review): not referenced anywhere else in this file -- confirm it
    is still needed.
    """
    OFFLINE = 0
    ONLINE = 400
class Object:
    """Node-type discriminators returned by get_type()."""
    GROUP = 1  # branch node (Group)
    USER = 2   # leaf node (User)
class User():
    """A single contact; rendered as a leaf node under its owning Group."""

    def __init__(self, uri, nickname, group=None):
        self.uri = uri
        self.nickname = nickname
        self.group = group  # back-reference to the owning Group, if any
        self._type = Object.USER

    def get_type(self):
        """Discriminator the model uses to tell users from groups."""
        return self._type

    def get_display_name(self):
        """Text shown for this node in the tree view."""
        return self.nickname
class Group():
    """A named collection of Users; a branch node in the tree."""

    def __init__(self, gid, gname, users=None):
        self._type = Object.GROUP
        self.gid = gid
        self.gname = gname
        self.user_list = []
        for member in (users or []):
            self.add_user(member)

    def add_user(self, user):
        """Add *user* unless it is already a member."""
        if user in self.user_list:
            return
        self.user_list.append(user)

    def count(self):
        """Number of members, i.e. this node's child-row count."""
        return len(self.user_list)

    def get_user_by_row(self, row):
        return self.user_list[row]

    def get_user_by_uri(self, uri):
        """First member whose uri matches, else None."""
        return next((m for m in self.user_list if m.uri == uri), None)

    def get_display_name(self):
        return self.gname

    def get_type(self):
        return self._type
class GroupAgent:
    """Flat registry of Group objects; supplies the tree model's root level."""

    def __init__(self, groups=None):
        self.group_list = list(groups) if groups else []

    def add_group(self, group):
        """Register *group* (duplicates are not checked)."""
        self.group_list.append(group)

    def count(self):
        """Number of groups, i.e. the model's root-row count."""
        return len(self.group_list)

    def get_group_by_row(self, row):
        return self.group_list[row]

    def index(self, group):
        """Root-row position of *group*; raises ValueError if unknown."""
        return self.group_list.index(group)

    def get_user_by_uri(self, uri):
        """Search every group for a user with *uri*; None if absent."""
        for grp in self.group_list:
            found = grp.get_user_by_uri(uri)
            if found:
                return found
class Model(QtCore.QAbstractItemModel):
    """Read-only two-level tree model: Group rows at the root, their
    User rows underneath."""

    # Every level of the tree exposes a single column (the display name).
    COLUMN_COUNT = 1

    def __init__(self, group_agent):
        QtCore.QAbstractItemModel.__init__(self)
        self.group_agent = group_agent

    def columnCount(self, parent_idx):
        # Qt contract: report the number of columns for the children of
        # parent_idx -- a constant for this model, NOT the child-row count.
        # (The previous code returned parent_obj.count() for valid parents,
        # which violated the contract and raised AttributeError whenever
        # the parent was a User, since Users have no count().)
        return self.COLUMN_COUNT

    def rowCount(self, parent_idx):
        # Root level: one row per group.
        if not parent_idx.isValid():
            return self.group_agent.count()
        parent_obj = parent_idx.internalPointer()
        # A group has one row per member; users are leaves.
        if parent_obj.get_type() == Object.GROUP:
            return parent_obj.count()
        return 0

    def index(self, row, column, parent_idx):
        assert column is not None
        # Root level: the index points at a Group.
        if not parent_idx.isValid():
            group = self.group_agent.get_group_by_row(row)
            return self.createIndex(row, column, group)
        parent_obj = parent_idx.internalPointer()
        if parent_obj.get_type() == Object.GROUP:
            item = parent_obj.get_user_by_row(row)
            return self.createIndex(row, column, item)
        return QtCore.QModelIndex()

    def data(self, index, role=QtCore.Qt.DisplayRole):
        # NOTE(review): QtCore.QVariant is a PyQt4 concept; PySide usually
        # returns plain Python values here -- confirm QVariant resolves in
        # the bindings actually in use.
        if not index.isValid():
            return QtCore.QVariant()
        obj = index.internalPointer()
        if role == QtCore.Qt.DisplayRole:
            if obj.get_type() in (Object.GROUP, Object.USER):
                return QtCore.QVariant(obj.get_display_name())
        elif role == QtCore.Qt.UserRole:
            # UserRole carries the stable identifier: gid for groups,
            # uri for users.
            obj_type = obj.get_type()
            if obj_type == Object.GROUP:
                return QtCore.QVariant(obj.gid)
            elif obj_type == Object.USER:
                return QtCore.QVariant(obj.uri)
        return QtCore.QVariant()

    def parent(self, child_index):
        if not child_index.isValid():
            return QtCore.QModelIndex()
        obj = child_index.internalPointer()
        # Only users have a parent (their owning group); groups are roots.
        if obj.get_type() == Object.USER:
            parent_obj = obj.group
            row = self.group_agent.index(parent_obj)
            return self.createIndex(row, FIRST_COLUMN, parent_obj)
        return QtCore.QModelIndex()

    def flags(self, index):
        if not index.isValid():
            return QtCore.Qt.NoItemFlags
        return QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable
def create_group_agent():
    """Build the demo data set: two groups holding a handful of users."""
    group_a = Group('10', 'Group A')
    for uri, nickname in [('123', 'Mery'),
                          ('132', 'Lily'),
                          ('321', 'May')]:
        group_a.add_user(User(uri, nickname, group_a))

    group_b = Group('20', 'Group B')
    group_b.add_user(User('213', 'Joe', group_b))

    agent = GroupAgent()
    agent.add_group(group_a)
    agent.add_group(group_b)
    return agent
class Demo(QtGui.QWidget):
    """Small window embedding a QTreeView bound to the group/user Model."""

    def __init__(self):
        QtGui.QWidget.__init__(self)
        x, y, w, h = 300, 300, 300, 200
        self.setGeometry(x, y, w, h)

        self.tv = QtGui.QTreeView(self)
        self.ga = create_group_agent()
        model = Model(self.ga)
        self.tv.setHeaderHidden(True)
        self.tv.setModel(model)

        # React to selection moves in the view.
        self.selection_model = self.tv.selectionModel()
        self.selection_model.currentRowChanged.connect(self.current_row_changed)

        # Mutate a user after the model is built.  Note the view is not
        # notified (no dataChanged is emitted), so the rename only shows
        # up on the next repaint.
        user = self.ga.get_user_by_uri("123")
        user.nickname = "foo"

    def current_row_changed(self, current_idx, prev_idx):
        """Selection slot: print the newly selected user's display name."""
        assert prev_idx is not None
        item = current_idx.internalPointer()
        if item.get_type() != Object.USER:
            return
        # print(...) with a single argument behaves identically on
        # Python 2 and 3; the original used the py2-only print statement.
        print(item.get_display_name())
if __name__ == "__main__":
    # Standard Qt bootstrap: create the application object, show the demo
    # widget, then enter the Qt event loop (blocks until the window closes).
    qa = QtGui.QApplication(sys.argv)
    app = Demo()
    app.show()
    qa.exec_()
| 25.559091 | 80 | 0.602525 |
7956438cc0823ccd16bee84539aca4b1b8f92000 | 1,949 | py | Python | pkg/tests/test_pipeparams.py | sdrees/pyvtreat | fed9a653b2524ba04b1e92b1087e58bead25f99a | [
"BSD-3-Clause"
] | 104 | 2019-07-21T06:15:02.000Z | 2022-02-23T19:41:58.000Z | pkg/tests/test_pipeparams.py | sdrees/pyvtreat | fed9a653b2524ba04b1e92b1087e58bead25f99a | [
"BSD-3-Clause"
] | 15 | 2019-08-12T09:59:40.000Z | 2021-12-09T00:38:47.000Z | pkg/tests/test_pipeparams.py | sdrees/pyvtreat | fed9a653b2524ba04b1e92b1087e58bead25f99a | [
"BSD-3-Clause"
] | 9 | 2019-08-15T13:29:15.000Z | 2021-03-08T18:04:08.000Z | #%% md
# From [pyvtreat issue 12](https://github.com/WinVector/pyvtreat/issues/12)
#%%
import pytest
import pandas as pd
import numpy as np
import numpy.random
import vtreat
import vtreat.util
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split
def test_pipeparams():
    """Pipeline get_params/set_params must reach through to the vtreat step."""
    numpy.random.seed(2019)

    def build_frame(nrows):
        # Synthetic binomial-outcome data with nan levels in both a
        # numeric and a categorical column.
        frame = pd.DataFrame({"x": 5 * numpy.random.normal(size=nrows)})
        frame["y"] = numpy.sin(frame["x"]) + 0.1 * numpy.random.normal(size=nrows)
        frame.loc[numpy.arange(3, 10), "x"] = numpy.nan  # introduce a nan level
        frame["xc"] = ["level_" + str(5 * numpy.round(yi / 5, 1)) for yi in frame["y"]]
        frame["x2"] = np.random.normal(size=nrows)
        frame.loc[frame["xc"] == "level_-1.0", "xc"] = numpy.nan  # introduce a nan level
        frame["yc"] = frame["y"] > 0.5
        return frame

    df = build_frame(500).drop(columns=["y"])

    transform = vtreat.BinomialOutcomeTreatment(
        outcome_target=True,
        params=vtreat.vtreat_parameters({"sparse_indicators": False}),
    )
    clf = Pipeline(steps=[
        ("preprocessor", transform),
        ("classifier", LogisticRegression(solver="lbfgs")),
    ])

    X, y = df, df.pop("yc")  # note: X aliases df after yc is popped off
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
    clf.fit(X_train, y_train)

    # Params must be readable directly from the transform ...
    t_params = transform.get_params()
    assert t_params["indicator_min_fraction"] is not None
    assert transform.get_params()["indicator_min_fraction"] != 0

    # ... and through the pipeline's namespaced view.
    p_params = clf.get_params()
    assert p_params["preprocessor__indicator_min_fraction"] is not None

    # Setting through the pipeline must hit the underlying transform.
    clf.set_params(preprocessor__indicator_min_fraction=0)
    assert transform.get_params()["indicator_min_fraction"] == 0

    # Refitting after the change must emit no warning or error.
    with pytest.warns(None) as record:
        clf.fit(X_train, y_train)
    assert not record
| 26.337838 | 81 | 0.641355 |
7956456608123df7125940655ddd28e0e0a55f99 | 1,599 | py | Python | simplech/write_context.py | vir-mir/simple-clickhouse | 7464322eddf7fa1a3ecccf27ad55d43dcb80d1f0 | [
"MIT"
] | 12 | 2018-04-09T03:59:05.000Z | 2020-06-26T21:54:17.000Z | simplech/write_context.py | vir-mir/simple-clickhouse | 7464322eddf7fa1a3ecccf27ad55d43dcb80d1f0 | [
"MIT"
] | 6 | 2018-10-09T07:41:51.000Z | 2020-07-06T01:46:34.000Z | simplech/write_context.py | vir-mir/simple-clickhouse | 7464322eddf7fa1a3ecccf27ad55d43dcb80d1f0 | [
"MIT"
] | 10 | 2019-05-18T23:50:42.000Z | 2021-11-18T13:43:01.000Z | import io
import ujson
from .log import logger
class Buffer:
    """Accumulates newline-delimited records in an in-memory byte stream."""

    def __init__(self, buffer_limit=5000):
        self.buffer_limit = buffer_limit  # records before `full` flips
        self.buffer = io.BytesIO()
        self.counter = 0                  # records appended so far
        self.full = False

    def __len__(self):
        """Number of records appended so far."""
        return self.counter

    def prepare(self):
        """Rewind the stream so a reader starts from the first record."""
        self.buffer.seek(0)

    def append(self, rec):
        """Append one string record, newline-terminated and encoded."""
        self.buffer.write("{0}\n".format(rec).encode())
        self.counter += 1
        # counter only grows, so once full the flag stays set.
        self.full = self.counter >= self.buffer_limit
class WriterContext:
    """Buffered writer: collects records for ``table`` and hands a full
    buffer to the client ``ch`` via its ``_flush`` hook (or on clean exit
    when used as a context manager)."""

    def __init__(self, ch, table, dump_json=True, ensure_ascii=False, buffer_limit=5000):
        self.ch = ch                      # client exposing _flush(table, buffer)
        self.ensure_ascii = ensure_ascii  # forwarded to ujson.dumps
        self.dump_json = dump_json        # serialize each doc before buffering
        self.buffer_limit = buffer_limit  # records per flush
        self.table = table
        self.set_buffer()

    def flush(self):
        """Hand the current buffer to the client and start a fresh one."""
        self.buffer.prepare()
        buff = self.buffer
        self.set_buffer()
        return self.ch._flush(self.table, buff)

    def set_buffer(self):
        """Install a new, empty record buffer."""
        self.buffer = Buffer(buffer_limit=self.buffer_limit)

    def push(self, *docs):
        """Queue one or more documents, flushing whenever the buffer fills.

        Any underlying error is logged and re-raised.
        """
        try:
            for doc in docs:
                if self.dump_json:
                    # Fix: pass ensure_ascii by keyword -- recent ujson
                    # releases accept it only as a keyword argument, so the
                    # old positional form raised TypeError.
                    doc = ujson.dumps(doc, ensure_ascii=self.ensure_ascii)
                self.buffer.append(doc)
                if self.buffer.full:
                    self.flush()
        except Exception:
            logger.exception('exc during push')
            # Bare raise keeps the original traceback intact
            # (the old `raise e` rewrote it).
            raise

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Flush only on a clean exit; on error the partial buffer is dropped.
        if not exc_value:
            self.flush()
| 25.790323 | 89 | 0.57661 |
795645f2f3f27e907181654098148a9936cf4a71 | 2,656 | py | Python | source/component/3rd-party/btstack/raw/tool/update_filename.py | liangyongxiang/vsf-all-in-one | 942676bd201bb4fa6a3262f77150ab2d7766ec88 | [
"Apache-2.0"
] | 1 | 2022-01-04T08:06:04.000Z | 2022-01-04T08:06:04.000Z | source/component/3rd-party/btstack/raw/tool/update_filename.py | liangyongxiang/vsf-all-in-one | 942676bd201bb4fa6a3262f77150ab2d7766ec88 | [
"Apache-2.0"
] | null | null | null | source/component/3rd-party/btstack/raw/tool/update_filename.py | liangyongxiang/vsf-all-in-one | 942676bd201bb4fa6a3262f77150ab2d7766ec88 | [
"Apache-2.0"
] | 1 | 2021-08-23T10:11:47.000Z | 2021-08-23T10:11:47.000Z | #!/usr/bin/env python3
import os
import re
import sys
# Repository root, resolved relative to this script's own location
# (the script lives in tool/, so the root is one level up).
btstack_root = os.path.abspath(os.path.dirname(sys.argv[0]) + '/..')

# Tag line written into each source file, and the regex that detects it.
filetag = '#define BTSTACK_FILE__ "%s"\n'
filetag_re = '#define BTSTACK_FILE__ \"(.*)\"'

# Folders (third-party code, ports, docs, ...) that must not be rewritten.
ignoreFolders = ["3rd-party", "pic32-harmony", "msp430", "cpputest", "test", "msp-exp430f5438-cc2564b", "msp430f5229lp-cc2564b", "ez430-rf2560", "ios", "chipset/cc256x", "docs", "mtk", "port"]

# Individual files that must not be rewritten.
ignoreFiles = ["ant_cmds.h", "rijndael.c", "btstack_config.h", "btstack_version.h", "profile.h", "bluetoothdrv.h",
               "ancs_client_demo.h", "spp_and_le_counter.h", "bluetoothdrv-stub.c", "minimal_peripheral.c", "BTstackDaemonRespawn.c"]
class State:
    """Parser states for update_filename_tag's line-by-line scan."""
    SearchStartComment = 0  # scanning for the opening /* of the license header
    SearchCopyrighter = 1   # inside the header comment, waiting for */
    SearchEndComment = 2    # NOTE(review): not referenced in this script
    ProcessRest = 3         # header handled; copy or patch the remaining lines
def update_filename_tag(dir_name, file_name, has_tag):
    """Rewrite dir_name/file_name so it carries a BTSTACK_FILE__ tag.

    The file is copied line by line to a temp file; right after the
    closing */ of the leading block comment the tag line is inserted
    when has_tag is False.  The temp file then replaces the original.
    """
    infile = dir_name + "/" + file_name
    outfile = dir_name + "/tmp_" + file_name
    # print "Update copyright: ", infile
    with open(outfile, 'wt') as fout:
        bufferComment = ""  # NOTE(review): never used -- confirm it can be dropped
        state = State.SearchStartComment
        with open(infile, 'rt') as fin:
            for line in fin:
                if state == State.SearchStartComment:
                    # Copy the line through while looking for the header comment.
                    fout.write(line)
                    parts = re.match('\s*(/\*).*(\*/)',line)
                    if parts:
                        if len(parts.groups()) == 2:
                            # one line comment
                            continue
                    parts = re.match('\s*(/\*).*',line)
                    if parts:
                        # beginning of comment
                        state = State.SearchCopyrighter
                    continue
                if state == State.SearchCopyrighter:
                    fout.write(line)
                    parts = re.match('.*(\*/)',line)
                    if parts:
                        # end of comment
                        state = State.SearchStartComment
                        # add filename tag if missing
                        if not has_tag:
                            fout.write('\n')
                            fout.write(filetag % file_name)
                            # NOTE(review): ProcessRest is only entered when
                            # has_tag is False, so the has_tag branch below
                            # appears unreachable -- confirm intended nesting.
                            state = State.ProcessRest
                    continue
                if state == State.ProcessRest:
                    if has_tag:
                        # Replace an existing (possibly wrong) tag line in place.
                        parts = re.match(filetag_re,line)
                        if parts:
                            print('have tag, found tag')
                            fout.write(filetag % file_name)
                            continue
                    fout.write(line)
    # Swap the rewritten copy over the original (atomic on POSIX).
    os.rename(outfile, infile)
def get_filename_tag(file_path):
    """Return the value of the BTSTACK_FILE__ tag in *file_path*.

    Scans the file line by line and returns the first tag value found,
    or None when the file carries no tag.
    """
    # (Removed an unused local: the original computed os.path.basename
    # into `basename` and never read it.)
    with open(file_path, "rt") as fin:
        for line in fin:
            match = re.match(filetag_re, line)
            if match:
                return match.groups()[0]
    return None
# Walk the source tree; topdown=True makes the in-place dirs[:] pruning
# below actually stop descent into the ignored folders.
for root, dirs, files in os.walk(btstack_root, topdown=True):
    dirs[:] = [d for d in dirs if d not in ignoreFolders]
    files[:] = [f for f in files if f not in ignoreFiles]
    for f in files:
        # Only C sources receive the BTSTACK_FILE__ tag.
        if not f.endswith(".c"):
            continue
        file_path = root + "/" + f
        tag = get_filename_tag(file_path)
        # Rewrite when the tag is missing or does not match the file name.
        if tag != f:
            print('%s needs filetag' % file_path)
            update_filename_tag(root, f, tag != None)
| 27.957895 | 192 | 0.643072 |
7956462ec03f54de6e6bdcd167d132703a77e31b | 11,686 | py | Python | LowLevelApi/NGPF/REST/1_Most_Common/l2l3RestNgpf.py | NickKeating/IxNetwork | 0a54c0b8d1a1664d2826ad20a826ef384c48432f | [
"MIT"
] | 46 | 2018-01-24T06:43:45.000Z | 2022-03-17T07:27:08.000Z | LowLevelApi/NGPF/REST/1_Most_Common/l2l3RestNgpf.py | NickKeating/IxNetwork | 0a54c0b8d1a1664d2826ad20a826ef384c48432f | [
"MIT"
] | 104 | 2018-03-16T18:16:29.000Z | 2022-03-17T07:16:43.000Z | LowLevelApi/NGPF/REST/1_Most_Common/l2l3RestNgpf.py | NickKeating/IxNetwork | 0a54c0b8d1a1664d2826ad20a826ef384c48432f | [
"MIT"
] | 58 | 2018-01-23T05:54:20.000Z | 2022-03-30T22:55:20.000Z |
# PLEASE READ DISCLAIMER
#
# This is a sample script for demo and reference purpose only.
# It is subject to change for content updates without warning.
#
# REQUIREMENTS
# - Python2.7 (Supports Python 2 and 3)
# - Python2.7
# - Python modules: requests
#
# DESCRIPTION
# This sample script demonstrates:
# - REST API configurations using two back-to-back Ixia ports.
# - Connecting to Windows IxNetwork API server or Linux API server.
#
# - Verify that a sufficient number of port licenses is available before testing.
# - Verify port ownership.
# - Configure two IPv4 Topology Groups
# - Start protocols
# - Verify ARP
# - Create a Traffic Item
# - Apply Traffic
# - Start Traffic
# - Get stats
#
# USAGE
# python <script>.py windows
# python <script>.py linux
import re
import sys, traceback
sys.path.insert(0, '../Modules')
from IxNetRestApi import *
from IxNetRestApiPortMgmt import PortMgmt
from IxNetRestApiTraffic import Traffic
from IxNetRestApiProtocol import Protocol
from IxNetRestApiStatistics import Statistics
# Default the API server to either windows, windowsConnectionMgr or linux.
connectToApiServer = 'windows'

# Optional first CLI argument overrides the server flavor; any other
# value aborts with a usage message.
if len(sys.argv) > 1:
    if sys.argv[1] not in ['windows', 'windowsConnectionMgr', 'linux']:
        sys.exit("\nError: %s is not a known option. Choices are 'windows', 'windowsConnectionMgr or 'linux'." % sys.argv[1])
    connectToApiServer = sys.argv[1]
try:
    #---------- Preference Settings --------------
    forceTakePortOwnership = True   # steal ports that are owned by someone else
    releasePortsWhenDone = False
    enableDebugTracing = True
    deleteSessionAfterTest = True ;# For Windows Connection Mgr and Linux API server only

    # Optional: Mainly for connecting to Linux API server.
    licenseServerIp = '192.168.70.3'
    licenseModel = 'subscription'
    licenseTier = 'tier3'

    ixChassisIp = '192.168.70.11'
    # [chassisIp, cardNumber, slotNumber]
    portList = [[ixChassisIp, '1', '1'],
                [ixChassisIp, '2', '1']]

    if connectToApiServer == 'linux':
        mainObj = Connect(apiServerIp='192.168.70.108',
                          username='admin',
                          password='admin',
                          deleteSessionAfterTest=deleteSessionAfterTest,
                          verifySslCert=False,
                          serverOs=connectToApiServer)

    if connectToApiServer in ['windows', 'windowsConnectionMgr']:
        mainObj = Connect(apiServerIp='192.168.70.3',
                          serverIpPort='11009',
                          serverOs=connectToApiServer,
                          deleteSessionAfterTest=deleteSessionAfterTest)

    #---------- Preference Settings End --------------

    # Start from a blank configuration and connect to the chassis.
    mainObj.newBlankConfig()
    portObj = PortMgmt(mainObj)
    portObj.connectIxChassis(ixChassisIp)

    # Verify port ownership before assigning ports.
    if portObj.arePortsAvailable(portList, raiseException=False) != 0:
        if forceTakePortOwnership == True:
            portObj.releasePorts(portList)
            portObj.clearPortOwnership(portList)
        else:
            raise IxNetRestApiException('Ports are owned by another user and forceTakePortOwnership is set to False')

    # License-server configuration.
    # NOTE(review): the original comment said "Uncomment this to configure
    # license server", but the two calls below are active -- confirm intent.
    # Configuring the license requires releasing ALL ports, even ones not
    # used in this test.
    portObj.releaseAllPorts()
    mainObj.configLicenseServerDetails([licenseServerIp], licenseModel, licenseTier)

    # Set createVports True if building config from scratch.
    portObj.assignPorts(portList, createVports=True)

    # Build two NGPF topologies (one per port), each with one device group.
    protocolObj = Protocol(mainObj, portObj)
    topologyObj1 = protocolObj.createTopologyNgpf(portList=[portList[0]],
                                                  topologyName='Topo1')

    deviceGroupObj1 = protocolObj.createDeviceGroupNgpf(topologyObj1,
                                                        multiplier=1,
                                                        deviceGroupName='DG1')

    topologyObj2 = protocolObj.createTopologyNgpf(portList=[portList[1]],
                                                  topologyName='Topo2')

    deviceGroupObj2 = protocolObj.createDeviceGroupNgpf(topologyObj2,
                                                        multiplier=1,
                                                        deviceGroupName='DG2')

    # Ethernet layer: back-to-back MACs on the same VLAN (103).
    ethernetObj1 = protocolObj.createEthernetNgpf(deviceGroupObj1,
                                                  ethernetName='MyEth1',
                                                  macAddress={'start': '00:01:01:00:00:01',
                                                              'direction': 'increment',
                                                              'step': '00:00:00:00:00:01'},
                                                  macAddressPortStep='disabled',
                                                  vlanId={'start': 103,
                                                          'direction': 'increment',
                                                          'step':0})

    ethernetObj2 = protocolObj.createEthernetNgpf(deviceGroupObj2,
                                                  ethernetName='MyEth2',
                                                  macAddress={'start': '00:01:02:00:00:01',
                                                              'direction': 'increment',
                                                              'step': '00:00:00:00:00:01'},
                                                  macAddressPortStep='disabled',
                                                  vlanId={'start': 103,
                                                          'direction': 'increment',
                                                          'step':0})

    # IPv4 layer: each side's gateway is the other side's address, so
    # resolveGateway amounts to an ARP check across the back-to-back link.
    ipv4Obj1 = protocolObj.createIpv4Ngpf(ethernetObj1,
                                          ipv4Address={'start': '1.1.1.1',
                                                       'direction': 'increment',
                                                       'step': '0.0.0.1'},
                                          ipv4AddressPortStep='disabled',
                                          gateway={'start': '1.1.1.2',
                                                   'direction': 'increment',
                                                   'step': '0.0.0.0'},
                                          gatewayPortStep='disabled',
                                          prefix=24,
                                          resolveGateway=True)

    ipv4Obj2 = protocolObj.createIpv4Ngpf(ethernetObj2,
                                          ipv4Address={'start': '1.1.1.2',
                                                       'direction': 'increment',
                                                       'step': '0.0.0.1'},
                                          ipv4AddressPortStep='disabled',
                                          gateway={'start': '1.1.1.1',
                                                   'direction': 'increment',
                                                   'step': '0.0.0.0'},
                                          gatewayPortStep='disabled',
                                          prefix=24,
                                          resolveGateway=True)

    # Bring up the protocol stack and wait for sessions (ARP) to resolve.
    protocolObj.startAllProtocols()
    protocolObj.verifyProtocolSessionsNgpf()

    # For all parameter options, please go to the API configTrafficItem.
    # mode = create or modify
    trafficObj = Traffic(mainObj)
    trafficStatus = trafficObj.configTrafficItem(mode='create',
                                                 trafficItem = {
                                                     'name':'Topo1 to Topo2',
                                                     'trafficType':'ipv4',
                                                     'biDirectional':True,
                                                     'srcDestMesh':'one-to-one',
                                                     'routeMesh':'oneToOne',
                                                     'allowSelfDestined':False,
                                                     'trackBy': ['flowGroup0', 'vlanVlanId0']},
                                                 endpoints = [{'name':'Flow-Group-1',
                                                               'sources': [topologyObj1],
                                                               'destinations': [topologyObj2]
                                                               }],
                                                 configElements = [{'transmissionType': 'fixedFrameCount',
                                                                    'frameCount': 50000,
                                                                    'frameRate': 88,
                                                                    'frameRateType': 'percentLineRate',
                                                                    'frameSize': 128}])

    # configTrafficItem returns [trafficItemObj, [endpointObj,...], [configElementObj,...]].
    trafficItemObj = trafficStatus[0]
    endpointObj = trafficStatus[1][0]
    configElementObj = trafficStatus[2][0]

    # Example on how to modify a Traffic Item after creation.
    trafficObj.configTrafficItem(mode='modify',
                                 obj=trafficItemObj,
                                 trafficItem = {'name':'Topo1_mod_Topo2'})

    trafficObj.configTrafficItem(mode='modify',
                                 obj=configElementObj,
                                 configElements = {'frameSize':'512'})

    trafficObj.configTrafficItem(mode='modify',
                                 obj=endpointObj,
                                 endpoints = {'name':'Flow-Group-10'})

    trafficObj.regenerateTrafficItems()
    trafficObj.startTraffic()

    # Check the traffic state to assure traffic has indeed stopped before checking for stats.
    if trafficObj.getTransmissionType(configElementObj) == "fixedFrameCount":
        trafficObj.checkTrafficState(expectedState=['stopped', 'stoppedWaitingForStats'], timeout=45)

    # Pull flow statistics and print a fixed-width summary table.
    statObj = Statistics(mainObj)
    stats = statObj.getStats(viewName='Flow Statistics')

    print('\n{txPort:10} {txFrames:15} {rxPort:10} {rxFrames:15} {frameLoss:10}'.format(
        txPort='txPort', txFrames='txFrames', rxPort='rxPort', rxFrames='rxFrames', frameLoss='frameLoss'))
    print('-'*90)

    for flowGroup,values in stats.items():
        txPort = values['Tx Port']
        rxPort = values['Rx Port']
        txFrames = values['Tx Frames']
        rxFrames = values['Rx Frames']
        frameLoss = values['Frames Delta']

        print('{txPort:10} {txFrames:15} {rxPort:10} {rxFrames:15} {frameLoss:10} '.format(
            txPort=txPort, txFrames=txFrames, rxPort=rxPort, rxFrames=rxFrames, frameLoss=frameLoss))

    # Optional cleanup, controlled by the preference flags above.
    if releasePortsWhenDone == True:
        portObj.releasePorts(portList)

    if connectToApiServer == 'linux':
        mainObj.linuxServerStopAndDeleteSession()

    if connectToApiServer == 'windowsConnectionMgr':
        mainObj.deleteSession()

# KeyboardInterrupt is listed separately because it is not a subclass of
# Exception, so Ctrl-C also triggers this cleanup path.
except (IxNetRestApiException, Exception, KeyboardInterrupt) as errMsg:
    if enableDebugTracing:
        # Suppress the full traceback for plain connection errors.
        if not bool(re.search('ConnectionError', traceback.format_exc())):
            print('\n%s' % traceback.format_exc())
    print('\nException Error! %s\n' % errMsg)

    # Best-effort cleanup; 'mainObj' in locals() guards against failures
    # that happen before the connection object exists.
    if 'mainObj' in locals() and connectToApiServer == 'linux':
        if deleteSessionAfterTest:
            mainObj.linuxServerStopAndDeleteSession()

    if 'mainObj' in locals() and connectToApiServer in ['windows', 'windowsConnectionMgr']:
        # NOTE(review): portObj may be undefined if Connect() itself failed --
        # only mainObj is guarded here; confirm this is acceptable.
        if releasePortsWhenDone and forceTakePortOwnership:
            portObj.releasePorts(portList)

        if connectToApiServer == 'windowsConnectionMgr':
            if deleteSessionAfterTest:
                mainObj.deleteSession()
| 47.120968 | 125 | 0.5 |
7956469bc3f12e94416af10f985bf01422f801cb | 14,214 | py | Python | rllib/agents/ppo/appo_torch_policy.py | kennethlien/ray | 2916c2c3d29f2f7e4bf53872d5ac8c5d866b0e45 | [
"Apache-2.0"
] | null | null | null | rllib/agents/ppo/appo_torch_policy.py | kennethlien/ray | 2916c2c3d29f2f7e4bf53872d5ac8c5d866b0e45 | [
"Apache-2.0"
] | null | null | null | rllib/agents/ppo/appo_torch_policy.py | kennethlien/ray | 2916c2c3d29f2f7e4bf53872d5ac8c5d866b0e45 | [
"Apache-2.0"
] | null | null | null | """
PyTorch policy class used for APPO.
Adapted from VTraceTFPolicy to use the PPO surrogate loss.
Keep in sync with changes to VTraceTFPolicy.
"""
import gym
import numpy as np
import logging
from typing import Type
from ray.rllib.agents.dqn.simple_q_torch_policy import TargetNetworkMixin
import ray.rllib.agents.impala.vtrace_torch as vtrace
from ray.rllib.agents.impala.vtrace_torch_policy import (
make_time_major,
choose_optimizer,
)
from ray.rllib.agents.ppo.appo_tf_policy import make_appo_model, postprocess_trajectory
from ray.rllib.evaluation.postprocessing import Postprocessing
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.models.torch.torch_action_dist import (
TorchDistributionWrapper,
TorchCategorical,
)
from ray.rllib.policy.policy import Policy
from ray.rllib.policy.policy_template import build_policy_class
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.policy.torch_mixins import (
EntropyCoeffSchedule,
LearningRateSchedule,
ValueNetworkMixin,
)
from ray.rllib.utils.framework import try_import_torch
from ray.rllib.utils.torch_utils import (
apply_grad_clipping,
explained_variance,
global_norm,
sequence_mask,
)
from ray.rllib.utils.typing import TensorType, TrainerConfigDict
torch, nn = try_import_torch()
logger = logging.getLogger(__name__)
def appo_surrogate_loss(
policy: Policy,
model: ModelV2,
dist_class: Type[TorchDistributionWrapper],
train_batch: SampleBatch,
) -> TensorType:
"""Constructs the loss for APPO.
With IS modifications and V-trace for Advantage Estimation.
Args:
policy (Policy): The Policy to calculate the loss for.
model (ModelV2): The Model to calculate the loss for.
dist_class (Type[ActionDistribution]): The action distr. class.
train_batch (SampleBatch): The training data.
Returns:
Union[TensorType, List[TensorType]]: A single loss tensor or a list
of loss tensors.
"""
target_model = policy.target_models[model]
model_out, _ = model(train_batch)
action_dist = dist_class(model_out, model)
if isinstance(policy.action_space, gym.spaces.Discrete):
is_multidiscrete = False
output_hidden_shape = [policy.action_space.n]
elif isinstance(policy.action_space, gym.spaces.multi_discrete.MultiDiscrete):
is_multidiscrete = True
output_hidden_shape = policy.action_space.nvec.astype(np.int32)
else:
is_multidiscrete = False
output_hidden_shape = 1
def _make_time_major(*args, **kwargs):
return make_time_major(
policy, train_batch.get(SampleBatch.SEQ_LENS), *args, **kwargs
)
actions = train_batch[SampleBatch.ACTIONS]
dones = train_batch[SampleBatch.DONES]
rewards = train_batch[SampleBatch.REWARDS]
behaviour_logits = train_batch[SampleBatch.ACTION_DIST_INPUTS]
target_model_out, _ = target_model(train_batch)
prev_action_dist = dist_class(behaviour_logits, model)
values = model.value_function()
values_time_major = _make_time_major(values)
drop_last = policy.config["vtrace"] and policy.config["vtrace_drop_last_ts"]
if policy.is_recurrent():
max_seq_len = torch.max(train_batch[SampleBatch.SEQ_LENS])
mask = sequence_mask(train_batch[SampleBatch.SEQ_LENS], max_seq_len)
mask = torch.reshape(mask, [-1])
mask = _make_time_major(mask, drop_last=drop_last)
num_valid = torch.sum(mask)
def reduce_mean_valid(t):
return torch.sum(t[mask]) / num_valid
else:
reduce_mean_valid = torch.mean
if policy.config["vtrace"]:
logger.debug(
"Using V-Trace surrogate loss (vtrace=True; " f"drop_last={drop_last})"
)
old_policy_behaviour_logits = target_model_out.detach()
old_policy_action_dist = dist_class(old_policy_behaviour_logits, model)
if isinstance(output_hidden_shape, (list, tuple, np.ndarray)):
unpacked_behaviour_logits = torch.split(
behaviour_logits, list(output_hidden_shape), dim=1
)
unpacked_old_policy_behaviour_logits = torch.split(
old_policy_behaviour_logits, list(output_hidden_shape), dim=1
)
else:
unpacked_behaviour_logits = torch.chunk(
behaviour_logits, output_hidden_shape, dim=1
)
unpacked_old_policy_behaviour_logits = torch.chunk(
old_policy_behaviour_logits, output_hidden_shape, dim=1
)
# Prepare actions for loss.
loss_actions = actions if is_multidiscrete else torch.unsqueeze(actions, dim=1)
# Prepare KL for loss.
action_kl = _make_time_major(
old_policy_action_dist.kl(action_dist), drop_last=drop_last
)
# Compute vtrace on the CPU for better perf.
vtrace_returns = vtrace.multi_from_logits(
behaviour_policy_logits=_make_time_major(
unpacked_behaviour_logits, drop_last=drop_last
),
target_policy_logits=_make_time_major(
unpacked_old_policy_behaviour_logits, drop_last=drop_last
),
actions=torch.unbind(
_make_time_major(loss_actions, drop_last=drop_last), dim=2
),
discounts=(1.0 - _make_time_major(dones, drop_last=drop_last).float())
* policy.config["gamma"],
rewards=_make_time_major(rewards, drop_last=drop_last),
values=values_time_major[:-1] if drop_last else values_time_major,
bootstrap_value=values_time_major[-1],
dist_class=TorchCategorical if is_multidiscrete else dist_class,
model=model,
clip_rho_threshold=policy.config["vtrace_clip_rho_threshold"],
clip_pg_rho_threshold=policy.config["vtrace_clip_pg_rho_threshold"],
)
actions_logp = _make_time_major(action_dist.logp(actions), drop_last=drop_last)
prev_actions_logp = _make_time_major(
prev_action_dist.logp(actions), drop_last=drop_last
)
old_policy_actions_logp = _make_time_major(
old_policy_action_dist.logp(actions), drop_last=drop_last
)
is_ratio = torch.clamp(
torch.exp(prev_actions_logp - old_policy_actions_logp), 0.0, 2.0
)
logp_ratio = is_ratio * torch.exp(actions_logp - prev_actions_logp)
policy._is_ratio = is_ratio
advantages = vtrace_returns.pg_advantages.to(logp_ratio.device)
surrogate_loss = torch.min(
advantages * logp_ratio,
advantages
* torch.clamp(
logp_ratio,
1 - policy.config["clip_param"],
1 + policy.config["clip_param"],
),
)
mean_kl_loss = reduce_mean_valid(action_kl)
mean_policy_loss = -reduce_mean_valid(surrogate_loss)
# The value function loss.
value_targets = vtrace_returns.vs.to(values_time_major.device)
if drop_last:
delta = values_time_major[:-1] - value_targets
else:
delta = values_time_major - value_targets
mean_vf_loss = 0.5 * reduce_mean_valid(torch.pow(delta, 2.0))
# The entropy loss.
mean_entropy = reduce_mean_valid(
_make_time_major(action_dist.entropy(), drop_last=drop_last)
)
else:
logger.debug("Using PPO surrogate loss (vtrace=False)")
# Prepare KL for Loss
action_kl = _make_time_major(prev_action_dist.kl(action_dist))
actions_logp = _make_time_major(action_dist.logp(actions))
prev_actions_logp = _make_time_major(prev_action_dist.logp(actions))
logp_ratio = torch.exp(actions_logp - prev_actions_logp)
advantages = _make_time_major(train_batch[Postprocessing.ADVANTAGES])
surrogate_loss = torch.min(
advantages * logp_ratio,
advantages
* torch.clamp(
logp_ratio,
1 - policy.config["clip_param"],
1 + policy.config["clip_param"],
),
)
mean_kl_loss = reduce_mean_valid(action_kl)
mean_policy_loss = -reduce_mean_valid(surrogate_loss)
# The value function loss.
value_targets = _make_time_major(train_batch[Postprocessing.VALUE_TARGETS])
delta = values_time_major - value_targets
mean_vf_loss = 0.5 * reduce_mean_valid(torch.pow(delta, 2.0))
# The entropy loss.
mean_entropy = reduce_mean_valid(_make_time_major(action_dist.entropy()))
# The summed weighted loss
total_loss = (
mean_policy_loss
+ mean_vf_loss * policy.config["vf_loss_coeff"]
- mean_entropy * policy.entropy_coeff
)
# Optional additional KL Loss
if policy.config["use_kl_loss"]:
total_loss += policy.kl_coeff * mean_kl_loss
# Store values for stats function in model (tower), such that for
# multi-GPU, we do not override them during the parallel loss phase.
model.tower_stats["total_loss"] = total_loss
model.tower_stats["mean_policy_loss"] = mean_policy_loss
model.tower_stats["mean_kl_loss"] = mean_kl_loss
model.tower_stats["mean_vf_loss"] = mean_vf_loss
model.tower_stats["mean_entropy"] = mean_entropy
model.tower_stats["value_targets"] = value_targets
model.tower_stats["vf_explained_var"] = explained_variance(
torch.reshape(value_targets, [-1]),
torch.reshape(values_time_major[:-1] if drop_last else values_time_major, [-1]),
)
return total_loss
def stats(policy: Policy, train_batch: SampleBatch):
"""Stats function for APPO. Returns a dict with important loss stats.
Args:
policy (Policy): The Policy to generate stats for.
train_batch (SampleBatch): The SampleBatch (already) used for training.
Returns:
Dict[str, TensorType]: The stats dict.
"""
stats_dict = {
"cur_lr": policy.cur_lr,
"total_loss": torch.mean(torch.stack(policy.get_tower_stats("total_loss"))),
"policy_loss": torch.mean(
torch.stack(policy.get_tower_stats("mean_policy_loss"))
),
"entropy": torch.mean(torch.stack(policy.get_tower_stats("mean_entropy"))),
"entropy_coeff": policy.entropy_coeff,
"var_gnorm": global_norm(policy.model.trainable_variables()),
"vf_loss": torch.mean(torch.stack(policy.get_tower_stats("mean_vf_loss"))),
"vf_explained_var": torch.mean(
torch.stack(policy.get_tower_stats("vf_explained_var"))
),
}
if policy.config["vtrace"]:
is_stat_mean = torch.mean(policy._is_ratio, [0, 1])
is_stat_var = torch.var(policy._is_ratio, [0, 1])
stats_dict["mean_IS"] = is_stat_mean
stats_dict["var_IS"] = is_stat_var
if policy.config["use_kl_loss"]:
stats_dict["kl"] = torch.mean(
torch.stack(policy.get_tower_stats("mean_kl_loss"))
)
stats_dict["KL_Coeff"] = policy.kl_coeff
return stats_dict
def add_values(policy, input_dict, state_batches, model, action_dist):
out = {}
if not policy.config["vtrace"]:
out[SampleBatch.VF_PREDS] = model.value_function()
return out
class KLCoeffMixin:
"""Assigns the `update_kl()` method to the PPOPolicy.
This is used in PPO's execution plan (see ppo.py) for updating the KL
coefficient after each learning step based on `config.kl_target` and
the measured KL value (from the train_batch).
"""
def __init__(self, config):
# The current KL value (as python float).
self.kl_coeff = config["kl_coeff"]
# Constant target value.
self.kl_target = config["kl_target"]
def update_kl(self, sampled_kl):
# Update the current KL value based on the recently measured value.
if sampled_kl > 2.0 * self.kl_target:
self.kl_coeff *= 1.5
elif sampled_kl < 0.5 * self.kl_target:
self.kl_coeff *= 0.5
# Return the current KL value.
return self.kl_coeff
def setup_early_mixins(
    policy: Policy,
    obs_space: gym.spaces.Space,
    action_space: gym.spaces.Space,
    config: TrainerConfigDict,
):
    """Call all mixin classes' constructors before APPOPolicy initialization.

    Args:
        policy (Policy): The Policy object.
        obs_space (gym.spaces.Space): The Policy's observation space.
        action_space (gym.spaces.Space): The Policy's action space.
        config (TrainerConfigDict): The Policy's config.
    """
    # Initialize the lr/entropy schedules early so attributes such as
    # `policy.cur_lr` and `policy.entropy_coeff` (read e.g. by the stats
    # function) exist before loss/optimizer setup.
    LearningRateSchedule.__init__(policy, config["lr"], config["lr_schedule"])
    EntropyCoeffSchedule.__init__(
        policy, config["entropy_coeff"], config["entropy_coeff_schedule"]
    )
def setup_late_mixins(
    policy: Policy,
    obs_space: gym.spaces.Space,
    action_space: gym.spaces.Space,
    config: TrainerConfigDict,
):
    """Call all mixin classes' constructors after APPOPolicy initialization.

    Args:
        policy (Policy): The Policy object.
        obs_space (gym.spaces.Space): The Policy's observation space.
        action_space (gym.spaces.Space): The Policy's action space.
        config (TrainerConfigDict): The Policy's config.
    """
    KLCoeffMixin.__init__(policy, config)
    ValueNetworkMixin.__init__(policy, config)
    # NOTE(review): TargetNetworkMixin takes no config here; presumably it
    # snapshots the already-built model -- confirm against the mixin impl.
    TargetNetworkMixin.__init__(policy)
# Build a child class of `TorchPolicy`, given the custom functions defined
# above.
AsyncPPOTorchPolicy = build_policy_class(
    name="AsyncPPOTorchPolicy",
    framework="torch",
    # Loss, stats, and trajectory-postprocessing hooks.
    loss_fn=appo_surrogate_loss,
    stats_fn=stats,
    postprocess_fn=postprocess_trajectory,
    # Attaches VF predictions to action outputs (see `add_values` above).
    extra_action_out_fn=add_values,
    extra_grad_process_fn=apply_grad_clipping,
    optimizer_fn=choose_optimizer,
    # Mixin constructors invoked before init / before loss init.
    before_init=setup_early_mixins,
    before_loss_init=setup_late_mixins,
    make_model=make_appo_model,
    mixins=[
        LearningRateSchedule,
        KLCoeffMixin,
        TargetNetworkMixin,
        ValueNetworkMixin,
        EntropyCoeffSchedule,
    ],
    # Batch divisibility requirement comes straight from the configured
    # rollout fragment length.
    get_batch_divisibility_req=lambda p: p.config["rollout_fragment_length"],
)
| 35.62406 | 88 | 0.680808 |
795646fbe69e5b82470b6b4974ff0b4cc6b4fe12 | 475 | py | Python | pnp/api/endpoints/__init__.py | HazardDede/pnp | 469ca17254dcca1a4eefe0dc5ac574692a9ab38e | [
"MIT"
] | 4 | 2018-10-07T11:32:00.000Z | 2019-04-23T09:34:23.000Z | pnp/api/endpoints/__init__.py | HazardDede/pnp | 469ca17254dcca1a4eefe0dc5ac574692a9ab38e | [
"MIT"
] | null | null | null | pnp/api/endpoints/__init__.py | HazardDede/pnp | 469ca17254dcca1a4eefe0dc5ac574692a9ab38e | [
"MIT"
] | 1 | 2019-08-12T19:56:10.000Z | 2019-08-12T19:56:10.000Z | """Contains api endpoints."""
from .base import Endpoint
from .catchall_route import CatchAllRoute, CatchAllRequest
from .health import Health
from .log_level import SetLogLevel
from .metrics import PrometheusExporter
from .ping import Ping
from .trigger import Trigger
from .version import Version
# Public API of the endpoints package (the names exported by
# `from pnp.api.endpoints import *`).
__all__ = [
    'CatchAllRoute',
    'CatchAllRequest',
    'Endpoint',
    'Health',
    'PrometheusExporter',
    'Ping',
    'SetLogLevel',
    'Trigger',
    'Version'
]
| 20.652174 | 58 | 0.722105 |
795647b1d2399ed4e9c4d31a26d88c6039744939 | 1,286 | py | Python | zaqar/storage/__init__.py | mail2nsrajesh/zaqar | a68a03a228732050b33c2a7f35d1caa9f3467718 | [
"Apache-2.0"
] | 1 | 2015-03-22T18:41:13.000Z | 2015-03-22T18:41:13.000Z | zaqar/storage/__init__.py | mail2nsrajesh/zaqar | a68a03a228732050b33c2a7f35d1caa9f3467718 | [
"Apache-2.0"
] | null | null | null | zaqar/storage/__init__.py | mail2nsrajesh/zaqar | a68a03a228732050b33c2a7f35d1caa9f3467718 | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Zaqar Storage Drivers"""
from zaqar.storage import base
from zaqar.storage import errors # NOQA
# Hoist classes into package namespace
# so callers can use `zaqar.storage.X` instead of `zaqar.storage.base.X`.
Capabilities = base.Capabilities
ControlDriverBase = base.ControlDriverBase
DataDriverBase = base.DataDriverBase
CatalogueBase = base.CatalogueBase
Claim = base.Claim
Message = base.Message
Queue = base.Queue
Subscription = base.Subscription
PoolsBase = base.PoolsBase
FlavorsBase = base.FlavorsBase

# Default pagination limits, re-exported from `base` for convenience.
DEFAULT_QUEUES_PER_PAGE = base.DEFAULT_QUEUES_PER_PAGE
DEFAULT_MESSAGES_PER_PAGE = base.DEFAULT_MESSAGES_PER_PAGE
DEFAULT_POOLS_PER_PAGE = base.DEFAULT_POOLS_PER_PAGE
DEFAULT_SUBSCRIPTIONS_PER_PAGE = base.DEFAULT_SUBSCRIPTIONS_PER_PAGE
DEFAULT_MESSAGES_PER_CLAIM = base.DEFAULT_MESSAGES_PER_CLAIM
795647c8d857256cfc896f576471ca4292736ff5 | 1,038 | py | Python | euler/solutions/solution_19.py | rlucioni/euler | 7cc61a493e2dbbea73e8fc042918707a3862458b | [
"MIT"
] | 3 | 2017-02-07T08:12:52.000Z | 2020-12-03T11:16:44.000Z | euler/solutions/solution_19.py | rlucioni/euler | 7cc61a493e2dbbea73e8fc042918707a3862458b | [
"MIT"
] | null | null | null | euler/solutions/solution_19.py | rlucioni/euler | 7cc61a493e2dbbea73e8fc042918707a3862458b | [
"MIT"
] | null | null | null | """Counting Sundays
You are given the following information, but you may prefer to do some research for yourself.
- 1 Jan 1900 was a Monday.
- Thirty days has September,
April, June and November.
All the rest have thirty-one,
Saving February alone,
Which has twenty-eight, rain or shine.
And on leap years, twenty-nine.
- A leap year occurs on any year evenly divisible by 4, but not on a century unless it is divisible by 400.
How many Sundays fell on the first of the month during the twentieth century (1 Jan 1901 to 31 Dec 2000)?
"""
import calendar
MONTHS = 12
SUNDAY = 6
def get_first_sundays(low_year, high_year):
    """Count how many first-of-the-month days fall on a Sunday.

    Counts over every month of every year from `low_year` through
    `high_year`, inclusive. `calendar.monthrange(y, m)[0]` yields the
    weekday of day 1 (Monday == 0), so Sunday is `calendar.SUNDAY` (6).
    """
    month_firsts = (
        (year, month)
        for year in range(low_year, high_year + 1)
        for month in range(1, 13)
    )
    return sum(
        1
        for year, month in month_firsts
        if calendar.monthrange(year, month)[0] == calendar.SUNDAY
    )
7956491f0a72ec2f8aea3de3d3b2200bdab3f450 | 23,778 | py | Python | deeprobust/graph/utils.py | CrownX/DeepRobust | ede9246b6c882b0ae4ef666516c3e9d8f989299d | [
"MIT"
] | 3 | 2021-01-16T06:20:39.000Z | 2021-01-21T08:21:38.000Z | deeprobust/graph/utils.py | qilong-zhang/DeepRobust | 276a7048aded2cf3a190d3851ffd4587b7d1dd49 | [
"MIT"
] | null | null | null | deeprobust/graph/utils.py | qilong-zhang/DeepRobust | 276a7048aded2cf3a190d3851ffd4587b7d1dd49 | [
"MIT"
] | null | null | null | import numpy as np
import scipy.sparse as sp
import torch
from sklearn.model_selection import train_test_split
import torch.sparse as ts
import torch.nn.functional as F
import warnings
def encode_onehot(labels):
    """Convert an integer label array to its one-hot representation.

    Parameters
    ----------
    labels : numpy.array
        node labels (non-negative integers)

    Returns
    -------
    numpy.array
        one-hot matrix of shape (len(labels), labels.max() + 1)
    """
    num_classes = labels.max() + 1
    return np.eye(num_classes)[labels]
def tensor2onehot(labels):
    """Convert a label tensor to a one-hot tensor.

    Parameters
    ----------
    labels : torch.LongTensor
        node labels

    Returns
    -------
    torch.Tensor
        one-hot label tensor on the same device as `labels`
    """
    # Build the identity matrix directly on `labels`' device. Previously it
    # was created on CPU and then indexed with `labels`, which raises a
    # device-mismatch error when `labels` lives on CUDA.
    eye = torch.eye(labels.max() + 1, device=labels.device)
    onehot_mx = eye[labels]
    return onehot_mx
def preprocess(adj, features, labels, preprocess_adj=False, preprocess_feature=False, sparse=False, device='cpu'):
    """Convert adj, features, labels from array or sparse matrix to
    torch Tensor, and normalize the input data.

    Parameters
    ----------
    adj : scipy.sparse.csr_matrix
        the adjacency matrix.
    features : scipy.sparse.csr_matrix
        node features
    labels : numpy.array
        node labels
    preprocess_adj : bool
        whether to normalize the adjacency matrix
    preprocess_feature : bool
        whether to normalize the feature matrix
    sparse : bool
        whether to return sparse tensor
    device : str
        'cpu' or 'cuda'
    """
    if preprocess_adj:
        # Bug fix: the normalized adjacency used to be assigned to an
        # unused local (`adj_norm`) and silently discarded, making
        # `preprocess_adj=True` a no-op.
        adj = normalize_adj(adj)
    if preprocess_feature:
        features = normalize_feature(features)
    labels = torch.LongTensor(labels)
    if sparse:
        adj = sparse_mx_to_torch_sparse_tensor(adj)
        features = sparse_mx_to_torch_sparse_tensor(features)
    else:
        features = torch.FloatTensor(np.array(features.todense()))
        adj = torch.FloatTensor(adj.todense())
    return adj.to(device), features.to(device), labels.to(device)
def to_tensor(adj, features, labels=None, device='cpu'):
    """Convert adj, features and (optionally) labels to torch tensors.

    Parameters
    ----------
    adj : scipy.sparse.csr_matrix or numpy.array
        the adjacency matrix
    features : scipy.sparse.csr_matrix or numpy.array
        node features
    labels : numpy.array, optional
        node labels
    device : str
        'cpu' or 'cuda'
    """
    def _to_torch(mat):
        # Sparse scipy input keeps a sparse torch representation;
        # everything else becomes a dense FloatTensor.
        if sp.issparse(mat):
            return sparse_mx_to_torch_sparse_tensor(mat)
        return torch.FloatTensor(np.array(mat))

    adj_t = _to_torch(adj).to(device)
    feat_t = _to_torch(features).to(device)
    if labels is None:
        return adj_t, feat_t
    return adj_t, feat_t, torch.LongTensor(labels).to(device)
def normalize_feature(mx):
    """Row-normalize a sparse matrix so every non-empty row sums to 1.

    Rows summing to zero are left all-zero (their inverse sum is mapped
    from inf to 0 instead of propagating).

    Parameters
    ----------
    mx : scipy.sparse matrix
        matrix to be normalized

    Returns
    -------
    scipy.sparse matrix
        row-normalized matrix
    """
    if type(mx) is not sp.lil.lil_matrix:
        mx = mx.tolil()
    row_sums = np.array(mx.sum(1))
    inv = np.power(row_sums, -1).flatten()
    inv[np.isinf(inv)] = 0.
    return sp.diags(inv).dot(mx)
def normalize_adj(mx):
    """Symmetrically normalize a sparse adjacency matrix:

        A' = (D + I)^-1/2 @ (A + I) @ (D + I)^-1/2

    Self-loops are added first, unless the diagonal already appears
    populated (heuristically detected via mx[0, 0] != 0).

    Parameters
    ----------
    mx : scipy.sparse matrix
        adjacency matrix to be normalized

    Returns
    -------
    scipy.sparse matrix
        normalized adjacency matrix
    """
    # TODO: maybe using coo format would be better?
    if type(mx) is not sp.lil.lil_matrix:
        mx = mx.tolil()
    if mx[0, 0] == 0:
        mx = mx + sp.eye(mx.shape[0])
    degrees = np.array(mx.sum(1))
    d_inv_sqrt = np.power(degrees, -1/2).flatten()
    d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.
    d_mat = sp.diags(d_inv_sqrt)
    return d_mat.dot(mx).dot(d_mat)
def normalize_sparse_tensor(adj, fill_value=1):
    """Symmetrically normalize a torch sparse adjacency tensor:
    D^-1/2 (A + I) D^-1/2, with self-loops of weight `fill_value`.

    Requires the third-party `torch_scatter` package (imported lazily).
    """
    edge_index = adj._indices()
    edge_weight = adj._values()
    num_nodes = adj.size(0)
    edge_index, edge_weight = add_self_loops(
        edge_index, edge_weight, fill_value, num_nodes)
    row, col = edge_index
    # Lazy import so torch_scatter is only required on this code path.
    from torch_scatter import scatter_add
    deg = scatter_add(edge_weight, row, dim=0, dim_size=num_nodes)
    deg_inv_sqrt = deg.pow(-0.5)
    # Isolated nodes produce inf; zero them out instead.
    deg_inv_sqrt[deg_inv_sqrt == float('inf')] = 0
    values = deg_inv_sqrt[row] * edge_weight * deg_inv_sqrt[col]
    shape = adj.shape
    return torch.sparse.FloatTensor(edge_index, values, shape)
def add_self_loops(edge_index, edge_weight=None, fill_value=1, num_nodes=None):
    """Append a self-loop edge (i, i) for every node to `edge_index`.

    Parameters
    ----------
    edge_index : torch.LongTensor of shape (2, E)
        COO edge indices.
    edge_weight : torch.Tensor of shape (E,), optional
        edge weights; added self-loops get weight `fill_value`.
    fill_value : scalar
        weight assigned to the new self-loops.
    num_nodes : int
        number of nodes (must be supplied by the caller).

    Returns
    -------
    (torch.LongTensor, torch.Tensor or None)
        edge index of shape (2, E + num_nodes) and matching weights.
    """
    node_ids = torch.arange(num_nodes, dtype=torch.long, device=edge_index.device)
    loop_index = torch.stack([node_ids, node_ids], dim=0)
    if edge_weight is not None:
        assert edge_weight.numel() == edge_index.size(1)
        loop_weight = edge_weight.new_full((num_nodes,), fill_value)
        edge_weight = torch.cat([edge_weight, loop_weight], dim=0)
    return torch.cat([edge_index, loop_index], dim=1), edge_weight
def normalize_adj_tensor(adj, sparse=False):
    """Normalize adjacency tensor matrix: D^-1/2 (A + I) D^-1/2.

    Sparse inputs are round-tripped through scipy (`to_scipy` ->
    `normalize_adj` -> back to a torch sparse tensor); dense inputs are
    normalized directly on the tensor's device.
    """
    device = torch.device("cuda" if adj.is_cuda else "cpu")
    if sparse:
        # warnings.warn('If you find the training process is too slow, you can uncomment line 207 in deeprobust/graph/utils.py. Note that you need to install torch_sparse')
        # TODO if this is too slow, uncomment the following code,
        # but you need to install torch_scatter
        # return normalize_sparse_tensor(adj)
        adj = to_scipy(adj)
        mx = normalize_adj(adj)
        return sparse_mx_to_torch_sparse_tensor(mx).to(device)
    else:
        # Add self-loops, then scale rows and columns by D^-1/2.
        mx = adj + torch.eye(adj.shape[0]).to(device)
        rowsum = mx.sum(1)
        r_inv = rowsum.pow(-1/2).flatten()
        # Isolated nodes yield inf; zero them out.
        r_inv[torch.isinf(r_inv)] = 0.
        r_mat_inv = torch.diag(r_inv)
        mx = r_mat_inv @ mx
        mx = mx @ r_mat_inv
        return mx
def degree_normalize_adj(mx):
    """Row-normalize (D^-1 (A + I)) a sparse adjacency matrix.

    Self-loops are added first when the diagonal looks empty
    (heuristic: mx[0, 0] == 0). Zero-degree rows stay all-zero.
    """
    mx = mx.tolil()
    if mx[0, 0] == 0:
        mx = mx + sp.eye(mx.shape[0])
    degrees = np.array(mx.sum(1))
    inv_deg = np.power(degrees, -1).flatten()
    inv_deg[np.isinf(inv_deg)] = 0.
    # Left-multiplying by diag(1/d) normalizes each row.
    return sp.diags(inv_deg).dot(mx)
def degree_normalize_sparse_tensor(adj, fill_value=1):
    """Row-normalize (D^-1 (A + I)) a torch sparse adjacency tensor.

    Requires the third-party `torch_scatter` package (imported lazily).
    """
    edge_index = adj._indices()
    edge_weight = adj._values()
    num_nodes = adj.size(0)
    edge_index, edge_weight = add_self_loops(
        edge_index, edge_weight, fill_value, num_nodes)
    row, col = edge_index
    # Lazy import so torch_scatter is only required on this code path.
    from torch_scatter import scatter_add
    deg = scatter_add(edge_weight, row, dim=0, dim_size=num_nodes)
    # NOTE: despite the name, this is the plain inverse degree (pow(-1)),
    # not an inverse square root.
    deg_inv_sqrt = deg.pow(-1)
    deg_inv_sqrt[deg_inv_sqrt == float('inf')] = 0
    values = deg_inv_sqrt[row] * edge_weight
    shape = adj.shape
    return torch.sparse.FloatTensor(edge_index, values, shape)
def degree_normalize_adj_tensor(adj, sparse=True):
    """Row-normalize (D^-1 (A + I)) an adjacency given as a torch tensor.

    Sparse inputs are round-tripped through scipy; dense inputs are
    normalized directly on the tensor's device.
    """
    device = torch.device("cuda" if adj.is_cuda else "cpu")
    if sparse:
        # return degree_normalize_sparse_tensor(adj)
        adj = to_scipy(adj)
        mx = degree_normalize_adj(adj)
        return sparse_mx_to_torch_sparse_tensor(mx).to(device)
    else:
        mx = adj + torch.eye(adj.shape[0]).to(device)
        rowsum = mx.sum(1)
        r_inv = rowsum.pow(-1).flatten()
        # Isolated nodes yield inf; zero them out.
        r_inv[torch.isinf(r_inv)] = 0.
        r_mat_inv = torch.diag(r_inv)
        mx = r_mat_inv @ mx
        return mx
def accuracy(output, labels):
    """Return the accuracy of model `output` against `labels`.

    Parameters
    ----------
    output : torch.Tensor
        model output of shape (N, num_classes)
    labels : torch.Tensor, numpy.array, list or scalar
        ground-truth labels

    Returns
    -------
    torch.Tensor (0-dim, double)
        fraction of correctly predicted samples
    """
    # Accept a bare scalar label as a length-1 sequence.
    if not hasattr(labels, '__len__'):
        labels = [labels]
    if type(labels) is not torch.Tensor:
        labels = torch.LongTensor(labels)
    predictions = output.max(1)[1].type_as(labels)
    n_correct = predictions.eq(labels).double().sum()
    return n_correct / len(labels)
def loss_acc(output, labels, targets, avg_loss=True):
    """Compute NLL loss and correctness restricted to the `targets` subset.

    Parameters
    ----------
    output : torch.Tensor
        log-probabilities of shape (N, num_classes)
    labels : torch.Tensor or sequence
        ground-truth labels for all N samples
    targets : index array
        subset of rows to evaluate
    avg_loss : bool
        if True, return mean loss and mean accuracy; otherwise return
        per-target loss tensor and per-target correctness tensor

    Returns
    -------
    (torch.Tensor, torch.Tensor)
        loss and accuracy (or per-target correctness)
    """
    if type(labels) is not torch.Tensor:
        labels = torch.LongTensor(labels)
    preds = output.max(1)[1].type_as(labels)
    correct = preds.eq(labels).double()[targets]
    reduction = 'mean' if avg_loss else 'none'
    loss = F.nll_loss(output[targets], labels[targets], reduction=reduction)
    if avg_loss:
        return loss, correct.sum() / len(targets)
    return loss, correct
def classification_margin(output, true_label):
    """Return `P(true_label) - max P(other class)` for a log-prob vector.

    Positive means the true class wins; negative means misclassified.

    Parameters
    ----------
    output : torch.Tensor
        1-D output (log-probability) vector for a single node
    true_label : int
        ground-truth label of the node

    Returns
    -------
    float
        classification margin of the node
    """
    probs = torch.exp(output)
    true_prob = probs[true_label].clone()
    # Zero out the true class so argmax picks the best *competing* class.
    probs[true_label] = 0
    runner_up = probs[probs.argmax()]
    return (true_prob - runner_up).item()
def sparse_mx_to_torch_sparse_tensor(sparse_mx):
    """Convert a scipy sparse matrix to a torch sparse FloatTensor."""
    coo = sparse_mx.tocoo().astype(np.float32)
    # (2, nnz) index tensor in COO layout.
    indices = torch.LongTensor(np.vstack((coo.row, coo.col)))
    values = torch.FloatTensor(coo.data)
    return torch.sparse.FloatTensor(indices, values, torch.Size(coo.shape))
def to_scipy(tensor):
    """Convert a dense or sparse torch tensor to a scipy csr_matrix."""
    if is_sparse_tensor(tensor):
        values = tensor._values().cpu().numpy()
        indices = tensor._indices().cpu().numpy()
    else:
        # Dense path: collect the nonzero coordinates and their values.
        idx = tensor.nonzero().t()
        values = tensor[idx[0], idx[1]].cpu().numpy()
        indices = idx.cpu().numpy()
    return sp.csr_matrix((values, indices), shape=tensor.shape)
def is_sparse_tensor(tensor):
    """Return True iff `tensor` uses the torch sparse COO layout.

    Parameters
    ----------
    tensor : torch.Tensor
        tensor to inspect

    Returns
    -------
    bool
        whether the tensor is a sparse (COO) tensor
    """
    return tensor.layout == torch.sparse_coo
def get_train_val_test(nnodes, val_size=0.1, test_size=0.8, stratify=None, seed=None):
    """This setting follows nettack/mettack, where we split the nodes
    into 10% training, 10% validation and 80% testing data

    Parameters
    ----------
    nnodes : int
        number of nodes in total
    val_size : float
        size of validation set
    test_size : float
        size of test set
    stratify :
        data is expected to split in a stratified fashion. So stratify should be labels.
    seed : int or None
        random seed

    Returns
    -------
    idx_train :
        node training indices
    idx_val :
        node validation indices
    idx_test :
        node test indices
    """
    assert stratify is not None, 'stratify cannot be None!'
    if seed is not None:
        np.random.seed(seed)
    idx = np.arange(nnodes)
    train_size = 1 - val_size - test_size
    # First carve off the test set; the remainder is split train/val below.
    idx_train_and_val, idx_test = train_test_split(idx,
                                                   random_state=None,
                                                   train_size=train_size + val_size,
                                                   test_size=test_size,
                                                   stratify=stratify)
    if stratify is not None:
        # Restrict the stratification labels to the remaining nodes.
        stratify = stratify[idx_train_and_val]
    # Split sizes are rescaled so they are fractions of the remainder.
    idx_train, idx_val = train_test_split(idx_train_and_val,
                                          random_state=None,
                                          train_size=(train_size / (train_size + val_size)),
                                          test_size=(val_size / (train_size + val_size)),
                                          stratify=stratify)
    return idx_train, idx_val, idx_test
def get_train_test(nnodes, test_size=0.8, stratify=None, seed=None):
    """This function returns training and test set without validation.
    It can be used for settings of different label rates.

    Parameters
    ----------
    nnodes : int
        number of nodes in total
    test_size : float
        size of test set
    stratify :
        data is expected to split in a stratified fashion. So stratify should be labels.
    seed : int or None
        random seed

    Returns
    -------
    idx_train :
        node training indices
    idx_test :
        node test indices
    """
    assert stratify is not None, 'stratify cannot be None!'
    if seed is not None:
        np.random.seed(seed)
    idx = np.arange(nnodes)
    train_size = 1 - test_size
    idx_train, idx_test = train_test_split(idx, random_state=None,
                                           train_size=train_size,
                                           test_size=test_size,
                                           stratify=stratify)
    return idx_train, idx_test
def get_train_val_test_gcn(labels, seed=None):
    """This setting follows gcn, where we randomly sample 20 instances for each class
    as training data, 500 instances as validation data, 1000 instances as test data.
    Note here we are not using fixed splits. When random seed changes, the splits
    will also change.

    Parameters
    ----------
    labels : numpy.array
        node labels
    seed : int or None
        random seed

    Returns
    -------
    idx_train :
        node training indices
    idx_val :
        node validation indices
    idx_test :
        node test indices
    """
    if seed is not None:
        np.random.seed(seed)
    idx = np.arange(len(labels))
    nclass = labels.max() + 1
    idx_train = []
    idx_unlabeled = []
    for i in range(nclass):
        labels_i = idx[labels == i]
        labels_i = np.random.permutation(labels_i)
        # Fix: `np.int` was removed in NumPy 1.24 -- use the builtin `int`.
        idx_train = np.hstack((idx_train, labels_i[: 20])).astype(int)
        idx_unlabeled = np.hstack((idx_unlabeled, labels_i[20:])).astype(int)
    idx_unlabeled = np.random.permutation(idx_unlabeled)
    idx_val = idx_unlabeled[: 500]
    idx_test = idx_unlabeled[500: 1500]
    return idx_train, idx_val, idx_test
def get_train_test_labelrate(labels, label_rate):
    """Get train/test splits matching a target label rate.

    Parameters
    ----------
    labels : numpy.array
        node labels
    label_rate : float
        fraction of all nodes to label for training, spread evenly
        across classes

    Returns
    -------
    idx_train, idx_test : numpy.array
        training and test node indices (the validation split produced by
        `get_splits_each_class` is discarded)
    """
    nclass = labels.max() + 1
    # Per-class training count so that the overall label rate is matched.
    train_size = int(round(len(labels) * label_rate / nclass))
    print("=== train_size = %s ===" % train_size)
    idx_train, idx_val, idx_test = get_splits_each_class(labels, train_size=train_size)
    return idx_train, idx_test
def get_splits_each_class(labels, train_size):
    """We randomly sample n instances for class, where n = train_size.

    The next `train_size` instances per class form the validation set and
    the remainder per class forms the test set; all three index arrays are
    returned shuffled.

    Parameters
    ----------
    labels : numpy.array
        node labels
    train_size : int
        number of training (and validation) samples per class

    Returns
    -------
    idx_train, idx_val, idx_test : numpy.array
        shuffled node indices for each split
    """
    idx = np.arange(len(labels))
    nclass = labels.max() + 1
    idx_train = []
    idx_val = []
    idx_test = []
    for i in range(nclass):
        labels_i = idx[labels == i]
        labels_i = np.random.permutation(labels_i)
        # Fix: `np.int` was removed in NumPy 1.24 -- use the builtin `int`.
        idx_train = np.hstack((idx_train, labels_i[: train_size])).astype(int)
        idx_val = np.hstack((idx_val, labels_i[train_size: 2 * train_size])).astype(int)
        idx_test = np.hstack((idx_test, labels_i[2 * train_size:])).astype(int)
    return np.random.permutation(idx_train), np.random.permutation(idx_val), \
           np.random.permutation(idx_test)
def unravel_index(index, array_shape):
    """Convert a flat index into (row, col) for a 2-D array of `array_shape`."""
    n_cols = array_shape[1]
    return index // n_cols, index % n_cols
def get_degree_squence(adj):
    """Return the degree sequence (column sums) of an adjacency matrix.

    Works for numpy arrays, scipy matrices and dense torch tensors via
    `.sum(0)`; torch sparse tensors fall back to `torch.sparse.sum`.
    """
    try:
        return adj.sum(0)
    except Exception:
        # Narrowed from a bare `except:`, which would also swallow
        # KeyboardInterrupt/SystemExit.
        return ts.sum(adj, dim=1).to_dense()
def likelihood_ratio_filter(node_pairs, modified_adjacency, original_adjacency, d_min, threshold=0.004):
    """
    Filter the input node pairs based on the likelihood ratio test proposed by Zügner et al. 2018, see
    https://dl.acm.org/citation.cfm?id=3220078. In essence, for each node pair return 1 if adding/removing the edge
    between the two nodes does not violate the unnoticeability constraint, and return 0 otherwise. Assumes unweighted
    and undirected graphs.
    """
    N = int(modified_adjacency.shape[0])
    # original_degree_sequence = get_degree_squence(original_adjacency)
    # current_degree_sequence = get_degree_squence(modified_adjacency)
    original_degree_sequence = original_adjacency.sum(0)
    current_degree_sequence = modified_adjacency.sum(0)
    concat_degree_sequence = torch.cat((current_degree_sequence, original_degree_sequence))
    # Compute the log likelihood values of the original, modified, and combined degree sequences.
    ll_orig, alpha_orig, n_orig, sum_log_degrees_original = degree_sequence_log_likelihood(original_degree_sequence, d_min)
    ll_current, alpha_current, n_current, sum_log_degrees_current = degree_sequence_log_likelihood(current_degree_sequence, d_min)
    ll_comb, alpha_comb, n_comb, sum_log_degrees_combined = degree_sequence_log_likelihood(concat_degree_sequence, d_min)
    # Compute the log likelihood ratio
    current_ratio = -2 * ll_comb + 2 * (ll_orig + ll_current)
    # Compute new log likelihood values that would arise if we add/remove the edges corresponding to each node pair.
    new_lls, new_alphas, new_ns, new_sum_log_degrees = updated_log_likelihood_for_edge_changes(node_pairs,
                                                                                              modified_adjacency, d_min)
    # Combination of the original degree distribution with the distributions corresponding to each node pair.
    n_combined = n_orig + new_ns
    new_sum_log_degrees_combined = sum_log_degrees_original + new_sum_log_degrees
    alpha_combined = compute_alpha(n_combined, new_sum_log_degrees_combined, d_min)
    new_ll_combined = compute_log_likelihood(n_combined, alpha_combined, new_sum_log_degrees_combined, d_min)
    new_ratios = -2 * new_ll_combined + 2 * (new_lls + ll_orig)
    # Allowed edges are only those for which the resulting likelihood ratio measure is < than the threshold
    allowed_edges = new_ratios < threshold
    # Fix: `np.bool` was removed in NumPy 1.24 -- use the builtin `bool`.
    if allowed_edges.is_cuda:
        filtered_edges = node_pairs[allowed_edges.cpu().numpy().astype(bool)]
    else:
        filtered_edges = node_pairs[allowed_edges.numpy().astype(bool)]
    allowed_mask = torch.zeros(modified_adjacency.shape)
    allowed_mask[filtered_edges.T] = 1
    # Symmetrize: the graph is undirected, so allow both edge directions.
    allowed_mask += allowed_mask.t()
    return allowed_mask, current_ratio
def degree_sequence_log_likelihood(degree_sequence, d_min):
    """
    Compute the (maximum) log likelihood of the Powerlaw distribution fit on a degree distribution.
    """
    # Determine which degrees are to be considered, i.e. >= d_min.
    D_G = degree_sequence[(degree_sequence >= d_min.item())]
    try:
        sum_log_degrees = torch.log(D_G).sum()
    except:
        # Fall back to numpy when D_G is not a torch tensor.
        sum_log_degrees = np.log(D_G).sum()
    n = len(D_G)
    # MLE of the power-law exponent, then the likelihood under it.
    alpha = compute_alpha(n, sum_log_degrees, d_min)
    ll = compute_log_likelihood(n, alpha, sum_log_degrees, d_min)
    return ll, alpha, n, sum_log_degrees
def updated_log_likelihood_for_edge_changes(node_pairs, adjacency_matrix, d_min):
    """ Adopted from https://github.com/danielzuegner/nettack
    """
    # For each node pair find out whether there is an edge or not in the input adjacency matrix.
    edge_entries_before = adjacency_matrix[node_pairs.T]
    degree_sequence = adjacency_matrix.sum(1)
    D_G = degree_sequence[degree_sequence >= d_min.item()]
    sum_log_degrees = torch.log(D_G).sum()
    n = len(D_G)
    # Flipping an edge changes both endpoint degrees by +1 (edge added,
    # entry was 0) or -1 (edge removed, entry was 1).
    deltas = -2 * edge_entries_before + 1
    d_edges_before = degree_sequence[node_pairs]
    d_edges_after = degree_sequence[node_pairs] + deltas[:, None]
    # Sum the log of the degrees after the potential changes which are >= d_min
    sum_log_degrees_after, new_n = update_sum_log_degrees(sum_log_degrees, n, d_edges_before, d_edges_after, d_min)
    # Updated estimates of the Powerlaw exponents
    new_alpha = compute_alpha(new_n, sum_log_degrees_after, d_min)
    # Updated log likelihood values for the Powerlaw distributions
    new_ll = compute_log_likelihood(new_n, new_alpha, sum_log_degrees_after, d_min)
    return new_ll, new_alpha, new_n, sum_log_degrees_after
def update_sum_log_degrees(sum_log_degrees_before, n_old, d_old, d_new, d_min):
    """Incrementally update sum(log(degree)) and the count of degrees
    >= d_min after per-row degree changes from `d_old` to `d_new`.

    Returns
    -------
    (torch.Tensor, torch.Tensor)
        updated per-row sum of log-degrees, and the (float) updated count
        of in-range degrees
    """
    in_range_old = d_old >= d_min
    in_range_new = d_new >= d_min
    # Mask out sub-threshold degrees, clamping to >= 1 so log() yields 0
    # for them instead of -inf.
    log_old = torch.log(torch.clamp(d_old * in_range_old.float(), min=1))
    log_new = torch.log(torch.clamp(d_new * in_range_new.float(), min=1))
    sum_log_degrees_after = sum_log_degrees_before - log_old.sum(1) + log_new.sum(1)
    # Adjust the in-range degree count by what left and what entered.
    new_n = n_old - (in_range_old != 0).sum(1) + (in_range_new != 0).sum(1)
    return sum_log_degrees_after, new_n.float()
def compute_alpha(n, sum_log_degrees, d_min):
    """Maximum-likelihood estimate of the power-law exponent alpha, fitted
    over the n degrees >= d_min whose log-sum is `sum_log_degrees`.

    Works with torch tensors; falls back to numpy when torch.log rejects
    the input type.
    """
    try:
        denom = sum_log_degrees - n * torch.log(d_min - 0.5)
    except:
        denom = sum_log_degrees - n * np.log(d_min - 0.5)
    return 1 + n / denom
def compute_log_likelihood(n, alpha, sum_log_degrees, d_min):
    """Power-law log-likelihood of n degrees (log-sum `sum_log_degrees`)
    under exponent `alpha` with minimum degree `d_min`.

    Works with torch tensors; falls back to numpy when torch.log rejects
    the input type.
    """
    try:
        return n * torch.log(alpha) + n * alpha * torch.log(d_min) + (alpha + 1) * sum_log_degrees
    except:
        return n * np.log(alpha) + n * alpha * np.log(d_min) + (alpha + 1) * sum_log_degrees
def ravel_multiple_indices(ixs, shape, reverse=False):
    """
    "Flattens" multiple 2D input indices into indices on the flattened matrix,
    similar to np.ravel_multi_index, but vectorized over n index pairs.

    Parameters
    ----------
    ixs: array of ints shape (n, 2)
        The array of n indices that will be flattened.
    shape: list or tuple of ints of length 2
        The shape of the corresponding matrix.
    reverse: bool
        If True, swap the roles of the two index columns.

    Returns
    -------
    array of n ints between 0 and shape[0]*shape[1]-1
        The indices on the flattened matrix corresponding to the 2D input indices.
    """
    if reverse:
        rows, cols = ixs[:, 1], ixs[:, 0]
    else:
        rows, cols = ixs[:, 0], ixs[:, 1]
    return rows * shape[1] + cols
def visualize(your_var):
    """Render and open the autograd computation graph of `your_var`.

    Requires the third-party `graphviz` and `torchviz` packages
    (imported lazily so the rest of the module works without them).
    """
    # NOTE(review): Digraph and Variable are imported but never used here.
    from graphviz import Digraph
    import torch
    from torch.autograd import Variable
    from torchviz import make_dot
    make_dot(your_var).view()
def reshape_mx(mx, shape):
    """Rebuild sparse matrix `mx` as a csr_matrix of the given `shape`,
    keeping every stored nonzero at its original (row, col) position."""
    rows, cols = mx.nonzero()
    return sp.csr_matrix((mx.data, (rows, cols)), shape=shape)
# def check_path(file_path):
# if not osp.exists(file_path):
# os.system(f'mkdir -p {file_path}')
| 33.396067 | 172 | 0.65527 |
79564959a69e4e77bf60a96d35ef4e056e629cb0 | 51 | py | Python | bcdc_apitests/config/__init__.py | franTarkenton/bcdc_tests | 7c2280e2e1dc8b6c1314638c1839985c26cbb082 | [
"Apache-2.0"
] | 1 | 2019-11-04T16:35:53.000Z | 2019-11-04T16:35:53.000Z | bcdc_apitests/config/__init__.py | franTarkenton/bcdc_tests | 7c2280e2e1dc8b6c1314638c1839985c26cbb082 | [
"Apache-2.0"
] | 14 | 2019-06-18T23:40:59.000Z | 2022-01-10T21:53:06.000Z | bcdc_apitests/config/__init__.py | franTarkenton/bcdc_tests | 7c2280e2e1dc8b6c1314638c1839985c26cbb082 | [
"Apache-2.0"
] | 4 | 2019-05-23T18:59:29.000Z | 2019-06-10T22:34:57.000Z | '''
Created on Jul. 4, 2019
@author: kjnether
'''
| 8.5 | 23 | 0.607843 |
7956495a89514e673a61e55a28dc2fdfc106061b | 7,336 | py | Python | webapp/graphite/metrics/hypertable_search.py | foursquare/graphite | bd9a1ff71c76f109cc9bf6232378f8ce17108445 | [
"Apache-2.0"
] | 1 | 2015-04-09T19:05:51.000Z | 2015-04-09T19:05:51.000Z | webapp/graphite/metrics/hypertable_search.py | foursquare/graphite | bd9a1ff71c76f109cc9bf6232378f8ce17108445 | [
"Apache-2.0"
] | null | null | null | webapp/graphite/metrics/hypertable_search.py | foursquare/graphite | bd9a1ff71c76f109cc9bf6232378f8ce17108445 | [
"Apache-2.0"
] | null | null | null | import sys
import time
import subprocess
import os.path
from django.conf import settings
from hyperthrift.gen.ttypes import ScanSpec
from graphite.hypertable_client import removePrefix, addPrefix
from graphite.storage import _deduplicate, is_pattern
from graphite.logger import log
import re
import fnmatch
from graphite.hypertable_client import HyperTablePool
EXPANDABLE_PATH_RE = re.compile('.*[\*{}\[\]]+.*')
def regexifyPathExpr(pathExpr):
    """Translate a graphite path expression into a regex fragment:
    escape literal '+' and '.', then expand each '*' wildcard to
    "one or more non-dot characters"."""
    escaped = pathExpr.replace('+', '\\+').replace('.', '\\.')
    return escaped.replace('*', '[^\.]+')
CACHE_CHECK_INTERVAL_SECS = 300
class HyperIndex:
def __init__(self):
self.index_path = settings.INDEX_FILE + 'ht'
self.last_atime = 0
self.every_metric = ''
self.tree = ({}, {})
log.info("[HyperIndex] performing initial index load")
self._loadFromFile()
self._loadFromHyperTable()
def _loadFromFile(self):
if os.path.exists(self.index_path):
s = time.time()
fh = open(self.index_path)
has_lines = False
for l in fh:
if l.strip():
self._add(l.strip())
has_lines = True
fh.close()
if has_lines:
self.last_atime = int(os.path.getmtime(self.index_path)) * 10**9L
log.info("[HyperIndex] initial load took %.6f seconds" % (time.time() - s))
def _loadFromHyperTable(self):
fh = None
# if the index_path exists, update it
if os.path.exists(self.index_path):
fh = open(self.index_path, 'a')
spec = ScanSpec(start_time=self.last_atime, versions=1)
s = time.time()
self.last_atime = int(s) * 10**9L
metrics = []
def processResult(key, family, column, val, ts):
if not self._existsInTree(key):
if fh:
fh.write(key + '\n')
self._add(key, val)
HyperTablePool.doScan(spec, "search", processResult)
if fh:
fh.close()
log.info("[HyperIndex] index reload took %.6f seconds" % (time.time() - s))
# like find in tree, for exact matches.
# for index dup checking
def _existsInTree(self, key):
branches = key.split('.')
cursor = self.tree
leaf = branches.pop()
for branch in branches:
if branch not in cursor[1]:
return False
if leaf in cursor[0]:
return True
else:
return False
def _add(self, key, val):
realval = None
try:
if val != '':
realval = int(val)
except:
log.warning('unexpected val for %s: %s' % (key, val))
branches = key.split('.')
cursor = self.tree
leaf = branches.pop()
for branch in branches:
if branch not in cursor[1]:
cursor[1][branch] = ({}, {}) # (leaves, children)
cursor = cursor[1][branch]
cursor[0][leaf] = realval # add leaf
def _getMatches(self, haystack_dict, needle):
if type(needle) is list: # patterns, variants
entries = haystack_dict.keys()
matches = []
for variant in needle:
matches.extend(fnmatch.filter(entries, variant))
return sorted(list(_deduplicate(matches)))
else:
if needle in haystack_dict:
return [needle]
else:
return[]
# splits the key by '.', exact parts are strings, patterns are lists
def _split(self, key):
parts = key.split('.')
for i in range (0, len(parts)):
if is_pattern(parts[i]):
parts[i] = self._variants(parts[i])
return parts
# computes variants in a pathExpr
def _variants(self, pattern):
v1, v2 = pattern.find('{'), pattern.find('}')
if v1 > -1 and v2 > v1:
variations = pattern[v1+1:v2].split(',')
variants = [ pattern[:v1] + v + pattern[v2+1:] for v in variations ]
return variants
return [pattern]
def _findInTree(self, cursor, keyparts, leaf_matches_leaves=True, leaf_matches_branches=False):
if not keyparts:
return []
#print keyparts
part = keyparts.pop()
if len(keyparts) == 0: #leaf
res = []
if leaf_matches_leaves:
res.extend([([e], True, cursor[0][e]) for e in self._getMatches(cursor[0], part)])
if leaf_matches_branches:
res.extend([([e], False, None) for e in self._getMatches(cursor[1], part)])
#print res
return res
else:
results = []
for match in self._getMatches(cursor[1], part):
#print match
postfixes = self._findInTree(cursor[1][match], keyparts[:], leaf_matches_leaves, leaf_matches_branches)
for postfix in postfixes:
postfix[0].append(match)
results.append(postfix)
return results
def findInTree(self, pathExpr, leaf_matches_leaves=True, leaf_matches_branches=False):
    # Public entry: resolve *pathExpr* against the cached in-memory tree,
    # refreshing the cache first when the check interval has elapsed.
    # NOTE(review): int(time.time()) is scaled to nanoseconds (Python 2 long
    # literal 10**9L) and compared with self.last_atime — presumably
    # last_atime is also in ns; confirm where it is set.
    if int(time.time()) * 10**9L - self.last_atime > CACHE_CHECK_INTERVAL_SECS * 10**9L:
        self._loadFromHyperTable()
    s = time.time()
    parts = self._split(pathExpr)
    parts.reverse() #keyparts is reversed, because pop is fast
    res = self._findInTree(self.tree, parts, leaf_matches_leaves, leaf_matches_branches)
    # Each result triple is (reversed path parts, is_leaf, value).
    nodes = [HyperNode('.'.join(reversed(x[0])), x[1], x[2]) for x in res]
    log.info("[HyperIndex] search for %s took %.6f seconds" % (pathExpr, time.time() - s))
    return nodes
# only returns metrics
def findMetric(self, pathExpr):
    """Return (metric_path, value) pairs for every leaf node matching
    *pathExpr* (branch/folder nodes are excluded)."""
    matched = self.findInTree(pathExpr)
    return [(leaf.metric_path, leaf.value) for leaf in matched]
# returns HyperNodes which could be metrics, or subfolders
def find(self, pathExpr):
    """Return every HyperNode matching *pathExpr*, including branch
    (subfolder) nodes as well as leaf metrics."""
    return self.findInTree(pathExpr,
                           leaf_matches_leaves=True,
                           leaf_matches_branches=True)
# weird format for seemingly deprecated metrics/search endpoint
def search(self, query, max_results=None, keep_query_pattern=False):
    """Yield ``{'path', 'is_leaf'}`` dicts for every node matching *query*.

    Stops after *max_results* entries when given.  *keep_query_pattern* is
    accepted for interface compatibility but unused here.
    """
    count = 0
    for node in self.find(query):
        # 'is not None' instead of '!= None'; also lets max_results=0 yield
        # nothing, same as before.
        if max_results is not None and count >= max_results:
            return
        yield { 'path': node.metric_path, 'is_leaf': node.isLeaf() }
        count += 1
class HyperStore:
    """Path lookups implemented directly as HyperTable queries (no in-memory
    index, unlike HyperIndex)."""

    def find(self, pathExpr):
        """Return the matching paths; glob patterns are expanded into a
        REGEXP query, plain paths are returned as-is (prefix stripped)."""
        # (removed dead no-op 'pathExpr = pathExpr')
        log.info('searching for: %s' % pathExpr)
        if EXPANDABLE_PATH_RE.match(pathExpr):
            regex = regexifyPathExpr(pathExpr)
            where = 'ROW REGEXP "%s"' % regex
            # Use the literal prefix before the first '*' as a row-prefix
            # constraint so HyperTable can narrow the scan.
            starIndex = pathExpr.find('*')
            if starIndex > 0:
                where += ' AND ROW =^ "%s"' % pathExpr[0:starIndex]
            log.info('where: %s' % where)
            return [removePrefix(p) for p in self.findHelper(where)]
        else:
            return [removePrefix(pathExpr)]

    def search(self, query):
        """Case-insensitive search: literal '.' is escaped, '*' becomes '.*'.

        Raw strings used for the regex fragments (same byte values as the
        old non-raw literals, without the invalid-escape warning).
        """
        qre = '(?i)%s' % re.sub(r'\*', '.*', re.sub(r'\.', r'\.', query))
        return [removePrefix(p) for p in self.findByRegex(qre)]

    def findByRegex(self, regex):
        """Return all row keys whose ROW matches *regex*."""
        where = 'ROW REGEXP "%s"' % regex
        return self.findHelper(where)

    def findHelper(self, where):
        """Run a SELECT against the 'search' table and collect the row keys."""
        query = 'SELECT * FROM search WHERE %s' % (where)
        metrics = []
        def processResult(key, family, column, val, ts):
            metrics.append(key)
        HyperTablePool.doQuery(query, processResult)
        return metrics
class HyperNode:
    """One search result: a concrete metric (leaf) or a subfolder (branch)."""

    # Class-level shared context dict, part of the public interface.
    context = {}

    def __init__(self, metric_path, isLeaf, value):
        self.metric_path = metric_path
        self.real_metric = metric_path
        # Display name is the last dot-separated component of the path.
        self.name = metric_path.rsplit('.', 1)[-1]
        self.value = value
        self.__isLeaf = isLeaf

    def isLeaf(self):
        """True when this node is a metric rather than a folder."""
        return self.__isLeaf

    def __repr__(self):
        template = 'HyperNode(%s, %s)'
        return template % (self.metric_path, self.isLeaf())
# Module-level singleton, constructed eagerly at import time.
# NOTE(review): the HyperIndex constructor is defined earlier in this file —
# presumably it loads the index from HyperTable; confirm before assuming
# importing this module is cheap.
hypertable_index = HyperIndex()
| 31.350427 | 196 | 0.623637 |
7956496e55e4e2a09378fdbde3b76bbb7035a76f | 2,826 | py | Python | venv/lib/python3.8/site-packages/numpy/distutils/__config__.py | SvtFilatov/soccer-predict-prices | b003ebd7c3657688790183ef7d719c42290c11b9 | [
"MIT"
] | 4 | 2021-03-29T19:15:29.000Z | 2021-06-08T05:34:00.000Z | venv/lib/python3.8/site-packages/numpy/distutils/__config__.py | SvtFilatov/soccer-predict-prices | b003ebd7c3657688790183ef7d719c42290c11b9 | [
"MIT"
] | 3 | 2021-08-30T16:30:50.000Z | 2022-03-01T23:15:44.000Z | venv/lib/python3.8/site-packages/numpy/distutils/__config__.py | SvtFilatov/soccer-predict-prices | b003ebd7c3657688790183ef7d719c42290c11b9 | [
"MIT"
] | 3 | 2021-03-28T16:13:00.000Z | 2021-07-16T10:27:25.000Z | # This file is generated by numpy's setup.py
# It contains system_info results at the time of building this package.
__all__ = ["get_info","show"]

import os
import sys

# Directory holding the bundled OpenBLAS DLLs shipped with the wheel.
extra_dll_dir = os.path.join(os.path.dirname(__file__), '.libs')

if sys.platform == 'win32' and os.path.isdir(extra_dll_dir):
    # Python 3.8 tightened Windows DLL resolution, so the explicit
    # add_dll_directory() call is required there; older versions fall back
    # to extending PATH.
    if sys.version_info >= (3, 8):
        os.add_dll_directory(extra_dll_dir)
    else:
        os.environ.setdefault('PATH', '')
        os.environ['PATH'] += os.pathsep + extra_dll_dir

# Build-time system_info results (an empty dict means that library was not
# found when this package was built).
blas_mkl_info={}
blis_info={}
openblas_info={'libraries': ['openblas', 'openblas'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None)]}
blas_opt_info={'libraries': ['openblas', 'openblas'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None)]}
lapack_mkl_info={}
openblas_lapack_info={'libraries': ['openblas', 'openblas'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None)]}
lapack_opt_info={'libraries': ['openblas', 'openblas'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None)]}
def get_info(name):
    """Return the recorded build-time info for *name*.

    Falls back to the ``<name>_info`` global, then to an empty dict.
    """
    module_globals = globals()
    if name in module_globals:
        return module_globals[name]
    return module_globals.get(name + "_info", {})
def show():
    """Print the libraries NumPy was built against.

    Walks this module's globals and prints every public dict — each one a
    ``system_info`` result recorded by setup.py (libraries, library_dirs,
    language, define_macros, ...).  Empty dicts are reported as
    NOT AVAILABLE.  Classes specifying the printed information live in the
    `numpy.distutils.system_info` module.

    See Also
    --------
    get_include : Returns the directory containing NumPy C header files.
    """
    for name, info_dict in globals().items():
        # Skip private names and anything that is not a plain info dict.
        if name.startswith("_"):
            continue
        if type(info_dict) is not dict:
            continue
        print(name + ":")
        if not info_dict:
            print(" NOT AVAILABLE")
        for key, value in info_dict.items():
            text = str(value)
            # Abbreviate very long source lists to keep the output readable.
            if key == "sources" and len(text) > 200:
                text = text[:60] + " ...\n... " + text[-60:]
            print(" %s = %s" % (key, text))
| 35.772152 | 154 | 0.621019 |
79564a01fe843b5c3f7ae7630448555d77be2d45 | 1,461 | py | Python | pylxd/deprecated/certificate.py | AdamIsrael/pylxd | d5d47a4d1185b4956e997d70e09d649ea73ba26b | [
"Apache-2.0"
] | null | null | null | pylxd/deprecated/certificate.py | AdamIsrael/pylxd | d5d47a4d1185b4956e997d70e09d649ea73ba26b | [
"Apache-2.0"
] | 1 | 2018-04-21T16:31:29.000Z | 2018-04-21T16:31:29.000Z | pylxd/deprecated/certificate.py | AdamIsrael/pylxd | d5d47a4d1185b4956e997d70e09d649ea73ba26b | [
"Apache-2.0"
] | 1 | 2021-08-16T15:00:35.000Z | 2021-08-16T15:00:35.000Z | # Copyright (c) 2015 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from pylxd.deprecated import base
class LXDCertificate(base.LXDBase):
    """Deprecated REST wrapper for the LXD ``/1.0/certificates`` endpoints."""

    def certificate_list(self):
        """Return the fingerprints of all trusted certificates."""
        state, data = self.connection.get_object('GET', '/1.0/certificates')
        prefix = '/1.0/certificates/'
        return [url.split(prefix)[-1] for url in data['metadata']]

    def certificate_show(self, fingerprint):
        """Fetch the certificate identified by *fingerprint*."""
        endpoint = '/1.0/certificates/%s' % fingerprint
        return self.connection.get_object('GET', endpoint)

    def certificate_create(self, certificate):
        """Register a new trusted certificate."""
        payload = json.dumps(certificate)
        return self.connection.get_status('POST', '/1.0/certificates', payload)

    def certificate_delete(self, fingerprint):
        """Remove the certificate identified by *fingerprint*."""
        endpoint = '/1.0/certificates/%s' % fingerprint
        return self.connection.get_status('DELETE', endpoint)
79564cddfe2c14ef84ee993d3d19cf794ae8ab87 | 644 | py | Python | src/production-manage.py | gravitationalwavedc/gwlab_cwfollowup | 7bc9ab125c8336f5ac86ca24c24d02001e918b5e | [
"MIT"
] | null | null | null | src/production-manage.py | gravitationalwavedc/gwlab_cwfollowup | 7bc9ab125c8336f5ac86ca24c24d02001e918b5e | [
"MIT"
] | null | null | null | src/production-manage.py | gravitationalwavedc/gwlab_cwfollowup | 7bc9ab125c8336f5ac86ca24c24d02001e918b5e | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Django command-line entry point using the production settings module."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE',
                          'gw_cwfollowup.production-settings')
    execute_from_command_line = _import_django_cli()
    execute_from_command_line(sys.argv)


def _import_django_cli():
    # Import Django's CLI dispatcher, re-raising with a friendly hint.
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    return execute_from_command_line


if __name__ == '__main__':
    main()
| 29.272727 | 88 | 0.689441 |
79564d4b28bf06797eff2802708567952f76212b | 49,293 | py | Python | c_statements.py | Naras/programming_language_transformer | a30629751bd177c30e1de3a4ca5fad8869226bb4 | [
"MIT"
] | null | null | null | c_statements.py | Naras/programming_language_transformer | a30629751bd177c30e1de3a4ca5fad8869226bb4 | [
"MIT"
] | null | null | null | c_statements.py | Naras/programming_language_transformer | a30629751bd177c30e1de3a4ca5fad8869226bb4 | [
"MIT"
] | null | null | null | import re
# --- Test fixtures: each constant below holds a snippet of C source.
# NOTE(review): the consumer is not visible in this chunk — presumably these
# feed a C-statement parser/translator elsewhere in this project; confirm.

# C 'if' with block comments and embedded newlines.
stmt_comment = "if(tvibptr->stype =='1'){ /* blah \n bleh */;\nyes=findverb;}\n/* foo */\n"
# stmt_comment = "if(tvibptr->stype =='1'){ /* blah */yes=findverb;}/* */"
# stmt_comment = "if(tvibptr->stype =='1') yes=findverb;"
# Declarations with initializers.
stmt_var_decl_initialized = "int yes=0,success=1;char t='ty'"
# Assignments, multi-dimensional indexing, trailing '//' comment.
stmt_assignment = "choice = (3 + 4 * 8 % 3) / 7;\n a=b; type[i]=words[3][0]; type[a][b]=words[c][d][e]; // rest of line a comment "
# Function declarations/definitions of increasing complexity.
stmt_func_decl_default = "int gcd(unsigned char u, int v)\n{ if(v==2) return u - v * w;}fdisp(FILE *sfp,VIBAK *dvibptr,DISP_ARTH *start,unsigned char voice[]);"
stmt_func_decl_complex = "int gcd(int u, int v){ if(v==k) return u * v/(w+r); else return gcd(v, v + (u-v)/(v-u));}; empty_return(int y) return; unsigned char * asrPhoneticJoin( char *);"
stmt_func_decl_complex1 = "int choice(char type,unsigned char *word,unsigned char voice[], unsigned char vice[ik-k],int pos,VIBAK *tvibptr,FILE *afp,long fl,unsigned char *VerbMean)"
stmt_func_decl_complex2 = "int choice(int type, unsigned char *word){if(stype!='kartari') {choice = (3 + 4 * 8) / 7; blah = gcd->yt - rt->uy} else choice = rt->uy;}"
stmt_func_def_complex1 = "int choice(char type,unsigned char *word,unsigned char voice[],int pos,VIBAK *tvibptr,FILE *afp,long fl,unsigned char *VerbMean){" \
"int yes=0,success=1;" \
"if((tvibptr->stype =='1' && strcmp(tvibptr->specf,'dative')==0 ) || tvibptr->stype =='5' || tvibptr->stype=='2'|| tvibptr->stype=='4')" \
" {/* Check for case where there is only a single meaning for ¸ÂÝÏèÂÜ ÔÛ˳èÂÛ */" \
"yes=findverb(voice,tvibptr->sword,tvibptr,afp,fl,VerbMean);}return success;}"
stmt_func_def_complex2 = "int choice(char type,unsigned char *word,unsigned char voice[],int pos,VIBAK *tvibptr,FILE *afp,long fl,unsigned char *VerbMean){while(1){" \
"if((tvibptr->stype =='1' && strcmp(tvibptr->specf,'dative')==0 ) || tvibptr->stype =='5' || tvibptr->stype=='2'|| tvibptr->stype=='4'){" \
"/* Check for case where there is only a single meaning for ¸ÂÝÏèÂÜ ÔÛ˳èÂÛ */" \
"yes=findverb(voice,tvibptr->sword,tvibptr,afp,fl,VerbMean);if(tvibptr->stype=='2' && tvibptr->matnoun !=1 )" \
"{switch(tvibptr->spos){" \
"case 0:if(tvibptr->semlinga==0)strcat(tvibptr->arthaword,'×Ú ');if(tvibptr->semlinga==1)strcat(tvibptr->arthaword,'×£ ');if(tvibptr->semlinga==2)strcat(tvibptr->arthaword,'Âè ');break;" \
"case 1:strcat(tvibptr->arthaword,'ÂÆèÆÛÖè¾³ÏèÌÂÚÆÛÏÞȳ ');break;" \
"case 2:strcat(tvibptr->arthaword,'ÆÛÖè¾³ÏÁÂÚÆÛÏÞȳ ');break;" \
"case 3:strcat(tvibptr->arthaword,'ÆÛÖè¾×ÌèÈèÏÄÚÆÂÚÆÛÏÞȳ ');break;" \
"case 4:strcat(tvibptr->arthaword,'ÆÛÖè¾ÚÈÚÄÚÆÂÚÆÛÏÞȳ ');break;" \
"case 5:strcat(tvibptr->arthaword,'ÆÛÖè¾ÚÅÛ³ÏÁÂÚÆÛÏÞȳ ');break;}}" \
"if(tvibptr->stype == '2' || tvibptr->stype =='4' || tvibptr->stype=='5')success= 0;}if(tvibptr->stype =='1' && (strcmpi(tvibptr->specf,'object')==0)){" \
" /* Check for case where there is only a single meaning for ÄèÔÛÂÜÍÚ ÔÛ˳èÂÛ */" \
"yes=findverb(voice,tvibptr->sword,tvibptr,afp,fl,VerbMean);}" \
"/* If not in above case following steps lead to menu display for selection based on type of vibhakti */ " \
"if(tvibptr->stype =='1') { switch(tvibptr->spos) {" \
"case 0:if(strcmpi(voice,'kartari') ==0)strcpy(tvibptr->arthaword,tvibptr->sword);if(strcmpi(voice,'karmani') ==0){strcpy(tvibptr->arthaword,tvibptr->bword);strcat(tvibptr->arthaword,'ÆÛÖè¾³ÏèÂßÂÚÆÛÏÞȳ ');}break;" \
"case 1:if(strcmpi(voice,'kartari') ==0){strcpy(tvibptr->arthaword,tvibptr->bword);strcat(tvibptr->arthaword,'ÆÛÖè¾³ÏèÌÂÚÆÛÏÞȳ ');}if(strcmpi(voice,'karmani') ==0){strcpy(tvibptr->arthaword,tvibptr->sword);}break; " \
"case 2:strcpy(tvibptr->arthaword,tvibptr->bword);strcat(tvibptr->arthaword,'ÆÛÖè¾³ÏÁÂÚÆÛÏÞȳ ');break; " \
"case 3:strcpy(tvibptr->arthaword,tvibptr->bword);strcat(tvibptr->arthaword,'ÆÛÖè¾×ÌèÈèÏÄÚÆÂÚÆÛÏÞȳ ');break; " \
"case 4:strcpy(tvibptr->arthaword,tvibptr->bword);strcat(tvibptr->arthaword,'ÆÛÖè¾ÚÈÚÄÚÆÂÚÆÛÏÞȳ ');break; " \
"case 6:strcpy(tvibptr->arthaword,tvibptr->bword);strcat(tvibptr->arthaword,'×ÌèÊÆèÅÛ ');break; " \
"case 5:strcpy(tvibptr->arthaword,tvibptr->bword);strcat(tvibptr->arthaword,'ÆÛÖè¾ÚÅÛ³ÏÁÂÚÆÛÏÞȳ ');break;} } " \
"if (tvibptr->next != NULL)tvibptr=tvibptr->next; else break;}return success;}"
# Assignment whose right-hand side is a function-call comparison.
stmt_assignment_func = 'choice = strcmpi(voice,"karmani")==0'
# 'if' statements combining &&/|| conditions, struct-pointer access and calls.
stmt_if_assign = 'if(a==0 && a == b || strcmp(temp->Type,"Noun")==0) choice = rt->uy;'
stmt_if_assign2 = 'if(a==0 && a == b || strcmp(temp->Type,"Noun")==0) choice = rt->uy;' \
'if(strcmp(temp->Type,"Noun")==0 && strcmp(temp->specf,"Subject")==0 && temp->subinsen==0){Assignlingavib(drecord);break;}' \
'if(temp->next != NULL)temp=temp->next;else break;'
# if/else with compound statement and linked-list advance.
stmt_if_assign3 = 'if(a==b){Assignlingavib(drecord);break};\nelse temp=temp->next;'
stmt_if_assign4 = 'if(strcmp(word,list1[i])==0) {if(linga==0) strcpy(message,"×ÌÔáÂÚ"); ' \
'if(linga==1) strcpy(message,"×ÌÔá£");if(linga==2) strcpy(message,"×ÌÔá¢");' \
'strcpy(vword,tvibptr->bword); strcat(vword,message); strcpy(tvibptr->arthaword,vword);return 1; } '
stmt_strcmp_cpy_cat = 'if(strcmpi(voice,"karmani") ==0) \
{ \
strcpy(tvibptr->arthaword,tvibptr->bword); \
strcat(tvibptr->arthaword,"ÆÛÖè¾³ÏèÂßÂÚÆÛÏÞȳ ");if(strcmpi(voice,"karmani") ==0) strcpy(tvibptr->arthaword,tvibptr->bword);}'
stmt_switch_case = 'switch(spos) { case 0: choice = 3; bb=cc; break; case "1": i = 1; break; default: j = "ikh"}'
stmt_switch_case1 = 'switch(spos) { case 0: case "1": i = 1; break; case 3: kk == mm; gg = 99; default: j = "ikh"}' #
stmt_switch_case2 = 'switch(tvibptr->spos) {case 0:if(strcmpi(voice,"kartari") ==0) strcpy(tvibptr->arthaword,tvibptr->sword);' \
'if(strcmpi(voice,"karmani") ==0) strcpy(tvibptr->arthaword,tvibptr->sword);' \
'case "1": if(strcmpi(voice,"karmani") ==0)strcpy(tvibptr->arthaword,tvibptr->sword); break; case 3: j = "ikh"}'
stmt_switch_case22 = 'switch(tvibptr->spos) {case 0:i = 1; break; case "1": choice = 3; break; case 3: j = "ikh"}'
stmt_switch_case3 = 'switch(tvibptr->spos) {' \
'case 0:if(strcmpi(voice,"kartari") ==0)strcpy(tvibptr->arthaword,tvibptr->sword);' \
'if(strcmpi(voice,"karmani") ==0){strcpy(tvibptr->arthaword,tvibptr->bword);strcat(tvibptr->arthaword,"ÆÛÖè¾³ÏèÂßÂÚÆÛÏÞȳ ");}break;' \
'case 1:if(strcmpi(voice,"kartari") ==0){strcpy(tvibptr->arthaword,tvibptr->bword);strcat(tvibptr->arthaword,"ÆÛÖè¾³ÏèÌÂÚÆÛÏÞȳ ");}' \
'if(strcmpi(voice,"karmani") ==0){strcpy(tvibptr->arthaword,tvibptr->sword);}break;' \
' case 2:strcpy(tvibptr->arthaword,tvibptr->bword);strcat(tvibptr->arthaword,"ÆÛÖè¾³ÏÁÂÚÆÛÏÞȳ ");break;' \
' case 3:strcpy(tvibptr->arthaword,tvibptr->bword);strcat(tvibptr->arthaword,"ÆÛÖè¾×ÌèÈèÏÄÚÆÂÚÆÛÏÞȳ ");break;' \
' case 4:strcpy(tvibptr->arthaword,tvibptr->bword);strcat(tvibptr->arthaword,"ÆÛÖè¾ÚÈÚÄÚÆÂÚÆÛÏÞȳ ");break; ' \
'case 6:strcpy(tvibptr->arthaword,tvibptr->bword);strcat(tvibptr->arthaword,"×ÌèÊÆèÅÛ ");break; ' \
'case 5:strcpy(tvibptr->arthaword,tvibptr->bword);strcat(tvibptr->arthaword,"ÆÛÖè¾ÚÅÛ³ÏÁÂÚÆÛÏÞȳ ");break;' \
'}'
stmt_while = 'while(1){if(strcmp(temp->Type,"Noun")==0 && strcmp(temp->specf,"Subject")==0 && temp->subinsen==0){Assignlingavib(drecord);break;}if(temp->next != NULL)temp=temp->next;else break;}'
stmt_if_while_complex1 = "if((tvibptr->stype =='1' && strcmp(tvibptr->specf,'dative')==0 ) || tvibptr->stype =='5' || tvibptr->stype=='2'|| tvibptr->stype=='4')" \
" {/* Check for case where there is only a single meaning for ¸ÂÝÏèÂÜ ÔÛ˳èÂÛ */" \
"yes=findverb(voice,tvibptr->sword,tvibptr,afp,fl,VerbMean);}"
stmt_while_complex2 = "while(1) { " \
"if((tvibptr->stype =='1' && strcmp(tvibptr->specf,'dative')==0 ) || tvibptr->stype =='5' || tvibptr->stype=='2'|| tvibptr->stype=='4') " \
"{ /* Check for case where there is only a single meaning for ¸ÂÝÏèÂÜ ÔÛ˳èÂÛ */ " \
"yes=findverb(voice,tvibptr->sword,tvibptr,afp,fl,VerbMean); " \
"if(tvibptr->stype=='2' && tvibptr->matnoun !=1 ) {" \
" switch(tvibptr->spos) {" \
" case 0: if(tvibptr->semlinga==0) strcat(tvibptr->arthaword,'×Ú ');" \
" if(tvibptr->semlinga==1) strcat(tvibptr->arthaword,'×£ ');" \
" if(tvibptr->semlinga==2) strcat(tvibptr->arthaword,'Âè '); break; " \
"case 1: strcat(tvibptr->arthaword,'ÂÆèÆÛÖè¾³ÏèÌÂÚÆÛÏÞȳ '); break; " \
"case 2: strcat(tvibptr->arthaword,'ÆÛÖè¾³ÏÁÂÚÆÛÏÞȳ '); break; " \
"case 3: strcat(tvibptr->arthaword,'ÆÛÖè¾×ÌèÈèÏÄÚÆÂÚÆÛÏÞȳ '); break; " \
"case 4: strcat(tvibptr->arthaword,'ÆÛÖè¾ÚÈÚÄÚÆÂÚÆÛÏÞȳ '); break; " \
"case 5: strcat(tvibptr->arthaword,'ÆÛÖè¾ÚÅÛ³ÏÁÂÚÆÛÏÞȳ '); break; }" \
" } if(tvibptr->stype == '2' || tvibptr->stype =='4' || tvibptr->stype=='5') success= 0; } " \
"if(tvibptr->stype =='1' && (strcmpi(tvibptr->specf,'object')==0)) {" \
" /* Check for case where there is only a single meaning for ÄèÔÛÂÜÍÚ ÔÛ˳èÂÛ */ " \
"yes=findverb(voice,tvibptr->sword,tvibptr,afp,fl,VerbMean); }}"
stmt_while_complex3 = "while(1){" \
"if((tvibptr->stype =='1' && strcmp(tvibptr->specf,'dative')==0 ) || tvibptr->stype =='5' || tvibptr->stype=='2'|| tvibptr->stype=='4'){" \
"/* Check for case where there is only a single meaning for ¸ÂÝÏèÂÜ ÔÛ˳èÂÛ */" \
"yes=findverb(voice,tvibptr->sword,tvibptr,afp,fl,VerbMean);if(tvibptr->stype=='2' && tvibptr->matnoun !=1 )" \
"{switch(tvibptr->spos){" \
"case 0:if(tvibptr->semlinga==0)strcat(tvibptr->arthaword,'×Ú ');if(tvibptr->semlinga==1)strcat(tvibptr->arthaword,'×£ ');if(tvibptr->semlinga==2)strcat(tvibptr->arthaword,'Âè ');break;" \
"case 1:strcat(tvibptr->arthaword,'ÂÆèÆÛÖè¾³ÏèÌÂÚÆÛÏÞȳ ');break;" \
"case 2:strcat(tvibptr->arthaword,'ÆÛÖè¾³ÏÁÂÚÆÛÏÞȳ ');break;" \
"case 3:strcat(tvibptr->arthaword,'ÆÛÖè¾×ÌèÈèÏÄÚÆÂÚÆÛÏÞȳ ');break;" \
"case 4:strcat(tvibptr->arthaword,'ÆÛÖè¾ÚÈÚÄÚÆÂÚÆÛÏÞȳ ');break;" \
"case 5:strcat(tvibptr->arthaword,'ÆÛÖè¾ÚÅÛ³ÏÁÂÚÆÛÏÞȳ ');break;}}" \
"if(tvibptr->stype == '2' || tvibptr->stype =='4' || tvibptr->stype=='5')success= 0;}if(tvibptr->stype =='1' && (strcmpi(tvibptr->specf,'object')==0)){" \
" /* Check for case where there is only a single meaning for ÄèÔÛÂÜÍÚ ÔÛ˳èÂÛ */" \
"yes=findverb(voice,tvibptr->sword,tvibptr,afp,fl,VerbMean);}" \
"/* If not in above case following steps lead to menu display for selection based on type of vibhakti */ " \
"if(tvibptr->stype =='1') { switch(tvibptr->spos) {" \
"case 0:if(strcmpi(voice,'kartari') ==0)strcpy(tvibptr->arthaword,tvibptr->sword);if(strcmpi(voice,'karmani') ==0){strcpy(tvibptr->arthaword,tvibptr->bword);strcat(tvibptr->arthaword,'ÆÛÖè¾³ÏèÂßÂÚÆÛÏÞȳ ');}break;" \
"case 1:if(strcmpi(voice,'kartari') ==0){strcpy(tvibptr->arthaword,tvibptr->bword);strcat(tvibptr->arthaword,'ÆÛÖè¾³ÏèÌÂÚÆÛÏÞȳ ');}if(strcmpi(voice,'karmani') ==0){strcpy(tvibptr->arthaword,tvibptr->sword);}break; " \
"case 2:strcpy(tvibptr->arthaword,tvibptr->bword);strcat(tvibptr->arthaword,'ÆÛÖè¾³ÏÁÂÚÆÛÏÞȳ ');break; " \
"case 3:strcpy(tvibptr->arthaword,tvibptr->bword);strcat(tvibptr->arthaword,'ÆÛÖè¾×ÌèÈèÏÄÚÆÂÚÆÛÏÞȳ ');break; " \
"case 4:strcpy(tvibptr->arthaword,tvibptr->bword);strcat(tvibptr->arthaword,'ÆÛÖè¾ÚÈÚÄÚÆÂÚÆÛÏÞȳ ');break; " \
"case 6:strcpy(tvibptr->arthaword,tvibptr->bword);strcat(tvibptr->arthaword,'×ÌèÊÆèÅÛ ');break; " \
"case 5:strcpy(tvibptr->arthaword,tvibptr->bword);strcat(tvibptr->arthaword,'ÆÛÖè¾ÚÅÛ³ÏÁÂÚÆÛÏÞȳ ');break;} } " \
"if (tvibptr->next != NULL)tvibptr=tvibptr->next; else break;}"
stmt_include = "#include <stdio.h>\n"
stmt_include2 = '#include "sengen1.h"\n #include "data.h"\n'
stmt_define = "#define KARTHARI 0 #define KARMANI 1 #define HALANT (unsigned char)'è' #define FULLSTOP 'ê' #define eof 255 "
stmt_typedef = "typedef struct { int vibhakti[20]; int vacana[20]; int linga[20]; int purusha[20]; unsigned char *subanta[20]; unsigned char *pratipadika[20]; unsigned char *erb[20]; /* End Removed Base */; int wordNum[20]; int numofNouns; } SUBANTA_DATA;"
stmt_typedef_many = "typedef struct { int vibhakti[20]; int vacana[20]; int linga[20]; int purusha[20]; unsigned char *subanta[20]; unsigned char *pratipadika[20]; unsigned char *erb[20]; /* End Removed Base */; int wordNum[20]; int numofNouns; } SUBANTA_DATA; typedef struct { int dhatuVidha[10]; int prayoga[10]; int lakara[10]; int purusha[10]; int vacana[10]; int gana[10]; int padi[10]; int karma[10]; int it[10]; unsigned char *tiganta[10]; unsigned char *dhatu[10]; unsigned char *nijdhatu[10]; unsigned char *sandhatu[10]; unsigned char *artha[10]; unsigned char *err[10]; /* End Removed Root */; int wordNum[10]; int numofVerbs; } TIGANTA_DATA; typedef struct { int vibhakti[20]; int vacana[20]; int linga[20]; int prayoga[20]; int krdType[20]; int dhatuVidha[20]; int purusha[20]; int gana[20]; int padi[20]; int karma[20]; int it[20]; unsigned char *krdanta[20]; unsigned char *pratipadika[20]; unsigned char *erb[20]; /* end removed base of krdanta */; unsigned char *dhatu[20]; unsigned char *nijdhatu[20]; unsigned char *sandhatu[20]; unsigned char *artha[20]; int wordNum[20]; int numofKrdantas; } KRDANTA_DATA; typedef struct { unsigned char *avyaya[30]; int wordNum[30]; int numofAvyayas; } AVYAYA_DATA; typedef struct { int dhatuVidha[20]; int gana[20]; int padi[20]; int karma[20]; int it[20]; int krdavType[20]; unsigned char *krdavyaya[20]; unsigned char *dhatu[20]; unsigned char *nijdhatu[20]; unsigned char *sandhatu[20]; unsigned char *artha[20]; int wordNum[20]; int numofKrdavyayas; } KRDAV_DATA; typedef struct { unsigned char *word[20]; int vibhakti[20]; int vacana[20]; int purusha[20]; int linga[20]; int wordPos[20]; int numofWords; } VIBHAKTI; typedef struct { unsigned char *verb; unsigned char *dhatu; int purusha; int vacana; int prayoga; int karma; int wordPos; } VERB; typedef struct { unsigned char *krdanta; int vibhakti; int vacana; int linga; int prayoga; int karma; int krdType; } PARTICIPLE; typedef struct { unsigned char *sentence; unsigned char 
*idens[100]; int numofIdens; } RECORD; typedef struct { unsigned char *iden[30]; int numofIdens; } WORD; typedef struct { unsigned char *word[15]; int numofWords; } TYPE;"
stmt_var_decl_array = "unsigned char list[]={'ÈÞÏèÔÚÁèØ', '¤ÈÏÚÁèØ', 'ÄÛÆ', 'ÏÚÂèÏÛ', '¤ØåÏÚÂèÏ', '×ÈèÂÚØ', 'ȳèÖ', 'ÌÚ×', '×¢ÔÂè×Ï'}; unsigned char list[jk-ui]; unsigned char list[i+9]={'ÈÞÏèÔÚÁèØ', '¤ÈÏÚÁèØ', 'ÄÛÆ', 'ÏÚÂèÏÛ', '¤ØåÏÚÂèÏ', '×ÈèÂÚØ', 'ȳèÖ', 'ÌÚ×', '×¢ÔÂè×Ï'};"
stmt_func_def_vibmenu_c_complete = "int choice(char type,unsigned char *word,unsigned char voice[],int pos,VIBAK *tvibptr,FILE *afp,long fl,unsigned char *VerbMean) { int yes=0,success=1; while(1) { if((tvibptr->stype =='1' && strcmp(tvibptr->specf,'dative')==0 ) || tvibptr->stype =='5' || tvibptr->stype=='2'|| tvibptr->stype=='4') { /* Check for case where there is only a single meaning for ¸ÂÝÏèÂÜ ÔÛ˳èÂÛ */ yes=findverb(voice,tvibptr->sword,tvibptr,afp,fl,VerbMean); if(tvibptr->stype=='2' && tvibptr->matnoun !=1 ) { switch(tvibptr->spos) { case 0: if(tvibptr->semlinga==0) strcat(tvibptr->arthaword,'×Ú '); if(tvibptr->semlinga==1) strcat(tvibptr->arthaword,'×£ '); if(tvibptr->semlinga==2) strcat(tvibptr->arthaword,'Âè '); break; case 1: strcat(tvibptr->arthaword,'ÂÆèÆÛÖè¾³ÏèÌÂÚÆÛÏÞȳ '); break; case 2: strcat(tvibptr->arthaword,'ÆÛÖè¾³ÏÁÂÚÆÛÏÞȳ '); break; case 3: strcat(tvibptr->arthaword,'ÆÛÖè¾×ÌèÈèÏÄÚÆÂÚÆÛÏÞȳ '); break; case 4: strcat(tvibptr->arthaword,'ÆÛÖè¾ÚÈÚÄÚÆÂÚÆÛÏÞȳ '); break; case 5: strcat(tvibptr->arthaword,'ÆÛÖè¾ÚÅÛ³ÏÁÂÚÆÛÏÞȳ '); break; } } if(tvibptr->stype == '2' || tvibptr->stype =='4' || tvibptr->stype=='5') success= 0; } if(tvibptr->stype =='1' && (strcmpi(tvibptr->specf,'object')==0)) { /* Check for case where there is only a single meaning for ÄèÔÛÂÜÍÚ ÔÛ˳èÂÛ */ yes=findverb(voice,tvibptr->sword,tvibptr,afp,fl,VerbMean); } /* If not in above case following steps lead to menu display for selection based on type of vibhakti */ if(tvibptr->stype =='1') { switch(tvibptr->spos) { case 0: if(strcmpi(voice,'kartari') ==0) strcpy(tvibptr->arthaword,tvibptr->sword); if(strcmpi(voice,'karmani') ==0) { strcpy(tvibptr->arthaword,tvibptr->bword); strcat(tvibptr->arthaword,'ÆÛÖè¾³ÏèÂßÂÚÆÛÏÞȳ '); } break; case 1: if(strcmpi(voice,'kartari') ==0) { strcpy(tvibptr->arthaword,tvibptr->bword); strcat(tvibptr->arthaword,'ÆÛÖè¾³ÏèÌÂÚÆÛÏÞȳ '); } if(strcmpi(voice,'karmani') ==0) { strcpy(tvibptr->arthaword,tvibptr->sword); } break; case 2: 
strcpy(tvibptr->arthaword,tvibptr->bword); strcat(tvibptr->arthaword,'ÆÛÖè¾³ÏÁÂÚÆÛÏÞȳ '); break; case 3: strcpy(tvibptr->arthaword,tvibptr->bword); strcat(tvibptr->arthaword,'ÆÛÖè¾×ÌèÈèÏÄÚÆÂÚÆÛÏÞȳ '); break; case 4: strcpy(tvibptr->arthaword,tvibptr->bword); strcat(tvibptr->arthaword,'ÆÛÖè¾ÚÈÚÄÚÆÂÚÆÛÏÞȳ '); break; case 6: strcpy(tvibptr->arthaword,tvibptr->bword); strcat(tvibptr->arthaword,'×ÌèÊÆèÅÛ '); break; case 5: strcpy(tvibptr->arthaword,tvibptr->bword); strcat(tvibptr->arthaword,'ÆÛÖè¾ÚÅÛ³ÏÁÂÚÆÛÏÞȳ '); break; } } if (tvibptr->next != NULL) tvibptr=tvibptr->next; else break; } return success; }"
stmt_for = 'for(i=0;i<vno;i-=3) free(words[i]); for(i=j+1,k=0;i<strlen(word);i+=2,k++) {s=u;};for(i=0;input[i]!= "\0";i++);'
stmt_increment_decrement = 'i++;j--;k+=2;h-=3;recptr->no_base++;recptr->no_codes--;'
stmt_func_def_complex3 = 'int findverb(unsigned char voice[],unsigned char *Word,VIBAK *tvibptr,FILE *afp,long fl,unsigned char *VerbMean)' \
'{int found=0,i,j,pos;int vnum=0,vno=0,linga=4;unsigned char line1[300],*words[17],message[60],word[20],vword[80],type,' \
'lword[30],ltype="Z",temp[4];unsigned char *list[]={"ÈÞÏèÔÚÁèØ", "¤ÈÏÚÁèØ", "ÄÛÆ", "ÏÚÂèÏÛ", "¤ØåÏÚÂèÏ", "×ÈèÂÚØ", "ȳèÖ", "ÌÚ×", "×¢ÔÂè×Ï"};' \
'unsigned char *list1[]={"ºè¼ÚÆ","×Ý´","ÄÝ£´","¦¸è¹Ú","ÄèÔáÖ","ÈèÏÍÂèÆ","ÅÏèÌ","¤ÅÏèÌ","×¢×è³ÚÏ","ËÚÔÆÚ","ÔÛÔá³","ÊåÅ","ÊÝÄèÅÛ","ÅÜ","ÈèϺè¼Ú",};' \
'fseek(afp,fl,0);}if(tvibptr->stype =="2") type="0";' \
' while(found==0 ||(!feof(afp))){fgets(line1,299,afp);if(line1[0]==" - "){found=1;break;}vno=0;vno=split(line1,words);' \
'if(strcmp(words[0],"ÔÚ³èÍÌè")==0) {for(i=0;i<vno;i++){free(words[i]);continue;}}' \
'vno=0; vno=split(line1,words); ' \
'if(strcmp(words[0],"ÔÚ³èÍÌè")==0) /* Check for input sentence */ ' \
'{ for(i=0;i<vno;i++) free(words[i]); continue; }' \
' vno=split(line1,words); type=words[3][0]; strcpy(word,words[4]); ' \
'if(type=="1") { strncpy(temp,words[6]+2,1); temp[1]="\0"; linga=atoi(temp); pos=atoi(words[7]); }' \
' if(type=="5" || type=="4") { if(type=="5") strcpy(lword,VerbMean); else { strcpy(lword,words[9]); }' \
' strncpy(temp,words[11]+1,1); temp[1]="\0"; ltype=temp[0]; }' \
' if(type=="2") { strcpy(lword,words[13]); }' \
' if(type=="1"&& (pos >= 1 && pos <=3)) {' \
' for(i=0;i<15;i++) ' \
'{ if(strcmp(word,list1[i])==0) {' \
' if(linga==0) strcpy(message,"×ÌÔáÂÚ"); ' \
'if(linga==1) strcpy(message,"×ÌÔá£");' \
'if(linga==2) strcpy(message,"×ÌÔá¢");' \
' strcpy(vword,tvibptr->bword); strcat(vword,message); strcpy(tvibptr->arthaword,vword);' \
' return 1; } } } ' \
'if(type=="5") { found=dispmea(voice,vnum,list,tvibptr,ltype,lword); for(i=0;i<vno;i++) free(words[i]); return found; } ' \
'if(type=="2" && tvibptr->stype=="2" && strcmp(Word,words[1])==0) { found=dispmea(voice,vnum,list,tvibptr,ltype,lword); ' \
'for(i=0;i<vno;i++) free(words[i]); return found; } else if(type=="4" && tvibptr->stype=="4" && strcmp(Word,words[1])==0) ' \
'{ found=dispmea(voice,vnum,list,tvibptr,ltype,lword); for(i=0;i<vno;i++) free(words[i]); return found; } ' \
'for(i=0;i<vno;i++) free(words[i]);};'
stmt_assignment_and_condition = 'a = b; message = "×ÌÔá£"; if(c=fgetc(cfp)!=eof) x = y'
stmt_multilevel_indices_pointers = 'substr=strstr(word[karptr->sub_no],krdrecord->code[m][n])'
stmt_includes_defines_others = '#include <stdio.h> #include <string.h> #include <stdlib.h> #include <conio.h> #include <alloc.h> #include "data.h" #include "declr.h" #define eof 255 #define rt 86 int CheckComptability(DETAIL *srecord,DETAIL *krecord,DETAIL *vrecord,DETAIL *krdfirst,DETAIL *krdavy,unsigned char *sent,SHASTI *fshtptr,FILE *rfp,FILE *afp,FILE *sfp,int y) { int a=0,i=0,j=0,m=0,n=0,no_vsub=0,no_ksub=0,num_sub,Naflag=0; int flag=1,krdflag=0,krdavyf=0,verflag=0,karflag=0,shaflag=0; int krdmismatch=0,mismatch=0,krdpos,avypos,Saflag=0,Sapos; int krdoth=0,no_krdoth,krdsuc=1,subver=0; long afppos,pos; unsigned char c,line[500],*word[15],temp[150],*substr,temp1[30],temp2[30]; char code1[5],code2[5]; FILE *cfp; DETAIL *firstptr=NULL,*subptr=NULL,*fstptr=NULL,*subunmatch; DETAIL *karptr=NULL,*un_match=NULL,*karmatch=NULL; DETAIL *verptr=NULL,*krdrecord=NULL,*krdunmatch=NULL,*krdmatch=NULL,*avyrecord=NULL; SHASTI *shrvib=NULL,*tshrvib=NULL;int a=0,i=0,j=0,m=0,n=0,no_vsub=0,no_ksub=0,num_sub,Naflag=0; int flag=1,krdflag=0,krdavyf=0,verflag=0,karflag=0,shaflag=0; int krdmismatch=0,mismatch=0,krdpos,avypos,Saflag=0,Sapos; int krdoth=0,no_krdoth,krdsuc=1,subver=0; long afppos,pos; unsigned char c,line[500],*word[15],temp[150],*substr,temp1[30],temp2[30]; char code1[5],code2[5]; FILE *cfp; DETAIL *firstptr=NULL,*subptr=NULL,*fstptr=NULL,*subunmatch; DETAIL *karptr=NULL,*un_match=NULL,*karmatch=NULL; DETAIL *verptr=NULL,*krdrecord=NULL,*krdunmatch=NULL,*krdmatch=NULL,*avyrecord=NULL; SHASTI *shrvib=NULL,*tshrvib=NULL; cfp=fopen("comptble.aci","r"); if(cfp== NULL) exit(0); firstptr=srecord; subptr=srecord; fstptr=srecord; while(1) { if(strcmp(subptr->word,"Æ")==0) Naflag=1; else if(strcmp(subptr->word,"ר")==0) { Saflag=1; Sapos=subptr->pos; } if(subptr->next != NULL) subptr=subptr->next; else break; } subptr=srecord; karptr=krecord; verptr=vrecord; krdrecord=krdfirst; avyrecord=krdavy; krdpos=0;if(krdfirst != NULL) { if(strcmp(krdrecord->Type,"Krdanta")==0 || 
strcmpi(krdrecord->specf,"Subject")==0) { krdflag=1; krdpos=krdrecord->pos; } } if(krdavy != NULL) { if(strcmpi(krdavy->Type,"Krdavyaya")==0) krdavyf=1; } verflag=0; while(verptr != NULL) { if(strcmp(verptr->dispSpecf,"Verb")==0) verflag=1; if(verptr->next == NULL) break; verptr=verptr->next; } verptr=vrecord;while(1) { if(karptr != NULL && krdflag==0 && krdavyf==0) { if(karptr->sub_no != 7 && (karptr->pos != Sapos-1)) no_vsub++; if(karptr->next != vrecord) karptr=karptr->next; else break; } else if(karptr != NULL && verflag==1 && krdflag==1) { if(karptr->sub_no != 6) { if (karptr->pos > krdfirst->pos ) no_vsub++; else if(strcmp(krdfirst->specf,"Subject")!=0) no_vsub++; } else if(karptr->sub_no == 6) { if (karptr->pos != krdfirst->pos-1 ) no_vsub++; else if(strcmp(krdfirst->specf,"Subject")!=0) no_vsub++; } if(karptr->next != krdfirst) karptr=karptr->next; else break; } else if(karptr != NULL && verflag==1 && krdavyf==1) { if (karptr->pos > krdavy->pos) no_vsub++; if(karptr->next != krdavy) karptr=karptr->next; else break; } else break; }fprintf(rfp,"%s \n",sent); karptr=krecord; if(fshtptr==NULL) { while(1) { fprintf(rfp,"%s ", subptr->Type); if(strcmp(subptr->Type,"Noun")==0 || strcmp(subptr->Type,"Krdanta")==0 ) fprintf(rfp,"%s",subptr->specf); fprintf(rfp," : %s",subptr->word); fprintf(rfp,"\n"); if(subptr->next != NULL) subptr=subptr->next; else break; } subptr=srecord; shaflag=0; } else { sha_disp(subptr,sent,rfp); shaflag=1; }fprintf(rfp,"\n-------------------\n"); karptr=krecord; subptr=srecord; if(vrecord==NULL) vrecord=krdfirst; verptr=vrecord;}'
stmt_multilevel_pointers_indices_and_assigned_conditions = 'while(strcmpi(srecord->specf,"subject")==0 && strcmpi(srecord->Type,"Noun")==0) { if(krdflag==1) { for(m=0;m<(krdrecord->no_base);m++) ' \
'{ num_sub=no_ksub; karflag=0; un_match=NULL; if(karptr != NULL) { while(1) { if(strcmp(krdrecord->specf,"Subject")==0) ' \
'{ if(karptr->pos < krdrecord->pos && karptr != NULL ) { if(karptr->sub_no != 6) karflag++; } } ' \
'if(strcmpi(karptr->next->Type,"Krdanta")==0 ||strcmpi(karptr->next->Type,"Krdavyaya")==0 ) break; else karptr=karptr->next; } } krdoth=0; no_krdoth=0; ' \
'while(1) { if(strcmpi(krdrecord->specf,"Subject") !=0 ) { if(strcmpi(krdrecord->Type,"Krdanta") ==0 ) { no_krdoth++; krdoth++; } } ' \
'if(strcmpi(krdrecord->next->Type,"Krdanta") !=0 || krdrecord->next == NULL) break; else krdrecord=krdrecord->next; } krdrecord=krdfirst; ' \
'for(n=0;n<srecord->no_base;n++) { num_sub=no_ksub; rewind(cfp); mismatch=0; krdmismatch=0; while( (c=fgetc(cfp)) != eof) { ungetc(c,cfp); fgets(line,150,cfp);' \
' if(line[0]=="\n") continue; j=split(line,word); strncpy(code1,line,2); code1[2]="\0"; karptr=krecord; krdrecord=krdfirst; ' \
'if(strcmp(srecord->code[n],code1)==0 ) { if(strcmp(krdrecord->Type,"Krdanta")==0 ) ' \
'{ if((strcmpi(srecord->voice,"kartari") == 0 && srecord->linga==krdrecord->linga && srecord->vibvach==krdrecord->vibvach) || ' \
'strcmpi(srecord->voice,"karmani") == 0 ) { strncpy(code2,line+3,2); code2[2]="\0"; karptr=krecord; if(karflag==0 && strcmp(code2,"00")==0) ' \
'{ substr=strstr(word[1],krdrecord->code[m]); if(substr) mismatch=0; else mismatch=1; } else { if(karptr != NULL) { ' \
'while(1) { if((karptr->pos < krdrecord->pos) && karptr!= NULL && strcmp(karptr->code[0],code2)==0) { if(karptr->sub_no !=6 ) {' \
' substr=strstr(word[karptr->sub_no],krdrecord->code[m]); if(substr) { mismatch=0; karflag--; } else {' \
' mismatch=1; krdunmatch=krdrecord; un_match=karptr; } } } if(strcmpi(karptr->next->Type,"Krdanta")==0) break; else karptr=karptr->next; } } } } } } ' \
'if(krdoth) { strncpy(code2,line+3,2); code2[2]="\0"; if(karptr != NULL) { while(1) { while(1) { if(krdrecord->sub_no !=6) {' \
' if(strcmpi(krdfirst->specf,"Subject") != 0 && karptr->pos < krdrecord->pos) { } ' \
'if(strcmp(karptr->code[0],code1)==0 && strcmp(code2,"00")==0 && strcmp(karptr->specf,krdrecord->specf)==0 && krdrecord->matnoun==1 && ' \
'(karptr->pos == krdrecord->pos +1)) { substr=strstr(word[karptr->sub_no],krdrecord->code[m]); ' \
'if(substr && krdmismatch==0) { no_krdoth--; krdmatch=krdrecord; karmatch=karptr; } else if(!substr) { krdmismatch=1; krdunmatch=krdrecord; ' \
'karmatch=karptr; } break; } else if(krdrecord->matnoun != 1) { ' \
'if((strcmp(code1,"AA")==0 && strcmp(code2,"00")==0 ) && strcmp(krdrecord->specf,"Subject") !=0) { ' \
'substr=strstr(word[1],krdrecord->code[m]); if(substr && krdmismatch==0) { no_krdoth--; krdmatch=krdrecord; } else if(!substr) { krdmismatch=1; ' \
'krdunmatch=krdrecord; } } } } if(krdrecord->sub_no ==6) { shrvib=fshtptr ; while(strcmp(krdrecord->word,shrvib->word) !=0) shrvib=shrvib->next; ' \
'if(strcmp(shrvib->next->code[0],code1)==0 && strcmp(code2,"00")==0 && strcmp(karptr->specf,krdrecord->specf)==0) { ' \
'substr=strstr(word[1],krdrecord->code[m]); if(substr && krdmismatch==0) { no_krdoth--; krdmatch=krdrecord; karmatch=karptr; } else if(!substr) { ' \
'krdmismatch=1; krdunmatch=krdrecord; karmatch=karptr; } break; } } krdrecord=krdrecord->next; if(strcmp(krdrecord->Type,"Krdanta") !=0) break; ' \
'if(krdrecord->next == NULL) break; } krdrecord=krdfirst; if(strcmpi(karptr->next->Type,"krdanta")==0) break; else karptr=karptr->next; } } else {' \
' while(strcmp(krdrecord->Type,"Verb") !=0 || krdrecord->next != NULL) { if(krdrecord->sub_no !=6) { ' \
'if((strcmp(code1,"AA")==0 && strcmp(code2,"00")==0 ) && strcmp(krdrecord->specf,"Subject") !=0) { substr=strstr(word[1],krdrecord->code[m]); ' \
'if(substr && krdmismatch==0) { no_krdoth--; krdmatch=krdrecord; } else if(!substr) { krdmismatch=1; krdunmatch=krdrecord; } } } krdrecord=krdrecord->next;' \
' } } } for(i=0;i<j;i++) free(word[i]); } if(Naflag==0) { if(verflag==0) { if(!krdoth ) { if(mismatch==0 && karflag==0) { krdsuc=1; flag=1; ' \
'fprintf(rfp,"The Krdanta is Semantically Compatible if %s root means %s and subject is %s ",krdfirst->stem,krdfirst->base[m],srecord->base[n]); } else {' \
' krdsuc=0; flag=0; fprintf(rfp,"Verb %s is not compatible with subject %s",verptr->word,srecord->word); ' \
'if(un_match != NULL) fprintf(rfp,"if %s is %s",un_match->dispSpecf,un_match->word); } } else if(krdoth) { if(mismatch==0 && karflag==0 && no_krdoth==0) ' \
'{ krdsuc=1; flag=1; if(karptr != NULL) fprintf(rfp,"%s %s %s is compatible with %s %s %s",karmatch->Type,karmatch->specf,karmatch->word,krdmatch->Type,' \
'krdmatch->specf,krdmatch->word); else fprintf(rfp,"%s %s %s is semantically compatible",krdmatch->Type,krdmatch->specf,krdmatch->word); ' \
'fprintf(rfp,"The Krdanta is Semantically Compatible if %s root means %s and subject is %s ",krdfirst->stem,krdfirst->base[m],srecord->base[n]); } ' \
'else if((mismatch || karflag) && ! no_krdoth) { krdsuc=0; flag=0; ' \
'/*fprintf(rfp,"Verb %s is not compatible with subject %s\n",verptr->word,srecord->word);*/ ' \
'if(mismatch) fprintf(rfp,"%s %s %s is not compatble with %s is %s",krdunmatch->Type,krdunmatch->specf,krdunmatch->word,un_match->dispSpecf,un_match->word);' \
' } else if(krdmismatch==1) { krdsuc=0; flag=0; fprintf(rfp,"%s %s %s is not compatible with %s %s %s if Krdanta base is %s ",karmatch->Type,' \
'karmatch->specf,karmatch->word,krdunmatch->Type,krdunmatch->specf,krdunmatch->word,krdunmatch->base[0]); ' \
'if(un_match != NULL) fprintf(rfp,"if %s is %s ",un_match->dispSpecf,un_match->word); } } } if(verflag==1) { if(!krdoth ) { if(mismatch==0 && karflag==0) ' \
'{ krdsuc=1; fprintf(rfp,"The Krdanta is Semantically Compatible if %s root means %s and subject is %s ",krdfirst->stem,krdfirst->base[m],srecord->base[n])' \
'; } else { krdsuc=0; fprintf(rfp,"Verb %s is not compatible with subject %s ",verptr->word,srecord->word); ' \
'if(un_match != NULL) fprintf(rfp,"if %s is %s ",un_match->dispSpecf,un_match->word); } } else if(krdoth) { if(mismatch==0 && karflag==0 && no_krdoth==0) ' \
'{ krdsuc=1; if(krdmatch->matnoun==1) { if(karptr != NULL ) fprintf(rfp,"%s %s %s is compatible with %s %s %s ",karmatch->Type,' \
'karmatch->specf,karmatch->word,krdmatch->Type,krdmatch->specf,krdmatch->word); } ' \
'else fprintf(rfp,"%s %s %s is semantically compatible ",krdmatch->Type,krdmatch->specf,krdmatch->word); if(strcmp(krdfirst->Type,"Subject")==0) ' \
'fprintf(rfp,"The Krdanta is Semantically Compatible if %s root means %s and subject is %s ",krdfirst->stem,krdfirst->base[m],srecord->base[n]); } ' \
'else if((mismatch || karflag) && ! no_krdoth) { krdsuc=0; if(mismatch) fprintf(rfp,"%s %s %s is not compatble with Noun Subject if %s is %s ",' \
'krdunmatch->Type,krdunmatch->specf,krdunmatch->word,un_match->dispSpecf,un_match->word); } else if(krdmismatch==1) { krdsuc=0; ' \
'fprintf(rfp,"%s %s %s is not compatible with %s %s %s if Krdanta base is %s ",karmatch->Type,karmatch->specf,karmatch->word,krdunmatch->Type,' \
'krdunmatch->specf,krdunmatch->word,krdunmatch->base[0]); if(un_match != NULL) fprintf(rfp,"if %s is %s ",un_match->dispSpecf,un_match->word); } } } } ' \
'if(Naflag==1) { krdsuc=1; flag=1; } } } } if(krdavyf==1) { for(m=0;m<(avyrecord->no_base);m++) { num_sub=no_ksub; karflag=0; un_match=NULL; ' \
'while(1) { if((karptr->pos < avyrecord->pos) && karptr!= NULL) karflag++; if(strcmpi(karptr->next->Type,"Krdavyaya")==0 ) break; else karptr=karptr->next; }' \
' krdrecord=krdfirst; for(n=0;n<srecord->no_base;n++) { num_sub=no_ksub; rewind(cfp); mismatch=0; krdmismatch=0; ' \
'while( (c=fgetc(cfp)) != eof) { ungetc(c,cfp); fgets(line,150,cfp); if(line[0]=="\n") continue; j=split(line,word); strncpy(code1,line,2); code1[2]="\0"; ' \
'karptr=krecord; if(strcmp(srecord->code[n],code1)==0 ) { if(strcmp(avyrecord->Type,"Krdavyaya")==0) { strncpy(code2,line+3,2); code2[2]="\0"; ' \
'karptr=krecord; if(karflag==0 && strcmp(code2,"00")==0) { substr=strstr(word[1],verptr->code[m]); if(substr) mismatch=0; else mismatch=1; } else { ' \
'while(1) { if((karptr->pos < avyrecord->pos) && karptr!= NULL && strcmp(karptr->code[0],code2)==0) { substr=strstr(word[karptr->sub_no],avyrecord->code[m]' \
'); if(substr) { mismatch=0; karflag--; } else { mismatch=1; un_match=karptr; } } if(strcmpi(karptr->Type,"Noun") !=0) break; else karptr=karptr->next; } }' \
' } } for(i=0;i<j;i++) free(word[i]); } if(Naflag==0) { if(!krdoth) { if(mismatch==0 && karflag==0) { krdsuc=1; ' \
'fprintf(rfp,"The Krdavyaya is Semantically Compatible if %s root means %s and subject is %s ",krdavy->stem,krdavy->base[m],srecord->base[n]); } else { ' \
'krdsuc=0; fprintf(rfp,"Verb %s is not compatible with subject %s ",verptr->word,srecord->word); if(un_match != NULL) ' \
'fprintf(rfp,"if %s is %s ",un_match->dispSpecf,un_match->word); } } } } } } if(verflag==1) { subver=1; while(strcmpi(verptr->specf,"Verb")==0) ' \
'{ if(strcmpi(verptr->dispSpecf,"Verb") ==0 || strcmpi(verptr->dispSpecf,"Krdanta") ==0) { for(m=0;m<(verptr->no_base);m++) ' \
'{ num_sub=no_vsub; if(verptr->code[m][0] != "\0") { for(n=0;n<srecord->no_base;n++) { num_sub=no_vsub; rewind(cfp); karmatch=NULL; ' \
'while( (c=fgetc(cfp)) != eof) { ungetc(c,cfp); fgets(line,150,cfp); if(line[0]=="\n") continue; karptr=krecord; j=0; j=split(line,word); ' \
'strncpy(code1,line,2); code1[2]="\0"; strncpy(code2,line+3,2); code2[2]="\0"; if(shaflag==0) { if(strcmp(srecord->code[n],code1)==0) ' \
'{ if(strcmp(code2,"00")==0) { if(strcmp(srecord->code[n],code1)==0) { substr=strstr(word[1],verptr->code[m]); if(substr) ' \
'{ if(subver) subver=1; else subver=0; } else { subunmatch=srecord; subver=0; } } } ' \
'if(strcmp(code2,"00")==0 && (karptr==NULL || no_vsub==0)) { substr=strstr(word[1],verptr->code[m]); if(substr) flag=0; else flag=1; ' \
'if(krdflag) { if(!krdsuc && !flag) flag=0; if(krdsuc && !flag) flag=1; if(krdsuc && flag) flag=1; } } else if(krdflag==0 ) ' \
'{ while(strcmpi(karptr->specf,"Verb")!=0 && krecord != NULL) { if(strcmp(karptr->code[0],code2)==0 && Saflag==0) ' \
'{ substr=strstr(word[karptr->sub_no],verptr->code[m]); if(substr) { karmatch=karptr; num_sub--; } else un_match=karptr; } ' \
'if(strcmp(karptr->code[0],code2)==0 && Saflag==1) { substr=strstr(word[karptr->sub_no],verptr->code[m]); ' \
'if(substr) { karmatch=karptr; num_sub--; } else un_match=karptr; } karptr=karptr->next; } } else if(krdflag && no_vsub) ' \
'{ while(strcmpi(karptr->specf,"Verb") !=0 && krecord != NULL) { if(strcmp(karptr->code[0],code2)==0) { ' \
'if((karptr->pos > krdpos && strcmp(krdfirst->specf,"Subject")==0)|| strcmp(krdfirst->specf,"Subject")!=0 ) {' \
' substr=strstr(word[karptr->sub_no],verptr->code[m]); if(substr) { karmatch=karptr; num_sub--; } else un_match=karptr; } } ' \
'karptr=karptr->next; } } } } else if(shaflag==1) { ' \
'while(strcmpi(karptr->specf,"Verb") !=0 && strcmpi(karptr->specf,"Krdanta") !=0 && strcmpi(karptr->specf,"Krdavyaya") !=0 && krecord != NULL) {' \
' strncpy(code2,line+3,2); code2[2]="\0"; if(strcmp(code2,"00")==0) { if(strcmp(srecord->code[n],code1)==0) { ' \
'substr=strstr(word[1],verptr->code[m]); if(substr) { if(subver) subver=1; else { subunmatch=srecord; subver=0; } } else { ' \
'subunmatch=srecord; subver=0; } } } if(karptr->sub_no==6 ) { shrvib=fshtptr ; while(strcmp(karptr->word,shrvib->word) !=0) shrvib=shrvib->next; ' \
'if(strcmp(code1,shrvib->code[0])==0 && strcmp(code2,shrvib->next->code[0])==0 ) { if(strcmp(word[6],"*")==0) {' \
' karmatch=karptr; num_sub--; } else un_match=karptr; } } else if(karptr->sub_no < 6 && strcmp(srecord->code[n],code1)==0 ) { ' \
'if(strcmp(karptr->code[0],code2)==0 && karptr->pos > krdpos ) { substr=strstr(word[karptr->sub_no],verptr->code[m]); if(substr) { ' \
'karmatch=karptr; num_sub--; } else un_match=karptr; } } karptr=karptr->next; } } if(strcmp(code2,"00")==0 && Saflag==1) {' \
' subptr=fstptr; while(1) { if(strcmp(subptr->Type,"Noun")==0 && strcmp(subptr->specf,"Instrument")==0 && (subptr->pos == (Sapos -1)) ) {' \
' if(strcmp(subptr->code[n],code1)==0) { substr=strstr(word[1],verptr->code[m]); if(substr) { if(subver) subver=1; else { ' \
'subunmatch=srecord; subver=0; } } else { subunmatch=srecord; subver=0; } } } if(subptr->next == NULL) break; else subptr=subptr->next; } } ' \
'for(i=0;i<j;i++) free(word[i]); } if(Naflag==0) { if(num_sub==0 && subver) { flag=1; ' \
'fprintf(rfp,"The Verb %s is Semantically Compatible With Subject if Verb Root means %s ",verptr->stem,verptr->base[m]); ' \
'if(no_vsub) fprintf(rfp,"and %s is %s ",karmatch->specf,karmatch->base[m]); } else if(subver==0) { flag=0; ' \
'fprintf(rfp,"The Verb %s is not compatible with the Subject ",verptr->word); } else if(num_sub && subver) { flag=0; ' \
'fprintf(rfp,"The Verb %s is not compatible with Subject if %s is %s ",verptr->word,un_match->specf,un_match->stem); } if(krdflag) {' \
' if(krdsuc==1 && flag==1) flag=1; if(krdsuc==0 ||flag==0) flag=0; } } else if(Naflag==1) flag=1; } if(verptr->no_base > 1) { afppos=ftell(afp); ' \
'y=Sabdabodha(afp,sfp,rfp,firstptr,sent,flag,y,pos,Saflag,verptr->base[m],m); fseek(afp,afppos,0); pos=ftell(rfp); fseek(rfp,pos,0); } } ' \
'if(verptr->no_base == m+1 && verptr->no_base > 1) { while(!feof(afp)) { fgets(line,499,afp); if(line[0]=="-") break; } } } ' \
'if(verptr->next==NULL) break; else verptr=verptr->next; } } } srecord=srecord->next; } if(flag==1) ' \
'fprintf(rfp,"\nThe Sentence is Semantically Compatible"); else fprintf(rfp,"\nThe Sentence is Semantically Not Compatible"); ' \
'fprintf(rfp,"\n-------------------\n"); if(fshtptr != NULL) { shrvib=fshtptr; tshrvib=shrvib; while(fshtptr->next != NULL) {' \
' while(shrvib->next != NULL) { tshrvib=shrvib; shrvib=shrvib->next; } free(shrvib->specf); free(shrvib->word); for(i=0;i<shrvib->no_base;i++) {' \
' free(shrvib->base[i]); free(shrvib->code[i]); } free(shrvib); tshrvib->next=NULL; shrvib=fshtptr; } free(shrvib->specf); free(shrvib->word); ' \
'for(i=0;i<fshtptr->no_base;i++) { free(fshtptr->base[i]); free(fshtptr->code[i]); } free(fshtptr); }'
# --- Single-statement C fixtures -------------------------------------------
# Each string below is a raw C snippet used as parser-test input; they are
# data, not code, and several deliberately contain non-idiomatic C (e.g.
# double-quoted character constants) carried over from the source corpus.

# Chained assignment with a cast malloc.
stmt_assignment_double = 'fvibptr=tvibptr=(VIBAK *)malloc(sizeof(VIBAK))'
# Array assignment with post-increment index.
stmt_assignment_index_plusplus = 'verb[j++]=word[i];'
# Function call whose argument contains arithmetic and a nested call.
stmt_function_arg_with_plus_invoke = 'tvibptr->sent=malloc(strlen(sen +1*k)-g);'
# A full struct typedef appearing at the start of a statement.
stmt_typedef_at_start = "typedef struct detail { char *Type; char *code[20]; char *subcode; unsigned char *specf; unsigned char *dispSpecf; unsigned char *mean_deno; unsigned char *word; unsigned char *stem; unsigned char *base[20]; unsigned char *voice; int linga; int vibvach; int mode; int sub_no; int no_base; int no_codes; int pos; int matnoun; int subinsen; struct detail *next; }DETAIL; "
# Forward typedefs for structs declared elsewhere.
stmt_typedef_already_defined = "typedef struct vibak VIBAK; typedef struct display DISP_ARTH; typedef struct shasti SHASTI; typedef struct disp_shasti DISPLAY;"
# Typedefs of non-struct types followed by a #define.
stmt_typedef_no_struct = 'typedef unsigned char* DEVSTR ;typedef unsigned char DEVCHR ; #define ANUSVARA (DEVCHR) "¢"'
# Expression with an explicit type cast in a comparison.
stmt_expression_type_cast = 'if(message[strlen(message)-1]==(unsigned char) "*") ab = (unsigned char *) bc;'
# Character arithmetic between two casted "character" literals.
stmt_char_plusminus_char = '*sp = input[i+1] - (unsigned char)("Ú"-"¥");'
# Casts combined with pointer arithmetic inside a condition.
stmt_type_cast_pointer_math = 'strcat(fp,(unsigned char *)"è");if( *(input+i+1)>=(unsigned char)"Ú" && *(input+i+1)<=(unsigned char)"æ");'
# Pointer/array access mix with struct-member array assignment.
stmt_pesky_pointer_array = 'strcpy(sp+1,input+i+2);if(*(input+i) >= (unsigned char)"¤" && *(input+i) <= (unsigned char)"Ø") { if(fp[0] != "\0" && sp[0] != "\0") { splitWords->firstWord[count] = strdup(fp); splitWords->secondWord[count] = strdup(sp); count++; } }'
stmt_split_c_complete = '/**************************************************************** File : SPLIT.C Function : splitTheWord The C code of this function splits the word that is passed to it into right and left strings for all possible combinations. The right and left strings are stored in a structure and is returned by the funtion. NOTE: THE CALLER OF THIS FUNCTION MUST FREE THE MEMORY USED BY THIS STRUCTURE. Function : splitTheSentence The C code of this function splits the sentence that is passed to it into words and are stored in a structure and is returned by the funtion. NOTE: THE CALLER OF THIS FUNCTION MUST FREE THE MEMORY USED BY THIS STRUCTURE. Note that the Options->Compiler->Source Option in the IDE has been set to ANSI-C, to ensure strict adherence to ANSI-C standards. Compilation has been verified to give 0 warnings and 0 errors with this setting. ****************************************************************/; #include <stdio.h> #include <string.h> #include <conio.h> #include <stdlib.h> #include <process.h> #include "senanal.h" SPLIT *splitTheWord(unsigned char *input) { int i,j,len,count; unsigned char fp[100], sp[100]; SPLIT *splitWords, *splitWord; for(i=0;input[i]!= "\0";i++); i=-2 ; strcpy(fp,""); strcpy(sp,""); count=0; splitWords = (SPLIT *) malloc (sizeof(SPLIT)); while(i >= 0) { if(input[i]>=(unsigned char)"¤" && input[i]<=(unsigned char)"±") { strcpy(sp,input+i); for(j=0;j<i;j++) fp[j]=input[j]; fp[j]=0; } else if(input[i]>=(unsigned char)"³" && input[i]<=(unsigned char)"Ø") { if( *(input+i+1)>=(unsigned char)"Ú" && *(input+i+1)<=(unsigned char)"æ") { if(*(input+i+1)>=(unsigned char)"Ú" && *(input+i+1)<=(unsigned char)"ß") *sp = input[i+1] - (unsigned char)("Ú"-"¥"); else if(*(input+i+1)>=(unsigned char)"à" && *(input+i+1)<=(unsigned char)"æ") *sp = input[i+1] - (unsigned char)("à"-"«"); } else if( *(input+i+1) >= (unsigned char)"¤" && *(input+i+1) <=(unsigned char) "Ø" || *(input+i+1) == NULL || *(input+i+1) <= (unsigned char)"£") { 
*sp=(unsigned char)"¤"; strcpy(sp+1,input+i+1); } else if(*(input+i+1) == (unsigned char)"è") strcpy(sp,input+i+2); for(j=0;j<=i;j++) fp[j]= input[j]; fp[j]=0; strcat(fp,(unsigned char *)"è"); } if(*(input+i) >= (unsigned char)"¤" && *(input+i) <= (unsigned char)"Ø") { if(fp[0] != "\0" && sp[0] != "\0") { splitWords->firstWord[count] = strdup(fp); splitWords->secondWord[count] = strdup(sp); count++; } } /************************** Second Possible Split start ***************************/; if(input[i] >= (unsigned char)"¤" && input[i] <= (unsigned char)"Ø") { for(j=0;j<i;j++) fp[j]=input[j]; fp[j]=0; strcpy(sp,input+i); if(*(input+i) >= (unsigned char)"¤" && *(input+i) <= (unsigned char)"Ø") { if(fp[0] != "\0" && sp[0] != "\0") { splitWords->firstWord[count] = strdup(fp); splitWords->secondWord[count] = strdup(sp); count++; } } } /************************** Second Possible Split end ***************************/; i--; if(count==8) break; } splitWords->noOfSplits = count; splitWord = (SPLIT *) malloc (sizeof(SPLIT)); j = 0; for(i = 0; i < splitWords->noOfSplits; i++) { splitWord->firstWord[j] = (unsigned char *) malloc(strlen(splitWords->firstWord[i]) + 1); strcpy(splitWord->firstWord[j], splitWords->firstWord[i]); splitWord->secondWord[j] = (unsigned char *) malloc(strlen(splitWords->secondWord[i]) + 1); strcpy(splitWord->secondWord[j], splitWords->secondWord[i]); j++; if(strcmp(splitWords->firstWord[i],splitWords->firstWord[i+1]) == 0 && strcmp(splitWords->secondWord[i],splitWords->secondWord[i+1]) == 0) i++; } splitWord->noOfSplits = j; for(i = 0; i < splitWords->noOfSplits; i++) { free(splitWords->firstWord[i]); free(splitWords->secondWord[i]); } free(splitWords); return(splitWord); } /*SPLIT1 *splitTheSentence(unsigned char *record) { int i; SPLIT1 *splitSen; unsigned char *token; splitSen = (SPLIT1 *) malloc(sizeof(SPLIT1)); i = 0; token = strtok(record, " "); splitSen->word[i] = (unsigned char *) malloc(strlen(token) + 1); strcpy(splitSen->word[i], token); i++ ; 
while (1) { token = strtok(NULL, " ") ; if (token == NULL) break ; splitSen->word[i] = (unsigned char *) malloc(strlen(token) + 1); strcpy(splitSen->word[i], token) ; i++ ; } splitSen->noOfWords = i; return (splitSen); } */;'
# Master list of every C-snippet fixture; consumers iterate this to exercise
# the parser against each statement category.  Many of the stmt_* names are
# defined earlier in this file, outside this excerpt.
samples = [stmt_comment, stmt_var_decl_initialized, stmt_var_decl_array, stmt_assignment, stmt_func_decl_default, stmt_func_decl_complex,
           stmt_func_decl_complex1, stmt_func_decl_complex2, stmt_func_def_complex1, stmt_func_def_complex2, stmt_assignment_func, stmt_if_assign,
           stmt_if_assign2, stmt_if_assign3, stmt_if_assign4, stmt_strcmp_cpy_cat, stmt_switch_case, stmt_switch_case1, stmt_switch_case2,
           stmt_switch_case22, stmt_switch_case3, stmt_while, stmt_if_while_complex1, stmt_while_complex2, stmt_while_complex3, stmt_define, stmt_include,
           stmt_include2, stmt_typedef_many, stmt_func_def_vibmenu_c_complete, stmt_increment_decrement, stmt_for, stmt_func_def_complex3,
           stmt_assignment_and_condition, stmt_multilevel_indices_pointers, stmt_includes_defines_others, stmt_multilevel_pointers_indices_and_assigned_conditions,
           stmt_assignment_double, stmt_assignment_index_plusplus, stmt_function_arg_with_plus_invoke,
           stmt_typedef_at_start, stmt_typedef_already_defined, stmt_typedef_no_struct, stmt_expression_type_cast, stmt_char_plusminus_char,
           stmt_type_cast_pointer_math, stmt_pesky_pointer_array, stmt_split_c_complete]
# Precompiled regexes used to normalise and parse the C snippets above.
# All patterns are raw strings so backslashes reach the regex engine
# verbatim; the original non-raw strings relied on Python passing unknown
# escapes (e.g. "\(", "\,") through, which raises SyntaxWarning on modern
# CPython.  Match behaviour is unchanged.
pattern_crlf = re.compile(r"\r\n")                # Windows line endings
pattern_lf = re.compile(r"\n")                    # Unix line endings
pattern_spaces_2_or_more = re.compile(" +")       # runs of one or more spaces
pattern_tabs = re.compile(r"\t+")                 # runs of tabs
# strcmp / strcmpi comparisons against 0: captures both operands and the
# (in)equality operator.
pattern_c_strcmp = re.compile(r"strcmpi?\((.+?),(.+?)\)\s*([=|!]=)\s*0")
pattern_c_strcpy = re.compile(r"strcpy\((.+?)\s*,\s*(.+?)\)")
pattern_c_strcat = re.compile(r"strcat\(\s*(.+?)\s*,\s*(.+?)\s*\)")
# strncpy(dst, src[+offset], n); followed by a dst[i]=...; terminator write.
pattern_c_strncpy = re.compile(r"strncpy\((.+?)\s*,\s*(.+?)(\+?)(\d+?)*\,(\w+)\);\s+\w+\[\d+\]\s*\=\S+;?")
pattern_include = re.compile(r"#include\s+(.+?)\s+")
pattern_define = re.compile(r"#define\s+(.+?)\s+(.+?)\s+\w*")
pattern_end_with_newline = re.compile(r"\n$")
pattern_star_slash = re.compile(r"\*\/")          # C comment terminator */
pattern_star_slash_semicolon = re.compile(r"\*\/;")  # comment terminator fused with ';'
# "a"+"b" / "a"-"b" style character arithmetic between quoted chars.
pattern_char_plus_minus_char = re.compile(r'\"(\S?)\"([\+|\-])\"(\S?)\"')
79564f2faed8bbe87a865636bd3a52d74f44a1f2 | 1,512 | py | Python | pi7db/tests/csvtest/test.py | shivjeetbhullar/pi7db | 1215d849d1ffb0670fdb3fcc4f35602fe7963e51 | [
"MIT"
] | 4 | 2020-05-30T05:32:57.000Z | 2021-02-15T03:29:08.000Z | pi7db/tests/csvtest/test.py | shivjeetbhullar/pi7db | 1215d849d1ffb0670fdb3fcc4f35602fe7963e51 | [
"MIT"
] | null | null | null | pi7db/tests/csvtest/test.py | shivjeetbhullar/pi7db | 1215d849d1ffb0670fdb3fcc4f35602fe7963e51 | [
"MIT"
] | null | null | null | import os
import pandas as pd
class tabledb:
    """Minimal CSV-backed table database.

    A database is a directory; each table is one CSV file inside it whose
    first column is always the synthetic ``un_id`` key.  The ``colums``
    spelling is preserved because it is part of the public keyword API.
    Methods return an error string on failure and ``None`` on success,
    matching the original contract.
    """

    def __init__(self, db_name):
        """Open (creating if necessary) the database directory ``db_name``."""
        self.db_name = db_name
        if not os.path.exists(db_name):
            os.mkdir(db_name)

    def create_table(self, **kwargs):
        """Create a new table.

        Expects ``name`` (str) and ``colums`` (list of column names).
        Returns "PATH ALREADY EXIST" if the table exists, "NOT PROPER
        METHOD" on bad arguments, ``None`` on success.
        """
        if 'name' in kwargs and 'colums' in kwargs and isinstance(kwargs['name'], str) and isinstance(kwargs['colums'], list):
            file_path = os.path.join(self.db_name, f"{kwargs['name']}.csv")
            if not os.path.exists(file_path):
                # BUGFIX: build a fresh list instead of insert(0, ...) so the
                # caller's `colums` list is never mutated.
                columns = ['un_id'] + kwargs['colums']
                df = pd.DataFrame(columns=columns)
                df.to_csv(file_path, index=False, sep=',', encoding='utf-8')
            else:
                return "PATH ALREADY EXIST"
        else:
            return "NOT PROPER METHOD"

    def re_config_table(self, **kwargs):
        """Rename a table or its columns.

        ``name`` as a ``{old: new}`` dict renames the table file;
        ``name`` as a str plus ``colums`` as a ``{old: new}`` dict renames
        columns in place.  Returns "TABLE NOT FOUND" when the target CSV
        does not exist (the original raised FileNotFoundError in the str
        branch and wrongly returned "TABLE NOT FOUND" after a successful
        dict rename).
        """
        if 'name' in kwargs:
            if isinstance(kwargs['name'], dict):
                old_name = list(kwargs['name'])[0]
                file_path = os.path.join(self.db_name, f"{old_name}.csv")
                if os.path.exists(file_path):
                    os.rename(file_path, os.path.join(self.db_name, f"{kwargs['name'][old_name]}.csv"))
                else:
                    return "TABLE NOT FOUND"
            elif isinstance(kwargs['name'], str):
                file_path = os.path.join(self.db_name, f"{kwargs['name']}.csv")
                if not os.path.exists(file_path):
                    return "TABLE NOT FOUND"
                df = pd.read_csv(file_path)
                if 'colums' in kwargs and isinstance(kwargs['colums'], dict):
                    df = df.rename(kwargs['colums'], axis='columns')
                    df.to_csv(file_path, index=False, mode='w', sep=',', encoding='utf-8')
# Demo/driver code: creates a local "shivjeet" database directory and renames
# columns in its "yes" table.  NOTE(review): this runs at import time —
# consider guarding with `if __name__ == "__main__":`.
db = tabledb('shivjeet')
#db.create_table(name="yes",colums=["naam","no","yo"])
db.re_config_table(name="yes",colums={"NAME":"Name","ADM-NUMBER":"Admission-no","YEAR":"Year"})
| 38.769231 | 130 | 0.666005 |
79564f3a1b09f03f57288baf5fa6ef240d8c1ad4 | 220 | py | Python | problems/binarysearch/Solution34.py | akalu/cs-problems-python | 9b1bd8e3932be62135a38a77f955ded9a766b654 | [
"MIT"
] | null | null | null | problems/binarysearch/Solution34.py | akalu/cs-problems-python | 9b1bd8e3932be62135a38a77f955ded9a766b654 | [
"MIT"
] | null | null | null | problems/binarysearch/Solution34.py | akalu/cs-problems-python | 9b1bd8e3932be62135a38a77f955ded9a766b654 | [
"MIT"
] | null | null | null | """ Given an array of integers nums sorted in ascending order, find the starting
and ending position of a given target value. If target is not found in the
array, return [-1, -1].
"""
class Solution34:
    """Stub for the search-range problem described in the module docstring
    (first/last index of a target in a sorted array); not implemented yet."""
    pass
| 27.5 | 82 | 0.7 |
7956501687c770073e6fbc26df8b201d426e7f64 | 7,535 | py | Python | dagoba/query.py | skvrahul/dagoba | 685e2eb9adeec2eb6d38521a7dd8ab67568519f9 | [
"MIT"
] | 14 | 2020-02-15T08:14:07.000Z | 2021-09-07T19:02:14.000Z | dagoba/query.py | skvrahul/dagoba | 685e2eb9adeec2eb6d38521a7dd8ab67568519f9 | [
"MIT"
] | null | null | null | dagoba/query.py | skvrahul/dagoba | 685e2eb9adeec2eb6d38521a7dd8ab67568519f9 | [
"MIT"
] | null | null | null | from dagoba.entities import State, Gremlin, Args
class Query():
    """A lazily-evaluated graph query: an ordered pipeline of
    (pipetype_name, Args) steps, driven by a pull-based interpreter."""

    def __init__(self, graph):
        # Graph the query runs against.
        self.graph = graph
        # Per-step State objects; grown lazily inside run().
        self.state = []
        # The pipeline program: list of (pipetype_name, Args) tuples.
        self.program = []
        self.gremlins = []

    def add(self, pipetype, args: Args):
        """Append one step to the pipeline; returns self so calls chain."""
        step = (pipetype, args)
        self.program.append(step)
        return self

    def run(self):
        """Execute the pipeline and return the list of results.

        Each pipe returns a Gremlin, False, or a signal string: 'pull'
        (needs more input from upstream) or 'done' (exhausted).  The
        interpreter walks back and forth over the program until every
        step behind `last_step` has reported done.
        """
        last_step = len(self.program) - 1  # Index of last step in the pipeline
        maybe_gremlin = False  # Gremlin / Signal string / False
        results = []  # Results of this invocation of `run`
        done = -1  # Step behind which things have finished
        pc = last_step  # pc = Program Counter. Start by pointing to the last step
        step = None
        state = None
        pipetype = None
        while(done < last_step):
            step = self.program[pc]
            # Initialize state if needed: self.state grows lazily to cover pc.
            try:
                state = self.state[pc]
            except IndexError:
                for i in range(len(self.state), pc + 1):
                    self.state.append(State())
                state = self.state[pc]
            pipetype = Core.getPipetype(step[0])
            maybe_gremlin = pipetype(self.graph, step[1], maybe_gremlin, state)
            if (maybe_gremlin == 'pull'):
                # Step wants input: rewind to the nearest unfinished upstream step.
                maybe_gremlin = False
                if pc - 1 > done:
                    pc -= 1  # Try pulling from the previous pipe
                    continue
                else:
                    done = pc  # This pipe is done as well
            if maybe_gremlin == 'done':
                # Step signalled exhaustion: everything up to pc is finished.
                maybe_gremlin = False
                done = pc
            pc += 1
            if pc > last_step:
                # Walked off the end: harvest the gremlin (if any) as a result.
                if maybe_gremlin:
                    results.append(maybe_gremlin)
                    maybe_gremlin = False
                pc -= 1
        # Prefer a gremlin's extracted result (e.g. set by the 'property'
        # pipe); otherwise fall back to the vertex it sits on.
        outp = [grem.result if grem.result is not None else grem.vertex for grem in results]
        return outp
class Core:
    """Pipetype registry plus the built-in pipe implementations.

    A pipetype is a callable ``pipe(graph, args, maybe_gremlin, state)``
    returning a Gremlin, False, or one of the string signals 'pull'
    (needs upstream input) / 'done' (exhausted).  Registering a pipetype
    also installs a same-named chainable method on Query.
    """

    # Map of available pipetypes, keyed by name.
    Pipetypes = {}

    @staticmethod
    def getFauxPipetype():
        """Build a pass-through pipe used for unrecognized pipetype names.

        BUGFIX: was a @classmethod with no parameters, and the inner pipe
        took 3 arguments while pipes are invoked with 4 (see Query.run).
        """
        def _pipe(graph, args, maybe_gremlin, state):
            # Forward the incoming gremlin untouched; otherwise ask upstream.
            return maybe_gremlin or 'pull'
        return _pipe

    @staticmethod
    def error(msg):
        """Report a non-fatal error (printed, not raised)."""
        print('Dagoba Error: ', msg)

    @classmethod
    def addPipetype(cls, name, func):
        """Register pipe `func` under `name` and expose it as Query.<name>()."""
        Core.Pipetypes[name] = func

        def _func(self, *args):
            args_ob = Args.from_tuple(args=args)
            return self.add(pipetype=name, args=args_ob)

        # Adding the pipe function dynamically to the Query class
        # to allow this function to be invoked on Query objects.
        setattr(Query, name, _func)

    @staticmethod
    def getPipetype(name):
        """Look up a registered pipe; fall back to a pass-through pipe."""
        if name in Core.Pipetypes:
            return Core.Pipetypes[name]
        Core.error("Unrecognized pipe name")
        # BUGFIX: call the factory so callers receive an actual pipe with
        # the standard 4-argument signature (the original returned the
        # factory function itself).
        return Core.getFauxPipetype()

    @staticmethod
    def filterEdges(query):
        """Build an edge predicate from a label, label list, or property dict."""
        if not query:
            # Empty or null query: match every edge.
            return lambda x: True
        elif isinstance(query, str):
            # Edge label has to equal the query string.
            return lambda edge: edge._label == query
        elif isinstance(query, list):
            # Edge label has to be one of the listed labels.
            return lambda edge: edge._label in query
        elif isinstance(query, dict):
            # Edge has to match a set of properties (Edge.matches).
            return lambda edge: edge.matches(query)
        else:
            # Unknown query type: match all edges.
            return lambda x: True

    # ------------------------------------------------------------------
    # Standard pipetypes
    # ------------------------------------------------------------------

    @staticmethod
    def _vertex(graph, args: Args, gremlin: Gremlin, state: State):
        """Source pipe: emit one gremlin per vertex matching `args`."""
        if state.vertices is None:
            state.vertices = graph.findVertices(args)
        if len(state.vertices) == 0:
            return 'done'
        vertex = state.vertices.pop()
        if gremlin:
            return Gremlin(vertex, gremlin.state)
        else:
            return Gremlin(vertex, None)

    @staticmethod
    def _out(graph, args: Args, gremlin, state):
        """Traverse outgoing edges, optionally filtered by args.get(0)."""
        state_has_no_edges = state.edges is None or len(state.edges) == 0
        if not gremlin and state_has_no_edges:
            return 'pull'
        if state_has_no_edges:
            # Fresh gremlin: gather its vertex's outgoing edges once.
            state.gremlin = gremlin
            out_edges = graph.findOutEdges(gremlin.vertex)
            state.edges = list(filter(Core.filterEdges(args.get(0)), out_edges))
        if len(state.edges) == 0:
            return 'pull'
        vertex = state.edges.pop()._out
        if gremlin:
            return Gremlin(vertex, gremlin.state)  # Gremlin sitting on the new vertex
        else:
            return Gremlin(vertex, None)

    @staticmethod
    def _in(graph, args: Args, gremlin, state):
        """Traverse incoming edges, optionally filtered by args.get(0)."""
        state_has_no_edges = state.edges is None or len(state.edges) == 0
        # BUGFIX: pipes receive False (not None) when no gremlin is
        # available, so test truthiness like _out does.
        if not gremlin and state_has_no_edges:
            return 'pull'
        if state_has_no_edges:
            state.gremlin = gremlin
            # BUGFIX: findInEdges returns a plain list, which has no
            # .filter method; use the built-in filter as _out does.
            in_edges = graph.findInEdges(gremlin.vertex)
            state.edges = list(filter(Core.filterEdges(args.get(0)), in_edges))
        if len(state.edges) == 0:
            return 'pull'
        vertex = state.edges.pop()._in
        return Gremlin(vertex, gremlin.state)  # Gremlin sitting on the new vertex

    @staticmethod
    def _property(graph, args: Args, gremlin: Gremlin, state):
        """Replace the gremlin's payload with a vertex property value."""
        if not gremlin:
            return 'pull'
        gremlin.result = gremlin.vertex.getProperty(args.get(0))
        if gremlin.result is not None:
            return gremlin
        else:
            # Vertex lacks the property: drop this gremlin.
            return False

    @staticmethod
    def _take(graph, args: Args, gremlin, state):
        """Pass through at most args.get(0) gremlins, then signal 'done'."""
        # NOTE(review): assumes State tolerates reading num_taken before it
        # is first assigned — confirm against dagoba.entities.State.
        state.num_taken = state.num_taken or 0
        if state.num_taken == args.get(0):
            state.num_taken = 0  # Reset so the query can be re-run.
            return 'done'
        if not gremlin:
            return 'pull'
        state.num_taken += 1
        return gremlin

    @staticmethod
    def _filter(graph, args: Args, gremlin, state):
        """Keep only gremlins whose vertex passes the filter in args.get(0)."""
        if not gremlin:
            return 'pull'
        # Filter query is a property dictionary.
        if isinstance(args.get(0), dict):
            return gremlin if gremlin.vertex.matches(args.get(0)) else 'pull'
        # Filter query is a function or lambda.
        elif callable(args.get(0)):
            filter_func = args.get(0)
            return gremlin if filter_func(gremlin.vertex) else 'pull'
        # Unsupported filter type: warn and pass the gremlin through.
        else:
            Core.error("Unrecognized filter query:" + str(type(args.get(0))))
            return gremlin

    @staticmethod
    def _unique(graph, args: Args, gremlin, state):
        """Pass each vertex through at most once (dedup by vertex id)."""
        if not gremlin:
            # No gremlin. Try to get one.
            return 'pull'
        elif gremlin.vertex._id in state.vert_ids:
            # Already emitted this vertex; ask upstream for another.
            return 'pull'
        else:
            # Mark this gremlin's vertex as emitted.
            state.vert_ids.add(gremlin.vertex._id)
            return gremlin

    @staticmethod
    def _as(graph, args: Args, gremlin, state):
        """Label the gremlin's current vertex under args.get(0) in its state."""
        if not gremlin:
            # No gremlin. Try to get one.
            return 'pull'
        if gremlin.state.alias is None:
            # BUGFIX: the original referenced an undefined name `gremlins`
            # here, which raised NameError the first time an alias was set.
            gremlin.state.alias = {}
        gremlin.state.alias[args.get(0)] = gremlin.vertex
        return gremlin
# Register the built-in pipes; each call also installs a same-named
# chainable method on Query (see Core.addPipetype).
Core.addPipetype('vertex', Core._vertex)
Core.addPipetype('in', Core._in)
Core.addPipetype('out', Core._out)
Core.addPipetype('property', Core._property)
Core.addPipetype('take', Core._take)
Core.addPipetype('filter', Core._filter)
Core.addPipetype('unique', Core._unique)
Core.addPipetype('as', Core._as)
| 32.478448 | 102 | 0.564831 |
7956503d9b9ed71725d90867ddba85e2671fa160 | 5,432 | py | Python | gui/homeView.py | Guitheg/3dis | bca86f5424b8f8b9b7ab18152349f4dfb0141bbb | [
"MIT"
] | null | null | null | gui/homeView.py | Guitheg/3dis | bca86f5424b8f8b9b7ab18152349f4dfb0141bbb | [
"MIT"
] | null | null | null | gui/homeView.py | Guitheg/3dis | bca86f5424b8f8b9b7ab18152349f4dfb0141bbb | [
"MIT"
] | null | null | null | import sys
import tkinter as tk
from gui.entry_panel import EntryPanel, ButtonPanel, TwoEntryPanel, ThreeEntryPanel
from gui.scrollerFrame import ScrollerFrame
class HomeView(tk.Frame):
    """Main application view.

    Top half: file actions (load/reset/save/undo) plus initially disabled
    information buttons. Bottom half: a scrollable list of image-processing
    operation panels. All widgets are exposed as attributes so controllers
    can bind callbacks to them.
    """

    def __init__(self, master, config):
        # Old-style superclass call is kept for the Python 2 branch.
        if sys.version_info < (3,):
            tk.Frame.__init__(self, master)
        else:
            super(HomeView, self).__init__(master)
        self.pack()
        # NOTE(review): this shadows tk.Frame.config(); renaming would
        # break existing callers, so the attribute name is kept.
        self.config = config
        self._build_action_panel()
        self._build_filter_panel()

    def _build_action_panel(self):
        """Build the top panel: file actions and (disabled) info buttons."""
        self.box1 = tk.LabelFrame(self)
        self.box1.pack(side=tk.TOP, expand=tk.YES, fill=tk.BOTH, padx=5, pady=5)

        self.btn_charger_image = tk.Button(self.box1, text="Charger une image")
        self.btn_charger_image.pack(side=tk.TOP)
        self.reinitialiser = tk.Button(self.box1, text="Réinitialiser")
        self.reinitialiser.pack(side=tk.TOP)
        self.save = tk.Button(self.box1, text="Sauvegarder le résultat")
        self.save.pack(side=tk.TOP)
        self.annuler = tk.Button(self.box1, text="Annuler derniere opération")
        self.annuler.pack(side=tk.TOP)

        # Info buttons start disabled; they are enabled once an image is loaded.
        self.info = tk.LabelFrame(self.box1, text="Informations")
        self.info.pack(side=tk.TOP, expand=tk.YES, fill=tk.BOTH, padx=5, pady=5)
        self.afficher_valeur = tk.Button(self.info, text="Afficher Valeurs", state=tk.DISABLED)
        self.afficher_valeur.pack(side=tk.TOP, fill=tk.BOTH, expand=tk.TRUE, pady=5, padx=5)
        self.histogramme = tk.Button(self.info, text="Afficher Histogramme", state=tk.DISABLED)
        self.histogramme.pack(side=tk.TOP, fill=tk.BOTH, expand=tk.TRUE, pady=5, padx=5)
        self.spectre = tk.Button(self.info, text="Afficher Spectre", state=tk.DISABLED)
        self.spectre.pack(side=tk.BOTTOM, fill=tk.BOTH, expand=tk.TRUE, pady=5, padx=5)

    def _build_filter_panel(self):
        """Build the bottom panel: scrollable list of operation panels."""
        self.box2 = tk.LabelFrame(self)
        self.box2.pack(side=tk.BOTTOM, expand=tk.YES, fill=tk.BOTH, padx=5, pady=5)
        self.scrolledFrame = ScrollerFrame(self.box2)
        self.scrolledFrame.pack(side=tk.TOP, expand=tk.YES, fill=tk.BOTH, padx=5, pady=5)
        parent = self.scrolledFrame.interior

        # Noise generators.
        self.bruit_gauss_p = EntryPanel(parent, "Bruit en DB", "Appliquer", from_=0, to=100, resolution=1)
        self.bruit_gauss = TwoEntryPanel(parent, "Bruit Gaussien", "Appliquer", from_=1, to=250, resolution=1)
        self.bruit_uniforme = TwoEntryPanel(parent, "Bruit Uniforme", "Appliquer", from_=1, to=250, resolution=1)
        self.bruit_periodique = ThreeEntryPanel(parent, "Bruit Periodique", "Appliquer", from_=0, to=2, resolution=0.01)
        self.bruit_sp = EntryPanel(parent, "Bruit Sel et Poivre", "Appliquer", from_=0, to=1, resolution=0.01)

        # Point operations (brightness/contrast/LUTs).
        self.brillance = EntryPanel(parent, "Brillance", "Changer la brillance", from_=-200, to=200, resolution=0.1)
        self.contraste = EntryPanel(parent, "Constraste", "Changer le contraste", from_=0, to=10, resolution=0.01)
        self.gamma = EntryPanel(parent, "Gamma", "Changer le Gamma", from_=0.01, to=2, resolution=0.01)
        self.gamma.entry.set(1)  # neutral default
        self.negatif = ButtonPanel(parent, "Negatif", "Appliquer l'effet Negatif")
        self.seuillage = EntryPanel(parent, "Seuillage", "Appliquer Seuillage", from_=0, to=250, resolution=1)
        self.logarithmique = EntryPanel(parent, "Logarithmique", "Appliquer", from_=0, to=10, resolution=0.1)
        self.logarithmique.entry.set(1)  # neutral default
        self.puissance = EntryPanel(parent, "Puissance", "Appliquer Puissance", from_=0, to=30, resolution=0.01)
        self.puissance.entry.set(1)  # neutral default
        self.lut_sigmoid = ButtonPanel(parent, "Sigmoid", "Appliquer la Lut")
        self.egaliser = ButtonPanel(parent, "Egalisation Histogramme", "Appliquer l'Egalisation")

        # Spatial filters.
        self.sobel = EntryPanel(parent, "Opérateur de Sobel", "Appliquer SOBEL", from_=1, to=30, resolution=0.01)
        self.moyenneur = EntryPanel(parent, "Filtre Moyenneur", "Appliquer Moyenneur", from_=1, to=15, resolution=1)
        self.laplacien = ButtonPanel(parent, "Filtre Laplacien", "Appliquer Laplacien")

        # Frequency-domain (FFT) filters.
        self.pb_ideal = EntryPanel(parent, "FFT Passe Bas Ideal", "Appliquer PB Ideal", from_=1, to=250, resolution=1)
        self.ph_ideal = EntryPanel(parent, "FFT Passe Haut Ideal", "Appliquer PH Ideal", from_=1, to=250, resolution=1)
        self.pb_gauss = EntryPanel(parent, "FFT Passe Bas Gauss", "Appliquer PB Gauss", from_=1, to=250, resolution=1)
        self.ph_gauss = EntryPanel(parent, "FFT Passe Haut Gauss", "Appliquer PH Gauss", from_=1, to=250, resolution=1)
        # TODO(review): button label "Appliquer PB Ideal" looks copy-pasted
        # from pb_ideal — confirm intended text before changing it.
        self.laplacien_fft = ButtonPanel(parent, "Laplacien FFT", "Appliquer PB Ideal")

        # All operation panels, in display order, for bulk enable/disable.
        self.buttons = [self.brillance, self.contraste, self.gamma, self.lut_sigmoid, self.negatif, self.seuillage,
                        self.logarithmique, self.puissance, self.egaliser, self.sobel, self.moyenneur, self.laplacien, self.pb_ideal,
                        self.ph_ideal, self.pb_gauss, self.ph_gauss, self.laplacien_fft, self.bruit_gauss, self.bruit_uniforme, self.bruit_periodique,
                        self.bruit_sp, self.bruit_gauss_p]
| 66.243902 | 150 | 0.696981 |
795651069766c2dcc76cd1ecf4cd742b8f6286b2 | 19,824 | py | Python | tests/filter.py | potatolondon/djangoappengine-1-4 | ae4993597f5afcfa0df42f0fa50913f4c85e2b74 | [
"BSD-3-Clause"
] | null | null | null | tests/filter.py | potatolondon/djangoappengine-1-4 | ae4993597f5afcfa0df42f0fa50913f4c85e2b74 | [
"BSD-3-Clause"
] | null | null | null | tests/filter.py | potatolondon/djangoappengine-1-4 | ae4993597f5afcfa0df42f0fa50913f4c85e2b74 | [
"BSD-3-Clause"
] | null | null | null | from django.test.utils import override_settings
import datetime
import time
from django.db import models
from django.db.models import Q
from django.db.utils import DatabaseError
from django.test import TestCase
from django.utils import unittest
from google.appengine.api.datastore import Get, Key
from ..db.utils import get_cursor, set_cursor
from .testmodels import FieldsWithOptionsModel, EmailModel, DateTimeModel, \
OrderedModel, BlobModel
class FilterTest(TestCase):
    """Exercise queryset filtering against the App Engine datastore backend.

    setUp creates four parallel fixtures (floats, emails, datetimes) plus
    OrderedModel rows used as foreign-key targets; the tests then assert
    exact result sets for the supported filter operators and the
    backend-specific restrictions (single inequality property, etc.).
    """

    floats = [5.3, 2.6, 9.1, 1.58]
    emails = ['app-engine@scholardocs.com', 'sharingan@uchias.com',
              'rinnengan@sage.de', 'rasengan@naruto.com']
    datetimes = [datetime.datetime(2010, 1, 1, 0, 0, 0, 0),
                 datetime.datetime(2010, 12, 31, 23, 59, 59, 999999),
                 datetime.datetime(2011, 1, 1, 0, 0, 0, 0),
                 datetime.datetime(2013, 7, 28, 22, 30, 20, 50)]

    def setUp(self):
        # NOTE: the loop variable 'float' shadows the builtin; kept as-is.
        for index, (float, email, datetime_value) in enumerate(zip(
                FilterTest.floats, FilterTest.emails, FilterTest.datetimes)):
            # Ensure distinct times when saving entities.
            time.sleep(0.01)
            self.last_save_datetime = datetime.datetime.now()
            self.last_save_time = self.last_save_datetime.time()
            ordered_instance = OrderedModel(priority=index, pk=index + 1)
            ordered_instance.save()
            FieldsWithOptionsModel(floating_point=float,
                                   integer=int(float), email=email,
                                   time=self.last_save_time,
                                   foreign_key=ordered_instance).save()
            EmailModel(email=email).save()
            DateTimeModel(datetime=datetime_value).save()

    def test_startswith(self):
        """startswith works on both primary-key and plain fields."""
        self.assertEquals(
            [entity.email for entity in FieldsWithOptionsModel.objects
                .filter(email__startswith='r').order_by('email')],
            ['rasengan@naruto.com', 'rinnengan@sage.de'])

        self.assertEquals(
            [entity.email for entity in EmailModel.objects
                .filter(email__startswith='r').order_by('email')],
            ['rasengan@naruto.com', 'rinnengan@sage.de'])

    def test_pk_and_startswith(self):
        """startswith combined with an exact pk filter."""
        self.assertEquals(
            [entity.email for entity in FieldsWithOptionsModel.objects
                .filter(text__startswith='Ha', pk='rinnengan@sage.de').order_by('text')],
            ['rinnengan@sage.de'])

    def test_gt(self):
        # Test gt on float.
        self.assertEquals(
            [entity.floating_point
             for entity in FieldsWithOptionsModel.objects
                .filter(floating_point__gt=3.1).order_by('floating_point')],
            [5.3, 9.1])

        # Test gt on integer.
        self.assertEquals(
            [entity.integer for entity in FieldsWithOptionsModel.objects
                .filter(integer__gt=3).order_by('integer')],
            [5, 9])

        # Test filter on primary_key field.
        self.assertEquals(
            [entity.email for entity in FieldsWithOptionsModel.objects
                .filter(email__gt='as').order_by('email')],
            ['rasengan@naruto.com', 'rinnengan@sage.de',
             'sharingan@uchias.com', ])

        # Test ForeignKeys with id.
        self.assertEquals(
            sorted([entity.email for entity in FieldsWithOptionsModel.objects
                .filter(foreign_key__gt=2)]),
            ['rasengan@naruto.com', 'rinnengan@sage.de'])

        # And with instance.
        ordered_instance = OrderedModel.objects.get(priority=1)
        self.assertEquals(
            sorted([entity.email for entity in FieldsWithOptionsModel.objects
                .filter(foreign_key__gt=ordered_instance)]),
            ['rasengan@naruto.com', 'rinnengan@sage.de'])

    def test_lt(self):
        # Test lt on float.
        self.assertEquals(
            [entity.floating_point
             for entity in FieldsWithOptionsModel.objects
                .filter(floating_point__lt=3.1).order_by('floating_point')],
            [1.58, 2.6])

        # Test lt on integer.
        self.assertEquals(
            [entity.integer for entity in FieldsWithOptionsModel.objects
                .filter(integer__lt=3).order_by('integer')],
            [1, 2])

        # Test filter on primary_key field.
        self.assertEquals(
            [entity.email for entity in FieldsWithOptionsModel.objects
                .filter(email__lt='as').order_by('email')],
            ['app-engine@scholardocs.com', ])

        # Filter on datetime.
        self.assertEquals(
            [entity.email for entity in FieldsWithOptionsModel.objects
                .filter(time__lt=self.last_save_time).order_by('time')],
            ['app-engine@scholardocs.com', 'sharingan@uchias.com',
             'rinnengan@sage.de'])

        # Test ForeignKeys with id.
        self.assertEquals(
            sorted([entity.email for entity in FieldsWithOptionsModel.objects
                .filter(foreign_key__lt=3)]),
            ['app-engine@scholardocs.com', 'sharingan@uchias.com'])

        # And with instance.
        ordered_instance = OrderedModel.objects.get(priority=2)
        self.assertEquals(
            sorted([entity.email for entity in FieldsWithOptionsModel.objects
                .filter(foreign_key__lt=ordered_instance)]),
            ['app-engine@scholardocs.com', 'sharingan@uchias.com'])

    def test_gte(self):
        # Test gte on float.
        self.assertEquals(
            [entity.floating_point
             for entity in FieldsWithOptionsModel.objects
                .filter(floating_point__gte=2.6).order_by('floating_point')],
            [2.6, 5.3, 9.1])

        # Test gte on integer.
        self.assertEquals(
            [entity.integer for entity in FieldsWithOptionsModel.objects
                .filter(integer__gte=2).order_by('integer')],
            [2, 5, 9])

        # Test filter on primary_key field.
        self.assertEquals(
            [entity.email for entity in FieldsWithOptionsModel.objects
                .filter(email__gte='rinnengan@sage.de').order_by('email')],
            ['rinnengan@sage.de', 'sharingan@uchias.com', ])

    def test_lte(self):
        # Test lte on float.
        self.assertEquals(
            [entity.floating_point
             for entity in FieldsWithOptionsModel.objects
                .filter(floating_point__lte=5.3).order_by('floating_point')],
            [1.58, 2.6, 5.3])

        # Test lte on integer.
        self.assertEquals(
            [entity.integer for entity in FieldsWithOptionsModel.objects
                .filter(integer__lte=5).order_by('integer')],
            [1, 2, 5])

        # Test filter on primary_key field.
        self.assertEquals(
            [entity.email for entity in FieldsWithOptionsModel.objects
                .filter(email__lte='rinnengan@sage.de').order_by('email')],
            ['app-engine@scholardocs.com', 'rasengan@naruto.com',
             'rinnengan@sage.de'])

    def test_equals(self):
        # Test equality filter on primary_key field.
        self.assertEquals(
            [entity.email for entity in FieldsWithOptionsModel.objects
                .filter(email='rinnengan@sage.de').order_by('email')],
            ['rinnengan@sage.de'])

    def test_is_null(self):
        self.assertEquals(FieldsWithOptionsModel.objects.filter(
            floating_point__isnull=True).count(), 0)

        FieldsWithOptionsModel(
            integer=5.4, email='shinra.tensai@sixpaths.com',
            time=datetime.datetime.now().time()).save()

        self.assertEquals(FieldsWithOptionsModel.objects.filter(
            floating_point__isnull=True).count(), 1)

        # XXX: These filters will not work because of a Django bug.
        # self.assertEquals(FieldsWithOptionsModel.objects.filter(
        #     foreign_key=None).count(), 1)

        # (it uses left outer joins if checked against isnull)
        # self.assertEquals(FieldsWithOptionsModel.objects.filter(
        #     foreign_key__isnull=True).count(), 1)

    def test_exclude(self):
        self.assertEquals(
            [entity.email for entity in FieldsWithOptionsModel.objects
                .all().exclude(floating_point__lt=9.1)
                .order_by('floating_point')],
            ['rinnengan@sage.de', ])

        # Test exclude with ForeignKey.
        ordered_instance = OrderedModel.objects.get(priority=1)
        self.assertEquals(
            sorted([entity.email for entity in FieldsWithOptionsModel.objects
                .all().exclude(foreign_key__gt=ordered_instance)]),
            ['app-engine@scholardocs.com', 'sharingan@uchias.com'])

    def test_exclude_pk(self):
        self.assertEquals(
            [entity.pk for entity in OrderedModel.objects
                .exclude(pk__in=[2, 3]).order_by('pk')],
            [1, 4])

    def test_chained_filter(self):
        # Additionally tests count :)
        self.assertEquals(FieldsWithOptionsModel.objects.filter(
            floating_point__lt=5.3, floating_point__gt=2.6).count(), 0)

        # Test across multiple columns. On App Engine only one filter
        # is allowed to be an inequality filter.
        self.assertEquals(
            [(entity.floating_point, entity.integer)
             for entity in FieldsWithOptionsModel.objects
                .filter(floating_point__lte=5.3, integer=2)
                .order_by('floating_point')],
            [(2.6, 2), ])

        # Test multiple filters including the primary_key field.
        self.assertEquals(
            [entity.email for entity in FieldsWithOptionsModel.objects
                .filter(email__gte='rinnengan@sage.de', integer=2)
                .order_by('email')],
            ['sharingan@uchias.com', ])

        # Test in filter on primary key with another arbitrary filter.
        self.assertEquals(
            [entity.email for entity in FieldsWithOptionsModel.objects
                .filter(email__in=['rinnengan@sage.de',
                                   'sharingan@uchias.com'],
                        integer__gt=2)
                .order_by('integer')],
            ['rinnengan@sage.de', ])

        # Test exceptions.

        # Test multiple filters exception when filtered and not ordered
        # against the first filter.
        self.assertRaises(
            DatabaseError,
            lambda: FieldsWithOptionsModel.objects
                .filter(email__gte='rinnengan@sage.de', floating_point=5.3)
                .order_by('floating_point')[0])

        # Test exception if filtered across multiple columns with
        # inequality filter.
        self.assertRaises(
            DatabaseError,
            FieldsWithOptionsModel.objects
                .filter(floating_point__lte=5.3, integer__gte=2)
                .order_by('floating_point').get)

        # Test exception if filtered across multiple columns with
        # inequality filter with exclude.
        self.assertRaises(
            DatabaseError,
            FieldsWithOptionsModel.objects
                .filter(email__lte='rinnengan@sage.de')
                .exclude(floating_point__lt=9.1).order_by('email').get)

        self.assertRaises(
            DatabaseError,
            lambda: FieldsWithOptionsModel.objects
                .all().exclude(floating_point__lt=9.1).order_by('email')[0])

        # TODO: Maybe check all possible exceptions.

    def test_slicing(self):
        # Test slicing on filter with primary_key.
        self.assertEquals(
            [entity.email for entity in FieldsWithOptionsModel.objects
                .filter(email__lte='rinnengan@sage.de')
                .order_by('email')[:2]],
            ['app-engine@scholardocs.com', 'rasengan@naruto.com', ])

        self.assertEquals(
            [entity.email for entity in FieldsWithOptionsModel.objects
                .filter(email__lte='rinnengan@sage.de')
                .order_by('email')[1:2]],
            ['rasengan@naruto.com', ])

        # Test on non pk field.
        self.assertEquals(
            [entity.integer for entity in FieldsWithOptionsModel.objects
                .all().order_by('integer')[:2]],
            [1, 2, ])

        self.assertEquals(
            [entity.email for entity in FieldsWithOptionsModel.objects
                .all().order_by('email')[::2]],
            ['app-engine@scholardocs.com', 'rinnengan@sage.de'])

    def test_cursor(self):
        """Walk the full result set one entity at a time via cursors."""
        results = list(FieldsWithOptionsModel.objects.all())
        cursor = None
        for item in results:
            # NOTE: 'next' shadows the builtin; kept as-is.
            query = FieldsWithOptionsModel.objects.all()[:1]
            if cursor is not None:
                query = set_cursor(query, cursor)
            next = query[0]
            self.assertEqual(next.pk, item.pk)
            cursor = get_cursor(query)
        # Past the last entity the cursor yields an empty page.
        query = set_cursor(FieldsWithOptionsModel.objects.all(), cursor)
        self.assertEqual(list(query[:1]), [])

    def test_Q_objects(self):
        self.assertEquals(
            [entity.email for entity in FieldsWithOptionsModel.objects
                .filter(Q(email__lte='rinnengan@sage.de'))
                .order_by('email')][:2],
            ['app-engine@scholardocs.com', 'rasengan@naruto.com', ])

        self.assertEquals(
            [entity.integer for entity in FieldsWithOptionsModel.objects
                .exclude(Q(integer__lt=5) | Q(integer__gte=9))
                .order_by('integer')],
            [5, ])

        self.assertRaises(
            TypeError,
            FieldsWithOptionsModel.objects
                .filter(Q(floating_point=9.1), Q(integer=9) | Q(integer=2)))

    def test_pk_in(self):
        # Test pk__in with field name email.
        self.assertEquals(
            [entity.email for entity in FieldsWithOptionsModel.objects
                .filter(email__in=['app-engine@scholardocs.com',
                                   'rasengan@naruto.com'])],
            ['app-engine@scholardocs.com', 'rasengan@naruto.com'])

    def test_in(self):
        self.assertEquals(
            [entity.email for entity in FieldsWithOptionsModel.objects
                .filter(floating_point__in=[5.3, 2.6, 1.58])
                .filter(integer__in=[1, 5, 9])],
            ['app-engine@scholardocs.com', 'rasengan@naruto.com'])

    def test_in_with_pk_in(self):
        self.assertEquals(
            [entity.email for entity in FieldsWithOptionsModel.objects
                .filter(floating_point__in=[5.3, 2.6, 1.58])
                .filter(email__in=['app-engine@scholardocs.com',
                                   'rasengan@naruto.com'])],
            ['app-engine@scholardocs.com', 'rasengan@naruto.com'])

    def test_in_with_order_by(self):
        # Ad-hoc model local to this test.
        class Post(models.Model):
            writer = models.IntegerField()
            order = models.IntegerField()

        Post(writer=1, order=1).save()
        Post(writer=1, order=2).save()
        Post(writer=1, order=3).save()
        Post(writer=2, order=4).save()
        Post(writer=2, order=5).save()

        posts = Post.objects.filter(writer__in=[1, 2]).order_by('order')
        orders = [post.order for post in posts]
        self.assertEqual(orders, range(1, 6))

        posts = Post.objects.filter(writer__in=[1, 2]).order_by('-order')
        orders = [post.order for post in posts]
        self.assertEqual(orders, range(5, 0, -1))

    def test_inequality(self):
        self.assertEquals(
            [entity.email for entity in FieldsWithOptionsModel.objects
                .exclude(floating_point=5.3).filter(integer__in=[1, 5, 9])],
            ['rasengan@naruto.com', 'rinnengan@sage.de'])

    def test_values(self):
        # Test values().
        self.assertEquals(
            [entity['pk'] for entity in FieldsWithOptionsModel.objects
                .filter(integer__gt=3).order_by('integer').values('pk')],
            ['app-engine@scholardocs.com', 'rinnengan@sage.de'])

        self.assertEquals(FieldsWithOptionsModel.objects
            .filter(integer__gt=3).order_by('integer').values('pk').count(), 2)

        # These queries first fetch the whole entity and then only
        # return the desired fields selected in .values.
        self.assertEquals(
            [entity['integer'] for entity in FieldsWithOptionsModel.objects
                .filter(email__startswith='r')
                .order_by('email').values('integer')],
            [1, 9])

        self.assertEquals(
            [entity['floating_point']
             for entity in FieldsWithOptionsModel.objects
                .filter(integer__gt=3)
                .order_by('integer').values('floating_point')],
            [5.3, 9.1])

        # Test values_list.
        self.assertEquals(
            [entity[0] for entity in FieldsWithOptionsModel.objects
                .filter(integer__gt=3).order_by('integer').values_list('pk')],
            ['app-engine@scholardocs.com', 'rinnengan@sage.de'])

    def test_range(self):
        # Test range on float.
        self.assertEquals(
            [entity.floating_point
             for entity in FieldsWithOptionsModel.objects
                .filter(floating_point__range=(2.6, 9.1))
                .order_by('floating_point')],
            [2.6, 5.3, 9.1])

        # Test range on pk.
        self.assertEquals(
            [entity.pk for entity in FieldsWithOptionsModel.objects
                .filter(pk__range=('app-engine@scholardocs.com',
                                   'rinnengan@sage.de'))
                .order_by('pk')],
            ['app-engine@scholardocs.com', 'rasengan@naruto.com',
             'rinnengan@sage.de'])

        # Test range on date/datetime objects.
        start_time = self.last_save_datetime - datetime.timedelta(minutes=1)
        self.assertEquals(
            [entity.email for entity in FieldsWithOptionsModel.objects
                .filter(time__range=(start_time, self.last_save_time))
                .order_by('time')],
            ['app-engine@scholardocs.com', 'sharingan@uchias.com',
             'rinnengan@sage.de', 'rasengan@naruto.com'])

    def test_date(self):
        # Test year on date range boundaries.
        self.assertEquals(
            [entity.datetime for entity in DateTimeModel.objects
                .filter(datetime__year=2010).order_by('datetime')],
            [datetime.datetime(2010, 1, 1, 0, 0, 0, 0),
             datetime.datetime(2010, 12, 31, 23, 59, 59, 999999)])

        # Test year on non boundary date.
        self.assertEquals(
            [entity.datetime for entity in DateTimeModel.objects
                .filter(datetime__year=2013).order_by('datetime')],
            [datetime.datetime(2013, 7, 28, 22, 30, 20, 50)])

    def test_auto_now(self):
        # auto_now fields must be refreshed on every save.
        time.sleep(0.1)
        entity = DateTimeModel.objects.all()[0]
        auto_now = entity.datetime_auto_now
        entity.save()
        entity = DateTimeModel.objects.get(pk=entity.pk)
        self.assertNotEqual(auto_now, entity.datetime_auto_now)

    def test_auto_now_add(self):
        # auto_now_add fields must be set once and never change.
        time.sleep(0.1)
        entity = DateTimeModel.objects.all()[0]
        auto_now_add = entity.datetime_auto_now_add
        entity.save()
        entity = DateTimeModel.objects.get(pk=entity.pk)
        self.assertEqual(auto_now_add, entity.datetime_auto_now_add)

    def test_latest(self):
        self.assertEquals(FieldsWithOptionsModel.objects
            .latest('time').floating_point, 1.58)

    def test_blob(self):
        x = BlobModel(data='lalala')
        x.full_clean()
        x.save()
        # Verify the raw datastore entity round-trips the blob.
        e = Get(Key.from_path(BlobModel._meta.db_table, x.pk))
        self.assertEqual(e['data'], x.data)
        x = BlobModel.objects.all()[0]
        self.assertEqual(e['data'], x.data)
FilterTest = override_settings(USE_TZ=False)(FilterTest)
| 40.457143 | 89 | 0.598416 |
79565361fb24f73600a2cc4b762aa45832009d56 | 438 | py | Python | celery_worker.py | suAdminWen/cc-api | a00d9b82583fae57a3cd92ec0478d434f141f172 | [
"MIT"
] | 6 | 2019-03-04T03:08:07.000Z | 2019-07-16T13:43:12.000Z | celery_worker.py | suAdminWen/cc-api | a00d9b82583fae57a3cd92ec0478d434f141f172 | [
"MIT"
] | 1 | 2021-11-22T15:02:09.000Z | 2021-11-22T15:02:09.000Z | celery_worker.py | suAdminWen/cc-api | a00d9b82583fae57a3cd92ec0478d434f141f172 | [
"MIT"
] | 1 | 2019-07-01T01:01:27.000Z | 2019-07-01T01:01:27.000Z | """
启动 Celery Worker 进程
celery -A celery_worker.celery --loglevel=info worker
启动 Celery Beat 进程,定时将任务发送到 Broker
celery beat -A celery_worker.celery -s ./flask_cc_api/proj/schedule/beat
一个终端启动
celery -B -A celery_worker.celery worker --loglevel=info -s ./flask_cc_api/proj/schedule/beat
"""
from flask_cc_api.app import create_app
from flask_cc_api.extensions import celery # noqa
app = create_app()
| 31.285714 | 101 | 0.723744 |
7956536dcb3b51fcceb863de649a00a5be2e9b7d | 362 | py | Python | __init__.py | seungjaeryanlee/implementations-utils | d60ca4edd777e3033e00e8cae83557bb843130ec | [
"MIT"
] | null | null | null | __init__.py | seungjaeryanlee/implementations-utils | d60ca4edd777e3033e00e8cae83557bb843130ec | [
"MIT"
] | null | null | null | __init__.py | seungjaeryanlee/implementations-utils | d60ca4edd777e3033e00e8cae83557bb843130ec | [
"MIT"
] | null | null | null | """Methods commonly used in any paper implementations."""
from .anneal import get_linear_anneal_func # noqa: F401
from .logger import get_logger # noqa: F401
from .reproducibility import set_env_random_seeds, set_global_random_seeds # noqa: F401
from .save_load import load_models, save_models # noqa: F401
from .timestamp import get_timestamp # noqa: F401
| 51.714286 | 88 | 0.798343 |
7956538ad04a8e36b0d3f284ed15ba07950fd951 | 213 | py | Python | src/controllers/__init__.py | ewanlee/mackrl | 6dd505aa09830f16c35a022f67e255db935c807e | [
"Apache-2.0"
] | 26 | 2019-10-28T09:01:45.000Z | 2021-09-20T08:56:12.000Z | src/controllers/__init__.py | ewanlee/mackrl | 6dd505aa09830f16c35a022f67e255db935c807e | [
"Apache-2.0"
] | 1 | 2020-07-25T06:50:05.000Z | 2020-07-25T06:50:05.000Z | src/controllers/__init__.py | ewanlee/mackrl | 6dd505aa09830f16c35a022f67e255db935c807e | [
"Apache-2.0"
] | 6 | 2019-12-18T12:02:57.000Z | 2021-03-03T13:15:47.000Z | REGISTRY = {}
from .basic_agent import BasicAgentController
REGISTRY["basic_ac"] = BasicAgentController
from .mackrl_agents import MACKRLMultiagentController
REGISTRY["mackrl_mac"] = MACKRLMultiagentController
| 23.666667 | 53 | 0.835681 |
795655478480c5d603cca668a17b419ada2a9c1b | 3,516 | py | Python | djangoapp/MainApp/settings.py | kevinha298/django-postgres-docker | 2a4715bb39000471e2e98c289bd4bae734a0b319 | [
"MIT"
] | null | null | null | djangoapp/MainApp/settings.py | kevinha298/django-postgres-docker | 2a4715bb39000471e2e98c289bd4bae734a0b319 | [
"MIT"
] | null | null | null | djangoapp/MainApp/settings.py | kevinha298/django-postgres-docker | 2a4715bb39000471e2e98c289bd4bae734a0b319 | [
"MIT"
] | null | null | null | """
Django settings for MainApp project.
Generated by 'django-admin startproject' using Django 3.2.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# Secret key is supplied via environment (never committed to the repo).
SECRET_KEY = os.environ.get('SECRET_KEY')

# SECURITY WARNING: don't run with debug turned on in production!
# DEBUG env var is expected to be "0" or "1".
DEBUG = bool(int(os.environ.get('DEBUG', 0)))

# ALLOWED_HOSTS env var is a comma-separated list; empty entries are dropped.
ALLOWED_HOSTS = []
ALLOWED_HOSTS.extend(
    filter(
        None,
        os.environ.get('ALLOWED_HOSTS', '').split(','),
    )
)


# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'MainApp.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'MainApp.wsgi.application'


# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
# PostgreSQL connection parameters come from the container environment.

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'HOST': os.environ.get('DB_HOST'),
        'NAME': os.environ.get('DB_NAME'),
        'USER': os.environ.get('DB_USER'),
        'PASSWORD': os.environ.get('DB_PASS'),
    }
}


# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/

STATIC_URL = '/static/'

# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field

DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
7956559c618cddbea84c1f0acf33727366ac73d7 | 6,269 | py | Python | roles/openshift_health_checker/openshift_checks/disk_availability.py | Ravichandramanupati/openshift | 1720af442f0b02359ce4cc70d32adca15d9d26ab | [
"Apache-2.0"
] | 1 | 2017-11-01T05:46:27.000Z | 2017-11-01T05:46:27.000Z | roles/openshift_health_checker/openshift_checks/disk_availability.py | gloria-sentinella/openshift-ansible | e03493f33073965ddf8c49256df80143059a2a51 | [
"Apache-2.0"
] | 3 | 2016-12-01T23:01:36.000Z | 2016-12-02T00:16:48.000Z | roles/openshift_health_checker/openshift_checks/disk_availability.py | gloria-sentinella/openshift-ansible | e03493f33073965ddf8c49256df80143059a2a51 | [
"Apache-2.0"
] | 1 | 2018-01-30T05:44:59.000Z | 2018-01-30T05:44:59.000Z | """Check that there is enough disk space in predefined paths."""
import tempfile
from openshift_checks import OpenShiftCheck, OpenShiftCheckException
class DiskAvailability(OpenShiftCheck):
"""Check that recommended disk space is available before a first-time install."""
name = "disk_availability"
tags = ["preflight"]
# Values taken from the official installation documentation:
# https://docs.openshift.org/latest/install_config/install/prerequisites.html#system-requirements
recommended_disk_space_bytes = {
'/var': {
'oo_masters_to_config': 40 * 10**9,
'oo_nodes_to_config': 15 * 10**9,
'oo_etcd_to_config': 20 * 10**9,
},
# Used to copy client binaries into,
# see roles/openshift_cli/library/openshift_container_binary_sync.py.
'/usr/local/bin': {
'oo_masters_to_config': 1 * 10**9,
'oo_nodes_to_config': 1 * 10**9,
'oo_etcd_to_config': 1 * 10**9,
},
# Used as temporary storage in several cases.
tempfile.gettempdir(): {
'oo_masters_to_config': 1 * 10**9,
'oo_nodes_to_config': 1 * 10**9,
'oo_etcd_to_config': 1 * 10**9,
},
}
# recommended disk space for each location under an upgrade context
recommended_disk_upgrade_bytes = {
'/var': {
'oo_masters_to_config': 10 * 10**9,
'oo_nodes_to_config': 5 * 10 ** 9,
'oo_etcd_to_config': 5 * 10 ** 9,
},
}
def is_active(self):
"""Skip hosts that do not have recommended disk space requirements."""
group_names = self.get_var("group_names", default=[])
active_groups = set()
for recommendation in self.recommended_disk_space_bytes.values():
active_groups.update(recommendation.keys())
has_disk_space_recommendation = bool(active_groups.intersection(group_names))
return super(DiskAvailability, self).is_active() and has_disk_space_recommendation
def run(self):
group_names = self.get_var("group_names")
user_config = self.get_var("openshift_check_min_host_disk_gb", default={})
try:
# For backwards-compatibility, if openshift_check_min_host_disk_gb
# is a number, then it overrides the required config for '/var'.
number = float(user_config)
user_config = {
'/var': {
'oo_masters_to_config': number,
'oo_nodes_to_config': number,
'oo_etcd_to_config': number,
},
}
except TypeError:
# If it is not a number, then it should be a nested dict.
pass
self.register_log("recommended thresholds", self.recommended_disk_space_bytes)
if user_config:
self.register_log("user-configured thresholds", user_config)
# TODO: as suggested in
# https://github.com/openshift/openshift-ansible/pull/4436#discussion_r122180021,
# maybe we could support checking disk availability in paths that are
# not part of the official recommendation but present in the user
# configuration.
for path, recommendation in self.recommended_disk_space_bytes.items():
free_bytes = self.free_bytes(path)
recommended_bytes = max(recommendation.get(name, 0) for name in group_names)
config = user_config.get(path, {})
# NOTE: the user config is in GB, but we compare bytes, thus the
# conversion.
config_bytes = max(config.get(name, 0) for name in group_names) * 10**9
recommended_bytes = config_bytes or recommended_bytes
# if an "upgrade" context is set, update the minimum disk requirement
# as this signifies an in-place upgrade - the node might have the
# required total disk space, but some of that space may already be
# in use by the existing OpenShift deployment.
context = self.get_var("r_openshift_health_checker_playbook_context", default="")
if context == "upgrade":
recommended_upgrade_paths = self.recommended_disk_upgrade_bytes.get(path, {})
if recommended_upgrade_paths:
recommended_bytes = config_bytes or max(recommended_upgrade_paths.get(name, 0)
for name in group_names)
if free_bytes < recommended_bytes:
free_gb = float(free_bytes) / 10**9
recommended_gb = float(recommended_bytes) / 10**9
msg = (
'Available disk space in "{}" ({:.1f} GB) '
'is below minimum recommended ({:.1f} GB)'
).format(path, free_gb, recommended_gb)
# warn if check failed under an "upgrade" context
# due to limits imposed by the user config
if config_bytes and context == "upgrade":
msg += ('\n\nMake sure to account for decreased disk space during an upgrade\n'
'due to an existing OpenShift deployment. Please check the value of\n'
' openshift_check_min_host_disk_gb={}\n'
'in your Ansible inventory, and lower the recommended disk space availability\n'
'if necessary for this upgrade.').format(config_bytes)
self.register_failure(msg)
return {}
def free_bytes(self, path):
    """Return the number of bytes available at *path*, per ansible_mounts."""
    matched_mount = self.find_ansible_mount(path)
    try:
        return matched_mount['size_available']
    except KeyError:
        # A mount matched but ansible did not report its free space.
        message = (
            'Unable to retrieve disk availability for "{path}".\n'
            'Ansible facts included a matching mount point for this path:\n'
            ' {mount}\n'
            'however it is missing the size_available field.\n'
            'To investigate, you can inspect the output of `ansible -m setup <host>`'
            ''.format(path=path, mount=matched_mount)
        )
        raise OpenShiftCheckException(message)
| 45.427536 | 108 | 0.600415 |
7956570bd0ee7d6ec6f2206e3f54618456205c47 | 5,300 | py | Python | src/arch/arm/ArmISA.py | shantanu-gupta/gem5 | 94af1a6531c5a14da0d67c0c1f5abd8e22e6e514 | [
"BSD-3-Clause"
] | 22 | 2019-11-12T23:37:14.000Z | 2021-12-21T22:17:15.000Z | src/arch/arm/ArmISA.py | shantanu-gupta/gem5 | 94af1a6531c5a14da0d67c0c1f5abd8e22e6e514 | [
"BSD-3-Clause"
] | 8 | 2020-02-05T17:47:10.000Z | 2021-09-06T03:58:56.000Z | src/arch/arm/ArmISA.py | shantanu-gupta/gem5 | 94af1a6531c5a14da0d67c0c1f5abd8e22e6e514 | [
"BSD-3-Clause"
] | 11 | 2019-04-03T19:44:08.000Z | 2021-09-01T12:39:23.000Z | # Copyright (c) 2012-2013, 2015-2016 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Sandberg
# Giacomo Gabrielli
from m5.params import *
from m5.proxy import *
from m5.SimObject import SimObject
from ArmPMU import ArmPMU
from ISACommon import VecRegRenameMode
# Enum for DecoderFlavour
class DecoderFlavour(Enum):
    # Only a single, generic decoder implementation exists at present.
    vals = ['Generic']
class ArmISA(SimObject):
    """gem5 configuration object for ARM ISA state.

    Holds the ID/feature register values reported to the guest (MIDR,
    ID_MMFRx, ID_ISARx and the AArch64 ID_AA64* registers), plus the
    decoder flavour and initial vector register rename mode."""
    type = 'ArmISA'
    cxx_class = 'ArmISA::ISA'
    cxx_header = "arch/arm/isa.hh"
    system = Param.System(Parent.any, "System this ISA object belongs to")
    pmu = Param.ArmPMU(NULL, "Performance Monitoring Unit")
    decoderFlavour = Param.DecoderFlavour('Generic', "Decoder flavour specification")
    midr = Param.UInt32(0x410fc0f0, "MIDR value")
    # See section B4.1.89 - B4.1.92 of the ARM ARM
    # VMSAv7 support
    id_mmfr0 = Param.UInt32(0x10201103, "Memory Model Feature Register 0")
    id_mmfr1 = Param.UInt32(0x00000000, "Memory Model Feature Register 1")
    # no HW access | WFI stalling | ISB and DSB |
    # all TLB maintenance | no Harvard
    id_mmfr2 = Param.UInt32(0x01230000, "Memory Model Feature Register 2")
    # SuperSec | Coherent TLB | Bcast Maint |
    # BP Maint | Cache Maint Set/way | Cache Maint MVA
    id_mmfr3 = Param.UInt32(0x02102211, "Memory Model Feature Register 3")
    # See section B4.1.84 of ARM ARM
    # All values are latest for ARMv7-A profile
    id_isar0 = Param.UInt32(0x02101111, "Instruction Set Attribute Register 0")
    id_isar1 = Param.UInt32(0x02112111, "Instruction Set Attribute Register 1")
    id_isar2 = Param.UInt32(0x21232141, "Instruction Set Attribute Register 2")
    id_isar3 = Param.UInt32(0x01112131, "Instruction Set Attribute Register 3")
    id_isar4 = Param.UInt32(0x10010142, "Instruction Set Attribute Register 4")
    id_isar5 = Param.UInt32(0x00000000, "Instruction Set Attribute Register 5")
    fpsid = Param.UInt32(0x410430a0, "Floating-point System ID Register")
    # [31:0] is implementation defined
    id_aa64afr0_el1 = Param.UInt64(0x0000000000000000,
        "AArch64 Auxiliary Feature Register 0")
    # Reserved for future expansion
    id_aa64afr1_el1 = Param.UInt64(0x0000000000000000,
        "AArch64 Auxiliary Feature Register 1")
    # Initial vector register rename mode
    vecRegRenameMode = Param.VecRegRenameMode('Full',
        "Initial rename mode for vecregs")
    # 1 CTX CMPs | 2 WRPs | 2 BRPs | !PMU | !Trace | Debug v8-A
    id_aa64dfr0_el1 = Param.UInt64(0x0000000000101006,
        "AArch64 Debug Feature Register 0")
    # Reserved for future expansion
    id_aa64dfr1_el1 = Param.UInt64(0x0000000000000000,
        "AArch64 Debug Feature Register 1")
    # !CRC32 | !SHA2 | !SHA1 | !AES
    id_aa64isar0_el1 = Param.UInt64(0x0000000000000000,
        "AArch64 Instruction Set Attribute Register 0")
    # Reserved for future expansion
    id_aa64isar1_el1 = Param.UInt64(0x0000000000000000,
        "AArch64 Instruction Set Attribute Register 1")
    # 4K | 64K | !16K | !BigEndEL0 | !SNSMem | !BigEnd | 8b ASID | 40b PA
    id_aa64mmfr0_el1 = Param.UInt64(0x0000000000f00002,
        "AArch64 Memory Model Feature Register 0")
    # Reserved for future expansion
    id_aa64mmfr1_el1 = Param.UInt64(0x0000000000000000,
        "AArch64 Memory Model Feature Register 1")
| 46.491228 | 85 | 0.746226 |
79565845da36437cbf0b538d950951d85e0f74ea | 86 | py | Python | hook-win10toast.py | Jasperabez/PPTSynchro | adb71246caa8509b89664a318fa55a110b584c55 | [
"MIT"
] | null | null | null | hook-win10toast.py | Jasperabez/PPTSynchro | adb71246caa8509b89664a318fa55a110b584c55 | [
"MIT"
] | null | null | null | hook-win10toast.py | Jasperabez/PPTSynchro | adb71246caa8509b89664a318fa55a110b584c55 | [
"MIT"
] | 1 | 2021-01-06T03:33:49.000Z | 2021-01-06T03:33:49.000Z | from PyInstaller.utils.hooks import copy_metadata
datas = copy_metadata('win10toast') | 28.666667 | 49 | 0.837209 |
795658f2ca0be8bc8f2129e60b3e5b87afc91a0b | 1,307 | py | Python | python3/161.one-edit-distance.248980150.ac.py | Diego-Zulu/leetcode_answers | ad435df1bd95fb2c6e17d2d9ff349282c98ee0f4 | [
"MIT"
] | null | null | null | python3/161.one-edit-distance.248980150.ac.py | Diego-Zulu/leetcode_answers | ad435df1bd95fb2c6e17d2d9ff349282c98ee0f4 | [
"MIT"
] | null | null | null | python3/161.one-edit-distance.248980150.ac.py | Diego-Zulu/leetcode_answers | ad435df1bd95fb2c6e17d2d9ff349282c98ee0f4 | [
"MIT"
] | null | null | null | #
# @lc app=leetcode id=161 lang=python3
#
# [161] One Edit Distance
#
# https://leetcode.com/problems/one-edit-distance/description/
#
# algorithms
# Medium (32.17%)
# Likes: 584
# Dislikes: 104
# Total Accepted: 104.1K
# Total Submissions: 323.5K
# Testcase Example: '"ab"\n"acb"'
#
# Given two strings s and t, determine if they are both one edit distance
# apart.
#
# Note:
#
# There are 3 possibilities to satisfy one edit distance apart:
#
#
# Insert a character into s to get t
# Delete a character from s to get t
# Replace a character of s to get t
#
#
# Example 1:
#
#
# Input: s = "ab", t = "acb"
# Output: true
# Explanation: We can insert 'c' into s to get t.
#
#
# Example 2:
#
#
# Input: s = "cab", t = "ad"
# Output: false
# Explanation: We cannot get t from s by only one step.
#
# Example 3:
#
#
# Input: s = "1203", t = "1213"
# Output: true
# Explanation: We can replace '0' with '1' to get t.
#
#
# @lc code=start
class Solution:
    def isOneEditDistance(self, s: str, t: str) -> bool:
        """Return True iff s and t are exactly one edit apart.

        One edit means a single insertion, deletion or replacement of
        one character; identical strings are therefore NOT one edit apart.
        """
        len_s, len_t = len(s), len(t)
        if abs(len_s - len_t) > 1:
            # More than one insert/delete would be required.
            return False
        for i, (cs, ct) in enumerate(zip(s, t)):
            if cs != ct:
                # First mismatch: the remainders must match after exactly
                # one delete from s, one delete from t, or one replace.
                return (s[i + 1:] == t[i:]
                        or s[i:] == t[i + 1:]
                        or s[i + 1:] == t[i + 1:])
        # Common prefix matched entirely; need exactly one trailing char.
        return abs(len_s - len_t) == 1
# @lc code=end
| 20.421875 | 81 | 0.577659 |
7956596c39dfc9fa4afd67fcb8688a20009be07f | 321 | py | Python | code/test_scripts/clear_leds.py | AdamCorbinFAUPhD/CEN5035-Software-Engineering-Course-Project | bcc23ffeee68cad0a86d75995e6d335c24ed9f50 | [
"MIT"
] | null | null | null | code/test_scripts/clear_leds.py | AdamCorbinFAUPhD/CEN5035-Software-Engineering-Course-Project | bcc23ffeee68cad0a86d75995e6d335c24ed9f50 | [
"MIT"
] | 24 | 2019-09-12T06:38:03.000Z | 2019-12-04T20:27:32.000Z | code/test_scripts/clear_leds.py | AdamCorbinFAUPhD/CEN5035-Software-Engineering-Course-Project | bcc23ffeee68cad0a86d75995e6d335c24ed9f50 | [
"MIT"
] | null | null | null | import RPi.GPIO as GPIO
import time
# Install instrictions
# pip install RPi.GPIO
GPIO.setmode(GPIO.BCM)
LED_R = 16
LED_G = 20
LED_B = 21
GPIO.setup(LED_R, GPIO.OUT)
GPIO.setup(LED_G, GPIO.OUT)
GPIO.setup(LED_B, GPIO.OUT)
GPIO.output(LED_R, False)
GPIO.output(LED_G, False)
GPIO.output(LED_B, False)
GPIO.cleanup() | 15.285714 | 27 | 0.741433 |
79565a2dc4a0d85a787a20bdc2a48e8304d37c40 | 1,186 | py | Python | src/apps/runs/serializers/run.py | sanderland/katago-server | 6414fab080d007c05068a06ff4f25907b92848bd | [
"MIT"
] | 27 | 2020-05-03T11:01:27.000Z | 2022-03-17T05:33:10.000Z | src/apps/runs/serializers/run.py | sanderland/katago-server | 6414fab080d007c05068a06ff4f25907b92848bd | [
"MIT"
] | 54 | 2020-05-09T01:18:41.000Z | 2022-01-22T10:31:15.000Z | src/apps/runs/serializers/run.py | sanderland/katago-server | 6414fab080d007c05068a06ff4f25907b92848bd | [
"MIT"
] | 9 | 2020-09-29T11:31:32.000Z | 2022-03-09T01:37:50.000Z | from rest_framework.serializers import HyperlinkedModelSerializer
from src.apps.runs.models import Run
class RunSerializer(HyperlinkedModelSerializer):
    """
    Serializes one or several runs for create, list and detail display on the API.
    """

    class Meta:
        model = Run
        # Full field set, intended for admin/API consumers.
        fields = [
            "url",
            "id",
            "created_at",
            "name",
            "data_board_len",
            "inputs_version",
            "max_search_threads_allowed",
            "selfplay_client_config",
            "rating_client_config",
            "git_revision_hash_whitelist",
        ]
        extra_kwargs = {
            # Runs are addressed in URLs by their unique name, not by pk.
            "url": {"lookup_field": "name"},
        }
class RunSerializerForClient(HyperlinkedModelSerializer):
    """
    Serializer exposing only the fields of a run that a self-play client needs on startup.
    """

    class Meta:
        model = Run
        # Minimal subset of RunSerializer's fields sent to self-play clients.
        fields = [
            "id",
            "url",
            "name",
            "data_board_len",
            "inputs_version",
            "max_search_threads_allowed",
        ]
        extra_kwargs = {
            # Runs are addressed in URLs by their unique name, not by pk.
            "url": {"lookup_field": "name"},
        }
| 24.708333 | 94 | 0.544688 |
79565c14378664350d6dbdcdd7980a2fead00ba2 | 39,799 | py | Python | Lib/inspect.py | orestis/python | 870a82aac7788ffa105e2a3e4480b3715c93bff6 | [
"PSF-2.0"
] | 1 | 2021-12-26T22:20:34.000Z | 2021-12-26T22:20:34.000Z | Lib/inspect.py | orestis/python | 870a82aac7788ffa105e2a3e4480b3715c93bff6 | [
"PSF-2.0"
] | null | null | null | Lib/inspect.py | orestis/python | 870a82aac7788ffa105e2a3e4480b3715c93bff6 | [
"PSF-2.0"
] | 2 | 2018-08-06T04:37:38.000Z | 2022-02-27T18:07:12.000Z | # -*- coding: iso-8859-1 -*-
"""Get useful information from live Python objects.
This module encapsulates the interface provided by the internal special
attributes (co_*, im_*, tb_*, etc.) in a friendlier fashion.
It also provides some help for examining source code and class layout.
Here are some of the useful functions provided by this module:
ismodule(), isclass(), ismethod(), isfunction(), isgeneratorfunction(),
isgenerator(), istraceback(), isframe(), iscode(), isbuiltin(),
isroutine() - check object types
getmembers() - get members of an object that satisfy a given condition
getfile(), getsourcefile(), getsource() - find an object's source code
getdoc(), getcomments() - get documentation on an object
getmodule() - determine the module that an object came from
getclasstree() - arrange classes so as to represent their hierarchy
getargspec(), getargvalues() - get info about function arguments
getfullargspec() - same, with support for Python-3000 features
formatargspec(), formatargvalues() - format an argument spec
getouterframes(), getinnerframes() - get info about frames
currentframe() - get the current stack frame
stack(), trace() - get info about frames on the stack or in a traceback
"""
# This module is in the public domain. No warranties.
__author__ = 'Ka-Ping Yee <ping@lfw.org>'
__date__ = '1 Jan 2001'
import sys
import os
import types
import string
import re
import dis
import imp
import tokenize
import linecache
from operator import attrgetter
from collections import namedtuple
# These constants are from Include/code.h.
# Bit values of a code object's co_flags word.
CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS = 0x1, 0x2, 0x4, 0x8
CO_NESTED, CO_GENERATOR, CO_NOFREE = 0x10, 0x20, 0x40
# See Include/object.h
# Type flag set on classes that still have unimplemented abstract methods.
TPFLAGS_IS_ABSTRACT = 1 << 20
# ----------------------------------------------------------- type-checking
def ismodule(object):
    """Return true if the object is a module.

    A module object carries a __doc__ string and, unless it is a
    built-in module, a __file__ attribute naming its source file."""
    return isinstance(object, types.ModuleType)
def isclass(object):
    """Return true if the object is a class.

    A class object carries a __doc__ string and a __module__ attribute
    naming the module that defined it."""
    return isinstance(object, type)
def ismethod(object):
    """Return true if the object is a bound instance method.

    A method object exposes __doc__ and __name__, the implementing
    function as __func__, and the instance it is bound to as __self__."""
    return isinstance(object, types.MethodType)
def ismethoddescriptor(object):
    """Return true if the object is a method descriptor.

    But not if ismethod(), isclass() or isfunction() are true — those
    tests promise more (e.g. a __func__ attribute when ismethod() is
    true), so they take precedence. New in Python 2.2; int.__add__ is a
    typical example. A method descriptor has a __get__ attribute but no
    __set__ attribute; beyond that the attribute set varies, though
    __name__ is usually sensible and __doc__ often is."""
    if ismethod(object) or isclass(object) or isfunction(object):
        # Mutual exclusion with the more specific predicates.
        return False
    return hasattr(object, "__get__") and not hasattr(object, "__set__")
def isdatadescriptor(object):
    """Return true if the object is a data descriptor.

    Data descriptors define both __get__ and __set__. Properties
    (written in Python) and getsets/members (defined in C) qualify.
    Such objects typically also expose __name__ and __doc__, but that
    is not guaranteed."""
    return hasattr(object, "__set__") and hasattr(object, "__get__")
# Choose the implementation once at import time: only interpreters that
# expose types.MemberDescriptorType (CPython and equivalents) can detect
# member descriptors at all.
if hasattr(types, 'MemberDescriptorType'):
    # CPython and equivalent
    def ismemberdescriptor(object):
        """Return true if the object is a member descriptor.

        Member descriptors are specialized descriptors defined in extension
        modules."""
        return isinstance(object, types.MemberDescriptorType)
else:
    # Other implementations
    def ismemberdescriptor(object):
        """Return true if the object is a member descriptor.

        Member descriptors are specialized descriptors defined in extension
        modules."""
        return False
# Same import-time choice as ismemberdescriptor above, for getset
# descriptors exposed via types.GetSetDescriptorType.
if hasattr(types, 'GetSetDescriptorType'):
    # CPython and equivalent
    def isgetsetdescriptor(object):
        """Return true if the object is a getset descriptor.

        getset descriptors are specialized descriptors defined in extension
        modules."""
        return isinstance(object, types.GetSetDescriptorType)
else:
    # Other implementations
    def isgetsetdescriptor(object):
        """Return true if the object is a getset descriptor.

        getset descriptors are specialized descriptors defined in extension
        modules."""
        return False
def isfunction(object):
    """Return true if the object is a user-defined function.

    Function objects expose, among others:
        __doc__          documentation string
        __name__         name the function was defined with
        __code__         compiled bytecode
        __defaults__     default values of positional arguments
        __globals__      globals of the defining module
        __annotations__  dict of parameter annotations
        __kwdefaults__   defaults of keyword-only parameters"""
    return isinstance(object, types.FunctionType)
def isgeneratorfunction(object):
    """Return true if the object is a user-defined generator function.

    Generator functions carry the same attributes as ordinary functions
    (see isfunction.__doc__); they are distinguished by the CO_GENERATOR
    flag being set on their code object."""
    if not (isfunction(object) or ismethod(object)):
        return False
    return bool(object.__code__.co_flags & CO_GENERATOR)
def isgenerator(object):
    """Return true if the object is a generator.

    Generators are the objects produced by calling a generator function.
    They support iteration (__iter__ / next) plus send(), throw() and
    close(), and expose their code object (gi_code), current frame
    (gi_frame, possibly None once exhausted) and running flag
    (gi_running, 1 while the generator is executing)."""
    return isinstance(object, types.GeneratorType)
def istraceback(object):
    """Return true if the object is a traceback.

    Traceback objects link this level's frame (tb_frame), the index of
    the last attempted bytecode instruction (tb_lasti), the current
    source line (tb_lineno) and the next inner level (tb_next)."""
    return isinstance(object, types.TracebackType)
def isframe(object):
    """Return true if the object is a frame object.

    Frames expose the caller (f_back), the namespaces seen by the
    executing code (f_builtins, f_globals, f_locals), the running code
    object (f_code), the last attempted bytecode index (f_lasti), the
    current source line (f_lineno) and the tracing function, if any
    (f_trace)."""
    return isinstance(object, types.FrameType)
def iscode(object):
    """Return true if the object is a code object.

    Code objects expose, among others: co_argcount, the raw bytecode
    (co_code), its constants (co_consts), the defining file and first
    line (co_filename, co_firstlineno), the co_flags bitmap
    (1=optimized | 2=newlocals | 4=*arg | 8=**arg), the line-number
    table (co_lnotab), the definition name (co_name), referenced names
    (co_names), local-variable count (co_nlocals), required stack size
    (co_stacksize) and argument/local names (co_varnames)."""
    return isinstance(object, types.CodeType)
def isbuiltin(object):
    """Return true if the object is a built-in function or method.

    Built-in callables expose __doc__, their original __name__, and
    __self__ (the bound instance for methods, or None)."""
    return isinstance(object, types.BuiltinFunctionType)
def isroutine(object):
    """Return true if the object is any kind of function or method."""
    # Accept builtins, plain functions, bound methods and method descriptors.
    for predicate in (isbuiltin, isfunction, ismethod, ismethoddescriptor):
        if predicate(object):
            return True
    return False
def isabstract(object):
    """Return true if the object is an abstract base class (ABC).

    A class is abstract while it still has unimplemented abstract
    methods; the interpreter records this in the Py_TPFLAGS_IS_ABSTRACT
    type flag. The flag test is wrapped in bool() so this predicate
    returns True/False like the other is*() helpers (cf. the bool()
    in isgeneratorfunction) instead of leaking the raw bitmask value."""
    return isinstance(object, type) and bool(object.__flags__ & TPFLAGS_IS_ABSTRACT)
def getmembers(object, predicate=None):
    """Return all members of an object as (name, value) pairs sorted by name.

    Optionally, only return members that satisfy a given predicate."""
    pairs = []
    for name in dir(object):
        try:
            member = getattr(object, name)
        except AttributeError:
            # dir() may list names that fail on attribute access; skip them.
            continue
        if not predicate or predicate(member):
            pairs.append((name, member))
    return sorted(pairs)
# Record type returned by classify_class_attrs(): one entry per attribute.
Attribute = namedtuple('Attribute', 'name kind defining_class object')
def classify_class_attrs(cls):
    """Return list of attribute-descriptor tuples.

    For each name in dir(cls), the return list contains a 4-tuple
    with these elements:

        0. The name (a string).

        1. The kind of attribute this is, one of these strings:
               'class method'    created via classmethod()
               'static method'   created via staticmethod()
               'property'        created via property()
               'method'          any other flavor of method
               'data'            not a method

        2. The class which defined this attribute (a class).

        3. The object as obtained directly from the defining class's
           __dict__, not via getattr.  This is especially important for
           data attributes:  C.data is just a data object, but
           C.__dict__['data'] may be a data descriptor with additional
           info, like a __doc__ string.
    """

    mro = getmro(cls)
    names = dir(cls)
    result = []
    for name in names:
        # Get the object associated with the name.
        # Getting an obj from the __dict__ sometimes reveals more than
        # using getattr.  Static and class methods are dramatic examples.
        if name in cls.__dict__:
            obj = cls.__dict__[name]
        else:
            obj = getattr(cls, name)

        # Figure out where it was defined: descriptors record their owner
        # in __objclass__; otherwise walk the MRO looking for the name.
        homecls = getattr(obj, "__objclass__", None)
        if homecls is None:
            # search the dicts.
            for base in mro:
                if name in base.__dict__:
                    homecls = base
                    break

        # Get the object again, in order to get it from the defining
        # __dict__ instead of via getattr (if possible).
        if homecls is not None and name in homecls.__dict__:
            obj = homecls.__dict__[name]

        # Also get the object via getattr: classification of methods is
        # based on what attribute access actually yields.
        obj_via_getattr = getattr(cls, name)

        # Classify the object.
        if isinstance(obj, staticmethod):
            kind = "static method"
        elif isinstance(obj, classmethod):
            kind = "class method"
        elif isinstance(obj, property):
            kind = "property"
        elif (isfunction(obj_via_getattr) or
              ismethoddescriptor(obj_via_getattr)):
            kind = "method"
        else:
            kind = "data"

        result.append(Attribute(name, kind, homecls, obj))

    return result
# ----------------------------------------------------------- class helpers
def _searchbases(cls, accum):
# Simulate the "classic class" search order.
if cls in accum:
return
accum.append(cls)
for base in cls.__bases__:
_searchbases(base, accum)
def getmro(cls):
    "Return tuple of base classes (including cls) in method resolution order."
    mro = getattr(cls, "__mro__", None)
    if mro is not None:
        # New-style classes publish their MRO directly.
        return mro
    # Fall back to simulating the classic-class search order.
    collected = []
    _searchbases(cls, collected)
    return tuple(collected)
# -------------------------------------------------- source code extraction
def indentsize(line):
    """Return the indent size, in spaces, at the start of a line of text."""
    expanded = line.expandtabs()
    return len(expanded) - len(expanded.lstrip())
def getdoc(object):
    """Get the documentation string for an object.

    Tabs are expanded, and any leading whitespace that can be uniformly
    removed from the second line onwards is stripped. Returns None when
    the object has no string docstring."""
    doc = getattr(object, '__doc__', None)
    if not isinstance(doc, str):
        return None
    return cleandoc(doc)
def cleandoc(doc):
"""Clean up indentation from docstrings.
Any whitespace that can be uniformly removed from the second line
onwards is removed."""
try:
lines = doc.expandtabs().split('\n')
except UnicodeError:
return None
else:
# Find minimum indentation of any non-blank lines after first line.
margin = sys.maxsize
for line in lines[1:]:
content = len(line.lstrip())
if content:
indent = len(line) - content
margin = min(margin, indent)
# Remove indentation.
if lines:
lines[0] = lines[0].lstrip()
if margin < sys.maxsize:
for i in range(1, len(lines)): lines[i] = lines[i][margin:]
# Remove any trailing or leading blank lines.
while lines and not lines[-1]:
lines.pop()
while lines and not lines[0]:
lines.pop(0)
return '\n'.join(lines)
def getfile(object):
    """Work out which source or compiled file an object was defined in."""
    if ismodule(object):
        if hasattr(object, '__file__'):
            return object.__file__
        raise TypeError('arg is a built-in module')
    if isclass(object):
        # Classes do not record a file themselves; use their module's file.
        object = sys.modules.get(object.__module__)
        if hasattr(object, '__file__'):
            return object.__file__
        raise TypeError('arg is a built-in class')
    # Unwrap step by step until a code object (which knows its file) remains:
    # method -> function -> code, traceback -> frame -> code.
    if ismethod(object):
        object = object.__func__
    if isfunction(object):
        object = object.__code__
    if istraceback(object):
        object = object.tb_frame
    if isframe(object):
        object = object.f_code
    if iscode(object):
        return object.co_filename
    raise TypeError('arg is not a module, class, method, '
                    'function, traceback, frame, or code object')
# Record type returned by getmoduleinfo().
ModuleInfo = namedtuple('ModuleInfo', 'name suffix mode module_type')
def getmoduleinfo(path):
    """Get the module name, suffix, mode, and module type for a given file.

    Returns None when the file name matches no known module suffix."""
    filename = os.path.basename(path)
    # Try longest suffixes first, in case one suffix is a tail of another.
    candidates = sorted((-len(sfx), sfx, mode, mtype)
                        for sfx, mode, mtype in imp.get_suffixes())
    for neglen, sfx, mode, mtype in candidates:
        if filename[neglen:] == sfx:
            return ModuleInfo(filename[:neglen], sfx, mode, mtype)
def getmodulename(path):
    """Return the module name for a given file, or None."""
    info = getmoduleinfo(path)
    return info[0] if info else None
def getsourcefile(object):
    """Return the Python source file an object was defined in, if it exists."""
    filename = getfile(object)
    if filename[-4:].lower() in ('.pyc', '.pyo'):
        # Map a compiled file name back to its source file.
        filename = filename[:-4] + '.py'
    # Refuse anything whose suffix marks it as a binary (non-text) file.
    if any('b' in mode and filename[-len(sfx):].lower() == sfx
           for sfx, mode, _mtype in imp.get_suffixes()):
        return None
    if os.path.exists(filename):
        return filename
    # Only return a non-existent filename if the module has a PEP 302 loader.
    if hasattr(getmodule(object, filename), '__loader__'):
        return filename
def getabsfile(object, _filename=None):
    """Return an absolute path to the source or compiled file for an object.

    The idea is for each object to have a unique origin, so this routine
    normalizes the result as much as possible."""
    filename = _filename
    if filename is None:
        filename = getsourcefile(object) or getfile(object)
    return os.path.normcase(os.path.abspath(filename))
# Caches used by getmodule(): absolute (and real) file name -> module name,
# and module name -> the __file__ value last seen for that module.
modulesbyfile = {}
_filesbymodname = {}
def getmodule(object, _filename=None):
    """Return the module an object was defined in, or None if not found.

    Results are cached in modulesbyfile/_filesbymodname so repeated
    lookups need not rescan sys.modules."""
    if ismodule(object):
        return object
    if hasattr(object, '__module__'):
        return sys.modules.get(object.__module__)
    # Try the filename to modulename cache
    if _filename is not None and _filename in modulesbyfile:
        return sys.modules.get(modulesbyfile[_filename])
    # Try the cache again with the absolute file name
    try:
        file = getabsfile(object, _filename)
    except TypeError:
        return None
    if file in modulesbyfile:
        return sys.modules.get(modulesbyfile[file])
    # Update the filename to module name cache and check yet again.
    # Snapshot sys.modules with list() in order to cope with changes while
    # iterating: the calls below can trigger imports, and mutating a dict
    # during iteration raises RuntimeError. (The original iterated the live
    # view even though its comment claimed a copy was taken.)
    for modname, module in list(sys.modules.items()):
        if ismodule(module) and hasattr(module, '__file__'):
            f = module.__file__
            if f == _filesbymodname.get(modname, None):
                # Have already mapped this module, so skip it
                continue
            _filesbymodname[modname] = f
            f = getabsfile(module)
            # Always map to the name the module knows itself by
            modulesbyfile[f] = modulesbyfile[
                os.path.realpath(f)] = module.__name__
    if file in modulesbyfile:
        return sys.modules.get(modulesbyfile[file])
    # Check the main module
    main = sys.modules['__main__']
    if not hasattr(object, '__name__'):
        return None
    if hasattr(main, object.__name__):
        mainobject = getattr(main, object.__name__)
        if mainobject is object:
            return main
    # Check builtins
    builtin = sys.modules['builtins']
    if hasattr(builtin, object.__name__):
        builtinobject = getattr(builtin, object.__name__)
        if builtinobject is object:
            return builtin
    return None
def findsource(object):
    """Return the entire source file and starting line number for an object.

    The argument may be a module, class, method, function, traceback, frame,
    or code object.  The source code is returned as a list of all the lines
    in the file and the line number indexes a line in that list.  An IOError
    is raised if the source code cannot be retrieved."""
    file = getsourcefile(object)
    if not file:
        raise IOError('source code not available')
    module = getmodule(object, file)
    if module:
        # Passing the module dict lets linecache consult a PEP 302 loader.
        lines = linecache.getlines(file, module.__dict__)
    else:
        lines = linecache.getlines(file)
    if not lines:
        raise IOError('could not get source code')

    if ismodule(object):
        return lines, 0

    if isclass(object):
        name = object.__name__
        # Class names are identifiers, so no re.escape is needed here.
        pat = re.compile(r'^(\s*)class\s*' + name + r'\b')
        # make some effort to find the best matching class definition:
        # use the one with the least indentation, which is the one
        # that's most probably not inside a function definition.
        candidates = []
        for i in range(len(lines)):
            match = pat.match(lines[i])
            if match:
                # if it's at toplevel, it's already the best one
                if lines[i][0] == 'c':
                    return lines, i
                # else add whitespace to candidate list
                candidates.append((match.group(1), i))
        if candidates:
            # this will sort by whitespace, and by line number,
            # less whitespace first
            candidates.sort()
            return lines, candidates[0][1]
        else:
            raise IOError('could not find class definition')

    # Unwrap down to the code object, which records its first line.
    if ismethod(object):
        object = object.__func__
    if isfunction(object):
        object = object.__code__
    if istraceback(object):
        object = object.tb_frame
    if isframe(object):
        object = object.f_code
    if iscode(object):
        if not hasattr(object, 'co_firstlineno'):
            raise IOError('could not find function definition')
        lnum = object.co_firstlineno - 1
        # Scan backwards for the def/lambda/decorator line that starts it.
        pat = re.compile(r'^(\s*def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)')
        while lnum > 0:
            if pat.match(lines[lnum]): break
            lnum = lnum - 1
        return lines, lnum
    raise IOError('could not find code object')
def getcomments(object):
    """Get lines of comments immediately preceding an object's source code.

    Returns None when source can't be found.
    """
    try:
        lines, lnum = findsource(object)
    except (IOError, TypeError):
        return None

    if ismodule(object):
        # Look for a comment block at the top of the file.
        start = 0
        if lines and lines[0][:2] == '#!': start = 1
        while start < len(lines) and lines[start].strip() in ('', '#'):
            start = start + 1
        if start < len(lines) and lines[start][:1] == '#':
            comments = []
            end = start
            while end < len(lines) and lines[end][:1] == '#':
                comments.append(lines[end].expandtabs())
                end = end + 1
            return ''.join(comments)

    # Look for a preceding block of comments at the same indentation.
    elif lnum > 0:
        indent = indentsize(lines[lnum])
        end = lnum - 1
        if end >= 0 and lines[end].lstrip()[:1] == '#' and \
            indentsize(lines[end]) == indent:
            # Walk upwards collecting same-indent comment lines (prepended,
            # so the result stays in file order).
            comments = [lines[end].expandtabs().lstrip()]
            if end > 0:
                end = end - 1
                comment = lines[end].expandtabs().lstrip()
                while comment[:1] == '#' and indentsize(lines[end]) == indent:
                    comments[:0] = [comment]
                    end = end - 1
                    if end < 0: break
                    comment = lines[end].expandtabs().lstrip()
            # Trim bare '#' separator lines from either end of the block.
            while comments and comments[0].strip() == '#':
                comments[:1] = []
            while comments and comments[-1].strip() == '#':
                comments[-1:] = []
            return ''.join(comments)
class EndOfBlock(Exception): pass  # raised by BlockFinder.tokeneater() to stop tokenizing
class BlockFinder:
    """Provide a tokeneater() method to detect the end of a code block."""
    def __init__(self):
        self.indent = 0        # current INDENT/DEDENT nesting depth
        self.islambda = False  # whether the block being scanned is a lambda
        self.started = False   # whether def/class/lambda has been seen yet
        self.passline = False  # whether to skip tokens until the next NEWLINE
        self.last = 1          # last line number known to be inside the block

    def tokeneater(self, type, token, srowcol, erowcol, line):
        if not self.started:
            # look for the first "def", "class" or "lambda"
            if token in ("def", "class", "lambda"):
                if token == "lambda":
                    self.islambda = True
                self.started = True
            self.passline = True    # skip to the end of the line
        elif type == tokenize.NEWLINE:
            self.passline = False   # stop skipping when a NEWLINE is seen
            self.last = srowcol[0]
            if self.islambda:       # lambdas always end at the first NEWLINE
                raise EndOfBlock
        elif self.passline:
            pass
        elif type == tokenize.INDENT:
            self.indent = self.indent + 1
            self.passline = True
        elif type == tokenize.DEDENT:
            self.indent = self.indent - 1
            # the end of matching indent/dedent pairs end a block
            # (note that this only works for "def"/"class" blocks,
            #  not e.g. for "if: else:" or "try: finally:" blocks)
            if self.indent <= 0:
                raise EndOfBlock
        elif self.indent == 0 and type not in (tokenize.COMMENT, tokenize.NL):
            # any other token on the same indentation level end the previous
            # block as well, except the pseudo-tokens COMMENT and NL.
            raise EndOfBlock
def getblock(lines):
    """Return the block of code at the top of the given list of lines.

    Tokenizes *lines* and feeds the tokens to a BlockFinder until it
    signals the end of the first block, then slices off those lines.
    """
    finder = BlockFinder()
    try:
        for tok in tokenize.generate_tokens(iter(lines).__next__):
            finder.tokeneater(*tok)
    except (EndOfBlock, IndentationError):
        # Either the block ended normally or the fragment is truncated;
        # in both cases 'finder.last' holds the best-known block end.
        pass
    return lines[:finder.last]
def getsourcelines(object):
    """Return a list of source lines and starting line number for an object.

    The argument may be a module, class, method, function, traceback,
    frame, or code object.  The result is ``(lines, lnum)``: the lines
    that make up the object's source and the 1-based number of the first
    one (0 for a whole module).  An IOError is raised if the source code
    cannot be retrieved.
    """
    all_lines, lnum = findsource(object)
    # A module's source is the whole file; anything else is the block
    # starting at its definition line.
    if ismodule(object):
        return all_lines, 0
    return getblock(all_lines[lnum:]), lnum + 1
def getsource(object):
    """Return the text of the source code for an object.

    The argument may be a module, class, method, function, traceback,
    frame, or code object.  The source code is returned as a single
    string.  An IOError is raised if it cannot be retrieved.
    """
    source_lines, _ = getsourcelines(object)
    return ''.join(source_lines)
# --------------------------------------------------- class tree extraction
def walktree(classes, children, parent):
    """Recursive helper function for getclasstree().

    Sorts *classes* in place by (module, name), then emits one
    ``(class, bases)`` entry per class, followed by a nested list for
    any classes recorded as its children.
    """
    classes.sort(key=lambda klass: (klass.__module__, klass.__name__))
    tree = []
    for klass in classes:
        tree.append((klass, klass.__bases__))
        if klass in children:
            tree.append(walktree(children[klass], children, klass))
    return tree
def getclasstree(classes, unique=False):
    """Arrange the given list of classes into a hierarchy of nested lists.

    Where a nested list appears, it contains classes derived from the
    class whose entry immediately precedes the list.  Each entry is a
    2-tuple of a class and a tuple of its base classes.  If *unique* is
    true, exactly one entry appears in the returned structure for each
    class in the given list; otherwise classes using multiple inheritance
    and their descendants will appear multiple times.
    """
    children = {}
    roots = []
    for klass in classes:
        bases = klass.__bases__
        if bases:
            for base in bases:
                children.setdefault(base, []).append(klass)
                # With unique=True, record each class under only one base.
                if unique and base in classes:
                    break
        elif klass not in roots:
            roots.append(klass)
    # Bases that were referenced but not listed become extra roots.
    for base in children:
        if base not in classes:
            roots.append(base)
    return walktree(roots, children, None)
# ------------------------------------------------ argument list extraction
# Result type for getargs(): argument names, *args name, **kwargs name.
Arguments = namedtuple('Arguments', 'args, varargs, varkw')


def getargs(co):
    """Get information about the arguments accepted by a code object.

    Returns an Arguments triple (args, varargs, varkw): 'args' is the
    list of argument names (keyword-only names appended, possibly
    containing nested lists); 'varargs' and 'varkw' are the names of the
    * and ** arguments or None.
    """
    positional, star_name, kwonly, starstar_name = _getfullargs(co)
    return Arguments(positional + kwonly, star_name, starstar_name)
def _getfullargs(co):
    """Get information about the arguments accepted by a code object.

    Four things are returned: (args, varargs, kwonlyargs, varkw), where
    'args' and 'kwonlyargs' are lists of argument names (with 'args'
    possibly containing nested lists), and 'varargs' and 'varkw' are the
    names of the * and ** arguments or None.

    Raises TypeError if *co* is not a code object.
    """
    if not iscode(co):
        raise TypeError('arg is not a code object')

    nargs = co.co_argcount
    names = co.co_varnames
    nkwargs = co.co_kwonlyargcount
    args = list(names[:nargs])
    kwonlyargs = list(names[nargs:nargs+nkwargs])
    # (removed an unused 'step = 0' local that served no purpose)

    # The *args slot, if present, follows the positional and
    # keyword-only slots in co_varnames.
    nargs += nkwargs
    varargs = None
    if co.co_flags & CO_VARARGS:
        varargs = co.co_varnames[nargs]
        nargs = nargs + 1
    # The **kwargs slot, if present, follows the *args slot.
    varkw = None
    if co.co_flags & CO_VARKEYWORDS:
        varkw = co.co_varnames[nargs]
    return args, varargs, kwonlyargs, varkw
# Result type for getargspec(): the legacy four-element argument spec.
ArgSpec = namedtuple('ArgSpec', 'args varargs keywords defaults')


def getargspec(func):
    """Get the names and default values of a function's arguments.

    Returns an ArgSpec 4-tuple (args, varargs, keywords, defaults):
    'args' lists the argument names (possibly nested), 'varargs' and
    'keywords' name the * and ** arguments or are None, and 'defaults'
    is an n-tuple of the default values of the last n arguments.

    Raises ValueError if *func* has keyword-only arguments or
    annotations; use getfullargspec() for those.
    """
    (args, varargs, varkw, defaults,
     kwonlyargs, kwonlydefaults, ann) = getfullargspec(func)
    if kwonlyargs or ann:
        raise ValueError("Function has keyword-only arguments or annotations"
                         ", use getfullargspec() API which can support them")
    return ArgSpec(args, varargs, varkw, defaults)
# Result type for getfullargspec(): the full seven-element argument spec.
FullArgSpec = namedtuple('FullArgSpec',
    'args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations')


def getfullargspec(func):
    """Get the names and default values of a function's arguments.

    Returns a FullArgSpec 7-tuple
    (args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults,
    annotations): 'args' lists argument names (possibly nested),
    'varargs'/'varkw' name the * and ** arguments or are None,
    'defaults' holds defaults for the last n positional arguments,
    'kwonlyargs' lists keyword-only names, 'kwonlydefaults' maps those
    to their defaults, and 'annotations' maps names to annotations.
    The first four items correspond to getargspec().

    Raises TypeError when *func* is not a Python function or method.
    """
    # Methods are inspected through their underlying function object.
    target = func.__func__ if ismethod(func) else func
    if not isfunction(target):
        raise TypeError('arg is not a Python function')
    args, varargs, kwonlyargs, varkw = _getfullargs(target.__code__)
    return FullArgSpec(args, varargs, varkw, target.__defaults__,
                       kwonlyargs, target.__kwdefaults__,
                       target.__annotations__)
# Result type for getargvalues(): argument names plus the frame's locals.
ArgInfo = namedtuple('ArgInfo', 'args varargs keywords locals')


def getargvalues(frame):
    """Get information about arguments passed into a particular frame.

    Returns an ArgInfo 4-tuple (args, varargs, keywords, locals):
    'args' lists the argument names (possibly nested), 'varargs' and
    'keywords' name the * and ** arguments or are None, and 'locals'
    is the locals dictionary of the given frame.
    """
    names = getargs(frame.f_code)
    return ArgInfo(names.args, names.varargs, names.varkw, frame.f_locals)
def joinseq(seq):
    """Render a sequence of strings as a parenthesized tuple display.

    A single element gets a trailing comma ('(a,)'); otherwise the
    elements are comma-joined ('(a, b)').
    """
    if len(seq) == 1:
        return '(' + seq[0] + ',)'
    else:
        return '(' + ', '.join(seq) + ')'


def strseq(object, convert, join=joinseq):
    """Recursively walk a sequence, stringifying each element.

    Lists and tuples are walked recursively and rendered with *join*;
    any other value is passed to *convert*.
    """
    if type(object) in (list, tuple):
        # BUGFIX: materialize the recursion into a list before calling
        # *join*.  The previous code passed a lazy map() iterator, but
        # join callbacks such as joinseq need len() and indexing, which
        # map objects do not support on Python 3 (TypeError).
        return join([strseq(item, convert, join) for item in object])
    else:
        return convert(object)
def formatannotation(annotation, base_module=None):
    """Format an annotation for display.

    Classes from 'builtins' or from *base_module* are shown by bare
    name; other classes are qualified with their module.  Non-class
    annotations fall back to repr().
    """
    if not isinstance(annotation, type):
        return repr(annotation)
    module = annotation.__module__
    if module in ('builtins', base_module):
        return annotation.__name__
    return module + '.' + annotation.__name__
def formatannotationrelativeto(object):
    """Return a formatannotation variant bound to *object*'s module.

    Classes defined in that module are then rendered without the module
    prefix.
    """
    module = getattr(object, '__module__', None)
    return lambda annotation: formatannotation(annotation, module)
def formatargspec(args, varargs=None, varkw=None, defaults=None,
                  kwonlyargs=(), kwonlydefaults={}, annotations={},
                  formatarg=str,
                  formatvarargs=lambda name: '*' + name,
                  formatvarkw=lambda name: '**' + name,
                  formatvalue=lambda value: '=' + repr(value),
                  formatreturns=lambda text: ' -> ' + text,
                  formatannotation=formatannotation,
                  join=joinseq):
    """Format an argument spec from the values returned by getargspec
    or getfullargspec.

    The first seven arguments are (args, varargs, varkw, defaults,
    kwonlyargs, kwonlydefaults, annotations). The other five arguments
    are the corresponding optional formatting functions that are called to
    turn names and values into strings. The last argument is an optional
    function to format the sequence of arguments.

    NOTE: the mutable defaults ({} for kwonlydefaults/annotations) are
    only ever read in this body, never mutated, so the shared-default
    pitfall does not apply here.
    """
    def formatargandannotation(arg):
        # Render one argument name, appending ': <annotation>' if any.
        result = formatarg(arg)
        if arg in annotations:
            result += ': ' + formatannotation(annotations[arg])
        return result
    specs = []
    if defaults:
        # Index of the first positional argument that has a default.
        firstdefault = len(args) - len(defaults)
    for i, arg in enumerate(args):
        spec = strseq(arg, formatargandannotation, join)
        if defaults and i >= firstdefault:
            spec = spec + formatvalue(defaults[i - firstdefault])
        specs.append(spec)
    if varargs is not None:
        specs.append(formatvarargs(formatargandannotation(varargs)))
    else:
        if kwonlyargs:
            # Keyword-only arguments with no *args need a bare '*'
            # separator in the rendered signature.
            specs.append('*')
    if kwonlyargs:
        for kwonlyarg in kwonlyargs:
            spec = formatargandannotation(kwonlyarg)
            if kwonlydefaults and kwonlyarg in kwonlydefaults:
                spec += formatvalue(kwonlydefaults[kwonlyarg])
            specs.append(spec)
    if varkw is not None:
        specs.append(formatvarkw(formatargandannotation(varkw)))
    result = '(' + ', '.join(specs) + ')'
    if 'return' in annotations:
        # Append the return annotation as ' -> <annotation>'.
        result += formatreturns(formatannotation(annotations['return']))
    return result
def formatargvalues(args, varargs, varkw, locals,
                    formatarg=str,
                    formatvarargs=lambda name: '*' + name,
                    formatvarkw=lambda name: '**' + name,
                    formatvalue=lambda value: '=' + repr(value),
                    join=joinseq):
    """Format an argument spec from the 4 values returned by getargvalues.

    The first four arguments are (args, varargs, varkw, locals).  The
    next four arguments are the corresponding optional formatting
    functions that are called to turn names and values into strings.
    The ninth argument is an optional function to format the sequence
    of arguments.
    """
    def convert(name, locals=locals,
                formatarg=formatarg, formatvalue=formatvalue):
        # Render one 'name=value' pair from the frame's locals.
        return formatarg(name) + formatvalue(locals[name])
    rendered = [strseq(arg, convert, join) for arg in args]
    if varargs:
        rendered.append(formatvarargs(varargs) + formatvalue(locals[varargs]))
    if varkw:
        rendered.append(formatvarkw(varkw) + formatvalue(locals[varkw]))
    return '(' + ', '.join(rendered) + ')'
# -------------------------------------------------- stack frame extraction
# Record type returned by getframeinfo(): location plus optional context.
Traceback = namedtuple('Traceback', 'filename lineno function code_context index')
def getframeinfo(frame, context=1):
    """Get information about a frame or traceback object.

    A tuple of five things is returned: the filename, the line number of
    the current line, the function name, a list of lines of context from
    the source code, and the index of the current line within that list.
    The optional second argument specifies the number of lines of context
    to return, which are centered around the current line.
    """
    if istraceback(frame):
        # Tracebacks carry their own line number; unwrap to the frame.
        lineno = frame.tb_lineno
        frame = frame.tb_frame
    else:
        lineno = frame.f_lineno
    if not isframe(frame):
        raise TypeError('arg is not a frame or traceback object')
    filename = getsourcefile(frame) or getfile(frame)
    if context > 0:
        # Center the (0-based) context window on the current line.
        start = lineno - 1 - context//2
        try:
            lines, lnum = findsource(frame)
        except IOError:
            # Source unavailable: report location without context lines.
            lines = index = None
        else:
            # Clamp the window to the bounds of the file.
            start = max(start, 1)
            start = max(0, min(start, len(lines) - context))
            lines = lines[start:start+context]
            index = lineno - 1 - start
    else:
        lines = index = None
    return Traceback(filename, lineno, frame.f_code.co_name, lines, index)
def getlineno(frame):
    """Get the line number from a frame object, allowing for optimization.

    The frame computes f_lineno on demand (it is a descriptor over the
    code object's line table), so this simply delegates to the attribute.
    """
    return frame.f_lineno
def getouterframes(frame, context=1):
    """Get a list of records for a frame and all higher (calling) frames.

    Each record contains a frame object, filename, line number, function
    name, a list of lines of context, and index within the context.
    """
    records = []
    current = frame
    # Walk outward through the callers via f_back until the stack ends.
    while current:
        records.append((current,) + getframeinfo(current, context))
        current = current.f_back
    return records
def getinnerframes(tb, context=1):
    """Get a list of records for a traceback's frame and all lower frames.

    Each record contains a frame object, filename, line number, function
    name, a list of lines of context, and index within the context.
    """
    records = []
    current = tb
    # Walk inward through the traceback chain via tb_next.
    while current:
        records.append((current.tb_frame,) + getframeinfo(current, context))
        current = current.tb_next
    return records
# Use the interpreter-level frame accessor when available (CPython);
# otherwise fall back to a stub that reports "no frame available".
currentframe = getattr(sys, '_getframe', lambda _=None: None)
def stack(context=1):
    """Return a list of records for the stack above the caller's frame."""
    # _getframe(1) is our caller's frame; walk outward from there.
    caller = sys._getframe(1)
    return getouterframes(caller, context)
def trace(context=1):
    """Return a list of records for the stack below the current exception."""
    # The third element of exc_info() is the active traceback (or None).
    exc_tb = sys.exc_info()[2]
    return getinnerframes(exc_tb, context)
| 39.600995 | 82 | 0.641674 |
79565d92319e3af81e292948d48a31d97edd24e2 | 6,562 | py | Python | src/skmultiflow/lazy/knn_classifier.py | lambertsbennett/scikit-multiflow | bc714fd5ee4f0a486adc00ec6ae39eafa64f81cc | [
"BSD-3-Clause"
] | 1 | 2020-04-16T10:17:03.000Z | 2020-04-16T10:17:03.000Z | src/skmultiflow/lazy/knn_classifier.py | lambertsbennett/scikit-multiflow | bc714fd5ee4f0a486adc00ec6ae39eafa64f81cc | [
"BSD-3-Clause"
] | null | null | null | src/skmultiflow/lazy/knn_classifier.py | lambertsbennett/scikit-multiflow | bc714fd5ee4f0a486adc00ec6ae39eafa64f81cc | [
"BSD-3-Clause"
] | null | null | null | from skmultiflow.core import ClassifierMixin
from skmultiflow.lazy.base_neighbors import BaseNeighbors
from skmultiflow.utils.utils import *
import warnings
def KNN(n_neighbors=5, max_window_size=1000,
        leaf_size=30):  # pragma: no cover
    """Deprecated factory kept for backwards compatibility.

    Emits a FutureWarning and returns a KNNClassifier configured with
    the same parameters.  Will be removed in v0.7.0.
    """
    warnings.warn("'KNN' has been renamed to 'KNNClassifier' in v0.5.0.\n"
                  "The old name will be removed in v0.7.0", category=FutureWarning)
    return KNNClassifier(n_neighbors=n_neighbors, max_window_size=max_window_size,
                         leaf_size=leaf_size)
class KNNClassifier(BaseNeighbors, ClassifierMixin):
    """ k-Nearest Neighbors classifier.

    This non-parametric classification method keeps track of the last
    ``max_window_size`` training samples. The predicted class-label for a
    given query sample is obtained in two steps:

    1. Find the closest n_neighbors to the query sample in the data window.
    2. Aggregate the class-labels of the n_neighbors to define the predicted
       class for the query sample.

    Parameters
    ----------
    n_neighbors: int (default=5)
        The number of nearest neighbors to search for.

    max_window_size: int (default=1000)
        The maximum size of the window storing the last observed samples.

    leaf_size: int (default=30)
        sklearn.KDTree parameter. The maximum number of samples that can
        be stored in one leaf node, which determines from which point the
        algorithm will switch for a brute-force approach. The bigger this
        number the faster the tree construction time, but the slower the
        query time will be.

    metric: string or sklearn.DistanceMetric object
        sklearn.KDTree parameter. The distance metric to use for the KDTree.
        Default='euclidean'. KNNClassifier.valid_metrics() gives a list of
        the metrics which are valid for KDTree.

    Notes
    -----
    This estimator is not optimal for a mixture of categorical and numerical
    features. This implementation treats all features from a given stream as
    numerical.

    Examples
    --------
    >>> # Imports
    >>> from skmultiflow.lazy import KNNClassifier
    >>> from skmultiflow.data import SEAGenerator
    >>> # Setting up the stream
    >>> stream = SEAGenerator(random_state=1, noise_percentage=.1)
    >>> knn = KNNClassifier(n_neighbors=8, max_window_size=2000, leaf_size=40)
    >>> # Keep track of sample count and correct prediction count
    >>> n_samples = 0
    >>> corrects = 0
    >>> while n_samples < 5000:
    ...     X, y = stream.next_sample()
    ...     my_pred = knn.predict(X)
    ...     if y[0] == my_pred[0]:
    ...         corrects += 1
    ...     knn = knn.partial_fit(X, y)
    ...     n_samples += 1
    >>>
    >>> # Displaying results
    >>> print('KNNClassifier usage example')
    >>> print('{} samples analyzed.'.format(n_samples))
    5000 samples analyzed.
    >>> print("KNNClassifier's performance: {}".format(corrects/n_samples))
    KNN's performance: 0.8776
    """

    def __init__(self,
                 n_neighbors=5,
                 max_window_size=1000,
                 leaf_size=30,
                 metric='euclidean'):
        super().__init__(n_neighbors=n_neighbors,
                         max_window_size=max_window_size,
                         leaf_size=leaf_size,
                         metric=metric)
        # Class labels observed so far; extended as new labels appear.
        self.classes = []

    def partial_fit(self, X, y, classes=None, sample_weight=None):
        """ Partially (incrementally) fit the model.

        Parameters
        ----------
        X: Numpy.ndarray of shape (n_samples, n_features)
            The data upon which the algorithm will create its model.

        y: Array-like
            An array-like containing the classification targets for all
            samples in X.

        classes: numpy.ndarray, optional (default=None)
            Array with all possible/known classes.

        sample_weight: Not used.

        Returns
        -------
        KNNClassifier
            self

        Notes
        -----
        For the K-Nearest Neighbors Classifier, fitting the model is the
        equivalent of inserting the newer samples in the observed window;
        once the window's size limit is reached, the oldest samples are
        dropped by the window itself.
        """
        r, _ = get_dimensions(X)

        if classes is not None:
            self.classes = list(set().union(self.classes, classes))

        for i in range(r):
            self.data_window.add_sample(X[i], y[i])

        return self

    def predict(self, X):
        """ Predict the class label for sample X

        Parameters
        ----------
        X: Numpy.ndarray of shape (n_samples, n_features)
            All the samples we want to predict the label for.

        Returns
        -------
        numpy.ndarray
            A 1D array of shape (, n_samples), containing the
            predicted class labels for all instances in X.
        """
        y_proba = self.predict_proba(X)
        # The most probable class per sample becomes its prediction.
        y_pred = np.argmax(y_proba, axis=1)
        return y_pred

    def predict_proba(self, X):
        """ Estimate the probability of X belonging to each class-labels.

        Parameters
        ----------
        X: Numpy.ndarray of shape (n_samples, n_features)

        Returns
        -------
        numpy.ndarray
            A 2D array of shape (n_samples, n_classes). Where each i-th row
            contains len(self.target_value) elements, representing the
            probability that the i-th sample of X belongs to a certain
            class label.
        """
        r, _ = get_dimensions(X)

        if self.data_window is None or self.data_window.size < self.n_neighbors:
            # The model is empty, defaulting to zero
            return np.zeros(shape=(r, 1))

        proba = []

        # BUGFIX: use the builtin ``int`` here instead of ``np.int``.
        # The ``np.int`` alias was deprecated in NumPy 1.20 and removed
        # in NumPy 1.24, so the old code raises AttributeError on modern
        # NumPy; the cast behavior is unchanged.
        self.classes = list(set().union(
            self.classes,
            np.unique(self.data_window.targets_buffer.astype(int))))

        _, new_ind = self._get_neighbors(X)
        for i in range(r):
            # One vote slot per known label; each neighbor contributes an
            # equal share of the probability mass for its own label.
            votes = [0.0 for _ in range(int(max(self.classes) + 1))]
            for index in new_ind[i]:
                votes[int(self.data_window.targets_buffer[index])] += 1. / len(new_ind[i])
            proba.append(votes)

        return np.asarray(proba)
| 35.47027 | 99 | 0.604084 |
79565dee0e59b7cadf994245015707f50ba2601c | 464 | py | Python | alumno/migrations/0003_alumno_verificador.py | nicolasmontenegro/emprenred | 906ea24e8b6357b109e24c140fd92fc24bb33e79 | [
"Apache-2.0"
] | null | null | null | alumno/migrations/0003_alumno_verificador.py | nicolasmontenegro/emprenred | 906ea24e8b6357b109e24c140fd92fc24bb33e79 | [
"Apache-2.0"
] | null | null | null | alumno/migrations/0003_alumno_verificador.py | nicolasmontenegro/emprenred | 906ea24e8b6357b109e24c140fd92fc24bb33e79 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-16 22:49
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds the 'verificador' CharField
    # (single character, may be blank) to the Alumno model.

    dependencies = [
        ('alumno', '0002_auto_20161116_1910'),
    ]
    operations = [
        migrations.AddField(
            model_name='alumno',
            name='verificador',
            field=models.CharField(blank=True, max_length=1),
        ),
    ]
| 22.095238 | 61 | 0.62069 |
79565e9a883bc37f61b760bc05b6ca63c7730ff5 | 5,455 | py | Python | h/services/group_create.py | bibliotechie/h | 16e275f79ef7d1086971bd30ef403501c6b93beb | [
"BSD-2-Clause"
] | null | null | null | h/services/group_create.py | bibliotechie/h | 16e275f79ef7d1086971bd30ef403501c6b93beb | [
"BSD-2-Clause"
] | null | null | null | h/services/group_create.py | bibliotechie/h | 16e275f79ef7d1086971bd30ef403501c6b93beb | [
"BSD-2-Clause"
] | null | null | null | from functools import partial
from h import session
from h.models import Group, GroupScope
from h.models.group import (
OPEN_GROUP_TYPE_FLAGS,
PRIVATE_GROUP_TYPE_FLAGS,
RESTRICTED_GROUP_TYPE_FLAGS,
)
class GroupCreateService:
    """Service that creates groups of the three supported types
    (private, open, restricted) and publishes the creation event."""

    def __init__(self, session, user_fetcher, publish):
        """
        Create a new GroupCreateService.

        :param session: the SQLAlchemy session object
        :param user_fetcher: a callable for fetching users by userid
        :param publish: a callable for publishing events
        """
        self.session = session
        self.user_fetcher = user_fetcher
        self.publish = publish

    def create_private_group(self, name, userid, **kwargs):
        """
        Create a new private group.

        A private group is readable and writeable only by its members.

        :param name: the human-readable name of the group
        :param userid: the userid of the group creator
        :param kwargs: optional attributes to set on the group, as keyword
                       arguments

        :returns: the created group
        """
        return self._create(
            name=name,
            userid=userid,
            type_flags=PRIVATE_GROUP_TYPE_FLAGS,
            scopes=[],
            add_creator_as_member=True,
            **kwargs
        )

    def create_open_group(self, name, userid, scopes, **kwargs):
        """
        Create a new open group.

        An open group can be read and written by anyone in the same
        authority.

        :param name: the human-readable name of the group
        :param userid: the userid of the group creator
        :param scopes: the list of URIs that the group will be scoped to
        :type scopes: list(str)
        :param kwargs: optional attributes to set on the group, as keyword
                       arguments

        :returns: the created group
        """
        return self._create(
            name=name,
            userid=userid,
            type_flags=OPEN_GROUP_TYPE_FLAGS,
            scopes=scopes,
            add_creator_as_member=False,
            **kwargs
        )

    def create_restricted_group(self, name, userid, scopes, **kwargs):
        """
        Create a new restricted group.

        A restricted group can be read by anyone in the same authority,
        but written only by its members.

        :param name: the human-readable name of the group
        :param userid: the userid of the group creator
        :param scopes: the list of URIs that the group will be scoped to
        :type scopes: list(str)
        :param kwargs: optional attributes to set on the group, as keyword
                       arguments

        :returns: the created group
        """
        return self._create(
            name=name,
            userid=userid,
            type_flags=RESTRICTED_GROUP_TYPE_FLAGS,
            scopes=scopes,
            add_creator_as_member=True,
            **kwargs
        )

    def _create(
        self, name, userid, type_flags, scopes, add_creator_as_member, **kwargs
    ):
        """
        Create a group of the given type and save it to the DB.

        :param name: the human-readable name of the group
        :param userid: the userid of the group creator
        :param type_flags: the type of this group
        :param scopes: the list of scopes (URIs) that the group will be scoped to
        :type scopes: list(str)
        :param add_creator_as_member: if the group creator should be added as a member
        :param kwargs: optional attributes to set on the group, as keyword
                       arguments
        """
        scope_uris = [] if scopes is None else scopes

        creator = self.user_fetcher(userid)
        group_scopes = [GroupScope(scope=uri) for uri in scope_uris]

        if "organization" in kwargs:
            # Groups may only belong to organizations of their creator's
            # authority.
            self._validate_authorities_match(
                creator.authority, kwargs["organization"].authority
            )

        group = Group(
            name=name,
            authority=creator.authority,
            creator=creator,
            joinable_by=type_flags.joinable_by,
            readable_by=type_flags.readable_by,
            writeable_by=type_flags.writeable_by,
            scopes=group_scopes,
            **kwargs
        )
        self.session.add(group)

        if add_creator_as_member:
            group.members.append(group.creator)

        # Flush the DB to generate group.pubid before publish()ing it.
        self.session.flush()

        self.publish("group-join", group.pubid, group.creator.userid)

        return group

    def _validate_authorities_match(self, group_authority, org_authority):
        # Reject any organization whose authority differs from the
        # creator's.
        if group_authority != org_authority:
            raise ValueError(
                "Organization's authority {} must match the group creator's authority {}.".format(
                    org_authority, group_authority
                )
            )
def group_create_factory(_context, request):
    """Return a GroupCreateService instance for the passed context and request."""
    return GroupCreateService(
        session=request.db,
        user_fetcher=request.find_service(name="user").fetch,
        publish=partial(_publish, request),
    )
def _publish(request, event_type, groupid, userid):
    """Publish a realtime user event describing a group-membership change."""
    payload = {
        "type": event_type,
        "session_model": session.model(request),
        "userid": userid,
        "group": groupid,
    }
    request.realtime.publish_user(payload)
| 31.531792 | 98 | 0.610449 |
79565ed7368641e703058e791c112a8c1e6247a4 | 44,977 | py | Python | salt/cloud/clouds/nova.py | l2ol33rt/salt | ff68bbd9f4bda992a3e039822fb32f141e94347c | [
"Apache-2.0"
] | null | null | null | salt/cloud/clouds/nova.py | l2ol33rt/salt | ff68bbd9f4bda992a3e039822fb32f141e94347c | [
"Apache-2.0"
] | null | null | null | salt/cloud/clouds/nova.py | l2ol33rt/salt | ff68bbd9f4bda992a3e039822fb32f141e94347c | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
'''
OpenStack Nova Cloud Module
===========================
OpenStack is an open source project that is in use by a number a cloud
providers, each of which have their own ways of using it.
The OpenStack Nova module for Salt Cloud was bootstrapped from the OpenStack
module for Salt Cloud, which uses a libcloud-based connection. The Nova module
is designed to use the nova and glance modules already built into Salt.
These modules use the Python novaclient and glanceclient libraries,
respectively. In order to use this module, the proper salt configuration must
also be in place. This can be specified in the master config, the minion
config, a set of grains or a set of pillars.
.. code-block:: yaml
my_openstack_profile:
keystone.user: admin
keystone.password: verybadpass
keystone.tenant: admin
keystone.auth_url: 'http://127.0.0.1:5000/v2.0/'
Note that there is currently a dependency upon netaddr. This can be installed
on Debian-based systems by means of the python-netaddr package.
This module currently requires the latest develop branch of Salt to be
installed.
This module has been tested to work with HP Cloud and Rackspace. See the
documentation for specific options for either of these providers. These
examples could be set up in the cloud configuration at
``/etc/salt/cloud.providers`` or
``/etc/salt/cloud.providers.d/openstack.conf``:
.. code-block:: yaml
my-openstack-config:
# The name of the configuration profile to use on said minion
config_profile: my_openstack_profile
ssh_key_name: mykey
driver: nova
userdata_file: /tmp/userdata.txt
To use keystoneauth1 instead of keystoneclient, include the `use_keystoneauth`
option in the provider config.
.. note:: this is required to use keystone v3 as for authentication.
.. code-block:: yaml
my-openstack-config:
use_keystoneauth: True
identity_url: 'https://controller:5000/v3'
auth_version: 3
compute_name: nova
compute_region: RegionOne
service_type: compute
verify: '/path/to/custom/certs/ca-bundle.crt'
tenant: admin
user: admin
password: passwordgoeshere
driver: nova
Note: by default the nova driver will attempt to verify its connection
utilizing the system certificates. If you need to verify against another bundle
of CA certificates or want to skip verification altogether you will need to
specify the verify option. You can specify True or False to verify (or not)
against system certificates, a path to a bundle or CA certs to check against, or
None to allow keystoneauth to search for the certificates on its own.(defaults to True)
For local installations that only use private IP address ranges, the
following option may be useful. Using the old syntax:
Note: For api use, you will need an auth plugin. The base novaclient does not
support apikeys, but some providers such as rackspace have extended keystone to
accept them
.. code-block:: yaml
my-openstack-config:
# Ignore IP addresses on this network for bootstrap
ignore_cidr: 192.168.50.0/24
my-nova:
identity_url: 'https://identity.api.rackspacecloud.com/v2.0/'
compute_region: IAD
user: myusername
password: mypassword
tenant: <userid>
driver: nova
my-api:
identity_url: 'https://identity.api.rackspacecloud.com/v2.0/'
compute_region: IAD
user: myusername
api_key: <api_key>
os_auth_plugin: rackspace
tenant: <userid>
driver: nova
networks:
- net-id: 47a38ff2-fe21-4800-8604-42bd1848e743
- net-id: 00000000-0000-0000-0000-000000000000
- net-id: 11111111-1111-1111-1111-111111111111
This is an example profile.
.. code-block:: yaml
debian8-2-iad-cloudqe4:
provider: cloudqe4-iad
size: performance1-2
image: Debian 8 (Jessie) (PVHVM)
script_args: -UP -p python-zmq git 2015.8
and one using cinder volumes already attached
.. code-block:: yaml
# create the block storage device
centos7-2-iad-rackspace:
provider: rackspace-iad
size: general1-2
block_device:
- source: image
id: <image_id>
dest: volume
size: 100
shutdown: <preserve/remove>
bootindex: 0
# with the volume already created
centos7-2-iad-rackspace:
provider: rackspace-iad
size: general1-2
boot_volume: <volume id>
# create the volume from a snapshot
centos7-2-iad-rackspace:
provider: rackspace-iad
size: general1-2
snapshot: <cinder snapshot id>
    # create an extra ephemeral disk
centos7-2-iad-rackspace:
provider: rackspace-iad
size: general1-2
ephemeral:
- size: 100
format: <swap/ext4>
    # create an extra swap disk
centos7-2-iad-rackspace:
provider: rackspace-iad
size: general1-2
swap: <size>
Block Device can also be used for having more than one block storage device attached
.. code-block:: yaml
centos7-2-iad-rackspace:
provider: rackspace-iad
size: general1-2
block_device:
- source: image
id: <image_id>
dest: volume
size: 100
shutdown: <preserve/remove>
bootindex: 0
- source: blank
dest: volume
device: xvdc
size: 100
shutdown: <preserve/remove>
Floating IPs can be auto assigned and ssh_interface can be set to fixed_ips, floating_ips, public_ips or private_ips
.. code-block:: yaml
centos7-2-iad-rackspace:
provider: rackspace-iad
size: general1-2
ssh_interface: floating_ips
floating_ip:
auto_assign: True
pool: public
Note: You must include the default net-ids when setting networks or the server
will be created without the rest of the interfaces
Note: For rackconnect v3, rackconnectv3 needs to be specified with the
rackconnect v3 cloud network as its variable.
'''
# pylint: disable=E0102
# Import python libs
from __future__ import absolute_import
import os
import logging
import socket
import pprint
import yaml
# Import Salt Libs
import salt.ext.six as six
import salt.utils
import salt.client
from salt.utils.openstack import nova
try:
import novaclient.exceptions
except ImportError as exc:
pass
# Import Salt Cloud Libs
from salt.cloud.libcloudfuncs import * # pylint: disable=W0614,W0401
import salt.utils.cloud
import salt.utils.pycrypto as sup
import salt.config as config
from salt.utils import namespaced_function
from salt.exceptions import (
SaltCloudConfigError,
SaltCloudNotFound,
SaltCloudSystemExit,
SaltCloudExecutionFailure,
SaltCloudExecutionTimeout
)
try:
from netaddr import all_matching_cidrs
HAS_NETADDR = True
except ImportError:
HAS_NETADDR = False
# Get logging started
log = logging.getLogger(__name__)
request_log = logging.getLogger('requests')
__virtualname__ = 'nova'
# Some of the libcloud functions need to be in the same namespace as the
# functions defined in the module, so we create new function objects inside
# this module namespace
# NOTE(review): namespaced_function appears to rebind the imported helpers
# so they resolve globals (e.g. get_conn) from this module -- confirm
# against salt.utils.namespaced_function before relying on this.
script = namespaced_function(script, globals())
reboot = namespaced_function(reboot, globals())
# Only load in this module if the Nova configurations are in place
def __virtual__():
    '''
    Load this driver only when the Nova provider configuration and its
    dependencies are present.
    '''
    level_name = __opts__.get('requests_log_level', 'warning').upper()
    request_log.setLevel(getattr(logging, level_name))
    if get_configured_provider() is False:
        return False
    if get_dependencies() is False:
        return False
    return __virtualname__
def get_configured_provider():
    '''
    Return the first configured instance.
    '''
    required_opts = ('user', 'tenant', 'identity_url', 'compute_region',)
    return config.is_provider_configured(
        __opts__,
        __active_provider_name__ or __virtualname__,
        required_opts,
    )
def get_dependencies():
    '''
    Warn if dependencies aren't met.
    '''
    return config.check_driver_dependencies(
        __virtualname__,
        {'netaddr': HAS_NETADDR, 'python-novaclient': nova.check_nova()},
    )
def get_conn():
    '''
    Return a conn object for the passed VM data

    Builds the keyword set expected by ``nova.SaltNova`` from the
    configured provider entry returned by get_configured_provider().
    '''
    vm_ = get_configured_provider()
    kwargs = vm_.copy()  # pylint: disable=E1103
    kwargs['username'] = vm_['user']
    kwargs['project_id'] = vm_['tenant']
    kwargs['auth_url'] = vm_['identity_url']
    kwargs['region_name'] = vm_['compute_region']
    kwargs['use_keystoneauth'] = vm_.get('use_keystoneauth', False)
    if 'password' in vm_:
        kwargs['password'] = vm_['password']
    # Fix: use the already-defaulted flag instead of indexing
    # vm_['use_keystoneauth'], which raised KeyError whenever 'verify'
    # was configured without 'use_keystoneauth'.
    if 'verify' in vm_ and kwargs['use_keystoneauth'] is True:
        kwargs['verify'] = vm_['verify']
    elif 'verify' in vm_ and kwargs['use_keystoneauth'] is False:
        log.warning('SSL Certificate verification option is specified but use_keystoneauth is False or not present')
    conn = nova.SaltNova(**kwargs)
    return conn
def avail_locations(conn=None, call=None):
    '''
    Return a list of locations
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The avail_locations function must be called with '
            '-f or --function, or with the --list-locations option'
        )
    if conn is None:
        conn = get_conn()
    # Map every compute endpoint by its region name.
    endpoints = nova.get_entry(conn.get_catalog(), 'type', 'compute')['endpoints']
    return dict((endpoint['region'], endpoint) for endpoint in endpoints)
def get_image(conn, vm_):
    '''
    Return the image object to use

    conn
        An active connection (see get_conn()).
    vm_
        The VM profile dict; its ``image`` value may be an image ID or an
        image name.

    Returns the image ID string, or None when no image is configured
    (boot-from-volume profiles).  Raises SaltCloudNotFound when the image
    cannot be resolved.
    '''
    # 'salt-cloud-force-ascii' is a codec error handler — presumably
    # registered elsewhere in salt.utils.cloud; TODO confirm.
    vm_image = config.get_cloud_config_value('image', vm_, __opts__, default='').encode(
        'ascii', 'salt-cloud-force-ascii'
    )
    if not vm_image:
        log.debug('No image set, must be boot from volume')
        return None
    image_list = conn.image_list()
    for img in image_list:
        # Accept a match on either the image ID or the image name (the
        # dict key).
        if vm_image in (image_list[img]['id'], img):
            return image_list[img]['id']
    try:
        # Fall back to querying the image directly.
        image = conn.image_show(vm_image)
        return image['id']
    except novaclient.exceptions.NotFound as exc:
        raise SaltCloudNotFound(
            'The specified image, \'{0}\', could not be found: {1}'.format(
                vm_image,
                str(exc)
            )
        )
def get_block_mapping_opts(vm_):
    '''
    Collect every block-device-related profile option into a single dict
    suitable for passing to the boot call.
    '''
    option_defaults = (
        ('block_device_mapping', {}),
        ('block_device', []),
        ('ephemeral', []),
        ('swap', None),
        ('snapshot', None),
        ('boot_volume', None),
    )
    ret = {}
    for opt, fallback in option_defaults:
        ret[opt] = config.get_cloud_config_value(opt, vm_, __opts__, default=fallback)
    return ret
def show_instance(name, call=None):
    '''
    Show the details from the provider concerning an instance
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            'The show_instance action must be called with -a or --action.'
        )
    node = get_conn().show_instance(name).__dict__
    # Refresh the local node cache with what the provider reported.
    __utils__['cloud.cache_node'](node, __active_provider_name__, __opts__)
    return node
def get_size(conn, vm_):
    '''
    Return the VM's size object

    Resolves the profile's ``size`` value against the provider's flavor
    list, matching on either flavor ID or flavor name, and returns the
    flavor ID.  Raises SaltCloudNotFound when no flavor matches.
    '''
    sizes = conn.list_sizes()
    vm_size = config.get_cloud_config_value('size', vm_, __opts__)
    if not vm_size:
        # NOTE(review): ``sizes`` appears to be a dict keyed by flavor
        # name, so ``sizes[0]`` looks up a key literally named 0 — confirm
        # whether this fallback is ever reachable with a real provider.
        return sizes[0]
    for size in sizes:
        # Match on either the flavor ID or the flavor name (the dict key).
        if vm_size and str(vm_size) in (str(sizes[size]['id']), str(size)):
            return sizes[size]['id']
    raise SaltCloudNotFound(
        'The specified size, \'{0}\', could not be found.'.format(vm_size)
    )
def preferred_ip(vm_, ips):
    '''
    Return the preferred Internet protocol. Either 'ipv4' (default) or 'ipv6'.

    Scans *ips* and returns the first entry that parses as an address of
    the configured family; returns False when none does.
    '''
    proto = config.get_cloud_config_value(
        'protocol', vm_, __opts__, default='ipv4', search_global=False
    )
    family = socket.AF_INET
    if proto == 'ipv6':
        family = socket.AF_INET6
    for ip in ips:
        try:
            socket.inet_pton(family, ip)
            return ip
        except (socket.error, TypeError, ValueError):
            # Narrowed from a bare ``except Exception``: inet_pton raises
            # socket.error for malformed addresses; TypeError/ValueError
            # cover non-string entries in *ips*.
            continue
    return False
def ignore_cidr(vm_, ip):
    '''
    Return True if we are to ignore the specified IP. Compatible with IPv4.
    '''
    if HAS_NETADDR is False:
        log.error('Error: netaddr is not installed')
        return 'Error: netaddr is not installed'
    cidr = config.get_cloud_config_value(
        'ignore_cidr', vm_, __opts__, default='', search_global=False
    )
    matched = cidr != '' and all_matching_cidrs(ip, [cidr])
    if matched:
        log.warning(
            'IP "{0}" found within "{1}"; ignoring it.'.format(ip, cidr)
        )
        return True
    return False
def ssh_interface(vm_):
    '''
    Which address class salt-cloud should SSH to: 'public_ips' (default),
    'private_ips', 'floating_ips' or 'fixed_ips'.
    '''
    interface = config.get_cloud_config_value(
        'ssh_interface',
        vm_,
        __opts__,
        search_global=False,
        default='public_ips',
    )
    return interface
def rackconnect(vm_):
    '''
    Whether to wait for Rackconnect (v2) automation to finish before
    continuing. Defaults to False.
    '''
    flag = config.get_cloud_config_value(
        'rackconnect',
        vm_,
        __opts__,
        search_global=False,
        default=False,
    )
    return flag
def rackconnectv3(vm_):
    '''
    The Rackconnect v3 cloud network name configured for this server, or
    False when Rackconnect v3 is not in use.
    '''
    network_name = config.get_cloud_config_value(
        'rackconnectv3',
        vm_,
        __opts__,
        search_global=False,
        default=False,
    )
    return network_name
def cloudnetwork(vm_):
    '''
    Whether an extra (cloud) network should be used for bootstrapping.
    Defaults to False.
    '''
    flag = config.get_cloud_config_value(
        'cloudnetwork',
        vm_,
        __opts__,
        search_global=False,
        default=False,
    )
    return flag
def managedcloud(vm_):
    '''
    Whether to wait for managed-cloud automation to finish before
    continuing. Defaults to False.
    '''
    flag = config.get_cloud_config_value(
        'managedcloud',
        vm_,
        __opts__,
        search_global=False,
        default=False,
    )
    return flag
def destroy(name, conn=None, call=None):
    '''
    Delete a single VM

    name
        Name of the server to destroy.
    conn
        Optional pre-existing connection; one is created when omitted.
    call
        Must not be 'function' (use -d/--destroy or -a/--action).
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )
    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    if not conn:
        conn = get_conn()  # pylint: disable=E0602
    node = conn.server_by_name(name)
    profiles = get_configured_provider()['profiles']  # pylint: disable=E0602
    if node is None:
        log.error('Unable to find the VM {0}'.format(name))
        # Fix: bail out here — previously execution fell through and
        # crashed with an AttributeError on ``node.extra`` below.
        return False
    profile = None
    if 'metadata' in node.extra and 'profile' in node.extra['metadata']:
        profile = node.extra['metadata']['profile']
    # Optionally flush this minion's data from the Salt Mine first.
    flush_mine_on_destroy = False
    if profile and profile in profiles and 'flush_mine_on_destroy' in profiles[profile]:
        flush_mine_on_destroy = profiles[profile]['flush_mine_on_destroy']
    if flush_mine_on_destroy:
        log.info('Clearing Salt Mine: {0}'.format(name))
        salt_client = salt.client.get_local_client(__opts__['conf_file'])
        minions = salt_client.cmd(name, 'mine.flush')
    log.info('Clearing Salt Mine: {0}, {1}'.format(
        name,
        flush_mine_on_destroy
    ))
    log.info('Destroying VM: {0}'.format(name))
    ret = conn.delete(node.id)
    if ret:
        log.info('Destroyed VM: {0}'.format(name))
        # Fire destroy action
        __utils__['cloud.fire_event'](
            'event',
            'destroyed instance',
            'salt/cloud/{0}/destroyed'.format(name),
            args={'name': name},
            sock_dir=__opts__['sock_dir'],
            transport=__opts__['transport']
        )
        if __opts__.get('delete_sshkeys', False) is True:
            salt.utils.cloud.remove_sshkey(getattr(node, __opts__.get('ssh_interface', 'public_ips'))[0])
        if __opts__.get('update_cachedir', False) is True:
            __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__)
        __utils__['cloud.cachedir_index_del'](name)
        return True
    log.error('Failed to Destroy VM: {0}'.format(name))
    return False
def request_instance(vm_=None, call=None):
    '''
    Put together all of the information necessary to request an instance
    through Novaclient and then fire off the request the instance.

    Returns data about the instance
    '''
    if call == 'function':
        # Technically this function may be called other ways too, but it
        # definitely cannot be called with --function.
        raise SaltCloudSystemExit(
            'The request_instance action must be called with -a or --action.'
        )
    log.info('Creating Cloud VM {0}'.format(vm_['name']))
    salt.utils.cloud.check_name(vm_['name'], 'a-zA-Z0-9._-')
    conn = get_conn()
    kwargs = vm_.copy()
    # Resolve the image; get_image() returns None for boot-from-volume.
    try:
        kwargs['image_id'] = get_image(conn, vm_)
    except Exception as exc:
        raise SaltCloudSystemExit(
            'Error creating {0} on OPENSTACK\n\n'
            'Could not find image {1}: {2}\n'.format(
                vm_['name'], vm_['image'], exc
            )
        )
    # Resolve the flavor (size).
    try:
        kwargs['flavor_id'] = get_size(conn, vm_)
    except Exception as exc:
        raise SaltCloudSystemExit(
            'Error creating {0} on OPENSTACK\n\n'
            'Could not find size {1}: {2}\n'.format(
                vm_['name'], vm_['size'], exc
            )
        )
    kwargs['key_name'] = config.get_cloud_config_value(
        'ssh_key_name', vm_, __opts__, search_global=False
    )
    security_groups = config.get_cloud_config_value(
        'security_groups', vm_, __opts__, search_global=False
    )
    if security_groups is not None:
        # Validate every requested security group against the groups that
        # actually exist on the provider before booting.
        vm_groups = security_groups
        avail_groups = conn.secgroup_list()
        group_list = []
        for vmg in vm_groups:
            if vmg in [name for name, details in six.iteritems(avail_groups)]:
                group_list.append(vmg)
            else:
                raise SaltCloudNotFound(
                    'No such security group: \'{0}\''.format(vmg)
                )
        kwargs['security_groups'] = group_list
    avz = config.get_cloud_config_value(
        'availability_zone', vm_, __opts__, default=None, search_global=False
    )
    if avz is not None:
        kwargs['availability_zone'] = avz
    kwargs['nics'] = config.get_cloud_config_value(
        'networks', vm_, __opts__, search_global=False, default=None
    )
    # Files to inject into the instance: each value is either a path on
    # the local machine (read and inlined) or the literal file contents.
    files = config.get_cloud_config_value(
        'files', vm_, __opts__, search_global=False
    )
    if files:
        kwargs['files'] = {}
        for src_path in files:
            if os.path.exists(files[src_path]):
                with salt.utils.fopen(files[src_path], 'r') as fp_:
                    kwargs['files'][src_path] = fp_.read()
            else:
                kwargs['files'][src_path] = files[src_path]
    userdata_file = config.get_cloud_config_value(
        'userdata_file', vm_, __opts__, search_global=False, default=None
    )
    if userdata_file is not None:
        # Userdata is rendered through salt's template engine first.
        try:
            with salt.utils.fopen(userdata_file, 'r') as fp_:
                kwargs['userdata'] = salt.utils.cloud.userdata_template(
                    __opts__, vm_, fp_.read()
                )
        except Exception as exc:
            log.exception(
                'Failed to read userdata from %s: %s', userdata_file, exc)
    kwargs['config_drive'] = config.get_cloud_config_value(
        'config_drive', vm_, __opts__, search_global=False
    )
    kwargs.update(get_block_mapping_opts(vm_))
    event_kwargs = {
        'name': kwargs['name'],
        'image': kwargs.get('image_id', 'Boot From Volume'),
        'size': kwargs['flavor_id'],
    }
    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(vm_['name']),
        args={
            'kwargs': __utils__['cloud.filter_event']('requesting', event_kwargs, event_kwargs.keys()),
        },
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    try:
        data = conn.boot(**kwargs)
    except Exception as exc:
        raise SaltCloudSystemExit(
            'Error creating {0} on Nova\n\n'
            'The following exception was thrown by libcloud when trying to '
            'run the initial deployment: {1}\n'.format(
                vm_['name'], exc
            )
        )
    # Without either an admin password or an SSH key there is no way to
    # bootstrap the node later.
    if data.extra.get('password', None) is None and vm_.get('key_filename', None) is None:
        raise SaltCloudSystemExit('No password returned. Set ssh_key_file.')
    floating_ip_conf = config.get_cloud_config_value('floating_ip',
                                                     vm_,
                                                     __opts__,
                                                     search_global=False,
                                                     default={})
    if floating_ip_conf.get('auto_assign', False):
        # Reuse an unassigned floating IP from the configured pool, or
        # allocate a fresh one.
        pool = floating_ip_conf.get('pool', 'public')
        floating_ip = None
        for fl_ip, opts in six.iteritems(conn.floating_ip_list()):
            if opts['fixed_ip'] is None and opts['pool'] == pool:
                floating_ip = fl_ip
                break
        if floating_ip is None:
            floating_ip = conn.floating_ip_create(pool)['ip']

        def __query_node_data(vm_):
            # Poll helper for wait_for_ip(): truthy once ACTIVE, None to
            # keep waiting, False on lookup failure (aborts the wait).
            try:
                node = show_instance(vm_['name'], 'action')
                log.debug(
                    'Loaded node data for {0}:\n{1}'.format(
                        vm_['name'],
                        pprint.pformat(node)
                    )
                )
            except Exception as err:
                log.error(
                    'Failed to get nodes list: {0}'.format(
                        err
                    ),
                    # Show the traceback if the debug logging level is enabled
                    exc_info_on_loglevel=logging.DEBUG
                )
                # Trigger a failure in the wait for IP function
                return False
            return node['state'] == 'ACTIVE' or None

        # if we associate the floating ip here,then we will fail.
        # As if we attempt to associate a floating IP before the Nova instance has completed building,
        # it will fail.So we should associate it after the Nova instance has completed building.
        try:
            salt.utils.cloud.wait_for_ip(
                __query_node_data,
                update_args=(vm_,)
            )
        except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
            try:
                # It might be already up, let's destroy it!
                destroy(vm_['name'])
            except SaltCloudSystemExit:
                pass
            finally:
                raise SaltCloudSystemExit(str(exc))

        try:
            conn.floating_ip_associate(vm_['name'], floating_ip)
            vm_['floating_ip'] = floating_ip
        except Exception as exc:
            raise SaltCloudSystemExit(
                'Error assigning floating_ip for {0} on Nova\n\n'
                'The following exception was thrown by libcloud when trying to '
                'assing a floating ip: {1}\n'.format(
                    vm_['name'], exc
                )
            )
    if not vm_.get('password', None):
        vm_['password'] = data.extra.get('password', '')
    return data, vm_
def _query_node_data(vm_, data, conn):
    '''
    Poll helper used by wait_for_ip() in create().

    Returns the (possibly updated) ``data`` object once a usable IP has
    been determined, ``False`` on lookup failure (aborts the wait), or
    ``None`` to keep polling.
    '''
    try:
        node = show_instance(vm_['name'], 'action')
        log.debug('Loaded node data for {0}:'
                  '\n{1}'.format(vm_['name'], pprint.pformat(node)))
    except Exception as err:
        # Show the traceback if the debug logging level is enabled
        log.error('Failed to get nodes list: {0}'.format(err),
                  exc_info_on_loglevel=logging.DEBUG)
        # Trigger a failure in the wait for IP function
        return False
    running = node['state'] == 'ACTIVE'
    if not running:
        # Still not running, trigger another iteration
        return
    if rackconnect(vm_) is True:
        extra = node.get('extra', {})
        rc_status = extra.get('metadata', {}).get('rackconnect_automation_status', '')
        if rc_status != 'DEPLOYED':
            log.debug('Waiting for Rackconnect automation to complete')
            return
    if managedcloud(vm_) is True:
        extra = conn.server_show_libcloud(node['id']).extra
        mc_status = extra.get('metadata', {}).get('rax_service_level_automation', '')
        if mc_status != 'Complete':
            log.debug('Waiting for managed cloud automation to complete')
            return
    access_ip = node.get('extra', {}).get('access_ip', '')
    rcv3 = rackconnectv3(vm_) in node['addresses']
    sshif = ssh_interface(vm_) in node['addresses']
    if any((rcv3, sshif)):
        networkname = rackconnectv3(vm_) if rcv3 else ssh_interface(vm_)
        for network in node['addresses'].get(networkname, []):
            # Fix: compare with == instead of ``is``; identity comparison
            # against an int literal relies on CPython small-int caching
            # and raises SyntaxWarning on modern Pythons.
            if network['version'] == 4:
                access_ip = network['addr']
                break
        vm_['cloudnetwork'] = True
    # Conditions to pass this
    #
    # Rackconnect v2: vm_['rackconnect'] = True
    # If this is True, then the server will not be accessible from the ipv4 addres in public_ips.
    # That interface gets turned off, and an ipv4 from the dedicated firewall is routed to the
    # server. In this case we can use the private_ips for ssh_interface, or the access_ip.
    #
    # Rackconnect v3: vm['rackconnectv3'] = <cloudnetwork>
    # If this is the case, salt will need to use the cloud network to login to the server. There
    # is no ipv4 address automatically provisioned for these servers when they are booted. SaltCloud
    # also cannot use the private_ips, because that traffic is dropped at the hypervisor.
    #
    # CloudNetwork: vm['cloudnetwork'] = True
    # If this is True, then we should have an access_ip at this point set to the ip on the cloud
    # network. If that network does not exist in the 'addresses' dictionary, then SaltCloud will
    # use the initial access_ip, and not overwrite anything.
    if (any((cloudnetwork(vm_), rackconnect(vm_)))
            and (ssh_interface(vm_) != 'private_ips' or rcv3)
            and access_ip != ''):
        data.public_ips = [access_ip]
        return data
    result = []
    # With no IPs reported at all, fall back to the provider's access_ip.
    if ('private_ips' not in node
            and 'public_ips' not in node
            and 'floating_ips' not in node
            and 'fixed_ips' not in node
            and 'access_ip' in node.get('extra', {})):
        result = [node['extra']['access_ip']]
    private = node.get('private_ips', [])
    public = node.get('public_ips', [])
    fixed = node.get('fixed_ips', [])
    floating = node.get('floating_ips', [])
    if private and not public:
        # Some providers report public addresses under private_ips;
        # reclassify globally-routable ones.
        log.warning('Private IPs returned, but not public. '
                    'Checking for misidentified IPs')
        for private_ip in private:
            private_ip = preferred_ip(vm_, [private_ip])
            if private_ip is False:
                continue
            if salt.utils.cloud.is_public_ip(private_ip):
                log.warning('{0} is a public IP'.format(private_ip))
                data.public_ips.append(private_ip)
                log.warning('Public IP address was not ready when we last checked. '
                            'Appending public IP address now.')
                public = data.public_ips
            else:
                log.warning('{0} is a private IP'.format(private_ip))
                ignore_ip = ignore_cidr(vm_, private_ip)
                if private_ip not in data.private_ips and not ignore_ip:
                    result.append(private_ip)
    # populate return data with private_ips
    # when ssh_interface is set to private_ips and public_ips exist
    if not result and ssh_interface(vm_) == 'private_ips':
        for private_ip in private:
            ignore_ip = ignore_cidr(vm_, private_ip)
            if private_ip not in data.private_ips and not ignore_ip:
                result.append(private_ip)
    non_private_ips = []
    if public:
        data.public_ips = public
        if ssh_interface(vm_) == 'public_ips':
            non_private_ips.append(public)
    if floating:
        data.floating_ips = floating
        if ssh_interface(vm_) == 'floating_ips':
            non_private_ips.append(floating)
    if fixed:
        data.fixed_ips = fixed
        if ssh_interface(vm_) == 'fixed_ips':
            non_private_ips.append(fixed)
    if non_private_ips:
        log.debug('result = {0}'.format(non_private_ips))
        data.private_ips = result
        if ssh_interface(vm_) != 'private_ips':
            return data
    if result:
        log.debug('result = {0}'.format(result))
        data.private_ips = result
        if ssh_interface(vm_) == 'private_ips':
            return data
def create(vm_):
    '''
    Create a single VM from a data dict
    '''
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(__opts__,
                                                           __active_provider_name__ or 'nova',
                                                           vm_['profile'],
                                                           vm_=vm_) is False:
            return False
    except AttributeError:
        pass
    # NOTE(review): ``deploy`` is read here but never used in this
    # function — confirm whether it is still needed.
    deploy = config.get_cloud_config_value('deploy', vm_, __opts__)
    key_filename = config.get_cloud_config_value(
        'ssh_key_file', vm_, __opts__, search_global=False, default=None
    )
    if key_filename is not None and not os.path.isfile(key_filename):
        raise SaltCloudConfigError(
            'The defined ssh_key_file \'{0}\' does not exist'.format(
                key_filename
            )
        )
    vm_['key_filename'] = key_filename
    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(vm_['name']),
        args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    conn = get_conn()
    if 'instance_id' in vm_:
        # This was probably created via another process, and doesn't have
        # things like salt keys created yet, so let's create them now.
        if 'pub_key' not in vm_ and 'priv_key' not in vm_:
            log.debug('Generating minion keys for \'{0[name]}\''.format(vm_))
            vm_['priv_key'], vm_['pub_key'] = salt.utils.cloud.gen_keys(
                salt.config.get_cloud_config_value(
                    'keysize',
                    vm_,
                    __opts__
                )
            )
        data = conn.server_show_libcloud(vm_['instance_id'])
        if vm_['key_filename'] is None and 'change_password' in __opts__ and __opts__['change_password'] is True:
            vm_['password'] = sup.secure_password()
            conn.root_password(vm_['instance_id'], vm_['password'])
    else:
        # Put together all of the information required to request the instance,
        # and then fire off the request for it
        data, vm_ = request_instance(vm_)
        # Pull the instance ID, valid for both spot and normal instances
        vm_['instance_id'] = data.id
    try:
        # Block until the node reports a usable IP (see _query_node_data).
        data = salt.utils.cloud.wait_for_ip(
            _query_node_data,
            update_args=(vm_, data, conn),
            timeout=config.get_cloud_config_value(
                'wait_for_ip_timeout', vm_, __opts__, default=10 * 60),
            interval=config.get_cloud_config_value(
                'wait_for_ip_interval', vm_, __opts__, default=10),
        )
    except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
        try:
            # It might be already up, let's destroy it!
            destroy(vm_['name'])
        except SaltCloudSystemExit:
            pass
        finally:
            raise SaltCloudSystemExit(str(exc))
    log.debug('VM is now running')
    # Pick the address used to SSH in for bootstrapping ...
    if ssh_interface(vm_) == 'private_ips':
        ip_address = preferred_ip(vm_, data.private_ips)
    elif ssh_interface(vm_) == 'fixed_ips':
        ip_address = preferred_ip(vm_, data.fixed_ips)
    elif ssh_interface(vm_) == 'floating_ips':
        ip_address = preferred_ip(vm_, data.floating_ips)
    else:
        ip_address = preferred_ip(vm_, data.public_ips)
    log.debug('Using IP address {0}'.format(ip_address))
    # ... and the address the minion will use to reach the master.
    if salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'private_ips':
        salt_ip_address = preferred_ip(vm_, data.private_ips)
        log.info('Salt interface set to: {0}'.format(salt_ip_address))
    elif salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'fixed_ips':
        salt_ip_address = preferred_ip(vm_, data.fixed_ips)
        log.info('Salt interface set to: {0}'.format(salt_ip_address))
    elif salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'floating_ips':
        salt_ip_address = preferred_ip(vm_, data.floating_ips)
        log.info('Salt interface set to: {0}'.format(salt_ip_address))
    else:
        salt_ip_address = preferred_ip(vm_, data.public_ips)
        log.debug('Salt interface set to: {0}'.format(salt_ip_address))
    if not ip_address:
        raise SaltCloudSystemExit('A valid IP address was not found')
    vm_['ssh_host'] = ip_address
    vm_['salt_host'] = salt_ip_address
    ret = __utils__['cloud.bootstrap'](vm_, __opts__)
    ret.update(data.__dict__)
    # Never leak the admin password into returned/cached data.
    if 'password' in ret['extra']:
        del ret['extra']['password']
    log.info('Created Cloud VM \'{0[name]}\''.format(vm_))
    log.debug(
        '\'{0[name]}\' VM creation details:\n{1}'.format(
            vm_, pprint.pformat(data.__dict__)
        )
    )
    event_data = {
        'name': vm_['name'],
        'profile': vm_['profile'],
        'provider': vm_['driver'],
        'instance_id': vm_['instance_id'],
        'floating_ips': data.floating_ips,
        'fixed_ips': data.fixed_ips,
        'private_ips': data.private_ips,
        'public_ips': data.public_ips
    }
    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(vm_['name']),
        args=__utils__['cloud.filter_event']('created', event_data, event_data.keys()),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    __utils__['cloud.cachedir_index_add'](vm_['name'], vm_['profile'], 'nova', vm_['driver'])
    return ret
def avail_images():
    '''
    Return a dict of all available VM images on the cloud provider.
    '''
    return get_conn().image_list()
def avail_sizes():
    '''
    Return a dict of all available VM sizes on the cloud provider.
    '''
    return get_conn().flavor_list()
def list_nodes(call=None, **kwargs):
    '''
    Return a list of the VMs that in this location
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes function must be called with -f or --function.'
        )
    ret = {}
    conn = get_conn()
    server_list = conn.server_list()
    if not server_list:
        return {}
    for server in server_list:
        server_tmp = conn.server_show(server_list[server]['id']).get(server)
        # If the server is deleted while looking it up, skip
        if server_tmp is None:
            continue
        private = []
        public = []
        if 'addresses' not in server_tmp:
            server_tmp['addresses'] = {}
        for network in server_tmp['addresses']:
            for address in server_tmp['addresses'][network]:
                # Classify each address: globally-routable addresses and
                # anything containing ':' (IPv6) count as public;
                # remaining dotted-quad addresses count as private.
                if salt.utils.cloud.is_public_ip(address.get('addr', '')):
                    public.append(address['addr'])
                elif ':' in address['addr']:
                    public.append(address['addr'])
                elif '.' in address['addr']:
                    private.append(address['addr'])
        # The dedicated access IPs are classified the same way.
        if server_tmp['accessIPv4']:
            if salt.utils.cloud.is_public_ip(server_tmp['accessIPv4']):
                public.append(server_tmp['accessIPv4'])
            else:
                private.append(server_tmp['accessIPv4'])
        if server_tmp['accessIPv6']:
            public.append(server_tmp['accessIPv6'])
        ret[server] = {
            'id': server_tmp['id'],
            'image': server_tmp['image']['id'],
            'size': server_tmp['flavor']['id'],
            'state': server_tmp['state'],
            'private_ips': private,
            'public_ips': public,
        }
    return ret
def list_nodes_full(call=None, **kwargs):
    '''
    Return a list of the VMs that in this location
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            (
                'The list_nodes_full function must be called with'
                ' -f or --function.'
            )
        )
    ret = {}
    conn = get_conn()
    server_list = conn.server_list()
    if not server_list:
        return {}
    for server in server_list:
        try:
            ret[server] = conn.server_show_libcloud(
                server_list[server]['id']
            ).__dict__
        except IndexError as exc:
            # NOTE(review): this resets everything gathered so far rather
            # than skipping just the failing server — confirm intent.
            ret = {}
    __utils__['cloud.cache_node_list'](ret, __active_provider_name__.split(':')[0], __opts__)
    return ret
def list_nodes_min(call=None, **kwargs):
    '''
    Return a list of the VMs that in this location
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            (
                'The list_nodes_min function must be called with'
                ' -f or --function.'
            )
        )
    server_list = get_conn().server_list_min()
    return server_list if server_list else {}
def list_nodes_select(call=None):
    '''
    Return a list of the VMs that are on the provider, with select fields
    '''
    selection = __opts__['query.selection']
    return salt.utils.cloud.list_nodes_select(
        list_nodes_full(), selection, call,
    )
def volume_create(name, size=100, snapshot=None, voltype=None, **kwargs):
    '''
    Create block storage device
    '''
    conn = get_conn()
    return conn.volume_create(
        name=name,
        size=size,
        snapshot=snapshot,
        voltype=voltype,
        availability_zone=kwargs.get('availability_zone', None),
    )


# Command parity with EC2 and Azure
create_volume = volume_create
def volume_delete(name, **kwargs):
    '''
    Delete block storage device
    '''
    return get_conn().volume_delete(name)
def volume_detach(name, **kwargs):
    '''
    Detach block volume
    '''
    return get_conn().volume_detach(name, timeout=300)
def volume_attach(name, server_name, device='/dev/xvdb', **kwargs):
    '''
    Attach block volume
    '''
    return get_conn().volume_attach(name, server_name, device, timeout=300)


# Command parity with EC2 and Azure
attach_volume = volume_attach
def volume_create_attach(name, call=None, **kwargs):
    '''
    Create and attach volumes to created node

    name
        Name of the server the volumes will be attached to.
    kwargs
        Must contain ``volumes``: either a YAML string or a list of dicts,
        each with ``name`` plus optionally ``volume_id``, ``snapshot``,
        ``size``, ``type``, ``iops`` and ``device``.
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'The create_attach_volumes action must be called with '
            '-a or --action.'
        )
    # ``volumes`` may arrive as a YAML string from the CLI; normalize it.
    # Fix: isinstance() with six.string_types instead of ``type(...) is
    # str`` so str subclasses (and py2 unicode) are handled too.
    if isinstance(kwargs['volumes'], six.string_types):
        volumes = yaml.safe_load(kwargs['volumes'])
    else:
        volumes = kwargs['volumes']
    ret = []
    for volume in volumes:
        created = False
        volume_dict = {
            'name': volume['name'],
        }
        if 'volume_id' in volume:
            volume_dict['volume_id'] = volume['volume_id']
        elif 'snapshot' in volume:
            volume_dict['snapshot'] = volume['snapshot']
        else:
            volume_dict['size'] = volume['size']
            if 'type' in volume:
                volume_dict['type'] = volume['type']
            if 'iops' in volume:
                volume_dict['iops'] = volume['iops']
        # Create the volume first when no existing id is known.
        if 'id' not in volume_dict:
            created_volume = create_volume(**volume_dict)
            created = True
            volume_dict.update(created_volume)
        attach = attach_volume(
            name=volume['name'],
            server_name=name,
            device=volume.get('device', None),
            call='action'
        )
        if attach:
            msg = (
                '{0} attached to {1} (aka {2})'.format(
                    volume_dict['id'],
                    name,
                    volume_dict['name'],
                )
            )
            log.info(msg)
            ret.append(msg)
    return ret


# Command parity with EC2 and Azure
create_attach_volumes = volume_create_attach
def volume_list(**kwargs):
    '''
    List block devices
    '''
    return get_conn().volume_list()
def network_list(call=None, **kwargs):
    '''
    List private networks
    '''
    return get_conn().network_list()
def network_create(name, **kwargs):
    '''
    Create private networks
    '''
    return get_conn().network_create(name, **kwargs)
def virtual_interface_list(name, **kwargs):
    '''
    List the virtual interfaces attached to a server
    '''
    return get_conn().virtual_interface_list(name)
def virtual_interface_create(name, net_name, **kwargs):
    '''
    Attach a new virtual interface on the given network to a server
    '''
    return get_conn().virtual_interface_create(name, net_name)
def floating_ip_pool_list(call=None):
    '''
    List all floating IP pools

    .. versionadded:: 2016.3.0
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'The floating_ip_pool_list action must be called with -f or --function'
        )
    return get_conn().floating_ip_pool_list()
def floating_ip_list(call=None):
    '''
    List floating IPs

    .. versionadded:: 2016.3.0
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'The floating_ip_list action must be called with -f or --function'
        )
    return get_conn().floating_ip_list()
def floating_ip_create(kwargs, call=None):
    '''
    Allocate a floating IP from the pool named in ``kwargs['pool']``.

    .. versionadded:: 2016.3.0
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'The floating_ip_create action must be called with -f or --function'
        )
    if 'pool' not in kwargs:
        log.error('pool is required')
        return False
    return get_conn().floating_ip_create(kwargs['pool'])
def floating_ip_delete(kwargs, call=None):
    '''
    De-allocate the floating IP named in ``kwargs['floating_ip']``.

    .. versionadded:: 2016.3.0
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'The floating_ip_delete action must be called with -f or --function'
        )
    if 'floating_ip' not in kwargs:
        log.error('floating_ip is required')
        return False
    return get_conn().floating_ip_delete(kwargs['floating_ip'])
def floating_ip_associate(name, kwargs, call=None):
    '''
    Associate a floating IP address to a server

    .. versionadded:: 2016.3.0
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            # Fix: corrected usage-message typo ('-a of' -> '-a or').
            'The floating_ip_associate action must be called with -a or --action.'
        )
    if 'floating_ip' not in kwargs:
        log.error('floating_ip is required')
        return False
    conn = get_conn()
    conn.floating_ip_associate(name, kwargs['floating_ip'])
    # Return the refreshed node entry so callers see the new address.
    return list_nodes()[name]
def floating_ip_disassociate(name, kwargs, call=None):
    '''
    Disassociate a floating IP from a server

    .. versionadded:: 2016.3.0
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            # Fix: corrected usage-message typo ('-a of' -> '-a or').
            'The floating_ip_disassociate action must be called with -a or --action.'
        )
    if 'floating_ip' not in kwargs:
        log.error('floating_ip is required')
        return False
    conn = get_conn()
    conn.floating_ip_disassociate(name, kwargs['floating_ip'])
    # Return the refreshed node entry so callers see the removal.
    return list_nodes()[name]
| 30.890797 | 116 | 0.610823 |
79565ee9a3d1d6d5d029aadb09e5081ba89b4e52 | 483 | py | Python | fcm_notifications.py | Ice-crusher/FlaskCovidAlert | 6c50c4ebe67bc387c39413aae635b88f53ef320e | [
"Apache-2.0"
] | null | null | null | fcm_notifications.py | Ice-crusher/FlaskCovidAlert | 6c50c4ebe67bc387c39413aae635b88f53ef320e | [
"Apache-2.0"
] | null | null | null | fcm_notifications.py | Ice-crusher/FlaskCovidAlert | 6c50c4ebe67bc387c39413aae635b88f53ef320e | [
"Apache-2.0"
] | null | null | null | from pyfcm import FCMNotification
from extensions import fcmApiKey
pushService = FCMNotification(api_key=fcmApiKey)
def sendNotifications(fcmToken):
    '''Push a COVID-19 exposure alert to the device identified by *fcmToken*.'''
    alert_title = "Uwaga!"
    alert_body = "Wcześniej spotkałeś osobę chorą na COVID-19. Jesteś w strefie ryzyka zakażenia. Uważaj na siebie!"
    response = pushService.notify_single_device(
        registration_id=fcmToken,
        message_title=alert_title,
        message_body=alert_body,
    )
    print(response)
| 40.25 | 134 | 0.807453 |
795660bd3ce55669abea9fb0606351fbaac0bb88 | 599 | py | Python | accounts/migrations/0006_auto_20220223_0033.py | aastha1999/SystemDesign-Pastebin | 56e24ff2e98bf38414a758b86813d65df1a422bf | [
"MIT"
] | 1 | 2022-01-18T11:29:39.000Z | 2022-01-18T11:29:39.000Z | accounts/migrations/0006_auto_20220223_0033.py | aastha1999/SystemDesign-Pastebin | 56e24ff2e98bf38414a758b86813d65df1a422bf | [
"MIT"
] | null | null | null | accounts/migrations/0006_auto_20220223_0033.py | aastha1999/SystemDesign-Pastebin | 56e24ff2e98bf38414a758b86813d65df1a422bf | [
"MIT"
] | 1 | 2022-01-27T12:23:50.000Z | 2022-01-27T12:23:50.000Z | # Generated by Django 3.2.11 on 2022-02-22 19:03
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0005_remove_user_password2'),
]
operations = [
migrations.AlterField(
model_name='user',
name='email',
field=models.EmailField(max_length=500, verbose_name='email address'),
),
migrations.AlterField(
model_name='user',
name='username',
field=models.CharField(max_length=500, verbose_name='username'),
),
]
| 24.958333 | 82 | 0.597663 |
7956614e78c56c2746d3d9f242cdfa0921f5cbbd | 5,657 | py | Python | tests/chainer_tests/training_tests/updaters_tests/test_multiprocess_parallel_updater.py | LuoYuanke/PrivChainer | 758d765c7903f6913cfd58c21db069d5f2a12203 | [
"MIT"
] | null | null | null | tests/chainer_tests/training_tests/updaters_tests/test_multiprocess_parallel_updater.py | LuoYuanke/PrivChainer | 758d765c7903f6913cfd58c21db069d5f2a12203 | [
"MIT"
] | null | null | null | tests/chainer_tests/training_tests/updaters_tests/test_multiprocess_parallel_updater.py | LuoYuanke/PrivChainer | 758d765c7903f6913cfd58c21db069d5f2a12203 | [
"MIT"
] | 1 | 2022-02-20T10:32:59.000Z | 2022-02-20T10:32:59.000Z | import unittest
import numpy
import chainer
from chainer import cuda
from chainer import initializers
from chainer import testing
from chainer.testing import attr
import chainer.training.updaters.multiprocess_parallel_updater as mpu
import copy
class SimpleNet(chainer.Chain):
    """Tiny conv + linear classifier used as a fixture for updater tests."""

    insize = 5

    def __init__(self, dtype=numpy.float32):
        super(SimpleNet, self).__init__()
        self.dtype = dtype
        weight_init = initializers.HeNormal(1 / numpy.sqrt(2), self.dtype)
        bias_init = initializers.Zero(self.dtype)
        with self.init_scope():
            self.conv = chainer.links.Convolution2D(
                2, 2, 3, initialW=weight_init, initial_bias=bias_init)
            self.fc = chainer.links.Linear(
                18, 2, initialW=weight_init, initial_bias=bias_init)
        self.train = True

    def clear(self):
        """Drop the loss/accuracy cached by the previous forward pass."""
        self.loss = None
        self.accuracy = None

    def __call__(self, x, t):
        """Forward pass; caches accuracy and returns the cross-entropy loss."""
        hidden = chainer.functions.relu(self.conv(x))
        logits = self.fc(hidden)
        self.loss = chainer.functions.softmax_cross_entropy(logits, t)
        self.accuracy = chainer.functions.accuracy(logits, t)
        return self.loss
@testing.parameterize(*testing.product({
    'dtype': [numpy.float32, numpy.float16],
}))
class TestGatherScatter(unittest.TestCase):
    """Check that gather/scatter round-trips gradients and parameters."""

    # Dotted paths of every parameter of SimpleNet.
    _PARAM_PATHS = ('conv.W', 'conv.b', 'fc.W', 'fc.b')

    def setUp(self):
        pass

    @staticmethod
    def _param(model, path):
        # Resolve a dotted path such as 'conv.W' to the parameter object.
        link_name, param_name = path.split('.')
        return getattr(getattr(model, link_name), param_name)

    @attr.gpu
    def test_gather_scatter_grads(self):
        cupy = cuda.cupy
        src = SimpleNet(dtype=self.dtype)
        dst = copy.deepcopy(src)
        src.to_gpu()
        dst.to_gpu()

        opt_src = chainer.optimizers.SGD(lr=1.0)
        opt_src.setup(src)
        opt_dst = chainer.optimizers.SGD(lr=1.0)
        opt_dst.setup(dst)

        bsize = 8
        x = numpy.random.uniform(0, 1, (bsize, 2, 5, 5)).astype(self.dtype)
        t = (numpy.arange(bsize) % 2).astype(numpy.int32)
        x = chainer.Variable(chainer.cuda.to_gpu(x))
        t = chainer.Variable(chainer.cuda.to_gpu(t))

        loss = src(x, t)
        src.cleargrads()
        dst.cleargrads()
        loss.backward()

        # Gradients gathered from one model and scattered onto the other
        # must match exactly.
        gathered = mpu.gather_grads(src)
        mpu.scatter_grads(dst, gathered)
        for path in self._PARAM_PATHS:
            cupy.testing.assert_array_equal(
                self._param(src, path).grad, self._param(dst, path).grad)

        # After one SGD step both models must stay in sync.
        opt_src.update()
        opt_dst.update()
        for path in self._PARAM_PATHS:
            cupy.testing.assert_array_equal(
                self._param(src, path).data, self._param(dst, path).data)

    def test_gather_grads_raise_on_cpu(self):
        with self.assertRaises(RuntimeError):
            mpu.gather_grads(SimpleNet(dtype=self.dtype))

    @attr.gpu
    def test_gather_scatter_params(self):
        cupy = cuda.cupy
        src = SimpleNet(dtype=self.dtype)
        dst = SimpleNet(dtype=self.dtype)
        src.to_gpu()
        dst.to_gpu()

        mpu.scatter_params(dst, mpu.gather_params(src))
        for path in self._PARAM_PATHS:
            cupy.testing.assert_array_equal(
                self._param(src, path).data, self._param(dst, path).data)

    def test_gather_params_raise_on_cpu(self):
        with self.assertRaises(RuntimeError):
            mpu.gather_params(SimpleNet(dtype=self.dtype))
class SimpleNetRawArray(chainer.Chain):
    """Variant of SimpleNet asserting its inputs are raw (non-Variable) arrays."""

    def __init__(self, testcase):
        super(SimpleNetRawArray, self).__init__()
        with self.init_scope():
            self.conv = chainer.links.Convolution2D(2, 2, 3)
            self.fc = chainer.links.Linear(18, 2)
        self.train = True
        self.call_called = 0  # number of forward passes observed
        self.testcase = testcase

    def clear(self):
        """Drop the loss/accuracy cached by the previous forward pass."""
        self.loss = None
        self.accuracy = None

    def __call__(self, x, t):
        # The updater must hand over raw arrays, not chainer.Variable wrappers.
        self.testcase.assertNotIsInstance(x, chainer.Variable)
        self.testcase.assertNotIsInstance(t, chainer.Variable)

        self.call_called += 1

        hidden = chainer.functions.relu(self.conv(x))
        logits = self.fc(hidden)
        self.loss = chainer.functions.softmax_cross_entropy(logits, t)
        self.accuracy = chainer.functions.accuracy(logits, t)
        return self.loss
class TestRawArray(unittest.TestCase):
    """Ensure MultiprocessParallelUpdater feeds raw arrays to the model."""

    def setUp(self):
        pass

    @attr.gpu
    def test_update_uses_raw_array(self):
        if not mpu.MultiprocessParallelUpdater.available():
            # Multiprocess updater unsupported in this build; nothing to test.
            return

        model = SimpleNetRawArray(self)
        dataset = [((numpy.ones((2, 5, 5)) * i).astype(numpy.float32),
                    numpy.int32(0)) for i in range(100)]

        batch_size = 5
        devices = (1,)
        iters = [chainer.iterators.SerialIterator(sub_dataset, batch_size)
                 for sub_dataset in chainer.datasets.split_dataset_n_random(
                     dataset, len(devices))]
        optimizer = chainer.optimizers.SGD(lr=1.0)
        optimizer.setup(model)
        updater = mpu.MultiprocessParallelUpdater(
            iters, optimizer, devices=devices)
        updater.update()

        # Exactly one forward pass must have happened.
        self.assertEqual(model.call_called, 1)
# Delegate to Chainer's test runner when this module is executed directly.
testing.run_module(__name__, __file__)
| 30.578378 | 79 | 0.628779 |
795661b6ff13f3b295b887eea8d7dafe3acfc06d | 2,139 | py | Python | setup.py | heikoheiko/pyethapp | 17a560abe8f8742bf5e30fa8f7e5495faf3cb101 | [
"BSD-3-Clause"
] | 48 | 2015-12-23T14:38:14.000Z | 2022-01-19T06:01:57.000Z | setup.py | WEI-HE/pyethapp | 17a560abe8f8742bf5e30fa8f7e5495faf3cb101 | [
"BSD-3-Clause"
] | 2 | 2016-05-08T02:27:47.000Z | 2017-03-06T23:37:47.000Z | setup.py | WEI-HE/pyethapp | 17a560abe8f8742bf5e30fa8f7e5495faf3cb101 | [
"BSD-3-Clause"
] | 27 | 2015-04-05T05:26:14.000Z | 2022-02-05T16:43:20.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
    """Custom setuptools ``test`` command that runs the suite with pytest."""

    def finalize_options(self):
        # Run the whole discovered suite with no extra CLI arguments.
        TestCommand.finalize_options(self)
        self.test_args = []
        self.test_suite = True

    def run_tests(self):
        # import here, cause outside the eggs aren't loaded
        import pytest
        pytest.main(self.test_args)
# Long description for PyPI: README followed by the changelog.
with open('README.rst') as readme_file:
    readme = readme_file.read()

with open('HISTORY.rst') as history_file:
    history = history_file.read().replace('.. :changelog:', '')

# Read requirements.txt with a context manager; the original called
# open() bare and leaked the file handle.
with open('requirements.txt') as requirements_file:
    install_requires = set(line.strip() for line in requirements_file)

# Git tarball requirements are mapped to their released PyPI equivalents.
install_requires_replacements = {
    'https://github.com/ethereum/pyrlp/tarball/develop': 'rlp>=0.3.8',
    'https://github.com/ethereum/pydevp2p/tarball/master': 'devp2p>=0.4.0',
    'https://github.com/ethereum/pyethereum/tarball/develop': 'ethereum>=0.9.65'}
install_requires = [install_requires_replacements.get(r, r)
                    for r in install_requires]

test_requirements = []

version = '0.9.16'  # preserve format, this is read from __init__.py
setup(
name='pyethapp',
version=version,
description="Python Ethereum Client",
long_description=readme + '\n\n' + history,
author="HeikoHeiko",
author_email='heiko@ethdev.com',
url='https://github.com/ethereum/pyethapp',
packages=[
'pyethapp',
],
package_dir={'pyethapp':
'pyethapp'},
include_package_data=True,
license="BSD",
zip_safe=False,
keywords='pyethapp',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.7',
],
cmdclass={'test': PyTest},
install_requires=install_requires,
tests_require=test_requirements,
entry_points='''
[console_scripts]
pyethapp=pyethapp.app:app
'''
)
| 27.779221 | 86 | 0.658719 |
795663352b2e5526baf93c0f7936ba1624e06b13 | 3,175 | py | Python | pygubu/builder/widgets/calendarframe.py | alejandroautalan/pygubu | 80ed6b14defc7129fcbd093a3b4b44961f84421e | [
"MIT"
] | 1,716 | 2015-01-02T12:34:53.000Z | 2022-03-31T03:28:51.000Z | pygubu/builder/widgets/calendarframe.py | codesockett/pygubu | a4978e414d5f6e3be1ff6df94cba9fb6c0e2af6f | [
"MIT"
] | 207 | 2015-01-03T03:54:38.000Z | 2022-03-31T15:20:14.000Z | pygubu/builder/widgets/calendarframe.py | codesockett/pygubu | a4978e414d5f6e3be1ff6df94cba9fb6c0e2af6f | [
"MIT"
] | 261 | 2015-01-02T19:17:36.000Z | 2022-03-20T05:21:49.000Z | # encoding: utf8
from pygubu import BuilderObject, register_property, register_widget
from pygubu.builder.ttkstdwidgets import TTKFrame
from pygubu.widgets.calendarframe import CalendarFrame
class CalendarFrameBuilder(BuilderObject):
    """Pygubu builder plugin describing the CalendarFrame widget.

    Reuses the standard and specific ttk.Frame option lists and adds the
    calendar-specific options listed in ``OPTIONS_CUSTOM``.
    """
    class_ = CalendarFrame
    OPTIONS_STANDARD = TTKFrame.OPTIONS_STANDARD
    OPTIONS_SPECIFIC = TTKFrame.OPTIONS_SPECIFIC
    # Options handled by CalendarFrame itself rather than by ttk.Frame.
    OPTIONS_CUSTOM = ('firstweekday', 'year', 'month',
                      'calendarfg', 'calendarbg', 'headerfg', 'headerbg',
                      'selectbg', 'selectfg', 'state', 'markbg', 'markfg')
    ro_properties = TTKFrame.ro_properties
    properties = OPTIONS_STANDARD + OPTIONS_SPECIFIC + OPTIONS_CUSTOM
    virtual_events = ('<<CalendarFrameDateSelected>>',)
# Unique id used both for widget registration and as the per-builder key
# inside each property definition below.
_builder_id = 'pygubu.builder.widgets.calendarframe'
register_widget(_builder_id, CalendarFrameBuilder,
                'CalendarFrame', ('ttk', 'Pygubu Widgets'))

# Property editor definitions for the custom options. Each entry maps a
# property name to its editor type plus builder-specific editor parameters
# ('choice' combobox, free 'entry', or 'colorentry' picker).
props = {
    'state': {
        'editor': 'dynamic',
        _builder_id: {
            'params': {
                'mode': 'choice',
                'values': ('', 'normal', 'disabled'),
                'state': 'readonly'}},
    },
    # 0 = Monday, 6 = Sunday (calendar module convention).
    'firstweekday': {
        'editor': 'dynamic',
        _builder_id: {
            'params': {
                'mode': 'choice',
                'values': ('0', '6'), 'state': 'readonly'},
            'default': '6',
        }
    },
    'year': {
        'editor': 'dynamic',
        _builder_id: {
            'params': {
                'mode': 'entry'}
        }
    },
    'month': {
        'editor': 'dynamic',
        _builder_id: {
            'params': {
                'mode': 'choice',
                'values': ('1', '2', '3', '4', '5', '6', '7', '8', '9', '10',
                           '11', '12'), 'state': 'readonly'},
            'default': '1'
        }
    },
    # Better to change locale by code.
    # 'locale': {
    #     'editor': 'entry'
    # },
    # Color options below all share the same colorentry editor.
    'calendarfg': {
        'editor': 'dynamic',
        _builder_id: {
            'params': {
                'mode': 'colorentry'},
        }
    },
    'calendarbg': {
        'editor': 'dynamic',
        _builder_id: {
            'params': {
                'mode': 'colorentry'}
        }
    },
    'headerfg': {
        'editor': 'dynamic',
        _builder_id: {
            'params': {
                'mode': 'colorentry'}
        }
    },
    'headerbg': {
        'editor': 'dynamic',
        _builder_id: {
            'params': {
                'mode': 'colorentry'}
        }
    },
    'selectbg': {
        'editor': 'dynamic',
        _builder_id: {
            'params': {
                'mode': 'colorentry'}
        }
    },
    'selectfg': {
        'editor': 'dynamic',
        _builder_id: {
            'params': {
                'mode': 'colorentry'}
        }
    },
    'markbg': {
        'editor': 'dynamic',
        _builder_id: {
            'params': {
                'mode': 'colorentry'}
        }
    },
    'markfg': {
        'editor': 'dynamic',
        _builder_id: {
            'params': {
                'mode': 'colorentry'}
        }
    }
}
# Register every custom property editor; iterate items() instead of
# indexing the dict once per key.
for prop_name, prop_definition in props.items():
    register_property(prop_name, prop_definition)
| 26.02459 | 77 | 0.454488 |
795663f114fcc6a28f1db131f565651273956537 | 4,788 | py | Python | cotede/qctests/cars_normbias.py | jessicaaustin/CoTeDe | 0ca2a1c71de980d91262fd36fd5d8ab8cc09f019 | [
"BSD-3-Clause"
] | null | null | null | cotede/qctests/cars_normbias.py | jessicaaustin/CoTeDe | 0ca2a1c71de980d91262fd36fd5d8ab8cc09f019 | [
"BSD-3-Clause"
] | null | null | null | cotede/qctests/cars_normbias.py | jessicaaustin/CoTeDe | 0ca2a1c71de980d91262fd36fd5d8ab8cc09f019 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
"""
from datetime import timedelta
import logging
import numpy as np
from numpy import ma
from oceansdb import CARS
module_logger = logging.getLogger(__name__)
class CARS_NormBias(object):
    """Quality-control test comparing measurements against the CARS
    climatology (normalized bias test).

    Parameters
    ----------
    data : profile-like object
        Must expose ``attributes`` (with LATITUDE/LONGITUDE and a
        'date' or 'datetime' entry) and item access to the measured
        variables (DEPTH or PRES plus ``varname``).
    varname : str
        Variable to evaluate (a trailing '2', e.g. 'TEMP2', shares the
        climatology of the base variable).
    cfg : dict
        Test configuration. Required: 'threshold'. Optional:
        'flag_good' (default 1), 'flag_bad' (default 3),
        'use_standard_error' (default False).
    autoflag : bool
        If True (default), run :meth:`test` right after the features
        are computed.
    """

    def __init__(self, data, varname, cfg, autoflag=True):
        self.data = data
        self.varname = varname
        self.cfg = cfg

        # Default is to do not use standard error to estimate the bias,
        # because that is the traditional approach.
        if 'use_standard_error' not in self.cfg:
            self.cfg['use_standard_error'] = False

        self.set_features()

        if autoflag:
            self.test()

    def keys(self):
        """Names of the available features and flags."""
        # BUG FIX: dict.keys() views cannot be concatenated with '+' on
        # Python 3; materialize as lists first.
        return list(self.features.keys()) + \
            ["flag_%s" % f for f in self.flags.keys()]

    def set_features(self):
        """Extract the CARS climatology at the profile position/time and
        derive the bias and normalized bias features."""
        if ('LATITUDE' in self.data.attributes.keys()) and \
                ('LONGITUDE' in self.data.attributes.keys()):
            kwargs = {
                'lat': self.data.attributes['LATITUDE'],
                'lon': self.data.attributes['LONGITUDE']}
        if ('LATITUDE' in self.data.keys()) and \
                ('LONGITUDE' in self.data.keys()):
            # BUG FIX: the original referenced an undefined name 'data'
            # here (NameError); it clearly meant self.data.
            dLmax = max(
                self.data['LATITUDE'].max() - self.data['LATITUDE'].min(),
                self.data['LONGITUDE'].max() - self.data['LONGITUDE'].min())
            # Only use each measurement coordinate if it is spread
            # (i.e. an along-track dataset rather than a fixed station).
            if dLmax >= 0.01:
                kwargs = {
                    'lat': self.data['LATITUDE'],
                    'lon': self.data['LONGITUDE'],
                    'alongtrack_axis': ['lat', 'lon']}

        if 'DEPTH' in self.data.keys():
            depth = self.data['DEPTH']
        elif 'PRES' in self.data.keys():
            depth = self.data['PRES']

        try:
            doy = int(self.data.attributes['date'].strftime('%j'))
        except KeyError:
            # Some sources store the timestamp under 'datetime' instead.
            doy = int(self.data.attributes['datetime'].strftime('%j'))

        db = CARS()
        # Variables like 'TEMP2' share the climatology of 'TEMP'.
        if self.varname[-1] == '2':
            vtype = self.varname[:-1]
        else:
            vtype = self.varname

        # Valid levels: unmasked and non-negative depth.
        idx = ~ma.getmaskarray(depth) & np.array(depth >= 0)
        cars = db[vtype].extract(
            var=['mn', 'std_dev'],
            doy=doy,
            depth=depth[idx],
            **kwargs)

        # BUG FIX: the original tested 'idx.all() is not True', which is
        # always truthy because ndarray.all() returns numpy.bool_, never
        # the Python True singleton.
        if not idx.all():
            # Re-expand to the original profile size, masking the levels
            # that were not sent to the climatology.
            for v in cars.keys():
                tmp = ma.masked_all(depth.shape, dtype=cars[v].dtype)
                tmp[idx] = cars[v]
                cars[v] = tmp

        self.features = {
            'cars_mean': cars['mn'],
            'cars_std': cars['std_dev']}

        self.features['cars_bias'] = self.data[self.varname] - \
            self.features['cars_mean']

        # If use_standard_error is True, the comparison with the climatology
        # considers the standard error, i.e. the bias is only the amount
        # beyond the standard error range.
        # NOTE(review): the assert below makes the branch underneath
        # unreachable; kept as-is to preserve behavior, but it looks like
        # leftover scaffolding ('cars_nsamples' is never extracted above,
        # so enabling this path would fail anyway).
        assert not self.cfg['use_standard_error']
        if self.cfg['use_standard_error'] is True:
            standard_error = self.features['cars_std'] / \
                self.features['cars_nsamples'] ** 0.5
            idx = np.absolute(self.features['cars_bias']) <= \
                standard_error
            self.features['cars_bias'][idx] = 0
            idx = np.absolute(self.features['cars_bias']) > standard_error
            self.features['cars_bias'][idx] -= \
                np.sign(self.features['cars_bias'][idx]) * \
                standard_error[idx]

        self.features['cars_normbias'] = self.features['cars_bias'] / \
            self.features['cars_std']

    def test(self):
        """Flag each measurement by comparing |normalized bias| with the
        configured threshold; masked input values are flagged 9."""
        # NOTE(review): the original read an unused cfg['min_samples']
        # (default 3) here; removed since it had no effect.
        self.flags = {}

        flag_good = self.cfg.get('flag_good', 1)
        flag_bad = self.cfg.get('flag_bad', 3)

        threshold = self.cfg['threshold']
        assert (np.size(threshold) == 1) and \
            (threshold is not None)

        flag = np.zeros(self.data[self.varname].shape, dtype='i1')

        normbias_abs = np.absolute(self.features['cars_normbias'])
        ind = np.nonzero(normbias_abs <= threshold)
        flag[ind] = flag_good
        ind = np.nonzero(normbias_abs > threshold)
        flag[ind] = flag_bad

        # Flag as 9 any masked input value
        flag[ma.getmaskarray(self.data[self.varname])] = 9

        self.flags['cars_normbias'] = flag
| 33.02069 | 80 | 0.519632 |
795664623b155ee6d551189a28150924dd1f7217 | 3,986 | py | Python | config/settings/local.py | EricMuller/mynote-backend | 69bc39b8cfad52d6c42003cfa7bd629f3e8eccb7 | [
"MIT"
] | 1 | 2017-04-26T10:24:21.000Z | 2017-04-26T10:24:21.000Z | config/settings/local.py | EricMuller/mynotes-backend | 69bc39b8cfad52d6c42003cfa7bd629f3e8eccb7 | [
"MIT"
] | 5 | 2020-06-05T18:16:39.000Z | 2022-01-13T00:45:49.000Z | config/settings/local.py | EricMuller/webmarks-rest-api | 69bc39b8cfad52d6c42003cfa7bd629f3e8eccb7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Local settings
- Run in Debug mode
- Use mailhog for emails
- Add Django Debug Toolbar
- Add django-extensions as app
"""
import socket
import os
from .common import * # noqa
# Host used as the default for ALLOWED_HOSTS, email and redis endpoints.
HOST_NAME = env('WEBMARK_HOST_NAME', default='127.0.0.1')
# DEBUG
# ------------------------------------------------------------------------------
DEBUG = env.bool('DJANGO_DEBUG', default=True)
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
ALLOWED_HOSTS = env.list('DJANGO_ALLOWED_HOSTS', default=[
    HOST_NAME, 'localhost', '127.0.0.1'])
if not DEBUG:
    print ("env variable DJANGO_DEBUG is False !!!")
# start django with --insecure for static file
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key only used for development and testing.
SECRET_KEY = env('DJANGO_SECRET_KEY',
                 default='r(f_)4bp@%ritjy#gq19f0_z+c8+#zr=0b@)w8)_-f=+)*k0j0')
# Mail settings (local dev: MailHog-style SMTP on port 1025)
# ------------------------------------------------------------------------------
EMAIL_PORT = 1025
EMAIL_HOST = env("EMAIL_HOST", default=HOST_NAME)
EMAIL_HOST_USER = 'e'
EMAIL_HOST_PASSWORD = ''
EMAIL_USE_TLS = True
# EMAIL_USE_SSL
# EMAIL_TIMEOUT
# EMAIL_SSL_KEYFILE
# EMAIL_SSL_CERTFILE
# CACHING (previous in-memory backend kept for reference)
# ------------------------------------------------------------------------------
# CACHES = {
#     'default': {
#         'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
#         'LOCATION': ''
#     }
# }
# CACHING
# ------------------------------------------------------------------------------
REDIS_LOCATION = "redis://{}:{}/0".format(
    env('REDIS_ENDPOINT_ADDRESS', default=HOST_NAME),
    env('REDIS_PORT', default=6379)
)
# Heroku URL does not pass the DB number, so we parse it in
CACHES = {
    'default': {
        'BACKEND': 'django_redis.cache.RedisCache',
        'LOCATION': REDIS_LOCATION,
        'OPTIONS': {
            'CLIENT_CLASS': 'django_redis.client.DefaultClient',
            'IGNORE_EXCEPTIONS': True,  # mimics memcache behavior.
            # http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior
        }
    }
}
# django-channels (in-memory layer for local development)
# ------------------------------------------------------------------------------
CHANNEL_LAYERS = {
    "default": {
        "BACKEND": "asgiref.inmemory.ChannelLayer",
        "ROUTING": "config.routing.channel_routing",
    },
}
# Redis-backed channel layer kept for reference:
# CHANNEL_LAYERS = {
#     "default": {
#         "BACKEND": "asgi_redis.RedisChannelLayer",
#         "ROUTING": "config.routing.channel_routing",
#         # "CONFIG": {
#         #     "hosts": [("redis-channel-1", 6379), ("redis-channel-2", 6379)],
#         # },
#     },
# }
# django-debug-toolbar
# ------------------------------------------------------------------------------
# MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
# INSTALLED_APPS += ('debug_toolbar', )
INTERNAL_IPS = ['127.0.0.1', '10.0.2.2', ]
# tricks to have debug toolbar when developing with docker
if os.environ.get('USE_DOCKER') == 'yes':
    ip = socket.gethostbyname(socket.gethostname())
    INTERNAL_IPS += [ip[:-1] + "1"]
DEBUG_TOOLBAR_CONFIG = {
    'DISABLE_PANELS': [
        'debug_toolbar.panels.redirects.RedirectsPanel',
    ],
    'SHOW_TEMPLATE_CONTEXT': True,
}
# django-extensions
# ------------------------------------------------------------------------------
INSTALLED_APPS += ('django_extensions', )
# TESTING
# ------------------------------------------------------------------------------
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# CELERY
# In development, all tasks will be executed locally by blocking until the
# task returns
CELERY_ALWAYS_EAGER = True
# END CELERY
# Your local stuff: Below this line define 3rd party library settings
# ------------------------------------------------------------------------------
795664a77f6733a258f808c8d8df2096dc9a9a04 | 522 | py | Python | Chapter02/02_keras_cifar10.py | PacktPublishing/Practical-Computer-Vision | 8cf8ee58d306e0fb1a2e21b6518136bb4c174ff5 | [
"MIT"
] | 23 | 2018-02-28T05:00:34.000Z | 2022-02-04T13:26:03.000Z | Chapter02/02_keras_cifar10.py | PacktPublishing/Practical-Computer-Vision | 8cf8ee58d306e0fb1a2e21b6518136bb4c174ff5 | [
"MIT"
] | 4 | 2018-03-23T11:35:31.000Z | 2022-01-31T14:52:57.000Z | Chapter02/02_keras_cifar10.py | PacktPublishing/Practical-Computer-Vision | 8cf8ee58d306e0fb1a2e21b6518136bb4c174ff5 | [
"MIT"
] | 23 | 2018-02-08T03:17:16.000Z | 2022-01-03T09:12:47.000Z | from __future__ import print_function
from keras.datasets import cifar10
import matplotlib.pyplot as plt
# Download (on first run) and load the CIFAR-10 dataset
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Human-readable class names, indexed by the integer label 0-9
labels = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
# Print the dataset dimensions
print("Train data shape:", x_train.shape, "Test data shape:", x_test.shape)
# Plot one sample image with its label (y_train rows are 1-element arrays)
idx = 1500
print("Label:",labels[y_train[idx][0]])
plt.imshow(x_train[idx])
plt.axis('off')
plt.show()
795664acecd4dc57dbc5eb11dd23680ff1e95b86 | 3,410 | py | Python | var/spack/repos/builtin/packages/rocm-gdb/package.py | glenn-horton-smith/spack | bc6fb03f231d814ffd6f558aab20e6dbb46b26af | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/rocm-gdb/package.py | glenn-horton-smith/spack | bc6fb03f231d814ffd6f558aab20e6dbb46b26af | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/rocm-gdb/package.py | glenn-horton-smith/spack | bc6fb03f231d814ffd6f558aab20e6dbb46b26af | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RocmGdb(AutotoolsPackage):
    """This is ROCmgdb, the ROCm source-level debugger for Linux,
    based on GDB, the GNU source-level debugger."""

    homepage = "https://github.com/ROCm-Developer-Tools/ROCgdb/"
    url = "https://github.com/ROCm-Developer-Tools/ROCgdb/archive/rocm-4.5.0.tar.gz"

    maintainers = ['srekolam', 'arjun-raj-kuppala']

    version('4.5.0', sha256='dd37c8b1ea6bb41b1263183637575d7bf4746cabc573dbff888e23b0379877b0')
    version('4.3.1', sha256='995756a24b1e1510647dac1476a3a9a8e3af8e9fd9f4af1d00dd2db28e7a4ef2')
    version('4.3.0', sha256='8ee0667ab2cd91b2cc41d3a7af046d36a6b4e2007f050265aa65e0aedec83fd7')
    version('4.2.0', sha256='4bc579584a1f8614111e7e44d8aa1c6d5d06be3f5db055aba2cf1abc140122ac')
    version('4.1.0', sha256='28dc806e48695d654d52fb1a634df6d4c1243f00846ae90161e7a5e9f4d88b24', deprecated=True)
    version('4.0.0', sha256='b90291b0a8409fe66d8a65d2731dcb87b9f5a22bac9ce3ffbab726eb129ba13d', deprecated=True)
    version('3.10.0', sha256='05455cb47dd42404ee8bba047def6a6846a7e877e7a7db8dcffc7100d5ba16f0', deprecated=True)
    version('3.9.0', sha256='0765c96439c0efa145418d210d865b9faed463466d7522274959cc4476a37097', deprecated=True)
    version('3.8.0', sha256='a7c11dc30c952587c616bf7769bad603c3bf80522afc8b73ccda5b78d27bed41', deprecated=True)
    version('3.7.0', sha256='7a29ef584fd7b6c66bb03aaf8ec2f5a8c758370672a28a4d0d95066e5f6fbdc1', deprecated=True)
    version('3.5.0', sha256='cf36d956e84c7a5711b71f281a44b0a9708e13e941d8fca0247d01567e7ee7d1', deprecated=True)

    depends_on('cmake@3:', type='build')
    depends_on('texinfo', type='build')
    depends_on('bison', type='build')
    depends_on('flex@2.6.4:', type='build')
    depends_on('libunwind', type='build')
    depends_on('expat', type=('build', 'link'))
    depends_on('python', type=('build', 'link'))
    depends_on('zlib', type='link')
    depends_on('babeltrace@1.2.4', type='link')
    depends_on('gmp', type=('build', 'link'), when='@4.5.0:')

    # The debugger API and code-object manager must match the ROCm release.
    for ver in ['3.5.0', '3.7.0', '3.8.0', '3.9.0', '3.10.0', '4.0.0', '4.1.0',
                '4.2.0', '4.3.0', '4.3.1', '4.5.0']:
        depends_on('rocm-dbgapi@' + ver, type='link', when='@' + ver)
        depends_on('comgr@' + ver, type='link', when='@' + ver)

    build_directory = 'spack-build'

    def configure_args(self):
        """Arguments passed to ROCgdb's ./configure."""
        # Generic options to compile GCC
        options = [
            # Distributor options
            '--program-prefix=roc',
            '--enable-64-bit-bfd',
            '--with-bugurl=https://github.com/ROCm-Developer-Tools/ROCgdb/issues',
            '--with-pkgversion=-ROCm',
            '--enable-targets=x86_64-linux-gnu,amdgcn-amd-amdhsa',
            '--disable-ld',
            '--disable-gas',
            '--disable-gdbserver',
            '--disable-sim',
            '--enable-tui',
            '--disable-gdbtk',
            '--disable-shared',
            '--with-expat',
            # BUG FIX: a missing comma previously fused this flag with the
            # next one into '--with-system-zlib--without-guile', so neither
            # option actually reached configure.
            '--with-system-zlib',
            '--without-guile',
            '--with-babeltrace',
            '--with-lzma',
            '--with-python',
            '--with-rocm-dbgapi={0}'.format(self.spec['rocm-dbgapi'].prefix)
        ]
        return options
| 46.081081 | 113 | 0.653372 |
7956650e504d6b20986512007209754786603302 | 1,527 | py | Python | core/models.py | thiagofreitascarneiro/Revenda_de_Carros | 6c778e9254c474a954a97c0678ffe272abab7c92 | [
"MIT"
] | null | null | null | core/models.py | thiagofreitascarneiro/Revenda_de_Carros | 6c778e9254c474a954a97c0678ffe272abab7c92 | [
"MIT"
] | null | null | null | core/models.py | thiagofreitascarneiro/Revenda_de_Carros | 6c778e9254c474a954a97c0678ffe272abab7c92 | [
"MIT"
] | null | null | null | from django.db import models
from django.contrib.auth import get_user_model
class Chassi(models.Model):
    """Vehicle chassis number (unique identification plate)."""

    # Chassis number, at most 16 characters (help text kept in Portuguese
    # as it is user-facing).
    numero = models.CharField('Chassi', max_length=16, help_text='Maximo 16 caracteres ')

    class Meta:
        verbose_name = 'Chassi'
        verbose_name_plural = 'Chassis'

    def __str__(self):
        return self.numero
class Montadora(models.Model):
    """Car manufacturer ('montadora')."""

    nome = models.CharField('Nome', max_length=50)

    class Meta:
        verbose_name = 'Montadora'
        verbose_name_plural = 'Montadoras'

    def __str__(self):
        return self.nome
class Carro(models.Model):
    """Car for resale.

    Relationship notes (translated from the original Portuguese):

    OneToOneField:
        Each car relates to exactly one chassis, and each chassis
        relates to exactly one car.

    ForeignKey (one-to-many):
        Each car has a single manufacturer, but one manufacturer can
        'assemble' many cars.

    ManyToMany:
        A car can be driven by several drivers, and a driver can drive
        several cars.
    """
    chassi = models.OneToOneField(Chassi, on_delete=models.CASCADE)
    montadora = models.ForeignKey(Montadora, on_delete=models.CASCADE)
    motoristas = models.ManyToManyField(get_user_model())
    modelo = models.CharField('Modelo', max_length=30, help_text='Máximo 30 caracteres')
    preco = models.DecimalField('Preço', max_digits=8, decimal_places=2)

    class Meta:
        verbose_name = 'Carro'
        verbose_name_plural = 'Carros'

    def __str__(self):
        return f'{self.montadora} {self.modelo}'
| 27.267857 | 89 | 0.67518 |
795666aa20d2127c910d9dfa6e4130365f9165e5 | 2,855 | py | Python | linguistic_style_transfer_model/config/global_config.py | spencerbraun/linguistic-style-transfer | 76774bc805c56e823f958f98e6ae8d5973518757 | [
"Apache-2.0"
] | 136 | 2018-08-17T21:31:01.000Z | 2022-01-03T15:01:41.000Z | linguistic_style_transfer_model/config/global_config.py | spencerbraun/linguistic-style-transfer | 76774bc805c56e823f958f98e6ae8d5973518757 | [
"Apache-2.0"
] | 18 | 2018-06-29T21:41:28.000Z | 2020-07-09T08:53:37.000Z | linguistic_style_transfer_model/config/global_config.py | spencerbraun/linguistic-style-transfer | 76774bc805c56e823f958f98e6ae8d5973518757 | [
"Apache-2.0"
] | 29 | 2018-09-27T04:51:10.000Z | 2022-03-01T21:24:04.000Z | from datetime import datetime as dt
logger_name = "linguistic_style_transfer"
experiment_timestamp = dt.now().strftime("%Y%m%d%H%M%S")
vocab_size = None # set by runtime param
bow_size = None # set by runtime params and exclusions
filter_sentiment_words = True
filter_stopwords = True
embedding_size = 300
max_sequence_length = 15
validation_interval = 1
tsne_sample_limit = 1000
save_directory = "./saved-models/{}".format(experiment_timestamp)
classifier_save_directory = "./saved-models-classifier/{}".format(experiment_timestamp)
log_directory = "./tensorflow-logs/{}".format(experiment_timestamp)
all_style_embeddings_path = save_directory + "/all_style_embeddings.npy"
all_content_embeddings_path = save_directory + "/all_content_embeddings.npy"
all_shuffled_labels_path = save_directory + "/all_shuffled_labels_path.pkl"
label_mapped_style_embeddings_path = save_directory + "/label_mapped_style_embeddings.pkl"
tsne_plot_folder = save_directory + "/tsne_plots/"
style_embedding_plot_file = "tsne_embeddings_plot_style_{}.svg"
content_embedding_plot_file = "tsne_embeddings_plot_content_{}.svg"
style_embedding_custom_plot_file = "tsne_embeddings_custom_plot_style.svg"
content_embedding_custom_plot_file = "tsne_embeddings_custom_plot_content.svg"
unk_token = "<unk>"
sos_token = "<sos>"
eos_token = "<eos>"
predefined_word_index = {
unk_token: 0,
sos_token: 1,
eos_token: 2,
}
tokenizer_filters = '!"#$%&()*+,-./:;=?@[\\]^_`{|}~\t\n'
bleu_score_weights = {
1: (1.0, 0.0, 0.0, 0.0),
2: (0.5, 0.5, 0.0, 0.0),
3: (0.34, 0.33, 0.33, 0.0),
4: (0.25, 0.25, 0.25, 0.25),
}
model_save_file = "linguistic_style_transfer_model.ckpt"
model_save_path = save_directory + "/" + model_save_file
model_config_file = "model_config.json"
model_config_file_path = save_directory + "/" + model_config_file
vocab_save_file = "vocab.json"
vocab_save_path = save_directory + "/" + vocab_save_file
classifier_vocab_save_path = classifier_save_directory + "/" + vocab_save_file
index_to_label_dict_file = "index_to_label_dict.json"
label_to_index_dict_file = "label_to_index_dict.json"
index_to_label_dict_path = save_directory + "/" + index_to_label_dict_file
label_to_index_dict_path = save_directory + "/" + label_to_index_dict_file
average_label_embeddings_file = "average_label_embeddings.pkl"
average_label_embeddings_path = save_directory + "/" + average_label_embeddings_file
style_coordinates_file = "style_coordinates.pkl"
content_coordinates_file = "content_coordinates.pkl"
style_coordinates_path = save_directory + "/" + style_coordinates_file
content_coordinates_path = save_directory + "/" + content_coordinates_file
validation_scores_file = "validation_scores.txt"
validation_scores_path = save_directory + "/" + validation_scores_file
sentiment_words_file_path = "data/opinion-lexicon/sentiment-words.txt"
| 37.077922 | 90 | 0.785639 |
7956687f2f72d410efb04e15cb9144b6f9f827b4 | 1,223 | py | Python | package.py | threebotapps/interface | 031f1533a9a4c9f56388779e4c2dce725a81d556 | [
"Apache-2.0"
] | null | null | null | package.py | threebotapps/interface | 031f1533a9a4c9f56388779e4c2dce725a81d556 | [
"Apache-2.0"
] | null | null | null | package.py | threebotapps/interface | 031f1533a9a4c9f56388779e4c2dce725a81d556 | [
"Apache-2.0"
] | null | null | null | from Jumpscale import j
class Package(j.baseclasses.threebot_package):
    """Threebot package serving the static 'interface' SPA via openresty."""

    def _init(self, **kwargs):
        # Source branch for the package; defaults to 'master'.
        # (Replaces the original `if "branch" in kwargs.keys()` branch
        # with the equivalent dict.get idiom.)
        self.branch = kwargs.get("branch", "master")

    def stop(self):
        """
        called when the 3bot stops
        :return:
        """
        pass

    def uninstall(self):
        """
        called when the package is no longer needed and will be removed from the threebot
        :return:
        """
        # TODO: clean up bcdb ?
        pass

    def prepare(self):
        """
        is called at install time
        :return:
        """
        pass

    def start(self):
        """Install/configure openresty and register the SPA at '/' on :443."""
        server = self.openresty
        server.install(reset=False)
        server.configure()

        website = server.get_from_port(443)

        # Mount the bundled html/ directory as a single-page-app location.
        locations = website.locations.get("interface_location")
        website_location = locations.locations_spa.new()
        website_location.name = "interface"
        website_location.path_url = "/"
        fullpath = j.sal.fs.joinPaths(self.package_root, "html/")
        website_location.path_location = fullpath

        locations.configure()
        website.configure()
        website.save()
| 24.959184 | 89 | 0.571545 |
7956688d1033d376e055f4cc3ec96075e36d68b6 | 1,585 | py | Python | xlsxwriter/test/comparison/test_table13.py | Rippling/XlsxWriter-1 | be8d1cb8f8b156cf87bbe5d591f1f5475804be44 | [
"BSD-2-Clause"
] | null | null | null | xlsxwriter/test/comparison/test_table13.py | Rippling/XlsxWriter-1 | be8d1cb8f8b156cf87bbe5d591f1f5475804be44 | [
"BSD-2-Clause"
] | null | null | null | xlsxwriter/test/comparison/test_table13.py | Rippling/XlsxWriter-1 | be8d1cb8f8b156cf87bbe5d591f1f5475804be44 | [
"BSD-2-Clause"
] | null | null | null | ###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2021, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """

    def setUp(self):
        self.set_filename('table13.xlsx')

    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file with tables."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()

        # Three number formats tied to dxf indices 2, 1 and 0, created in
        # that order so the workbook's format registry matches Excel's.
        column_formats = [
            workbook.add_format({'num_format': 2, 'dxf_index': dxf})
            for dxf in (2, 1, 0)]

        data = [
            ['Foo', 1234, 2000, 4321],
            ['Bar', 1256, 4000, 4320],
            ['Baz', 2234, 3000, 4332],
            ['Bop', 1324, 1000, 4333],
        ]

        worksheet.set_column('C:F', 10.288)

        # First table column is unformatted; the rest carry the formats.
        columns = [{}] + [{'format': fmt} for fmt in column_formats]
        worksheet.add_table('C2:F6', {'data': data, 'columns': columns})

        workbook.close()

        self.assertExcelEqual()
79566902f9301a59e5950e02b29884baf094eddc | 1,405 | py | Python | Chapter04/stock_market.py | llichengtong/project_folder | 0f5967f9d4424c00a3e51be9d1856b68fadb10f1 | [
"MIT"
] | 403 | 2016-07-03T14:29:51.000Z | 2022-03-26T08:37:36.000Z | Chapter04/stock_market.py | bgwilf/Python-Machine-Learning-Cookbook | 0f5967f9d4424c00a3e51be9d1856b68fadb10f1 | [
"MIT"
] | 5 | 2016-12-07T14:44:43.000Z | 2021-07-13T10:41:46.000Z | Chapter04/stock_market.py | bgwilf/Python-Machine-Learning-Cookbook | 0f5967f9d4424c00a3e51be9d1856b68fadb10f1 | [
"MIT"
] | 370 | 2016-07-03T14:29:52.000Z | 2022-03-19T17:34:39.000Z | import json
import datetime
import numpy as np
import matplotlib.pyplot as plt
from sklearn import covariance, cluster
from matplotlib.finance import quotes_historical_yahoo_ochl as quotes_yahoo
# Input symbol file
symbol_file = 'symbol_map.json'
# Choose a time period
start_date = datetime.datetime(2004, 4, 5)
end_date = datetime.datetime(2007, 6, 2)
# Load the symbol map
with open(symbol_file, 'r') as f:
symbol_dict = json.loads(f.read())
symbols, names = np.array(list(symbol_dict.items())).T
quotes = [quotes_yahoo(symbol, start_date, end_date, asobject=True)
for symbol in symbols]
# Extract opening and closing quotes
opening_quotes = np.array([quote.open for quote in quotes]).astype(np.float)
closing_quotes = np.array([quote.close for quote in quotes]).astype(np.float)
# The daily fluctuations of the quotes
delta_quotes = closing_quotes - opening_quotes
# Build a graph model from the correlations
edge_model = covariance.GraphLassoCV()
# Standardize the data
X = delta_quotes.copy().T
X /= X.std(axis=0)
# Train the model
with np.errstate(invalid='ignore'):
edge_model.fit(X)
# Build clustering model using affinity propagation
_, labels = cluster.affinity_propagation(edge_model.covariance_)
num_labels = labels.max()
# Print the results of clustering
for i in range(num_labels + 1):
print "Cluster", i+1, "-->", ', '.join(names[labels == i])
| 27.54902 | 77 | 0.745907 |
79566907ce3552a19c56f92e1a7617c61c1cce30 | 1,377 | py | Python | main.py | RezaFirouzii/fum-delta-vision | 0a8ad1d434006a9aee0a12c1f021c0bca0bc87e2 | [
"MIT"
] | null | null | null | main.py | RezaFirouzii/fum-delta-vision | 0a8ad1d434006a9aee0a12c1f021c0bca0bc87e2 | [
"MIT"
] | null | null | null | main.py | RezaFirouzii/fum-delta-vision | 0a8ad1d434006a9aee0a12c1f021c0bca0bc87e2 | [
"MIT"
] | null | null | null | import pickle
import cv2 as cv
import numpy as np
import Calibration.calibration as clb
if __name__ == "__main__":
cap = cv.VideoCapture(1, cv.CAP_DSHOW)
if not cap.isOpened():
raise IOError("Video was not opened!")
coeffs = clb.load_coefficients('calibration/')
matrix = pickle.load(open('calibration/prespective_matrix.pickle', 'rb'))
while True:
res, frame = cap.read()
if not res:
break
# frame = cv.resize(frame, None, fx=1.5, fy=1.5)
rows, cols = frame.shape[:2]
# frame = cv.resize(frame, (1280, 720))
cv.imwrite('samples/frame.jpg', frame)
break
# distortion
# frame, roi = clb.undistort(frame, coeffs)
# prespective transform
# frame = cv.warpPerspective(frame, matrix, (cols, rows))
# frame_copy = frame.copy()
# frame = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
# frame = cv.adaptiveThreshold(frame, 255, cv.ADAPTIVE_THRESH_MEAN_C, cv.THRESH_BINARY_INV, 15, 9)
# kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (3, 3))
# frame = cv.morphologyEx(frame, cv.MORPH_OPEN, kernel)
# frame = cv.medianBlur(frame, 3)
cv.imshow("Chg", frame)
# cv.imshow("org", frame_copy)
if cv.waitKey(30) == 27:
break
cap.release()
cv.destroyAllWindows() | 30.6 | 106 | 0.607117 |
79566a703d58b553f1a7d29ad25bf2981e0ca4c9 | 18 | py | Python | app/models/transmart.py | thehyve/cookiecutter | 57e2605739ba8859445b7bd4e577b4f9f3bc8c23 | [
"MIT"
] | null | null | null | app/models/transmart.py | thehyve/cookiecutter | 57e2605739ba8859445b7bd4e577b4f9f3bc8c23 | [
"MIT"
] | null | null | null | app/models/transmart.py | thehyve/cookiecutter | 57e2605739ba8859445b7bd4e577b4f9f3bc8c23 | [
"MIT"
] | null | null | null | from .. import db
| 9 | 17 | 0.666667 |
79566c1f39ad993e7d76139d630f170a015de94c | 2,217 | py | Python | bin/pannzer/operators/TFIDF.py | nestorzaburannyi/annotate | e175226504efef811d4ac3914f2ab342968edf98 | [
"MIT"
] | 1 | 2021-11-26T17:29:56.000Z | 2021-11-26T17:29:56.000Z | bin/pannzer/operators/TFIDF.py | nestorzaburannyi/annotate | e175226504efef811d4ac3914f2ab342968edf98 | [
"MIT"
] | 1 | 2020-03-19T21:12:23.000Z | 2020-03-19T21:12:23.000Z | bin/pannzer/operators/TFIDF.py | nestorzaburannyi/annotate | e175226504efef811d4ac3914f2ab342968edf98 | [
"MIT"
] | null | null | null | from myoperator import RowOperator
import math,sys
class TFIDF(RowOperator):
        """
        Emit cleaned word vectors together with per-word document counts
        and term-idf weights.

        term idf = log(total number of terms in the dataset /
                       number of documents where the term appears)
        (each term is assumed to occur at most once per document)

        Creates data columns 'word', 'wordcount', 'termidf'.
        Inputs: data column 'cleandesc'.
        """
        def __init__(self, glob):
                # glob is the pipeline-wide context: it owns the shared data
                # sheet and the online dictionaries used below.
                sys.stderr.write("# Init TFIDF\n")
                self.glob = glob
                # define nicknames for column indices
                [self.cleandesc_col,self.word_col,self.wordcount_col,self.termidf_col,self.vectorlength_col]=self.glob.use_sheet("data").use_columns(['desc','word','wordcount','termidf','vector_length'])
                # use online dictionary. Object handles in glob are hardcoded
                # (glob.wordcounts / glob.nwordtotal are populated by this).
                self.glob.use_online_dictionaries(["WORDCOUNT"])
        def process(self,row, verbose=False):
                # Fill the word/wordcount/termidf/vector_length cells of one
                # data row from its cleaned description.
                cleandesc=row[self.cleandesc_col]
                # create word, wordcount, termidf vectors
                tmp=cleandesc.upper().split(" ")
                words=[]
                counts=[]
                termidf=[]
                tmp.sort()
                ssq=0.0  # running sum of squared idf weights (vector length)
                for word in tmp:
                        # Words missing from the online dictionary are dropped
                        # from all three output vectors.
                        if not word in self.glob.wordcounts:
                                if verbose: sys.stderr.write("# Warning: unknown word %s\n%s\n" %(word,tmp))
                                continue
                        words.append(word)
                        cnt=self.glob.wordcounts[word]
                        # Guard against zero/empty counts so log() below is safe.
                        if not cnt: cnt="1"
                        counts.append(str(cnt))
                        # PK's script uses nwordtotal instead of nprot
                        x=math.log(self.glob.nwordtotal/float(cnt))
                        ssq+=x*x
                        termidf.append(str(x))
                # Vectors are stored as space-separated strings in the sheet.
                row[self.word_col]=" ".join(words)
                row[self.wordcount_col]=" ".join(counts)
                row[self.termidf_col]=" ".join(termidf)
                row[self.vectorlength_col]=str(math.sqrt(ssq))
| 45.244898 | 203 | 0.524583 |
79566d12a73664df2fcf6f41ddeb2e74b748ea2e | 1,536 | py | Python | handyman.parent/handyman/doozle-daemon.py | JohnPaulZuci/zucihandyman | b37605355fb55fc2a7255c0d8aba0616fba53a74 | [
"Apache-2.0"
] | null | null | null | handyman.parent/handyman/doozle-daemon.py | JohnPaulZuci/zucihandyman | b37605355fb55fc2a7255c0d8aba0616fba53a74 | [
"Apache-2.0"
] | null | null | null | handyman.parent/handyman/doozle-daemon.py | JohnPaulZuci/zucihandyman | b37605355fb55fc2a7255c0d8aba0616fba53a74 | [
"Apache-2.0"
] | null | null | null | from http.server import HTTPServer, BaseHTTPRequestHandler
import json
from io import BytesIO
from asn1crypto._ffi import null
import os
class SimpleHTTPRequestHandler(BaseHTTPRequestHandler):
    """Tiny command daemon: GET is a health check, POST runs a payload.

    SECURITY: do_POST hands client-supplied text to os.system() and, for
    Python payloads, writes it to disk and executes it.  This is remote
    code execution by design and must never be exposed outside a fully
    trusted network.
    """

    def do_GET(self):
        # Health-check endpoint: always answers 200 with b'hi'.
        self.send_response(200)
        self.end_headers()
        self.wfile.write(b'hi')

    def do_POST(self):
        # The body is a JSON-encoded string holding either a shell command
        # or Python source (detected by the substring "import").
        content_length = int(self.headers['Content-Length'])
        body = self.rfile.read(content_length)
        y = json.loads(body.decode('utf-8'))
        if "import" in y:
            # Python payload: persist it to the fixed path, executed below.
            with open('/home/sid/refjson.py', 'w') as f:
                f.write(y)
        else:
            print(y)
        try:
            if "import" not in y:
                os.system(y)
            else:
                os.system('python3 /home/sid/refjson.py')
            message = {
                "message": "success"
            }
        except Exception as e:
            # BUG FIX: the original caught rpy2.rinterface.RRuntimeError,
            # but rpy2 is never imported -- any failure in the try block
            # raised a NameError while resolving the except clause.
            self.send_response(404)
            self.end_headers()
            message = {
                "message": str(e)
            }
        else:
            self.send_response(200)
            self.end_headers()
        y = json.dumps(message)
        self.wfile.write(y.encode(encoding='utf_8', errors='strict'))
# Read the bind address and port from config.json, then serve until the
# process is killed.  (The trailing dataset residue fused onto the final
# line has been removed; the statements themselves are unchanged.)
with open('config.json') as f:
    data = json.load(f)
httpd = HTTPServer((data['url'], int(data['port'])), SimpleHTTPRequestHandler)
httpd.serve_forever()
79566d536b3f15157ddef313e18a6670b777aad0 | 21,853 | py | Python | transitfeed/shapelib.py | aalekhpatel07/transitfeed | 490c2342c53885da23bdd18f428073e60f4d8728 | [
"Apache-2.0"
] | null | null | null | transitfeed/shapelib.py | aalekhpatel07/transitfeed | 490c2342c53885da23bdd18f428073e60f4d8728 | [
"Apache-2.0"
] | null | null | null | transitfeed/shapelib.py | aalekhpatel07/transitfeed | 490c2342c53885da23bdd18f428073e60f4d8728 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python2.4
#
# Copyright 2007 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A library for manipulating points and polylines.
This is a library for creating and manipulating points on the unit
sphere, as an approximate model of Earth. The primary use of this
library is to make manipulation and matching of polylines easy in the
transitfeed library.
NOTE: in this library, Earth is modelled as a sphere, whereas
GTFS specifies that latitudes and longitudes are in WGS84. For the
purpose of comparing and matching latitudes and longitudes that
are relatively close together on the surface of the earth, this
is adequate; for other purposes, this library may not be accurate
enough.
"""
__author__ = "chris.harrelson.code@gmail.com (Chris Harrelson)"
import copy
import decimal
import heapq
import math
class ShapeError(Exception):
    """Raised whenever shape data cannot be parsed."""
EARTH_RADIUS_METERS = 6371010.0
class Point(object):
    """
    A point on the unit sphere in three dimensions, used to model
    positions on a spherical Earth.

    Points hash on (x, y, z) and support a total ordering on that tuple,
    so they can be stored in sets/dicts and compared in heaps (e.g. the
    A* open heap in PolyGraph.ShortestPath).
    """

    def __init__(self, x, y, z):
        self.x = x
        self.y = y
        self.z = z

    def __hash__(self):
        return hash((self.x, self.y, self.z))

    def _Key(self):
        # Ordering/equality key; mirrors the attribute declaration order.
        return (self.x, self.y, self.z)

    def _RequirePoint(self, other):
        # Shared type guard for the ordering operators.
        if not isinstance(other, Point):
            raise TypeError(
                'Point comparison requires a "Point", '
                'not a "%s"' % type(other).__name__
            )

    # BUG FIX: the original defined only __cmp__, which Python 3 ignores
    # entirely -- Points were unorderable, and any f-score tie in
    # PolyGraph.ShortestPath's heapq would raise TypeError.  The rich
    # comparison methods below restore exact-tuple ordering.
    def __eq__(self, other):
        if not isinstance(other, Point):
            return NotImplemented
        return self._Key() == other._Key()

    def __ne__(self, other):
        eq = self.__eq__(other)
        return eq if eq is NotImplemented else not eq

    def __lt__(self, other):
        self._RequirePoint(other)
        return self._Key() < other._Key()

    def __le__(self, other):
        self._RequirePoint(other)
        return self._Key() <= other._Key()

    def __gt__(self, other):
        self._RequirePoint(other)
        return self._Key() > other._Key()

    def __ge__(self, other):
        self._RequirePoint(other)
        return self._Key() >= other._Key()

    def __cmp__(self, other):
        # Kept for backward compatibility with Python 2 era callers,
        # implemented without the removed cmp() builtin.
        self._RequirePoint(other)
        return (self._Key() > other._Key()) - (self._Key() < other._Key())

    def __str__(self):
        return "(%.15f, %.15f, %.15f) " % (self.x, self.y, self.z)

    def Norm2(self):
        """
        Returns the L_2 (Euclidean) norm of self.
        """
        # Accumulator renamed so it no longer shadows the builtin sum().
        sq = self.x * self.x + self.y * self.y + self.z * self.z
        return math.sqrt(float(sq))

    def IsUnitLength(self):
        """Returns True if the norm is 1 to within 1e-14."""
        return abs(self.Norm2() - 1.0) < 1e-14

    def Plus(self, other):
        """
        Returns a new point which is the pointwise sum of self and other.
        """
        return Point(self.x + other.x, self.y + other.y, self.z + other.z)

    def Minus(self, other):
        """
        Returns a new point which is the pointwise subtraction of other from
        self.
        """
        return Point(self.x - other.x, self.y - other.y, self.z - other.z)

    def DotProd(self, other):
        """
        Returns the (scalar) dot product of self with other.
        """
        return self.x * other.x + self.y * other.y + self.z * other.z

    def Times(self, val):
        """
        Returns a new point which is pointwise multiplied by val.
        """
        return Point(self.x * val, self.y * val, self.z * val)

    def Normalize(self):
        """
        Returns a unit point in the same direction as self.
        """
        return self.Times(1 / self.Norm2())

    def RobustCrossProd(self, other):
        """
        A robust version of cross product.  If self and other are not
        nearly the same point, returns the same value as CrossProd()
        modulo normalization.  Otherwise returns an arbitrary unit point
        orthogonal to self.
        """
        assert self.IsUnitLength() and other.IsUnitLength()
        x = self.Plus(other).CrossProd(other.Minus(self))
        if abs(x.x) > 1e-15 or abs(x.y) > 1e-15 or abs(x.z) > 1e-15:
            return x.Normalize()
        else:
            return self.Ortho()

    def LargestComponent(self):
        """
        Returns (i, val) where i is the component index (0 - 2) which has
        the largest absolute value and val is the value of the component.
        """
        if abs(self.x) > abs(self.y):
            if abs(self.x) > abs(self.z):
                return (0, self.x)
            else:
                return (2, self.z)
        else:
            if abs(self.y) > abs(self.z):
                return (1, self.y)
            else:
                return (2, self.z)

    def Ortho(self):
        """Returns a unit-length point orthogonal to this point"""
        (index, val) = self.LargestComponent()
        index = index - 1
        if index < 0:
            index = 2
        # Seed point chosen so it cannot be parallel to self after the
        # dominant-component substitution below.
        temp = Point(0.012, 0.053, 0.00457)
        if index == 0:
            temp.x = 1
        elif index == 1:
            temp.y = 1
        elif index == 2:
            temp.z = 1
        return self.CrossProd(temp).Normalize()

    def CrossProd(self, other):
        """
        Returns the cross product of self and other.
        """
        return Point(
            self.y * other.z - self.z * other.y,
            self.z * other.x - self.x * other.z,
            self.x * other.y - self.y * other.x,
        )

    @staticmethod
    def _approxEq(a, b):
        return abs(a - b) < 1e-11

    def Equals(self, other):
        """
        Returns true if self and other are approximately equal
        (componentwise within 1e-11).
        """
        return (
            self._approxEq(self.x, other.x)
            and self._approxEq(self.y, other.y)
            and self._approxEq(self.z, other.z)
        )

    def Angle(self, other):
        """
        Returns the angle in radians between self and other.
        """
        return math.atan2(self.CrossProd(other).Norm2(), self.DotProd(other))

    def ToLatLng(self):
        """
        Returns the latitude and longitude (in degrees) that this point
        represents under a spherical Earth model.
        """
        rad_lat = math.atan2(self.z, math.sqrt(self.x * self.x + self.y * self.y))
        rad_lng = math.atan2(self.y, self.x)
        return (rad_lat * 180.0 / math.pi, rad_lng * 180.0 / math.pi)

    @staticmethod
    def FromLatLng(lat, lng):
        """
        Returns a new unit point for this latitude and longitude (degrees)
        under a spherical Earth model.
        """
        phi = lat * (math.pi / 180.0)
        theta = lng * (math.pi / 180.0)
        cosphi = math.cos(phi)
        return Point(math.cos(theta) * cosphi, math.sin(theta) * cosphi, math.sin(phi))

    def GetDistanceMeters(self, other):
        """Returns the great-circle distance in meters between two unit points."""
        assert self.IsUnitLength() and other.IsUnitLength()
        return self.Angle(other) * EARTH_RADIUS_METERS
def SimpleCCW(a, b, c):
    """Return True when the spherical triangle abc winds counterclockwise."""
    normal = c.CrossProd(a)
    return normal.DotProd(b) > 0
def GetClosestPoint(x, a, b):
    """
    Returns the point on the great circle segment ab closest to x.

    All three arguments must be unit-length Points.  The result is either
    the projection of x onto the circle (normalized) or one of the
    endpoints a/b when the projection falls outside the segment.
    """
    assert x.IsUnitLength()
    assert a.IsUnitLength()
    assert b.IsUnitLength()
    a_cross_b = a.RobustCrossProd(b)
    # project to the great circle going through a and b
    # (a_cross_b comes back unit-length from RobustCrossProd, so the
    # division by Norm2() is effectively a no-op kept for clarity)
    p = x.Minus(a_cross_b.Times(x.DotProd(a_cross_b) / a_cross_b.Norm2()))
    # if p lies between a and b, return it
    if SimpleCCW(a_cross_b, a, p) and SimpleCCW(p, b, a_cross_b):
        return p.Normalize()
    # otherwise return the closer of a or b
    if x.Minus(a).Norm2() <= x.Minus(b).Norm2():
        return a
    else:
        return b
class Poly(object):
    """
    A polyline: an ordered sequence of unit-sphere Points, optionally named.
    """

    def __init__(self, points=(), name=None):
        # BUG FIX: the original used a mutable default argument ([]).
        # An immutable tuple default avoids the shared-default pitfall;
        # list() still copies whatever sequence the caller passes.
        self._points = list(points)
        self._name = name

    def AddPoint(self, p):
        """
        Adds a new point (must be unit length) to the end of the polyline.
        """
        assert p.IsUnitLength()
        self._points.append(p)

    def GetName(self):
        """Returns the polyline's name (may be None)."""
        return self._name

    def GetPoint(self, i):
        """Returns the i-th point; negative indices count from the end."""
        return self._points[i]

    def GetPoints(self):
        """Returns the underlying list of points (not a copy)."""
        return self._points

    def GetNumPoints(self):
        return len(self._points)

    def _GetPointSafe(self, i):
        # Like GetPoint, but returns None instead of raising IndexError.
        try:
            return self.GetPoint(i)
        except IndexError:
            return None

    def GetClosestPoint(self, p):
        """
        Returns (closest_p, closest_i), where closest_p is the closest point
        to p on the piecewise linear curve represented by the polyline,
        and closest_i is the index of the point on the polyline just before
        the polyline segment that contains closest_p.
        """
        assert self._points
        closest_point = self._points[0]
        closest_i = 0
        for i in range(len(self._points) - 1):
            # Bare GetClosestPoint resolves to the module-level function.
            segment_closest = GetClosestPoint(
                p, self._points[i], self._points[i + 1])
            if p.Angle(segment_closest) < p.Angle(closest_point):
                closest_point = segment_closest.Normalize()
                closest_i = i
        return (closest_point, closest_i)

    def LengthMeters(self):
        """Return length of this polyline in meters."""
        assert self._points
        return sum(
            self._points[i].GetDistanceMeters(self._points[i + 1])
            for i in range(len(self._points) - 1))

    def Reversed(self):
        """Return a polyline that is the reverse of this polyline."""
        return Poly(reversed(self.GetPoints()), self.GetName())

    def CutAtClosestPoint(self, p):
        """
        Let x be the point on the polyline closest to p.  Then
        CutAtClosestPoint returns two new polylines, one representing
        the polyline from the beginning up to x, and one representing
        x onwards to the end of the polyline.  x is the first point
        returned in the second polyline.
        """
        (closest, i) = self.GetClosestPoint(p)
        tail = [closest]
        tail.extend(self._points[i + 1:])
        return (Poly(self._points[:i + 1]), Poly(tail))

    def GreedyPolyMatchDist(self, shape):
        """
        Tries a greedy matching algorithm to match self to the given shape.
        Returns the maximum distance in meters of any point in self to its
        matched point in shape under the algorithm.

        Args: shape, a Poly object.
        """
        tmp_shape = Poly(shape.GetPoints())
        max_radius = 0
        for point in self._points:
            tmp_shape = tmp_shape.CutAtClosestPoint(point)[1]
            dist = tmp_shape.GetPoint(0).GetDistanceMeters(point)
            max_radius = max(max_radius, dist)
        return max_radius

    @staticmethod
    def MergePolys(polys, merge_point_threshold=10):
        """
        Merge multiple polylines, in the order that they were passed in.
        Merged polyline will have the names of their component parts joined
        by ';'.  Example: merging [a,b], [c,d] and [e,f] results in
        [a,b,c,d,e,f].  However if the endpoints of two adjacent polylines
        are less than merge_point_threshold meters apart, only the first
        endpoint is kept in the merged polyline.
        """
        name = ";".join(
            p.GetName() if p.GetName() is not None else "" for p in polys)
        merged = Poly([], name)
        if polys:
            for p in polys[0].GetPoints():
                merged.AddPoint(p)
            last_point = merged._GetPointSafe(-1)
            for poly in polys[1:]:
                first_point = poly._GetPointSafe(0)
                if (last_point and first_point
                        and last_point.GetDistanceMeters(first_point)
                        <= merge_point_threshold):
                    # Endpoints coincide (within threshold): drop the
                    # duplicate joint point.
                    points = poly.GetPoints()[1:]
                else:
                    points = poly.GetPoints()
                for p in points:
                    merged.AddPoint(p)
                last_point = merged._GetPointSafe(-1)
        return merged

    def __str__(self):
        return self._ToString(str)

    def ToLatLngString(self):
        return self._ToString(lambda p: str(p.ToLatLng()))

    def _ToString(self, pointToStringFn):
        # "<name>: <p1>, <p2>, ..." with an empty name rendered as "".
        return "%s: %s" % (
            self.GetName() or "",
            ", ".join(pointToStringFn(p) for p in self._points))
class PolyCollection(object):
    """A collection of polylines, keyed by shape name."""

    def __init__(self):
        self._name_to_shape = {}

    def AddPoly(self, poly, smart_duplicate_handling=True):
        """
        Adds a new polyline to the collection, optionally de-duplicating
        by name: exact duplicates keep the original name, near-duplicates
        get a uniquified name.
        """
        inserted_name = poly.GetName()
        if poly.GetName() in self._name_to_shape:
            if not smart_duplicate_handling:
                raise ShapeError("Duplicate shape found: " + poly.GetName())
            print(
                "Warning: duplicate shape id being added to collection: "
                + poly.GetName()
            )
            existing = self._name_to_shape[poly.GetName()]
            if poly.GreedyPolyMatchDist(existing) < 10:
                print(" (Skipping as it apears to be an exact duplicate)")
            else:
                print(" (Adding new shape variant with uniquified name)")
                inserted_name = "%s-%d" % (inserted_name, len(self._name_to_shape))
        self._name_to_shape[inserted_name] = poly

    def NumPolys(self):
        return len(self._name_to_shape)

    def FindMatchingPolys(self, start_point, end_point, max_radius=150):
        """
        Returns the polylines in the collection whose endpoints lie within
        max_radius meters of the given start and end points.
        """
        return [
            shape
            for shape in self._name_to_shape.values()
            if start_point.GetDistanceMeters(shape.GetPoint(0)) < max_radius
            and end_point.GetDistanceMeters(shape.GetPoint(-1)) < max_radius
        ]
class PolyGraph(PolyCollection):
    """
    A graph whose edges are polylines; nodes are the polylines' endpoint
    Points.  Supports A* shortest-path queries between nodes.
    """
    def __init__(self):
        PolyCollection.__init__(self)
        # Maps endpoint Point -> set of Poly edges incident on it.
        self._nodes = {}
    def AddPoly(self, poly, smart_duplicate_handling=True):
        # Register the polyline both as a shape and as a graph edge
        # connecting its two endpoints.
        PolyCollection.AddPoly(self, poly, smart_duplicate_handling)
        start_point = poly.GetPoint(0)
        end_point = poly.GetPoint(-1)
        self._AddNodeWithEdge(start_point, poly)
        self._AddNodeWithEdge(end_point, poly)
    def _AddNodeWithEdge(self, point, edge):
        # Add edge to point's incidence set, creating the node on demand.
        if point in self._nodes:
            self._nodes[point].add(edge)
        else:
            self._nodes[point] = set([edge])
    def ShortestPath(self, start, goal):
        """Uses the A* algorithm to find a shortest path between start and goal.
        For more background see http://en.wikipedia.org/wiki/A-star_algorithm
        Some definitions:
        g(x): The actual shortest distance traveled from initial node to current
              node.
        h(x): The estimated (or "heuristic") distance from current node to goal.
              We use the distance on Earth from node to goal as the heuristic.
              This heuristic is both admissible and monotonic (see wikipedia for
              more details).
        f(x): The sum of g(x) and h(x), used to prioritize elements to look at.
        Arguments:
          start: Point that is in the graph, start point of the search.
          goal: Point that is in the graph, end point for the search.
        Returns:
          A Poly object representing the shortest polyline through the graph from
          start to goal, or None if no path found.
        """
        assert start in self._nodes
        assert goal in self._nodes
        closed_set = set()  # Set of nodes already evaluated.
        open_heap = [(0, start)]  # Nodes to visit, heapified by f(x).
        open_set = set([start])  # Same as open_heap, but a set instead of a heap.
        g_scores = {start: 0}  # Distance from start along optimal path
        came_from = {}  # Map to reconstruct optimal path once we're done.
        while open_set:
            (f_x, x) = heapq.heappop(open_heap)
            open_set.remove(x)
            if x == goal:
                return self._ReconstructPath(came_from, goal)
            closed_set.add(x)
            edges = self._nodes[x]
            for edge in edges:
                # Each edge joins x to its opposite endpoint y.
                if edge.GetPoint(0) == x:
                    y = edge.GetPoint(-1)
                else:
                    y = edge.GetPoint(0)
                if y in closed_set:
                    continue
                tentative_g_score = g_scores[x] + edge.LengthMeters()
                tentative_is_better = False
                if y not in open_set:
                    h_y = y.GetDistanceMeters(goal)
                    f_y = tentative_g_score + h_y
                    open_set.add(y)
                    # NOTE(review): an f_y tie makes heapq compare the Point
                    # objects themselves -- Point must be orderable under
                    # Python 3 for this push not to raise; confirm.
                    heapq.heappush(open_heap, (f_y, y))
                    tentative_is_better = True
                elif tentative_g_score < g_scores[y]:
                    tentative_is_better = True
                if tentative_is_better:
                    came_from[y] = (x, edge)
                    g_scores[y] = tentative_g_score
        return None
    def _ReconstructPath(self, came_from, current_node):
        """
        Helper method for ShortestPath, to reconstruct path.
        Arguments:
          came_from: a dictionary mapping Point to (Point, Poly) tuples.
              This dictionary keeps track of the previous neighbor to a node, and
              the edge used to get from the previous neighbor to the node.
          current_node: the current Point in the path.
        Returns:
          A Poly that represents the path through the graph from the start of the
          search to current_node.
        """
        if current_node in came_from:
            (previous_node, previous_edge) = came_from[current_node]
            # Orient the edge so it ends at current_node before merging.
            if previous_edge.GetPoint(0) == current_node:
                previous_edge = previous_edge.Reversed()
            p = self._ReconstructPath(came_from, previous_node)
            return Poly.MergePolys([p, previous_edge], merge_point_threshold=0)
        else:
            # Base case: reached the search start; empty unnamed polyline.
            return Poly([], "")
    def FindShortestMultiPointPath(
        self, points, max_radius=150, keep_best_n=10, verbosity=0
    ):
        """
        Return a polyline, representing the shortest path through this graph that
        has edge endpoints on each of a given list of points in sequence. We allow
        fuzziness in matching of input points to points in this graph.
        We limit ourselves to a view of the best keep_best_n paths at any time, as a
        greedy optimization.
        """
        assert len(points) > 1
        nearby_points = []
        paths_found = []  # A heap sorted by inverse path length.
        for i, point in enumerate(points):
            # Candidate graph nodes within max_radius meters of each input.
            nearby = [
                p for p in self._nodes.keys() if p.GetDistanceMeters(point) < max_radius
            ]
            if verbosity >= 2:
                print(
                    "Nearby points for point %d %s: %s"
                    % (
                        i + 1,
                        str(point.ToLatLng()),
                        ", ".join([str(n.ToLatLng()) for n in nearby]),
                    )
                )
            if nearby:
                nearby_points.append(nearby)
            else:
                print("No nearby points found for point %s" % str(point.ToLatLng()))
                return None
        pathToStr = lambda start, end, path: (
            " Best path %s -> %s: %s"
            % (
                str(start.ToLatLng()),
                str(end.ToLatLng()),
                path and path.GetName() or "None",
            )
        )
        if verbosity >= 3:
            print("Step 1")
        step = 2
        # Seed the candidate heap with all paths between the first two
        # groups of nearby nodes.
        start_points = nearby_points[0]
        end_points = nearby_points[1]
        for start in start_points:
            for end in end_points:
                path = self.ShortestPath(start, end)
                if verbosity >= 3:
                    print(pathToStr(start, end, path))
                PolyGraph._AddPathToHeap(paths_found, path, keep_best_n)
        # Greedily extend each kept candidate through the remaining groups.
        for possible_points in nearby_points[2:]:
            if verbosity >= 3:
                print("\nStep %d" % step)
            step += 1
            new_paths_found = []
            start_end_paths = {}  # cache of shortest paths between (start, end) pairs
            for score, path in paths_found:
                start = path.GetPoint(-1)
                for end in possible_points:
                    if (start, end) in start_end_paths:
                        new_segment = start_end_paths[(start, end)]
                    else:
                        new_segment = self.ShortestPath(start, end)
                        if verbosity >= 3:
                            print(pathToStr(start, end, new_segment))
                        start_end_paths[(start, end)] = new_segment
                    if new_segment:
                        new_path = Poly.MergePolys(
                            [path, new_segment], merge_point_threshold=0
                        )
                        PolyGraph._AddPathToHeap(new_paths_found, new_path, keep_best_n)
            paths_found = new_paths_found
        if paths_found:
            # NOTE(review): ties in score here compare Poly objects, which
            # define no ordering -- an exact length tie would raise
            # TypeError under Python 3; confirm whether this can occur.
            best_score, best_path = max(paths_found)
            return best_path
        else:
            return None
    @staticmethod
    def _AddPathToHeap(heap, path, keep_best_n):
        # Keep at most keep_best_n paths; heap is ordered by negated
        # length so heapreplace evicts the currently-longest path.
        if path and path.GetNumPoints():
            new_item = (-path.LengthMeters(), path)
            if new_item not in heap:
                if len(heap) < keep_best_n:
                    heapq.heappush(heap, new_item)
                else:
                    # NOTE(review): equal lengths make heapq compare Poly
                    # objects (unorderable) -- potential TypeError; confirm.
                    heapq.heapreplace(heap, new_item)
| 34.360063 | 88 | 0.571317 |
795670a9420b4cfedbacea05cc247cb3581976ed | 1,529 | py | Python | test/test_audit.py | westonsteimel/pip-audit | 3b29e66418de9236c92b3ffaa254ae9def2bdcbf | [
"Apache-2.0"
] | 447 | 2021-09-02T17:06:18.000Z | 2022-03-28T20:58:22.000Z | test/test_audit.py | westonsteimel/pip-audit | 3b29e66418de9236c92b3ffaa254ae9def2bdcbf | [
"Apache-2.0"
] | 177 | 2021-09-02T16:45:42.000Z | 2022-03-29T14:19:11.000Z | test/test_audit.py | westonsteimel/pip-audit | 3b29e66418de9236c92b3ffaa254ae9def2bdcbf | [
"Apache-2.0"
] | 25 | 2021-09-13T16:04:23.000Z | 2022-03-21T17:47:28.000Z | import pretend # type: ignore
import pytest
from packaging.version import Version
from pip_audit import _audit as audit
from pip_audit._audit import AuditOptions, Auditor
from pip_audit._service.interface import VulnerabilityResult
def test_audit(vuln_service, dep_source):
    """Auditing a dependency source yields (dependency, [result]) pairs.

    vuln_service / dep_source are pytest fixtures (factories, presumably
    defined in conftest.py -- confirm) producing a stub vulnerability
    service and dependency source.
    """
    service = vuln_service()
    source = dep_source()
    auditor = Auditor(service)
    results = auditor.audit(source)
    # The stub service reports exactly one canned vulnerability for the
    # single dependency the stub source yields.
    assert next(results) == (
        next(source.collect()),
        [
            VulnerabilityResult(
                id="fake-id",
                description="this is not a real result",
                fix_versions=[Version("1.1.0")],
            )
        ],
    )
    # The iterator must be exhausted after the single dependency.
    with pytest.raises(StopIteration):
        next(results)
def test_audit_dry_run(monkeypatch, vuln_service, dep_source):
    """In dry-run mode the auditor logs instead of querying the service."""
    service = vuln_service()
    source = dep_source()
    auditor = Auditor(service, options=AuditOptions(dry_run=True))
    # Replace the real service and the module logger with call recorders.
    service = pretend.stub(query_all=pretend.call_recorder(lambda s: None))
    logger = pretend.stub(info=pretend.call_recorder(lambda s: None))
    monkeypatch.setattr(auditor, "_service", service)
    monkeypatch.setattr(audit, "logger", logger)
    # dict-construct here to consume the iterator, causing the effects below.
    _ = dict(auditor.audit(source))
    # In dry-run mode, no calls should be made to the vuln service,
    # but an appropriate number of logging calls should be made.
    assert service.query_all.calls == []
    assert len(logger.info.calls) == len(list(source.collect()))
| 31.204082 | 77 | 0.682145 |
7956716be12ee2c90766c53c22c4aee67a0a6352 | 1,753 | py | Python | sacluster/lib/def_conf/def_config_loading.py | hpc-team2021/saclaster_addon_MiddlewareSetup | 3fc354248ac58e1c5b529819cd7c6b471d279ca1 | [
"Apache-2.0"
] | 1 | 2021-09-01T00:12:19.000Z | 2021-09-01T00:12:19.000Z | sacluster/lib/def_conf/def_config_loading.py | hpc-team2021/saclaster_addon_MiddlewareSetup | 3fc354248ac58e1c5b529819cd7c6b471d279ca1 | [
"Apache-2.0"
] | null | null | null | sacluster/lib/def_conf/def_config_loading.py | hpc-team2021/saclaster_addon_MiddlewareSetup | 3fc354248ac58e1c5b529819cd7c6b471d279ca1 | [
"Apache-2.0"
] | 1 | 2021-10-06T07:52:26.000Z | 2021-10-06T07:52:26.000Z |
import json
import os
from info_print import printout
from load_external_data import external_data
from config_validation import config_validation
import logging
logger = logging.getLogger("sacluster").getChild(os.path.basename(__file__))
def config_loading_main(ext_info, in_path = "", info_list = [1,0,0,0], fp = ""):
    """Interactively load and validate a JSON configuration file.

    Keeps prompting the user (via printout) for a path until an existing
    .json/.js file is successfully parsed, then validates its contents.

    Args:
        ext_info: external data descriptor forwarded to config_validation.
        in_path: initial candidate path; may be empty (user is prompted).
        info_list: output-channel flags forwarded to printout().
        fp: log-file handle/path forwarded to printout().

    Returns:
        The validated config parameters from config_validation().
    """
    logger.debug('loading config params')
    _ = printout("loading config params ...", info_list = info_list, fp = fp)
    while True:
        _, ext = os.path.splitext(in_path)
        if os.path.isfile(in_path) and ext in (".json", ".js"):
            try:
                with open(in_path, 'r') as f:
                    json_f = json.load(f)
                break
            except PermissionError:
                logger.error('PermissionError: the specified path cannot be accessed')
                _ = printout("PermissionError: the specified path cannot be accessed.", info_type = 0, info_list = info_list, fp = fp)
                logger.debug('request new path')
                in_path = printout("New path >>", info_type = 1, info_list = info_list, fp = fp)
            except json.JSONDecodeError:
                # BUG FIX: a file with malformed JSON used to crash out of
                # the prompt loop; treat it like any other unloadable file
                # and re-prompt instead.
                logger.error('JSONDecodeError: the file is not valid JSON')
                _ = printout("FileImportError: config params can not be loaded. Please specify the json file.", info_list = info_list, fp = fp)
                in_path = printout("New path >>", info_type = 1, info_list = info_list, fp = fp)
        else:
            logger.error('FileImportError: the file did not load properly')
            _ = printout("FileImportError: config params can not be loaded. Please specify the json file.", info_list = info_list, fp = fp)
            in_path = printout("New path >>", info_type = 1, info_list = info_list, fp = fp)
    logger.debug('Start checking the config param')
    return config_validation(ext_info, json_f, info_list = info_list, fp = fp)
| 17.707071 | 139 | 0.592698 |
7956718d0ba3a75798fbb071c388531b73777b3e | 4,783 | py | Python | gen_models/convgru.py | yifan-you-37/rl_swiss | 8b0ee7caa5c1fa93860916004cf4fd970667764f | [
"MIT"
] | 56 | 2019-10-20T03:09:02.000Z | 2022-03-25T09:21:40.000Z | gen_models/convgru.py | yifan-you-37/rl_swiss | 8b0ee7caa5c1fa93860916004cf4fd970667764f | [
"MIT"
] | 3 | 2020-10-01T07:33:51.000Z | 2021-05-12T03:40:57.000Z | gen_models/convgru.py | yifan-you-37/rl_swiss | 8b0ee7caa5c1fa93860916004cf4fd970667764f | [
"MIT"
] | 10 | 2019-11-04T16:56:09.000Z | 2022-03-25T09:21:41.000Z | '''
Stolen from https://github.com/jacobkimmel/pytorch_convgru/blob/master/convgru.py
'''
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from torch.nn import init
class ConvGRUCell(nn.Module):
    """
    A single convolutional GRU cell.

    All gates are 2-D convolutions over the channel-concatenation of the
    input and the hidden state, so spatial dimensions are preserved.
    """
    def __init__(self, input_size, hidden_size, kernel_size):
        """
        input_size: number of input channels.
        hidden_size: number of hidden-state channels.
        kernel_size: side of the square Conv2d kernel; padding is set to
            kernel_size // 2 so spatial size is preserved (assumes odd
            kernel_size).
        """
        super().__init__()
        padding = kernel_size // 2
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.reset_gate = nn.Conv2d(input_size + hidden_size, hidden_size, kernel_size, padding=padding)
        self.update_gate = nn.Conv2d(input_size + hidden_size, hidden_size, kernel_size, padding=padding)
        self.out_gate = nn.Conv2d(input_size + hidden_size, hidden_size, kernel_size, padding=padding)
        # Orthogonal weights / zero biases for all three gates.
        # (init.orthogonal / init.constant are deprecated; use the
        # in-place *_ variants.)
        for gate in (self.reset_gate, self.update_gate, self.out_gate):
            init.orthogonal_(gate.weight)
            init.constant_(gate.bias, 0.)

    def forward(self, input_, prev_state):
        """
        input_: tensor of shape (batch, input_size, height, width).
        prev_state: tensor of shape (batch, hidden_size, height, width),
            or None to start from an all-zero hidden state.
        Returns the new hidden state, shape (batch, hidden_size, H, W).
        """
        # get batch and spatial sizes
        batch_size = input_.size(0)
        spatial_size = input_.size()[2:]

        # generate empty prev_state, if None is provided
        if prev_state is None:
            # BUG FIX: the original wrapped torch.zeros in
            # autograd.Variable, which was never imported (NameError) and
            # is long deprecated.  Allocate directly on the input's
            # device/dtype instead of branching on cuda availability.
            state_size = [batch_size, self.hidden_size] + list(spatial_size)
            prev_state = torch.zeros(
                state_size, dtype=input_.dtype, device=input_.device)

        # data size is [batch, channel, height, width]
        stacked_inputs = torch.cat([input_, prev_state], dim=1)
        # F.sigmoid / F.tanh are deprecated; use the torch.* equivalents.
        update = torch.sigmoid(self.update_gate(stacked_inputs))
        reset = torch.sigmoid(self.reset_gate(stacked_inputs))
        out_inputs = torch.tanh(
            self.out_gate(torch.cat([input_, prev_state * reset], dim=1)))
        new_state = prev_state * (1 - update) + out_inputs * update
        return new_state
class ConvGRU(nn.Module):
    def __init__(self, input_size, hidden_sizes, kernel_sizes, n_layers):
        '''
        Generates a multi-layer convolutional GRU.
        Preserves spatial dimensions across cells, only altering depth.
        Parameters
        ----------
        input_size : integer. depth dimension of input tensors.
        hidden_sizes : integer or list. depth dimensions of hidden state.
            if integer, the same hidden size is used for all cells.
        kernel_sizes : integer or list. sizes of Conv2d gate kernels.
            if integer, the same kernel size is used for all cells.
        n_layers : integer. number of chained `ConvGRUCell`.
        '''
        super(ConvGRU, self).__init__()
        self.input_size = input_size
        # Broadcast scalar hidden/kernel sizes to one entry per layer.
        if type(hidden_sizes) != list:
            self.hidden_sizes = [hidden_sizes]*n_layers
        else:
            assert len(hidden_sizes) == n_layers, '`hidden_sizes` must have the same length as n_layers'
            self.hidden_sizes = hidden_sizes
        if type(kernel_sizes) != list:
            self.kernel_sizes = [kernel_sizes]*n_layers
        else:
            assert len(kernel_sizes) == n_layers, '`kernel_sizes` must have the same length as n_layers'
            self.kernel_sizes = kernel_sizes
        self.n_layers = n_layers
        cells = []
        for i in range(self.n_layers):
            # Layer 0 consumes the raw input; deeper layers consume the
            # previous layer's hidden state.
            if i == 0:
                input_dim = self.input_size
            else:
                input_dim = self.hidden_sizes[i-1]
            cell = ConvGRUCell(input_dim, self.hidden_sizes[i], self.kernel_sizes[i])
            name = 'ConvGRUCell_' + str(i).zfill(2)
            # setattr registers each cell as a submodule so its parameters
            # are tracked by .parameters(), .to(), state_dict(), etc.
            setattr(self, name, cell)
            cells.append(getattr(self, name))
        self.cells = cells
    def forward(self, x, hidden=None):
        '''
        Parameters
        ----------
        x : 4D input tensor. (batch, channels, height, width).
        hidden : list of 4D hidden state representations. (batch, channels, height, width).
            None (or empty) starts every layer from a zero state.
        Returns
        -------
        upd_hidden : list of per-layer hidden states, each
            (batch, channels, height, width).
        '''
        if not hidden:
            hidden = [None]*self.n_layers
        input_ = x
        upd_hidden = []
        for layer_idx in range(self.n_layers):
            cell = self.cells[layer_idx]
            cell_hidden = hidden[layer_idx]
            # pass through layer
            upd_cell_hidden = cell(input_, cell_hidden)
            upd_hidden.append(upd_cell_hidden)
            # update input_ to the last updated hidden layer for next pass
            input_ = upd_cell_hidden
        # retain tensors in list to allow different hidden sizes
        return upd_hidden
| 34.912409 | 105 | 0.624713 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.