code
stringlengths 1
25.8M
| language
stringclasses 18
values | source
stringclasses 4
values | repo
stringclasses 78
values | path
stringlengths 0
268
|
|---|---|---|---|---|
/*
* Copyright 2002-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.beans;
import java.beans.PropertyChangeEvent;
import org.jspecify.annotations.Nullable;
/**
 * Abstract superclass for exceptions that occur while accessing a bean
 * property — for example a type mismatch or an invocation target exception.
 *
 * @author Rod Johnson
 * @author Juergen Hoeller
 */
@SuppressWarnings("serial")
public abstract class PropertyAccessException extends BeansException {

    /** The event that triggered the failure; {@code null} if none was involved. */
    private final @Nullable PropertyChangeEvent changeEvent;

    /**
     * Create a new PropertyAccessException.
     * @param propertyChangeEvent the PropertyChangeEvent that resulted in the problem
     * @param msg the detail message
     * @param cause the root cause
     */
    public PropertyAccessException(PropertyChangeEvent propertyChangeEvent, String msg, @Nullable Throwable cause) {
        super(msg, cause);
        this.changeEvent = propertyChangeEvent;
    }

    /**
     * Create a new PropertyAccessException without a PropertyChangeEvent.
     * @param msg the detail message
     * @param cause the root cause
     */
    public PropertyAccessException(String msg, @Nullable Throwable cause) {
        super(msg, cause);
        this.changeEvent = null;
    }

    /**
     * Return the PropertyChangeEvent that resulted in the problem.
     * <p>May be {@code null}; only available if an actual bean property
     * was affected.
     */
    public @Nullable PropertyChangeEvent getPropertyChangeEvent() {
        return this.changeEvent;
    }

    /**
     * Return the name of the affected property, if available.
     */
    public @Nullable String getPropertyName() {
        if (this.changeEvent == null) {
            return null;
        }
        return this.changeEvent.getPropertyName();
    }

    /**
     * Return the affected value that was about to be set, if any.
     */
    public @Nullable Object getValue() {
        if (this.changeEvent == null) {
            return null;
        }
        return this.changeEvent.getNewValue();
    }

    /**
     * Return a corresponding error code for this type of exception.
     */
    public abstract String getErrorCode();

}
|
java
|
github
|
https://github.com/spring-projects/spring-framework
|
spring-beans/src/main/java/org/springframework/beans/PropertyAccessException.java
|
/*
* Copyright 2014-2021 JetBrains s.r.o and contributors. Use of this source code is governed by the Apache 2.0 license.
*/
package io.ktor.client.engine
import io.ktor.utils.io.*
import kotlinx.coroutines.*
/**
 * Base configuration for [HttpClientEngine].
 *
 * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.engine.HttpClientEngineConfig)
 */
@KtorDsl
public open class HttpClientEngineConfig {
    /**
     * Specifies network threads count advice.
     *
     * Deprecated with [DeprecationLevel.ERROR]; set [dispatcher] instead.
     *
     * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.engine.HttpClientEngineConfig.threadsCount)
     */
    @Deprecated(
        "The [threadsCount] property is deprecated. Consider setting [dispatcher] instead.",
        level = DeprecationLevel.ERROR
    )
    public var threadsCount: Int = 4

    /**
     * Allow specifying the coroutine dispatcher to use for IO operations.
     *
     * `null` by default; NOTE(review): presumably the engine falls back to
     * its own default dispatcher in that case — confirm per engine.
     *
     * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.engine.HttpClientEngineConfig.dispatcher)
     */
    public var dispatcher: CoroutineDispatcher? = null

    /**
     * Enables HTTP pipelining advice. Disabled by default.
     *
     * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.engine.HttpClientEngineConfig.pipelining)
     */
    public var pipelining: Boolean = false

    /**
     * Specifies a proxy address to use.
     * Uses a system proxy by default.
     *
     * You can learn more from [Proxy](https://ktor.io/docs/proxy.html).
     *
     * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.engine.HttpClientEngineConfig.proxy)
     */
    public var proxy: ProxyConfig? = null
}
|
kotlin
|
github
|
https://github.com/ktorio/ktor
|
ktor-client/ktor-client-core/common/src/io/ktor/client/engine/HttpClientEngineConfig.kt
|
"""model.py
The datamodel, which represents Person that has multiple
Address objects, each with PostalCode, City, Country.
Person --(1..n)--> Address
Address --(has a)--> PostalCode
PostalCode --(has a)--> City
City --(has a)--> Country
"""
from sqlalchemy import Column, Integer, String, ForeignKey
from sqlalchemy.orm import relationship
from .caching_query import FromCache, RelationshipCache
from .environment import Base, bootstrap
class Country(Base):
    """A country, referenced by City via a foreign key."""

    __tablename__ = 'country'

    id = Column(Integer, primary_key=True)
    name = Column(String(100), nullable=False)

    def __init__(self, name):
        self.name = name
class City(Base):
    """A city belonging to exactly one Country."""

    __tablename__ = 'city'

    id = Column(Integer, primary_key=True)
    name = Column(String(100), nullable=False)
    country_id = Column(Integer, ForeignKey('country.id'), nullable=False)
    country = relationship(Country)

    def __init__(self, name, country):
        self.name = name
        self.country = country
class PostalCode(Base):
    """A postal code within one City; exposes the City's Country for convenience."""

    __tablename__ = 'postal_code'

    id = Column(Integer, primary_key=True)
    code = Column(String(10), nullable=False)
    city_id = Column(Integer, ForeignKey('city.id'), nullable=False)
    city = relationship(City)

    @property
    def country(self):
        # Convenience traversal: PostalCode -> City -> Country.
        return self.city.country

    def __init__(self, code, city):
        self.code = code
        self.city = city
class Address(Base):
    """A street address owned by a Person and linked to a PostalCode."""

    __tablename__ = 'address'

    id = Column(Integer, primary_key=True)
    person_id = Column(Integer, ForeignKey('person.id'), nullable=False)
    street = Column(String(200), nullable=False)
    postal_code_id = Column(Integer, ForeignKey('postal_code.id'))
    postal_code = relationship(PostalCode)

    @property
    def city(self):
        """The City reached through this address's postal code."""
        return self.postal_code.city

    @property
    def country(self):
        """The Country reached through this address's postal code."""
        return self.postal_code.country

    def __str__(self):
        # Tab-separated: street, "city, code", country.
        return "{0}\t{1}, {2}\t{3}".format(
            self.street, self.city.name,
            self.postal_code.code, self.country.name)
class Person(Base):
    """A person owning a (possibly empty) set of Address objects."""

    __tablename__ = 'person'

    id = Column(Integer, primary_key=True)
    name = Column(String(100), nullable=False)
    addresses = relationship(Address, collection_class=set)

    def __init__(self, name, *addresses):
        self.name = name
        self.addresses = set(addresses)

    def __str__(self):
        return self.name

    def __repr__(self):
        return "Person(name=%r)" % self.name

    def format_full(self):
        """Tab-join this person's name with each of their addresses."""
        entries = [self]
        entries.extend(self.addresses)
        return "\t".join(str(entry) for entry in entries)
# Caching options. A set of three RelationshipCache options
# which can be applied to Query(), causing the "lazy load"
# of these attributes to be loaded from cache.
# The three options are chained with and_() so a single composite option
# covers PostalCode.city, City.country and Address.postal_code.
cache_address_bits = RelationshipCache(PostalCode.city, "default").\
    and_(
        RelationshipCache(City.country, "default")
    ).and_(
        RelationshipCache(Address.postal_code, "default")
    )

# Set up the database/cache environment (defined in .environment).
bootstrap()
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""Base class to manage the interaction with a running kernel"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import absolute_import
from jupyter_client.channels import major_protocol_version
from ipython_genutils.py3compat import string_types, iteritems
import zmq
from traitlets import (
Any, Instance, Type,
)
from .channelsabc import (ChannelABC, HBChannelABC)
from .clientabc import KernelClientABC
from .connect import ConnectionFileMixin
# some utilities to validate message structure, these might get moved elsewhere
# if they prove to have more generic utility
def validate_string_dict(dct):
    """Validate that the input is a dict with string keys and values.
    Raises ValueError if not."""
    for key, value in iteritems(dct):
        if not isinstance(key, string_types):
            raise ValueError('key %r in dict must be a string' % key)
        if not isinstance(value, string_types):
            raise ValueError('value %r in dict must be a string' % value)
class KernelClient(ConnectionFileMixin):
    """Communicates with a single kernel on any host via zmq channels.

    There are four channels associated with each kernel:

    * shell: for request/reply calls to the kernel.
    * iopub: for the kernel to publish results to frontends.
    * hb: for monitoring the kernel's heartbeat.
    * stdin: for frontends to reply to raw_input calls in the kernel.

    The messages that can be sent on these channels are exposed as methods of the
    client (KernelClient.execute, complete, history, etc.). These methods only
    send the message, they don't wait for a reply. To get results, use e.g.
    :meth:`get_shell_msg` to fetch messages from the shell channel.
    """

    # The PyZMQ Context to use for communication with the kernel.
    context = Instance(zmq.Context)

    def _context_default(self):
        # Default to the process-wide shared zmq context.
        return zmq.Context.instance()

    # The classes to use for the various channels
    shell_channel_class = Type(ChannelABC)
    iopub_channel_class = Type(ChannelABC)
    stdin_channel_class = Type(ChannelABC)
    hb_channel_class = Type(HBChannelABC)

    # Protected traits: channel instances, created lazily by the
    # corresponding properties below.
    _shell_channel = Any()
    _iopub_channel = Any()
    _stdin_channel = Any()
    _hb_channel = Any()

    # flag for whether execute requests should be allowed to call raw_input:
    allow_stdin = True

    #--------------------------------------------------------------------------
    # Channel proxy methods
    #--------------------------------------------------------------------------

    def get_shell_msg(self, *args, **kwargs):
        """Get a message from the shell channel"""
        return self.shell_channel.get_msg(*args, **kwargs)

    def get_iopub_msg(self, *args, **kwargs):
        """Get a message from the iopub channel"""
        return self.iopub_channel.get_msg(*args, **kwargs)

    def get_stdin_msg(self, *args, **kwargs):
        """Get a message from the stdin channel"""
        return self.stdin_channel.get_msg(*args, **kwargs)

    #--------------------------------------------------------------------------
    # Channel management methods
    #--------------------------------------------------------------------------

    def start_channels(self, shell=True, iopub=True, stdin=True, hb=True):
        """Starts the channels for this kernel.

        This will create the channels if they do not exist and then start
        them (their activity runs in a thread). If port numbers of 0 are
        being used (random ports) then you must first call
        :meth:`start_kernel`. If the channels have been stopped and you
        call this, :class:`RuntimeError` will be raised.
        """
        if shell:
            self.shell_channel.start()
            # Immediately request kernel info so the session can adapt to the
            # kernel's protocol version (see _handle_kernel_info_reply).
            self.kernel_info()
        if iopub:
            self.iopub_channel.start()
        if stdin:
            self.stdin_channel.start()
            self.allow_stdin = True
        else:
            # Without a stdin channel, the kernel must not send input requests.
            self.allow_stdin = False
        if hb:
            self.hb_channel.start()

    def stop_channels(self):
        """Stops all the running channels for this kernel.

        This stops their event loops and joins their threads.
        """
        if self.shell_channel.is_alive():
            self.shell_channel.stop()
        if self.iopub_channel.is_alive():
            self.iopub_channel.stop()
        if self.stdin_channel.is_alive():
            self.stdin_channel.stop()
        if self.hb_channel.is_alive():
            self.hb_channel.stop()

    @property
    def channels_running(self):
        """Are any of the channels created and running?"""
        return (self.shell_channel.is_alive() or self.iopub_channel.is_alive() or
                self.stdin_channel.is_alive() or self.hb_channel.is_alive())

    ioloop = None  # Overridden in subclasses that use pyzmq event loop

    @property
    def shell_channel(self):
        """Get the shell channel object for this kernel."""
        if self._shell_channel is None:
            url = self._make_url('shell')
            self.log.debug("connecting shell channel to %s", url)
            socket = self.connect_shell(identity=self.session.bsession)
            self._shell_channel = self.shell_channel_class(
                socket, self.session, self.ioloop
            )
        return self._shell_channel

    @property
    def iopub_channel(self):
        """Get the iopub channel object for this kernel."""
        if self._iopub_channel is None:
            url = self._make_url('iopub')
            self.log.debug("connecting iopub channel to %s", url)
            socket = self.connect_iopub()
            self._iopub_channel = self.iopub_channel_class(
                socket, self.session, self.ioloop
            )
        return self._iopub_channel

    @property
    def stdin_channel(self):
        """Get the stdin channel object for this kernel."""
        if self._stdin_channel is None:
            url = self._make_url('stdin')
            self.log.debug("connecting stdin channel to %s", url)
            socket = self.connect_stdin(identity=self.session.bsession)
            self._stdin_channel = self.stdin_channel_class(
                socket, self.session, self.ioloop
            )
        return self._stdin_channel

    @property
    def hb_channel(self):
        """Get the hb channel object for this kernel."""
        if self._hb_channel is None:
            url = self._make_url('hb')
            self.log.debug("connecting heartbeat channel to %s", url)
            # Unlike the other channels, hb receives the context and url
            # rather than a pre-connected socket.
            self._hb_channel = self.hb_channel_class(
                self.context, self.session, url
            )
        return self._hb_channel

    def is_alive(self):
        """Is the kernel process still running?"""
        # Local import to avoid a circular dependency with .manager.
        from .manager import KernelManager
        if isinstance(self.parent, KernelManager):
            # This KernelClient was created by a KernelManager,
            # we can ask the parent KernelManager:
            return self.parent.is_alive()
        if self._hb_channel is not None:
            # We don't have access to the KernelManager,
            # so we use the heartbeat.
            return self._hb_channel.is_beating()
        else:
            # no heartbeat and not local, we can't tell if it's running,
            # so naively return True
            return True

    # Methods to send specific messages on channels

    def execute(self, code, silent=False, store_history=True,
                user_expressions=None, allow_stdin=None, stop_on_error=True):
        """Execute code in the kernel.

        Parameters
        ----------
        code : str
            A string of code in the kernel's language.
        silent : bool, optional (default False)
            If set, the kernel will execute the code as quietly possible, and
            will force store_history to be False.
        store_history : bool, optional (default True)
            If set, the kernel will store command history. This is forced
            to be False if silent is True.
        user_expressions : dict, optional
            A dict mapping names to expressions to be evaluated in the user's
            dict. The expression values are returned as strings formatted using
            :func:`repr`.
        allow_stdin : bool, optional (default self.allow_stdin)
            Flag for whether the kernel can send stdin requests to frontends.
            Some frontends (e.g. the Notebook) do not support stdin requests.
            If raw_input is called from code executed from such a frontend, a
            StdinNotImplementedError will be raised.
        stop_on_error: bool, optional (default True)
            Flag whether to abort the execution queue, if an exception is encountered.

        Returns
        -------
        The msg_id of the message sent.
        """
        if user_expressions is None:
            user_expressions = {}
        if allow_stdin is None:
            allow_stdin = self.allow_stdin
        # Don't waste network traffic if inputs are invalid
        if not isinstance(code, string_types):
            raise ValueError('code %r must be a string' % code)
        validate_string_dict(user_expressions)
        # Create class for content/msg creation. Related to, but possibly
        # not in Session.
        content = dict(code=code, silent=silent, store_history=store_history,
                       user_expressions=user_expressions,
                       allow_stdin=allow_stdin, stop_on_error=stop_on_error
                       )
        msg = self.session.msg('execute_request', content)
        self.shell_channel.send(msg)
        return msg['header']['msg_id']

    def complete(self, code, cursor_pos=None):
        """Tab complete text in the kernel's namespace.

        Parameters
        ----------
        code : str
            The context in which completion is requested.
            Can be anything between a variable name and an entire cell.
        cursor_pos : int, optional
            The position of the cursor in the block of code where the completion was requested.
            Default: ``len(code)``

        Returns
        -------
        The msg_id of the message sent.
        """
        if cursor_pos is None:
            cursor_pos = len(code)
        content = dict(code=code, cursor_pos=cursor_pos)
        msg = self.session.msg('complete_request', content)
        self.shell_channel.send(msg)
        return msg['header']['msg_id']

    def inspect(self, code, cursor_pos=None, detail_level=0):
        """Get metadata information about an object in the kernel's namespace.

        It is up to the kernel to determine the appropriate object to inspect.

        Parameters
        ----------
        code : str
            The context in which info is requested.
            Can be anything between a variable name and an entire cell.
        cursor_pos : int, optional
            The position of the cursor in the block of code where the info was requested.
            Default: ``len(code)``
        detail_level : int, optional
            The level of detail for the introspection (0-2)

        Returns
        -------
        The msg_id of the message sent.
        """
        if cursor_pos is None:
            cursor_pos = len(code)
        content = dict(code=code, cursor_pos=cursor_pos,
                       detail_level=detail_level,
                       )
        msg = self.session.msg('inspect_request', content)
        self.shell_channel.send(msg)
        return msg['header']['msg_id']

    def history(self, raw=True, output=False, hist_access_type='range', **kwargs):
        """Get entries from the kernel's history list.

        Parameters
        ----------
        raw : bool
            If True, return the raw input.
        output : bool
            If True, then return the output as well.
        hist_access_type : str
            'range' (fill in session, start and stop params), 'tail' (fill in n)
            or 'search' (fill in pattern param).
        session : int
            For a range request, the session from which to get lines. Session
            numbers are positive integers; negative ones count back from the
            current session.
        start : int
            The first line number of a history range.
        stop : int
            The final (excluded) line number of a history range.
        n : int
            The number of lines of history to get for a tail request.
        pattern : str
            The glob-syntax pattern for a search request.

        Returns
        -------
        The ID of the message sent.
        """
        if hist_access_type == 'range':
            # Default a range request to the current session, from line 0.
            kwargs.setdefault('session', 0)
            kwargs.setdefault('start', 0)
        content = dict(raw=raw, output=output, hist_access_type=hist_access_type,
                       **kwargs)
        msg = self.session.msg('history_request', content)
        self.shell_channel.send(msg)
        return msg['header']['msg_id']

    def kernel_info(self):
        """Request kernel info

        Returns
        -------
        The msg_id of the message sent
        """
        msg = self.session.msg('kernel_info_request')
        self.shell_channel.send(msg)
        return msg['header']['msg_id']

    def comm_info(self, target_name=None):
        """Request comm info

        Returns
        -------
        The msg_id of the message sent
        """
        if target_name is None:
            content = {}
        else:
            content = dict(target_name=target_name)
        msg = self.session.msg('comm_info_request', content)
        self.shell_channel.send(msg)
        return msg['header']['msg_id']

    def _handle_kernel_info_reply(self, msg):
        """handle kernel info reply

        sets protocol adaptation version. This might
        be run from a separate thread.
        """
        adapt_version = int(msg['content']['protocol_version'].split('.')[0])
        if adapt_version != major_protocol_version:
            # Enable message adaptation when the kernel speaks a different
            # major protocol version than this client.
            self.session.adapt_version = adapt_version

    def shutdown(self, restart=False):
        """Request an immediate kernel shutdown.

        Upon receipt of the (empty) reply, client code can safely assume that
        the kernel has shut down and it's safe to forcefully terminate it if
        it's still alive.

        The kernel will send the reply via a function registered with Python's
        atexit module, ensuring it's truly done as the kernel is done with all
        normal operation.

        Returns
        -------
        The msg_id of the message sent
        """
        # Send quit message to kernel. Once we implement kernel-side setattr,
        # this should probably be done that way, but for now this will do.
        msg = self.session.msg('shutdown_request', {'restart':restart})
        self.shell_channel.send(msg)
        return msg['header']['msg_id']

    def is_complete(self, code):
        """Ask the kernel whether some code is complete and ready to execute."""
        msg = self.session.msg('is_complete_request', {'code': code})
        self.shell_channel.send(msg)
        return msg['header']['msg_id']

    def input(self, string):
        """Send a string of raw input to the kernel.

        This should only be called in response to the kernel sending an
        ``input_request`` message on the stdin channel.
        """
        content = dict(value=string)
        msg = self.session.msg('input_reply', content)
        self.stdin_channel.send(msg)
KernelClientABC.register(KernelClient)
|
unknown
|
codeparrot/codeparrot-clean
| ||
use crate::spec::base::apple::{Arch, TargetEnv, base};
use crate::spec::{Os, SanitizerSet, Target, TargetMetadata, TargetOptions};
/// Target definition for arm64 Mac Catalyst (the iOS-derived ABI on macOS).
pub(crate) fn target() -> Target {
    // Shared Apple base options for this (os, arch, env) combination.
    let (opts, llvm_target, arch) = base(Os::IOs, Arch::Arm64, TargetEnv::MacCatalyst);
    Target {
        llvm_target,
        metadata: TargetMetadata {
            description: Some("ARM64 Apple Mac Catalyst".into()),
            tier: Some(2),
            host_tools: Some(false),
            std: Some(true),
        },
        pointer_width: 64,
        // NOTE(review): data-layout string must match what LLVM computes for
        // this triple — verify when bumping the LLVM version.
        data_layout: "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-n32:64-S128-Fn32"
            .into(),
        arch,
        options: TargetOptions {
            // Baseline CPU features; apple-a12 presumably reflects the minimum
            // hardware for Catalyst on arm64 — TODO confirm.
            features: "+neon,+apple-a12".into(),
            max_atomic_width: Some(128),
            supported_sanitizers: SanitizerSet::ADDRESS | SanitizerSet::LEAK | SanitizerSet::THREAD,
            ..opts
        },
    }
}
|
rust
|
github
|
https://github.com/rust-lang/rust
|
compiler/rustc_target/src/spec/targets/aarch64_apple_ios_macabi.rs
|
#!/usr/bin/python
# (c) 2012, Stephen Fromm <sfromm@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: seboolean
short_description: Toggles SELinux booleans.
description:
- Toggles SELinux booleans.
version_added: "0.7"
options:
name:
description:
- Name of the boolean to configure
required: true
default: null
persistent:
description:
- Set to C(yes) if the boolean setting should survive a reboot
required: false
default: no
choices: [ "yes", "no" ]
state:
description:
- Desired boolean value
required: true
default: null
choices: [ 'yes', 'no' ]
notes:
- Not tested on any debian based system
requirements: [ ]
author: Stephen Fromm
'''
EXAMPLES = '''
# Set (httpd_can_network_connect) flag on and keep it persistent across reboots
- seboolean: name=httpd_can_network_connect state=yes persistent=yes
'''
try:
import selinux
HAVE_SELINUX=True
except ImportError:
HAVE_SELINUX=False
try:
import semanage
HAVE_SEMANAGE=True
except ImportError:
HAVE_SEMANAGE=False
def has_boolean_value(module, name):
bools = []
try:
rc, bools = selinux.security_get_boolean_names()
except OSError, e:
module.fail_json(msg="Failed to get list of boolean names")
if name in bools:
return True
else:
return False
def get_boolean_value(module, name):
state = 0
try:
state = selinux.security_get_boolean_active(name)
except OSError, e:
module.fail_json(msg="Failed to determine current state for boolean %s" % name)
if state == 1:
return True
else:
return False
# The following method implements what setsebool.c does to change
# a boolean and make it persist after reboot..
def semanage_boolean_value(module, name, state):
    """Persistently set SELinux boolean `name` to `state` via libsemanage.

    Mirrors the transaction sequence used by setsebool.c. Every failure path
    aborts the module through module.fail_json(); returns True on success.
    """
    rc = 0
    value = 0
    if state:
        value = 1
    handle = semanage.semanage_handle_create()
    if handle is None:
        module.fail_json(msg="Failed to create semanage library handle")
    try:
        managed = semanage.semanage_is_managed(handle)
        if managed < 0:
            module.fail_json(msg="Failed to determine whether policy is manage")
        if managed == 0:
            # NOTE(review): `os` is expected to come from the wildcard import
            # of ansible.module_utils.basic at the bottom of the file — confirm.
            if os.getuid() == 0:
                module.fail_json(msg="Cannot set persistent booleans without managed policy")
            else:
                module.fail_json(msg="Cannot set persistent booleans; please try as root")
        if semanage.semanage_connect(handle) < 0:
            module.fail_json(msg="Failed to connect to semanage")
        # Transaction: create a boolean record, name it, set its value,
        # extract its key, then apply it both persistently (modify_local)
        # and at runtime (set_active) before committing.
        if semanage.semanage_begin_transaction(handle) < 0:
            module.fail_json(msg="Failed to begin semanage transaction")
        rc, sebool = semanage.semanage_bool_create(handle)
        if rc < 0:
            module.fail_json(msg="Failed to create seboolean with semanage")
        if semanage.semanage_bool_set_name(handle, sebool, name) < 0:
            module.fail_json(msg="Failed to set seboolean name with semanage")
        semanage.semanage_bool_set_value(sebool, value)
        rc, boolkey = semanage.semanage_bool_key_extract(handle, sebool)
        if rc < 0:
            module.fail_json(msg="Failed to extract boolean key with semanage")
        if semanage.semanage_bool_modify_local(handle, boolkey, sebool) < 0:
            module.fail_json(msg="Failed to modify boolean key with semanage")
        if semanage.semanage_bool_set_active(handle, boolkey, sebool) < 0:
            module.fail_json(msg="Failed to set boolean key active with semanage")
        # Free C-level resources before committing.
        semanage.semanage_bool_key_free(boolkey)
        semanage.semanage_bool_free(sebool)
        # Do not trigger a full policy reload on commit.
        semanage.semanage_set_reload(handle, 0)
        if semanage.semanage_commit(handle) < 0:
            module.fail_json(msg="Failed to commit changes to semanage")
        semanage.semanage_disconnect(handle)
        semanage.semanage_handle_destroy(handle)
    except Exception, e:
        module.fail_json(msg="Failed to manage policy for boolean %s: %s" % (name, str(e)))
    return True
def set_boolean_value(module, name, state):
rc = 0
value = 0
if state:
value = 1
try:
rc = selinux.security_set_boolean(name, value)
except OSError, e:
module.fail_json(msg="Failed to set boolean %s to %s" % (name, value))
if rc == 0:
return True
else:
return False
def main():
    """Entry point: toggle an SELinux boolean, optionally persistently.

    Exits via module.exit_json() / module.fail_json() in all paths.
    """
    module = AnsibleModule(
        argument_spec = dict(
            name=dict(required=True),
            persistent=dict(default='no', type='bool'),
            state=dict(required=True, type='bool')
        ),
        supports_check_mode=True
    )

    # Both python bindings are required; bail out early with a clear message.
    if not HAVE_SELINUX:
        module.fail_json(msg="This module requires libselinux-python support")
    if not HAVE_SEMANAGE:
        module.fail_json(msg="This module requires libsemanage-python support")
    if not selinux.is_selinux_enabled():
        module.fail_json(msg="SELinux is disabled on this host.")

    name = module.params['name']
    persistent = module.params['persistent']
    state = module.params['state']
    result = {}
    result['name'] = name

    if not has_boolean_value(module, name):
        module.fail_json(msg="SELinux boolean %s does not exist." % name)

    cur_value = get_boolean_value(module, name)
    if cur_value == state:
        # Already in the desired state: report unchanged and stop.
        result['state'] = cur_value
        result['changed'] = False
        module.exit_json(**result)

    if module.check_mode:
        module.exit_json(changed=True)

    if persistent:
        r = semanage_boolean_value(module, name, state)
    else:
        r = set_boolean_value(module, name, state)

    result['changed'] = r
    if not r:
        # BUG FIX: this message previously referenced an undefined local
        # `value`, raising NameError instead of reporting the failure;
        # report the requested `state` instead.
        module.fail_json(msg="Failed to set boolean %s to %s" % (name, state))

    try:
        selinux.security_commit_booleans()
    except:
        module.fail_json(msg="Failed to commit pending boolean %s value" % name)
    module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metrics2.lib;
import java.io.Closeable;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.LinkedBlockingDeque;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;
import org.apache.hadoop.classification.VisibleForTesting;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.metrics2.MetricsInfo;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.impl.MetricsCollectorImpl;
import org.apache.hadoop.util.Preconditions;
import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.apache.hadoop.util.Time;
import javax.annotation.Nullable;
import static org.apache.hadoop.metrics2.lib.Interns.*;
/**
* <p>
* This class maintains a group of rolling average metrics. It implements the
* algorithm of rolling average, i.e. a number of sliding windows are kept to
* roll over and evict old subsets of samples. Each window has a subset of
* samples in a stream, where sub-sum and sub-total are collected. All sub-sums
* and sub-totals in all windows will be aggregated to final-sum and final-total
* used to compute final average, which is called rolling average.
* </p>
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class MutableRollingAverages extends MutableMetric implements Closeable {
private MutableRatesWithAggregation innerMetrics =
new MutableRatesWithAggregation();
@VisibleForTesting
static final ScheduledExecutorService SCHEDULER = Executors
.newScheduledThreadPool(1, new ThreadFactoryBuilder().setDaemon(true)
.setNameFormat("MutableRollingAverages-%d").build());
private ScheduledFuture<?> scheduledTask = null;
@Nullable
private Map<String, MutableRate> currentSnapshot;
private final String avgInfoNameTemplate;
private final String avgInfoDescTemplate;
private int numWindows;
    /**
     * This class maintains sub-sum and sub-total of SampleStat.
     * Instances are immutable snapshots of one sliding window.
     */
    private static class SumAndCount {
        private final double sum;
        private final long count;
        // Compared against Time.monotonicNow() in snapshot() to discard
        // records older than recordValidityMs.
        private final long snapshotTimeStamp;
        /**
         * Constructor for {@link SumAndCount}.
         *
         * @param sum sub-sum in sliding windows
         * @param count sub-total in sliding windows
         * @param snapshotTimeStamp when is a new SampleStat snapshot.
         */
        SumAndCount(final double sum, final long count,
            final long snapshotTimeStamp) {
            this.sum = sum;
            this.count = count;
            this.snapshotTimeStamp = snapshotTimeStamp;
        }
        /** @return the sub-sum collected in this window. */
        public double getSum() {
            return sum;
        }
        /** @return the sub-total (sample count) collected in this window. */
        public long getCount() {
            return count;
        }
        /** @return the monotonic timestamp at which this window was captured. */
        public long getSnapshotTimeStamp() {
            return snapshotTimeStamp;
        }
    }
/**
* <p>
* key: metric name
* </p>
* <p>
* value: deque where sub-sums and sub-totals for sliding windows are
* maintained.
* </p>
*/
private Map<String, LinkedBlockingDeque<SumAndCount>> averages =
new ConcurrentHashMap<>();
private static final long WINDOW_SIZE_MS_DEFAULT = 300_000;
private static final int NUM_WINDOWS_DEFAULT = 36;
/**
* Time duration after which a record is considered stale.
* {@link MutableRollingAverages} should be time-sensitive, and it should use
* the time window length(i.e. NUM_WINDOWS_DEFAULT * WINDOW_SIZE_MS_DEFAULT)
* as the valid time to make sure some too old record won't be use to compute
* average.
*/
private long recordValidityMs =
NUM_WINDOWS_DEFAULT * WINDOW_SIZE_MS_DEFAULT;
    /**
     * Constructor for {@link MutableRollingAverages}.
     *
     * <p>Schedules a {@link RatesRoller} on the shared {@link #SCHEDULER} to
     * roll the windows every {@code WINDOW_SIZE_MS_DEFAULT} ms.
     *
     * @param metricValueName input metricValueName; {@code null} is treated
     *        as the empty string.
     */
    public MutableRollingAverages(String metricValueName) {
        if (metricValueName == null) {
            metricValueName = "";
        }
        // Templates are later formatted with the per-metric name, e.g.
        // "[Foo]RollingAvgTime" / "Rolling average time for foo".
        avgInfoNameTemplate = "[%s]" + "RollingAvg" +
            StringUtils.capitalize(metricValueName);
        avgInfoDescTemplate = "Rolling average " +
            StringUtils.uncapitalize(metricValueName) +" for "+ "%s";
        numWindows = NUM_WINDOWS_DEFAULT;
        scheduledTask = SCHEDULER.scheduleAtFixedRate(new RatesRoller(this),
            WINDOW_SIZE_MS_DEFAULT, WINDOW_SIZE_MS_DEFAULT, TimeUnit.MILLISECONDS);
    }
    /**
     * This method is for testing only to replace the scheduledTask.
     *
     * @param windows new number of windows to keep
     * @param interval new rolling interval
     * @param timeUnit unit of {@code interval}
     */
    @VisibleForTesting
    synchronized void replaceScheduledTask(int windows, long interval,
        TimeUnit timeUnit) {
        numWindows = windows;
        // Cancel the task created in the constructor before rescheduling.
        scheduledTask.cancel(true);
        scheduledTask = SCHEDULER.scheduleAtFixedRate(new RatesRoller(this),
            interval, interval, timeUnit);
    }
    /**
     * Emits one rolling-average gauge per metric by aggregating all
     * non-stale windows kept in {@link #averages}.
     */
    @Override
    public synchronized void snapshot(MetricsRecordBuilder builder, boolean all) {
        if (all || changed()) {
            for (final Entry<String, LinkedBlockingDeque<SumAndCount>> entry
                : averages.entrySet()) {
                final String name = entry.getKey();
                final MetricsInfo avgInfo = info(
                    String.format(avgInfoNameTemplate, StringUtils.capitalize(name)),
                    String.format(avgInfoDescTemplate, StringUtils.uncapitalize(name)));
                double totalSum = 0;
                long totalCount = 0;
                // Only aggregate windows newer than recordValidityMs; stale
                // windows are skipped here (not evicted).
                for (final SumAndCount sumAndCount : entry.getValue()) {
                    if (Time.monotonicNow() - sumAndCount.getSnapshotTimeStamp()
                        < recordValidityMs) {
                        totalCount += sumAndCount.getCount();
                        totalSum += sumAndCount.getSum();
                    }
                }
                // Guard against division by zero when no valid samples exist.
                if (totalCount != 0) {
                    builder.addGauge(avgInfo, totalSum / totalCount);
                }
            }
            if (changed()) {
                clearChanged();
            }
        }
    }
  /**
   * Collects states maintained in {@link ThreadLocal}, if any, by delegating
   * to the wrapped inner metrics.
   */
  public void collectThreadLocalStates() {
    innerMetrics.collectThreadLocalStates();
  }
  /**
   * Add a sample to the named metric by delegating to the inner metrics.
   *
   * @param name
   *          name of metric
   * @param value
   *          value of metric
   */
  public void add(final String name, final long value) {
    innerMetrics.add(name, value);
  }
private static class RatesRoller implements Runnable {
private final MutableRollingAverages parent;
RatesRoller(final MutableRollingAverages parent) {
this.parent = parent;
}
@Override
public void run() {
synchronized (parent) {
final MetricsCollectorImpl mc = new MetricsCollectorImpl();
final MetricsRecordBuilder rb = mc.addRecord("RatesRoller");
/**
* snapshot all metrics regardless of being changed or not, in case no
* ops since last snapshot, we will get 0.
*/
parent.innerMetrics.snapshot(rb, true);
Preconditions.checkState(mc.getRecords().size() == 1,
"There must be only one record and it's named with 'RatesRoller'");
parent.currentSnapshot = parent.innerMetrics.getGlobalMetrics();
parent.rollOverAvgs();
}
parent.setChanged();
}
}
/**
* Iterates over snapshot to capture all Avg metrics into rolling structure
* {@link MutableRollingAverages#averages}.
*/
private synchronized void rollOverAvgs() {
if (currentSnapshot == null) {
return;
}
for (Map.Entry<String, MutableRate> entry : currentSnapshot.entrySet()) {
final MutableRate rate = entry.getValue();
final LinkedBlockingDeque<SumAndCount> deque = averages.computeIfAbsent(
entry.getKey(),
new Function<String, LinkedBlockingDeque<SumAndCount>>() {
@Override
public LinkedBlockingDeque<SumAndCount> apply(String k) {
return new LinkedBlockingDeque<>(numWindows);
}
});
final SumAndCount sumAndCount = new SumAndCount(
rate.lastStat().total(),
rate.lastStat().numSamples(),
rate.getSnapshotTimeStamp());
/* put newest sum and count to the end */
if (!deque.offerLast(sumAndCount)) {
deque.pollFirst();
deque.offerLast(sumAndCount);
}
}
setChanged();
}
@Override
public void close() throws IOException {
if (scheduledTask != null) {
scheduledTask.cancel(false);
}
scheduledTask = null;
}
/**
* Retrieve a map of metric name {@literal ->} (aggregate).
* Filter out entries that don't have at least minSamples.
*
* @param minSamples input minSamples.
* @return a map of peer DataNode Id to the average latency to that
* node seen over the measurement period.
*/
public synchronized Map<String, Double> getStats(long minSamples) {
final Map<String, Double> stats = new HashMap<>();
for (final Entry<String, LinkedBlockingDeque<SumAndCount>> entry
: averages.entrySet()) {
final String name = entry.getKey();
double totalSum = 0;
long totalCount = 0;
for (final SumAndCount sumAndCount : entry.getValue()) {
if (Time.monotonicNow() - sumAndCount.getSnapshotTimeStamp()
< recordValidityMs) {
totalCount += sumAndCount.getCount();
totalSum += sumAndCount.getSum();
}
}
if (totalCount > minSamples) {
stats.put(name, totalSum / totalCount);
}
}
return stats;
}
  /**
   * Use for test only.
   *
   * <p>Overrides the staleness threshold so tests don't have to wait for the
   * default multi-hour window to elapse.</p>
   *
   * @param value input value.
   */
  @VisibleForTesting
  public synchronized void setRecordValidityMs(long value) {
    this.recordValidityMs = value;
  }
}
|
java
|
github
|
https://github.com/apache/hadoop
|
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRollingAverages.java
|
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// +k8s:deepcopy-gen=package
// +k8s:conversion-gen=k8s.io/kubernetes/pkg/controller/replication/config
// +k8s:conversion-gen-external-types=k8s.io/kube-controller-manager/config/v1alpha1
package v1alpha1
|
go
|
github
|
https://github.com/kubernetes/kubernetes
|
pkg/controller/replication/config/v1alpha1/doc.go
|
from __future__ import print_function
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
import traceback
import yaml
import dependency_check.version_comparer as version_comparer
from datetime import datetime
from .jira_client import JiraClient
# JIRA project and component under which dependency-update issues are filed.
_JIRA_PROJECT_NAME = 'BEAM'
_JIRA_COMPONENT = 'dependencies'

# All auto-managed issue summaries start with this prefix (used for search).
_ISSUE_SUMMARY_PREFIX = 'Beam Dependency Update Request: '

# A closed issue may be reconsidered for reopening once it has been closed
# for at least this many days.
_ISSUE_REOPEN_DAYS = 180
class JiraManager:
  """Creates, updates and reopens Beam dependency-update JIRA issues.

  Java dependencies are grouped under a parent issue per Maven group id with
  one sub-task per artifact; Python dependencies get a single top-level issue.
  """

  def __init__(self, jira_url, jira_username, jira_password, owners_file):
    """Connects to JIRA and loads the dependency owners mapping.

    Args:
      jira_url: base URL of the JIRA server.
      jira_username: user name for basic auth.
      jira_password: password for basic auth.
      owners_file: path of a YAML file with a top-level 'deps' mapping.
    """
    options = {
        'server': jira_url
    }
    basic_auth = (jira_username, jira_password)
    self.jira = JiraClient(options, basic_auth, _JIRA_PROJECT_NAME)
    # BaseLoader keeps every scalar as a plain string (e.g. version numbers).
    with open(owners_file) as f:
      owners = yaml.load(f, Loader=yaml.BaseLoader)
    self.owners_map = owners['deps']
    logging.getLogger().setLevel(logging.INFO)

  def run(self, dep_name,
          dep_current_version,
          dep_latest_version,
          sdk_type,
          group_id=None):
    """
    Manage the jira issue for a dependency
    Args:
      dep_name,
      dep_current_version,
      dep_latest_version,
      sdk_type: Java, Python
      group_id (optional): only required for Java dependencies; assumed
        non-None when sdk_type == 'Java' -- TODO confirm all callers.
    Return: Jira Issue, or None when handling had to be aborted.
    """
    # NOTE: the previous "try: ... except: raise" wrapper around this body was
    # a no-op and has been removed; behavior is unchanged.
    logging.info("Start handling the JIRA issues for {0} dependency: {1} {2}".format(
        sdk_type, dep_name, dep_latest_version))
    # find the parent issue for Java deps base on the groupID
    parent_issue = None
    if sdk_type == 'Java':
      summary = _ISSUE_SUMMARY_PREFIX + group_id
      parent_issues = self._search_issues(summary)
      for i in parent_issues:
        if i.fields.summary == summary:
          parent_issue = i
          break
      # Create a new parent issue if no existing found
      if not parent_issue:
        logging.info("""Did not find existing issue with name {0}. \n
            Created a parent issue for {1}""".format(summary, group_id))
        try:
          parent_issue = self._create_issue(group_id, None, None)
        except Exception:
          # Was a bare "except:"; narrowed so KeyboardInterrupt/SystemExit
          # still propagate.
          logging.error("""Failed creating a parent issue for {0}.
            Stop handling the JIRA issue for {1}, {2}""".format(group_id, dep_name, dep_latest_version))
          return
      # Reopen the existing parent issue if it was closed
      elif parent_issue.fields.status.name not in ['Open', 'Reopened', 'Triage Needed']:
        logging.info("""The parent issue {0} is not opening (status: {1}). Attempt reopening the issue""".format(
            parent_issue.key, parent_issue.fields.status.name))
        try:
          self.jira.reopen_issue(parent_issue)
        except Exception:
          traceback.print_exc()
          logging.error("""Failed reopening the parent issue {0}.
            Stop handling the JIRA issue for {1}, {2}""".format(parent_issue.key, dep_name, dep_latest_version))
          return
      logging.info("Found the parent issue {0}. Continuous to create or update the sub-task for {1}".format(parent_issue.key, dep_name))

    # creating a new issue/sub-task or updating on the existing issue of the dep
    summary = _ISSUE_SUMMARY_PREFIX + dep_name
    issues = self._search_issues(summary)
    issue = None
    for i in issues:
      if i.fields.summary == summary:
        issue = i
        break
    # Create a new JIRA if no existing one.
    if not issue:
      if sdk_type == 'Java':
        issue = self._create_issue(dep_name, dep_current_version, dep_latest_version, is_subtask=True, parent_key=parent_issue.key)
      else:
        issue = self._create_issue(dep_name, dep_current_version, dep_latest_version)
      logging.info('Created a new issue {0} of {1} {2}'.format(issue.key, dep_name, dep_latest_version))
    # Add descriptions in to the opening issue.
    elif issue.fields.status.name in ['Open', 'Reopened', 'Triage Needed']:
      self._append_descriptions(issue, dep_name, dep_current_version, dep_latest_version)
      logging.info('Updated the existing issue {0} of {1} {2}'.format(issue.key, dep_name, dep_latest_version))
    # Check if we need reopen the issue if it was closed. If so, reopen it then add descriptions.
    elif self._need_reopen(issue, dep_latest_version):
      self.jira.reopen_issue(issue)
      self._append_descriptions(issue, dep_name, dep_current_version, dep_latest_version)
      logging.info("Reopened the issue {0} for {1} {2}".format(issue.key, dep_name, dep_latest_version))
    return issue

  def _create_issue(self, dep_name, dep_current_version, dep_latest_version, is_subtask=False, parent_key=None):
    """
    Create a new issue or subtask
    Args:
      dep_name,
      dep_current_version,
      dep_latest_version,
      is_subtask,
      parent_key: only required if the 'is_subtask' is true.
    Return: the created Jira issue.
    Raises: re-raises whatever the JIRA client raised on failure.
    """
    logging.info("Creating a new JIRA issue to track {0} upgrade process".format(dep_name))
    summary = _ISSUE_SUMMARY_PREFIX + dep_name
    description = self._create_descriptions(dep_name, dep_current_version, dep_latest_version)
    try:
      if not is_subtask:
        issue = self.jira.create_issue(summary, [_JIRA_COMPONENT], description)
      else:
        issue = self.jira.create_issue(summary, [_JIRA_COMPONENT], description, parent_key=parent_key)
    except Exception as e:
      logging.error("Failed creating issue: " + str(e))
      raise e
    return issue

  def _search_issues(self, summary):
    """
    Search issues by using issues' summary.
    Args:
      summary: a string
    Return:
      A list of issues (empty on search failure -- errors are logged, not
      raised).
    """
    try:
      issues = self.jira.get_issues_by_summary(summary)
    except Exception as e:
      logging.error("Failed searching issues: " + str(e))
      return []
    return issues

  def _append_descriptions(self, issue, dep_name, dep_current_version, dep_latest_version):
    """
    Add descriptions on an existing issue.
    Args:
      issue: Jira issue
      dep_name
      dep_current_version
      dep_latest_version
    """
    logging.info("Updating JIRA issue {0} to track {1} upgrade process".format(
        issue.key,
        dep_name))
    description = self._create_descriptions(dep_name, dep_current_version, dep_latest_version, issue=issue)
    try:
      self.jira.update_issue(issue, description=description)
    except Exception as e:
      traceback.print_exc()
      logging.error("Failed updating issue: " + str(e))

  def _create_descriptions(self, dep_name, dep_current_version, dep_latest_version, issue=None):
    """
    Create descriptions for JIRA issues.
    Args:
      dep_name
      dep_current_version
      dep_latest_version
      issue: when given, the new text is appended to its current description.
    Return: the full description string.
    """
    description = ""
    if issue:
      description = issue.fields.description
    # NOTE: _need_reopen parses this text positionally; keep the format stable.
    description += """\n\n ------------------------- {0} -------------------------\n
        Please consider upgrading the dependency {1}. \n
        The current version is {2}. The latest version is {3} \n
        cc: """.format(
        datetime.today(),
        dep_name,
        dep_current_version,
        dep_latest_version
    )
    owners = self._find_owners(dep_name)
    for owner in owners:
      description += "[~{0}], ".format(owner)
    description += ("\n Please refer to "
                    "[Beam Dependency Guide |https://beam.apache.org/contribute/dependencies/]"
                    "for more information. \n"
                    "Do Not Modify The Description Above. \n")
    return description

  def _find_owners(self, dep_name):
    """
    Find owners for a dependency.
    Args:
      dep_name
    Return:
      A list of owner user names (always a list; previously this returned
      None on unexpected errors, which crashed callers that iterate it).
    """
    try:
      dep_info = self.owners_map[dep_name]
      owners = dep_info['owners']
      if not owners:
        logging.warning("Could not find owners for " + dep_name)
        return []
    except KeyError:
      traceback.print_exc()
      logging.warning("Could not find the dependency info of {0} in the OWNERS configurations.".format(dep_name))
      return []
    except Exception as e:
      traceback.print_exc()
      logging.error("Failed finding dependency owners: " + str(e))
      # Return an empty list (not None) so callers can safely iterate.
      return []
    logging.info("Found owners of {0}: {1}".format(dep_name, owners))
    # Owners are stored as a single comma-separated string; normalize to a
    # list of stripped, non-empty names.
    owners = owners.split(',')
    owners = map(str, owners)
    owners = map(str.strip, owners)
    owners = list(filter(None, owners))
    return owners

  def _need_reopen(self, issue, dep_latest_version):
    """
    Return a boolean that indicates whether reopen the closed issue.
    """
    # Check if the issue was closed with a "fix version/s"
    # Reopen the issue if it hits the next release version.
    next_release_version = self._get_next_release_version()
    for fix_version in issue.fields.fixVersions:
      if fix_version.name in next_release_version:
        return True
    # Check if there is other new versions released.
    # Reopen the issue if 3 new versions have been released in 6 month since closure.
    try:
      if issue.fields.resolutiondate:
        closing_date = datetime.strptime(issue.fields.resolutiondate[:19], "%Y-%m-%dT%H:%M:%S")
        if (datetime.today() - closing_date).days >= _ISSUE_REOPEN_DAYS:
          # Extract the previous version when JIRA closed.
          descriptions = issue.fields.description.splitlines()
          descriptions = descriptions[len(descriptions) - 5]
          # The version info has been stored in the JIRA description in a specific format.
          # Such as "Please review and upgrade the <dep name> to the latest version <version>"
          previous_version = descriptions.split("The latest version is", 1)[1].strip()
          if version_comparer.compare_dependency_versions(previous_version, dep_latest_version):
            return True
    except Exception as e:
      traceback.print_exc()
      logging.error("Failed deciding to reopen the issue." + str(e))
      return False
    return False

  def _get_next_release_version(self):
    """
    Return the incoming release version from sdks/python/apache_beam/version.py
    """
    global_names = {}
    version_path = os.path.join(
        os.path.dirname(os.path.abspath(__file__)),
        '../../../sdks/python/',
        'apache_beam/version.py')
    # Use a context manager so the file handle is closed deterministically
    # (the previous exec(open(...).read()) leaked the handle).
    with open(version_path) as f:
      exec(f.read(), global_names)
    return global_names['__version__']
|
unknown
|
codeparrot/codeparrot-clean
| ||
import copyreg
import io
import pickle
import re
import warnings
from unittest.mock import Mock
import joblib
import numpy as np
import pytest
from joblib.numpy_pickle import NumpyPickler
from numpy.testing import assert_allclose, assert_array_equal
import sklearn.ensemble._hist_gradient_boosting.gradient_boosting as hgb_module
from sklearn._loss.loss import (
AbsoluteError,
HalfBinomialLoss,
HalfSquaredError,
PinballLoss,
)
from sklearn.base import BaseEstimator, TransformerMixin, clone, is_regressor
from sklearn.compose import make_column_transformer
from sklearn.datasets import make_classification, make_low_rank_matrix, make_regression
from sklearn.dummy import DummyRegressor
from sklearn.ensemble import (
HistGradientBoostingClassifier,
HistGradientBoostingRegressor,
)
from sklearn.ensemble._hist_gradient_boosting.binning import _BinMapper
from sklearn.ensemble._hist_gradient_boosting.common import G_H_DTYPE
from sklearn.ensemble._hist_gradient_boosting.grower import TreeGrower
from sklearn.ensemble._hist_gradient_boosting.predictor import TreePredictor
from sklearn.exceptions import NotFittedError
from sklearn.metrics import get_scorer, mean_gamma_deviance, mean_poisson_deviance
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import KBinsDiscretizer, MinMaxScaler, OneHotEncoder
from sklearn.utils import check_random_state, shuffle
from sklearn.utils._openmp_helpers import _openmp_effective_n_threads
from sklearn.utils._testing import _convert_container
from sklearn.utils.fixes import _IS_32BIT
# Number of OpenMP threads effectively available to the tests.
n_threads = _openmp_effective_n_threads()

# Shared toy datasets, generated once at import time with a fixed seed.
X_classification, y_classification = make_classification(random_state=0)
X_regression, y_regression = make_regression(random_state=0)
X_multi_classification, y_multi_classification = make_classification(
    n_classes=3, n_informative=3, random_state=0
)
def _make_dumb_dataset(n_samples):
"""Make a dumb dataset to test early stopping."""
rng = np.random.RandomState(42)
X_dumb = rng.randn(n_samples, 1)
y_dumb = (X_dumb[:, 0] > 0).astype("int64")
return X_dumb, y_dumb
@pytest.mark.parametrize(
    "GradientBoosting, X, y",
    [
        (HistGradientBoostingClassifier, X_classification, y_classification),
        (HistGradientBoostingRegressor, X_regression, y_regression),
    ],
)
@pytest.mark.parametrize(
    "params, err_msg",
    [
        (
            {"interaction_cst": [0, 1]},
            "Interaction constraints must be a sequence of tuples or lists",
        ),
        (
            {"interaction_cst": [{0, 9999}]},
            r"Interaction constraints must consist of integer indices in \[0,"
            r" n_features - 1\] = \[.*\], specifying the position of features,",
        ),
        (
            {"interaction_cst": [{-1, 0}]},
            r"Interaction constraints must consist of integer indices in \[0,"
            r" n_features - 1\] = \[.*\], specifying the position of features,",
        ),
        (
            {"interaction_cst": [{0.5}]},
            r"Interaction constraints must consist of integer indices in \[0,"
            r" n_features - 1\] = \[.*\], specifying the position of features,",
        ),
    ],
)
def test_init_parameters_validation(GradientBoosting, X, y, params, err_msg):
    # Invalid interaction constraints must be rejected at fit time with a
    # descriptive ValueError.
    with pytest.raises(ValueError, match=err_msg):
        GradientBoosting(**params).fit(X, y)


@pytest.mark.parametrize(
    "scoring, validation_fraction, early_stopping, n_iter_no_change, tol",
    [
        ("neg_mean_squared_error", 0.1, True, 5, 1e-7),  # use scorer
        ("neg_mean_squared_error", None, True, 5, 1e-1),  # use scorer on train
        (None, 0.1, True, 5, 1e-7),  # same with default scorer
        (None, None, True, 5, 1e-1),
        ("loss", 0.1, True, 5, 1e-7),  # use loss
        ("loss", None, True, 5, 1e-1),  # use loss on training data
        (None, None, False, 5, 0.0),  # no early stopping
    ],
)
def test_early_stopping_regression(
    scoring, validation_fraction, early_stopping, n_iter_no_change, tol
):
    # With early stopping enabled the regressor must halt strictly before
    # max_iter (after at least n_iter_no_change rounds); otherwise it must
    # consume the whole iteration budget.
    max_iter = 200

    X, y = make_regression(n_samples=50, random_state=0)

    gb = HistGradientBoostingRegressor(
        verbose=1,  # just for coverage
        min_samples_leaf=5,  # easier to overfit fast
        scoring=scoring,
        tol=tol,
        early_stopping=early_stopping,
        validation_fraction=validation_fraction,
        max_iter=max_iter,
        n_iter_no_change=n_iter_no_change,
        random_state=0,
    )
    gb.fit(X, y)

    if early_stopping:
        assert n_iter_no_change <= gb.n_iter_ < max_iter
    else:
        assert gb.n_iter_ == max_iter
@pytest.mark.parametrize(
    "data",
    (
        make_classification(n_samples=30, random_state=0),
        make_classification(
            n_samples=30, n_classes=3, n_clusters_per_class=1, random_state=0
        ),
    ),
)
@pytest.mark.parametrize(
    "scoring, validation_fraction, early_stopping, n_iter_no_change, tol",
    [
        ("accuracy", 0.1, True, 5, 1e-7),  # use scorer
        ("accuracy", None, True, 5, 1e-1),  # use scorer on training data
        (None, 0.1, True, 5, 1e-7),  # same with default scorer
        (None, None, True, 5, 1e-1),
        ("loss", 0.1, True, 5, 1e-7),  # use loss
        ("loss", None, True, 5, 1e-1),  # use loss on training data
        (None, None, False, 5, 0.0),  # no early stopping
    ],
)
def test_early_stopping_classification(
    data, scoring, validation_fraction, early_stopping, n_iter_no_change, tol
):
    # Binary and multiclass variants: early stopping must halt before
    # max_iter; otherwise the full iteration budget is used.
    max_iter = 50

    X, y = data

    gb = HistGradientBoostingClassifier(
        verbose=2,  # just for coverage
        min_samples_leaf=5,  # easier to overfit fast
        scoring=scoring,
        tol=tol,
        early_stopping=early_stopping,
        validation_fraction=validation_fraction,
        max_iter=max_iter,
        n_iter_no_change=n_iter_no_change,
        random_state=0,
    )
    gb.fit(X, y)

    if early_stopping is True:
        assert n_iter_no_change <= gb.n_iter_ < max_iter
    else:
        assert gb.n_iter_ == max_iter


@pytest.mark.parametrize(
    "GradientBoosting, X, y",
    [
        (HistGradientBoostingClassifier, *_make_dumb_dataset(10000)),
        (HistGradientBoostingClassifier, *_make_dumb_dataset(10001)),
        (HistGradientBoostingRegressor, *_make_dumb_dataset(10000)),
        (HistGradientBoostingRegressor, *_make_dumb_dataset(10001)),
    ],
)
def test_early_stopping_default(GradientBoosting, X, y):
    # Test that early stopping is enabled by default if and only if there
    # are more than 10000 samples
    gb = GradientBoosting(max_iter=10, n_iter_no_change=2, tol=1e-1)
    gb.fit(X, y)
    if X.shape[0] > 10000:
        assert gb.n_iter_ < gb.max_iter
    else:
        assert gb.n_iter_ == gb.max_iter


@pytest.mark.parametrize(
    "scores, n_iter_no_change, tol, stopping",
    [
        ([], 1, 0.001, False),  # not enough iterations
        ([1, 1, 1], 5, 0.001, False),  # not enough iterations
        ([1, 1, 1, 1, 1], 5, 0.001, False),  # not enough iterations
        ([1, 2, 3, 4, 5, 6], 5, 0.001, False),  # significant improvement
        ([1, 2, 3, 4, 5, 6], 5, 0.0, False),  # significant improvement
        ([1, 2, 3, 4, 5, 6], 5, 0.999, False),  # significant improvement
        ([1, 2, 3, 4, 5, 6], 5, 5 - 1e-5, False),  # significant improvement
        ([1] * 6, 5, 0.0, True),  # no significant improvement
        ([1] * 6, 5, 0.001, True),  # no significant improvement
        ([1] * 6, 5, 5, True),  # no significant improvement
    ],
)
def test_should_stop(scores, n_iter_no_change, tol, stopping):
    # Unit-test the private early-stopping criterion on hand-crafted score
    # sequences.
    gbdt = HistGradientBoostingClassifier(n_iter_no_change=n_iter_no_change, tol=tol)
    assert gbdt._should_stop(scores) == stopping
def test_absolute_error():
    # For coverage only.
    X, y = make_regression(n_samples=500, random_state=0)
    gbdt = HistGradientBoostingRegressor(loss="absolute_error", random_state=0)
    gbdt.fit(X, y)
    assert gbdt.score(X, y) > 0.9


def test_absolute_error_sample_weight():
    # non regression test for issue #19400
    # make sure no error is thrown during fit of
    # HistGradientBoostingRegressor with absolute_error loss function
    # and passing sample_weight
    rng = np.random.RandomState(0)
    n_samples = 100
    X = rng.uniform(-1, 1, size=(n_samples, 2))
    y = rng.uniform(-1, 1, size=n_samples)
    sample_weight = rng.uniform(0, 1, size=n_samples)
    gbdt = HistGradientBoostingRegressor(loss="absolute_error")
    gbdt.fit(X, y, sample_weight=sample_weight)


@pytest.mark.parametrize("y", [([1.0, -2.0, 0.0]), ([0.0, 1.0, 2.0])])
def test_gamma_y_positive(y):
    # Test that ValueError is raised if any y_i <= 0.
    err_msg = r"loss='gamma' requires strictly positive y."
    gbdt = HistGradientBoostingRegressor(loss="gamma", random_state=0)
    with pytest.raises(ValueError, match=err_msg):
        gbdt.fit(np.zeros(shape=(len(y), 1)), y)


def test_gamma():
    # For a Gamma distributed target, we expect an HGBT trained with the Gamma deviance
    # (loss) to give better results than an HGBT with any other loss function, measured
    # in out-of-sample Gamma deviance as metric/score.
    # Note that squared error could potentially predict negative values which is
    # invalid (np.inf) for the Gamma deviance. A Poisson HGBT (having a log link)
    # does not have that defect.
    # Important note: It seems that a Poisson HGBT almost always has better
    # out-of-sample performance than the Gamma HGBT, measured in Gamma deviance.
    # LightGBM shows the same behaviour. Hence, we only compare to a squared error
    # HGBT, but not to a Poisson deviance HGBT.
    rng = np.random.RandomState(42)
    n_train, n_test, n_features = 500, 100, 20
    X = make_low_rank_matrix(
        n_samples=n_train + n_test,
        n_features=n_features,
        random_state=rng,
    )
    # We create a log-linear Gamma model. This gives y.min ~ 1e-2, y.max ~ 1e2
    coef = rng.uniform(low=-10, high=20, size=n_features)
    # Numpy parametrizes gamma(shape=k, scale=theta) with mean = k * theta and
    # variance = k * theta^2. We parametrize it instead with mean = exp(X @ coef)
    # and variance = dispersion * mean^2 by setting k = 1 / dispersion,
    # theta = dispersion * mean.
    dispersion = 0.5
    y = rng.gamma(shape=1 / dispersion, scale=dispersion * np.exp(X @ coef))
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=n_test, random_state=rng
    )
    gbdt_gamma = HistGradientBoostingRegressor(loss="gamma", random_state=123)
    gbdt_mse = HistGradientBoostingRegressor(loss="squared_error", random_state=123)
    dummy = DummyRegressor(strategy="mean")
    for model in (gbdt_gamma, gbdt_mse, dummy):
        model.fit(X_train, y_train)

    # Gamma-loss model must beat both baselines on train and test sets.
    for X, y in [(X_train, y_train), (X_test, y_test)]:
        loss_gbdt_gamma = mean_gamma_deviance(y, gbdt_gamma.predict(X))
        # We restrict the squared error HGBT to predict at least the minimum seen y at
        # train time to make it strictly positive.
        loss_gbdt_mse = mean_gamma_deviance(
            y, np.maximum(np.min(y_train), gbdt_mse.predict(X))
        )
        loss_dummy = mean_gamma_deviance(y, dummy.predict(X))
        assert loss_gbdt_gamma < loss_dummy
        assert loss_gbdt_gamma < loss_gbdt_mse
@pytest.mark.parametrize("quantile", [0.2, 0.5, 0.8])
def test_quantile_asymmetric_error(quantile):
    """Test quantile regression for asymmetric distributed targets."""
    n_samples = 10_000
    rng = np.random.RandomState(42)
    # take care that X @ coef + intercept > 0
    X = np.concatenate(
        (
            np.abs(rng.randn(n_samples)[:, None]),
            -rng.randint(2, size=(n_samples, 1)),
        ),
        axis=1,
    )
    intercept = 1.23
    coef = np.array([0.5, -2])
    # For an exponential distribution with rate lambda, e.g. exp(-lambda * x),
    # the quantile at level q is:
    #   quantile(q) = - log(1 - q) / lambda
    #   scale = 1/lambda = -quantile(q) / log(1-q)
    y = rng.exponential(
        scale=-(X @ coef + intercept) / np.log(1 - quantile), size=n_samples
    )
    model = HistGradientBoostingRegressor(
        loss="quantile",
        quantile=quantile,
        max_iter=25,
        random_state=0,
        max_leaf_nodes=10,
    ).fit(X, y)
    # The fraction of targets below the prediction should match the quantile.
    assert_allclose(np.mean(model.predict(X) > y), quantile, rtol=1e-2)

    pinball_loss = PinballLoss(quantile=quantile)
    loss_true_quantile = pinball_loss(y, X @ coef + intercept)
    loss_pred_quantile = pinball_loss(y, model.predict(X))
    # we are overfitting
    assert loss_pred_quantile <= loss_true_quantile


@pytest.mark.parametrize("y", [([1.0, -2.0, 0.0]), ([0.0, 0.0, 0.0])])
def test_poisson_y_positive(y):
    # Test that ValueError is raised if either one y_i < 0 or sum(y_i) <= 0.
    err_msg = r"loss='poisson' requires non-negative y and sum\(y\) > 0."
    gbdt = HistGradientBoostingRegressor(loss="poisson", random_state=0)
    with pytest.raises(ValueError, match=err_msg):
        gbdt.fit(np.zeros(shape=(len(y), 1)), y)


def test_poisson():
    # For Poisson distributed target, Poisson loss should give better results
    # than least squares measured in Poisson deviance as metric.
    rng = np.random.RandomState(42)
    n_train, n_test, n_features = 500, 100, 100
    X = make_low_rank_matrix(
        n_samples=n_train + n_test, n_features=n_features, random_state=rng
    )
    # We create a log-linear Poisson model and downscale coef as it will get
    # exponentiated.
    coef = rng.uniform(low=-2, high=2, size=n_features) / np.max(X, axis=0)
    y = rng.poisson(lam=np.exp(X @ coef))
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=n_test, random_state=rng
    )
    gbdt_pois = HistGradientBoostingRegressor(loss="poisson", random_state=rng)
    gbdt_ls = HistGradientBoostingRegressor(loss="squared_error", random_state=rng)
    gbdt_pois.fit(X_train, y_train)
    gbdt_ls.fit(X_train, y_train)
    dummy = DummyRegressor(strategy="mean").fit(X_train, y_train)

    for X, y in [(X_train, y_train), (X_test, y_test)]:
        metric_pois = mean_poisson_deviance(y, gbdt_pois.predict(X))
        # squared_error might produce non-positive predictions => clip
        metric_ls = mean_poisson_deviance(y, np.clip(gbdt_ls.predict(X), 1e-15, None))
        metric_dummy = mean_poisson_deviance(y, dummy.predict(X))
        assert metric_pois < metric_ls
        assert metric_pois < metric_dummy
def test_binning_train_validation_are_separated():
    # Make sure training and validation data are binned separately.
    # See issue 13926
    rng = np.random.RandomState(0)
    validation_fraction = 0.2
    gb = HistGradientBoostingClassifier(
        early_stopping=True, validation_fraction=validation_fraction, random_state=rng
    )
    gb.fit(X_classification, y_classification)
    mapper_training_data = gb._bin_mapper

    # Note that since the data is small there is no subsampling and the
    # random_state doesn't matter
    mapper_whole_data = _BinMapper(random_state=0)
    mapper_whole_data.fit(X_classification)

    n_samples = X_classification.shape[0]
    # Each sample lands in its own bin on the training split only, so the bin
    # counts must differ from the whole-data mapper's.
    assert np.all(
        mapper_training_data.n_bins_non_missing_
        == int((1 - validation_fraction) * n_samples)
    )
    assert np.all(
        mapper_training_data.n_bins_non_missing_
        != mapper_whole_data.n_bins_non_missing_
    )


def test_missing_values_trivial():
    # sanity check for missing values support. With only one feature and
    # y == isnan(X), the gbdt is supposed to reach perfect accuracy on the
    # training set.
    n_samples = 100
    n_features = 1
    rng = np.random.RandomState(0)

    X = rng.normal(size=(n_samples, n_features))
    mask = rng.binomial(1, 0.5, size=X.shape).astype(bool)
    X[mask] = np.nan
    y = mask.ravel()
    gb = HistGradientBoostingClassifier()
    gb.fit(X, y)

    assert gb.score(X, y) == pytest.approx(1)


@pytest.mark.parametrize("problem", ("classification", "regression"))
@pytest.mark.parametrize(
    (
        "missing_proportion, expected_min_score_classification, "
        "expected_min_score_regression"
    ),
    [(0.1, 0.97, 0.89), (0.2, 0.93, 0.81), (0.5, 0.79, 0.52)],
)
def test_missing_values_resilience(
    problem,
    missing_proportion,
    expected_min_score_classification,
    expected_min_score_regression,
):
    # Make sure the estimators can deal with missing values and still yield
    # decent predictions
    rng = np.random.RandomState(0)
    n_samples = 1000
    n_features = 2
    if problem == "regression":
        X, y = make_regression(
            n_samples=n_samples,
            n_features=n_features,
            n_informative=n_features,
            random_state=rng,
        )
        gb = HistGradientBoostingRegressor()
        expected_min_score = expected_min_score_regression
    else:
        X, y = make_classification(
            n_samples=n_samples,
            n_features=n_features,
            n_informative=n_features,
            n_redundant=0,
            n_repeated=0,
            random_state=rng,
        )
        gb = HistGradientBoostingClassifier()
        expected_min_score = expected_min_score_classification

    mask = rng.binomial(1, missing_proportion, size=X.shape).astype(bool)
    X[mask] = np.nan

    gb.fit(X, y)

    assert gb.score(X, y) > expected_min_score
@pytest.mark.parametrize(
    "data",
    [
        make_classification(random_state=0, n_classes=2),
        make_classification(random_state=0, n_classes=3, n_informative=3),
    ],
    ids=["binary_log_loss", "multiclass_log_loss"],
)
def test_zero_division_hessians(data):
    # non regression test for issue #14018
    # make sure we avoid zero division errors when computing the leaves values.

    # If the learning rate is too high, the raw predictions are bad and will
    # saturate the softmax (or sigmoid in binary classif). This leads to
    # probabilities being exactly 0 or 1, gradients being constant, and
    # hessians being zero.
    X, y = data
    gb = HistGradientBoostingClassifier(learning_rate=100, max_iter=10)
    gb.fit(X, y)


def test_small_trainset():
    # Make sure that the small trainset is stratified and has the expected
    # length (10k samples)
    n_samples = 20000
    original_distrib = {0: 0.1, 1: 0.2, 2: 0.3, 3: 0.4}
    rng = np.random.RandomState(42)
    X = rng.randn(n_samples).reshape(n_samples, 1)
    # Build a label vector that follows original_distrib exactly, then shuffle.
    y = [
        [class_] * int(prop * n_samples) for (class_, prop) in original_distrib.items()
    ]
    y = shuffle(np.concatenate(y))
    gb = HistGradientBoostingClassifier()

    # Compute the small training set
    X_small, y_small, *_ = gb._get_small_trainset(
        X, y, seed=42, sample_weight_train=None
    )

    # Compute the class distribution in the small training set
    unique, counts = np.unique(y_small, return_counts=True)
    small_distrib = {class_: count / 10000 for (class_, count) in zip(unique, counts)}

    # Test that the small training set has the expected length
    assert X_small.shape[0] == 10000
    assert y_small.shape[0] == 10000

    # Test that the class distributions in the whole dataset and in the small
    # training set are identical
    assert small_distrib == pytest.approx(original_distrib)
def test_missing_values_minmax_imputation():
    # Compare the built-in missing value handling of Histogram GBC with an
    # a-priori missing value imputation strategy that should yield the same
    # results in terms of decision function.
    #
    # Each feature (containing NaNs) is replaced by 2 features:
    # - one where the nans are replaced by min(feature) - 1
    # - one where the nans are replaced by max(feature) + 1
    # A split where nans go to the left has an equivalent split in the
    # first (min) feature, and a split where nans go to the right has an
    # equivalent split in the second (max) feature.
    #
    # Assuming the data is such that there is never a tie to select the best
    # feature to split on during training, the learned decision trees should be
    # strictly equivalent (learn a sequence of splits that encode the same
    # decision function).
    #
    # The MinMaxImputer transformer is meant to be a toy implementation of the
    # "Missing In Attributes" (MIA) missing value handling for decision trees
    # https://www.sciencedirect.com/science/article/abs/pii/S0167865508000305
    # The implementation of MIA as an imputation transformer was suggested by
    # "Remark 3" in :arxiv:`1902.06931`
    class MinMaxImputer(TransformerMixin, BaseEstimator):
        # Toy MIA imputer: records per-feature min/max at fit time, then
        # doubles the feature space at transform time (see transform).
        def fit(self, X, y=None):
            mm = MinMaxScaler().fit(X)
            self.data_min_ = mm.data_min_
            self.data_max_ = mm.data_max_
            return self
        def transform(self, X):
            # Emit two copies of each feature: NaNs mapped below the minimum
            # in the first copy and above the maximum in the second.
            X_min, X_max = X.copy(), X.copy()
            for feature_idx in range(X.shape[1]):
                nan_mask = np.isnan(X[:, feature_idx])
                X_min[nan_mask, feature_idx] = self.data_min_[feature_idx] - 1
                X_max[nan_mask, feature_idx] = self.data_max_[feature_idx] + 1
            return np.concatenate([X_min, X_max], axis=1)
    def make_missing_value_data(n_samples=int(1e4), seed=0):
        # Build a regression dataset with several distinct missingness
        # patterns (random, censoring, target-informative) and return a
        # train/test split.
        rng = np.random.RandomState(seed)
        X, y = make_regression(n_samples=n_samples, n_features=4, random_state=rng)
        # Pre-bin the data to ensure a deterministic handling by the 2
        # strategies and also make it easier to insert np.nan in a structured
        # way:
        X = KBinsDiscretizer(
            n_bins=42, encode="ordinal", quantile_method="averaged_inverted_cdf"
        ).fit_transform(X)
        # First feature has missing values completely at random:
        rnd_mask = rng.rand(X.shape[0]) > 0.9
        X[rnd_mask, 0] = np.nan
        # Second and third features have missing values for extreme values
        # (censoring missingness):
        low_mask = X[:, 1] == 0
        X[low_mask, 1] = np.nan
        high_mask = X[:, 2] == X[:, 2].max()
        X[high_mask, 2] = np.nan
        # Make the last feature nan pattern very informative:
        y_max = np.percentile(y, 70)
        y_max_mask = y >= y_max
        y[y_max_mask] = y_max
        X[y_max_mask, 3] = np.nan
        # Check that there is at least one missing value in each feature:
        for feature_idx in range(X.shape[1]):
            assert any(np.isnan(X[:, feature_idx]))
        # Let's use a test set to check that the learned decision function is
        # the same as evaluated on unseen data. Otherwise it could just be the
        # case that we find two independent ways to overfit the training set.
        return train_test_split(X, y, random_state=rng)
    # n_samples need to be large enough to minimize the likelihood of having
    # several candidate splits with the same gain value in a given tree.
    X_train, X_test, y_train, y_test = make_missing_value_data(
        n_samples=int(1e4), seed=0
    )
    # Use a small number of leaf nodes and iterations so as to keep
    # under-fitting models to minimize the likelihood of ties when training the
    # model.
    gbm1 = HistGradientBoostingRegressor(max_iter=100, max_leaf_nodes=5, random_state=0)
    gbm1.fit(X_train, y_train)
    gbm2 = make_pipeline(MinMaxImputer(), clone(gbm1))
    gbm2.fit(X_train, y_train)
    # Check that the model reach the same score:
    assert gbm1.score(X_train, y_train) == pytest.approx(gbm2.score(X_train, y_train))
    assert gbm1.score(X_test, y_test) == pytest.approx(gbm2.score(X_test, y_test))
    # Check the individual prediction match as a finer grained
    # decision function check.
    assert_allclose(gbm1.predict(X_train), gbm2.predict(X_train))
    assert_allclose(gbm1.predict(X_test), gbm2.predict(X_test))
def test_infinite_values():
    """Smoke test: +/-inf feature values are handled and fit perfectly."""
    features = np.array([-np.inf, 0, 1, np.inf]).reshape(-1, 1)
    target = np.array([0, 0, 1, 1])
    model = HistGradientBoostingRegressor(min_samples_leaf=1)
    model.fit(features, target)
    np.testing.assert_allclose(model.predict(features), target, atol=1e-4)
def test_consistent_lengths():
    """Mismatched lengths of X, y, or sample_weight must raise ValueError."""
    X = np.array([-np.inf, 0, 1, np.inf]).reshape(-1, 1)
    y = np.array([0, 0, 1, 1])
    # One entry short on purpose.
    weights = np.array([0.1, 0.3, 0.1])
    est = HistGradientBoostingRegressor()
    with pytest.raises(ValueError, match=r"sample_weight.shape == \(3,\), expected"):
        est.fit(X, y, weights)
    with pytest.raises(
        ValueError, match="Found input variables with inconsistent number"
    ):
        est.fit(X, y[1:])
def test_infinite_values_missing_values():
    """inf and nan can coexist; a depth-2 stump separates each pattern.

    This is similar to test_split_on_nan_with_infinite_values() in
    test_grower.py, though we cannot check the predictions for binned
    values here.
    """
    X = np.asarray([-np.inf, 0, 1, np.inf, np.nan]).reshape(-1, 1)
    target_nan = np.isnan(X.ravel())
    target_inf = X.ravel() == np.inf
    clf = HistGradientBoostingClassifier(
        min_samples_leaf=1, max_iter=1, learning_rate=1, max_depth=2
    )
    assert clf.fit(X, target_inf).score(X, target_inf) == 1
    assert clf.fit(X, target_nan).score(X, target_nan) == 1
@pytest.mark.parametrize("scoring", [None, "loss"])
def test_string_target_early_stopping(scoring):
    """Regression test for #14709: string targets work with early stopping.

    The targets need to be encoded before computing the score.
    """
    rng = np.random.RandomState(42)
    features = rng.randn(100, 10)
    labels = np.array(["x"] * 50 + ["y"] * 50, dtype=object)
    clf = HistGradientBoostingClassifier(n_iter_no_change=10, scoring=scoring)
    clf.fit(features, labels)
def test_zero_sample_weights_regression():
    """A sample weight of zero must be equivalent to dropping the sample."""
    X = [[1, 0], [1, 0], [1, 0], [0, 1]]
    y = [0, 0, 1, 0]
    # Zero out the first two samples; only the last two should matter.
    weights = [0, 0, 1, 1]
    reg = HistGradientBoostingRegressor(min_samples_leaf=1)
    reg.fit(X, y, sample_weight=weights)
    assert reg.predict([[1, 0]])[0] > 0.5
def test_zero_sample_weights_classification():
    """A sample weight of zero must be equivalent to dropping the sample."""
    # Binary case: the two zero-weighted class-0 samples are ignored, so
    # [1, 0] should be predicted as class 1.
    X = [[1, 0], [1, 0], [1, 0], [0, 1]]
    y = [0, 0, 1, 0]
    weights = [0, 0, 1, 1]
    clf = HistGradientBoostingClassifier(loss="log_loss", min_samples_leaf=1)
    clf.fit(X, y, sample_weight=weights)
    assert_array_equal(clf.predict([[1, 0]]), [1])
    # Multiclass case with the same expectation.
    X = [[1, 0], [1, 0], [1, 0], [0, 1], [1, 1]]
    y = [0, 0, 1, 0, 2]
    weights = [0, 0, 1, 1, 1]
    clf = HistGradientBoostingClassifier(loss="log_loss", min_samples_leaf=1)
    clf.fit(X, y, sample_weight=weights)
    assert_array_equal(clf.predict([[1, 0]]), [1])
@pytest.mark.parametrize(
    "problem", ("regression", "binary_classification", "multiclass_classification")
)
@pytest.mark.parametrize("duplication", ("half", "all"))
def test_sample_weight_effect(problem, duplication):
    """Duplicating a sample must be equivalent to giving it a weight of 2."""
    # fails for n_samples > 255 because binning does not take sample weights
    # into account. Keeping n_samples <= 255 makes
    # sure only unique values are used so SW have no effect on binning.
    n_samples = 255
    n_features = 2
    if problem == "regression":
        X, y = make_regression(
            n_samples=n_samples,
            n_features=n_features,
            n_informative=n_features,
            random_state=0,
        )
        Klass = HistGradientBoostingRegressor
    else:
        n_classes = 2 if problem == "binary_classification" else 3
        X, y = make_classification(
            n_samples=n_samples,
            n_features=n_features,
            n_informative=n_features,
            n_redundant=0,
            n_clusters_per_class=1,
            n_classes=n_classes,
            random_state=0,
        )
        Klass = HistGradientBoostingClassifier
    # This test can't pass if min_samples_leaf > 1 because that would force 2
    # samples to be in the same node in est_sw, while these samples would be
    # free to be separate in est_dup: est_dup would just group together the
    # duplicated samples.
    est = Klass(min_samples_leaf=1)
    # Create dataset with duplicate and corresponding sample weights
    if duplication == "half":
        lim = n_samples // 2
    else:
        lim = n_samples
    X_dup = np.r_[X, X[:lim]]
    y_dup = np.r_[y, y[:lim]]
    # Weight 2 for exactly the samples that were duplicated above.
    sample_weight = np.ones(shape=(n_samples))
    sample_weight[:lim] = 2
    est_sw = clone(est).fit(X, y, sample_weight=sample_weight)
    est_dup = clone(est).fit(X_dup, y_dup)
    # checking raw_predict is stricter than just predict for classification
    assert np.allclose(est_sw._raw_predict(X_dup), est_dup._raw_predict(X_dup))
@pytest.mark.parametrize("Loss", (HalfSquaredError, AbsoluteError))
def test_sum_hessians_are_sample_weight(Loss):
    """Constant-hessian losses: histogram sum_hessians equals summed weights.

    For losses with constant hessians, the sum_hessians field of the
    histograms must be equal to the sum of the sample weight of samples at
    the corresponding bin.
    """
    rng = np.random.RandomState(0)
    n_samples = 1000
    n_features = 2
    X, y = make_regression(n_samples=n_samples, n_features=n_features, random_state=rng)
    bin_mapper = _BinMapper()
    X_binned = bin_mapper.fit_transform(X)
    # While sample weights are supposed to be positive, this still works.
    sample_weight = rng.normal(size=n_samples)
    loss = Loss(sample_weight=sample_weight)
    gradients, hessians = loss.init_gradient_and_hessian(
        n_samples=n_samples, dtype=G_H_DTYPE
    )
    # The loss API works on 2d (n_samples, 1) arrays here.
    gradients, hessians = gradients.reshape((-1, 1)), hessians.reshape((-1, 1))
    raw_predictions = rng.normal(size=(n_samples, 1))
    loss.gradient_hessian(
        y_true=y,
        raw_prediction=raw_predictions,
        sample_weight=sample_weight,
        gradient_out=gradients,
        hessian_out=hessians,
        n_threads=n_threads,
    )
    # build sum_sample_weight which contains the sum of the sample weights at
    # each bin (for each feature). This must be equal to the sum_hessians
    # field of the corresponding histogram
    sum_sw = np.zeros(shape=(n_features, bin_mapper.n_bins))
    for feature_idx in range(n_features):
        for sample_idx in range(n_samples):
            sum_sw[feature_idx, X_binned[sample_idx, feature_idx]] += sample_weight[
                sample_idx
            ]
    # Build histogram
    grower = TreeGrower(
        X_binned, gradients[:, 0], hessians[:, 0], n_bins=bin_mapper.n_bins
    )
    histograms = grower.histogram_builder.compute_histograms_brute(
        grower.root.sample_indices
    )
    for feature_idx in range(n_features):
        for bin_idx in range(bin_mapper.n_bins):
            assert histograms[feature_idx, bin_idx]["sum_hessians"] == (
                pytest.approx(sum_sw[feature_idx, bin_idx], rel=1e-5)
            )
def test_max_depth_max_leaf_nodes():
    """max_leaf_nodes is respected even when max_depth is hit simultaneously.

    Non-regression test for
    https://github.com/scikit-learn/scikit-learn/issues/16179: when both
    criteria were met at the same time, max_leaf_nodes could be exceeded.
    """
    X, y = make_classification(random_state=0)
    clf = HistGradientBoostingClassifier(max_depth=2, max_leaf_nodes=3, max_iter=1).fit(
        X, y
    )
    predictor = clf._predictors[0][0]
    assert predictor.get_max_depth() == 2
    assert predictor.get_n_leaf_nodes() == 3  # would be 4 prior to bug fix
def test_early_stopping_on_test_set_with_warm_start():
    """Second warm-start fit works with early stopping and no validation set.

    Non-regression test for #16661 where the second fit failed with
    warm_start=True, early_stopping on, and validation_fraction=None.
    """
    X, y = make_classification(random_state=0)
    clf = HistGradientBoostingClassifier(
        max_iter=1,
        scoring="loss",
        warm_start=True,
        early_stopping=True,
        n_iter_no_change=1,
        validation_fraction=None,
    )
    clf.fit(X, y)
    # The second call must not raise.
    clf.set_params(max_iter=2)
    clf.fit(X, y)
def test_early_stopping_with_sample_weights(monkeypatch):
    """Check that sample_weight is passed to the scorer and that _raw_predict
    is not called when scoring is a string."""
    mock_scorer = Mock(side_effect=get_scorer("neg_median_absolute_error"))
    def mock_check_scoring(estimator, scoring):
        # Intercept check_scoring so the estimator uses our mock scorer.
        assert scoring == "neg_median_absolute_error"
        return mock_scorer
    monkeypatch.setattr(hgb_module, "check_scoring", mock_check_scoring)
    X, y = make_regression(random_state=0)
    sample_weight = np.ones_like(y)
    hist = HistGradientBoostingRegressor(
        max_iter=2,
        early_stopping=True,
        random_state=0,
        scoring="neg_median_absolute_error",
    )
    # Spy on _raw_predict to verify it is bypassed with a string scorer.
    mock_raw_predict = Mock(side_effect=hist._raw_predict)
    hist._raw_predict = mock_raw_predict
    hist.fit(X, y, sample_weight=sample_weight)
    # _raw_predict should never be called with scoring as a string
    assert mock_raw_predict.call_count == 0
    # For scorer is called twice (train and val) for the baseline score, and twice
    # per iteration (train and val) after that. So 6 times in total for `max_iter=2`.
    assert mock_scorer.call_count == 6
    for arg_list in mock_scorer.call_args_list:
        assert "sample_weight" in arg_list[1]
def test_raw_predict_is_called_with_custom_scorer():
    """A custom (callable) scorer still goes through _raw_predict."""
    scorer_mock = Mock(side_effect=get_scorer("neg_median_absolute_error"))
    X, y = make_regression(random_state=0)
    est = HistGradientBoostingRegressor(
        max_iter=2,
        early_stopping=True,
        random_state=0,
        scoring=scorer_mock,
    )
    raw_predict_mock = Mock(side_effect=est._raw_predict)
    est._raw_predict = raw_predict_mock
    est.fit(X, y)
    # Both `_raw_predict` and the scorer run twice (train and val) for the
    # baseline score, then twice per iteration after that: 6 calls in total
    # for `max_iter=2`.
    assert raw_predict_mock.call_count == 6
    assert scorer_mock.call_count == 6
@pytest.mark.parametrize(
    "Est", (HistGradientBoostingClassifier, HistGradientBoostingRegressor)
)
def test_single_node_trees(Est):
    """Single-node trees are possible and their root value is 0.

    If min_gain_to_split is not respected right from the root, the tree is a
    single node; its value must be 0 so the tree has no impact on the
    predictions.
    """
    X, y = make_classification(random_state=0)
    y[:] = 1  # constant target will lead to a single root node
    model = Est(max_iter=20)
    model.fit(X, y)
    assert all(len(p[0].nodes) == 1 for p in model._predictors)
    assert all(p[0].nodes[0]["value"] == 0 for p in model._predictors)
    # Predictions remain correct thanks to the baseline prediction.
    assert_allclose(model.predict(X), y)
@pytest.mark.parametrize(
    "Est, loss, X, y",
    [
        (
            HistGradientBoostingClassifier,
            HalfBinomialLoss(sample_weight=None),
            X_classification,
            y_classification,
        ),
        (
            HistGradientBoostingRegressor,
            HalfSquaredError(sample_weight=None),
            X_regression,
            y_regression,
        ),
    ],
)
def test_custom_loss(Est, loss, X, y):
    """Passing a loss instance (rather than a string) to `loss` works."""
    est = Est(loss=loss, max_iter=20)
    est.fit(X, y)
@pytest.mark.parametrize(
    "HistGradientBoosting, X, y",
    [
        (HistGradientBoostingClassifier, X_classification, y_classification),
        (HistGradientBoostingRegressor, X_regression, y_regression),
        (
            HistGradientBoostingClassifier,
            X_multi_classification,
            y_multi_classification,
        ),
    ],
)
def test_staged_predict(HistGradientBoosting, X, y):
    """Staged predictions at iteration i match a model trained with max_iter=i."""
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.5, random_state=0
    )
    gb = HistGradientBoosting(max_iter=10)
    # test raise NotFittedError if not fitted
    with pytest.raises(NotFittedError):
        next(gb.staged_predict(X_test))
    gb.fit(X_train, y_train)
    # test if the staged predictions of each iteration
    # are equal to the corresponding predictions of the same estimator
    # trained from scratch.
    # this also test limit case when max_iter = 1
    method_names = (
        ["predict"]
        if is_regressor(gb)
        else ["predict", "predict_proba", "decision_function"]
    )
    for method_name in method_names:
        staged_method = getattr(gb, "staged_" + method_name)
        # One staged prediction per boosting iteration.
        staged_predictions = list(staged_method(X_test))
        assert len(staged_predictions) == gb.n_iter_
        for n_iter, staged_predictions in enumerate(staged_method(X_test), 1):
            # Retrain from scratch with exactly n_iter iterations and
            # compare against the staged output.
            aux = HistGradientBoosting(max_iter=n_iter)
            aux.fit(X_train, y_train)
            pred_aux = getattr(aux, method_name)(X_test)
            assert_allclose(staged_predictions, pred_aux)
            assert staged_predictions.shape == pred_aux.shape
@pytest.mark.parametrize("insert_missing", [False, True])
@pytest.mark.parametrize(
    "Est", (HistGradientBoostingRegressor, HistGradientBoostingClassifier)
)
@pytest.mark.parametrize("bool_categorical_parameter", [True, False])
@pytest.mark.parametrize("missing_value", [np.nan, -1])
def test_unknown_categories_nan(
    insert_missing, Est, bool_categorical_parameter, missing_value
):
    """Predicting on categories unseen at fit does not error.

    Unknown categories must be treated the same as missing values (nans).
    """
    rng = np.random.RandomState(0)
    n_samples = 1000
    f1 = rng.rand(n_samples)
    f2 = rng.randint(4, size=n_samples)
    X = np.c_[f1, f2]
    y = np.zeros(shape=n_samples)
    y[X[:, 1] % 2 == 0] = 1
    # Specify the categorical feature either as a boolean mask or as an
    # index list, depending on the parametrization.
    if bool_categorical_parameter:
        categorical_features = [False, True]
    else:
        categorical_features = [1]
    if insert_missing:
        # Sprinkle a few missing values into the training data.
        mask = rng.binomial(1, 0.01, size=X.shape).astype(bool)
        assert mask.sum() > 0
        X[mask] = missing_value
    est = Est(max_iter=20, categorical_features=categorical_features).fit(X, y)
    assert_array_equal(est.is_categorical_, [False, True])
    # Make sure no error is raised on unknown categories and nans
    # unknown categories will be treated as nans
    X_test = np.zeros((10, X.shape[1]), dtype=float)
    X_test[:5, 1] = 30
    X_test[5:, 1] = missing_value
    assert len(np.unique(est.predict(X_test))) == 1
def test_categorical_encoding_strategies():
    """Native categorical handling needs fewer splits than OHE or ordinal."""
    # Check native categorical handling vs different encoding strategies. We
    # make sure that native encoding needs only 1 split to achieve a perfect
    # prediction on a simple dataset. In contrast, OneHotEncoded data needs
    # more depth / splits, and treating categories as ordered (just using
    # OrdinalEncoder) requires even more depth.
    # dataset with one random continuous feature, and one categorical feature
    # with values in [0, 5], e.g. from an OrdinalEncoder.
    # class == 1 iff categorical value in {0, 2, 4}
    rng = np.random.RandomState(0)
    n_samples = 10_000
    f1 = rng.rand(n_samples)
    f2 = rng.randint(6, size=n_samples)
    X = np.c_[f1, f2]
    y = np.zeros(shape=n_samples)
    y[X[:, 1] % 2 == 0] = 1
    # make sure dataset is balanced so that the baseline_prediction doesn't
    # influence predictions too much with max_iter = 1
    assert 0.49 < y.mean() < 0.51
    # The categorical feature can be specified as a boolean mask or as an
    # index list; a feature-name spec is added below if pandas is available.
    native_cat_specs = [
        [False, True],
        [1],
    ]
    try:
        import pandas as pd
        X = pd.DataFrame(X, columns=["f_0", "f_1"])
        native_cat_specs.append(["f_1"])
    except ImportError:
        pass
    for native_cat_spec in native_cat_specs:
        clf_cat = HistGradientBoostingClassifier(
            max_iter=1, max_depth=1, categorical_features=native_cat_spec
        )
        clf_cat.fit(X, y)
    # Using native categorical encoding, we get perfect predictions with just
    # one split
    assert cross_val_score(clf_cat, X, y).mean() == 1
    # quick sanity check for the bitset: 0, 2, 4 = 2**0 + 2**2 + 2**4 = 21
    expected_left_bitset = [21, 0, 0, 0, 0, 0, 0, 0]
    left_bitset = clf_cat.fit(X, y)._predictors[0][0].raw_left_cat_bitsets[0]
    assert_array_equal(left_bitset, expected_left_bitset)
    # Treating categories as ordered, we need more depth / more splits to get
    # the same predictions
    clf_no_cat = HistGradientBoostingClassifier(
        max_iter=1, max_depth=4, categorical_features=None
    )
    assert cross_val_score(clf_no_cat, X, y).mean() < 0.9
    clf_no_cat.set_params(max_depth=5)
    assert cross_val_score(clf_no_cat, X, y).mean() == 1
    # Using OHEd data, we need less splits than with pure OEd data, but we
    # still need more splits than with the native categorical splits
    ct = make_column_transformer(
        (OneHotEncoder(sparse_output=False), [1]), remainder="passthrough"
    )
    X_ohe = ct.fit_transform(X)
    clf_no_cat.set_params(max_depth=2)
    assert cross_val_score(clf_no_cat, X_ohe, y).mean() < 0.9
    clf_no_cat.set_params(max_depth=3)
    assert cross_val_score(clf_no_cat, X_ohe, y).mean() == 1
@pytest.mark.parametrize(
    "Est", (HistGradientBoostingClassifier, HistGradientBoostingRegressor)
)
@pytest.mark.parametrize(
    "categorical_features, monotonic_cst, expected_msg",
    [
        (
            [b"hello", b"world"],
            None,
            re.escape(
                "categorical_features must be an array-like of bool, int or str, "
                "got: bytes40."
            ),
        ),
        (
            np.array([b"hello", 1.3], dtype=object),
            None,
            re.escape(
                "categorical_features must be an array-like of bool, int or str, "
                "got: bytes, float."
            ),
        ),
        (
            [0, -1],
            None,
            re.escape(
                "categorical_features set as integer indices must be in "
                "[0, n_features - 1]"
            ),
        ),
        (
            [True, True, False, False, True],
            None,
            re.escape(
                "categorical_features set as a boolean mask must have shape "
                "(n_features,)"
            ),
        ),
        (
            [True, True, False, False],
            [0, -1, 0, 1],
            "Categorical features cannot have monotonic constraints",
        ),
    ],
)
def test_categorical_spec_errors(
    Est, categorical_features, monotonic_cst, expected_msg
):
    """Invalid categorical_features specifications raise the expected errors."""
    n_samples = 100
    X, y = make_classification(random_state=0, n_features=4, n_samples=n_samples)
    rng = np.random.RandomState(0)
    # Make the first two features look like low-cardinality integers.
    X[:, 0] = rng.randint(0, 10, size=n_samples)
    X[:, 1] = rng.randint(0, 10, size=n_samples)
    est = Est(categorical_features=categorical_features, monotonic_cst=monotonic_cst)
    with pytest.raises(ValueError, match=expected_msg):
        est.fit(X, y)
@pytest.mark.parametrize(
    "Est", (HistGradientBoostingClassifier, HistGradientBoostingRegressor)
)
def test_categorical_spec_errors_with_feature_names(Est):
    """Feature-name categorical specs raise clear errors when names mismatch."""
    pd = pytest.importorskip("pandas")
    n_samples = 10
    X = pd.DataFrame(
        {
            "f0": range(n_samples),
            "f1": range(n_samples),
            "f2": [1.0] * n_samples,
        }
    )
    y = [0, 1] * (n_samples // 2)
    # 'f3' is not a column of X.
    est = Est(categorical_features=["f0", "f1", "f3"])
    expected_msg = re.escape(
        "categorical_features has an item value 'f3' which is not a valid "
        "feature name of the training data."
    )
    with pytest.raises(ValueError, match=expected_msg):
        est.fit(X, y)
    # Name-based specs are invalid when fitting on a plain ndarray.
    est = Est(categorical_features=["f0", "f1"])
    expected_msg = re.escape(
        "categorical_features should be passed as an array of integers or "
        "as a boolean mask when the model is fitted on data without feature "
        "names."
    )
    with pytest.raises(ValueError, match=expected_msg):
        est.fit(X.to_numpy(), y)
@pytest.mark.parametrize(
    "Est", (HistGradientBoostingClassifier, HistGradientBoostingRegressor)
)
@pytest.mark.parametrize("categorical_features", ([False, False], []))
@pytest.mark.parametrize("as_array", (True, False))
def test_categorical_spec_no_categories(Est, categorical_features, as_array):
    """is_categorical_ is None when categorical_features selects no feature."""
    X = np.arange(10).reshape(5, 2)
    y = np.arange(5)
    if as_array:
        categorical_features = np.asarray(categorical_features)
    model = Est(categorical_features=categorical_features).fit(X, y)
    assert model.is_categorical_ is None
@pytest.mark.parametrize(
    "Est", (HistGradientBoostingClassifier, HistGradientBoostingRegressor)
)
@pytest.mark.parametrize(
    "use_pandas, feature_name", [(False, "at index 0"), (True, "'f0'")]
)
def test_categorical_bad_encoding_errors(Est, use_pandas, feature_name):
    """Categorical cardinality above max_bins raises a descriptive error."""
    gb = Est(categorical_features=[True], max_bins=2)
    if use_pandas:
        pd = pytest.importorskip("pandas")
        X = pd.DataFrame({"f0": [0, 1, 2]})
    else:
        X = np.array([[0, 1, 2]]).T
    y = np.arange(3)
    # Three distinct categories but max_bins=2 -> error.
    msg = (
        f"Categorical feature {feature_name} is expected to have a "
        "cardinality <= 2 but actually has a cardinality of 3."
    )
    with pytest.raises(ValueError, match=msg):
        gb.fit(X, y)
    # nans are ignored in the counts
    X = np.array([[0, 1, np.nan]]).T
    y = np.arange(3)
    gb.fit(X, y)
@pytest.mark.parametrize(
    "Est", (HistGradientBoostingClassifier, HistGradientBoostingRegressor)
)
def test_uint8_predict(Est):
    """predict accepts uint8 input (non-regression for issue #18408).

    X of dtype uint8 (i.e. X_BINNED_DTYPE) is converted to X_DTYPE in
    predict; see https://github.com/scikit-learn/scikit-learn/issues/18408.
    """
    rng = np.random.RandomState(0)
    X = rng.randint(0, 100, size=(10, 2)).astype(np.uint8)
    y = rng.randint(0, 2, size=10).astype(np.uint8)
    model = Est()
    model.fit(X, y)
    model.predict(X)
@pytest.mark.parametrize(
    "interaction_cst, n_features, result",
    [
        (None, 931, None),
        ([{0, 1}], 2, [{0, 1}]),
        ("pairwise", 2, [{0, 1}]),
        ("pairwise", 4, [{0, 1}, {0, 2}, {0, 3}, {1, 2}, {1, 3}, {2, 3}]),
        ("no_interactions", 2, [{0}, {1}]),
        ("no_interactions", 4, [{0}, {1}, {2}, {3}]),
        ([(1, 0), [5, 1]], 6, [{0, 1}, {1, 5}, {2, 3, 4}]),
    ],
)
def test_check_interaction_cst(interaction_cst, n_features, result):
    """Check that _check_interaction_cst returns the expected list of sets"""
    # Note: tuples/lists in the spec are normalized to sets, and features
    # not covered by any group end up together in a final group (last case).
    est = HistGradientBoostingRegressor()
    est.set_params(interaction_cst=interaction_cst)
    assert est._check_interaction_cst(n_features) == result
def test_interaction_cst_numerically():
    """Check that interaction constraints have no forbidden interactions."""
    rng = np.random.RandomState(42)
    n_samples = 1000
    X = rng.uniform(size=(n_samples, 2))
    # Construct y with a strong interaction term
    # y = x0 + x1 + 5 * x0 * x1
    y = np.hstack((X, 5 * X[:, [0]] * X[:, [1]])).sum(axis=1)
    est = HistGradientBoostingRegressor(random_state=42)
    est.fit(X, y)
    # Same model but with each feature restricted to its own group, so no
    # split may combine x0 and x1.
    est_no_interactions = HistGradientBoostingRegressor(
        interaction_cst=[{0}, {1}], random_state=42
    )
    est_no_interactions.fit(X, y)
    delta = 0.25
    # Make sure we do not extrapolate out of the training set as tree-based estimators
    # are very bad in doing so.
    X_test = X[(X[:, 0] < 1 - delta) & (X[:, 1] < 1 - delta)]
    X_delta_d_0 = X_test + [delta, 0]
    X_delta_0_d = X_test + [0, delta]
    X_delta_d_d = X_test + [delta, delta]
    # Note: For the y from above as a function of x0 and x1, we have
    # y(x0+d, x1+d) = y(x0, x1) + 5 * d * (2/5 + x0 + x1) + 5 * d**2
    # y(x0+d, x1) = y(x0, x1) + 5 * d * (1/5 + x1)
    # y(x0, x1+d) = y(x0, x1) + 5 * d * (1/5 + x0)
    # Without interaction constraints, we would expect a result of 5 * d**2 for the
    # following expression, but zero with constraints in place.
    assert_allclose(
        est_no_interactions.predict(X_delta_d_d)
        + est_no_interactions.predict(X_test)
        - est_no_interactions.predict(X_delta_d_0)
        - est_no_interactions.predict(X_delta_0_d),
        0,
        atol=1e-12,
    )
    # Correct result of the expressions is 5 * delta**2. But this is hard to achieve by
    # a fitted tree-based model. However, with 100 iterations the expression should
    # at least be positive!
    assert np.all(
        est.predict(X_delta_d_d)
        + est.predict(X_test)
        - est.predict(X_delta_d_0)
        - est.predict(X_delta_0_d)
        > 0.01
    )
def test_no_user_warning_with_scoring():
    """No UserWarning is raised when scoring is set (non-regression #22907)."""
    pd = pytest.importorskip("pandas")
    X, y = make_regression(n_samples=50, random_state=0)
    X_df = pd.DataFrame(X, columns=[f"col{i}" for i in range(X.shape[1])])
    reg = HistGradientBoostingRegressor(
        random_state=0, scoring="neg_mean_absolute_error", early_stopping=True
    )
    # Turn any UserWarning into an error: fit must complete silently.
    with warnings.catch_warnings():
        warnings.simplefilter("error", UserWarning)
        reg.fit(X_df, y)
def test_class_weights():
    """High level test to check class_weights."""
    n_samples = 255
    n_features = 2
    X, y = make_classification(
        n_samples=n_samples,
        n_features=n_features,
        n_informative=n_features,
        n_redundant=0,
        n_clusters_per_class=1,
        n_classes=2,
        random_state=0,
    )
    y_is_1 = y == 1
    # class_weight is the same as sample weights with the corresponding class
    clf = HistGradientBoostingClassifier(
        min_samples_leaf=2, random_state=0, max_depth=2
    )
    sample_weight = np.ones(shape=(n_samples))
    sample_weight[y_is_1] = 3.0
    clf.fit(X, y, sample_weight=sample_weight)
    class_weight = {0: 1.0, 1: 3.0}
    clf_class_weighted = clone(clf).set_params(class_weight=class_weight)
    clf_class_weighted.fit(X, y)
    assert_allclose(clf.decision_function(X), clf_class_weighted.decision_function(X))
    # Check that sample_weight and class_weight are multiplicative
    # (weight 3 class weight times weight 3 sample weight == weight 9).
    clf.fit(X, y, sample_weight=sample_weight**2)
    clf_class_weighted.fit(X, y, sample_weight=sample_weight)
    assert_allclose(clf.decision_function(X), clf_class_weighted.decision_function(X))
    # Make imbalanced dataset
    X_imb = np.concatenate((X[~y_is_1], X[y_is_1][:10]))
    y_imb = np.concatenate((y[~y_is_1], y[y_is_1][:10]))
    # class_weight="balanced" is the same as sample_weights to be
    # inversely proportional to n_samples / (n_classes * np.bincount(y))
    clf_balanced = clone(clf).set_params(class_weight="balanced")
    clf_balanced.fit(X_imb, y_imb)
    # Reproduce the "balanced" weighting manually via sample_weight.
    class_weight = y_imb.shape[0] / (2 * np.bincount(y_imb))
    sample_weight = class_weight[y_imb]
    clf_sample_weight = clone(clf).set_params(class_weight=None)
    clf_sample_weight.fit(X_imb, y_imb, sample_weight=sample_weight)
    assert_allclose(
        clf_balanced.decision_function(X_imb),
        clf_sample_weight.decision_function(X_imb),
    )
def test_unknown_category_that_are_negative():
    """Negative unknown categories do not error (non-regression for #24274)."""
    rng = np.random.RandomState(42)
    n_samples = 1000
    X = np.c_[rng.rand(n_samples), rng.randint(4, size=n_samples)]
    y = np.zeros(shape=n_samples)
    y[X[:, 1] % 2 == 0] = 1
    model = HistGradientBoostingRegressor(
        random_state=0,
        categorical_features=[False, True],
        max_iter=10,
    ).fit(X, y)
    # Negative values in the categorical column behave like a missing
    # category.
    X_neg = np.asarray([[1, -2], [3, -4]])
    X_nan = np.asarray([[1, np.nan], [3, np.nan]])
    assert_allclose(model.predict(X_neg), model.predict(X_nan))
@pytest.mark.parametrize(
    ("GradientBoosting", "make_X_y"),
    [
        (HistGradientBoostingClassifier, make_classification),
        (HistGradientBoostingRegressor, make_regression),
    ],
)
@pytest.mark.parametrize("sample_weight", [False, True])
def test_X_val_in_fit(GradientBoosting, make_X_y, sample_weight, global_random_seed):
    """Test that passing X_val, y_val in fit is same as validation fraction."""
    rng = np.random.RandomState(42)
    n_samples = 100
    X, y = make_X_y(n_samples=n_samples, random_state=rng)
    if sample_weight:
        sample_weight = np.abs(rng.normal(size=n_samples))
        data = (X, y, sample_weight)
    else:
        sample_weight = None
        data = (X, y)
    rng_seed = global_random_seed
    # Fit with validation fraction and early stopping.
    m1 = GradientBoosting(
        early_stopping=True,
        validation_fraction=0.5,
        random_state=rng_seed,
    )
    m1.fit(X, y, sample_weight)
    # Do train-test split ourselves.
    rng = check_random_state(rng_seed)
    # We do the same as in the fit method.
    # NOTE: the stratify/seed choices below must mirror fit's internal split
    # exactly, otherwise the two models see different validation data.
    stratify = y if isinstance(m1, HistGradientBoostingClassifier) else None
    random_seed = rng.randint(np.iinfo(np.uint32).max, dtype="u8")
    X_train, X_val, y_train, y_val, *sw = train_test_split(
        *data,
        test_size=0.5,
        stratify=stratify,
        random_state=random_seed,
    )
    if sample_weight is not None:
        sample_weight_train = sw[0]
        sample_weight_val = sw[1]
    else:
        sample_weight_train = None
        sample_weight_val = None
    m2 = GradientBoosting(
        early_stopping=True,
        random_state=rng_seed,
    )
    m2.fit(
        X_train,
        y_train,
        sample_weight=sample_weight_train,
        X_val=X_val,
        y_val=y_val,
        sample_weight_val=sample_weight_val,
    )
    # Both routes must stop at the same iteration and predict identically.
    assert_allclose(m2.n_iter_, m1.n_iter_)
    assert_allclose(m2.predict(X), m1.predict(X))
def test_X_val_raises_missing_y_val():
    """Passing only one of X_val / y_val to fit raises a ValueError."""
    X_all, y_all = make_classification(n_samples=4)
    X, X_val = X_all[:2], X_all[2:]
    y, y_val = y_all[:2], y_all[2:]
    with pytest.raises(
        ValueError,
        match="X_val is provided, but y_val was not provided",
    ):
        HistGradientBoostingClassifier().fit(X, y, X_val=X_val)
    with pytest.raises(
        ValueError,
        match="y_val is provided, but X_val was not provided",
    ):
        HistGradientBoostingClassifier().fit(X, y, y_val=y_val)
def test_X_val_raises_with_early_stopping_false():
    """Providing X_val/y_val while early_stopping=False raises a ValueError."""
    X_all, y_all = make_regression(n_samples=4)
    X_train, X_val = X_all[:2], X_all[2:]
    y_train, y_val = y_all[:2], y_all[2:]
    reg = HistGradientBoostingRegressor(early_stopping=False)
    with pytest.raises(
        ValueError,
        match="X_val and y_val are passed to fit while at the same time",
    ):
        reg.fit(X_train, y_train, X_val=X_val, y_val=y_val)
@pytest.mark.parametrize("dataframe_lib", ["pandas", "polars"])
@pytest.mark.parametrize(
    "HistGradientBoosting",
    [HistGradientBoostingClassifier, HistGradientBoostingRegressor],
)
def test_dataframe_categorical_results_same_as_ndarray(
    dataframe_lib, HistGradientBoosting
):
    """Dataframe (pandas/polars) categorical dtypes give the same results as
    the equivalent ndarray encoding."""
    pytest.importorskip(dataframe_lib)
    rng = np.random.RandomState(42)
    n_samples = 5_000
    n_cardinality = 50
    max_bins = 100
    f_num = rng.rand(n_samples)
    f_cat = rng.randint(n_cardinality, size=n_samples)
    # Make f_cat an informative feature
    y = (f_cat % 3 == 0) & (f_num > 0.2)
    X = np.c_[f_num, f_cat]
    # String labels for the dataframe version; zero-padded so their
    # lexicographic order matches the integer order.
    f_cat = [f"cat{c:0>3}" for c in f_cat]
    X_df = _convert_container(
        np.asarray([f_num, f_cat]).T,
        dataframe_lib,
        ["f_num", "f_cat"],
        categorical_feature_names=["f_cat"],
    )
    X_train, X_test, X_train_df, X_test_df, y_train, y_test = train_test_split(
        X, X_df, y, random_state=0
    )
    hist_kwargs = dict(max_iter=10, max_bins=max_bins, random_state=0)
    hist_np = HistGradientBoosting(categorical_features=[False, True], **hist_kwargs)
    hist_np.fit(X_train, y_train)
    hist_pd = HistGradientBoosting(categorical_features="from_dtype", **hist_kwargs)
    hist_pd.fit(X_train_df, y_train)
    # Check categories are correct and sorted
    categories = hist_pd._preprocessor.named_transformers_["encoder"].categories_[0]
    assert_array_equal(categories, np.unique(f_cat))
    # Same number of trees with the same number of nodes each.
    assert len(hist_np._predictors) == len(hist_pd._predictors)
    for predictor_1, predictor_2 in zip(hist_np._predictors, hist_pd._predictors):
        assert len(predictor_1[0].nodes) == len(predictor_2[0].nodes)
    score_np = hist_np.score(X_test, y_test)
    score_pd = hist_pd.score(X_test_df, y_test)
    assert score_np == pytest.approx(score_pd)
    assert_allclose(hist_np.predict(X_test), hist_pd.predict(X_test_df))
@pytest.mark.parametrize("dataframe_lib", ["pandas", "polars"])
@pytest.mark.parametrize(
    "HistGradientBoosting",
    [HistGradientBoostingClassifier, HistGradientBoostingRegressor],
)
def test_dataframe_categorical_errors(dataframe_lib, HistGradientBoosting):
    """Dataframe (pandas/polars) categorical features with too high a
    cardinality raise a descriptive error."""
    pytest.importorskip(dataframe_lib)
    msg = "Categorical feature 'f_cat' is expected to have a cardinality <= 16"
    hist = HistGradientBoosting(categorical_features="from_dtype", max_bins=16)
    rng = np.random.RandomState(42)
    # ~100 distinct categories, well above max_bins=16.
    f_cat = rng.randint(0, high=100, size=100).astype(str)
    X_df = _convert_container(
        f_cat[:, None], dataframe_lib, ["f_cat"], categorical_feature_names=["f_cat"]
    )
    y = rng.randint(0, high=2, size=100)
    with pytest.raises(ValueError, match=msg):
        hist.fit(X_df, y)
@pytest.mark.parametrize("dataframe_lib", ["pandas", "polars"])
def test_categorical_different_order_same_model(dataframe_lib):
    """Check that the order of the categorical gives same model."""
    pytest.importorskip(dataframe_lib)

    rand = np.random.RandomState(42)
    n_samples = 1_000
    base = rand.randint(low=0, high=2, size=n_samples)

    # Noisy binary target: flip roughly 10% of the labels.
    y = base.copy()
    noise_mask = rand.choice([True, False], size=n_samples, p=[0.1, 0.9])
    y[noise_mask] = 1 - y[noise_mask]

    # Same information, opposite category assignment: 0->A/1->B vs. 0->B/1->A.
    def make_categorical_df(mapping):
        values = np.asarray(mapping)[base]
        return _convert_container(
            values[:, None],
            dataframe_lib,
            ["f_cat"],
            categorical_feature_names=["f_cat"],
        )

    df_a_b = make_categorical_df(["A", "B"])
    df_b_a = make_categorical_df(["B", "A"])

    est_a_b = HistGradientBoostingClassifier(
        categorical_features="from_dtype", random_state=0
    )
    est_b_a = HistGradientBoostingClassifier(
        categorical_features="from_dtype", random_state=0
    )
    est_a_b.fit(df_a_b, y)
    est_b_a.fit(df_b_a, y)

    # Both category orderings must yield structurally identical ensembles.
    assert len(est_a_b._predictors) == len(est_b_a._predictors)
    for pred_1, pred_2 in zip(est_a_b._predictors, est_b_a._predictors):
        assert len(pred_1[0].nodes) == len(pred_2[0].nodes)
def get_different_bitness_node_ndarray(node_ndarray):
    """Return a copy of ``node_ndarray`` whose indexing fields use the
    integer width of the *other* platform bitness (int64 on 32-bit hosts,
    int32 on 64-bit hosts), simulating a cross-bitness pickle."""
    swapped_int = np.int64 if _IS_32BIT else np.int32
    # Fields of the Node struct declared as np.intp (see
    # sklearn/ensemble/_hist_gradient_boosting/common.pyx).
    intp_fields = ("feature_idx",)
    field_dtypes = {
        name: (swapped_int if name in intp_fields else dtype)
        for name, (dtype, _) in node_ndarray.dtype.fields.items()
    }
    swapped_dtype = np.dtype(
        {"names": list(field_dtypes), "formats": list(field_dtypes.values())}
    )
    return node_ndarray.astype(swapped_dtype, casting="same_kind")
def reduce_predictor_with_different_bitness(predictor):
    """Pickle-reduce hook that rewrites a TreePredictor's node array to the
    opposite platform bitness before serialization."""
    cls, args, state = predictor.__reduce__()
    patched_state = dict(state)
    patched_state["nodes"] = get_different_bitness_node_ndarray(patched_state["nodes"])
    return cls, args, patched_state
def test_different_bitness_pickle():
    """A pickle whose tree nodes use the other bitness must load and predict."""
    X, y = make_classification(random_state=0)
    clf = HistGradientBoostingClassifier(random_state=0, max_depth=3)
    clf.fit(X, y)
    baseline_score = clf.score(X, y)

    def dump_with_swapped_bitness():
        # Pickle ``clf`` while rewriting TreePredictor nodes via the custom
        # reducer registered on a per-pickler dispatch table.
        buffer = io.BytesIO()
        pickler = pickle.Pickler(buffer)
        pickler.dispatch_table = copyreg.dispatch_table.copy()
        pickler.dispatch_table[TreePredictor] = reduce_predictor_with_different_bitness
        pickler.dump(clf)
        buffer.seek(0)
        return buffer

    # Simulate loading a pickle of the same model trained on a platform with
    # different bitness than the platform it will be used to predict on:
    restored = pickle.load(dump_with_swapped_bitness())
    assert baseline_score == pytest.approx(restored.score(X, y))
def test_different_bitness_joblib_pickle():
    # Make sure that a platform specific pickle generated on a 64 bit
    # platform can be converted at pickle load time into an estimator
    # with Cython code that works with the host's native integer precision
    # to index nodes in the tree data structure when the host is a 32 bit
    # platform (and vice versa).
    #
    # This is in particular useful to be able to train a model on a 64 bit
    # Linux server and deploy the model as part of a (32 bit) WASM in-browser
    # application using pyodide.
    X, y = make_classification(random_state=0)
    clf = HistGradientBoostingClassifier(random_state=0, max_depth=3)
    clf.fit(X, y)
    baseline_score = clf.score(X, y)

    def joblib_dump_with_swapped_bitness():
        # Same trick as the plain-pickle test, but through joblib's pickler.
        buffer = io.BytesIO()
        pickler = NumpyPickler(buffer)
        pickler.dispatch_table = copyreg.dispatch_table.copy()
        pickler.dispatch_table[TreePredictor] = reduce_predictor_with_different_bitness
        pickler.dump(clf)
        buffer.seek(0)
        return buffer

    restored = joblib.load(joblib_dump_with_swapped_bitness())
    assert baseline_score == pytest.approx(restored.score(X, y))
def test_pandas_nullable_dtype():
    # Non regression test for https://github.com/scikit-learn/scikit-learn/issues/28317
    pd = pytest.importorskip("pandas")
    rand = np.random.default_rng(0)

    # A column with pandas' nullable Int64 extension dtype; fitting simply
    # must not raise.
    X = pd.DataFrame({"a": rand.integers(10, size=100)}).astype(pd.Int64Dtype())
    y = rand.integers(2, size=100)
    HistGradientBoostingClassifier().fit(X, y)
|
python
|
github
|
https://github.com/scikit-learn/scikit-learn
|
sklearn/ensemble/_hist_gradient_boosting/tests/test_gradient_boosting.py
|
# Copyright (C) 2016 A10 Networks Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from a10_openstack_lib.resources import validators
# Extension alias and service name for the A10 scaling-group API.
EXTENSION = 'a10-scaling-group'
SERVICE = "A10_SCALING_GROUP"
# Collection / member names for each resource exposed by this extension.
SCALING_GROUPS = 'a10_scaling_groups'
SCALING_GROUP = 'a10_scaling_group'
SCALING_GROUP_WORKERS = 'a10_scaling_group_workers'
SCALING_GROUP_WORKER = 'a10_scaling_group_worker'
SCALING_POLICIES = 'a10_scaling_policies'
SCALING_POLICY = 'a10_scaling_policy'
SCALING_ALARMS = 'a10_scaling_alarms'
SCALING_ALARM = 'a10_scaling_alarm'
SCALING_ACTIONS = 'a10_scaling_actions'
SCALING_ACTION = 'a10_scaling_action'
# Enumerations of the allowed values for scaling alarm and action fields.
ALARM_UNITS = ['count', 'percentage', 'bytes']
ALARM_AGGREGATIONS = ['avg', 'min', 'max', 'sum']
ALARM_MEASUREMENTS = ['connections', 'memory', 'cpu', 'interface']
ALARM_OPERATORS = ['>=', '>', '<=', '<']
ALARM_PERIOD_UNITS = ['minute', 'hour', 'day']
ACTIONS = ['scale-in', 'scale-out']
# Attribute map describing, for each resource collection, which fields are
# accepted on POST/PUT, how they are validated and converted, and whether
# they are returned in API responses.
#
# Consistency fix: the 'type:values' lists now reference the module-level
# constants (ALARM_*, ACTIONS) defined above instead of repeating the same
# literals, so the two can no longer drift apart.  Values are unchanged.
RESOURCE_ATTRIBUTE_MAP = {
    SCALING_GROUPS: {
        'id': {
            'allow_post': False,
            'allow_put': True,
            'validate': {
                'type:uuid': None
            },
            'is_visible': True,
            'primary_key': True
        },
        'tenant_id': {
            'allow_post': True,
            'allow_put': False,
            'required_by_policy': True,
            'is_visible': True
        },
        'name': {
            'allow_post': True,
            'allow_put': True,
            'validate': {
                'type:string': None
            },
            'is_visible': True,
            'default': ''
        },
        'description': {
            'allow_post': True,
            'allow_put': True,
            'validate': {
                'type:string': None
            },
            'is_visible': True,
            'default': '',
        },
        # Optional reference to a scaling policy; nullable on purpose.
        'scaling_policy_id': {
            'allow_post': True,
            'allow_put': True,
            'validate': {
                'type:a10_nullable': {
                    'type:uuid': None,
                    'type:a10_reference': SCALING_POLICY
                }
            },
            'is_visible': True,
            'default': lambda attr: attr.ATTR_NOT_SPECIFIED
        }
    },
    SCALING_GROUP_WORKERS: {
        'id': {
            'allow_post': False,
            'allow_put': True,
            'validate': {
                'type:uuid': None
            },
            'is_visible': True,
            'primary_key': True
        },
        'tenant_id': {
            'allow_post': True,
            'allow_put': False,
            'required_by_policy': True,
            'is_visible': True
        },
        'name': {
            'allow_post': True,
            'allow_put': True,
            'validate': {
                'type:string': None
            },
            'is_visible': True,
            'default': ''
        },
        'description': {
            'allow_post': True,
            'allow_put': True,
            'validate': {
                'type:string': None
            },
            'is_visible': True,
            'default': '',
        },
        'scaling_group_id': {
            'allow_post': True,
            'allow_put': False,
            'validate': {
                'type:uuid': None,
                'type:a10_reference': SCALING_GROUP
            },
            'is_visible': True
        },
        'host': {
            'allow_post': False,
            'allow_put': True,
            'validate': {
                'type:string': None
            },
            'is_visible': True
        },
        'username': {
            'allow_post': False,
            'allow_put': True,
            'validate': {
                'type:string': None
            },
            'is_visible': True
        },
        # Credentials are writable but never echoed back in responses.
        'password': {
            'allow_post': False,
            'allow_put': True,
            'validate': {
                'type:string': None
            },
            'is_visible': False
        },
        'api_version': {
            'allow_post': False,
            'allow_put': True,
            'validate': {
                'type:values': ['2.1', '3.0']
            },
            'is_visible': True
        },
        'protocol': {
            'allow_post': False,
            'allow_put': True,
            'validate': {
                'type:values': ['http', 'https']
            },
            'convert_to': lambda attr: validators.convert_to_lower,
            'is_visible': True,
            'default': lambda attr: attr.ATTR_NOT_SPECIFIED
        },
        'port': {
            'allow_post': False,
            'allow_put': True,
            'validate': {
                'type:range': [0, 65535]
            },
            'convert_to': lambda attr: attr.convert_to_int,
            'is_visible': True,
            'default': lambda attr: attr.ATTR_NOT_SPECIFIED
        },
        'nova_instance_id': {
            'allow_post': False,
            'allow_put': False,
            'validate': {
                'type:uuid': None
            },
            'is_visible': True,
            'default': lambda attr: attr.ATTR_NOT_SPECIFIED
        },
        # NOTE(review): accepted on POST but not returned in responses
        # (is_visible False) -- confirm this asymmetry is intended.
        'nova_flavor': {
            'allow_post': True,
            'allow_put': False,
            'validate': {
                'type:string': None
            },
            'is_visible': False,
            'default': lambda attr: attr.ATTR_NOT_SPECIFIED
        }
    },
    SCALING_POLICIES: {
        'id': {
            'allow_post': False,
            'allow_put': True,
            'validate': {
                'type:uuid': None
            },
            'is_visible': True,
            'primary_key': True
        },
        'tenant_id': {
            'allow_post': True,
            'allow_put': False,
            'required_by_policy': True,
            'is_visible': True
        },
        'name': {
            'allow_post': True,
            'allow_put': True,
            'validate': {
                'type:string': None
            },
            'is_visible': True,
            'default': ''
        },
        'description': {
            'allow_post': True,
            'allow_put': True,
            'validate': {
                'type:string': None
            },
            'is_visible': True,
            'default': '',
        },
        # Seconds to wait between scaling reactions.
        'cooldown': {
            'allow_post': True,
            'allow_put': True,
            'validate': {
                'type:non_negative': None
            },
            'convert_to': lambda attr: attr.convert_to_int,
            'is_visible': True,
            'default': 300,
        },
        'min_instances': {
            'allow_post': True,
            'allow_put': True,
            'validate': {
                'type:non_negative': None
            },
            'convert_to': lambda attr: attr.convert_to_int,
            'is_visible': True,
            'default': 1,
        },
        # max_instances may be null, meaning "no upper bound".
        'max_instances': {
            'allow_post': True,
            'allow_put': True,
            'validate': {
                'type:a10_nullable': {
                    'type:non_negative': None
                }
            },
            'convert_to': lambda attr: validators.convert_nullable(attr.convert_to_int),
            'is_visible': True,
            'default': lambda attr: attr.ATTR_NOT_SPECIFIED
        },
        'reactions': {
            'allow_post': True,
            'allow_put': True,
            'convert_list_to': lambda attr: attr.convert_kvp_list_to_dict,
            'is_visible': True,
            'default': lambda attr: attr.ATTR_NOT_SPECIFIED
        }
    },
    SCALING_ALARMS: {
        'id': {
            'allow_post': False,
            'allow_put': True,
            'validate': {
                'type:uuid': None
            },
            'is_visible': True,
            'primary_key': True
        },
        'tenant_id': {
            'allow_post': True,
            'allow_put': False,
            'required_by_policy': True,
            'is_visible': True
        },
        'name': {
            'allow_post': True,
            'allow_put': True,
            'validate': {
                'type:string': None
            },
            'is_visible': True,
            'default': ''
        },
        'description': {
            'allow_post': True,
            'allow_put': True,
            'validate': {
                'type:string': None
            },
            'is_visible': True,
            'default': '',
        },
        'aggregation': {
            'allow_post': True,
            'allow_put': True,
            'validate': {
                'type:values': ALARM_AGGREGATIONS
            },
            'is_visible': True,
            'convert_to': lambda attr: validators.convert_to_lower,
            'default': 'avg'
        },
        'measurement': {
            'allow_post': True,
            'allow_put': True,
            'validate': {
                'type:values': ALARM_MEASUREMENTS
            },
            'convert_to': lambda attr: validators.convert_to_lower,
            'is_visible': True
        },
        'operator': {
            'allow_post': True,
            'allow_put': True,
            'validate': {
                'type:values': ALARM_OPERATORS
            },
            'is_visible': True
        },
        'threshold': {
            'allow_post': True,
            'allow_put': True,
            'validate': {
                'type:a10_float': None
            },
            'convert_to': lambda attr: validators.convert_to_float,
            'is_visible': True
        },
        'unit': {
            'allow_post': True,
            'allow_put': True,
            'validate': {
                'type:values': ALARM_UNITS
            },
            'convert_to': lambda attr: validators.convert_to_lower,
            'is_visible': True
        },
        'period': {
            'allow_post': True,
            'allow_put': True,
            'validate': {
                'type:non_negative': None
            },
            'convert_to': lambda attr: attr.convert_to_int,
            'is_visible': True,
        },
        'period_unit': {
            'allow_post': True,
            'allow_put': True,
            'validate': {
                'type:values': ALARM_PERIOD_UNITS
            },
            'convert_to': lambda attr: validators.convert_to_lower,
            'is_visible': True
        }
    },
    SCALING_ACTIONS: {
        'id': {
            'allow_post': False,
            'allow_put': True,
            'validate': {
                'type:uuid': None
            },
            'is_visible': True,
            'primary_key': True
        },
        'tenant_id': {
            'allow_post': True,
            'allow_put': False,
            'required_by_policy': True,
            'is_visible': True
        },
        'name': {
            'allow_post': True,
            'allow_put': True,
            'validate': {
                'type:string': None
            },
            'is_visible': True,
            'default': ''
        },
        'description': {
            'allow_post': True,
            'allow_put': True,
            'validate': {
                'type:string': None
            },
            'is_visible': True,
            'default': '',
        },
        'action': {
            'allow_post': True,
            'allow_put': True,
            'validate': {
                'type:values': ACTIONS
            },
            'convert_to': lambda attr: validators.convert_to_lower,
            'is_visible': True
        },
        'amount': {
            'allow_post': True,
            'allow_put': True,
            'validate': {
                'type:non_negative': None
            },
            'convert_to': lambda attr: attr.convert_to_int,
            'is_visible': True,
        },
    }
}
# Custom validator registry shared with the validators module.
VALIDATORS = validators.VALIDATORS
|
unknown
|
codeparrot/codeparrot-clean
| ||
import re
def extract_features(node):
    """Build the boosting feature vector for a single parse-tree leaf.

    ``node`` is expected to expose: text, label, leaf_index, previousLeaf,
    nextLeaf, parent (with label/previousSibling/nextSibling/index), and
    root (iterable of constituents, with a ``leaves`` list) -- TODO confirm
    against the treenode module.

    Returns a list of "name:value" feature strings with commas escaped
    (the downstream classifier input format is comma-separated).
    """
    features = []

    # Surface form and POS tag of the leaf itself.
    features.append("text:" + node.text.lower())
    features.append("tag:" + node.label)

    # Immediate left/right leaf context: word, tag, and joined bigrams.
    if node.previousLeaf:
        features.append("text_p:" + node.previousLeaf.text.lower())
        features.append("text_cp:" + node.text.lower() + "_" + node.previousLeaf.text.lower())
        features.append("tag_p:" + node.previousLeaf.label)
        features.append("tag_cp:" + node.previousLeaf.label + "_" + node.label)
    if node.nextLeaf:
        features.append("text_n:" + node.nextLeaf.text.lower())
        features.append("text_cn:" + node.text.lower() + "_" + node.nextLeaf.text.lower())
        features.append("tag_n:" + node.nextLeaf.label)
        features.append("tag_cn:" + node.label + "_" + node.nextLeaf.label)

    # Relative position of the leaf, bucketed into tenths of the sentence.
    features.append("index:%d" % (10.0 * node.leaf_index / len(node.root.leaves)))

    # Parent constituent and its neighboring siblings.
    features.append("parent:" + node.parent.label)
    if node.parent.previousSibling:
        features.append("parent_p:" + node.parent.previousSibling.label)
    else:
        features.append("parent_p:none")
    if node.parent.nextSibling:
        # BUG FIX: this branch previously emitted "parent_p:", clobbering the
        # previous-sibling feature; the else-branch shows "parent_n" intended.
        features.append("parent_n:" + node.parent.nextSibling.label)
    else:
        features.append("parent_n:none")
    features.append("parent_index:%d" % node.parent.index)

    if node.leaf_index < len(node.root.leaves) / 2:
        features.append("first_half:1")

    # Noun phrases that end before this leaf.
    count = 0
    for peer in node.root:
        if peer.label.startswith("NP") and peer.leaves[-1].leaf_index < node.leaf_index:
            common = peer.getCommonParent(node)
            features.append("np_common:%s" % common.label)
            features.append("np_before_common:%s" % common.label)
            features.append("np_before_leaf_first:%s" % peer.leaves[0].label)
            features.append("np_before_leaf_last:%s" % peer.leaves[-1].label)
            count += 1
    features.append("np_before:%d" % count)

    # Noun phrases that start after this leaf.
    count = 0
    for peer in node.root:
        if peer.label.startswith("NP") and peer.leaves[0].leaf_index > node.leaf_index:
            common = peer.getCommonParent(node)
            features.append("np_common:%s" % common.label)
            features.append("np_after_common:%s" % common.label)
            features.append("np_after_leaf_first:%s" % peer.leaves[0].label)
            features.append("np_after_leaf_last:%s" % peer.leaves[-1].label)
            count += 1
    features.append("np_after:%d" % count)

    # Quotation marks after the leaf.  (A dead, duplicated copy of this loop,
    # whose count was immediately discarded, was removed here.)
    count = 0
    for peer in node.root.leaves:
        if re.match(r'(\'\'|``|")', peer.text) and peer.leaf_index > node.leaf_index:
            count += 1
    features.append("quotes_after:%d" % count)

    # Quotation marks before the leaf.
    count = 0
    for peer in node.root.leaves:
        if re.match(r'(\'\'|``|")', peer.text) and peer.leaf_index < node.leaf_index:
            count += 1
    features.append("quotes_before:%d" % count)

    # Pronouns occurring before the leaf.
    prp_before = 0
    for peer in node.root.leaves:
        if peer.label.startswith("PRP") and peer.leaf_index < node.leaf_index:
            prp_before += 1
    # BUG FIX: previously reported the unrelated quote counter ("count") here.
    features.append("prp_before:%d" % prp_before)

    # Earlier pronouns with the exact same surface form.
    repeats = 0
    for peer in node.root.leaves:
        if peer.label.startswith("PRP") and peer.text.lower() == node.text.lower() and peer.leaf_index < node.leaf_index:
            repeats += 1
    features.append("repeats:%d" % repeats)
    if repeats == 0 and prp_before > 0:
        features.append("other_prp_before:1")

    # Sentence length, bucketed by fives.
    features.append("length:%d" % (len(node.root.leaves) / 5))

    # Commas would break the classifier's comma-separated feature format.
    features = [x.replace(",", "<COMMA>") for x in features]
    return features
if __name__ == "__main__":
import icsiboost, sys, treenode
if len(sys.argv) != 3:
sys.stderr.write('USAGE: %s <model> <threshold>\n')
sys.exit(1)
classifier = icsiboost.Classifier(sys.argv[1])
threshold = float(sys.argv[2])
for tree in treenode.TreeNode.readAllTrees(sys.stdin):
max = 0
for node in tree.leaves:
if node.label.startswith('PRP'):
features = extract_features(node)
posteriors = classifier.compute_posteriors([features])
if posteriors[0] > max:
max = posteriors[0]
print max
|
unknown
|
codeparrot/codeparrot-clean
| ||
from ImageScripter import *
from elan import *
# UI-automation walkthrough of the Configurator "Messaging" section:
# visit every page, exercise every right-click menu, then open and cancel
# every "Add new ..." dialog.
ScriptSettings.Threshold = .96
#Messaging#################################################
Say("Checking the messaging page")
Configurator.messaging.Click()
#Configurator.messagingpage.SetThreshold(.98)
Configurator.messagingpage.Wait(seconds=10)
Say("Checking the messaging page looks great")
################################################
Configurator.globaloptions.Click()
#Configurator.messagingglobaloptionspage.SetThreshold(.98)
Configurator.messagingglobaloptionspage.Wait(seconds=10)
Configurator.communicationdevices.Click()
#Configurator.messagingcommunicationdevicespage.SetThreshold(.98)
Configurator.messagingcommunicationdevicespage.Wait(seconds=10)
Configurator.telephonesystems.Click()
#Configurator.telephonesystemspage.SetThreshold(.98)
Configurator.telephonesystemspage.Wait(seconds=10)
Configurator.intercomdevices.Click()
#Configurator.intercomdevicespage.SetThreshold(.98)
Configurator.intercomdevicespage.Wait(seconds=10)
Configurator.voicemailboxes.Click()
#Configurator.voicemailboxespage.SetThreshold(.98)
Configurator.voicemailboxespage.Wait(seconds=10)
Configurator.house.Click()
#Configurator.housepage.SetThreshold(.98)
Configurator.housepage.Wait(seconds=10)
Configurator.emailaccounts.Click()
#Configurator.emailaccountspage.SetThreshold(.98)
Configurator.emailaccountspage.Wait(seconds=10)
Configurator.emailmessagesoutbound.Click()
#Configurator.emailmessagesoutboundpage.SetThreshold(.98)
Configurator.emailmessagesoutboundpage.Wait(seconds=10)
Configurator.remotesystems.Click()
#Configurator.remotesystemspage.SetThreshold(.98)
Configurator.remotesystemspage.Wait(seconds=10)
Configurator.doorbell.Click()
#Configurator.doorbellpage.SetThreshold(.98)
Configurator.doorbellpage.Wait(seconds=10)
Configurator.pushmessages.Click()
#Configurator.pushmessagespage.SetThreshold(.98)
Configurator.pushmessagespage.Wait(seconds=10)
Configurator.calendars.Click()
#Configurator.calendarspage.SetThreshold(.98)
Configurator.calendarspage.Wait(seconds=10)
Configurator.calendargroups.Click()
#Configurator.calendargroupspage.SetThreshold(.98)
Configurator.calendargroupspage.Wait(seconds=10)
Configurator.custompages.Click()
#Configurator.messagingcustompagespage.SetThreshold(.98)
Configurator.messagingcustompagespage.Wait(seconds=10)
###Messaging Right Click
Configurator.communicationdevices.RightClick()
Configurator.communicationdevicesrightclickmenu.Wait(seconds=10)
Configurator.grey.Click()
Configurator.telephonesystems.RightClick()
Configurator.telephonesystemsrightclickmenu.Wait(seconds=10)
Configurator.grey.Click()
Configurator.voicemailboxes.RightClick()
Configurator.voicemailboxesrightclickmenu.Wait(seconds=10)
Configurator.grey.Click()
# The try/except blocks re-open the Messaging tree if the item scrolled away.
try:
    Configurator.house.RightClick()
except:
    Configurator.system.Click()
    Configurator.messaging.Click()
    Configurator.house.RightClick()
Configurator.houserightclickmenu.Wait(seconds=10)
Configurator.grey.Click()
try:
    Configurator.emailaccounts.RightClick()
except:
    Configurator.system.Click()
    Configurator.messaging.Click()
    Configurator.emailaccounts.RightClick()
Configurator.emailaccountsrightclickmenu.Wait(seconds=10)
Configurator.grey.Click()
try:
    Configurator.emailmessagesoutbound.RightClick()
except:
    Configurator.system.Click()
    Configurator.messaging.Click()
    sleep(5)
    Configurator.emailmessagesoutbound.RightClick()
Configurator.emailmessagesoutboundrightclickmenu.Wait(seconds=10)
Configurator.grey.Click()
# NOTE(review): the left-nav uses "remotesystems" (plural) above but
# "remotesystem" here and below -- confirm both attributes exist.
Configurator.remotesystem.RightClick()
Configurator.remotesystemrightclickmenu.Wait(seconds=10)
Configurator.grey.Click()
Configurator.doorbell.RightClick()
Configurator.doorbellrightclickmenu.Wait(seconds=10)
Configurator.grey.Click()
Configurator.pushmessages.RightClick()
Configurator.pushmessagesrightclickmenu.Wait(seconds=10)
Configurator.grey.Click()
Configurator.calendars.RightClick()
Configurator.calendarsrightclickmenu.Wait(seconds=10)
Configurator.grey.Click()
###FIX
Configurator.system.RealClick()
Configurator.messaging.RealClick()
Configurator.calendargroups.RightClick()
Configurator.calendargroupsrightclickmenu.Wait(seconds=10)
Configurator.grey.Click()
Configurator.custompages.RightClick()
Configurator.custompagesrightclickmenu.Wait(seconds=10)
Configurator.grey.Click()
sleep(3)
Configurator.irrigation.Click()
Configurator.system.Click()
#######################################WINDOW##############################
############################Communication DEvice
#Configurator.system.Click()
Configurator.messaging.Click()
Configurator.communicationdevices.RightClick()
Configurator.addnewcommunicationdevice.RealClick()
Configurator.addnewcommunicationdevicewindow.Wait()
Add.PushButton.Click('Cancel')
############################Telephone System
Press('enter')
Configurator.system.Click()
Configurator.messaging.Click()
Configurator.telephonesystems.RightClick()
Configurator.addnewdevice.RealClick()
Configurator.addnewdevicewindow.Wait()
Add.PushButton.Click('Cancel')
Press('enter')
####
############################Voice Mail Boxes
Configurator.system.Click()
Configurator.messaging.Click()
Configurator.voicemailboxes.RightClick()
Configurator.addnewvoicemailbox.RealClick()
Configurator.addnewvoicemailboxwindow.Wait()
Add.PushButton.Click('Cancel')
Press('enter')
############################House#############
Configurator.system.Click()
Configurator.messaging.Click()
Configurator.house.RightClick()
# NOTE(review): this "House" section drives the *voice mail box* dialog --
# looks like a copy-paste from the section above; confirm intended.
Configurator.addnewvoicemailbox.RealClick()
Configurator.addnewvoicemailboxwindow.Wait()
Add.PushButton.Click('Cancel')
Press('enter')
############################Email Accounts###
Configurator.system.Click()
Configurator.messaging.Click()
# BUG FIX: was "Configurator.emailsccounts" (typo); every other reference in
# this script uses "emailaccounts".
Configurator.emailaccounts.RightClick()
Configurator.addnewemailaccount.RealClick()
Configurator.addnewemailaccountwindow.Wait()
Add.PushButton.Click('Cancel')
Press('enter')
############################email messages outbound
Configurator.system.Click()####################
Configurator.messaging.Click()
Configurator.emailmessagesoutbound.RightClick()
Configurator.addnewemailmessage.RealClick()
Configurator.addnewemailmessagewindow.Wait()
Add.PushButton.Click('Cancel')
Press('enter')
############################Remote System####################
Configurator.system.Click()
Configurator.messaging.Click()
Configurator.remotesystem.RightClick()
Configurator.addnewremotesystem.RealClick()
Configurator.addnewremotesystemwindow.Wait()
Add.PushButton.Click('Cancel')
Press('enter')
###########################
Configurator.system.Click()
Configurator.messaging.Click()
Configurator.remotesystem.RightClick()
Configurator.addnewremoteintercomtarget.RealClick()
Configurator.addnewremoteintercomtargetwindow.Wait(threshold = .92)
Add.PushButton.Click('Cancel')
Press('enter')
############################Door Bell
Configurator.system.Click()
Configurator.messaging.Click()
Configurator.doorbell.RightClick()
Configurator.addnewdevice.RealClick()
Configurator.addnewdevicewindowbell.Wait()
Add.PushButton.Click('Cancel')
Press('enter')
############################Push Messages
Configurator.system.Click()
Configurator.messaging.Click()
Configurator.pushmessages.RightClick()
Configurator.addnewpushmessage.RealClick()
Configurator.addnewpushmessagewindow.Wait()
Add.PushButton.Click('Cancel')
Press('enter')
############################Calendars
Configurator.system.Click()
Configurator.messaging.Click()
Configurator.calendars.RightClick()
Configurator.addnewcalendaraccount.RealClick()
Configurator.addnewcalendaraccountwindow.Wait()
Add.PushButton.Click('Cancel')
Press('enter')
############################Calendar Groups
Configurator.system.Click()
Configurator.messaging.Click()
Configurator.calendargroups.RightClick()
Configurator.addnewcalendargroup.RealClick()
Configurator.addnewcalendargroupwindow.Wait()
Add.PushButton.Click('Cancel')
Press('enter')
############################Custom PAge
Configurator.system.Click()
Configurator.messaging.Click()
Configurator.custompages.RightClick()
Configurator.addnewcustompage.RealClick()
Configurator.addnewcustompagewindow.Wait()
Add.PushButton.Click('Cancel')
Press('enter')
###Reset
Configurator.media.Click()
Configurator.system.Click()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# This file is part of the Trezor project.
#
# Copyright (C) 2012-2018 SatoshiLabs and contributors
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3
# as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the License along with this library.
# If not, see <https://www.gnu.org/licenses/lgpl-3.0.html>.
import pytest
from trezorlib import lisk
from trezorlib.tools import parse_path
from .common import TrezorTest
# BIP-32 derivation path for the first Lisk account (SLIP-44 coin type 134).
LISK_PATH = parse_path("m/44h/134h/0h/0h")
@pytest.mark.lisk
class TestMsgLiskGetPublicKey(TrezorTest):
    def test_lisk_get_public_key(self):
        """The test mnemonic must derive the expected Lisk public key."""
        self.setup_mnemonic_nopin_nopassphrase()
        resp = lisk.get_public_key(self.client, LISK_PATH)
        expected = "eb56d7bbb5e8ea9269405f7a8527fe126023d1db2c973cfac6f760b60ae27294"
        assert resp.public_key.hex() == expected
|
unknown
|
codeparrot/codeparrot-clean
| ||
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package stackeval
import (
"context"
"fmt"
"github.com/hashicorp/hcl/v2"
"github.com/zclconf/go-cty/cty"
"github.com/hashicorp/terraform/internal/promising"
"github.com/hashicorp/terraform/internal/stacks/stackaddrs"
"github.com/hashicorp/terraform/internal/tfdiags"
)
// InspectOpts describes the optional inputs used for inspection-oriented
// evaluation of a stack configuration (see [Main.EvalExpr] below). Every
// field may be left zero-valued, with the per-field consequences noted.
type InspectOpts struct {
	// Optional values to use when asked for the values of input variables.
	//
	// Any that are not specified will appear in expressions as an unknown
	// value using the declared type constraint, thereby acting as
	// placeholders for whatever real values might be defined as planning
	// options.
	InputVariableValues map[stackaddrs.InputVariable]ExternalInputValue

	// Provider factories to use for operations that involve provider clients.
	//
	// Populating this is optional but if not populated then operations which
	// expect to call into providers will return errors.
	ProviderFactories ProviderFactories

	// TestOnlyGlobals is optional and if set makes it possible to use
	// references like _test_only_global.name to refer to values from this
	// map from anywhere in the entire stack configuration.
	//
	// This is intended as a kind of "test double" so that we can write more
	// minimal unit tests that can avoid relying on too many language features
	// all at once, so that hopefully future maintenance will not require
	// making broad changes across many different tests at once, which would
	// then risk inadvertently treating a regression as expected behavior.
	//
	// Configurations that refer to test-only globals are not valid for use
	// outside of the test suite of this package.
	TestOnlyGlobals map[string]cty.Value
}
// EvalExpr evaluates an arbitrary expression in the main scope of the
// specified stack instance using the approach that's appropriate for the
// specified evaluation phase.
//
// Typical use of this method would be with a Main configured for "inspecting",
// using [InspectPhase] as the phase. This method can be used for any phase
// that supports dynamic expression evaluation in principle, but in that case
// evaluation might cause relatively-expensive effects such as creating
// plans for components.
// EvalExpr evaluates an arbitrary expression in the main scope of the
// specified stack instance, using whichever strategy is appropriate for the
// given evaluation phase.
//
// Typical use is with a Main configured for "inspecting" and [InspectPhase]
// as the phase. Any phase that supports dynamic expression evaluation works
// in principle, but evaluation may then trigger relatively expensive side
// effects such as creating plans for components.
func (m *Main) EvalExpr(ctx context.Context, expr hcl.Expression, scopeStackInst stackaddrs.StackInstance, phase EvalPhase) (cty.Value, tfdiags.Diagnostics) {
	res, err := promising.MainTask(ctx, func(ctx context.Context) (withDiagnostics[cty.Value], error) {
		stack := m.Stack(ctx, scopeStackInst, phase)
		if stack == nil {
			// Can't evaluate anything in a stack instance that the current
			// configuration doesn't declare.
			var diags tfdiags.Diagnostics
			diags = diags.Append(tfdiags.Sourceless(
				tfdiags.Error,
				"Evaluating expression in undeclared stack",
				fmt.Sprintf("Cannot evaluate an expression in %s, because it's not declared by the current configuration.", scopeStackInst),
			))
			return withDiagnostics[cty.Value]{
				Result:      cty.DynamicVal,
				Diagnostics: diags,
			}, nil
		}
		val, valDiags := EvalExpr(ctx, expr, phase, stack)
		return withDiagnostics[cty.Value]{
			Result:      val,
			Diagnostics: valDiags,
		}, nil
	})
	if err != nil {
		res.Diagnostics = res.Diagnostics.Append(diagnosticsForPromisingTaskError(err))
	}
	return res.Result, res.Diagnostics
}
|
go
|
github
|
https://github.com/hashicorp/terraform
|
internal/stacks/stackruntime/internal/stackeval/main_inspect.go
|
# Adrian deWynter, 2016
'''
Adrian deWynter (2016)
Notebook corresponding to an Apache Spark class I once took.
This one pertains to analysis of texts, more specifically word count.
'''
#####
# Remember, Databricks has a built-in function (display) that isn't available elsewhere.
# This code isn't meant to run anywhere that isn't Spark -- and some Databricks functions
# may still be lingering around.
# I removed testing code and most of the stuff that could be used to correctly identify
# this file when someone is looking up the answers.
#
# Index:
# ------
# 1 - Basic string operations
# 2 - DF operations with words
# 3 - Shakespearean Analysis
#####
'''
#############################
# 1 - Basic string operations
#############################
'''
# Perform an operation that adds an 's' to each word in a df
wordsDF = sqlContext.createdf([('cat',), ('elephant',), ('rat',), ('rat',), ('cat', )], ['word'])
wordsDF.show()
print type(wordsDF)
wordsDF.printSchema()
from pyspark.sql.functions import lit, concat
pluralDF = wordsDF.select(concat(wordsDF.word, lit('s')).alias('word'))
pluralDF.show()
# Use the SQL length function to find the number of characters in each word
from pyspark.sql.functions import length
pluralLengthsDF = pluralDF.select(length('word'))
pluralLengthsDF.show()
'''
##############################
# 2 - DF operations with words
##############################
'''
# Find the counts of words and the number of times that these words occur.
wordCountsDF = (wordsDF.groupBy(wordsDF.word).count())
wordCountsDF.show()
# Calculate the number of unique words in wordsDF
from spark_notebook_helpers import printdfs
#This function returns all the dfs in the notebook and their corresponding column names.
printdfs(True)
uniqueWordsCount = wordCountsDF.groupBy(wordCountsDF.word).count()
uniqueWordsCount = uniqueWordsCount.count()
print uniqueWordsCount
# Find the mean number of occurrences of words in wordCountsDF.
averageCount = (wordCountsDF.groupBy().mean('count')).head()[0]
print averageCount
# Creates a df with word counts.
# Args: wordListDF (df of str): A df consisting of one string column called 'word'.
# Returns df of (str, int): A df containing 'word' and 'count' columns.
def wordCount(wordListDF):
return wordListDF.groupBy(wordListDF.word).count()
# Sanity check: the helper should reproduce wordCountsDF computed above.
wordCount(wordsDF).show()
from pyspark.sql.functions import regexp_replace, trim, col, lower
# Removes punctuation, changes to lower case, and strips leading and trailing spaces.
# Args: a Column containing a sentence.
# Returns a Column named 'sentence' with clean-up operations applied.
def removePunctuation(column):
    """Strip non-alphanumeric characters (incl. underscores), lowercase, and trim.

    Args:
        column: a Column containing a sentence.

    Returns:
        A Column aliased 'sentence' with the clean-up operations applied.
    """
    cleaned = regexp_replace(column, "[^0-9a-zA-Z ]", "")
    return lower(trim(cleaned)).alias("sentence")
sentenceDF = sqlContext.createdf([('Hi, you!',),
(' No under_score!',),
(' * Remove punctuation then spaces * ',)], ['sentence'])
sentenceDF.show(truncate=False)
(sentenceDF
.select(removePunctuation(col('sentence')))
.show(truncate=False))
'''
#############################
# 3 - Shakespearean Analysis
#############################
'''
# Use http://www.gutenberg.org/ebooks/100
# NOTE(review): fileName is an empty placeholder -- set it to the downloaded
# text's path before running; read.text yields one row per line in column 'value'.
fileName = ""
shakespeareDF = sqlContext.read.text(fileName).select(removePunctuation(col('value')))
shakespeareDF.show(15, truncate=False)
# Split each 'sentence' in the df by its spaces
# Transform from a df that contains lists of words into a df with each word in its own row.
# Remove the rows that contain ''.
from pyspark.sql.functions import split, explode
shakeWordsDF = shakespeareDF.select(split(shakespeareDF.sentence, ' ').alias("sentence"))
shakeWordsDF = shakeWordsDF.select(explode(shakeWordsDF.sentence).alias("word"))
# Consecutive spaces produce empty tokens; drop them.
shakeWordsDF = shakeWordsDF.filter("word != ''")
shakeWordsDF.show()
shakeWordsDFCount = shakeWordsDF.count()
print shakeWordsDFCount
# Apply the wordCount() function to produce a list of word counts.
from pyspark.sql.functions import desc
topWordsAndCountsDF = wordCount(shakeWordsDF).orderBy("count", ascending=False)
topWordsAndCountsDF.show()
|
unknown
|
codeparrot/codeparrot-clean
| ||
#
# This file is part of Plinth.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Plinth module to configure OpenVPN server.
"""
from django.urls import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from plinth import actions
from plinth import action_utils
from plinth import cfg
from plinth import frontpage
from plinth import service as service_module
from plinth.utils import format_lazy
# Module metadata consumed by the Plinth framework loader.
version = 1
depends = ['apps']
# Populated by init()/setup(); stays None until then.
service = None
managed_services = ['openvpn@freedombox']
managed_packages = ['openvpn', 'easy-rsa']
title = _('Virtual Private Network (OpenVPN)')
description = [
    format_lazy(
        _('Virtual Private Network (VPN) is a technique for securely '
          'connecting two devices in order to access resources of a '
          'private network. While you are away from home, you can connect '
          'to your {box_name} in order to join your home network and '
          'access private/internal services provided by {box_name}. '
          'You can also access the rest of the Internet via {box_name} '
          'for added security and anonymity.'), box_name=_(cfg.box_name))
]
def init():
    """Initialize the OpenVPN module."""
    # Register the app in the "Apps" section of the Plinth main menu.
    menu = cfg.main_menu.get('apps:index')
    menu.add_urlname(title, 'glyphicon-lock', 'openvpn:index')
    global service
    # setup_helper is injected into module globals by the Plinth module loader.
    setup_helper = globals()['setup_helper']
    if setup_helper.get_state() != 'needs-setup':
        service = service_module.Service(
            managed_services[0], title, ports=['openvpn'], is_external=True)
        # Show the frontpage shortcut only when the systemd unit is enabled
        # and the 'is-setup' action reports setup completed.
        if service.is_enabled() and is_setup():
            add_shortcut()
def setup(helper, old_version=None):
    """Install the OpenVPN packages and register the service object.

    :param helper: setup helper used to install the managed packages.
    :param old_version: previously installed module version, if upgrading.
    """
    helper.install(managed_packages)
    global service
    if service is not None:
        return
    service = service_module.Service(
        managed_services[0], title, ports=['openvpn'], is_external=True,
        enable=enable, disable=disable)
def add_shortcut():
    """Register the OpenVPN shortcut on the Plinth front page."""
    profile_link = reverse_lazy('openvpn:profile')
    download_button = format_lazy(
        _('<a class="btn btn-primary btn-sm" href="{link}">'
          'Download Profile</a>'), link=profile_link)
    frontpage.add_shortcut(
        'openvpn', title, details=description + [download_button],
        configure_url=reverse_lazy('openvpn:index'), login_required=True)
def is_setup():
    """Return whether OpenVPN server setup has completed.

    Runs the 'openvpn is-setup' action as superuser; the action prints
    'true' once setup is done. (The previous docstring claimed this
    checked whether the service was *running*, which the command does not.)
    """
    return actions.superuser_run('openvpn', ['is-setup']).strip() == 'true'
def enable():
    """Enable the module."""
    actions.superuser_run('service', ['enable', managed_services[0]])
    # Restore the frontpage shortcut removed by disable().
    add_shortcut()
def disable():
    """Disable the module."""
    actions.superuser_run('service', ['disable', managed_services[0]])
    frontpage.remove_shortcut('openvpn')
def diagnose():
    """Run diagnostics and return the results.

    Checks that something is listening on the OpenVPN port (1194/udp4).
    """
    return [action_utils.diagnose_port_listening(1194, 'udp4')]
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/python2.4
# -*- encoding: utf-8 -*-
# GLADE_VCP
# Copyright 2010 Chris Morley
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import sys
import gtk
import hal
import gtk.glade
import gobject
import getopt
from hal_widgets import _HalWidgetBase
from led import HAL_LED
from hal_glib import GComponent
from gladevcp.gladebuilder import widget_name
class GladePanel():
    """Wires named Glade widgets to a HAL component and polls HAL widgets."""

    def on_window_destroy(self, widget, data=None):
        # Tear down the HAL component and the poll timer before quitting GTK.
        self.hal.exit()
        gobject.source_remove(self.timer)
        gtk.main_quit()

    def __init__(self, halcomp, xmlname, builder, buildertype):
        self.builder = builder
        self.hal = GComponent(halcomp)
        self.widgets = {}
        # Collect every named widget; initialise the HAL-aware ones.
        for obj in builder.get_objects():
            name = widget_name(obj)
            if name is None:
                continue
            if isinstance(obj, _HalWidgetBase):
                obj.hal_init(self.hal, name)
            self.widgets[name] = obj
        # Poll the widgets ten times per second.
        self.timer = gobject.timeout_add(100, self.update)

    def update(self):
        for obj in self.widgets.values():
            obj.hal_update()
        return True

    def __getitem__(self, key):
        return self.widgets[key]

    def __setitem__(self, key, value):
        self.widgets[key] = value
if __name__ == "__main__":
    # This module is a library; it has no standalone behaviour.
    print "Gladevcp_make_pins cannot be run on its own"
    print "It must be called by gladevcp or a python program"
    print "that loads and displays the glade panel and creates a HAL component"
# vim: sts=4 sw=4 et
|
unknown
|
codeparrot/codeparrot-clean
| ||
#! /usr/bin/env python
import string
import sys
import re
import fasta
from operator import itemgetter
# from heapq import nlargest
# a library for parsing the cd-hit output
import cdhit_parse
from optparse import OptionParser
# This script takes CD-HIT output and creates four output files
# *.fasta_clusters is a file with all the clusters in fasta format, sorted from clusters with the
# most sequences to those with the least
# *_unique.fa is a fasta file of all the unique sequences, taking the representative sequence
# from each cluster
# *.cluster_summary is a summary of the sequences and the number of clusters in each file
# *.cluser_sizes is a list of the number of clusters of each size
"""
Usage: extract-clusters-html.py <filename.clstr> <filename.fa> <output_file>
<initial base pair requirement> <desired output format (text/html)> <input filename>
"""
# This uses the -i flag to indicate the input file name, since it might contain spaces
parser = OptionParser()
parser.add_option("-i", "--input", dest="filename")
(options, args) = parser.parse_args()
# The number of base pairs to use to check the beginning of the sequence
bp_match = int(sys.argv[4])
# The desired output file type - text or html
# By default this is 'text' for the command line scripts and 'html' for the cgi script
output_type = sys.argv[5]
# The input file name
# This is required because the input file name in not conserved through the cgi scripts
# file_input = sys.argv[6]
try:
# Open the CD-HIT clustered file *.clstr
cluster_file = open(sys.argv[1], 'r')
# Open the fasta file used as input for CD-HIT
fasta_file = open(sys.argv[2], 'r')
# Output files
outfile = sys.argv[3]
except:
print """
Your input files cannot be found.
"""
sys.exit(2)
# Read in the cd-hit clustered file
try:
cluster_list = cluster_file.read()
except:
print 'Cannot open', cluster_file, '\n'
sys.exit(2)
# Read in the FASTA file and check to make sure it's in FASTA format
try:
fasta_dict_raw = fasta.load(fasta_file)
except:
print '\n', sys.argv[2], 'does not appear to be a fasta file\n'
sys.exit(2)
fasta_dict = {}
# Just take everything before the first space in the first line of the FASTA file as
# the key. This is also how cd-hit takes the name, so the keys will match.
for fasta_key in fasta_dict_raw:
new_fasta = fasta_key.split(' ')
new_key = new_fasta[0]
fasta_dict[new_key] = fasta_dict_raw[fasta_key]
# Output file for the list of all the sequences in each cluster
n_output = outfile + '.fasta_clusters'
try:
output = open(n_output, 'wt')
except:
print 'Cannot open', n_output, 'for writing'
sys.exit(2)
# Output file for the summary
n_summary = outfile + '.cluster_summary'
# n_plot = outfile + '.plot'
try:
output_summary = open(n_summary, 'wt')
# plot_summary = open(n_plot, 'wt')
except:
print 'Cannot open', n_summary, 'for writing'
sys.exit(2)
# Output file for plotting the number of clusters of each size
n_clstr_size = outfile + '.cluster_sizes'
try:
output_clstr_size = open(n_clstr_size, 'wt')
except:
print 'Cannot open', n_clstr_size, 'for writing'
sys.exit(2)
# Output file for a fasta file of unique sequences
unique = outfile + '_unique.fa'
try:
output_unique = open(unique, 'w')
except:
print 'Cannot open', unique, 'for writing'
sys.exit(2)
# Parse the cd-hit *.clstr file, to extract the information about what sequences
# are in each cluster
cluster_db, cluster_db_count, unique_list = cdhit_parse.read_clusters(cluster_list)
cluster_size_db = {}
cluster_keys = sorted(cluster_db_count.iteritems(), key=itemgetter(1), reverse=True)
# ************** Analyze clusters to see if the first 3 bp match ********
# A function for comparing the beginning base pairs and creating appropriate
# clusters. Use bp_match as the number of initial base pairs to check.
def compare_bp(cluster_sequences, fasta_dictionary, match_len=None):
    """Split a cluster by whether each sequence's leading bases match the first.

    The template is the first `match_len` bases of the first sequence in
    `cluster_sequences`; every sequence whose prefix equals the template goes
    into the matching list, the rest into the non-matching list.

    Args:
        cluster_sequences: list of sequence ids belonging to one cluster; the
            first id supplies the template prefix.
        fasta_dictionary: mapping of sequence id -> sequence string.
        match_len: number of leading base pairs to compare. Defaults to the
            module-level bp_match (from the command line) for backward
            compatibility. A value of 0 makes every sequence match.

    Returns:
        (matching, non_matching): two lists of sequence ids, preserving the
        input order. (The original built an intermediate dict and iterated
        it, which was order-unstable on Python 2 and silently dropped
        duplicate ids; iterating the input directly fixes both.)
    """
    if match_len is None:
        match_len = bp_match
    template = fasta_dictionary[cluster_sequences[0]][0:match_len]
    matching = []
    non_matching = []
    for seq in cluster_sequences:
        if fasta_dictionary[seq][0:match_len] == template:
            matching.append(seq)
        else:
            non_matching.append(seq)
    return matching, non_matching
# For each cluster go through and check to make sure the first bp_match base pairs
# are the same. If they're not, make new clusters. If bp_match is set to 0, then
# clusters will not be affected by comparing the initial base pairs.
# Renumber clusters 1..N while splitting each by the initial-bp check.
new_key = 0
cluster_set = {}
for cluster in cluster_keys:
    new_key = new_key + 1
    # cluster is a (cluster_id, count) pair from the sorted list.
    cluster_seqs = cluster_db[cluster[0]]
    (new_cluster_list, revised_cluster_list) = compare_bp(cluster_seqs, fasta_dict)
    cluster_set[new_key] = new_cluster_list
    # If some clusters have sequences that don't match, make them into their own clusters
    # Repeatedly peel off a new cluster from the leftovers until none remain.
    while revised_cluster_list:
        (new_rev_cl, rev_rev_cl) = compare_bp(revised_cluster_list, fasta_dict)
        new_key = new_key + 1
        cluster_set[new_key] = new_rev_cl
        if rev_rev_cl:
            revised_cluster_list = rev_rev_cl
            continue
        else:
            break
# print 'new clusters', cluster_set
# Create a dictionary with the cluster number as the key and the number of sequences
# in that cluster as the info
# Create a dictionary with the cluster number as the key and the reference sequence
# as the info
cluster_num_seq = {}
cluster_ref_seq = {}
for j in cluster_set:
ref_seq = cluster_set[j][0]
ref_seq_fasta = fasta_dict[cluster_set[j][0]]
for s in cluster_set[j]:
if (len(fasta_dict[s]) > len(ref_seq_fasta)):
ref_seq = s
cluster_ref_seq[j] = ref_seq
cluster_num_seq[j] = len(cluster_set[j])
# Use this information for the output
# get a list of the clusters in order of most clusters to least clusters
sorted_clusters = sorted(cluster_num_seq.iteritems(), key=itemgetter(1), reverse=True)
# largest = nlargest(10, cluster_num_seq.iteritems(), itemgetter(1))
largest = sorted_clusters[0:10]
num_seq = float(len(fasta_dict))
num_unique = float(len(cluster_set.keys()))
percent_float = (num_seq - num_unique)/num_seq*100
percent = round(percent_float, 2)
# Output to terminal
# If this is coming from the cgi script the output will be HTML. If it's from
# the command line script, it will be text.
if (output_type == 'html'):
print '<dl><dd>Number of reads:', int(num_seq), '<dd>Number of unique reads:', int(num_unique), '<dd>Percent of reads that are replicates:', percent, '%</dl>'
print '<p>10 largest clusters <dl>'
for l in largest:
lkey = l[0]
print '<dd>Cluster', lkey, 'Number of sequences:', cluster_num_seq[lkey]
print '</dl>'
elif (output_type == 'text'):
print 'Number of reads:', int(num_seq), '\nNumber of unique reads:', int(num_unique), '\nPercent of reads that are replicates:', percent, '%\n'
print '10 largest clusters\n'
for l in largest:
lkey = l[0]
print 'Cluster', lkey, 'Number of sequences:', cluster_num_seq[lkey]
# ***************
# Output to files
# Output summary
output_summary.write('File analyzed: %s\n454 Replicate Filter version 0.3\nNumber of sequences: %s Number of unique reads: %s Percent of repeats %s\n' % (options.filename, num_seq, num_unique, percent,))
output_summary.write('Cluster\tRef sequence\tNum of seq\n')
# ++++++++++
# Writing each sequence out, so that you have a file with all the sequences in
# each cluster
# for incrementing for plot output
n = 0
# Right now the output is in order from most sequences in a cluster to least, except
# where clusters are split after the initial base pair check.
# If you want the output in order of most sequences in a cluster to least for all clusters, uncomment
# this and replace - for cluster_id in cluster_set and uncomment - cluster_id = cluster_id_raw[0]
# for cluster_id_raw in sorted_clusters:
output.write('File analyzed: %s' % (options.filename))
for cluster_id in cluster_set:
n = n + 1
# cluster_id = cluster_id_raw[0]
output.write('\n----------------------------------------\nCluster %s Reference sequence: %s Number of sequences: %s\n' % (cluster_id, cluster_ref_seq[cluster_id], cluster_num_seq[cluster_id],))
for item in cluster_set[cluster_id]:
output.write('>%s\n%s\n' % (item, fasta_dict[item],))
# ++++++++++
# Detail for output summary file
output_summary.write('%s\t%s\t%s\n' % (cluster_id, cluster_ref_seq[cluster_id], cluster_num_seq[cluster_id],))
# ~~~~~~ Create a file that can be used for plotting the distribution of cluster numbers
# for clusters that have more than one sequence
# key2_num = int(cluster_num_seq[cluster_id])
# if key2_num > 1:
# plot_summary.write('%s\t%s\n' % (n, sorted_clusters[cluster_id][1],))
# ~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~ Create a file that has the number of sequences in a cluster versus
# the number of clusters there are of that size
# Determine the number of clusters with a given number of reads in it
if cluster_size_db.has_key(cluster_num_seq[cluster_id]):
num_clusters = cluster_size_db[cluster_num_seq[cluster_id]]
num_clusters = num_clusters + 1
cluster_size_db[cluster_num_seq[cluster_id]] = num_clusters
else:
cluster_size_db[cluster_num_seq[cluster_id]] = 1
cluster_size_keys = sorted(cluster_size_db.iteritems(), reverse=False)
output_clstr_size.write('File analyzed:\n%s\nCluster size\tNumber of clusters\n' % (sys.argv[2],))
for clstr_num_temp in cluster_size_keys:
clstr_num = clstr_num_temp[0]
output_clstr_size.write('%s\t%s\n' % (clstr_num, cluster_size_db[clstr_num],))
# ~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~ Create a file with all the unique sequences
for q in cluster_ref_seq:
output_unique.write('>%s\n%s\n' % (cluster_ref_seq[q], fasta_dict[cluster_ref_seq[q]]))
# ~~~~~~~~~~~~~
output.close()
output_summary.close()
# plot_summary.close()
output_clstr_size.close()
output_unique.close()
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
* Copyright (C) Igor Sysoev
* Copyright (C) Nginx, Inc.
*/
#ifndef _NGX_EVENT_TIMER_H_INCLUDED_
#define _NGX_EVENT_TIMER_H_INCLUDED_
#include <ngx_config.h>
#include <ngx_core.h>
#include <ngx_event.h>
#define NGX_TIMER_INFINITE (ngx_msec_t) -1
#define NGX_TIMER_LAZY_DELAY 300
ngx_int_t ngx_event_timer_init(ngx_log_t *log);
ngx_msec_t ngx_event_find_timer(void);
void ngx_event_expire_timers(void);
ngx_int_t ngx_event_no_timers_left(void);
extern ngx_rbtree_t ngx_event_timer_rbtree;
/*
 * Remove the event's pending timer from the global timer rbtree and clear
 * its timer_set flag.
 */
static ngx_inline void
ngx_event_del_timer(ngx_event_t *ev)
{
    ngx_log_debug2(NGX_LOG_DEBUG_EVENT, ev->log, 0,
                   "event timer del: %d: %M",
                    ngx_event_ident(ev->data), ev->timer.key);
    ngx_rbtree_delete(&ngx_event_timer_rbtree, &ev->timer);
    /* poison the node links in debug builds so stale reuse is caught early */
#if (NGX_DEBUG)
    ev->timer.left = NULL;
    ev->timer.right = NULL;
    ev->timer.parent = NULL;
#endif
    ev->timer_set = 0;
}
/*
 * Schedule (or reschedule) the event's timer to expire "timer" milliseconds
 * from now.  An already-set timer whose expiry is within NGX_TIMER_LAZY_DELAY
 * of the new value is kept as-is to avoid needless rbtree churn.
 */
static ngx_inline void
ngx_event_add_timer(ngx_event_t *ev, ngx_msec_t timer)
{
    ngx_msec_t      key;
    ngx_msec_int_t  diff;

    /* absolute expiry time in the ngx_current_msec clock domain */
    key = ngx_current_msec + timer;
    if (ev->timer_set) {
        /*
         * Use a previous timer value if difference between it and a new
         * value is less than NGX_TIMER_LAZY_DELAY milliseconds: this allows
         * to minimize the rbtree operations for fast connections.
         */
        diff = (ngx_msec_int_t) (key - ev->timer.key);
        if (ngx_abs(diff) < NGX_TIMER_LAZY_DELAY) {
            ngx_log_debug3(NGX_LOG_DEBUG_EVENT, ev->log, 0,
                           "event timer: %d, old: %M, new: %M",
                            ngx_event_ident(ev->data), ev->timer.key, key);
            return;
        }
        /* expiry moved significantly: drop the old node before re-inserting */
        ngx_del_timer(ev);
    }
    ev->timer.key = key;
    ngx_log_debug3(NGX_LOG_DEBUG_EVENT, ev->log, 0,
                   "event timer add: %d: %M:%M",
                    ngx_event_ident(ev->data), timer, ev->timer.key);
    ngx_rbtree_insert(&ngx_event_timer_rbtree, &ev->timer);
    ev->timer_set = 1;
}
#endif /* _NGX_EVENT_TIMER_H_INCLUDED_ */
|
c
|
github
|
https://github.com/nginx/nginx
|
src/event/ngx_event_timer.h
|
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 18 11:50:02 2017
@author: sebastian
"""
## retrieve second set of auxiliary ECMWF forecast data,
## to augment temperature forecasts and previously downloaded auxiliary data
## based on example from
## https://software.ecmwf.int/wiki/display/WEBAPI/TIGGE+retrieval+efficiency
## surface variables: (see https://software.ecmwf.int/wiki/display/TIGGE/Parameters)
# cloud cover, surface pressure, CAPE
# ECMWF forecasts from TIGGE data set:
# variables: 146/147/165/166/168/176/177/228039
# all available full years, 2007-2016
# init time 00 UTC
# 36/48 h ahead forecasts (= valid at 12 UTC and 00 UTC)
# 0.5° resolution
# area: -10E, 30E; 30N, 70N (large part of Europe centered around Germany)
#!/usr/bin/env python
from ecmwfapi import ECMWFDataServer
server = ECMWFDataServer()
def retrieve_tigge_data():
    """Request one GRIB file per year of ECMWF TIGGE auxiliary surface data."""
    # Build "<year>-01-01/to/<year>-12-31" range strings (currently 2016 only).
    date1 = [str(i) + "-01-01" for i in xrange(2016,2017)]
    date2 = [str(i) + "-12-31" for i in xrange(2016,2017)]
    dates = date1
    for j in range(0,1):
        dates[j] = date1[j] + "/to/" + date2[j]
    data_dir = "/media/sebastian/Elements/Postproc_NN/data/forecasts/auxiliary/"
    for date in dates:
        # One output file per year, named after the year prefix of the range.
        target = data_dir + "ecmwf_aux_surface_more_" + date[:4] + ".grib"
        tigge_request(date, target)
def tigge_request(date, target):
    '''
    A TIGGE request for ECMWF perturbed forecasts of auxiliary surface variables.

    date: "YYYY-MM-DD/to/YYYY-MM-DD" range string passed to MARS.
    target: path of the GRIB file to write.
    Relies on the module-level `server` and on `mem_numbers`, which is only
    set in the __main__ block -- calling this without it raises NameError.
    '''
    server.retrieve({
        'origin' : "ecmf",
        'levtype' : "sfc",
        'number' : mem_numbers,
        'expver' : "prod",
        'dataset' : "tigge",
        'step' : "36/48",
        'grid' : "0.5/0.5",
        'param' : "146/147/165/166/168/176/177/228039",
        'area' : "70/-10/30/30",
        'time' : "00",
        'date' : date,
        'type' : "pf",
        'class' : "ti",
        'target' : target,
        })
if __name__ == '__main__':
    # "1/2/.../49/50": request all 50 perturbed ensemble members.
    mem_numbers = ''.join([''.join([str(i) + "/" for i in xrange(1,50)]),'50'])
    retrieve_tigge_data()
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""The WaveBlocks Project
Compute the action of the position operator applied to a Hagedorn wavepacket.
@author: R. Bourquin
@copyright: Copyright (C) 2016 R. Bourquin
@license: Modified BSD License
"""
from numpy import zeros, complexfloating, conjugate, squeeze
from scipy import sqrt
from WaveBlocksND.WavepacketPosition import WavepacketPosition
__all__ = ["PositionHAWP"]
class PositionHAWP(WavepacketPosition):
    r"""This class implements the computation of the action of the
    position operator :math:`x` applied to a Hagedorn wavepacket :math:`\Psi`.
    """

    def apply_position_component(self, wavepacket, component):
        r"""Compute the effect of the position operator :math:`x` on the basis functions :math:`\phi(x)`
        of a component :math:`\Phi_i` of the Hagedorn wavepacket :math:`\Psi`.

        :param wavepacket: The wavepacket :math:`\Psi` containing :math:`\Phi_i`.
        :type wavepacket: A :py:class:`HagedornWavepacketBase` subclass instance.
        :param component: The index :math:`i` of the component :math:`\Phi_i`.
        :type component: Integer.
        :return: Extended basis shape :math:`\mathfrak{\dot{K}}` and new coefficients :math:`c^\prime`
                 for component :math:`\Phi_i`. The coefficients are stored column-wise with
                 one column per dimension :math:`d`. The :math:`c^\prime` array is of shape
                 :math:`|\mathfrak{\dot{K}}| \times D`.
        """
        D = wavepacket.get_dimension()
        eps = wavepacket.get_eps()
        q, p, Q, P, _ = wavepacket.get_parameters(component=component)
        # Qbar multiplies the backward-neighbour (lowering) term below.
        Qbar = conjugate(Q)
        coeffs = wavepacket.get_coefficients(component=component)
        # Prepare storage for new coefficients
        # The extended shape holds neighbours one step outside K in each dimension.
        K = wavepacket.get_basis_shapes(component=component)
        Ke = K.extend()
        size = Ke.get_basis_size()
        cnew = zeros((size, D), dtype=complexfloating)
        # We implement the more efficient scatter type stencil here:
        # each source coefficient contributes to itself and to its
        # backward/forward neighbours in every dimension d.
        for k in K.get_node_iterator():
            # Central phi_i coefficient
            cnew[Ke[k], :] += squeeze(coeffs[K[k]] * q)
            # Backward neighbours phi_{i - e_d}
            nbw = Ke.get_neighbours(k, selection="backward")
            for d, nb in nbw:
                cnew[Ke[nb], :] += sqrt(eps**2 / 2.0) * sqrt(k[d]) * coeffs[K[k]] * Qbar[:, d]
            # Forward neighbours phi_{i + e_d}
            nfw = Ke.get_neighbours(k, selection="forward")
            for d, nb in nfw:
                cnew[Ke[nb], :] += sqrt(eps**2 / 2.0) * sqrt(k[d] + 1.0) * coeffs[K[k]] * Q[:, d]
        return (Ke, cnew)
|
unknown
|
codeparrot/codeparrot-clean
| ||
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build 386 || amd64 || arm64 || loong64 || ppc64 || ppc64le || riscv64 || s390x || wasm
package math
const haveArchFloor = true
func archFloor(x float64) float64
const haveArchCeil = true
func archCeil(x float64) float64
const haveArchTrunc = true
func archTrunc(x float64) float64
|
go
|
github
|
https://github.com/golang/go
|
src/math/floor_asm.go
|
__all__ = ["runsimple"]
import sys, os
from SimpleHTTPServer import SimpleHTTPRequestHandler
import urllib
import posixpath
import webapi as web
import net
import utils
def runbasic(func, server_address=("0.0.0.0", 8080)):
    """
    Runs a simple HTTP server hosting WSGI app `func`. The directory `static/`
    is hosted statically.
    Based on [WsgiServer][ws] from [Colin Stewart][cs].
    [ws]: http://www.owlfish.com/software/wsgiutils/documentation/wsgi-server-api.html
    [cs]: http://www.owlfish.com/
    """
    # Copyright (c) 2004 Colin Stewart (http://www.owlfish.com/)
    # Modified somewhat for simplicity
    # Used under the modified BSD license:
    # http://www.xfree86.org/3.3.6/COPYRIGHT2.html#5
    import SimpleHTTPServer, SocketServer, BaseHTTPServer, urlparse
    import socket, errno
    import traceback
    class WSGIHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
        def run_wsgi_app(self):
            # Build a minimal WSGI environ from the raw HTTP request.
            protocol, host, path, parameters, query, fragment = \
                urlparse.urlparse('http://dummyhost%s' % self.path)
            # we only use path, query
            env = {'wsgi.version': (1, 0)
                   ,'wsgi.url_scheme': 'http'
                   ,'wsgi.input': self.rfile
                   ,'wsgi.errors': sys.stderr
                   ,'wsgi.multithread': 1
                   ,'wsgi.multiprocess': 0
                   ,'wsgi.run_once': 0
                   ,'REQUEST_METHOD': self.command
                   ,'REQUEST_URI': self.path
                   ,'PATH_INFO': path
                   ,'QUERY_STRING': query
                   ,'CONTENT_TYPE': self.headers.get('Content-Type', '')
                   ,'CONTENT_LENGTH': self.headers.get('Content-Length', '')
                   ,'REMOTE_ADDR': self.client_address[0]
                   ,'SERVER_NAME': self.server.server_address[0]
                   ,'SERVER_PORT': str(self.server.server_address[1])
                   ,'SERVER_PROTOCOL': self.request_version
                   }
            for http_header, http_value in self.headers.items():
                env ['HTTP_%s' % http_header.replace('-', '_').upper()] = \
                    http_value
            # Setup the state
            self.wsgi_sent_headers = 0
            self.wsgi_headers = []
            try:
                # We have the environment, now invoke the application
                result = self.server.app(env, self.wsgi_start_response)
                try:
                    try:
                        for data in result:
                            if data:
                                self.wsgi_write_data(data)
                    finally:
                        if hasattr(result, 'close'):
                            result.close()
                except socket.error, socket_err:
                    # Catch common network errors and suppress them
                    if (socket_err.args[0] in \
                        (errno.ECONNABORTED, errno.EPIPE)):
                        return
                except socket.timeout, socket_timeout:
                    return
            except:
                print >> web.debug, traceback.format_exc(),
            if (not self.wsgi_sent_headers):
                # We must write out something!
                self.wsgi_write_data(" ")
            return
        do_POST = run_wsgi_app
        do_PUT = run_wsgi_app
        do_DELETE = run_wsgi_app
        def do_GET(self):
            # /static/ is served from disk; everything else goes to the app.
            if self.path.startswith('/static/'):
                SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)
            else:
                self.run_wsgi_app()
        def wsgi_start_response(self, response_status, response_headers,
                                exc_info=None):
            if (self.wsgi_sent_headers):
                raise Exception \
                    ("Headers already sent and start_response called again!")
            # Should really take a copy to avoid changes in the application....
            self.wsgi_headers = (response_status, response_headers)
            return self.wsgi_write_data
        def wsgi_write_data(self, data):
            # Headers are deferred until the first body write, per WSGI.
            if (not self.wsgi_sent_headers):
                status, headers = self.wsgi_headers
                # Need to send header prior to data
                status_code = status[:status.find(' ')]
                status_msg = status[status.find(' ') + 1:]
                self.send_response(int(status_code), status_msg)
                for header, value in headers:
                    self.send_header(header, value)
                self.end_headers()
                self.wsgi_sent_headers = 1
            # Send the data
            self.wfile.write(data)
    class WSGIServer(SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer):
        def __init__(self, func, server_address):
            BaseHTTPServer.HTTPServer.__init__(self,
                                               server_address,
                                               WSGIHandler)
            self.app = func
            self.serverShuttingDown = 0
    print "http://%s:%d/" % server_address
    WSGIServer(func, server_address).serve_forever()
# The WSGIServer instance.
# Made global so that it can be stopped in embedded mode.
server = None
def runsimple(func, server_address=("0.0.0.0", 8080)):
    """
    Runs [CherryPy][cp] WSGI server hosting WSGI app `func`.
    The directory `static/` is hosted statically.
    [cp]: http://www.cherrypy.org
    """
    global server
    # Wrap the app: static file serving first, then request logging.
    func = StaticMiddleware(func)
    func = LogMiddleware(func)
    server = WSGIServer(server_address, func)
    if server.ssl_adapter:
        print "https://%s:%d/" % server_address
    else:
        print "http://%s:%d/" % server_address
    try:
        server.start()
    except (KeyboardInterrupt, SystemExit):
        server.stop()
        # Clear the module-level handle so embedded callers can restart.
        server = None
def WSGIServer(server_address, wsgi_app):
    """Creates CherryPy WSGI server listening at `server_address` to serve `wsgi_app`.
    This function can be overwritten to customize the webserver or use a different webserver.
    """
    import wsgiserver
    # Default values of wsgiserver.ssl_adapters uses cherrypy.wsgiserver
    # prefix. Overwriting it make it work with web.wsgiserver.
    wsgiserver.ssl_adapters = {
        'builtin': 'web.wsgiserver.ssl_builtin.BuiltinSSLAdapter',
        'pyopenssl': 'web.wsgiserver.ssl_pyopenssl.pyOpenSSLAdapter',
    }
    server = wsgiserver.CherryPyWSGIServer(server_address, wsgi_app, server_name="localhost")
    def create_ssl_adapter(cert, key):
        # wsgiserver tries to import submodules as cherrypy.wsgiserver.foo.
        # That doesn't work as not it is web.wsgiserver.
        # Patching sys.modules temporarily to make it work.
        import types
        cherrypy = types.ModuleType('cherrypy')
        cherrypy.wsgiserver = wsgiserver
        sys.modules['cherrypy'] = cherrypy
        sys.modules['cherrypy.wsgiserver'] = wsgiserver
        from wsgiserver.ssl_pyopenssl import pyOpenSSLAdapter
        adapter = pyOpenSSLAdapter(cert, key)
        # We are done with our work. Cleanup the patches.
        del sys.modules['cherrypy']
        del sys.modules['cherrypy.wsgiserver']
        return adapter
    # SSL backward compatibility
    # Old callers set ssl_certificate/ssl_private_key attributes directly;
    # build an adapter from them if no adapter was configured.
    if (server.ssl_adapter is None and
        getattr(server, 'ssl_certificate', None) and
        getattr(server, 'ssl_private_key', None)):
        server.ssl_adapter = create_ssl_adapter(server.ssl_certificate, server.ssl_private_key)
    server.nodelay = not sys.platform.startswith('java') # TCP_NODELAY isn't supported on the JVM
    return server
class StaticApp(SimpleHTTPRequestHandler):
    """WSGI application for serving static files.

    Reuses SimpleHTTPRequestHandler's path translation and send_head()
    while overriding the socket-writing methods to capture status and
    headers for the WSGI start_response call instead.
    """
    def __init__(self, environ, start_response):
        self.headers = []
        self.environ = environ
        self.start_response = start_response
    def send_response(self, status, msg=""):
        # Capture the status line instead of writing it to a socket.
        self.status = str(status) + " " + msg
    def send_header(self, name, value):
        self.headers.append((name, value))
    def end_headers(self):
        pass
    def log_message(*a): pass
    def __iter__(self):
        # The WSGI response body is produced lazily by this generator.
        environ = self.environ
        self.path = environ.get('PATH_INFO', '')
        self.client_address = environ.get('REMOTE_ADDR','-'), \
                              environ.get('REMOTE_PORT','-')
        self.command = environ.get('REQUEST_METHOD', '-')
        from cStringIO import StringIO
        self.wfile = StringIO() # for capturing error
        try:
            # ETag from the file mtime; short-circuit with 304 on a match.
            path = self.translate_path(self.path)
            etag = '"%s"' % os.path.getmtime(path)
            client_etag = environ.get('HTTP_IF_NONE_MATCH')
            self.send_header('ETag', etag)
            if etag == client_etag:
                self.send_response(304, "Not Modified")
                self.start_response(self.status, self.headers)
                # NOTE(review): raising StopIteration to end a generator is a
                # Python-2 idiom; under PEP 479 (3.7+) it becomes RuntimeError.
                raise StopIteration
        except OSError:
            pass # Probably a 404
        f = self.send_head()
        self.start_response(self.status, self.headers)
        if f:
            # Stream the file in 16 KiB chunks.
            block_size = 16 * 1024
            while True:
                buf = f.read(block_size)
                if not buf:
                    break
                yield buf
            f.close()
        else:
            # No file handle: emit whatever error page send_head() wrote.
            value = self.wfile.getvalue()
            yield value
class StaticMiddleware:
    """WSGI middleware that routes requests under a prefix to StaticApp."""

    def __init__(self, app, prefix='/static/'):
        self.app = app
        self.prefix = prefix

    def __call__(self, environ, start_response):
        path = self.normpath(environ.get('PATH_INFO', ''))
        if path.startswith(self.prefix):
            return StaticApp(environ, start_response)
        return self.app(environ, start_response)

    def normpath(self, path):
        # Normalize after URL-decoding; keep a trailing slash if one existed.
        normalized = posixpath.normpath(urllib.unquote(path))
        if path.endswith("/"):
            normalized += "/"
        return normalized
class LogMiddleware:
    """WSGI middleware for logging the status."""
    def __init__(self, app):
        self.app = app
        self.format = '%s - - [%s] "%s %s %s" - %s'
        from BaseHTTPServer import BaseHTTPRequestHandler
        import StringIO
        f = StringIO.StringIO()
        class FakeSocket:
            def makefile(self, *a):
                return f
        # take log_date_time_string method from BaseHTTPRequestHandler
        self.log_date_time_string = BaseHTTPRequestHandler(FakeSocket(), None, None).log_date_time_string
    def __call__(self, environ, start_response):
        # Wrap start_response so each response's status gets logged once.
        def xstart_response(status, response_headers, *args):
            out = start_response(status, response_headers, *args)
            self.log(status, environ)
            return out
        return self.app(environ, xstart_response)
    def log(self, status, environ):
        outfile = environ.get('wsgi.errors', web.debug)
        req = environ.get('PATH_INFO', '_')
        protocol = environ.get('ACTUAL_SERVER_PROTOCOL', '-')
        method = environ.get('REQUEST_METHOD', '-')
        host = "%s:%s" % (environ.get('REMOTE_ADDR','-'),
                          environ.get('REMOTE_PORT','-'))
        time = self.log_date_time_string()
        # NOTE(review): arguments render as "PROTOCOL METHOD PATH", unlike
        # Apache common-log "METHOD PATH PROTOCOL" -- confirm intended.
        msg = self.format % (host, time, protocol, method, req, status)
        print >> outfile, utils.safestr(msg)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
from collections import OrderedDict
import pytest
from pandas.util._validators import validate_bool_kwarg, validate_kwargs
_fname = "func"
def test_bad_kwarg():
    """An unexpected keyword must raise TypeError naming the bad argument."""
    good_arg = "f"
    bad_arg = good_arg + "o"
    # Only good_arg and bad_arg + "o" are registered, so bad_arg itself is
    # an unexpected keyword.
    compat_args = OrderedDict([(good_arg, "foo"), (bad_arg + "o", "bar")])
    kwargs = {good_arg: "foo", bad_arg: "bar"}
    msg = (r"{fname}\(\) got an unexpected "
           r"keyword argument '{arg}'".format(fname=_fname, arg=bad_arg))
    with pytest.raises(TypeError, match=msg):
        validate_kwargs(_fname, kwargs, compat_args)
@pytest.mark.parametrize("i", range(1, 3))
def test_not_all_none(i):
    """Passing a non-default value for an unsupported arg raises ValueError."""
    bad_arg = "foo"
    msg = (r"the '{arg}' parameter is not supported "
           r"in the pandas implementation of {func}\(\)".
           format(arg=bad_arg, func=_fname))
    compat_args = OrderedDict([("foo", 1), ("bar", "s"), ("baz", None)])
    keys = ("foo", "bar", "baz")
    vals = (2, "s", None)
    # Take the first i key/value pairs; "foo" (first) always differs from
    # its registered default, which must trigger the error.
    kwargs = dict(zip(keys[:i], vals[:i]))
    with pytest.raises(ValueError, match=msg):
        validate_kwargs(_fname, kwargs, compat_args)
def test_validation():
    """Kwargs matching their registered defaults pass without raising."""
    compat_args = OrderedDict([("f", None), ("b", 1), ("ba", "s")])
    kwargs = dict(f=None, b=1)
    # No exceptions should be raised.
    validate_kwargs(_fname, kwargs, compat_args)
@pytest.mark.parametrize("name", ["inplace", "copy"])
@pytest.mark.parametrize("value", [1, "True", [1, 2, 3], 5.0])
def test_validate_bool_kwarg_fail(name, value):
    """Non-bool values for bool-only keywords must raise ValueError."""
    msg = ("For argument \"%s\" expected type bool, received type %s" %
           (name, type(value).__name__))
    with pytest.raises(ValueError, match=msg):
        validate_bool_kwarg(value, name)
@pytest.mark.parametrize("name", ["inplace", "copy"])
@pytest.mark.parametrize("value", [True, False, None])
def test_validate_bool_kwarg(name, value):
    """True, False and None are all accepted and returned unchanged."""
    result = validate_bool_kwarg(value, name)
    assert result == value
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
from openerp.osv import fields,osv
class analytic_entries_report(osv.osv):
    # Read-only reporting model: rows come from the SQL view created in
    # init() below, not from an ORM-managed table (hence _auto = False).
    _name = "analytic.entries.report"
    _description = "Analytic Entries Statistics"
    _auto = False
    _columns = {
        # Every column is a projection or aggregate of account_analytic_line
        # joined with its analytic account (see the view in init()).
        'date': fields.date('Date', readonly=True),
        'user_id': fields.many2one('res.users', 'User',readonly=True),
        'name': fields.char('Description', size=64, readonly=True),
        'partner_id': fields.many2one('res.partner', 'Partner'),
        'company_id': fields.many2one('res.company', 'Company', required=True),
        'currency_id': fields.many2one('res.currency', 'Currency', required=True),
        'account_id': fields.many2one('account.analytic.account', 'Account', required=False),
        'general_account_id': fields.many2one('account.account', 'General Account', required=True),
        'journal_id': fields.many2one('account.analytic.journal', 'Journal', required=True),
        'move_id': fields.many2one('account.move.line', 'Move', required=True),
        'product_id': fields.many2one('product.product', 'Product', required=True),
        'product_uom_id': fields.many2one('product.uom', 'Product Unit of Measure', required=True),
        'amount': fields.float('Amount', readonly=True),
        'unit_amount': fields.integer('Unit Amount', readonly=True),
        'nbr': fields.integer('# Entries', readonly=True), # TDE FIXME master: rename into nbr_entries
    }
    def init(self, cr):
        # (Re)build the backing view on module install/update: one row per
        # distinct grouping of the non-aggregated columns, with amount and
        # unit_amount summed and nbr counting distinct analytic lines.
        tools.drop_view_if_exists(cr, 'analytic_entries_report')
        cr.execute("""
            create or replace view analytic_entries_report as (
                 select
                     min(a.id) as id,
                     count(distinct a.id) as nbr,
                     a.date as date,
                     a.user_id as user_id,
                     a.name as name,
                     analytic.partner_id as partner_id,
                     a.company_id as company_id,
                     a.currency_id as currency_id,
                     a.account_id as account_id,
                     a.general_account_id as general_account_id,
                     a.journal_id as journal_id,
                     a.move_id as move_id,
                     a.product_id as product_id,
                     a.product_uom_id as product_uom_id,
                     sum(a.amount) as amount,
                     sum(a.unit_amount) as unit_amount
                 from
                     account_analytic_line a, account_analytic_account analytic
                 where analytic.id = a.account_id
                 group by
                     a.date, a.user_id,a.name,analytic.partner_id,a.company_id,a.currency_id,
                     a.account_id,a.general_account_id,a.journal_id,
                     a.move_id,a.product_id,a.product_uom_id
            )
        """)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
"""Dask workflow for generating model data for the SIP example ICAL workflow.
This is code based on the test ICAL pipeline notebook from ARL.
"""
import logging
import pickle
import os
import sys
import json
import numpy
from astropy import units as u
from astropy.coordinates import SkyCoord
from data_models.data_model_helpers import export_blockvisibility_to_hdf5
from data_models.polarisation import PolarisationFrame
from workflows.arlexecute.execution_support.arlexecute import arlexecute
from workflows.arlexecute.execution_support.dask_init import get_dask_Client
from processing_components.imaging.base import advise_wide_field
from workflows.arlexecute.imaging.imaging_arlexecute import predict_arlexecute
from workflows.arlexecute.simulation.simulation_arlexecute import simulate_arlexecute, \
corrupt_arlexecute
from processing_components.simulation.testing_support import \
create_low_test_image_from_gleam
LOG = logging.getLogger('sip.ical.generate_data')
# Directory all workflow outputs are written into; created eagerly at import
# time so later writes cannot fail on a missing directory.
RESULTS_DIR = 'results'
# exist_ok avoids the check-then-create race of os.path.exists + makedirs.
os.makedirs(RESULTS_DIR, exist_ok=True)
def init_logging():
"""Initialise Python logging."""
# fmt = '%(thread)s %(asctime)s,%(msecs)d %(name)s %(levelname)s ' \
# '%(message)s'
# logging.basicConfig(filename='%s/imaging_modeling.log' % RESULTS_DIR,
# filemode='a', format=fmt, datefmt='%H:%M:%S',
# level=logging.INFO)
fmt = '%(asctime)s.%(msecs)03d | %(name)-60s | %(levelname)-7s ' \
'| %(message)s'
logging.basicConfig(format=fmt, datefmt='%H:%M:%S', level=logging.DEBUG)
def main():
    """Workflow stage application.

    Reads a JSON parameter file (path from argv[1], default
    'parameters.json'), simulates visibilities for the configured array,
    predicts model visibilities from the GLEAM sky model, corrupts them
    with phase errors, and writes every visibility set as HDF5 under
    RESULTS_DIR.
    """
    init_logging()
    # Get Dask client; re-run logging setup on the workers as well.
    arlexecute.set_client(get_dask_Client())
    arlexecute.run(init_logging)
    LOG.info('Results dir = %s', RESULTS_DIR)
    LOG.info("Starting imaging-modeling")
    # Read parameters
    PARFILE = 'parameters.json'
    if len(sys.argv) > 1:
        PARFILE = sys.argv[1]
    LOG.info("JSON parameter file = %s", PARFILE)
    try:
        with open(PARFILE, "r") as par_file:
            jspar = json.load(par_file)
    except (OSError, ValueError) as error:
        # BUGFIX: open() raises OSError and json.load() raises
        # json.JSONDecodeError (a ValueError subclass). The previous
        # handler caught AssertionError, which neither call can raise, so
        # a missing or malformed file crashed with a raw traceback.
        LOG.critical('ERROR %s', error)
        return
    # Model parameters
    configuration = jspar["modeling"]["configuration"]["name"]
    num_freq_win = jspar["modeling"]["configuration"]["num_freq_win"]  # 7
    num_times = jspar["modeling"]["configuration"]["num_times"]  # 11
    r_max = jspar["modeling"]["configuration"]["r_max"]  # 300.0
    fstart = jspar["modeling"]["configuration"]["fstart"]
    fend = jspar["modeling"]["configuration"]["fend"]
    timestart_pi = jspar["modeling"]["configuration"]["timestart_pi"]  # -1/3
    timeend_pi = jspar["modeling"]["configuration"]["timeend_pi"]  # 1/3
    polframe = jspar["modeling"]["configuration"]["PolarisationFrame"]  # StokesI
    frequency = numpy.linspace(fstart, fend, num_freq_win)
    # Uniform channel widths, e.g. 0.9e8 ... 1.1e8.
    channel_bw = numpy.array(num_freq_win * [frequency[1] - frequency[0]])
    times = numpy.linspace(numpy.pi * timestart_pi, numpy.pi * timeend_pi,
                           num_times)
    phase_centre = SkyCoord(ra=jspar["modeling"]["phasecentre"]["RA"] * u.deg,
                            dec=jspar["modeling"]["phasecentre"]["Dec"] * u.deg,
                            frame=jspar["modeling"]["phasecentre"]["frame"],
                            equinox=jspar["modeling"]["phasecentre"]["equinox"])
    # Simulate visibilities
    vis_list = simulate_arlexecute(configuration,
                                   frequency=frequency,
                                   channel_bandwidth=channel_bw,
                                   times=times,
                                   phasecentre=phase_centre,
                                   order=jspar["modeling"]["simulate"]["order"],
                                   rmax=r_max)
    LOG.info('%d elements in vis_list', len(vis_list))
    LOG.info('About to make visibility')
    vis_list = arlexecute.compute(vis_list, sync=True)
    LOG.debug('vis_list type: %s', type(vis_list))
    LOG.debug('vis_list element type: %s', type(vis_list[0]))
    try:
        export_blockvisibility_to_hdf5(
            vis_list, '%s/%s' % (RESULTS_DIR, jspar["files"]["vis_list"]))
    except AssertionError as error:
        # ARL validates its inputs with assertions; report a failure
        # cleanly instead of letting the traceback escape.
        LOG.critical('ERROR %s', error)
        return
    # Derive imaging parameters from the lowest and highest channels.
    wprojection_planes = jspar["advice"]["wprojection_planes"]
    guard_band_image = jspar["advice"]["guard_band_image"]
    delA = jspar["advice"]["delA"]
    advice_low = advise_wide_field(vis_list[0],
                                   guard_band_image=guard_band_image,
                                   delA=delA,
                                   wprojection_planes=wprojection_planes)
    advice_high = advise_wide_field(vis_list[-1],
                                    guard_band_image=guard_band_image,
                                    delA=delA,
                                    wprojection_planes=wprojection_planes)
    vis_slices = advice_low['vis_slices']
    num_pixels = advice_high['npixels2']
    cellsize = min(advice_low['cellsize'], advice_high['cellsize'])
    # Create GLEAM model: one single-channel image task per frequency window.
    gleam_model = [
        arlexecute.execute(create_low_test_image_from_gleam)(
            npixel=num_pixels,
            frequency=[frequency[f]],
            channel_bandwidth=[channel_bw[f]],
            cellsize=cellsize,
            phasecentre=phase_centre,
            polarisation_frame=PolarisationFrame(polframe),
            flux_limit=jspar["modeling"]["gleam_model"]["flux_limit"],  # 1.0
            applybeam=jspar["modeling"]["gleam_model"]["applybeam"])  # True
        for f, freq in enumerate(frequency)
    ]
    LOG.info('About to make GLEAM model')
    gleam_model = arlexecute.compute(gleam_model, sync=True)
    # Get predicted visibilities for GLEAM model
    LOG.info('About to run predict to get predicted visibility')
    future_vis_graph = arlexecute.scatter(vis_list)
    predicted_vis_list = predict_arlexecute(
        future_vis_graph, gleam_model,
        context=jspar["modeling"]["predict"]["context"],  # 'wstack'
        vis_slices=vis_slices)
    predicted_vis_list = arlexecute.compute(predicted_vis_list, sync=True)
    corrupted_vis_list = corrupt_arlexecute(
        predicted_vis_list,
        phase_error=jspar["modeling"]["corrupt"]["phase_error"])  # 1.0
    LOG.info('About to run corrupt to get corrupted visibility')
    corrupted_vis_list = arlexecute.compute(corrupted_vis_list, sync=True)
    LOG.info('About to output predicted_vislist.hdf')
    export_blockvisibility_to_hdf5(
        predicted_vis_list,
        '%s/%s' % (RESULTS_DIR, jspar["files"]["predicted_vis_list"]))
    LOG.info('About to output corrupted_vislist.hdf')
    export_blockvisibility_to_hdf5(
        corrupted_vis_list,
        '%s/%s' % (RESULTS_DIR, jspar["files"]["corrupted_vis_list"]))
    # Close Dask client
    arlexecute.close()
# Entry point: run the workflow stage when executed as a script.
if __name__ == '__main__':
    main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Authors: Tomas Babej <tbabej@redhat.com>
#
# Copyright (C) 2013 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import print_function
import os
from textwrap import wrap
from ipalib import api
from ipalib.plugable import Plugin, API
from ipalib.errors import ValidationError
from ipapython import admintool
from ipapython.ipa_log_manager import log_mgr
"""
To add configuration instructions for a new use case, define a new class that
inherits from Advice class.
You should create a plugin file for it in ipaserver/advise/plugins folder.
The class can run any arbitrary code or IPA command via api.Command['command']()
calls. It needs to override get_info() method, which returns the formatted
advice string.
>>> class sample_advice(Advice):
>>> description = 'Instructions for machine with SSSD 1.0 setup.'
Description provided shows itself as a header and in the list of all advices
currently available via ipa-advise.
Optionally, you can require root privileges for your plugin:
>>> require_root = True
The following method should be implemented in your plugin:
>>> def get_info():
>>> self.log.debug('Entering execute() method')
>>> self.log.comment('Providing useful advice just for you')
>>> self.log.command('yum update sssd -y')
As you can see, Advice's log has 3 different levels. Debug lines are printed
out with '# DEBUG:' prefix if --verbose had been used. Comment lines utilize
'# ' prefix and command lines are printed raw.
Please note that comments are automatically wrapped after 70 characters.
Use wrapped=False option to force the unwrapped line in the comment.
>>> self.log.comment("This line should not be wrapped", wrapped=False)
As a result, you can redirect the advice's output directly to a script file.
# ipa-advise sample-advice > script.sh
# ./script.sh
Important! Do not forget to register the class to the API.
>>> api.register(sample_advice)
"""
class _AdviceOutput(object):
def __init__(self):
self.content = []
self.prefix = '# '
self.options = None
def comment(self, line, wrapped=True):
if wrapped:
for wrapped_line in wrap(line, 70):
self.content.append(self.prefix + wrapped_line)
else:
self.content.append(self.prefix + line)
def debug(self, line):
if self.options.verbose:
self.comment('DEBUG: ' + line)
def command(self, line):
self.content.append(line)
class Advice(Plugin):
    """Base class for advices, plugins for ipa-advise.

    Subclasses set ``description`` (shown in the advice listing), may set
    ``require_root``, and must implement :meth:`get_info`, emitting their
    instructions through ``self.log``.
    """
    options = None
    require_root = False
    description = ''
    def __init__(self, api):
        super(Advice, self).__init__(api)
        self.log = _AdviceOutput()
    def set_options(self, options):
        # Share the parsed CLI options with the output helper so that,
        # e.g., --verbose controls debug lines.
        self.options = options
        self.log.options = options
    def get_info(self):
        """Return a string with instructions.

        Must be overridden by child Advices.
        """
        raise NotImplementedError
class AdviseAPI(API):
    # Plugin API that discovers Advice subclasses found under
    # ipaserver.advise.plugins.*.
    bases = (Advice,)
    modules = ('ipaserver.advise.plugins.*',)
# Module-level singleton used by the ipa-advise command below.
advise_api = AdviseAPI()
class IpaAdvise(admintool.AdminTool):
    """
    Admin tool that given systems's configuration provides instructions how to
    configure the systems for various use cases.
    """
    command_name = 'ipa-advise'
    usage = "%prog ADVICE"
    description = "Provides configuration advice for various use cases. To "\
                  "see the list of possible ADVICEs, run ipa-advise without "\
                  "any arguments."
    def __init__(self, options, args):
        super(IpaAdvise, self).__init__(options, args)
    @classmethod
    def add_options(cls, parser):
        super(IpaAdvise, cls).add_options(parser)
    def validate_options(self):
        # At most one ADVICE keyword may be given on the command line.
        super(IpaAdvise, self).validate_options(needs_root=False)
        if len(self.args) > 1:
            raise self.option_parser.error("You can only provide one "
                                           "positional argument.")
    def log_success(self):
        pass
    def print_config_list(self):
        """Print the table of available advices with wrapped descriptions."""
        self.print_header('List of available advices')
        max_keyword_len = max((len(keyword) for keyword in advise_api.Advice))
        for keyword in advise_api.Advice:
            advice = getattr(advise_api.Advice, keyword, '')
            description = getattr(advice, 'description', '')
            keyword = keyword.replace('_', '-')
            # Compute the number of spaces needed for the table to be aligned
            offset = max_keyword_len - len(keyword)
            prefix = " {key} {off}: ".format(key=keyword, off=' ' * offset)
            wrapped_description = wrap(description, 80 - len(prefix))
            # Print the first line with the prefix (keyword)
            print(prefix + wrapped_description[0])
            # Print the rest wrapped behind the colon
            for line in wrapped_description[1:]:
                print("{off}{line}".format(off=' ' * len(prefix), line=line))
    def print_header(self, header, print_shell=False):
        """Print a dashed header; with print_shell, prefix for script output."""
        header_size = len(header)
        prefix = ''
        if print_shell:
            prefix = '# '
            print('#!/bin/sh')
        # Do not print out empty header
        if header_size > 0:
            print((prefix + '-' * 70))
            for line in wrap(header, 70):
                print((prefix + line))
            print((prefix + '-' * 70))
    def print_advice(self, keyword):
        """Run the named advice plugin and print its output as a script."""
        advice = getattr(advise_api.Advice, keyword, None)
        # Ensure that Configuration class for given --setup option value exists
        if advice is None:
            raise ValidationError(
                name="advice",
                error="No instructions are available for '{con}'. "
                      "See the list of available configuration "
                      "by invoking the ipa-advise command with no argument."
                      .format(con=keyword.replace('_', '-')))
        # Check whether root privileges are needed.
        # BUGFIX: root is determined by the effective *user* id, not the
        # effective group id. The previous os.getegid() check passed for any
        # process whose primary group happened to be 0 and failed for root
        # running with a non-zero egid.
        if advice.require_root and os.geteuid() != 0:
            raise admintool.ScriptError(
                'Must be root to get advice for {adv}'
                .format(adv=keyword.replace('_', '-')), 1)
        # Print out nicely formatted header
        self.print_header(advice.description, print_shell=True)
        # Set options so that plugin can use verbose/quiet options
        advice.set_options(self.options)
        # Print out the actual advice
        api.Backend.rpcclient.connect()
        advice.get_info()
        api.Backend.rpcclient.disconnect()
        for line in advice.log.content:
            print(line)
    def run(self):
        super(IpaAdvise, self).run()
        api.bootstrap(in_server=False, context='cli')
        api.finalize()
        advise_api.bootstrap(in_server=False, context='cli')
        advise_api.finalize()
        if not self.options.verbose:
            # Do not print connection information by default
            logger_name = r'ipa\.ipalib\.plugins\.rpcclient'
            log_mgr.configure(dict(logger_regexps=[(logger_name, 'warning')]))
        # With no argument, print the list out and exit
        if not self.args:
            self.print_config_list()
            return
        else:
            keyword = self.args[0].replace('-', '_')
            self.print_advice(keyword)
|
unknown
|
codeparrot/codeparrot-clean
| ||
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build linux
package syscall_test
import (
"bytes"
"net"
"os"
"syscall"
"testing"
)
// TestSCMCredentials tests the sending and receiving of credentials
// (PID, UID, GID) in an ancillary message between two UNIX
// sockets. The SO_PASSCRED socket option is enabled on the sending
// socket for this to work.
// TestSCMCredentials tests the sending and receiving of credentials
// (PID, UID, GID) in an ancillary message between two UNIX
// sockets. The SO_PASSCRED socket option is enabled on the sending
// socket for this to work.
func TestSCMCredentials(t *testing.T) {
	// Exercise both stream and datagram sockets; on SOCK_STREAM the write
	// of ancillary data also sends a one-byte dummy payload (dataLen).
	socketTypeTests := []struct {
		socketType int
		dataLen    int
	}{
		{
			syscall.SOCK_STREAM,
			1,
		}, {
			syscall.SOCK_DGRAM,
			0,
		},
	}
	for _, tt := range socketTypeTests {
		fds, err := syscall.Socketpair(syscall.AF_LOCAL, tt.socketType, 0)
		if err != nil {
			t.Fatalf("Socketpair: %v", err)
		}
		// SO_PASSCRED must be enabled on the receiving end for the kernel
		// to deliver SCM_CREDENTIALS ancillary messages.
		err = syscall.SetsockoptInt(fds[0], syscall.SOL_SOCKET, syscall.SO_PASSCRED, 1)
		if err != nil {
			syscall.Close(fds[0])
			syscall.Close(fds[1])
			t.Fatalf("SetsockoptInt: %v", err)
		}
		srvFile := os.NewFile(uintptr(fds[0]), "server")
		cliFile := os.NewFile(uintptr(fds[1]), "client")
		defer srvFile.Close()
		defer cliFile.Close()
		srv, err := net.FileConn(srvFile)
		if err != nil {
			t.Errorf("FileConn: %v", err)
			return
		}
		defer srv.Close()
		cli, err := net.FileConn(cliFile)
		if err != nil {
			t.Errorf("FileConn: %v", err)
			return
		}
		defer cli.Close()
		var ucred syscall.Ucred
		if os.Getuid() != 0 {
			// Forging credentials (uid/gid 0) requires privilege; as an
			// unprivileged user the kernel must reject the attempt.
			ucred.Pid = int32(os.Getpid())
			ucred.Uid = 0
			ucred.Gid = 0
			oob := syscall.UnixCredentials(&ucred)
			_, _, err := cli.(*net.UnixConn).WriteMsgUnix(nil, oob, nil)
			// Unwrap net/os error layers to reach the raw errno.
			if op, ok := err.(*net.OpError); ok {
				err = op.Err
			}
			if sys, ok := err.(*os.SyscallError); ok {
				err = sys.Err
			}
			switch err {
			case syscall.EPERM, syscall.EINVAL:
			default:
				t.Fatalf("WriteMsgUnix failed with %v, want EPERM or EINVAL", err)
			}
		}
		// Now send the process's real credentials, which must round-trip.
		ucred.Pid = int32(os.Getpid())
		ucred.Uid = uint32(os.Getuid())
		ucred.Gid = uint32(os.Getgid())
		oob := syscall.UnixCredentials(&ucred)
		// On SOCK_STREAM, this is internally going to send a dummy byte
		n, oobn, err := cli.(*net.UnixConn).WriteMsgUnix(nil, oob, nil)
		if err != nil {
			t.Fatalf("WriteMsgUnix: %v", err)
		}
		if n != 0 {
			t.Fatalf("WriteMsgUnix n = %d, want 0", n)
		}
		if oobn != len(oob) {
			t.Fatalf("WriteMsgUnix oobn = %d, want %d", oobn, len(oob))
		}
		// Receive with an oversized ancillary buffer and verify flags,
		// payload length, and the raw oob bytes all match expectations.
		oob2 := make([]byte, 10*len(oob))
		n, oobn2, flags, _, err := srv.(*net.UnixConn).ReadMsgUnix(nil, oob2)
		if err != nil {
			t.Fatalf("ReadMsgUnix: %v", err)
		}
		if flags != syscall.MSG_CMSG_CLOEXEC {
			t.Fatalf("ReadMsgUnix flags = %#x, want %#x (MSG_CMSG_CLOEXEC)", flags, syscall.MSG_CMSG_CLOEXEC)
		}
		if n != tt.dataLen {
			t.Fatalf("ReadMsgUnix n = %d, want %d", n, tt.dataLen)
		}
		if oobn2 != oobn {
			// without SO_PASSCRED set on the socket, ReadMsgUnix will
			// return zero oob bytes
			t.Fatalf("ReadMsgUnix oobn = %d, want %d", oobn2, oobn)
		}
		oob2 = oob2[:oobn2]
		if !bytes.Equal(oob, oob2) {
			t.Fatal("ReadMsgUnix oob bytes don't match")
		}
		// Parse the control message back into a Ucred and compare it with
		// what was sent.
		scm, err := syscall.ParseSocketControlMessage(oob2)
		if err != nil {
			t.Fatalf("ParseSocketControlMessage: %v", err)
		}
		newUcred, err := syscall.ParseUnixCredentials(&scm[0])
		if err != nil {
			t.Fatalf("ParseUnixCredentials: %v", err)
		}
		if *newUcred != ucred {
			t.Fatalf("ParseUnixCredentials = %+v, want %+v", newUcred, ucred)
		}
	}
}
|
go
|
github
|
https://github.com/golang/go
|
src/syscall/creds_test.go
|
import sys
import numpy as np
from lxmls.parsing.dependency_reader import *
from lxmls.parsing.dependency_writer import *
from lxmls.parsing.dependency_features import *
from lxmls.parsing.dependency_decoder import *
from lxmls.util.my_math_utils import *
class DependencyParser():
    '''
    Dependency parser class
    '''
    def __init__(self):
        # Training state (trained flag, projective/non-projective decoding,
        # language tag, learned weight vector) plus the decoder, corpus
        # reader/writer and feature-extractor collaborators.
        self.trained = False
        self.projective = False
        self.language = ""
        self.weights = []
        self.decoder = DependencyDecoder()
        self.reader = DependencyReader()
        self.writer = DependencyWriter()
        self.features = DependencyFeatures()
    def read_data(self, language):
        # Load the corpus for `language` and build the feature dictionary
        # from its training instances.
        self.language = language
        self.reader.load(language)
        self.features.create_dictionary(self.reader.train_instances)
    def train_perceptron(self, n_epochs):
        '''Trains the parser by running the averaged perceptron algorithm for n_epochs.'''
        self.weights = np.zeros(self.features.n_feats)
        # Running sum of the per-epoch weight vectors, averaged at the end.
        total = np.zeros(self.features.n_feats)
        for epoch in range(n_epochs):
            print "Epoch {0}".format(epoch+1)
            n_mistakes = 0
            n_tokens = 0
            n_instances = 0
            for instance in self.reader.train_instances:
                feats = self.features.create_features(instance)
                scores = self.features.compute_scores(feats, self.weights)
                if self.projective:
                    heads_pred = self.decoder.parse_proj(scores)
                else:
                    heads_pred = self.decoder.parse_nonproj(scores)
                for m in range(np.size(heads_pred)):
                    if heads_pred[m] != instance.heads[m]: # mistake
                        # Standard perceptron update: promote features of the
                        # gold arc, demote features of the predicted arc.
                        # Negative feature indices mark absent features.
                        for f in feats[instance.heads[m]][m]:
                            if f < 0:
                                continue
                            self.weights[f] += 1.0
                        for f in feats[heads_pred[m]][m]:
                            if f < 0:
                                continue
                            self.weights[f] -= 1.0
                        n_mistakes += 1
                    n_tokens += 1
                n_instances += 1
            print "Training accuracy: {0}".format(np.double(n_tokens - n_mistakes)/np.double(n_tokens))
            total += self.weights
        # Averaged perceptron: final weights are the mean of the per-epoch
        # weight vectors.
        self.weights = total / np.double(n_epochs)
    def train_crf_sgd(self, n_epochs, sigma, eta0 = 0.001):
        '''Trains the parser by running the online MaxEnt algorithm for n_epochs, regularization coefficient sigma,
        and initial stepsize eta0 (which anneals as O(1/(sigma*t))).'''
        self.weights = np.zeros(self.features.n_feats)
        t = 0
        t0 = 1.0 / (sigma * eta0)
        for epoch in range(n_epochs):
            print "Epoch {0}".format(epoch+1)
            n_mistakes = 0
            n_tokens = 0
            n_instances = 0
            objective = 0.0
            for instance in self.reader.train_instances:
                # Annealed step size: eta_t = 1 / (sigma * (t + t0)).
                eta = 1.0 / (sigma * (t + t0))
                feats = self.features.create_features(instance)
                scores = self.features.compute_scores(feats, self.weights)
                # Compute marginals and log-partition function, and move away from that direction
                marginals, logZ = self.decoder.parse_marginals_nonproj(scores)
                self.weights -= eta * sigma * self.weights # Scale the weight vector
                for h in range(np.size(marginals,0)):
                    for m in range(1, np.size(marginals,1)):
                        if feats[h][m] == None:
                            continue
                        for f in feats[h][m]:
                            if f < 0:
                                continue
                            self.weights[f] -= eta * marginals[h,m]
                # Compute score of the correct parse, and move the weight vector towards that direction.
                score_corr = 0.0
                for m in range(1, np.size(instance.heads)):
                    h = instance.heads[m]
                    score_corr += scores[h,m]
                    for f in feats[h][m]:
                        if f < 0:
                            continue
                        self.weights[f] += eta
                # Compute objective (w.r.t. this instance only)
                objective += 0.5 * sigma * np.dot(self.weights,self.weights) - score_corr + logZ
                n_instances += 1
                t += 1
            print "Training objective: {0}".format(objective / n_instances)
    def test(self):
        # Decode every test instance with the learned weights, report
        # attachment accuracy, and save the predicted heads.
        n_mistakes = 0
        n_tokens = 0
        n_instances = 0
        arr_heads_pred = [];
        for instance in self.reader.test_instances:
            feats = self.features.create_features(instance)
            scores = self.features.compute_scores(feats, self.weights)
            if self.projective:
                heads_pred = self.decoder.parse_proj(scores)
            else:
                heads_pred = self.decoder.parse_nonproj(scores)
            for m in range(np.size(heads_pred)):
                if heads_pred[m] != instance.heads[m]: # mistake
                    # NOTE(review): these two feature loops have empty
                    # bodies (only `continue`) — they look like leftovers
                    # from the perceptron update above and have no effect.
                    for f in feats[instance.heads[m]][m]:
                        if f < 0:
                            continue
                    for f in feats[heads_pred[m]][m]:
                        if f < 0:
                            continue
                    n_mistakes += 1
                n_tokens += 1
            n_instances += 1
            arr_heads_pred.append(heads_pred)
        print "Test accuracy ({0} test instances): {1}".format(n_instances, np.double(n_tokens - n_mistakes)/np.double(n_tokens))
        self.writer.save(self.language, arr_heads_pred)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
#
# Authors: Denis A. Engemann <denis.engemann@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Juergen Dammers <j.dammers@fz-juelich.de>
#
# License: BSD (3-clause)
from inspect import isfunction
from collections import namedtuple
from copy import deepcopy
from numbers import Integral
from time import time
import math
import os
import json
import numpy as np
from .ecg import (qrs_detector, _get_ecg_channel_index, _make_ecg,
create_ecg_epochs)
from .eog import _find_eog_events, _get_eog_channel_index
from .infomax_ import infomax
from ..cov import compute_whitener
from .. import Covariance, Evoked
from ..io.pick import (pick_types, pick_channels, pick_info,
_picks_to_idx, _get_channel_types, _DATA_CH_TYPES_SPLIT)
from ..io.proj import make_projector
from ..io.write import (write_double_matrix, write_string,
write_name_list, write_int, start_block,
end_block)
from ..io.tree import dir_tree_find
from ..io.open import fiff_open
from ..io.tag import read_tag
from ..io.meas_info import write_meas_info, read_meas_info
from ..io.constants import FIFF
from ..io.base import BaseRaw
from ..io.eeglab.eeglab import _get_info, _check_load_mat
from ..epochs import BaseEpochs
from ..viz import (plot_ica_components, plot_ica_scores,
plot_ica_sources, plot_ica_overlay)
from ..viz.ica import plot_ica_properties
from ..viz.topomap import _plot_corrmap
from ..channels.channels import _contains_ch_type, ContainsMixin
from ..io.write import start_file, end_file, write_id
from ..utils import (check_version, logger, check_fname, verbose,
_reject_data_segments, check_random_state, _validate_type,
compute_corr, _get_inst_data, _ensure_int,
copy_function_doc_to_method_doc, _pl, warn, Bunch,
_check_preload, _check_compensation_grade, fill_doc,
_check_option, _PCA, int_like,
_check_all_same_channel_names)
from ..fixes import _get_args, _safe_svd
from ..filter import filter_data
from .bads import _find_outliers
from .ctps_ import ctps
from ..io.pick import pick_channels_regexp
__all__ = ('ICA', 'ica_find_ecg_events', 'ica_find_eog_events',
'get_score_funcs', 'read_ica', 'read_ica_eeglab')
def _make_xy_sfunc(func, ndim_output=False):
"""Aux function."""
if ndim_output:
def sfunc(x, y):
return np.array([func(a, y.ravel()) for a in x])[:, 0]
else:
def sfunc(x, y):
return np.array([func(a, y.ravel()) for a in x])
sfunc.__name__ = '.'.join(['score_func', func.__module__, func.__name__])
sfunc.__doc__ = func.__doc__
return sfunc
# Violate our assumption that the output is 1D so can't be used.
# Could eventually be added but probably not worth the effort unless someone
# requests it.
_BLOCKLIST = {'somersd'}
# makes score funcs attr accessible for users
def get_score_funcs():
    """Get the score functions.

    Returns
    -------
    score_funcs : dict
        The score functions.
    """
    from scipy import stats
    from scipy.spatial import distance

    def _public_functions(module):
        # Public (non-underscore) functions of the module, minus names on
        # the blocklist.
        return [(name, obj) for name, obj in vars(module).items()
                if isfunction(obj) and not name.startswith('_') and
                name not in _BLOCKLIST]

    score_funcs = Bunch()
    # Distance functions take (u, v); wrap them as 1D-output scorers.
    score_funcs.update({name: _make_xy_sfunc(obj)
                        for name, obj in _public_functions(distance)
                        if _get_args(obj) == ['u', 'v']})
    # Stats functions take (x, y) and return multi-valued results; keep
    # only the first output element.
    score_funcs.update({name: _make_xy_sfunc(obj, ndim_output=True)
                        for name, obj in _public_functions(stats)
                        if _get_args(obj) == ['x', 'y']})
    return score_funcs
def _check_for_unsupported_ica_channels(picks, info, allow_ref_meg=False):
    """Check for channels in picks that are not considered valid channels.

    Accepted channels are the data channels
    ('seeg', 'dbs', 'ecog', 'eeg', 'hbo', 'hbr', 'mag', and 'grad'), 'eog'
    and 'ref_meg'.

    This prevents the program from crashing without
    feedback when a bad channel is provided to ICA whitening.

    Raises
    ------
    ValueError
        If any picked channel has a type outside the accepted set.
    """
    types = _DATA_CH_TYPES_SPLIT + ('eog',)
    types += ('ref_meg',) if allow_ref_meg else ()
    chs = _get_channel_types(info, picks, unique=True, only_data_chs=False)
    if not all(ch in types for ch in chs):
        # BUGFIX: a space was missing between the two sentences of the
        # error message ("... passed for ICA: %s.Only the following ...").
        raise ValueError('Invalid channel type%s passed for ICA: %s. '
                         'Only the following types are supported: %s'
                         % (_pl(chs), chs, types))
_KNOWN_ICA_METHODS = ('fastica', 'infomax', 'picard')
@fill_doc
class ICA(ContainsMixin):
u"""Data decomposition using Independent Component Analysis (ICA).
This object estimates independent components from :class:`mne.io.Raw`,
:class:`mne.Epochs`, or :class:`mne.Evoked` objects. Components can
optionally be removed (for artifact repair) prior to signal reconstruction.
.. warning:: ICA is sensitive to low-frequency drifts and therefore
requires the data to be high-pass filtered prior to fitting.
Typically, a cutoff frequency of 1 Hz is recommended.
Parameters
----------
n_components : int | float | None
Number of principal components (from the pre-whitening PCA step) that
are passed to the ICA algorithm during fitting:
- :class:`int`
Must be greater than 1 and less than or equal to the number of
channels.
- :class:`float` between 0 and 1 (exclusive)
Will select the smallest number of components required to explain
the cumulative variance of the data greater than ``n_components``.
Consider this hypothetical example: we have 3 components, the first
explaining 70%%, the second 20%%, and the third the remaining 10%%
of the variance. Passing 0.8 here (corresponding to 80%% of
explained variance) would yield the first two components,
explaining 90%% of the variance: only by using both components the
requested threshold of 80%% explained variance can be exceeded. The
third component, on the other hand, would be excluded.
- ``None``
``0.999999`` will be used. This is done to avoid numerical
stability problems when whitening, particularly when working with
rank-deficient data.
Defaults to ``None``. The actual number used when executing the
:meth:`ICA.fit` method will be stored in the attribute
``n_components_`` (note the trailing underscore).
.. versionchanged:: 0.22
For a :class:`python:float`, the number of components will account
for *greater than* the given variance level instead of *less than or
equal to* it. The default (None) will also take into account the
rank deficiency of the data.
noise_cov : None | instance of Covariance
Noise covariance used for pre-whitening. If None (default), channels
are scaled to unit variance ("z-standardized") as a group by channel
type prior to the whitening by PCA.
%(random_state)s
As estimation can be non-deterministic it can be useful to fix the
random state to have reproducible results.
method : {'fastica', 'infomax', 'picard'}
The ICA method to use in the fit method. Use the ``fit_params`` argument
to set additional parameters. Specifically, if you want Extended
Infomax, set ``method='infomax'`` and ``fit_params=dict(extended=True)``
(this also works for ``method='picard'``). Defaults to ``'fastica'``.
For reference, see :footcite:`Hyvarinen1999,BellSejnowski1995,LeeEtAl1999,AblinEtAl2018`.
fit_params : dict | None
Additional parameters passed to the ICA estimator as specified by
``method``.
max_iter : int | 'auto'
Maximum number of iterations during fit. If ``'auto'``, it
will set maximum iterations to ``1000`` for ``'fastica'``
and to ``500`` for ``'infomax'`` or ``'picard'``. The actual number of
iterations it took :meth:`ICA.fit` to complete will be stored in the
``n_iter_`` attribute.
allow_ref_meg : bool
Allow ICA on MEG reference channels. Defaults to False.
.. versionadded:: 0.18
%(verbose)s
Attributes
----------
current_fit : str
Flag informing about which data type (raw or epochs) was used for the
fit.
ch_names : list-like
Channel names resulting from initial picking.
n_components_ : int
If fit, the actual number of PCA components used for ICA decomposition.
pre_whitener_ : ndarray, shape (n_channels, 1) or (n_channels, n_channels)
If fit, array used to pre-whiten the data prior to PCA.
pca_components_ : ndarray, shape ``(n_channels, n_channels)``
If fit, the PCA components.
pca_mean_ : ndarray, shape (n_channels,)
If fit, the mean vector used to center the data before doing the PCA.
pca_explained_variance_ : ndarray, shape ``(n_channels,)``
If fit, the variance explained by each PCA component.
mixing_matrix_ : ndarray, shape ``(n_components_, n_components_)``
If fit, the whitened mixing matrix to go back from ICA space to PCA
space.
It is, in combination with the ``pca_components_``, used by
:meth:`ICA.apply` and :meth:`ICA.get_components` to re-mix/project
a subset of the ICA components into the observed channel space.
The former method also removes the pre-whitening (z-scaling) and the
de-meaning.
unmixing_matrix_ : ndarray, shape ``(n_components_, n_components_)``
If fit, the whitened matrix to go from PCA space to ICA space.
Used, in combination with the ``pca_components_``, by the methods
:meth:`ICA.get_sources` and :meth:`ICA.apply` to unmix the observed
data.
exclude : array-like of int
List or np.array of sources indices to exclude when re-mixing the data
in the :meth:`ICA.apply` method, i.e. artifactual ICA components.
The components identified manually and by the various automatic
artifact detection methods should be (manually) appended
(e.g. ``ica.exclude.extend(eog_inds)``).
(There is also an ``exclude`` parameter in the :meth:`ICA.apply`
method.) To scrap all marked components, set this attribute to an empty
list.
info : None | instance of Info
The measurement info copied from the object fitted.
n_samples_ : int
The number of samples used on fit.
labels_ : dict
A dictionary of independent component indices, grouped by types of
independent components. This attribute is set by some of the artifact
detection functions.
n_iter_ : int
If fit, the number of iterations required to complete ICA.
Notes
-----
.. versionchanged:: 0.23
Version 0.23 introduced the ``max_iter='auto'`` settings for maximum
iterations. With version 0.24 ``'auto'`` will be the new
default, replacing the current ``max_iter=200``.
.. versionchanged:: 0.23
Warn if `~mne.Epochs` were baseline-corrected.
    .. note:: If you intend to fit ICA on `~mne.Epochs`, it is
              recommended to high-pass filter, but **not** baseline correct
              the data for good ICA performance. A warning will be emitted
              otherwise.
A trailing ``_`` in an attribute name signifies that the attribute was
added to the object during fitting, consistent with standard scikit-learn
practice.
ICA :meth:`fit` in MNE proceeds in two steps:
1. :term:`Whitening <whitening>` the data by means of a pre-whitening step
(using ``noise_cov`` if provided, or the standard deviation of each
channel type) and then principal component analysis (PCA).
2. Passing the ``n_components`` largest-variance components to the ICA
algorithm to obtain the unmixing matrix (and by pseudoinversion, the
mixing matrix).
ICA :meth:`apply` then:
1. Unmixes the data with the ``unmixing_matrix_``.
2. Includes ICA components based on ``ica.include`` and ``ica.exclude``.
3. Re-mixes the data with ``mixing_matrix_``.
4. Restores any data not passed to the ICA algorithm, i.e., the PCA
components between ``n_components`` and ``n_pca_components``.
``n_pca_components`` determines how many PCA components will be kept when
reconstructing the data when calling :meth:`apply`. This parameter can be
used for dimensionality reduction of the data, or dealing with low-rank
data (such as those with projections, or MEG data processed by SSS). It is
important to remove any numerically-zero-variance components in the data,
otherwise numerical instability causes problems when computing the mixing
matrix. Alternatively, using ``n_components`` as a float will also avoid
numerical stability problems.
The ``n_components`` parameter determines how many components out of
the ``n_channels`` PCA components the ICA algorithm will actually fit.
This is not typically used for EEG data, but for MEG data, it's common to
use ``n_components < n_channels``. For example, full-rank
306-channel MEG data might use ``n_components=40`` to find (and
later exclude) only large, dominating artifacts in the data, but still
reconstruct the data using all 306 PCA components. Setting
``n_pca_components=40``, on the other hand, would actually reduce the
rank of the reconstructed data to 40, which is typically undesirable.
If you are migrating from EEGLAB and intend to reduce dimensionality via
PCA, similarly to EEGLAB's ``runica(..., 'pca', n)`` functionality,
pass ``n_components=n`` during initialization and then
``n_pca_components=n`` during :meth:`apply`. The resulting reconstructed
data after :meth:`apply` will have rank ``n``.
.. note:: Commonly used for reasons of i) computational efficiency and
ii) additional noise reduction, it is a matter of current debate
whether pre-ICA dimensionality reduction could decrease the
reliability and stability of the ICA, at least for EEG data and
especially during preprocessing :footcite:`ArtoniEtAl2018`.
(But see also :footcite:`Montoya-MartinezEtAl2017` for a
possibly confounding effect of the different whitening/sphering
methods used in this paper (ZCA vs. PCA).)
On the other hand, for rank-deficient data such as EEG data after
average reference or interpolation, it is recommended to reduce
the dimensionality (by 1 for average reference and 1 for each
interpolated channel) for optimal ICA performance (see the
`EEGLAB wiki <eeglab_wiki_>`_).
Caveat! If supplying a noise covariance, keep track of the projections
available in the cov or in the raw object. For example, if you are
interested in EOG or ECG artifacts, EOG and ECG projections should be
    temporarily removed before fitting ICA, for example::
>> projs, raw.info['projs'] = raw.info['projs'], []
>> ica.fit(raw)
>> raw.info['projs'] = projs
Methods currently implemented are FastICA (default), Infomax, and Picard.
Standard Infomax can be quite sensitive to differences in floating point
arithmetic. Extended Infomax seems to be more stable in this respect,
enhancing reproducibility and stability of results; use Extended Infomax
via ``method='infomax', fit_params=dict(extended=True)``. Allowed entries
in ``fit_params`` are determined by the various algorithm implementations:
see :class:`~sklearn.decomposition.FastICA`, :func:`~picard.picard`,
:func:`~mne.preprocessing.infomax`.
.. note:: Picard can be used to solve the same problems as FastICA,
Infomax, and extended Infomax, but typically converges faster
than either of those methods. To make use of Picard's speed while
still obtaining the same solution as with other algorithms, you
need to specify ``method='picard'`` and ``fit_params`` as a
dictionary with the following combination of keys:
- ``dict(ortho=False, extended=False)`` for Infomax
- ``dict(ortho=False, extended=True)`` for extended Infomax
- ``dict(ortho=True, extended=True)`` for FastICA
Reducing the tolerance (set in ``fit_params``) speeds up estimation at the
cost of consistency of the obtained results. It is difficult to directly
compare tolerance levels between Infomax and Picard, but for Picard and
FastICA a good rule of thumb is ``tol_fastica == tol_picard ** 2``.
.. _eeglab_wiki: https://eeglab.org/tutorials/06_RejectArtifacts/RunICA.html#how-to-deal-with-corrupted-ica-decompositions
References
----------
.. footbibliography::
""" # noqa: E501
    @verbose
    def __init__(self, n_components=None, *, noise_cov=None,
                 random_state=None, method='fastica', fit_params=None,
                 max_iter='auto', allow_ref_meg=False,
                 verbose=None):  # noqa: D102
        # Validate user input eagerly so errors surface at construction time.
        _validate_type(method, str, 'method')
        _validate_type(n_components, (float, 'int-like', None))
        if method != 'imported_eeglab':  # internal use only
            _check_option('method', method, _KNOWN_ICA_METHODS)
        # Optional back-ends: fail fast with a clear message if missing.
        if method == 'fastica' and not check_version('sklearn'):
            raise ImportError(
                'The scikit-learn package is required for method="fastica".')
        if method == 'picard' and not check_version('picard'):
            raise ImportError(
                'The python-picard package is required for method="picard".')
        self.noise_cov = noise_cov
        # A float n_components selects by explained variance and must be
        # strictly inside (0, 1); selecting exactly one component is
        # explicitly unsupported (see _fit, which also rejects that case).
        for (kind, val) in [('n_components', n_components)]:
            if isinstance(val, float) and not 0 < val < 1:
                raise ValueError('Selecting ICA components by explained '
                                 'variance needs values between 0.0 and 1.0 '
                                 f'(exclusive), got {kind}={val}')
            if isinstance(val, int_like) and val == 1:
                raise ValueError(
                    f'Selecting one component with {kind}={val} is not '
                    'supported')
        self.current_fit = 'unfitted'
        self.verbose = verbose
        self.n_components = n_components
        # In newer ICAs this should always be None, but keep it for
        # backward compat with older versions of MNE that used it
        self._max_pca_components = None
        self.n_pca_components = None
        self.ch_names = None
        self.random_state = random_state
        if fit_params is None:
            fit_params = {}
        fit_params = deepcopy(fit_params)  # avoid side effects
        if method == 'fastica':
            # Fill in FastICA defaults without overriding user-given values.
            update = {'algorithm': 'parallel', 'fun': 'logcosh',
                      'fun_args': None}
            fit_params.update({k: v for k, v in update.items() if k
                               not in fit_params})
        elif method == 'infomax':
            # extended=True is default in underlying function, but we want
            # default False here unless user specified True:
            fit_params.setdefault('extended', False)
        _validate_type(max_iter, (str, 'int-like'), 'max_iter')
        if isinstance(max_iter, str):
            _check_option('max_iter', max_iter, ('auto',), 'when str')
            # 'auto' resolves to a per-method default (see class docstring).
            if method == 'fastica':
                max_iter = 1000
            elif method in ['infomax', 'picard']:
                max_iter = 500
        fit_params.setdefault('max_iter', max_iter)
        self.max_iter = max_iter
        self.fit_params = fit_params
        self.exclude = []
        self.info = None
        self.method = method
        self.labels_ = dict()
        self.allow_ref_meg = allow_ref_meg
def __repr__(self):
"""ICA fit information."""
if self.current_fit == 'unfitted':
s = 'no'
elif self.current_fit == 'raw':
s = 'raw data'
else:
s = 'epochs'
s += ' decomposition, '
s += 'fit (%s): %s samples, ' % (self.method,
str(getattr(self, 'n_samples_', '')))
s += ('%s components' % str(self.n_components_) if
hasattr(self, 'n_components_') else
'no dimension reduction')
if self.info is not None:
ch_fit = ['"%s"' % c for c in _DATA_CH_TYPES_SPLIT if c in self]
s += ', channels used: {}'.format('; '.join(ch_fit))
if self.exclude:
s += ', %i sources marked for exclusion' % len(self.exclude)
return '<ICA | %s>' % s
    @verbose
    def fit(self, inst, picks=None, start=None, stop=None, decim=None,
            reject=None, flat=None, tstep=2.0, reject_by_annotation=True,
            verbose=None):
        """Run the ICA decomposition on raw data.

        Caveat! If supplying a noise covariance keep track of the projections
        available in the cov, the raw or the epochs object. For example,
        if you are interested in EOG or ECG artifacts, EOG and ECG projections
        should be temporarily removed before fitting the ICA.

        Parameters
        ----------
        inst : instance of Raw or Epochs
            The data to be decomposed.
        %(picks_good_data_noref)s
            This selection remains throughout the initialized ICA solution.
        start : int | float | None
            First sample to include. If float, data will be interpreted as
            time in seconds. If None, data will be used from the first sample.
        stop : int | float | None
            Last sample to not include. If float, data will be interpreted as
            time in seconds. If None, data will be used to the last sample.
        decim : int | None
            Increment for selecting each nth time slice. If None, all samples
            within ``start`` and ``stop`` are used.
        reject : dict | None
            Rejection parameters based on peak-to-peak amplitude.
            Valid keys are 'grad', 'mag', 'eeg', 'seeg', 'dbs', 'ecog', 'eog',
            'ecg', 'hbo', 'hbr'.
            If reject is None then no rejection is done. Example::

                reject = dict(grad=4000e-13, # T / m (gradiometers)
                              mag=4e-12, # T (magnetometers)
                              eeg=40e-6, # V (EEG channels)
                              eog=250e-6 # V (EOG channels)
                              )

            It only applies if ``inst`` is of type Raw.
        flat : dict | None
            Rejection parameters based on flatness of signal.
            Valid keys are 'grad', 'mag', 'eeg', 'seeg', 'dbs', 'ecog', 'eog',
            'ecg', 'hbo', 'hbr'.
            Values are floats that set the minimum acceptable peak-to-peak
            amplitude. If flat is None then no rejection is done.
            It only applies if ``inst`` is of type Raw.
        tstep : float
            Length of data chunks for artifact rejection in seconds.
            It only applies if ``inst`` is of type Raw.
        %(reject_by_annotation_raw)s

            .. versionadded:: 0.14.0
        %(verbose_meth)s

        Returns
        -------
        self : instance of ICA
            Returns the modified instance.
        """
        _validate_type(inst, (BaseRaw, BaseEpochs), 'inst', 'Raw or Epochs')
        # Warn (but do not fail) on data that typically yields a poor fit.
        if np.isclose(inst.info['highpass'], 0.):
            warn('The data has not been high-pass filtered. For good ICA '
                 'performance, it should be high-pass filtered (e.g., with a '
                 '1.0 Hz lower bound) before fitting ICA.')
        if isinstance(inst, BaseEpochs) and inst.baseline is not None:
            warn('The epochs you passed to ICA.fit() were baseline-corrected. '
                 'However, we suggest to fit ICA only on data that has been '
                 'high-pass filtered, but NOT baseline-corrected.')
        picks = _picks_to_idx(inst.info, picks, allow_empty=False,
                              with_ref_meg=self.allow_ref_meg)
        _check_for_unsupported_ica_channels(
            picks, inst.info, allow_ref_meg=self.allow_ref_meg)
        # Actually start fitting
        t_start = time()
        # Refitting discards all attributes from a previous fit.
        if self.current_fit != 'unfitted':
            self._reset()
        logger.info('Fitting ICA to data using %i channels '
                    '(please be patient, this may take a while)' % len(picks))
        # n_components could be float 0 < x < 1, but that's okay here
        if self.n_components is not None and self.n_components > len(picks):
            raise ValueError(
                f'ica.n_components ({self.n_components}) cannot '
                f'be greater than len(picks) ({len(picks)})')
        # filter out all the channels the raw wouldn't have initialized
        self.info = pick_info(inst.info, picks)
        # NOTE(review): compensation ('comps') entries are cleared here —
        # presumably they would be inconsistent after picking; confirm.
        if self.info['comps']:
            self.info['comps'] = []
        self.ch_names = self.info['ch_names']
        if isinstance(inst, BaseRaw):
            self._fit_raw(inst, picks, start, stop, decim, reject, flat,
                          tstep, reject_by_annotation, verbose)
        else:
            assert isinstance(inst, BaseEpochs)
            self._fit_epochs(inst, picks, decim, verbose)
        # sort ICA components by explained variance
        var = _ica_explained_variance(self, inst)
        var_ord = var.argsort()[::-1]
        _sort_components(self, var_ord, copy=False)
        t_stop = time()
        logger.info("Fitting ICA took {:.1f}s.".format(t_stop - t_start))
        return self
def _reset(self):
"""Aux method."""
for key in ('pre_whitener_', 'unmixing_matrix_', 'mixing_matrix_',
'n_components_', 'n_samples_', 'pca_components_',
'pca_explained_variance_',
'pca_mean_', 'n_iter_', 'drop_inds_', 'reject_'):
if hasattr(self, key):
delattr(self, key)
def _fit_raw(self, raw, picks, start, stop, decim, reject, flat, tstep,
reject_by_annotation, verbose):
"""Aux method."""
start, stop = _check_start_stop(raw, start, stop)
reject_by_annotation = 'omit' if reject_by_annotation else None
# this will be a copy
data = raw.get_data(picks, start, stop, reject_by_annotation)
# this will be a view
if decim is not None:
data = data[:, ::decim]
# this will make a copy
if (reject is not None) or (flat is not None):
self.reject_ = reject
data, self.drop_inds_ = _reject_data_segments(data, reject, flat,
decim, self.info,
tstep)
self.n_samples_ = data.shape[1]
self._fit(data, 'raw')
return self
def _fit_epochs(self, epochs, picks, decim, verbose):
"""Aux method."""
if epochs.events.size == 0:
raise RuntimeError('Tried to fit ICA with epochs, but none were '
'found: epochs.events is "{}".'
.format(epochs.events))
# this should be a copy (picks a list of int)
data = epochs.get_data()[:, picks]
# this will be a view
if decim is not None:
data = data[:, :, ::decim]
self.n_samples_ = data.shape[0] * data.shape[2]
# This will make at least one copy (one from hstack, maybe one
# more from _pre_whiten)
data = np.hstack(data)
self._fit(data, 'epochs')
return self
def _compute_pre_whitener(self, data):
"""Aux function."""
data = self._do_proj(data, log_suffix='(pre-whitener computation)')
if self.noise_cov is None:
# use standardization as whitener
# Scale (z-score) the data by channel type
info = self.info
pre_whitener = np.empty([len(data), 1])
for ch_type in _DATA_CH_TYPES_SPLIT + ('eog', "ref_meg"):
if _contains_ch_type(info, ch_type):
if ch_type == 'seeg':
this_picks = pick_types(info, meg=False, seeg=True)
elif ch_type == 'dbs':
this_picks = pick_types(info, meg=False, dbs=True)
elif ch_type == 'ecog':
this_picks = pick_types(info, meg=False, ecog=True)
elif ch_type == 'eeg':
this_picks = pick_types(info, meg=False, eeg=True)
elif ch_type in ('mag', 'grad'):
this_picks = pick_types(info, meg=ch_type)
elif ch_type == 'eog':
this_picks = pick_types(info, meg=False, eog=True)
elif ch_type in ('hbo', 'hbr'):
this_picks = pick_types(info, meg=False, fnirs=ch_type)
elif ch_type == 'ref_meg':
this_picks = pick_types(info, meg=False, ref_meg=True)
else:
raise RuntimeError('Should not be reached.'
'Unsupported channel {}'
.format(ch_type))
pre_whitener[this_picks] = np.std(data[this_picks])
else:
pre_whitener, _ = compute_whitener(self.noise_cov, self.info)
assert data.shape[0] == pre_whitener.shape[1]
self.pre_whitener_ = pre_whitener
    def _do_proj(self, data, log_suffix=''):
        # Apply any active SSP projectors to ``data`` before pre-whitening.
        # When a noise covariance is supplied, the projection is already
        # folded into the whitener, so we only log here and skip the
        # matrix multiplication.
        if self.info is not None and self.info['projs']:
            proj, nproj, _ = make_projector(
                [p for p in self.info['projs'] if p['active']],
                self.info['ch_names'], include_active=True)
            if nproj:
                logger.info(
                    f' Applying projection operator with {nproj} '
                    f'vector{_pl(nproj)}'
                    f'{" " if log_suffix else ""}{log_suffix}')
                if self.noise_cov is None:  # otherwise it's in pre_whitener_
                    data = proj @ data
        return data
def _pre_whiten(self, data):
data = self._do_proj(data, log_suffix='(pre-whitener application)')
if self.noise_cov is None:
data /= self.pre_whitener_
else:
data = self.pre_whitener_ @ data
return data
    def _fit(self, data, fit_type):
        """Run pre-whitening, PCA, and the ICA algorithm on (ch, time) data.

        Stores all fitted attributes (``pca_*``, ``unmixing_matrix_``,
        ``n_components_``, ``n_iter_``, ...) and sets ``current_fit``.
        """
        random_state = check_random_state(self.random_state)
        n_channels, n_samples = data.shape
        self._compute_pre_whitener(data)
        data = self._pre_whiten(data)
        # PCA operates on (samples, channels), hence the transpose.
        pca = _PCA(n_components=self._max_pca_components, whiten=True)
        data = pca.fit_transform(data.T)
        use_ev = pca.explained_variance_ratio_
        n_pca = self.n_pca_components
        # Resolve n_pca_components to an integer count of PCA components.
        if isinstance(n_pca, float):
            n_pca = int(_exp_var_ncomp(use_ev, n_pca)[0])
        elif n_pca is None:
            n_pca = len(use_ev)
        assert isinstance(n_pca, (int, np.int_))
        # If user passed a float, select the PCA components explaining the
        # given cumulative variance. This information will later be used to
        # only submit the corresponding parts of the data to ICA.
        if self.n_components is None:
            # None case: check if n_pca_components or 0.999999 yields smaller
            msg = 'Selecting by non-zero PCA components'
            self.n_components_ = min(
                n_pca, _exp_var_ncomp(use_ev, 0.999999)[0])
        elif isinstance(self.n_components, float):
            self.n_components_, ev = _exp_var_ncomp(use_ev, self.n_components)
            if self.n_components_ == 1:
                raise RuntimeError(
                    'One PCA component captures most of the '
                    f'explained variance ({100 * ev}%), your threshold '
                    'results in 1 component. You should select '
                    'a higher value.')
            msg = 'Selecting by explained variance'
        else:
            msg = 'Selecting by number'
            self.n_components_ = _ensure_int(self.n_components)
        # check to make sure something okay happened
        if self.n_components_ > n_pca:
            ev = np.cumsum(use_ev)
            ev /= ev[-1]
            evs = 100 * ev[[self.n_components_ - 1, n_pca - 1]]
            raise RuntimeError(
                f'n_components={self.n_components} requires '
                f'{self.n_components_} PCA values (EV={evs[0]:0.1f}%) but '
                f'n_pca_components ({self.n_pca_components}) results in '
                f'only {n_pca} components (EV={evs[1]:0.1f}%)')
        logger.info('%s: %s components' % (msg, self.n_components_))
        # the things to store for PCA
        self.pca_mean_ = pca.mean_
        self.pca_components_ = pca.components_
        self.pca_explained_variance_ = pca.explained_variance_
        del pca
        # update number of components
        self._update_ica_names()
        if self.n_pca_components is not None and \
                self.n_pca_components > len(self.pca_components_):
            raise ValueError(
                f'n_pca_components ({self.n_pca_components}) is greater than '
                f'the number of PCA components ({len(self.pca_components_)})')
        # take care of ICA: fit only the first n_components_ PCA components.
        sel = slice(0, self.n_components_)
        if self.method == 'fastica':
            from sklearn.decomposition import FastICA
            ica = FastICA(
                whiten=False, random_state=random_state, **self.fit_params)
            ica.fit(data[:, sel])
            self.unmixing_matrix_ = ica.components_
            self.n_iter_ = ica.n_iter_
        elif self.method in ('infomax', 'extended-infomax'):
            unmixing_matrix, n_iter = infomax(
                data[:, sel], random_state=random_state, return_n_iter=True,
                **self.fit_params)
            self.unmixing_matrix_ = unmixing_matrix
            self.n_iter_ = n_iter
            del unmixing_matrix, n_iter
        elif self.method == 'picard':
            from picard import picard
            _, W, _, n_iter = picard(
                data[:, sel].T, whiten=False, return_n_iter=True,
                random_state=random_state, **self.fit_params)
            self.unmixing_matrix_ = W
            self.n_iter_ = n_iter + 1  # picard() starts counting at 0
            del _, n_iter
        assert self.unmixing_matrix_.shape == (self.n_components_,) * 2
        # Warn if the variance spread makes the pseudo-inverse ill-conditioned.
        norms = self.pca_explained_variance_
        stable = norms / norms[0] > 1e-6  # to be stable during pinv
        norms = norms[:self.n_components_]
        if not stable[self.n_components_ - 1]:
            max_int = np.where(stable)[0][-1] + 1
            warn(f'Using n_components={self.n_components} (resulting in '
                 f'n_components_={self.n_components_}) may lead to an '
                 f'unstable mixing matrix estimation because the ratio '
                 f'between the largest ({norms[0]:0.2g}) and smallest '
                 f'({norms[-1]:0.2g}) variances is too large (> 1e6); '
                 f'consider setting n_components=0.999999 or an '
                 f'integer <= {max_int}')
        norms = np.sqrt(norms)
        norms[norms == 0] = 1.
        self.unmixing_matrix_ /= norms  # whitening
        self._update_mixing_matrix()
        self.current_fit = fit_type
def _update_mixing_matrix(self):
from scipy import linalg
self.mixing_matrix_ = linalg.pinv(self.unmixing_matrix_)
def _update_ica_names(self):
"""Update ICA names when n_components_ is set."""
self._ica_names = ['ICA%03d' % ii for ii in range(self.n_components_)]
def _transform(self, data):
"""Compute sources from data (operates inplace)."""
data = self._pre_whiten(data)
if self.pca_mean_ is not None:
data -= self.pca_mean_[:, None]
# Apply unmixing
pca_data = np.dot(self.unmixing_matrix_,
self.pca_components_[:self.n_components_])
# Apply PCA
sources = np.dot(pca_data, data)
return sources
def _transform_raw(self, raw, start, stop, reject_by_annotation=False):
"""Transform raw data."""
if not hasattr(self, 'mixing_matrix_'):
raise RuntimeError('No fit available. Please fit ICA.')
start, stop = _check_start_stop(raw, start, stop)
picks = pick_types(raw.info, include=self.ch_names, exclude='bads',
meg=False, ref_meg=False)
if len(picks) != len(self.ch_names):
raise RuntimeError('Raw doesn\'t match fitted data: %i channels '
'fitted but %i channels supplied. \nPlease '
'provide Raw compatible with '
'ica.ch_names' % (len(self.ch_names),
len(picks)))
reject = 'omit' if reject_by_annotation else None
data = raw.get_data(picks, start, stop, reject)
return self._transform(data)
def _transform_epochs(self, epochs, concatenate):
"""Aux method."""
if not hasattr(self, 'mixing_matrix_'):
raise RuntimeError('No fit available. Please fit ICA.')
picks = pick_types(epochs.info, include=self.ch_names, exclude='bads',
meg=False, ref_meg=False)
# special case where epochs come picked but fit was 'unpicked'.
if len(picks) != len(self.ch_names):
raise RuntimeError('Epochs don\'t match fitted data: %i channels '
'fitted but %i channels supplied. \nPlease '
'provide Epochs compatible with '
'ica.ch_names' % (len(self.ch_names),
len(picks)))
data = np.hstack(epochs.get_data()[:, picks])
sources = self._transform(data)
if not concatenate:
# Put the data back in 3D
sources = np.array(np.split(sources, len(epochs.events), 1))
return sources
def _transform_evoked(self, evoked):
"""Aux method."""
if not hasattr(self, 'mixing_matrix_'):
raise RuntimeError('No fit available. Please fit ICA.')
picks = pick_types(evoked.info, include=self.ch_names, exclude='bads',
meg=False, ref_meg=False)
if len(picks) != len(self.ch_names):
raise RuntimeError('Evoked doesn\'t match fitted data: %i channels'
' fitted but %i channels supplied. \nPlease '
'provide Evoked compatible with '
'ica.ch_names' % (len(self.ch_names),
len(picks)))
sources = self._transform(evoked.data[picks])
return sources
def get_components(self):
"""Get ICA topomap for components as numpy arrays.
Returns
-------
components : array, shape (n_channels, n_components)
The ICA components (maps).
"""
return np.dot(self.mixing_matrix_[:, :self.n_components_].T,
self.pca_components_[:self.n_components_]).T
def get_sources(self, inst, add_channels=None, start=None, stop=None):
"""Estimate sources given the unmixing matrix.
This method will return the sources in the container format passed.
Typical usecases:
1. pass Raw object to use `raw.plot <mne.io.Raw.plot>` for ICA sources
2. pass Epochs object to compute trial-based statistics in ICA space
3. pass Evoked object to investigate time-locking in ICA space
Parameters
----------
inst : instance of Raw, Epochs or Evoked
Object to compute sources from and to represent sources in.
add_channels : None | list of str
Additional channels to be added. Useful to e.g. compare sources
with some reference. Defaults to None.
start : int | float | None
First sample to include. If float, data will be interpreted as
time in seconds. If None, the entire data will be used.
stop : int | float | None
Last sample to not include. If float, data will be interpreted as
time in seconds. If None, the entire data will be used.
Returns
-------
sources : instance of Raw, Epochs or Evoked
The ICA sources time series.
"""
if isinstance(inst, BaseRaw):
_check_compensation_grade(self.info, inst.info, 'ICA', 'Raw',
ch_names=self.ch_names)
sources = self._sources_as_raw(inst, add_channels, start, stop)
elif isinstance(inst, BaseEpochs):
_check_compensation_grade(self.info, inst.info, 'ICA', 'Epochs',
ch_names=self.ch_names)
sources = self._sources_as_epochs(inst, add_channels, False)
elif isinstance(inst, Evoked):
_check_compensation_grade(self.info, inst.info, 'ICA', 'Evoked',
ch_names=self.ch_names)
sources = self._sources_as_evoked(inst, add_channels)
else:
raise ValueError('Data input must be of Raw, Epochs or Evoked '
'type')
return sources
    def _sources_as_raw(self, raw, add_channels, start, stop):
        """Build a Raw object whose channels are the ICA source time series."""
        # merge copied instance and picked data with sources
        start, stop = _check_start_stop(raw, start, stop)
        data_ = self._transform_raw(raw, start=start, stop=stop)
        assert data_.shape[1] == stop - start
        # Detach the preloaded buffer before copy() so the (possibly large)
        # data array is not duplicated, then reattach it afterwards.
        if raw.preload:  # get data and temporarily delete
            data = raw._data
            del raw._data
        out = raw.copy()  # copy and reappend
        if raw.preload:
            raw._data = data
        # populate copied raw.
        if add_channels is not None and len(add_channels):
            # append the requested extra channels below the sources
            picks = pick_channels(raw.ch_names, add_channels)
            data_ = np.concatenate([
                data_, raw.get_data(picks, start=start, stop=stop)])
        out._data = data_
        out._filenames = [None]
        out.preload = True
        # Shift first/last sample to reflect the requested start/stop window.
        out._first_samps[:] = [out.first_samp + start]
        out._last_samps[:] = [out.first_samp + data_.shape[1] - 1]
        out._projector = None
        self._export_info(out.info, raw, add_channels)
        return out
def _sources_as_epochs(self, epochs, add_channels, concatenate):
"""Aux method."""
out = epochs.copy()
sources = self._transform_epochs(epochs, concatenate)
if add_channels is not None:
picks = [epochs.ch_names.index(k) for k in add_channels]
else:
picks = []
out._data = np.concatenate([sources, epochs.get_data()[:, picks]],
axis=1) if len(picks) > 0 else sources
self._export_info(out.info, epochs, add_channels)
out.preload = True
out._raw = None
out._projector = None
return out
def _sources_as_evoked(self, evoked, add_channels):
"""Aux method."""
if add_channels is not None:
picks = [evoked.ch_names.index(k) for k in add_channels]
else:
picks = []
sources = self._transform_evoked(evoked)
if len(picks) > 1:
data = np.r_[sources, evoked.data[picks]]
else:
data = sources
out = evoked.copy()
out.data = data
self._export_info(out.info, evoked, add_channels)
return out
    def _export_info(self, info, container, add_channels):
        """Rewrite ``info`` (in place) to describe ICA source channels."""
        # set channel names and info: one MISC channel per ICA component
        ch_names = []
        ch_info = info['chs'] = []
        for ii, name in enumerate(self._ica_names):
            ch_names.append(name)
            ch_info.append(dict(
                ch_name=name, cal=1, logno=ii + 1,
                coil_type=FIFF.FIFFV_COIL_NONE, kind=FIFF.FIFFV_MISC_CH,
                coord_frame=FIFF.FIFFV_COORD_UNKNOWN, unit=FIFF.FIFF_UNIT_NONE,
                loc=np.zeros(12, dtype='f4'),
                range=1.0, scanno=ii + 1, unit_mul=0))
        if add_channels is not None:
            # re-append additionally picked ch_names
            ch_names += add_channels
            # re-append additionally picked ch_info
            ch_info += [k for k in container.info['chs'] if k['ch_name'] in
                        add_channels]
        # mark excluded components as bad so downstream plots reflect them
        info['bads'] = [ch_names[k] for k in self.exclude]
        info['projs'] = []  # make sure projections are removed.
        info._update_redundant()
        info._check_consistency()
    @verbose
    def score_sources(self, inst, target=None, score_func='pearsonr',
                      start=None, stop=None, l_freq=None, h_freq=None,
                      reject_by_annotation=True, verbose=None):
        """Assign score to components based on statistic or metric.

        Parameters
        ----------
        inst : instance of Raw, Epochs or Evoked
            The object to reconstruct the sources from.
        target : array-like | str | None
            Signal to which the sources shall be compared. It has to be of
            the same shape as the sources. If str, a routine will try to find
            a matching channel name. If None, a score
            function expecting only one input-array argument must be used,
            for instance, scipy.stats.skew (default).
        score_func : callable | str
            Callable taking as arguments either two input arrays
            (e.g. Pearson correlation) or one input
            array (e.g. skewness) and returns a float. For convenience the
            most common score_funcs are available via string labels:
            Currently, all distance metrics from scipy.spatial and all
            functions from scipy.stats taking compatible input arguments are
            supported. These functions have been modified to support iteration
            over the rows of a 2D array.
        start : int | float | None
            First sample to include. If float, data will be interpreted as
            time in seconds. If None, data will be used from the first sample.
        stop : int | float | None
            Last sample to not include. If float, data will be interpreted as
            time in seconds. If None, data will be used to the last sample.
        l_freq : float
            Low pass frequency.
        h_freq : float
            High pass frequency.
        %(reject_by_annotation_all)s

            .. versionadded:: 0.14.0
        %(verbose_meth)s

        Returns
        -------
        scores : ndarray
            Scores for each source as returned from score_func.
        """
        # Get sources from whichever container type was passed.
        if isinstance(inst, BaseRaw):
            _check_compensation_grade(self.info, inst.info, 'ICA', 'Raw',
                                      ch_names=self.ch_names)
            sources = self._transform_raw(inst, start, stop,
                                          reject_by_annotation)
        elif isinstance(inst, BaseEpochs):
            _check_compensation_grade(self.info, inst.info, 'ICA', 'Epochs',
                                      ch_names=self.ch_names)
            sources = self._transform_epochs(inst, concatenate=True)
        elif isinstance(inst, Evoked):
            _check_compensation_grade(self.info, inst.info, 'ICA', 'Evoked',
                                      ch_names=self.ch_names)
            sources = self._transform_evoked(inst)
        else:
            raise ValueError('Data input must be of Raw, Epochs or Evoked '
                             'type')
        if target is not None:  # we can have univariate metrics without target
            target = self._check_target(target, inst, start, stop,
                                        reject_by_annotation)
            if sources.shape[-1] != target.shape[-1]:
                raise ValueError('Sources and target do not have the same '
                                 'number of time slices.')
            # auto target selection
            if isinstance(inst, BaseRaw):
                # We pass inst, not self, because the sfreq of the data we
                # use for scoring components can be different:
                sources, target = _band_pass_filter(inst, sources, target,
                                                    l_freq, h_freq)
        scores = _find_sources(sources, target, score_func)
        return scores
    def _check_target(self, target, inst, start, stop,
                      reject_by_annotation=False):
        """Aux Method.

        Resolve ``target`` into a data array extracted from ``inst``.
        ``target`` may be a channel name (str), which is looked up in
        ``inst``, or an array-like of samples, which is reshaped/raveled
        as needed for scoring.
        """
        if isinstance(inst, BaseRaw):
            # Raw supports annotation-based rejection when extracting data;
            # map the boolean flag onto get_data()'s string/None convention.
            reject_by_annotation = 'omit' if reject_by_annotation else None
            start, stop = _check_start_stop(inst, start, stop)
            if hasattr(target, 'ndim'):
                # array target: ensure 2D shape (n_targets, n_times)
                if target.ndim < 2:
                    target = target.reshape(1, target.shape[-1])
            if isinstance(target, str):
                pick = _get_target_ch(inst, target)
                target = inst.get_data(pick, start, stop, reject_by_annotation)
        elif isinstance(inst, BaseEpochs):
            if isinstance(target, str):
                pick = _get_target_ch(inst, target)
                target = inst.get_data()[:, pick]
            if hasattr(target, 'ndim'):
                # collapse a singleton-axis 3D array (e.g. one channel per
                # epoch) into a single 1D time series
                if target.ndim == 3 and min(target.shape) == 1:
                    target = target.ravel()
        elif isinstance(inst, Evoked):
            if isinstance(target, str):
                pick = _get_target_ch(inst, target)
                target = inst.data[pick]
        # NOTE(review): non-Raw/Epochs/Evoked inst falls through and returns
        # target unchanged — callers are expected to have validated inst.
        return target
    def _find_bads_ch(self, inst, chs, threshold=3.0, start=None,
                      stop=None, l_freq=None, h_freq=None,
                      reject_by_annotation=True, prefix='chs',
                      measure='zscore'):
        """Compute ExG/ref components.

        See find_bads_ecg, find_bads_eog, and find_bads_ref for details.
        Returns ``(labels, scores)`` where ``labels`` are component indices
        sorted by descending absolute score and ``scores`` is either one
        array (single target) or a list of arrays (multiple targets).
        """
        scores, idx = [], []
        # some magic we need inevitably ...
        # get targets before equalizing
        targets = [self._check_target(
            ch, inst, start, stop, reject_by_annotation) for ch in chs]
        # assign names, if targets are arrays instead of strings
        target_names = []
        for ch in chs:
            if not isinstance(ch, str):
                if prefix == "ecg":
                    # synthetic ECG built by cross-channel averaging
                    target_names.append('ECG-MAG')
                else:
                    target_names.append(prefix)
            else:
                target_names.append(ch)
        for ii, (ch, target) in enumerate(zip(target_names, targets)):
            # Pearson-correlate every ICA source with this target signal
            scores += [self.score_sources(
                inst, target=target, score_func='pearsonr', start=start,
                stop=stop, l_freq=l_freq, h_freq=h_freq,
                reject_by_annotation=reject_by_annotation)]
            # pick last scores
            if measure == "zscore":
                # iterative z-scoring: outliers masked and z-scores
                # recomputed until no supra-threshold component remains
                this_idx = _find_outliers(scores[-1], threshold=threshold)
            elif measure == "correlation":
                # absolute raw correlation threshold (range 0 to 1)
                this_idx = np.where(abs(scores[-1]) > threshold)[0]
            else:
                raise ValueError("Unknown measure {}".format(measure))
            idx += [this_idx]
            self.labels_['%s/%i/' % (prefix, ii) + ch] = list(this_idx)
        # remove duplicates but keep order by score, even across multiple
        # ref channels
        scores_ = np.concatenate([scores[ii][inds]
                                  for ii, inds in enumerate(idx)])
        idx_ = np.concatenate(idx)[np.abs(scores_).argsort()[::-1]]
        idx_unique = list(np.unique(idx_))
        idx = []
        for i in idx_:
            if i in idx_unique:
                idx.append(i)
                idx_unique.remove(i)
        if len(scores) == 1:
            # single target: return the score array itself, not a list
            scores = scores[0]
        labels = list(idx)
        return labels, scores
def _get_ctps_threshold(self, pk_threshold=20):
"""Automatically decide the threshold of Kuiper index for CTPS method.
This function finds the threshold of Kuiper index based on the
threshold of pk. Kuiper statistic that minimizes the difference between
pk and the pk threshold (defaults to 20 [1]) is returned. It is assumed
that the data are appropriately filtered and bad data are rejected at
least based on peak-to-peak amplitude when/before running the ICA
decomposition on data.
References
----------
[1] Dammers, J., Schiek, M., Boers, F., Silex, C., Zvyagintsev,
M., Pietrzyk, U., Mathiak, K., 2008. Integration of amplitude
and phase statistics for complete artifact removal in independent
components of neuromagnetic recordings. Biomedical
Engineering, IEEE Transactions on 55 (10), pp.2356.
"""
N = self.info['sfreq']
Vs = np.arange(1, 100) / 100
C = math.sqrt(N) + 0.155 + 0.24 / math.sqrt(N)
# in formula (13), when k gets large, only k=1 matters for the
# summation. k*V*C thus becomes V*C
Pks = 2 * (4 * (Vs * C)**2 - 1) * (np.exp(-2 * (Vs * C)**2))
# NOTE: the threshold of pk is transformed to Pk for comparison
# pk = -log10(Pk)
return Vs[np.argmin(np.abs(Pks - 10**(-pk_threshold)))]
    @verbose
    def find_bads_ecg(self, inst, ch_name=None, threshold='auto', start=None,
                      stop=None, l_freq=8, h_freq=16, method='ctps',
                      reject_by_annotation=True, measure='zscore',
                      verbose=None):
        """Detect ECG related components.

        Cross-trial phase statistics (default) or Pearson correlation can be
        used for detection.

        .. note:: If no ECG channel is available, routine attempts to create
                  an artificial ECG based on cross-channel averaging.

        Parameters
        ----------
        inst : instance of Raw, Epochs or Evoked
            Object to compute sources from.
        ch_name : str
            The name of the channel to use for ECG peak detection.
            The argument is mandatory if the dataset contains no ECG
            channels.
        threshold : float | str
            The value above which a feature is classified as outlier. If 'auto'
            and method is 'ctps', automatically compute the threshold. If
            'auto' and method is 'correlation', defaults to 3.0. The default
            translates to 0.25 for 'ctps' and 3.0 for 'correlation' in version
            0.21 but will change to 'auto' in version 0.22.

            .. versionchanged:: 0.21
        start : int | float | None
            First sample to include. If float, data will be interpreted as
            time in seconds. If None, data will be used from the first sample.
        stop : int | float | None
            Last sample to not include. If float, data will be interpreted as
            time in seconds. If None, data will be used to the last sample.
        l_freq : float
            Low pass frequency.
        h_freq : float
            High pass frequency.
        method : {'ctps', 'correlation'}
            The method used for detection. If 'ctps', cross-trial phase
            statistics [1] are used to detect ECG related components.
            Thresholding is then based on the significance value of a Kuiper
            statistic.
            If 'correlation', detection is based on Pearson correlation
            between the filtered data and the filtered ECG channel.
            Thresholding is based on iterative z-scoring. The above
            threshold components will be masked and the z-score will
            be recomputed until no supra-threshold component remains.
            Defaults to 'ctps'.
        %(reject_by_annotation_all)s

            .. versionadded:: 0.14.0
        measure : 'zscore' | 'correlation'
            Which method to use for finding outliers. ``'zscore'`` (default) is
            the iterated Z-scoring method, and ``'correlation'`` is an absolute
            raw correlation threshold with a range of 0 to 1.

            .. versionadded:: 0.21
        %(verbose_meth)s

        Returns
        -------
        ecg_idx : list of int
            The indices of ECG-related components.
        scores : np.ndarray of float, shape (``n_components_``)
            If method is 'ctps', the normalized Kuiper index scores. If method
            is 'correlation', the correlation scores.

        See Also
        --------
        find_bads_eog, find_bads_ref

        References
        ----------
        [1] Dammers, J., Schiek, M., Boers, F., Silex, C., Zvyagintsev,
        M., Pietrzyk, U., Mathiak, K., 2008. Integration of amplitude
        and phase statistics for complete artifact removal in independent
        components of neuromagnetic recordings. Biomedical
        Engineering, IEEE Transactions on 55 (10), 2353-2362.
        """
        # Resolve the ECG reference: use a dedicated ECG channel if present,
        # otherwise synthesize one by cross-channel averaging.
        idx_ecg = _get_ecg_channel_index(ch_name, inst)
        if idx_ecg is None:
            ecg, times = _make_ecg(inst, start, stop,
                                   reject_by_annotation=reject_by_annotation)
        else:
            ecg = inst.ch_names[idx_ecg]
        _validate_type(threshold, (str, 'numeric'), 'threshold')
        if isinstance(threshold, str):
            _check_option('threshold', threshold, ('auto',), extra='when str')
        if method == 'ctps':
            if threshold == 'auto':
                # data-driven Kuiper-index threshold
                threshold = self._get_ctps_threshold()
                logger.info('Using threshold: %.2f for CTPS ECG detection'
                            % threshold)
            if isinstance(inst, BaseRaw):
                # CTPS requires epoched data, so cut heartbeat epochs from Raw
                sources = self.get_sources(create_ecg_epochs(
                    inst, ch_name, l_freq=l_freq, h_freq=h_freq,
                    keep_ecg=False,
                    reject_by_annotation=reject_by_annotation)).get_data()
                if sources.shape[0] == 0:
                    warn('No ECG activity detected. Consider changing '
                         'the input parameters.')
            elif isinstance(inst, BaseEpochs):
                sources = self.get_sources(inst).get_data()
            else:
                raise ValueError('With `ctps` only Raw and Epochs input is '
                                 'supported')
            _, p_vals, _ = ctps(sources)
            # one score per component: max significance across time
            scores = p_vals.max(-1)
            ecg_idx = np.where(scores >= threshold)[0]
            # sort indices by scores
            ecg_idx = ecg_idx[np.abs(scores[ecg_idx]).argsort()[::-1]]
            self.labels_['ecg'] = list(ecg_idx)
            if ch_name is None:
                ch_name = 'ECG-MAG'
            self.labels_['ecg/%s' % ch_name] = list(ecg_idx)
        elif method == 'correlation':
            if threshold == 'auto':
                threshold = 3.0
            self.labels_['ecg'], scores = self._find_bads_ch(
                inst, [ecg], threshold=threshold, start=start, stop=stop,
                l_freq=l_freq, h_freq=h_freq, prefix="ecg",
                reject_by_annotation=reject_by_annotation, measure=measure)
        else:
            raise ValueError('Method "%s" not supported.' % method)
        return self.labels_['ecg'], scores
@verbose
def find_bads_ref(self, inst, ch_name=None, threshold=3.0, start=None,
stop=None, l_freq=None, h_freq=None,
reject_by_annotation=True, method='together',
measure="zscore", verbose=None):
"""Detect MEG reference related components using correlation.
Parameters
----------
inst : instance of Raw, Epochs or Evoked
Object to compute sources from. Should contain at least one channel
i.e. component derived from MEG reference channels.
ch_name : list of str
Which MEG reference components to use. If None, then all channels
that begin with REF_ICA.
threshold : int | float
The value above which a feature is classified as outlier.
start : int | float | None
First sample to include. If float, data will be interpreted as
time in seconds. If None, data will be used from the first sample.
stop : int | float | None
Last sample to not include. If float, data will be interpreted as
time in seconds. If None, data will be used to the last sample.
l_freq : float
Low pass frequency.
h_freq : float
High pass frequency.
%(reject_by_annotation_all)s
method : 'together' | 'separate'
Method to use to identify reference channel related components.
Defaults to ``'together'``. See notes.
.. versionadded:: 0.21
measure : 'zscore' | 'correlation'
Which method to use for finding outliers. ``'zscore'`` (default) is
the iterated Z-scoring method, and ``'correlation'`` is an absolute
raw correlation threshold with a range of 0 to 1.
.. versionadded:: 0.21
%(verbose_meth)s
Returns
-------
ref_idx : list of int
The indices of MEG reference related components, sorted by score.
scores : np.ndarray of float, shape (``n_components_``) | list of array
The correlation scores.
See Also
--------
find_bads_ecg, find_bads_eog
Notes
-----
ICA decomposition on MEG reference channels is used to assess external
magnetic noise and remove it from the MEG. Two methods are supported:
With the "together" method, only one ICA fit is used, which
encompasses both MEG and reference channels together. Components which
have particularly strong weights on the reference channels may be
thresholded and marked for removal.
With "separate," selected components from a separate ICA decomposition
on the reference channels are used as a ground truth for identifying
bad components in an ICA fit done on MEG channels only. The logic here
is similar to an EOG/ECG, with reference components replacing the
EOG/ECG channels. Recommended procedure is to perform ICA separately
on reference channels, extract them using .get_sources(), and then
append them to the inst using :meth:`~mne.io.Raw.add_channels`,
preferably with the prefix ``REF_ICA`` so that they can be
automatically detected.
Thresholding in both cases is based on adaptive z-scoring:
The above-threshold components will be masked and the z-score will be
recomputed until no supra-threshold component remains.
Validation and further documentation for this technique can be found
in :footcite:`HannaEtAl2020`.
.. versionadded:: 0.18
References
----------
.. footbibliography::
"""
if method == "separate":
if not ch_name:
inds = pick_channels_regexp(inst.ch_names, 'REF_ICA*')
else:
inds = pick_channels(inst.ch_names, ch_name)
# regexp returns list, pick_channels returns numpy
inds = list(inds)
if not inds:
raise ValueError('No valid channels available.')
ref_chs = [inst.ch_names[k] for k in inds]
self.labels_['ref_meg'], scores = self._find_bads_ch(
inst, ref_chs, threshold=threshold, start=start, stop=stop,
l_freq=l_freq, h_freq=h_freq, prefix='ref_meg',
reject_by_annotation=reject_by_annotation,
measure=measure)
elif method == 'together':
meg_picks = pick_types(self.info, meg=True, ref_meg=False)
ref_picks = pick_types(self.info, meg=False, ref_meg=True)
if not any(meg_picks) or not any(ref_picks):
raise ValueError('ICA solution must contain both reference and\
MEG channels.')
weights = self.get_components()
# take norm of component weights on reference channels for each
# component, divide them by the norm on the standard channels,
# log transform to approximate normal distribution
normrats = np.linalg.norm(weights[ref_picks],
axis=0) / np.linalg.norm(weights[meg_picks], # noqa
axis=0)
scores = np.log(normrats)
self.labels_['ref_meg'] = list(_find_outliers(scores,
threshold=threshold,
tail=1))
else:
raise ValueError('Method "%s" not supported.' % method)
return self.labels_['ref_meg'], scores
    @verbose
    def find_bads_eog(self, inst, ch_name=None, threshold=3.0, start=None,
                      stop=None, l_freq=1, h_freq=10,
                      reject_by_annotation=True, measure='zscore',
                      verbose=None):
        """Detect EOG related components using correlation.

        Detection is based on Pearson correlation between the
        filtered data and the filtered EOG channel.
        Thresholding is based on adaptive z-scoring. The above threshold
        components will be masked and the z-score will be recomputed
        until no supra-threshold component remains.

        Parameters
        ----------
        inst : instance of Raw, Epochs or Evoked
            Object to compute sources from.
        ch_name : str
            The name of the channel to use for EOG peak detection.
            The argument is mandatory if the dataset contains no EOG
            channels.
        threshold : int | float
            The value above which a feature is classified as outlier.
        start : int | float | None
            First sample to include. If float, data will be interpreted as
            time in seconds. If None, data will be used from the first sample.
        stop : int | float | None
            Last sample to not include. If float, data will be interpreted as
            time in seconds. If None, data will be used to the last sample.
        l_freq : float
            Low pass frequency.
        h_freq : float
            High pass frequency.
        %(reject_by_annotation_all)s

            .. versionadded:: 0.14.0
        measure : 'zscore' | 'correlation'
            Which method to use for finding outliers. ``'zscore'`` (default) is
            the iterated Z-scoring method, and ``'correlation'`` is an absolute
            raw correlation threshold with a range of 0 to 1.

            .. versionadded:: 0.21
        %(verbose_meth)s

        Returns
        -------
        eog_idx : list of int
            The indices of EOG related components, sorted by score.
        scores : np.ndarray of float, shape (``n_components_``) | list of array
            The correlation scores.

        See Also
        --------
        find_bads_ecg, find_bads_ref
        """
        # delegate to the generic channel-correlation workflow, one entry
        # per detected EOG channel
        eog_inds = _get_eog_channel_index(ch_name, inst)
        eog_chs = [inst.ch_names[k] for k in eog_inds]
        self.labels_['eog'], scores = self._find_bads_ch(
            inst, eog_chs, threshold=threshold, start=start, stop=stop,
            l_freq=l_freq, h_freq=h_freq, prefix="eog",
            reject_by_annotation=reject_by_annotation, measure=measure)
        return self.labels_['eog'], scores
    @verbose
    def apply(self, inst, include=None, exclude=None, n_pca_components=None,
              start=None, stop=None, verbose=None):
        """Remove selected components from the signal.

        Given the unmixing matrix, transform the data,
        zero out all excluded components, and inverse-transform the data.
        This procedure will reconstruct M/EEG signals from which
        the dynamics described by the excluded components is subtracted.

        Parameters
        ----------
        inst : instance of Raw, Epochs or Evoked
            The data to be processed (i.e., cleaned). It will be modified
            in-place.
        include : array_like of int
            The indices referring to columns in the ummixing matrix. The
            components to be kept.
        exclude : array_like of int
            The indices referring to columns in the ummixing matrix. The
            components to be zeroed out.
        %(n_pca_components_apply)s
        start : int | float | None
            First sample to include. If float, data will be interpreted as
            time in seconds. If None, data will be used from the first sample.
        stop : int | float | None
            Last sample to not include. If float, data will be interpreted as
            time in seconds. If None, data will be used to the last sample.
        %(verbose_meth)s

        Returns
        -------
        out : instance of Raw, Epochs or Evoked
            The processed data.

        Notes
        -----
        .. note:: Applying ICA may introduce a DC shift. If you pass
                  baseline-corrected `~mne.Epochs` or `~mne.Evoked` data,
                  the baseline period of the cleaned data may not be of
                  zero mean anymore. If you require baseline-corrected
                  data, apply baseline correction again after cleaning
                  via ICA. A warning will be emitted to remind you of this
                  fact if you pass baseline-corrected data.

        .. versionchanged:: 0.23
            Warn if instance was baseline-corrected.
        """
        _validate_type(inst, (BaseRaw, BaseEpochs, Evoked), 'inst',
                       'Raw, Epochs, or Evoked')
        # dispatch to the type-specific worker; all share the component
        # selection arguments, Raw additionally takes a start/stop span
        kwargs = dict(include=include, exclude=exclude,
                      n_pca_components=n_pca_components)
        if isinstance(inst, BaseRaw):
            kind, meth = 'Raw', self._apply_raw
            kwargs.update(raw=inst, start=start, stop=stop)
        elif isinstance(inst, BaseEpochs):
            kind, meth = 'Epochs', self._apply_epochs
            kwargs.update(epochs=inst)
        else:  # isinstance(inst, Evoked):
            kind, meth = 'Evoked', self._apply_evoked
            kwargs.update(evoked=inst)
        _check_compensation_grade(self.info, inst.info, 'ICA', kind,
                                  ch_names=self.ch_names)
        if isinstance(inst, (BaseEpochs, Evoked)):
            # ICA can introduce DC shifts; remind users who baseline-corrected
            if getattr(inst, 'baseline', None) is not None:
                warn('The data you passed to ICA.apply() was '
                     'baseline-corrected. Please note that ICA can introduce '
                     'DC shifts, therefore you may wish to consider '
                     'baseline-correcting the cleaned data again.')
        logger.info(f'Applying ICA to {kind} instance')
        return meth(**kwargs)
def _check_exclude(self, exclude):
if exclude is None:
return list(set(self.exclude))
else:
# Allow both self.exclude and exclude to be array-like:
return list(set(self.exclude).union(set(exclude)))
def _apply_raw(self, raw, include, exclude, n_pca_components, start, stop):
"""Aux method."""
_check_preload(raw, "ica.apply")
start, stop = _check_start_stop(raw, start, stop)
picks = pick_types(raw.info, meg=False, include=self.ch_names,
exclude='bads', ref_meg=False)
data = raw[picks, start:stop][0]
data = self._pick_sources(data, include, exclude, n_pca_components)
raw[picks, start:stop] = data
return raw
    def _apply_epochs(self, epochs, include, exclude, n_pca_components):
        """Aux method. Apply ICA cleaning to an Epochs instance in-place."""
        _check_preload(epochs, "ica.apply")
        picks = pick_types(epochs.info, meg=False, ref_meg=False,
                           include=self.ch_names,
                           exclude='bads')
        # special case where epochs come picked but fit was 'unpicked'.
        if len(picks) != len(self.ch_names):
            raise RuntimeError('Epochs don\'t match fitted data: %i channels '
                               'fitted but %i channels supplied. \nPlease '
                               'provide Epochs compatible with '
                               'ica.ch_names' % (len(self.ch_names),
                                                 len(picks)))
        # concatenate epochs along time so _pick_sources sees a 2D array
        data = np.hstack(epochs.get_data(picks))
        data = self._pick_sources(data, include, exclude, n_pca_components)
        # restore epochs, channels, tsl order (undo the hstack above)
        epochs._data[:, picks] = np.array(
            np.split(data, len(epochs.events), 1))
        epochs.preload = True
        return epochs
def _apply_evoked(self, evoked, include, exclude, n_pca_components):
"""Aux method."""
picks = pick_types(evoked.info, meg=False, ref_meg=False,
include=self.ch_names,
exclude='bads')
# special case where evoked come picked but fit was 'unpicked'.
if len(picks) != len(self.ch_names):
raise RuntimeError('Evoked does not match fitted data: %i channels'
' fitted but %i channels supplied. \nPlease '
'provide an Evoked object that\'s compatible '
'with ica.ch_names' % (len(self.ch_names),
len(picks)))
data = evoked.data[picks]
data = self._pick_sources(data, include, exclude, n_pca_components)
# restore evoked
evoked.data[picks] = data
return evoked
    def _pick_sources(self, data, include, exclude, n_pca_components):
        """Aux function.

        Project ``data`` (n_channels, n_times) into ICA space, zero out the
        components not in the kept selection, and project back, keeping
        residual PCA components beyond ``n_components_`` up to
        ``n_pca_components``.
        """
        if n_pca_components is None:
            n_pca_components = self.n_pca_components
        data = self._pre_whiten(data)
        exclude = self._check_exclude(exclude)
        _n_pca_comp = self._check_n_pca_components(n_pca_components)
        n_ch, _ = data.shape
        max_pca_components = self.pca_components_.shape[0]
        if not self.n_components_ <= _n_pca_comp <= max_pca_components:
            raise ValueError(
                f'n_pca_components ({_n_pca_comp}) must be >= '
                f'n_components_ ({self.n_components_}) and <= '
                'the total number of PCA components '
                f'({max_pca_components}).')
        logger.info(f' Transforming to ICA space ({self.n_components_} '
                    f'component{_pl(self.n_components_)})')
        # Apply first PCA
        if self.pca_mean_ is not None:
            data -= self.pca_mean_[:, None]
        # component selection: `include` wins over `exclude`; default is to
        # keep every ICA component
        sel_keep = np.arange(self.n_components_)
        if include not in (None, []):
            sel_keep = np.unique(include)
        elif exclude not in (None, []):
            sel_keep = np.setdiff1d(np.arange(self.n_components_), exclude)
        n_zero = self.n_components_ - len(sel_keep)
        logger.info(f' Zeroing out {n_zero} ICA component{_pl(n_zero)}')
        # Mixing and unmixing should both be shape
        # (n_components_, n_components_), and we need to put these into the
        # upper left part of larger mixing and unmixing matrices of shape
        # (n_ch, _n_pca_comp)
        pca_components = self.pca_components_[:_n_pca_comp]
        assert pca_components.shape == (_n_pca_comp, n_ch)
        assert self.unmixing_matrix_.shape == \
            self.mixing_matrix_.shape == \
            (self.n_components_,) * 2
        unmixing = np.eye(_n_pca_comp)
        unmixing[:self.n_components_, :self.n_components_] = \
            self.unmixing_matrix_
        unmixing = np.dot(unmixing, pca_components)
        logger.info(f' Projecting back using {_n_pca_comp} '
                    f'PCA component{_pl(_n_pca_comp)}')
        mixing = np.eye(_n_pca_comp)
        mixing[:self.n_components_, :self.n_components_] = \
            self.mixing_matrix_
        mixing = pca_components.T @ mixing
        assert mixing.shape == unmixing.shape[::-1] == (n_ch, _n_pca_comp)
        # keep requested components plus residuals (if any)
        sel_keep = np.concatenate(
            (sel_keep, np.arange(self.n_components_, _n_pca_comp)))
        # single round-trip projection matrix (channels -> sources -> channels)
        proj_mat = np.dot(mixing[:, sel_keep], unmixing[sel_keep, :])
        data = np.dot(proj_mat, data)
        assert proj_mat.shape == (n_ch,) * 2
        if self.pca_mean_ is not None:
            data += self.pca_mean_[:, None]
        # restore scaling
        if self.noise_cov is None:  # revert standardization
            data *= self.pre_whitener_
        else:
            data = np.linalg.pinv(self.pre_whitener_, rcond=1e-14) @ data
        return data
    @verbose
    def save(self, fname, verbose=None):
        """Store ICA solution into a fiff file.

        Parameters
        ----------
        fname : str
            The absolute path of the file name to save the ICA solution into.
            The file name should end with -ica.fif or -ica.fif.gz.
        %(verbose_meth)s

        Returns
        -------
        ica : instance of ICA
            The object.

        See Also
        --------
        read_ica
        """
        if self.current_fit == 'unfitted':
            raise RuntimeError('No fit available. Please first fit ICA')
        check_fname(fname, 'ICA', ('-ica.fif', '-ica.fif.gz',
                                   '_ica.fif', '_ica.fif.gz'))
        logger.info('Writing ICA solution to %s...' % fname)
        fid = start_file(fname)
        try:
            _write_ica(fid, self)
            end_file(fid)
        except Exception:
            # on failure, close and remove the partial file before re-raising
            end_file(fid)
            os.remove(fname)
            raise
        return self
def copy(self):
"""Copy the ICA object.
Returns
-------
ica : instance of ICA
The copied object.
"""
return deepcopy(self)
@copy_function_doc_to_method_doc(plot_ica_components)
def plot_components(self, picks=None, ch_type=None, res=64,
vmin=None, vmax=None, cmap='RdBu_r', sensors=True,
colorbar=False, title=None, show=True, outlines='head',
contours=6, image_interp='bilinear',
inst=None, plot_std=True, topomap_args=None,
image_args=None, psd_args=None, reject='auto',
sphere=None, verbose=None):
return plot_ica_components(self, picks=picks, ch_type=ch_type,
res=res, vmin=vmin,
vmax=vmax, cmap=cmap, sensors=sensors,
colorbar=colorbar, title=title, show=show,
outlines=outlines, contours=contours,
image_interp=image_interp,
inst=inst, plot_std=plot_std,
topomap_args=topomap_args,
image_args=image_args, psd_args=psd_args,
reject=reject, sphere=sphere,
verbose=verbose)
@copy_function_doc_to_method_doc(plot_ica_properties)
def plot_properties(self, inst, picks=None, axes=None, dB=True,
plot_std=True, topomap_args=None, image_args=None,
psd_args=None, figsize=None, show=True, reject='auto',
reject_by_annotation=True, *, verbose=None):
return plot_ica_properties(self, inst, picks=picks, axes=axes,
dB=dB, plot_std=plot_std,
topomap_args=topomap_args,
image_args=image_args, psd_args=psd_args,
figsize=figsize, show=show, reject=reject,
reject_by_annotation=reject_by_annotation,
verbose=verbose)
@copy_function_doc_to_method_doc(plot_ica_sources)
def plot_sources(self, inst, picks=None, start=None,
stop=None, title=None, show=True, block=False,
show_first_samp=False, show_scrollbars=True):
return plot_ica_sources(self, inst=inst, picks=picks,
start=start, stop=stop, title=title, show=show,
block=block, show_first_samp=show_first_samp,
show_scrollbars=show_scrollbars)
@copy_function_doc_to_method_doc(plot_ica_scores)
def plot_scores(self, scores, exclude=None, labels=None, axhline=None,
title='ICA component scores', figsize=None, n_cols=None,
show=True):
return plot_ica_scores(
ica=self, scores=scores, exclude=exclude, labels=labels,
axhline=axhline, title=title, figsize=figsize, n_cols=n_cols,
show=show)
@copy_function_doc_to_method_doc(plot_ica_overlay)
def plot_overlay(self, inst, exclude=None, picks=None, start=None,
stop=None, title=None, show=True, n_pca_components=None):
return plot_ica_overlay(self, inst=inst, exclude=exclude, picks=picks,
start=start, stop=stop, title=title, show=show,
n_pca_components=n_pca_components)
    def detect_artifacts(self, raw, start_find=None, stop_find=None,
                         ecg_ch=None, ecg_score_func='pearsonr',
                         ecg_criterion=0.1, eog_ch=None,
                         eog_score_func='pearsonr',
                         eog_criterion=0.1, skew_criterion=0,
                         kurt_criterion=0, var_criterion=-1,
                         add_nodes=None):
        """Run ICA artifacts detection workflow.

        Note. This is still experimental and will most likely change over
        the next releases. For maximum control use the workflow exposed in
        the examples.

        Hints and caveats:

        - It is highly recommended to bandpass filter ECG and EOG
          data and pass them instead of the channel names as ecg_ch and eog_ch
          arguments.
        - please check your results. Detection by kurtosis and variance
          may be powerful but misclassification of brain signals as
          noise cannot be precluded.
        - Consider using shorter times for start_find and stop_find than
          for start and stop. It can save you much time.

        Example invocation (taking advantage of the defaults)::

            ica.detect_artifacts(ecg_channel='MEG 1531', eog_channel='EOG 061')

        Parameters
        ----------
        raw : instance of Raw
            Raw object to draw sources from. No components are actually removed
            here, i.e. ica is not applied to raw in this function. Use
            `ica.apply() <ICA.apply>` for this after inspection of the
            identified components.
        start_find : int | float | None
            First sample to include for artifact search. If float, data will be
            interpreted as time in seconds. If None, data will be used from the
            first sample.
        stop_find : int | float | None
            Last sample to not include for artifact search. If float, data will
            be interpreted as time in seconds. If None, data will be used to
            the last sample.
        ecg_ch : str | ndarray | None
            The ``target`` argument passed to ica.find_sources_raw. Either the
            name of the ECG channel or the ECG time series. If None, this step
            will be skipped.
        ecg_score_func : str | callable
            The ``score_func`` argument passed to ica.find_sources_raw. Either
            the name of function supported by ICA or a custom function.
        ecg_criterion : float | int | list-like | slice
            The indices of the sorted ecg scores. If float, sources with
            absolute scores greater than the criterion will be dropped. Else,
            the absolute scores sorted in descending order will be indexed
            accordingly. E.g. range(2) would return the two sources with the
            highest absolute score. If None, this step will be skipped.
        eog_ch : list | str | ndarray | None
            The ``target`` argument or the list of target arguments
            subsequently passed to ica.find_sources_raw. Either the name of the
            vertical EOG channel or the corresponding EOG time series. If None,
            this step will be skipped.
        eog_score_func : str | callable
            The ``score_func`` argument passed to ica.find_sources_raw. Either
            the name of function supported by ICA or a custom function.
        eog_criterion : float | int | list-like | slice
            The indices of the sorted eog scores. If float, sources with
            absolute scores greater than the criterion will be dropped. Else,
            the absolute scores sorted in descending order will be indexed
            accordingly. E.g. range(2) would return the two sources with the
            highest absolute score. If None, this step will be skipped.
        skew_criterion : float | int | list-like | slice
            The indices of the sorted skewness scores. If float, sources with
            absolute scores greater than the criterion will be dropped. Else,
            the absolute scores sorted in descending order will be indexed
            accordingly. E.g. range(2) would return the two sources with the
            highest absolute score. If None, this step will be skipped.
        kurt_criterion : float | int | list-like | slice
            The indices of the sorted kurtosis scores. If float, sources with
            absolute scores greater than the criterion will be dropped. Else,
            the absolute scores sorted in descending order will be indexed
            accordingly. E.g. range(2) would return the two sources with the
            highest absolute score. If None, this step will be skipped.
        var_criterion : float | int | list-like | slice
            The indices of the sorted variance scores. If float, sources with
            absolute scores greater than the criterion will be dropped. Else,
            the absolute scores sorted in descending order will be indexed
            accordingly. E.g. range(2) would return the two sources with the
            highest absolute score. If None, this step will be skipped.
        add_nodes : list of tuple
            Additional list of tuples carrying the following parameters
            of ica nodes:
            (name : str, target : str | array, score_func : callable,
            criterion : float | int | list-like | slice). This parameter is a
            generalization of the artifact specific parameters above and has
            the same structure. Example::

                add_nodes=('ECG phase lock', 'ECG 01',
                           my_phase_lock_function, 0.5)

        Returns
        -------
        self : instance of ICA
            The ICA object with the detected artifact indices marked for
            exclusion.
        """
        # thin wrapper: the actual workflow lives in _detect_artifacts and
        # records its findings in self.exclude / self.labels_
        logger.info(' Searching for artifacts...')
        _detect_artifacts(self, raw=raw, start_find=start_find,
                          stop_find=stop_find, ecg_ch=ecg_ch,
                          ecg_score_func=ecg_score_func,
                          ecg_criterion=ecg_criterion,
                          eog_ch=eog_ch, eog_score_func=eog_score_func,
                          eog_criterion=eog_criterion,
                          skew_criterion=skew_criterion,
                          kurt_criterion=kurt_criterion,
                          var_criterion=var_criterion,
                          add_nodes=add_nodes)
        return self
@verbose
def _check_n_pca_components(self, _n_pca_comp, verbose=None):
"""Aux function."""
if isinstance(_n_pca_comp, float):
n, ev = _exp_var_ncomp(
self.pca_explained_variance_, _n_pca_comp)
logger.info(f' Selected {n} PCA components by explained '
f'variance ({100 * ev}≥{100 * _n_pca_comp}%)')
_n_pca_comp = n
elif _n_pca_comp is None:
_n_pca_comp = self._max_pca_components
if _n_pca_comp is None:
_n_pca_comp = self.pca_components_.shape[0]
elif _n_pca_comp < self.n_components_:
_n_pca_comp = self.n_components_
return _n_pca_comp
def _exp_var_ncomp(var, n):
cvar = np.asarray(var, dtype=np.float64)
cvar = cvar.cumsum()
cvar /= cvar[-1]
# We allow 1., which would give us N+1
n = min((cvar <= n).sum() + 1, len(cvar))
return n, cvar[n - 1]
def _check_start_stop(raw, start, stop):
    """Aux function: convert start/stop (samples or seconds) to sample ints."""
    out = list()
    for value, default in ((start, 0), (stop, raw.n_times)):
        if value is None:
            out.append(default)
            continue
        try:
            out.append(_ensure_int(value))
        except TypeError:  # not int-like: interpret as time in seconds
            out.append(raw.time_as_index(value)[0])
    return out
@verbose
def ica_find_ecg_events(raw, ecg_source, event_id=999,
                        tstart=0.0, l_freq=5, h_freq=35, qrs_threshold='auto',
                        verbose=None):
    """Find ECG peaks from one selected ICA source.

    Parameters
    ----------
    raw : instance of Raw
        Raw object to draw sources from.
    ecg_source : ndarray
        ICA source resembling ECG to find peaks from.
    event_id : int
        The index to assign to found events.
    tstart : float
        Start detection after tstart seconds. Useful when beginning
        of run is noisy.
    l_freq : float
        Low pass frequency.
    h_freq : float
        High pass frequency.
    qrs_threshold : float | str
        Between 0 and 1. qrs detection threshold. Can also be "auto" to
        automatically choose the threshold that generates a reasonable
        number of heartbeats (40-160 beats / min).
    %(verbose)s

    Returns
    -------
    ecg_events : array
        Events.
    ch_ECG : string
        Name of channel used.
    average_pulse : float.
        Estimated average pulse.
    """
    logger.info('Using ICA source to identify heart beats')
    # run QRS detection on the flattened ICA source
    beats = qrs_detector(raw.info['sfreq'], ecg_source.ravel(),
                         tstart=tstart, thresh_value=qrs_threshold,
                         l_freq=l_freq, h_freq=h_freq)
    n_beats = len(beats)
    # columns: absolute sample index, previous event code (0), event id
    ecg_events = np.c_[beats + raw.first_samp, np.zeros(n_beats),
                       event_id * np.ones(n_beats)]
    return ecg_events
@verbose
def ica_find_eog_events(raw, eog_source=None, event_id=998, l_freq=1,
                        h_freq=10, verbose=None):
    """Locate EOG artifacts from one selected ICA source.

    Parameters
    ----------
    raw : instance of Raw
        The raw data.
    eog_source : ndarray
        ICA source resembling EOG to find peaks from.
    event_id : int
        The index to assign to found events.
    l_freq : float
        Low cut-off frequency in Hz.
    h_freq : float
        High cut-off frequency in Hz.
    %(verbose)s

    Returns
    -------
    eog_events : array
        Events.
    """
    # delegate to the generic EOG detector, treating the single source
    # trace as a one-channel "recording"
    return _find_eog_events(eog_source[np.newaxis], event_id=event_id,
                            l_freq=l_freq, h_freq=h_freq,
                            sampling_rate=raw.info['sfreq'],
                            first_samp=raw.first_samp)
def _get_target_ch(container, target):
    """Resolve ``target`` to channel pick indices, excluding MEG references."""
    # automatic target selection by channel name
    picks = pick_channels(container.ch_names, include=[target])
    refs = pick_types(container.info, meg=False, eeg=False, ref_meg=True)
    if len(refs) > 0:
        # drop any reference-MEG picks from the selection
        picks = list(set(picks) - set(refs))
    if len(picks) == 0:
        raise ValueError('%s not in channel list (%s)' %
                         (target, container.ch_names))
    return picks
def _find_sources(sources, target, score_func):
    """Score ICA sources against ``target`` using ``score_func``."""
    if isinstance(score_func, str):
        # map a known score-function name to its callable; unknown names
        # fall through and fail the callable() check below
        score_func = get_score_funcs().get(score_func, score_func)
    if not callable(score_func):
        raise ValueError('%s is not a valid score_func.' % score_func)
    # with no target, score each source against the constant 1
    return (score_func(sources, target) if target is not None
            else score_func(sources, 1))
def _ica_explained_variance(ica, inst, normalize=False):
    """Check variance accounted for by each component in supplied data.

    Parameters
    ----------
    ica : ICA
        Fitted instance of `mne.preprocessing.ICA`.
    inst : Raw | Epochs | Evoked
        Data to explain with ICA. Instance of Raw, Epochs or Evoked.
    normalize : bool
        If True, scale the result so it sums to 1.

    Returns
    -------
    var : array
        Variance explained by each component.
    """
    # validate argument types up front
    if not isinstance(ica, ICA):
        raise TypeError('first argument must be an instance of ICA.')
    if not isinstance(inst, (BaseRaw, BaseEpochs, Evoked)):
        raise TypeError('second argument must an instance of either Raw, '
                        'Epochs or Evoked.')

    source_data = _get_inst_data(ica.get_sources(inst))

    # epochs: collapse (n_epochs, n_chan, n_samp) -> (n_chan, n_epochs*n_samp)
    if isinstance(inst, BaseEpochs):
        n_epochs, n_chan, n_samp = source_data.shape
        source_data = source_data.transpose(1, 0, 2).reshape(
            (n_chan, n_epochs * n_samp))

    n_chan, n_samp = source_data.shape
    comp_power = (ica.mixing_matrix_ ** 2).sum(axis=0)
    source_power = (source_data ** 2).sum(axis=1)
    var = comp_power * source_power / (n_chan * n_samp - 1)
    if normalize:
        var = var / var.sum()
    return var
def _sort_components(ica, order, copy=True):
"""Change the order of components in ica solution."""
assert ica.n_components_ == len(order)
if copy:
ica = ica.copy()
# reorder components
ica.mixing_matrix_ = ica.mixing_matrix_[:, order]
ica.unmixing_matrix_ = ica.unmixing_matrix_[order, :]
# reorder labels, excludes etc.
if isinstance(order, np.ndarray):
order = list(order)
if ica.exclude:
ica.exclude = [order.index(ic) for ic in ica.exclude]
for k in ica.labels_.keys():
ica.labels_[k] = [order.index(ic) for ic in ica.labels_[k]]
return ica
def _serialize(dict_, outer_sep=';', inner_sep=':'):
    """Serialize a dict to a flat ``key:json`` string joined by ``outer_sep``.

    Values are made JSON-friendly first: callables and known classes
    (RandomState, Covariance) are replaced by their names, int-like values
    by builtin ints, and int-like entries inside nested-dict list values
    are converted to builtin ints. The input mapping is NOT modified
    (previously the nested-dict conversion mutated the caller's dicts in
    place).

    Parameters
    ----------
    dict_ : dict
        The mapping to serialize.
    outer_sep : str
        Separator between ``key:value`` entries.
    inner_sep : str
        Separator between a key and its JSON-encoded value.

    Returns
    -------
    str
        The serialized representation.
    """
    entries = []
    for key, value in dict_.items():
        if callable(value):
            value = value.__name__
        elif isinstance(value, Integral):
            value = int(value)
        elif isinstance(value, dict):
            # py35 json does not support numpy int64; convert int-like list
            # entries to builtin ints on a shallow copy so the caller's
            # nested dict is left untouched
            value = dict(value)
            for subkey, subvalue in value.items():
                if (isinstance(subvalue, list) and len(subvalue) > 0 and
                        isinstance(subvalue[0], (int, np.integer))):
                    value[subkey] = [int(i) for i in subvalue]
        for cls in (np.random.RandomState, Covariance):
            if isinstance(value, cls):
                value = cls.__name__
        entries.append(key + inner_sep + json.dumps(value))

    return outer_sep.join(entries)
def _deserialize(str_, outer_sep=';', inner_sep=':'):
"""Aux Function."""
out = {}
for mapping in str_.split(outer_sep):
k, v = mapping.split(inner_sep, 1)
out[k] = json.loads(v)
return out
def _write_ica(fid, ica):
    """Write an ICA object to an open FIF file.

    Parameters
    ----------
    fid : file
        The file descriptor of the destination FIF file, opened for writing.
    ica : instance of ICA
        The fitted ICA object whose matrices and metadata are written.
    """
    # constructor-level parameters, serialized as a single string tag
    ica_init = dict(noise_cov=ica.noise_cov,
                    n_components=ica.n_components,
                    n_pca_components=ica.n_pca_components,
                    max_pca_components=ica._max_pca_components,
                    current_fit=ica.current_fit,
                    allow_ref_meg=ica.allow_ref_meg)

    if ica.info is not None:
        start_block(fid, FIFF.FIFFB_MEAS)
        write_id(fid, FIFF.FIFF_BLOCK_ID)
        if ica.info['meas_id'] is not None:
            write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, ica.info['meas_id'])

        # Write measurement info
        write_meas_info(fid, ica.info)
        end_block(fid, FIFF.FIFFB_MEAS)

    start_block(fid, FIFF.FIFFB_MNE_ICA)

    # ICA interface params
    write_string(fid, FIFF.FIFF_MNE_ICA_INTERFACE_PARAMS,
                 _serialize(ica_init))

    # Channel names
    if ica.ch_names is not None:
        write_name_list(fid, FIFF.FIFF_MNE_ROW_NAMES, ica.ch_names)

    # samples on fit; fit-derived attributes use getattr() so an unfitted
    # ICA can still be written (they serialize as None)
    n_samples = getattr(ica, 'n_samples_', None)
    ica_misc = {'n_samples_': (None if n_samples is None else int(n_samples)),
                'labels_': getattr(ica, 'labels_', None),
                'method': getattr(ica, 'method', None),
                'n_iter_': getattr(ica, 'n_iter_', None),
                'fit_params': getattr(ica, 'fit_params', None)}

    # ICA misc params
    write_string(fid, FIFF.FIFF_MNE_ICA_MISC_PARAMS,
                 _serialize(ica_misc))

    # Whitener
    write_double_matrix(fid, FIFF.FIFF_MNE_ICA_WHITENER, ica.pre_whitener_)

    # PCA components_
    write_double_matrix(fid, FIFF.FIFF_MNE_ICA_PCA_COMPONENTS,
                        ica.pca_components_)

    # PCA mean_
    write_double_matrix(fid, FIFF.FIFF_MNE_ICA_PCA_MEAN, ica.pca_mean_)

    # PCA explained_variance_
    write_double_matrix(fid, FIFF.FIFF_MNE_ICA_PCA_EXPLAINED_VAR,
                        ica.pca_explained_variance_)

    # ICA unmixing
    write_double_matrix(fid, FIFF.FIFF_MNE_ICA_MATRIX, ica.unmixing_matrix_)

    # Write bad components
    write_int(fid, FIFF.FIFF_MNE_ICA_BADS, list(ica.exclude))

    # Done!
    end_block(fid, FIFF.FIFFB_MNE_ICA)
@verbose
def read_ica(fname, verbose=None):
    """Restore ICA solution from fif file.

    Parameters
    ----------
    fname : str
        Absolute path to fif file containing ICA matrices.
        The file name should end with -ica.fif or -ica.fif.gz.
    %(verbose)s

    Returns
    -------
    ica : instance of ICA
        The ICA estimator.
    """
    check_fname(fname, 'ICA', ('-ica.fif', '-ica.fif.gz',
                               '_ica.fif', '_ica.fif.gz'))

    logger.info('Reading %s ...' % fname)
    fid, tree, _ = fiff_open(fname)

    try:
        # we used to store bads that weren't part of the info...
        info, _ = read_meas_info(fid, tree, clean_bads=True)
    except ValueError:
        logger.info('Could not find the measurement info. \n'
                    'Functionality requiring the info won\'t be'
                    ' available.')
        info = None

    ica_data = dir_tree_find(tree, FIFF.FIFFB_MNE_ICA)
    if len(ica_data) == 0:
        ica_data = dir_tree_find(tree, 123)  # Constant 123 Used before v 0.11
        if len(ica_data) == 0:
            fid.close()
            raise ValueError('Could not find ICA data')

    # walk the directory entries and pull out each stored matrix/param blob
    # NOTE(review): if an expected tag is missing from the file, the
    # corresponding local variable stays unbound and the code below will
    # raise NameError rather than a descriptive error — confirm acceptable
    my_ica_data = ica_data[0]
    for d in my_ica_data['directory']:
        kind = d.kind
        pos = d.pos
        if kind == FIFF.FIFF_MNE_ICA_INTERFACE_PARAMS:
            tag = read_tag(fid, pos)
            ica_init = tag.data
        elif kind == FIFF.FIFF_MNE_ROW_NAMES:
            tag = read_tag(fid, pos)
            ch_names = tag.data
        elif kind == FIFF.FIFF_MNE_ICA_WHITENER:
            tag = read_tag(fid, pos)
            pre_whitener = tag.data
        elif kind == FIFF.FIFF_MNE_ICA_PCA_COMPONENTS:
            tag = read_tag(fid, pos)
            pca_components = tag.data
        elif kind == FIFF.FIFF_MNE_ICA_PCA_EXPLAINED_VAR:
            tag = read_tag(fid, pos)
            pca_explained_variance = tag.data
        elif kind == FIFF.FIFF_MNE_ICA_PCA_MEAN:
            tag = read_tag(fid, pos)
            pca_mean = tag.data
        elif kind == FIFF.FIFF_MNE_ICA_MATRIX:
            tag = read_tag(fid, pos)
            unmixing_matrix = tag.data
        elif kind == FIFF.FIFF_MNE_ICA_BADS:
            tag = read_tag(fid, pos)
            exclude = tag.data
        elif kind == FIFF.FIFF_MNE_ICA_MISC_PARAMS:
            tag = read_tag(fid, pos)
            ica_misc = tag.data

    fid.close()

    # decode the serialized parameter strings (see _serialize/_deserialize)
    ica_init, ica_misc = [_deserialize(k) for k in (ica_init, ica_misc)]
    n_pca_components = ica_init.pop('n_pca_components')
    current_fit = ica_init.pop('current_fit')
    max_pca_components = ica_init.pop('max_pca_components')
    method = ica_misc.get('method', 'fastica')
    if method in _KNOWN_ICA_METHODS:
        ica_init['method'] = method
    if ica_init['noise_cov'] == Covariance.__name__:
        logger.info('Reading whitener drawn from noise covariance ...')

    logger.info('Now restoring ICA solution ...')

    # make sure dtypes are np.float64 to satisfy fast_dot
    def f(x):
        return x.astype(np.float64)

    # only forward constructor args that the current ICA.__init__ accepts,
    # so files written by older versions still load
    ica_init = {k: v for k, v in ica_init.items()
                if k in _get_args(ICA.__init__)}
    ica = ICA(**ica_init)
    ica.current_fit = current_fit
    ica.ch_names = ch_names.split(':')
    if n_pca_components is not None and \
            not isinstance(n_pca_components, int_like):
        n_pca_components = np.float64(n_pca_components)
    ica.n_pca_components = n_pca_components
    ica.pre_whitener_ = f(pre_whitener)
    ica.pca_mean_ = f(pca_mean)
    ica.pca_components_ = f(pca_components)
    ica.n_components_ = unmixing_matrix.shape[0]
    ica._max_pca_components = max_pca_components
    ica._update_ica_names()
    ica.pca_explained_variance_ = f(pca_explained_variance)
    ica.unmixing_matrix_ = f(unmixing_matrix)
    ica._update_mixing_matrix()
    ica.exclude = [] if exclude is None else list(exclude)
    ica.info = info
    # optional fit metadata (may be absent in files from older versions)
    if 'n_samples_' in ica_misc:
        ica.n_samples_ = ica_misc['n_samples_']
    if 'labels_' in ica_misc:
        labels_ = ica_misc['labels_']
        if labels_ is not None:
            ica.labels_ = labels_
    if 'method' in ica_misc:
        ica.method = ica_misc['method']
    if 'n_iter_' in ica_misc:
        ica.n_iter_ = ica_misc['n_iter_']
    if 'fit_params' in ica_misc:
        ica.fit_params = ica_misc['fit_params']

    logger.info('Ready.')

    return ica
_ica_node = namedtuple('Node', 'name target score_func criterion')
def _detect_artifacts(ica, raw, start_find, stop_find, ecg_ch, ecg_score_func,
                      ecg_criterion, eog_ch, eog_score_func, eog_criterion,
                      skew_criterion, kurt_criterion, var_criterion,
                      add_nodes):
    """Aux Function.

    Score ICA sources against a set of artifact "nodes" (ECG, EOG,
    skewness, kurtosis, variance, plus any caller-supplied nodes) and
    append matching component indices to ``ica.exclude`` in place.
    Duplicate indices are removed at the end.
    """
    from scipy import stats

    # build one scoring node per requested artifact type
    nodes = []
    if ecg_ch is not None:
        nodes += [_ica_node('ECG', ecg_ch, ecg_score_func, ecg_criterion)]

    if eog_ch not in [None, []]:
        if not isinstance(eog_ch, list):
            eog_ch = [eog_ch]
        for idx, ch in enumerate(eog_ch):
            nodes += [_ica_node('EOG %02d' % idx, ch, eog_score_func,
                      eog_criterion)]

    if skew_criterion is not None:
        nodes += [_ica_node('skewness', None, stats.skew, skew_criterion)]

    if kurt_criterion is not None:
        nodes += [_ica_node('kurtosis', None, stats.kurtosis, kurt_criterion)]

    if var_criterion is not None:
        nodes += [_ica_node('variance', None, np.var, var_criterion)]

    if add_nodes is not None:
        nodes.extend(add_nodes)

    for node in nodes:
        scores = ica.score_sources(raw, start=start_find, stop=stop_find,
                                   target=node.target,
                                   score_func=node.score_func)
        # a float criterion is an absolute-score threshold; anything else is
        # used as an index/slice into the scores sorted by decreasing |score|
        if isinstance(node.criterion, float):
            found = list(np.where(np.abs(scores) > node.criterion)[0])
        else:
            # Sort in descending order; use (-abs()), rather than [::-1] to
            # keep any NaN values in the end (and also keep the order of same
            # values):
            found = list(np.atleast_1d((-np.abs(scores)).argsort()
                         [node.criterion]))

        case = (len(found), _pl(found), node.name)
        logger.info('    found %s artifact%s by %s' % case)
        ica.exclude = list(ica.exclude) + found

    logger.info('Artifact indices found:\n    ' + str(ica.exclude).strip('[]'))
    if len(set(ica.exclude)) != len(ica.exclude):
        logger.info('    Removing duplicate indices...')
        ica.exclude = list(set(ica.exclude))

    logger.info('Ready.')
@verbose
def _band_pass_filter(inst, sources, target, l_freq, h_freq, verbose=None):
    """Band-pass filter sources and target when both band edges are given."""
    if l_freq is None and h_freq is None:
        # no filtering requested
        return sources, target
    if l_freq is None or h_freq is None:
        raise ValueError('Must specify both pass bands')

    logger.info('... filtering ICA sources')
    # use FIR here, steeper is better
    kw = dict(phase='zero-double', filter_length='10s', fir_window='hann',
              l_trans_bandwidth=0.5, h_trans_bandwidth=0.5,
              fir_design='firwin2')
    sources = filter_data(sources, inst.info['sfreq'], l_freq, h_freq, **kw)
    logger.info('... filtering target')
    target = filter_data(target, inst.info['sfreq'], l_freq, h_freq, **kw)
    return sources, target
# #############################################################################
# CORRMAP
def _find_max_corrs(all_maps, target, threshold):
    """Compute correlations between template and target components.

    Parameters
    ----------
    all_maps : list of ndarray
        Per-subject component maps.
    target : ndarray
        The template map to correlate against.
    threshold : float
        If <= 1, an absolute-correlation cutoff; otherwise a z-score
        threshold passed to ``_find_outliers``.

    Returns
    -------
    newtarget : ndarray | list
        Updated template built from the selected maps (empty list if no
        map was selected).
    median_corr_with_target : float
        Median absolute correlation of selected maps with the template.
    sim_i_o : float
        Similarity (|corr|) between the input and output templates.
    max_corrs : list of list of int
        Indices of selected components, per subject.
    """
    all_corrs = [compute_corr(target, subj.T) for subj in all_maps]
    abs_corrs = [np.abs(a) for a in all_corrs]
    corr_polarities = [np.sign(a) for a in all_corrs]

    # select components per subject, by cutoff or by outlier detection
    if threshold <= 1:
        max_corrs = [list(np.nonzero(s_corr > threshold)[0])
                     for s_corr in abs_corrs]
    else:
        max_corrs = [list(_find_outliers(s_corr, threshold=threshold))
                     for s_corr in abs_corrs]

    am = [l_[i] for l_, i_s in zip(abs_corrs, max_corrs)
          for i in i_s]
    median_corr_with_target = np.median(am) if len(am) > 0 else 0

    polarities = [l_[i] for l_, i_s in zip(corr_polarities, max_corrs)
                  for i in i_s]

    maxmaps = [l_[i] for l_, i_s in zip(all_maps, max_corrs)
               for i in i_s]

    if len(maxmaps) == 0:
        # nothing selected at this threshold
        return [], 0, 0, []

    # build the new template as the polarity-corrected, standardized
    # average of the selected maps
    newtarget = np.zeros(maxmaps[0].size)
    std_of_maps = np.std(np.asarray(maxmaps))
    # BUG FIX: this was np.std(), contradicting the variable name and the
    # standardization performed below; use the mean as intended
    mean_of_maps = np.mean(np.asarray(maxmaps))
    for maxmap, polarity in zip(maxmaps, polarities):
        newtarget += (maxmap / std_of_maps - mean_of_maps) * polarity

    newtarget /= len(maxmaps)
    newtarget *= std_of_maps

    # similarity between the input template and the rebuilt one
    sim_i_o = np.abs(np.corrcoef(target, newtarget)[1, 0])

    return newtarget, median_corr_with_target, sim_i_o, max_corrs
@verbose
def corrmap(icas, template, threshold="auto", label=None, ch_type="eeg",
            plot=True, show=True, outlines='head',
            sensors=True, contours=6, cmap=None, sphere=None, verbose=None):
    """Find similar Independent Components across subjects by map similarity.

    Corrmap (Viola et al. 2009 Clin Neurophysiol) identifies the best group
    match to a supplied template. Typically, feed it a list of fitted ICAs and
    a template IC, for example, the blink for the first subject, to identify
    specific ICs across subjects.

    The specific procedure consists of two iterations. In a first step, the
    maps best correlating with the template are identified. In the next step,
    the analysis is repeated with the mean of the maps identified in the first
    stage.

    Run with ``plot`` and ``show`` set to ``True`` and ``label=False`` to find
    good parameters. Then, run with labelling enabled to apply the
    labelling in the IC objects. (Running with both ``plot`` and ``labels``
    off does nothing.)

    Outputs a list of fitted ICAs with the indices of the marked ICs in a
    specified field.

    The original Corrmap website: www.debener.de/corrmap/corrmapplugin1.html

    Parameters
    ----------
    icas : list of mne.preprocessing.ICA
        A list of fitted ICA objects.
    template : tuple | np.ndarray, shape (n_components,)
        Either a tuple with two elements (int, int) representing the list
        indices of the set from which the template should be chosen, and the
        template. E.g., if template=(1, 0), the first IC of the 2nd ICA object
        is used.
        Or a numpy array whose size corresponds to each IC map from the
        supplied maps, in which case this map is chosen as the template.
    threshold : "auto" | list of float | float
        Correlation threshold for identifying ICs
        If "auto", search for the best map by trying all correlations between
        0.6 and 0.95. In the original proposal, lower values are considered,
        but this is not yet implemented.
        If list of floats, search for the best map in the specified range of
        correlation strengths. As correlation values, must be between 0 and 1
        If float > 0, select ICs correlating better than this.
        If float > 1, use z-scoring to identify ICs within subjects (not in
        original Corrmap)
        Defaults to "auto".
    label : None | str
        If not None, categorised ICs are stored in a dictionary ``labels_``
        under the given name. Preexisting entries will be appended to
        (excluding repeats), not overwritten. If None, a dry run is performed
        and the supplied ICs are not changed.
    ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg'
        The channel type to plot. Defaults to 'eeg'.
    plot : bool
        Should constructed template and selected maps be plotted? Defaults
        to True.
    show : bool
        Show figures if True.
    %(topomap_outlines)s
    sensors : bool | str
        Add markers for sensor locations to the plot. Accepts matplotlib plot
        format string (e.g., 'r+' for red plusses). If True, a circle will be
        used (via .add_artist). Defaults to True.
    contours : int | array of float
        The number of contour lines to draw. If 0, no contours will be drawn.
        When an integer, matplotlib ticker locator is used to find suitable
        values for the contour thresholds (may sometimes be inaccurate, use
        array for accuracy). If an array, the values represent the levels for
        the contours. Defaults to 6.
    cmap : None | matplotlib colormap
        Colormap for the plot. If ``None``, defaults to 'Reds_r' for norm data,
        otherwise to 'RdBu_r'.
    %(topomap_sphere_auto)s
    %(verbose)s

    Returns
    -------
    template_fig : Figure
        Figure showing the template.
    labelled_ics : Figure
        Figure showing the labelled ICs in all ICA decompositions.
    """
    if not isinstance(plot, bool):
        raise ValueError("`plot` must be of type `bool`")

    # corrmap only makes sense if all decompositions share a montage
    same_chans = _check_all_same_channel_names(icas)
    if same_chans is False:
        raise ValueError("Not all ICA instances have the same channel names. "
                         "Corrmap requires all instances to have the same "
                         "montage. Consider interpolating bad channels before "
                         "running ICA.")

    threshold_extra = ''
    if threshold == 'auto':
        # scan thresholds 0.60 .. 0.94 in steps of 0.01
        threshold = np.arange(60, 95, dtype=np.float64) / 100.
        threshold_extra = ' ("auto")'

    all_maps = [ica.get_components().T for ica in icas]

    # check if template is an index to one IC in one ICA object, or an array
    if len(template) == 2:
        target = all_maps[template[0]][template[1]]
        is_subject = True
    elif template.ndim == 1 and len(template) == all_maps[0].shape[1]:
        target = template
        is_subject = False
    else:
        raise ValueError("`template` must be a length-2 tuple or an array the "
                         "size of the ICA maps.")

    template_fig, labelled_ics = None, None
    if plot is True:
        if is_subject:  # plotting from an ICA object
            ttl = 'Template from subj. {}'.format(str(template[0]))
            template_fig = icas[template[0]].plot_components(
                picks=template[1], ch_type=ch_type, title=ttl,
                outlines=outlines, cmap=cmap, contours=contours,
                show=show, topomap_args=dict(sphere=sphere))
        else:  # plotting an array
            template_fig = _plot_corrmap([template], [0], [0], ch_type,
                                         icas[0].copy(), "Template",
                                         outlines=outlines, cmap=cmap,
                                         contours=contours,
                                         show=show, template=True,
                                         sphere=sphere)
        template_fig.subplots_adjust(top=0.8)
        template_fig.canvas.draw()

    # first run: use user-selected map
    threshold = np.atleast_1d(np.array(threshold, float)).ravel()
    # NOTE(review): the wording "using when" in this message looks garbled —
    # confirm the intended phrasing before changing a runtime string
    threshold_err = ('No component detected using when z-scoring '
                     'threshold%s %s, consider using a more lenient '
                     'threshold' % (threshold_extra, threshold))
    if len(all_maps) == 0:
        raise RuntimeError(threshold_err)
    paths = [_find_max_corrs(all_maps, target, t) for t in threshold]
    # find iteration with highest avg correlation with target
    new_target, _, _, _ = paths[np.argmax([path[2] for path in paths])]

    # second run: use output from first run
    if len(all_maps) == 0 or len(new_target) == 0:
        raise RuntimeError(threshold_err)
    paths = [_find_max_corrs(all_maps, new_target, t) for t in threshold]
    del new_target
    # find iteration with highest avg correlation with target
    _, median_corr, _, max_corrs = paths[
        np.argmax([path[1] for path in paths])]

    allmaps, indices, subjs, nones = [list() for _ in range(4)]
    logger.info('Median correlation with constructed map: %0.3f' % median_corr)
    del median_corr
    if plot is True:
        logger.info('Displaying selected ICs per subject.')

    # apply labels per subject and gather maps for plotting
    for ii, (ica, max_corr) in enumerate(zip(icas, max_corrs)):
        if len(max_corr) > 0:
            if isinstance(max_corr[0], np.ndarray):
                max_corr = max_corr[0]
            if label is not None:
                ica.labels_[label] = list(set(list(max_corr) +
                                          ica.labels_.get(label, list())))
            if plot is True:
                allmaps.extend(ica.get_components()[:, max_corr].T)
                subjs.extend([ii] * len(max_corr))
                indices.extend(max_corr)
        else:
            if (label is not None) and (label not in ica.labels_):
                ica.labels_[label] = list()
            nones.append(ii)

    if len(nones) == 0:
        logger.info('At least 1 IC detected for each subject.')
    else:
        logger.info('No maps selected for subject%s %s, '
                    'consider a more liberal threshold.'
                    % (_pl(nones), nones))

    if plot is True:
        # NOTE(review): ``ica`` here is the loop variable left over from the
        # loop above (i.e. the last ICA) — presumably used only for channel
        # info; confirm this is intended
        labelled_ics = _plot_corrmap(allmaps, subjs, indices, ch_type, ica,
                                     label, outlines=outlines, cmap=cmap,
                                     contours=contours,
                                     show=show, sphere=sphere)
        return template_fig, labelled_ics
    else:
        return None
@verbose
def read_ica_eeglab(fname, *, verbose=None):
    """Load ICA information saved in an EEGLAB .set file.

    Parameters
    ----------
    fname : str
        Complete path to a .set EEGLAB file that contains an ICA object.
    %(verbose)s

    Returns
    -------
    ica : instance of ICA
        An ICA object based on the information contained in the input file.
    """
    from scipy import linalg
    eeg = _check_load_mat(fname, None)
    info, eeg_montage, _ = _get_info(eeg)
    info.set_montage(eeg_montage)
    # EEGLAB stores 1-based channel indices; convert to 0-based picks
    pick_info(info, np.round(eeg['icachansind']).astype(int) - 1, copy=False)

    rank = eeg.icasphere.shape[0]
    n_components = eeg.icaweights.shape[0]

    ica = ICA(method='imported_eeglab', n_components=n_components)

    ica.current_fit = "eeglab"
    ica.ch_names = info["ch_names"]
    ica.n_pca_components = None
    ica.n_components_ = n_components

    n_ch = len(ica.ch_names)
    assert len(eeg.icachansind) == n_ch

    # identity placeholders: EEGLAB's decomposition does not carry MNE-style
    # pre-whitening or a PCA mean, so use ones/zeros here
    ica.pre_whitener_ = np.ones((n_ch, 1))
    ica.pca_mean_ = np.zeros(n_ch)

    assert eeg.icasphere.shape[1] == n_ch
    assert eeg.icaweights.shape == (n_components, rank)

    # When PCA reduction is used in EEGLAB, runica returns
    # weights= weights*sphere*eigenvectors(:,1:ncomps)';
    # sphere = eye(urchans). When PCA reduction is not used, we have:
    #
    #     eeg.icawinv == pinv(eeg.icaweights @ eeg.icasphere)
    #
    # So in either case, we can use SVD to get our square whitened
    # weights matrix (u * s) and our PCA vectors (v) back:
    use = eeg.icaweights @ eeg.icasphere
    use_check = linalg.pinv(eeg.icawinv)
    if not np.allclose(use, use_check, rtol=1e-6):
        warn('Mismatch between icawinv and icaweights @ icasphere from EEGLAB '
             'possibly due to ICA component removal, assuming icawinv is '
             'correct')
        use = use_check
    u, s, v = _safe_svd(use, full_matrices=False)
    ica.unmixing_matrix_ = u * s
    ica.pca_components_ = v
    ica.pca_explained_variance_ = s * s
    ica.info = info
    ica._update_mixing_matrix()
    ica._update_ica_names()
    return ica
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
# Copyright (C) Duncan Macleod (2014-2020)
#
# This file is part of GWpy.
#
# GWpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWpy. If not, see <http://www.gnu.org/licenses/>.
"""Extension of the basic matplotlib Figure for GWpy
"""
import itertools
import importlib
import warnings
from collections.abc import (KeysView, ValuesView)
from itertools import zip_longest
import numpy
from matplotlib import (figure, get_backend, _pylab_helpers)
from matplotlib.artist import setp
from matplotlib.gridspec import GridSpec
from matplotlib.ticker import LogFormatterSciNotation
from matplotlib.projections import get_projection_class
from . import (colorbar as gcbar, utils)
from .gps import GPS_SCALES
from .log import LogFormatter
from .rc import (rcParams, MPL_RCPARAMS, get_subplot_params)
__all__ = ['Plot']
# detect whether we are running inside IPython: the interpreter injects the
# ``__IPYTHON__`` builtin, so a NameError means a plain Python session
try:
    __IPYTHON__
except NameError:
    IPYTHON = False
else:
    IPYTHON = True
# container types accepted when grouping data objects onto axes
iterable_types = (list, tuple, KeysView, ValuesView,)
def interactive_backend():
    """Returns `True` if the current backend is interactive
    """
    from matplotlib.rcsetup import interactive_bk
    backend = get_backend()
    return backend in interactive_bk
def get_backend_mod(name=None):
    """Returns the imported module for the given backend name

    Parameters
    ----------
    name : `str`, optional
        the name of the backend, defaults to the current backend.

    Returns
    -------
    backend_mod: `module`
        the module as returned by :func:`importlib.import_module`

    Examples
    --------
    >>> from gwpy.plot.plot import get_backend_mod
    >>> print(get_backend_mod('agg'))
    <module 'matplotlib.backends.backend_agg' from ... >
    """
    if name is None:
        name = get_backend()
    # ``module://`` names point directly at an importable module;
    # otherwise look the backend up under matplotlib.backends
    if name.startswith("module://"):
        backend_name = name[9:]
    else:
        backend_name = "matplotlib.backends.backend_{}".format(name.lower())
    return importlib.import_module(backend_name)
class Plot(figure.Figure):
"""An extension of the core matplotlib `~matplotlib.figure.Figure`
The `Plot` provides a number of methods to simplify generating
figures from GWpy data objects, and modifying them on-the-fly in
interactive mode.
"""
    def __init__(self, *data, **kwargs):
        """Create a new `Plot`, optionally drawing ``data`` on new axes.

        Keyword arguments are split: figure-level keys (``utils.FIGURE_PARAMS``)
        go to `_init_figure`, the rest are forwarded to `_init_axes`.
        """
        # get default x-axis scale if all axes have the same x-axis units
        kwargs.setdefault('xscale', _parse_xscale(
            _group_axes_data(data, flat=True)))

        # set default size for time-axis figures
        if (
                kwargs.get('projection', None) == 'segments'
                or kwargs.get('xscale') in GPS_SCALES
        ):
            kwargs.setdefault('figsize', (12, 6))
            kwargs.setdefault('xscale', 'auto-gps')

        # initialise figure
        figure_kw = {key: kwargs.pop(key) for key in utils.FIGURE_PARAMS if
                     key in kwargs}
        self._init_figure(**figure_kw)

        # initialise axes with data
        if data or kwargs.get("geometry"):
            self._init_axes(data, **kwargs)
    def _init_figure(self, **kwargs):
        """Initialise the underlying `Figure` and register a backend manager.

        Replicates the interactivity wiring normally done by
        :func:`matplotlib.pyplot.figure`, so this figure behaves like a
        pyplot-managed one (can be shown, is tracked by ``Gcf``).
        """
        from matplotlib import pyplot

        # add new attributes
        self.colorbars = []
        self._coloraxes = []

        # create Figure; pick the next free pyplot figure number
        num = kwargs.pop('num', max(pyplot.get_fignums() or {0}) + 1)
        self._parse_subplotpars(kwargs)
        super().__init__(**kwargs)
        self.number = num

        # add interactivity (scraped from pyplot.figure())
        backend_mod = get_backend_mod()
        try:
            manager = backend_mod.new_figure_manager_given_figure(num, self)
        except AttributeError:
            # backend does not expose the helper; build a generic
            # canvas/manager pair by hand
            upstream_mod = importlib.import_module(
                pyplot.new_figure_manager.__module__)
            canvas = upstream_mod.FigureCanvasBase(self)
            manager = upstream_mod.FigureManagerBase(canvas, 1)
        # clicking the figure makes it the active pyplot figure
        manager._cidgcf = manager.canvas.mpl_connect(
            'button_press_event',
            lambda ev: _pylab_helpers.Gcf.set_active(manager))
        _pylab_helpers.Gcf.set_active(manager)
        pyplot.draw_if_interactive()
    def _init_axes(self, data, method='plot',
                   xscale=None, sharex=False, sharey=False,
                   geometry=None, separate=None, **kwargs):
        """Populate this figure with data, creating `Axes` as necessary

        ``data`` is partitioned into groups (one group per subplot) and each
        group is drawn with ``getattr(ax, method)``. Returns the figure's
        list of `Axes`.
        """
        # normalise bool sharing flags to matplotlib's string form
        if isinstance(sharex, bool):
            sharex = "all" if sharex else "none"
        if isinstance(sharey, bool):
            sharey = "all" if sharey else "none"

        # parse keywords
        axes_kw = {key: kwargs.pop(key) for key in utils.AXES_PARAMS if
                   key in kwargs}

        # handle geometry and group axes: a full geometry means one
        # dataset per panel
        if geometry is not None and geometry[0] * geometry[1] == len(data):
            separate = True
        axes_groups = _group_axes_data(data, separate=separate)
        if geometry is None:
            geometry = (len(axes_groups), 1)
        nrows, ncols = geometry
        if axes_groups and nrows * ncols != len(axes_groups):
            # mismatching data and geometry
            raise ValueError("cannot group data into {0} axes with a "
                             "{1}x{2} grid".format(len(axes_groups), nrows,
                                                   ncols))

        # create grid spec
        gs = GridSpec(nrows, ncols)
        axarr = numpy.empty((nrows, ncols), dtype=object)

        # set default labels from a unit common to all datasets (if any)
        defxlabel = 'xlabel' not in axes_kw
        defylabel = 'ylabel' not in axes_kw
        flatdata = [s for group in axes_groups for s in group]
        for axis in ('x', 'y'):
            unit = _common_axis_unit(flatdata, axis=axis)
            if unit:
                axes_kw.setdefault('{}label'.format(axis),
                                   unit.to_string('latex_inline_dimensional'))

        # create axes for each group and draw each data object
        for group, (row, col) in zip_longest(
                axes_groups, itertools.product(range(nrows), range(ncols)),
                fillvalue=[]):
            # create Axes, sharing with the first row/column as requested
            shared_with = {"none": None, "all": axarr[0, 0],
                           "row": axarr[row, 0], "col": axarr[0, col]}
            axes_kw["sharex"] = shared_with[sharex]
            axes_kw["sharey"] = shared_with[sharey]
            axes_kw['xscale'] = xscale if xscale else _parse_xscale(group)
            ax = axarr[row, col] = self.add_subplot(gs[row, col], **axes_kw)

            # plot data; image-like methods take one object per call
            plot_func = getattr(ax, method)
            if method in ('imshow', 'pcolormesh'):
                for obj in group:
                    plot_func(obj, **kwargs)
            elif group:
                plot_func(*group, **kwargs)

            # set default axis labels
            for axis, share, pos, n, def_ in (
                    (ax.xaxis, sharex, row, nrows, defxlabel),
                    (ax.yaxis, sharey, col, ncols, defylabel),
            ):
                # hide label if shared axis and not bottom left panel
                if share == 'all' and pos < n - 1:
                    axis.set_label_text('')
                # otherwise set default status
                else:
                    axis.isDefault_label = def_

        return self.axes
@staticmethod
def _parse_subplotpars(kwargs):
# dynamically set the subplot positions based on the figure size
# -- only if the user hasn't customised the subplot params
figsize = kwargs.get('figsize') or rcParams['figure.figsize']
subplotpars = get_subplot_params(figsize)
use_subplotpars = 'subplotpars' not in kwargs and all([
rcParams['figure.subplot.%s' % pos]
== MPL_RCPARAMS['figure.subplot.%s' % pos]
for pos in ('left', 'bottom', 'right', 'top')
])
if use_subplotpars:
kwargs['subplotpars'] = subplotpars
# -- Plot methods ---------------------------
def refresh(self):
"""Refresh the current figure
"""
for cbar in self.colorbars:
cbar.draw_all()
self.canvas.draw()
    def show(self, block=None, warn=True):
        """Display the current figure (if possible).

        If blocking, this method replicates the behaviour of
        :func:`matplotlib.pyplot.show()`, otherwise it just calls up to
        :meth:`~matplotlib.figure.Figure.show`.

        This method also supports repeatedly showing the same figure, even
        after closing the display window, which isn't supported by
        `pyplot.show` (AFAIK).

        Parameters
        ----------
        block : `bool`, optional
            open the figure and block until the figure is closed, otherwise
            open the figure as a detached window, default: `None`.
            If `None`, block if using an interactive backend and _not_
            inside IPython.

        warn : `bool`, optional
            print a warning if matplotlib is not running in an interactive
            backend and cannot display the figure, default: `True`.
        """
        # this method tries to reproduce the functionality of pyplot.show,
        # mainly for user convenience. However, as of matplotlib-3.0.0,
        # pyplot.show() ends up calling _back_ to Plot.show(),
        # so we have to be careful not to end up in a recursive loop
        import inspect
        try:
            callframe = inspect.currentframe().f_back
        except AttributeError:
            pass
        else:
            # called from inside matplotlib itself -> never block, to break
            # the potential pyplot.show() -> Plot.show() recursion
            if 'matplotlib' in callframe.f_code.co_filename:
                block = False

        # render
        super().show(warn=warn)

        # don't block on ipython with interactive backends
        if block is None and interactive_backend():
            block = not IPYTHON

        # block in GUI loop (stolen from mpl.backend_bases._Backend.show)
        if block:
            backend_mod = get_backend_mod()
            backend_mod.Show().mainloop()
def save(self, *args, **kwargs):
"""Save the figure to disk.
This method is an alias to :meth:`~matplotlib.figure.Figure.savefig`,
all arguments are passed directory to that method.
"""
self.savefig(*args, **kwargs)
def close(self):
"""Close the plot and release its memory.
"""
from matplotlib.pyplot import close
for ax in self.axes[::-1]:
# avoid matplotlib/matplotlib#9970
ax.set_xscale('linear')
ax.set_yscale('linear')
# clear the axes
ax.cla()
# close the figure
close(self)
# -- axes manipulation ----------------------
def get_axes(self, projection=None):
"""Find all `Axes`, optionally matching the given projection
Parameters
----------
projection : `str`
name of axes types to return
Returns
-------
axlist : `list` of `~matplotlib.axes.Axes`
"""
if projection is None:
return self.axes
return [ax for ax in self.axes if ax.name == projection.lower()]
# -- colour bars ----------------------------
    def colorbar(self, mappable=None, cax=None, ax=None, fraction=0.,
                 emit=True, **kwargs):
        """Add a colorbar to the current `Plot`

        A colorbar must be associated with an `Axes` on this `Plot`,
        and an existing mappable element (e.g. an image).

        Parameters
        ----------
        mappable : matplotlib data collection
            Collection against which to map the colouring

        cax : `~matplotlib.axes.Axes`
            Axes on which to draw colorbar

        ax : `~matplotlib.axes.Axes`
            Axes relative to which to position colorbar

        fraction : `float`, optional
            Fraction of original axes to use for colorbar, give `fraction=0`
            to not resize the original axes at all.

        emit : `bool`, optional
            If `True` update all mappables on `Axes` to match the same
            colouring as the colorbar.

        **kwargs
            other keyword arguments to be passed to the
            :meth:`~matplotlib.figure.Figure.colorbar`

        Returns
        -------
        cbar : `~matplotlib.colorbar.Colorbar`
            the newly added `Colorbar`

        See also
        --------
        matplotlib.figure.Figure.colorbar
        matplotlib.colorbar.Colorbar

        Examples
        --------
        >>> import numpy
        >>> from gwpy.plot import Plot

        To plot a simple image and add a colorbar:

        >>> plot = Plot()
        >>> ax = plot.gca()
        >>> ax.imshow(numpy.random.randn(120).reshape((10, 12)))
        >>> plot.colorbar(label='Value')
        >>> plot.show()

        Colorbars can also be generated by directly referencing the parent
        axes:

        >>> plot = Plot()
        >>> ax = plot.gca()
        >>> ax.imshow(numpy.random.randn(120).reshape((10, 12)))
        >>> ax.colorbar(label='Value')
        >>> plot.show()
        """
        # pre-process kwargs (resolves the mappable and the target axes,
        # and computes placement options for the new colorbar axes)
        mappable, kwargs = gcbar.process_colorbar_kwargs(
            self, mappable, ax, cax=cax, fraction=fraction, **kwargs)
        # generate colour bar
        cbar = super().colorbar(mappable, **kwargs)
        # force the minor ticks to be the same as the major ticks
        # in practice, this normally swaps out LogFormatterSciNotation to
        # gwpy's LogFormatter; this is hacky, and would be improved using a
        # subclass of Colorbar in the first place, but matplotlib's
        # cbar_factory doesn't support that
        longaxis = (cbar.ax.yaxis if cbar.orientation == "vertical" else
                    cbar.ax.xaxis)
        if (
            isinstance(cbar.formatter, LogFormatter)
            and isinstance(
                longaxis.get_minor_formatter(),
                LogFormatterSciNotation,
            )
        ):
            longaxis.set_minor_formatter(type(cbar.formatter)())
        # record colorbar in parent object so it can be found later
        self.colorbars.append(cbar)
        # update all other mappables on the anchor axes to share the
        # colorbar's normalisation and colour map
        if emit:
            ax = kwargs.pop('ax')
            norm = mappable.norm
            cmap = mappable.get_cmap()
            for map_ in ax.collections + ax.images:
                map_.set_norm(norm)
                map_.set_cmap(cmap)
        return cbar
def add_colorbar(self, *args, **kwargs):
"""DEPRECATED, use `Plot.colorbar` instead
"""
warnings.warn(
"{0}.add_colorbar was renamed {0}.colorbar, this warnings will "
"result in an error in the future".format(type(self).__name__),
DeprecationWarning)
return self.colorbar(*args, **kwargs)
# -- extra methods --------------------------
    def add_segments_bar(self, segments, ax=None, height=0.14, pad=0.1,
                         sharex=True, location='bottom', **plotargs):
        """Add a segment bar `Plot` indicating state information.

        By default, segments are displayed in a thin horizontal set of Axes
        sitting immediately below the x-axis of the main,
        similarly to a colorbar.

        Parameters
        ----------
        segments : `~gwpy.segments.DataQualityFlag`
            A data-quality flag, or `SegmentList` denoting state segments
            about this Plot

        ax : `Axes`, optional
            Specific `Axes` relative to which to position new `Axes`,
            defaults to :func:`~matplotlib.pyplot.gca()`

        height : `float`, optional
            Height of the new axes, as a fraction of the anchor axes

        pad : `float`, optional
            Padding between the new axes and the anchor, as a fraction of
            the anchor axes dimension

        sharex : `True`, `~matplotlib.axes.Axes`, optional
            Either `True` to set ``sharex=ax`` for the new segment axes,
            or an `Axes` to use directly

        location : `str`, optional
            Location for new segment axes, defaults to ``'bottom'``,
            acceptable values are ``'top'`` or ``'bottom'``.

        **plotargs
            extra keyword arguments are passed to
            :meth:`~gwpy.plot.SegmentAxes.plot`
        """
        # get axes to anchor against
        if not ax:
            ax = self.gca()
        # set options for new axes
        axes_kw = {
            'pad': pad,
            'add_to_figure': True,
            'sharex': ax if sharex is True else sharex or None,
            'axes_class': get_projection_class('segments'),
        }
        # map X-axis limit from old axes
        if axes_kw['sharex'] is ax and not ax.get_autoscalex_on():
            axes_kw['xlim'] = ax.get_xlim()
        # if axes uses GPS scaling, copy the epoch as well
        try:
            axes_kw['epoch'] = ax.get_epoch()
        except AttributeError:
            pass
        # add new axes, re-using the anchor's divider if it already has one
        if ax.get_axes_locator():
            divider = ax.get_axes_locator()._axes_divider
        else:
            from mpl_toolkits.axes_grid1 import make_axes_locatable
            divider = make_axes_locatable(ax)
        if location not in {'top', 'bottom'}:
            # NOTE(review): 'positoned' in this user-facing message is a
            # typo ('positioned'); left unchanged here to preserve behaviour
            raise ValueError("Segments can only be positoned at 'top' or "
                             "'bottom'.")
        segax = divider.append_axes(location, height, **axes_kw)
        # update anchor axes
        if axes_kw['sharex'] is ax and location == 'bottom':
            # move the X-axis label down to the new segment axes
            segax.set_xlabel(ax.get_xlabel())
            segax.xaxis.isDefault_label = ax.xaxis.isDefault_label
            ax.set_xlabel("")
            # hide ticks on original axes
            setp(ax.get_xticklabels(), visible=False)
        # plot segments
        segax.plot(segments, **plotargs)
        # NOTE(review): the `b` keyword of Axes.grid was renamed `visible`
        # and has been removed in newer matplotlib releases — confirm the
        # supported matplotlib version range before relying on this call
        segax.grid(b=False, which='both', axis='y')
        segax.autoscale(axis='y', tight=True)
        return segax
def add_state_segments(self, *args, **kwargs):
"""DEPRECATED: use :meth:`Plot.add_segments_bar`
"""
warnings.warn('add_state_segments() was renamed add_segments_bar(), '
'this warning will result in an error in the future',
DeprecationWarning)
return self.add_segments_bar(*args, **kwargs)
# -- utilities ----------------------------------------------------------------
def _group_axes_data(inputs, separate=None, flat=False):
"""Determine the number of axes from the input args to this `Plot`
Parameters
----------
inputs : `list` of array-like data sets
A list of data arrays, or a list of lists of data sets
sep : `bool`, optional
Plot each set of data on a separate `Axes`
flat : `bool`, optional
Return a flattened list of data objects
Returns
-------
axesdata : `list` of lists of array-like data
A `list` with one element per required `Axes` containing the
array-like data sets for those `Axes`, unless ``flat=True``
is given.
Notes
-----
The logic for this method is as follows:
- if a `list` of data arrays are given, and `separate=False`, use 1 `Axes`
- if a `list` of data arrays are given, and `separate=True`, use N `Axes,
one for each data array
- if a nested `list` of data arrays are given, ignore `sep` and
use one `Axes` for each group of arrays.
Examples
--------
>>> from gwpy.plot import Plot
>>> Plot._group_axes_data([1, 2], separate=False)
[[1, 2]]
>>> Plot._group_axes_data([1, 2], separate=True)
[[1], [2]]
>>> Plot._group_axes_data([[1, 2], 3])
[[1, 2], [3]]
"""
# determine auto-separation
if separate is None and inputs:
# if given a nested list of data, multiple axes are required
if any(isinstance(x, iterable_types + (dict,)) for x in inputs):
separate = True
# if data are of different types, default to separate
elif not all(type(x) is type(inputs[0]) for x in inputs): # noqa: E721
separate = True
# build list of lists
out = []
for x in inputs:
if isinstance(x, dict): # unwrap dict
x = list(x.values())
# new group from iterable, notes:
# the iterable is presumed to be a list of independent data
# structures, unless its a list of scalars in which case we
# should plot them all as one
if (
isinstance(x, (KeysView, ValuesView))
or isinstance(x, (list, tuple)) and (
not x
or not numpy.isscalar(x[0])
)
):
out.append(x)
# dataset starts a new group
elif separate or not out:
out.append([x])
# dataset joins current group
else: # append input to most recent group
out[-1].append(x)
if flat:
return [s for group in out for s in group]
return out
def _common_axis_unit(data, axis='x'):
units = set()
uname = '{}unit'.format(axis)
for x in data:
units.add(getattr(x, uname, None))
if len(units) == 1:
return units.pop()
return None
def _parse_xscale(data):
    # Determine the default X-axis scale for a set of data arrays:
    # 'auto-gps' when every data set shares a time-typed x-unit,
    # otherwise `None` (no common unit, or a non-time unit).
    unit = _common_axis_unit(data, axis='x')
    if unit is None:
        return None
    if unit.physical_type == 'time':
        return 'auto-gps'
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for bfloat16 helper."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.tpu.python.tpu import bfloat16
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
class BFloat16ScopeTest(test.TestCase):
  """Unit tests for `bfloat16.bfloat16_scope`."""

  def testScopeName(self):
    """Test if name for the variable scope is propagated correctly.
    """
    # the scope is opened without an explicit name, so its name is empty
    with bfloat16.bfloat16_scope() as bf:
      self.assertEqual(bf.name, "")

  def testRequestedDType(self):
    """Test if requested dtype is honored in the getter.
    """
    with bfloat16.bfloat16_scope() as scope:
      # default dtype is float32
      v1 = variable_scope.get_variable("v1", [])
      self.assertEqual(v1.dtype.base_dtype, dtypes.float32)
      # explicitly requesting bfloat16 yields a bfloat16 handle
      v2 = variable_scope.get_variable("v2", [], dtype=dtypes.bfloat16)
      self.assertEqual(v2.dtype.base_dtype, dtypes.bfloat16)
      # the underlying global variables are stored as float32 even when
      # bfloat16 was requested (per the assertion below)
      self.assertEqual([dtypes.float32, dtypes.float32],
                       [v.dtype.base_dtype for v in scope.global_variables()])
if __name__ == "__main__":
  # run all test cases defined in this module
  test.main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from uuid import uuid4
from apiclient.discovery import build
from apiclient import errors
from airflow.contrib.hooks.gcp_api_base_hook import GoogleCloudBaseHook
def _format_subscription(project, subscription):
return 'projects/{}/subscriptions/{}'.format(project, subscription)
def _format_topic(project, topic):
return 'projects/{}/topics/{}'.format(project, topic)
class PubSubException(Exception):
    """Raised when a Google Pub/Sub API request fails."""
    pass
class PubSubHook(GoogleCloudBaseHook):
    """Hook for accessing Google Pub/Sub.

    The GCP project against which actions are applied is determined by
    the project embedded in the Connection referenced by gcp_conn_id.
    """

    def __init__(self, gcp_conn_id='google_cloud_default', delegate_to=None):
        super(PubSubHook, self).__init__(gcp_conn_id, delegate_to=delegate_to)

    def get_conn(self):
        """Returns a Pub/Sub service object.

        :rtype: apiclient.discovery.Resource
        """
        http_authorized = self._authorize()
        return build(
            'pubsub', 'v1', http=http_authorized, cache_discovery=False)

    def publish(self, project, topic, messages):
        """Publishes messages to a Pub/Sub topic.

        :param project: the GCP project ID in which to publish
        :type project: string
        :param topic: the Pub/Sub topic to which to publish; do not
            include the ``projects/{project}/topics/`` prefix.
        :type topic: string
        :param messages: messages to publish; if the data field in a
            message is set, it should already be base64 encoded.
        :type messages: list of PubSub messages; see
            http://cloud.google.com/pubsub/docs/reference/rest/v1/PubsubMessage
        :raises PubSubException: if the publish request fails
        """
        body = {'messages': messages}
        full_topic = _format_topic(project, topic)
        request = self.get_conn().projects().topics().publish(
            topic=full_topic, body=body)
        try:
            request.execute()
        except errors.HttpError as e:
            raise PubSubException(
                'Error publishing to topic {}'.format(full_topic), e)

    def create_topic(self, project, topic, fail_if_exists=False):
        """Creates a Pub/Sub topic, if it does not already exist.

        :param project: the GCP project ID in which to create
            the topic
        :type project: string
        :param topic: the Pub/Sub topic name to create; do not
            include the ``projects/{project}/topics/`` prefix.
        :type topic: string
        :param fail_if_exists: if set, raise an exception if the topic
            already exists
        :type fail_if_exists: bool
        :raises PubSubException: if creation fails, or if the topic exists
            and ``fail_if_exists`` is set
        """
        service = self.get_conn()
        full_topic = _format_topic(project, topic)
        try:
            service.projects().topics().create(
                name=full_topic, body={}).execute()
        except errors.HttpError as e:
            # Status code 409 indicates that the topic already exists.
            if str(e.resp['status']) == '409':
                message = 'Topic already exists: {}'.format(full_topic)
                self.log.warning(message)
                if fail_if_exists:
                    raise PubSubException(message)
            else:
                raise PubSubException(
                    'Error creating topic {}'.format(full_topic), e)

    def delete_topic(self, project, topic, fail_if_not_exists=False):
        """Deletes a Pub/Sub topic if it exists.

        :param project: the GCP project ID in which to delete the topic
        :type project: string
        :param topic: the Pub/Sub topic name to delete; do not
            include the ``projects/{project}/topics/`` prefix.
        :type topic: string
        :param fail_if_not_exists: if set, raise an exception if the topic
            does not exist
        :type fail_if_not_exists: bool
        :raises PubSubException: if deletion fails, or if the topic is
            missing and ``fail_if_not_exists`` is set
        """
        service = self.get_conn()
        full_topic = _format_topic(project, topic)
        try:
            service.projects().topics().delete(topic=full_topic).execute()
        except errors.HttpError as e:
            # Status code 404 indicates that the topic was not found
            if str(e.resp['status']) == '404':
                message = 'Topic does not exist: {}'.format(full_topic)
                self.log.warning(message)
                if fail_if_not_exists:
                    raise PubSubException(message)
            else:
                raise PubSubException(
                    'Error deleting topic {}'.format(full_topic), e)

    def create_subscription(self, topic_project, topic, subscription=None,
                            subscription_project=None, ack_deadline_secs=10,
                            fail_if_exists=False):
        """Creates a Pub/Sub subscription, if it does not already exist.

        :param topic_project: the GCP project ID of the topic that the
            subscription will be bound to.
        :type topic_project: string
        :param topic: the Pub/Sub topic name that the subscription will be
            bound to; do not include the ``projects/{project}/topics/``
            prefix.
        :type topic: string
        :param subscription: the Pub/Sub subscription name. If empty, a random
            name will be generated using the uuid module
        :type subscription: string
        :param subscription_project: the GCP project ID where the subscription
            will be created. If unspecified, ``topic_project`` will be used.
        :type subscription_project: string
        :param ack_deadline_secs: Number of seconds that a subscriber has to
            acknowledge each message pulled from the subscription
        :type ack_deadline_secs: int
        :param fail_if_exists: if set, raise an exception if the subscription
            already exists
        :type fail_if_exists: bool
        :return: subscription name which will be the system-generated value if
            the ``subscription`` parameter is not supplied
        :rtype: string
        :raises PubSubException: if creation fails, or if the subscription
            exists and ``fail_if_exists`` is set
        """
        service = self.get_conn()
        full_topic = _format_topic(topic_project, topic)
        if not subscription:
            subscription = 'sub-{}'.format(uuid4())
        if not subscription_project:
            subscription_project = topic_project
        full_subscription = _format_subscription(subscription_project,
                                                 subscription)
        body = {
            'topic': full_topic,
            'ackDeadlineSeconds': ack_deadline_secs
        }
        try:
            service.projects().subscriptions().create(
                name=full_subscription, body=body).execute()
        except errors.HttpError as e:
            # Status code 409 indicates that the subscription already exists.
            if str(e.resp['status']) == '409':
                message = 'Subscription already exists: {}'.format(
                    full_subscription)
                self.log.warning(message)
                if fail_if_exists:
                    raise PubSubException(message)
            else:
                raise PubSubException(
                    'Error creating subscription {}'.format(full_subscription),
                    e)
        return subscription

    def delete_subscription(self, project, subscription,
                            fail_if_not_exists=False):
        """Deletes a Pub/Sub subscription, if it exists.

        :param project: the GCP project ID where the subscription exists
        :type project: string
        :param subscription: the Pub/Sub subscription name to delete; do not
            include the ``projects/{project}/subscriptions/`` prefix.
        :type subscription: string
        :param fail_if_not_exists: if set, raise an exception if the
            subscription does not exist
        :type fail_if_not_exists: bool
        :raises PubSubException: if deletion fails, or if the subscription
            is missing and ``fail_if_not_exists`` is set
        """
        service = self.get_conn()
        full_subscription = _format_subscription(project, subscription)
        try:
            service.projects().subscriptions().delete(
                subscription=full_subscription).execute()
        except errors.HttpError as e:
            # Status code 404 indicates that the subscription was not found
            if str(e.resp['status']) == '404':
                message = 'Subscription does not exist: {}'.format(
                    full_subscription)
                self.log.warning(message)
                if fail_if_not_exists:
                    raise PubSubException(message)
            else:
                raise PubSubException(
                    'Error deleting subscription {}'.format(full_subscription),
                    e)

    def pull(self, project, subscription, max_messages,
             return_immediately=False):
        """Pulls up to ``max_messages`` messages from Pub/Sub subscription.

        :param project: the GCP project ID where the subscription exists
        :type project: string
        :param subscription: the Pub/Sub subscription name to pull from; do not
            include the ``projects/{project}/subscriptions/`` prefix.
        :type subscription: string
        :param max_messages: The maximum number of messages to return from
            the Pub/Sub API.
        :type max_messages: int
        :param return_immediately: If set, the Pub/Sub API will immediately
            return if no messages are available. Otherwise, the request will
            block for an undisclosed, but bounded period of time
        :type return_immediately: bool
        :return: A list of Pub/Sub ReceivedMessage objects each containing
            an ``ackId`` property and a ``message`` property, which includes
            the base64-encoded message content. See
            https://cloud.google.com/pubsub/docs/reference/rest/v1/\
            projects.subscriptions/pull#ReceivedMessage
        :raises PubSubException: if the pull request fails
        """
        service = self.get_conn()
        full_subscription = _format_subscription(project, subscription)
        body = {
            'maxMessages': max_messages,
            'returnImmediately': return_immediately
        }
        try:
            response = service.projects().subscriptions().pull(
                subscription=full_subscription, body=body).execute()
            return response.get('receivedMessages', [])
        except errors.HttpError as e:
            raise PubSubException(
                'Error pulling messages from subscription {}'.format(
                    full_subscription), e)

    def acknowledge(self, project, subscription, ack_ids):
        """Acknowledges the messages associated with the given ack IDs.

        :param project: the GCP project name or ID in which the subscription
            exists
        :type project: string
        :param subscription: the Pub/Sub subscription name whose messages are
            being acknowledged; do not include the
            ``projects/{project}/subscriptions/`` prefix.
        :type subscription: string
        :param ack_ids: List of ReceivedMessage ackIds from a previous pull
            response
        :type ack_ids: list
        :raises PubSubException: if the acknowledge request fails
        """
        service = self.get_conn()
        full_subscription = _format_subscription(project, subscription)
        try:
            service.projects().subscriptions().acknowledge(
                subscription=full_subscription, body={'ackIds': ack_ids}
            ).execute()
        except errors.HttpError as e:
            raise PubSubException(
                'Error acknowledging {} messages pulled from subscription {}'
                .format(len(ack_ids), full_subscription), e)
|
unknown
|
codeparrot/codeparrot-clean
| ||
{% block content %}base{% endblock %}
|
html
|
github
|
https://github.com/django/django
|
tests/template_tests/templates/inclusion_base.html
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from flask import request, abort, render_template, session, redirect, url_for
import json
import md5
from wrc import app
from functions import getDeviceById, getState, requires_auth
from devices import setDeviceState
@app.route("/")
@requires_auth
def index():
    """Render the index page listing all devices and their states."""
    context = {
        'devices': getState(),
        'polling': app.config['GPIO_POLLING_DELAY'],
        'display': app.config['DISPLAY_PIN_ID'],
    }
    return render_template('index.html', **context)
@app.route('/pin/', methods=['POST'])
@requires_auth
def setPinState():
    """Sets device state.

    Expects form fields ``deviceId`` (integer) and ``value`` ('true' for on).
    Returns "Ok" on success, "Error" for an unknown device, and aborts with
    403 on malformed input.
    """
    if request.method == 'POST':
        try:
            deviceId = int(request.form['deviceId'])
            value = request.form['value'] == 'true'
            device = getDeviceById(deviceId)
            if device:
                setDeviceState(device, value)
                return "Ok"
            else:
                return "Error"
        except Exception:
            # deliberately report malformed/missing form data as forbidden;
            # use `except Exception` rather than a bare `except:` so that
            # SystemExit/KeyboardInterrupt still propagate
            abort(403)
    else:
        abort(403)
@app.route('/state/')
@requires_auth
def getPinState():
    """Return the state of every device as a JSON string."""
    return json.dumps(getState())
@app.route('/login/', methods=['GET','POST'])
def login():
    """Generates authorisation page"""
    # Get page to be loaded after authorisation
    next = request.args.get('next') or url_for('index')
    if not app.config['AUTHORISATION_ENABLED']:
        return redirect(next)
    # If user already authorised
    if 'user' in session:
        return redirect(next)
    # Authorisation
    error = ''
    if request.method == 'POST':
        login = request.form['login']
        password = request.form['password']
        # NOTE(review): MD5 is not a suitable password hash and `==` is not a
        # constant-time comparison; consider a dedicated password hasher
        # (bcrypt/argon2) and hmac.compare_digest. The py2-only `md5` module
        # is also removed in Python 3 (use hashlib instead) — confirm the
        # target Python version before changing the stored config format.
        if login == app.config['USER_LOGIN'] and md5.new(password).digest() == app.config['USER_MD5_PASSWORD']:
            # Successful authorisation
            session['user'] = login
            return redirect(next)
        else:
            # Error message
            error = u'Wrong login or password'
    return render_template('login.html', error=error, next=next)
@app.route('/logout/')
@requires_auth
def logout():
    """End the current user session and return to the index page."""
    # drop the auth marker if present (no-op when already logged out)
    session.pop('user', None)
    return redirect(url_for('index'))
|
unknown
|
codeparrot/codeparrot-clean
| ||
// Copyright 2021 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package endpoints
import (
"context"
clientv3 "go.etcd.io/etcd/client/v3"
)
// Endpoint represents a single address the connection can be established with.
//
// Inspired by: https://pkg.go.dev/google.golang.org/grpc/resolver#Address.
// Please document the etcd version since which each field is supported.
type Endpoint struct {
	// Addr is the server address on which a connection will be established.
	// Since etcd 3.1.
	Addr string

	// Metadata is the information associated with Addr.
	// Since etcd 3.1.
	Metadata any
}

// Operation describes the kind of edit applied to an endpoint.
type Operation uint8

const (
	// Add indicates an Endpoint is added.
	Add Operation = iota
	// Delete indicates an existing address is deleted.
	Delete
)

// Update describes a single edit action of an Endpoint.
type Update struct {
	// Op - action Add or Delete.
	Op Operation
	// Key is the etcd key under which the endpoint is stored.
	Key string
	// Endpoint is the endpoint payload of the edit.
	Endpoint Endpoint
}

// WatchChannel is used to deliver notifications about endpoints updates.
type WatchChannel <-chan []*Update

// Key2EndpointMap maps etcd key into struct describing the endpoint.
type Key2EndpointMap map[string]Endpoint

// UpdateWithOpts describes endpoint update (add or delete) together
// with etcd options (e.g. to attach an endpoint to a lease).
type UpdateWithOpts struct {
	Update
	Opts []clientv3.OpOption
}
// NewAddUpdateOpts constructs UpdateWithOpts for endpoint registration.
func NewAddUpdateOpts(key string, endpoint Endpoint, opts ...clientv3.OpOption) *UpdateWithOpts {
	update := Update{Op: Add, Key: key, Endpoint: endpoint}
	return &UpdateWithOpts{Update: update, Opts: opts}
}
// NewDeleteUpdateOpts constructs UpdateWithOpts for endpoint deletion.
func NewDeleteUpdateOpts(key string, opts ...clientv3.OpOption) *UpdateWithOpts {
	update := Update{Op: Delete, Key: key}
	return &UpdateWithOpts{Update: update, Opts: opts}
}
// Manager can be used to add/remove & inspect endpoints stored in etcd for
// a particular target.
type Manager interface {
	// Update allows to atomically add/remove a few endpoints from etcd.
	Update(ctx context.Context, updates []*UpdateWithOpts) error

	// AddEndpoint registers a single endpoint in etcd.
	// For more advanced use-cases use the Update method.
	AddEndpoint(ctx context.Context, key string, endpoint Endpoint, opts ...clientv3.OpOption) error

	// DeleteEndpoint deletes a single endpoint stored in etcd.
	// For more advanced use-cases use the Update method.
	DeleteEndpoint(ctx context.Context, key string, opts ...clientv3.OpOption) error

	// List returns all the endpoints for the current target as a map.
	List(ctx context.Context) (Key2EndpointMap, error)

	// NewWatchChannel creates a channel that is populated with endpoint
	// updates. Cancel the 'ctx' to close the watcher.
	NewWatchChannel(ctx context.Context) (WatchChannel, error)
}
|
go
|
github
|
https://github.com/etcd-io/etcd
|
client/v3/naming/endpoints/endpoints.go
|
# -*- coding: utf-8 -*-
# To-do list
#
# optimize validateCard and setIssuerImg - less cycles
# automatically add spaces at the end of each 4-digit group. modify validator accordingly
#
# possibly in future - remove validateBtn and make resultLabel update on text update in ccText
from PyQt5 import QtCore, QtGui, QtWidgets
import sys
import re
class MainWindow(QtWidgets.QMainWindow):
    """Main window: card-number entry, issuer logo, and Luhn validation."""

    def __init__(self):
        super().__init__()
        # issuer name -> logo image file shown beside the input field
        self.pixmaps = {
            'American Express': 'american_express.png',
            'Diners Club': 'diners_club.png',
            'Discover': 'discover.png',
            'JCB': 'jcb.png',
            'Dankort': 'Dankort.png',
            'MasterCard': 'Master_Card.png',
            'Maestro': 'Maestro.png',
            'Visa': 'Visa.png'
        }
        # palettes for the result label: red = invalid, green = valid
        self.redPalette = QtGui.QPalette()
        self.redPalette.setColor(QtGui.QPalette.Foreground, QtCore.Qt.red)
        self.greenPalette = QtGui.QPalette()
        self.greenPalette.setColor(QtGui.QPalette.Foreground, QtCore.Qt.darkGreen)
        self.aboutWindow = AboutWindow()
        self.setup_ui(self)

    def setup_ui(self, MainWindow):
        """Build the widget tree, menus, and signal connections."""
        MainWindow.setFixedSize(560, 225)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.centralwidget)
        self.verticalLayout = QtWidgets.QVBoxLayout()
        self.ccTextLabel = QtWidgets.QLabel(self.centralwidget)
        self.verticalLayout.addWidget(self.ccTextLabel)
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        # fixed 64x64 slot for the issuer logo
        self.ccImgLabel = QtWidgets.QLabel(self.centralwidget)
        self.ccImgLabel.setMinimumSize(QtCore.QSize(64, 64))
        self.ccImgLabel.setMaximumSize(QtCore.QSize(64, 64))
        self.ccImgLabel.setText("")
        self.horizontalLayout.addWidget(self.ccImgLabel)
        self.ccText = QtWidgets.QLineEdit(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.ccText.sizePolicy().hasHeightForWidth())
        self.ccText.setSizePolicy(sizePolicy)
        self.ccText.setMaximumSize(QtCore.QSize(16777215, 28))
        # input mask: four groups of four digits separated by spaces
        self.mask = '9999 9999 9999 9999'
        self.ccText.setInputMask(self.mask)
        # update the issuer logo live as the user types
        self.ccText.textChanged.connect(self.set_issuer_img)
        self.horizontalLayout.addWidget(self.ccText)
        self.verticalLayout.addLayout(self.horizontalLayout)
        self.line = QtWidgets.QFrame(self.centralwidget)
        self.line.setFrameShape(QtWidgets.QFrame.HLine)
        self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.verticalLayout.addWidget(self.line)
        self.resultLabel = QtWidgets.QLabel(self.centralwidget)
        font = QtGui.QFont()
        font.setPointSize(11)
        font.setBold(True)
        font.setWeight(75)
        self.resultLabel.setFont(font)
        self.resultLabel.setText("")
        self.resultLabel.setAlignment(QtCore.Qt.AlignCenter)
        self.verticalLayout.addWidget(self.resultLabel)
        self.line_2 = QtWidgets.QFrame(self.centralwidget)
        self.line_2.setFrameShape(QtWidgets.QFrame.HLine)
        self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.verticalLayout.addWidget(self.line_2)
        self.validateBtn = QtWidgets.QPushButton(self.centralwidget)
        self.validateBtn.clicked.connect(self.validate_card)
        self.validateBtn.setAutoDefault(True)
        self.validateBtn.setDefault(True)
        self.verticalLayout.addWidget(self.validateBtn)
        self.verticalLayout_2.addLayout(self.verticalLayout)
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 560, 23))
        self.menuFile = QtWidgets.QMenu(self.menubar)
        self.menuAbout = QtWidgets.QMenu(self.menubar)
        MainWindow.setMenuBar(self.menubar)
        self.actionExit = QtWidgets.QAction(QtGui.QIcon('exit.png'), '&Exit', MainWindow)
        self.actionExit.triggered.connect(QtWidgets.qApp.quit)
        self.actionAbout = QtWidgets.QAction(QtGui.QIcon('gnome-info.png'), '&About', MainWindow)
        self.actionAbout.triggered.connect(self.aboutWindow.show)
        self.menuFile.addAction(self.actionExit)
        self.menuAbout.addAction(self.actionAbout)
        self.menubar.addAction(self.menuFile.menuAction())
        self.menubar.addAction(self.menuAbout.menuAction())
        self.setWindowIcon(QtGui.QIcon('cc_icon.png'))
        self.retranslate_ui(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslate_ui(self, MainWindow):
        """Set all user-visible strings (translation hook)."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "Validator"))
        self.ccTextLabel.setText(_translate("MainWindow", "Enter your card number below"))
        self.validateBtn.setText(_translate("MainWindow", "Validate"))
        self.menuFile.setTitle(_translate("MainWindow", "File"))
        self.menuAbout.setTitle(_translate("MainWindow", "About"))
        self.actionExit.setText(_translate("MainWindow", "Exit"))
        self.actionExit.setStatusTip(_translate("MainWindow", "Exit application"))
        self.actionExit.setShortcut(_translate("MainWindow", "Alt+X"))
        self.actionAbout.setText(_translate("MainWindow", "About"))
        self.actionAbout.setStatusTip(_translate("MainWindow", "About application"))

    def validate_card(self):
        """Match the entered number against issuer patterns and run a Luhn check."""
        # full-number patterns, one per issuer
        # NOTE(review): these patterns use '\d' inside non-raw strings;
        # prefer r'' literals to avoid invalid-escape warnings on newer
        # Pythons (behaviour is unchanged here)
        issuers = {
            'American Express': '^3[47]\d{13}$',
            'Diners Club': '^(30[0-5]\d|309\d|36\d\d|3[89]\d\d)\d{10}',
            'Discover': '^6(?:011\d\d|5\d{4}|4[4-9]\d{3}|22(?:1(?:2[6-9]|[3-9]\d)|[2-8]\d\d|9(?:[01]\d|2[0-5])))\d{10}$',
            'JCB': '^35(?:2[89]|[3-8]\d)\d{12}$',
            'Dankort': '^5019\d{12}',
            'MasterCard': '^5[1-5]\d{14}$',
            'Maestro': '^(?:5[06789]\d\d|6304|6390|67\d\d)\d{8,15}$',
            'Visa': '^4\d{15}$'
        }
        cc_number = self.ccText.text()
        # strip the mask's spaces (and any other non-digits)
        stripped = re.sub(r"\D", '', cc_number)
        # Luhn works right-to-left, so iterate over the reversed digits
        reverse = stripped[::-1]
        total = 0
        i = 1
        valid = None
        for issuer in issuers:
            exp = issuers[issuer]
            if re.match(exp, stripped):
                # Luhn checksum: double every second digit from the right
                # and sum the decimal digits of each product
                for char in reverse:
                    digit = int(char)
                    if i % 2 == 0:
                        digit *= 2
                        temp_lst = [int(d) for d in str(digit)]
                        for d in temp_lst:
                            total += d
                    else:
                        total += digit
                    i += 1
                if total % 10 == 0:
                    valid = True
                    self.resultLabel.setText('Card is valid')
                    self.resultLabel.setPalette(self.greenPalette)
                    break
                else:
                    self.resultLabel.setText('Invalid card number')
                    self.resultLabel.setPalette(self.redPalette)
                    break
        # also covers the case where no issuer pattern matched at all
        # (and re-labels a failed Luhn check, since `valid` stays None)
        if not valid:
            self.resultLabel.setText('Invalid card')
            self.resultLabel.setPalette(self.redPalette)

    def set_issuer_img(self):
        """Show the logo of the issuer matching the number typed so far."""
        # prefix patterns (first digits only), one per issuer
        # NOTE(review): same non-raw '\d' caveat as in validate_card
        issuers = {
            'American Express': '^3[47]\d\d',
            'Diners Club': '^30[0-5]\d|309\d|36\d\d|3[89]\d\d',
            'Discover': '^6(?:011\d\d|5\d{4}|4[4-9]\d{3}|22(?:1(?:2[6-9]|[3-9]\d)|[2-8]\d\d|9(?:[01]\d|2[0-5])))',
            'JCB': '^35(?:2[89]|[3-8]\d)',
            'Dankort': '^5019',
            'MasterCard': '^5[1-5]\d\d',
            'Maestro': '^(?:5[06789]\d\d|6304|6390|67\d\d)',
            'Visa': '^4\d\d\d'
        }
        current_text = self.ccText.text()
        if len(current_text) < 4:
            # not enough digits to identify an issuer yet
            self.ccImgLabel.setPixmap(QtGui.QPixmap('blank.png'))
            self.ccImgLabel.setToolTip('')
        else:
            for issuer in issuers:
                exp = issuers[issuer]
                if re.match(exp, current_text):
                    pixmap = QtGui.QPixmap(self.pixmaps[issuer])
                    self.ccImgLabel.setPixmap(pixmap)
                    self.ccImgLabel.setToolTip(issuer)
                    break
class AboutWindow(QtWidgets.QWidget):
    """Small fixed-size "About" dialog with a logo and a close button."""

    def __init__(self):
        super().__init__()
        self.pixmap = QtGui.QPixmap('logo.png')
        self.image_label = QtWidgets.QLabel()
        self.image_label.setPixmap(self.pixmap)
        self.image_label.setAlignment(QtCore.Qt.AlignCenter)
        self.about_label_1 = QtWidgets.QLabel('Credit card validator application')
        self.about_label_1.setAlignment(QtCore.Qt.AlignCenter)
        self.about_label_1.setWordWrap(True)
        self.about_label_2 = QtWidgets.QLabel('Created by Dannyo')
        self.about_label_2.setAlignment(QtCore.Qt.AlignCenter)
        self.close_button = QtWidgets.QPushButton('Close')
        self.close_button.pressed.connect(self.close)
        # stack the widgets vertically, pushed toward the bottom
        vbox = QtWidgets.QVBoxLayout()
        vbox.addStretch(1)
        vbox.addWidget(self.image_label)
        vbox.addWidget(self.about_label_1)
        vbox.addWidget(self.about_label_2)
        vbox.addWidget(self.close_button)
        self.setWindowTitle('About')
        self.setFixedSize(200, 200)
        self.setWindowIcon(QtGui.QIcon('cc_icon.png'))
        self.setLayout(vbox)
# Application entry point: create the Qt application, show the main window
# and hand control to the Qt event loop.
if __name__ == '__main__':
    app = QtWidgets.QApplication(sys.argv)
    win = MainWindow()
    win.show()
    # exec_() blocks until the last window closes; its return code is
    # propagated to the shell via sys.exit().
    sys.exit(app.exec_())
|
unknown
|
codeparrot/codeparrot-clean
| ||
# SPDX-License-Identifier: GPL-2.0-only
%YAML 1.2
---
$id: http://devicetree.org/schemas/mmc/sdhci-pxa.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Marvell PXA SDHCI v1/v2/v3
maintainers:
- Ulf Hansson <ulf.hansson@linaro.org>
allOf:
- $ref: mmc-controller.yaml#
- if:
properties:
compatible:
contains:
const: marvell,armada-380-sdhci
then:
properties:
reg:
minItems: 3
reg-names:
minItems: 3
required:
- reg-names
else:
properties:
reg:
maxItems: 1
reg-names:
maxItems: 1
- if:
properties:
compatible:
contains:
const: mrvl,pxav1-mmc
then:
properties:
pinctrl-names:
description:
Optional for supporting PXA168 SDIO IRQ errata to switch CMD pin between
SDIO CMD and GPIO mode.
items:
- const: default
- const: state_cmd_gpio
minItems: 1
pinctrl-1:
description:
Should switch CMD pin to GPIO mode as a high output.
- if:
properties:
compatible:
contains:
const: mrvl,pxav3-mmc
then:
properties:
pinctrl-names:
description:
Optional for increasing stability of the controller at fast bus clocks.
items:
- const: default
- const: state_uhs
minItems: 1
pinctrl-1:
description:
Should switch the drive strength of the data pins to high.
properties:
compatible:
enum:
- mrvl,pxav1-mmc
- mrvl,pxav2-mmc
- mrvl,pxav3-mmc
- marvell,armada-380-sdhci
reg:
minItems: 1
maxItems: 3
reg-names:
items:
- const: sdhci
- const: mbus
- const: conf-sdio3
interrupts:
maxItems: 1
clocks:
minItems: 1
maxItems: 2
clock-names:
minItems: 1
items:
- const: io
- const: core
pinctrl-names: true
pinctrl-0:
description:
Should contain default pinctrl.
pinctrl-1: true
mrvl,clk-delay-cycles:
description: Specify a number of cycles to delay for tuning.
$ref: /schemas/types.yaml#/definitions/uint32
required:
- compatible
- reg
- interrupts
- clocks
- clock-names
unevaluatedProperties: false
examples:
- |
#include <dt-bindings/clock/berlin2.h>
mmc@d4280800 {
compatible = "mrvl,pxav3-mmc";
reg = <0xd4280800 0x800>;
bus-width = <8>;
interrupts = <27>;
clocks = <&chip CLKID_SDIO1XIN>, <&chip CLKID_SDIO1>;
clock-names = "io", "core";
non-removable;
mrvl,clk-delay-cycles = <31>;
};
- |
mmc@d8000 {
compatible = "marvell,armada-380-sdhci";
reg-names = "sdhci", "mbus", "conf-sdio3";
reg = <0xd8000 0x1000>,
<0xdc000 0x100>,
<0x18454 0x4>;
interrupts = <0 25 0x4>;
clocks = <&gateclk 17>;
clock-names = "io";
mrvl,clk-delay-cycles = <0x1F>;
};
...
|
unknown
|
github
|
https://github.com/torvalds/linux
|
Documentation/devicetree/bindings/mmc/sdhci-pxa.yaml
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 The SymbiFlow Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
class AssertStateVariable():
    """Abstract SAT variable representing a state being asserted."""

    def __init__(self, parent, state):
        self.parent = parent
        self.state = state

    def variable_name(self):
        # Names are namespaced by the owning group's prefix.
        return f'{self.parent.prefix}.{self.state}'

    def variable(self, solver):
        # Resolve this name to the solver's concrete SAT index.
        return solver.get_variable(self.variable_name())

    def __str__(self):
        return self.variable_name()
class DeassertStateVariable():
    """Abstract SAT variable representing a state being deasserted."""

    def __init__(self, parent, state):
        self.parent = parent
        self.state = state

    def variable_name(self):
        # Deasserted variables carry a 'NOT.' marker after the group prefix.
        return f'{self.parent.prefix}.NOT.{self.state}'

    def variable(self, solver):
        return solver.get_variable(self.variable_name())

    def __str__(self):
        return self.variable_name()
class Not():
    """Abstract negation of another abstract SAT variable."""

    def __init__(self, variable):
        self.a_variable = variable

    def variable_name(self):
        # The name is that of the wrapped variable; negation only shows up
        # in the resolved SAT literal and the string form.
        return self.a_variable.variable_name()

    def variable(self, solver):
        # Negated literals are the negative of the variable's SAT index.
        return -solver.get_variable(self.variable_name())

    def __str__(self):
        return f'!{self.variable_name()}'
class Xor():
    """Abstract XOR SAT clause over two abstract variables."""

    def __init__(self, variable_a, variable_b):
        self.variable_a = variable_a
        self.variable_b = variable_b

    def clauses(self):
        # (a | b) & (!a | !b) -- exactly one of the two holds.
        yield [self.variable_a, self.variable_b]
        yield [Not(self.variable_a), Not(self.variable_b)]

    def __str__(self):
        name_a = self.variable_a.variable_name()
        name_b = self.variable_b.variable_name()
        return f'{name_a} xor {name_b}'
class Implies():
    """Abstract implication (->) SAT clause."""

    def __init__(self, source_variable, target_variable):
        self.source_variable = source_variable
        self.target_variable = target_variable

    def clauses(self):
        # a -> b is the single clause (!a | b).
        yield [Not(self.source_variable), self.target_variable]

    def __str__(self):
        src = self.source_variable.variable_name()
        dst = self.target_variable.variable_name()
        return f'{src} -> {dst}'
class Or():
    """Abstract OR (at-least-one) SAT clause."""

    def __init__(self, variables):
        self.variables = variables

    def clauses(self):
        # A disjunction is one SAT clause listing all of its literals.
        yield self.variables

    def __str__(self):
        joined = ', '.join(str(var) for var in self.variables)
        return f'sum({joined})'
class ExclusiveStateGroup():
    """ A group of states that have at most 1 state selected. """

    def __init__(self, prefix, default):
        # prefix: namespace for every SAT variable this group generates.
        # default: state reported by get_state() when none is asserted.
        self.prefix = prefix
        self.states = set()
        self.default = default

    def name(self):
        """ Return name of state group. """
        return self.prefix

    def add_state(self, state):
        """ Add a state to this group. """
        self.states.add(state)

    def assert_state(self, state):
        """ Return a SAT variable that asserts that a state must be asserted. """
        return AssertStateVariable(self, state)

    def deassert_state(self, state):
        """ Return a SAT variable that asserts that a state must be deasserted. """
        return DeassertStateVariable(self, state)

    def select_one(self):
        """ Yields SAT clauses that ensure that one variable from this state group is selected. """
        yield Or([self.assert_state(state) for state in self.states])

    def implies_clause(self, source_variable, state):
        """ Yields SAT clauses that ensure if source_variable is true, then state is asserted from this group. """
        assert state in self.states, state
        yield Implies(source_variable, self.assert_state(state))

    def implies_not_clause(self, source_variable, state):
        """ Yields SAT clauses that ensure if source_variable is true, then state is deasserted from this group. """
        assert state in self.states
        yield Implies(source_variable, self.deassert_state(state))

    def requires_clause(self, source_variable, states):
        """ Yields SAT clauses that ensure if source_variable is true, then one
        of the supplied states (a set) must be asserted from this group.

        Bug fix: this previously yielded the *generator objects* returned by
        implies_not_clause rather than the Implies clauses they produce;
        `yield from` delegates so callers receive actual clause objects, in
        line with implies_clause / implies_not_clause.
        """
        for other_state in self.states - states:
            yield from self.implies_not_clause(source_variable, other_state)

    def variables(self):
        """ Yields SAT variables generated from this state group. """
        for state in self.states:
            yield self.assert_state(state)
            yield self.deassert_state(state)

    def clauses(self):
        """ Yield SAT clauses that ensure this state group selects at most one state. """
        for state in self.states:
            # Each state is either asserted or deasserted, never both or
            # neither.
            yield Xor(
                AssertStateVariable(self, state),
                DeassertStateVariable(self, state))
            # Asserting one state forces every other state to be deasserted.
            for other_state in (self.states - set([state])):
                yield Implies(
                    AssertStateVariable(self, state),
                    DeassertStateVariable(self, other_state))

    def get_state(self, variables_for_state_group):
        """ Return state for this group based on true SAT variables relevant to this group. """
        state = None
        for variable in variables_for_state_group:
            assert variable.startswith(self.prefix + '.')
            data_portion = variable[len(self.prefix) + 1:]
            not_set = False
            if data_portion.startswith('NOT.'):
                data_portion = data_portion[len('NOT.'):]
                not_set = True
            assert data_portion in self.states
            if not_set:
                # Deasserted variables carry no selection information.
                continue
            if state is None:
                state = data_portion
            else:
                # Two asserted states would violate the group invariant.
                assert False, (state, data_portion)
        if state is None:
            # Nothing asserted: fall back to the group default.
            state = self.default
        return state
class Solver():
    """ Abstract SAT solver, where each SAT variable is a string.

    Clauses used in this class are "abstract" clauses, that can yield more than
    one clause.

    Typical lifecycle: add state groups / variable names / clauses, call
    prepare_for_sat() once to freeze the variable-name -> index mapping and
    obtain concrete integer clauses, solve externally, then feed the model
    back through decode_solution_model().
    """

    def __init__(self):
        # All known variable names.  This is a set until prepare_for_sat(),
        # which replaces it with a sorted list so indices are stable.
        self.variable_names = set()
        # Maps variable name -> 1-based SAT index; built by prepare_for_sat().
        self.variable_name_to_index = None
        self.abstract_clauses = []
        self.state_group_names = set()
        self.state_groups = []
        # Maps each state-group variable name back to its owning group.
        self.variable_to_state_group = {}

    def add_state_group(self, state_group):
        """ Adds a state group to the solver.

        state_group (ExclusiveStateGroup) - State group.
        """
        # Group names must be unique; duplicates indicate a caller bug.
        assert state_group.name() not in self.state_group_names
        self.state_group_names.add(state_group.name())
        self.state_groups.append(state_group)

    def add_variable_names(self, variables):
        """ Adds a variable names to this Solver.

        These variable names cannot already be apart of the Solver.
        (`variables` must be a set; the overlap assertion uses `&`.)
        """
        new_variable_names = set()
        for variable in variables:
            new_variable_names.add(variable)
        assert len(self.variable_names & variables) == 0
        self.variable_names |= new_variable_names

    def add_clause(self, clause):
        """ Add an abstract clause to the Solver.

        Interface for abstract clause should have one method that yields a
        list of abstract variable objects.

        Abstract variable objects should have a method called variable, that
        takes a Solver object.
        """
        self.abstract_clauses.append(clause)

    def get_variable(self, variable_name):
        """ Return SAT variable index for a variable name. """
        # Only valid after prepare_for_sat() has assigned indices.
        assert self.variable_name_to_index is not None
        return self.variable_name_to_index[variable_name]

    def get_variable_name(self, variable_index):
        """ Return a SAT variable name for a given variable index.

        NOTE(review): this indexing assumes prepare_for_sat() has already run
        and converted self.variable_names from a set into a sorted list.
        """
        return self.variable_names[variable_index - 1]

    def prepare_for_sat(self):
        """ Convert SAT clauses using variable name strings to SAT indicies """
        # Register every state-group variable and the groups' internal
        # exclusivity clauses.
        for state_group in self.state_groups:
            new_variables = set()
            for variable in state_group.variables():
                new_variables.add(variable.variable_name())
            self.add_variable_names(new_variables)
            for variable in new_variables:
                assert variable not in self.variable_to_state_group
                self.variable_to_state_group[variable] = state_group
            for clause in state_group.clauses():
                self.add_clause(clause)
        # Freeze variable order so index assignment is deterministic.
        self.variable_names = sorted(self.variable_names)
        self.variable_name_to_index = {}
        # Assign SAT variables indicies to variable names
        for idx, variable_name in enumerate(self.variable_names):
            assert variable_name not in self.variable_name_to_index
            self.variable_name_to_index[variable_name] = idx + 1
        # Convert abstract clauses using variable names to SAT clauses
        concrete_clauses = set()
        for abstract_clause in self.abstract_clauses:
            for clause in abstract_clause.clauses():
                concrete_clause = []
                for part in clause:
                    concrete_clause.append(part.variable(self))
                # A clause must not contain the same literal twice.
                assert len(set(concrete_clause)) == len(concrete_clause)
                # Sorting + the set dedupes structurally identical clauses.
                concrete_clauses.add(tuple(sorted(concrete_clause)))
        return sorted(concrete_clauses)

    def decode_solution_model(self, sat_model):
        """ Decode a solution from a SAT solver.

        Returns a dict of state group states and a set of SAT variables that
        don't belong to state group states.
        """
        state_group_variables = {}
        other_variables = set()
        for idx in sat_model:
            # Negative literals are "false" assignments; skip them.
            if idx < 0:
                continue
            variable = self.get_variable_name(idx)
            if variable in self.variable_to_state_group:
                state_group = self.variable_to_state_group[variable]
                state_group_name = state_group.name()
                if state_group_name not in state_group_variables:
                    state_group_variables[state_group_name] = set()
                state_group_variables[state_group_name].add(variable)
            else:
                other_variables.add(variable)
        state_group_results = {}
        for state_group_name, variables in state_group_variables.items():
            # Any variable of the group resolves back to the group object.
            state_group = self.variable_to_state_group[list(variables)[0]]
            state_group_results[state_group_name] = state_group.get_state(
                variables)
        return state_group_results, other_variables

    def print_debug(self):
        """ Print debugging information for the abstract SAT solver. """
        print()
        print("Variable names ({} total):".format(len(self.variable_names)))
        print()
        for variable in self.variable_names:
            print(variable)
        print()
        print("Clauses:")
        print()
        for clause in self.abstract_clauses:
            print(clause)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright (c) 2010 Pedro Matiello <pmatiello@gmail.com>
from mockito import any, mock, verify, when
from handlers.entity import entity_handler, entities_handler
from models.entity import entity
class entities_handler_spec():
    """Behavioural spec for the collection-level entities_handler."""

    def setup(self):
        # Build the handler without running its real constructor, then wire
        # in a mocked repository.
        self.repository = mock()
        self.handler = object.__new__(entities_handler)
        self.handler.initialize(self.repository)
        self.entity_list = [1, 2, 3]
        # Stub out rendering and the repository listing.
        when(self.handler).render().thenReturn(None)
        when(self.repository).list().thenReturn(self.entity_list)

    def should_list_all_instances(self):
        # GET must fetch every entity and render the index template with them.
        self.handler.get()
        verify(self.repository).list()
        verify(self.handler).render('entity/index.html', data=self.entity_list)

    def should_create_new_instances(self):
        # POST reads the form fields and persists a new entity.
        when(self.handler).param('field1').thenReturn("Field1 data")
        when(self.handler).param('field2').thenReturn("Field2 data")
        self.handler.post()
        verify(self.repository).save(any(entity))
class entity_handler_spec():
    """Behavioural spec for the single-resource entity_handler."""

    def setup(self):
        # Handler built without its constructor, backed by a mock repository
        # that serves a known entity for id 1.
        self.repository = mock()
        self.handler = object.__new__(entity_handler)
        self.handler.initialize(self.repository)
        self.entity = entity("Field1 data", "Field2 data")
        when(self.handler).render().thenReturn(None)
        when(self.repository).load(1).thenReturn(self.entity)

    def should_modify_instances(self):
        # PUT updates both fields from request params and saves the entity.
        when(self.handler).param('field1').thenReturn("Field1 data updated")
        when(self.handler).param('field2').thenReturn("Field2 data updated")
        self.handler.put(1)
        assert self.entity.field1 == "Field1 data updated"
        assert self.entity.field2 == "Field2 data updated"
        verify(self.repository).save(self.entity)

    def should_delete_instances(self):
        # DELETE removes the entity loaded by id.
        self.handler.delete(1)
        verify(self.repository).remove(self.entity)
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""Utilities using NDG HTTPS Client, including a main module that can be used to
fetch from a URL.
"""
__author__ = "R B Wilkinson"
__date__ = "09/12/11"
__copyright__ = "(C) 2011 Science and Technology Facilities Council"
__license__ = "BSD - see LICENSE file in top-level directory"
__contact__ = "Philip.Kershaw@stfc.ac.uk"
__revision__ = '$Id$'
import logging
from optparse import OptionParser
import os
import sys
if sys.version_info[0] > 2:
import http.cookiejar as cookiejar_
import http.client as http_client_
from urllib.request import Request as Request_
from urllib.request import HTTPHandler as HTTPHandler_
from urllib.request import HTTPCookieProcessor as HTTPCookieProcessor_
from urllib.request import HTTPBasicAuthHandler as HTTPBasicAuthHandler_
from urllib.request import HTTPPasswordMgrWithDefaultRealm as \
HTTPPasswordMgrWithDefaultRealm_
from urllib.request import ProxyHandler as ProxyHandler_
from urllib.error import HTTPError as HTTPError_
import urllib.parse as urlparse_
else:
import cookielib as cookiejar_
import httplib as http_client_
from urllib2 import Request as Request_
from urllib2 import HTTPHandler as HTTPHandler_
from urllib2 import HTTPCookieProcessor as HTTPCookieProcessor_
from urllib2 import HTTPBasicAuthHandler as HTTPBasicAuthHandler_
from urllib2 import HTTPPasswordMgrWithDefaultRealm as \
HTTPPasswordMgrWithDefaultRealm_
from urllib2 import ProxyHandler as ProxyHandler_
from urllib2 import HTTPError as HTTPError_
import urlparse as urlparse_
from ndg.httpsclient.urllib2_build_opener import build_opener
from ndg.httpsclient.https import HTTPSContextHandler
from ndg.httpsclient import ssl_context_util
log = logging.getLogger(__name__)
class AccumulatingHTTPCookieProcessor(HTTPCookieProcessor_):
    """Cookie processor that adds new cookies (instead of replacing the existing
    ones as HTTPCookieProcessor does)
    """

    def http_request(self, request):
        """Processes cookies for a HTTP request.
        @param request: request to process
        @type request: urllib2.Request
        @return: request
        @rtype: urllib2.Request
        """
        COOKIE_HEADER_NAME = "Cookie"
        # Let the cookie jar write its Cookie header onto a scratch copy of
        # the request (note the empty header dict) so the real request's
        # existing headers are not clobbered.
        tmp_request = Request_(request.get_full_url(), request.data, {},
                               request.origin_req_host,
                               request.unverifiable)
        self.cookiejar.add_cookie_header(tmp_request)
        # Combine existing and new cookies.
        new_cookies = tmp_request.get_header(COOKIE_HEADER_NAME)
        if new_cookies:
            if request.has_header(COOKIE_HEADER_NAME):
                # Merge new cookies with existing ones.
                old_cookies = request.get_header(COOKIE_HEADER_NAME)
                merged_cookies = '; '.join([old_cookies, new_cookies])
                # add_unredirected_header: the merged header must not be
                # re-sent automatically on redirects.
                request.add_unredirected_header(COOKIE_HEADER_NAME,
                                                merged_cookies)
            else:
                # No existing cookies so just set new ones.
                request.add_unredirected_header(COOKIE_HEADER_NAME, new_cookies)
        return request

    # Process cookies for HTTPS in the same way.
    https_request = http_request
class URLFetchError(Exception):
    """Error fetching content from URL (non-OK response or transport failure
    reported by open_url)."""
def fetch_from_url(url, config, data=None, handlers=None):
    """Returns data retrieved from a URL.
    @param url: URL to attempt to open
    @type url: basestring
    @param config: SSL context configuration
    @type config: Configuration
    @param data: HTTP POST data
    @type data: str
    @param handlers: list of custom urllib2 handlers to add to the request
    @type handlers: iterable
    @return data retrieved from URL or None
    @raise URLFetchError: if the response status is not 200 OK
    """
    return_code, return_message, response = open_url(url, config, data=data,
                                                     handlers=handlers)
    # Anything other than 200 OK (including return_code == 0, meaning a
    # transport error) is reported as a fetch failure.
    if return_code != http_client_.OK:
        raise URLFetchError(return_message)
    return_data = response.read()
    response.close()
    return return_data
def fetch_from_url_to_file(url, config, output_file, data=None, handlers=None):
    """Writes data retrieved from a URL to a file.
    @param url: URL to attempt to open
    @type url: basestring
    @param config: SSL context configuration
    @type config: Configuration
    @param output_file: output file
    @type output_file: basestring
    @param data: HTTP POST data
    @type data: str
    @param handlers: list of custom urllib2 handlers to add to the request
    @type handlers: iterable
    @return: tuple (
        returned HTTP status code or 0 if an error occurred
        returned message
        boolean indicating whether access was successful)
    """
    return_code, return_message, response = open_url(url, config, data=data,
                                                     handlers=handlers)
    if return_code == http_client_.OK:
        return_data = response.read()
        response.close()
        # Fixes vs. the previous version: open in binary mode, because
        # response.read() returns bytes on Python 3 and a text-mode file
        # would raise TypeError; the with-block also guarantees the file is
        # closed even if the write fails.
        with open(output_file, "wb") as outfile:
            outfile.write(return_data)
    return return_code, return_message, return_code == http_client_.OK
def fetch_stream_from_url(url, config, data=None, handlers=None):
    """Returns the open response stream for a URL.
    @param url: URL to attempt to open
    @type url: basestring
    @param config: SSL context configuration
    @type config: Configuration
    @param data: HTTP POST data
    @type data: str
    @param handlers: list of custom urllib2 handlers to add to the request
    @type handlers: iterable
    @return: data retrieved from URL or None
    @rtype: file derived type
    @raise URLFetchError: if the response status is not 200 OK
    """
    return_code, return_message, response = open_url(url, config, data=data,
                                                     handlers=handlers)
    # return_code == 0 signals a transport error; anything != 200 is a
    # failure either way.
    if return_code != http_client_.OK:
        raise URLFetchError(return_message)
    return response
def open_url(url, config, data=None, handlers=None):
    """Attempts to open a connection to a specified URL.
    @param url: URL to attempt to open
    @param config: SSL context configuration
    @type config: Configuration
    @param data: HTTP POST data
    @type data: str
    @param handlers: list of custom urllib2 handlers to add to the request
    @type handlers: iterable
    @return: tuple (
        returned HTTP status code or 0 if an error occurred
        returned message or error description
        response object)
    """
    debuglevel = 1 if config.debug else 0

    # Set up handlers for URL opener: reuse the caller's cookie jar if one
    # was configured, otherwise start a fresh one.
    if config.cookie:
        cj = config.cookie
    else:
        cj = cookiejar_.CookieJar()

    # Use a cookie processor that accumulates cookies when redirects occur so
    # that an application can redirect for authentication and retain both any
    # cookies for the application and the security system (c.f.,
    # urllib2.HTTPCookieProcessor which replaces cookies).
    cookie_handler = AccumulatingHTTPCookieProcessor(cj)

    if not handlers:
        handlers = []
    handlers.append(cookie_handler)

    if config.debug:
        http_handler = HTTPHandler_(debuglevel=debuglevel)
        https_handler = HTTPSContextHandler(config.ssl_context,
                                            debuglevel=debuglevel)
        handlers.extend([http_handler, https_handler])

    if config.http_basicauth:
        # currently only supports http basic auth
        auth_handler = HTTPBasicAuthHandler_(HTTPPasswordMgrWithDefaultRealm_())
        # Bug fix: the credentials live in config.http_basicauth; the old
        # code read a non-existent config.httpauth attribute, which raised
        # AttributeError whenever basic auth was configured.
        auth_handler.add_password(realm=None, uri=url,
                                  user=config.http_basicauth[0],
                                  passwd=config.http_basicauth[1])
        handlers.append(auth_handler)

    # Explicitly remove proxy handling if the host is one listed in the value of
    # the no_proxy environment variable because urllib2 does use proxy settings
    # set via http_proxy and https_proxy, but does not take the no_proxy value
    # into account.
    if not _should_use_proxy(url, config.no_proxy):
        handlers.append(ProxyHandler_({}))
        log.debug("Not using proxy")
    elif config.proxies:
        handlers.append(ProxyHandler_(config.proxies))
        log.debug("Configuring proxies: %s" % config.proxies)

    opener = build_opener(*handlers, ssl_context=config.ssl_context)

    headers = config.headers
    if headers is None:
        headers = {}
    request = Request_(url, data, headers)

    # Open the URL and check the response.
    return_code = 0
    return_message = ''
    response = None
    # Bug fix: the request was previously also issued once *outside* this
    # try block (marked FIXME), which both bypassed the error handling and
    # sent every request twice.
    try:
        response = opener.open(request)
        return_message = response.msg
        return_code = response.code
        if log.isEnabledFor(logging.DEBUG):
            for index, cookie in enumerate(cj):
                log.debug("%s : %s", index, cookie)
    except HTTPError_ as exc:
        return_code = exc.code
        return_message = "Error: %s" % exc.msg
        if log.isEnabledFor(logging.DEBUG):
            log.debug("%s %s", exc.code, exc.msg)
    except Exception as exc:
        # Transport-level failure: report code 0 with a description.
        return_message = "Error: %s" % exc.__str__()
        if log.isEnabledFor(logging.DEBUG):
            import traceback
            log.debug(traceback.format_exc())
    return (return_code, return_message, response)
def _should_use_proxy(url, no_proxy=None):
    """Determines whether a proxy should be used to open a connection to the
    specified URL, based on the value of the no_proxy environment variable.
    @param url: URL
    @type url: basestring or urllib2.Request
    @param no_proxy: comma-separated host list overriding the environment
    @return: False if the URL's host appears in the no-proxy list
    """
    if no_proxy is None:
        no_proxy_effective = os.environ.get('no_proxy', '')
    else:
        no_proxy_effective = no_proxy
    hostname = urlparse_.urlparse(_url_as_string(url)).hostname
    # Proxy is used unless the hostname matches an excluded entry exactly.
    return all(hostname != entry.strip()
               for entry in no_proxy_effective.split(','))
def _url_as_string(url):
"""Returns the URL string from a URL value that is either a string or
urllib2.Request..
@param url: URL
@type url: basestring or urllib2.Request
@return: URL string
@rtype: basestring
"""
if isinstance(url, Request_):
return url.get_full_url()
elif isinstance(url, str):
return url
else:
raise TypeError("Expected type %r or %r" %
(str, Request_))
class Configuration(object):
    """Simple value object holding the connection settings used by open_url
    and the fetch_* helpers."""

    def __init__(self, ssl_context, debug=False, proxies=None, no_proxy=None,
                 cookie=None, http_basicauth=None, headers=None):
        """
        @param ssl_context: SSL context to use with this configuration
        @type ssl_context: OpenSSL.SSL.Context
        @param debug: if True, output debugging information
        @type debug: bool
        @param proxies: proxies to use for
        @type proxies: dict with basestring keys and values
        @param no_proxy: hosts for which a proxy should not be used
        @type no_proxy: basestring
        @param cookie: cookies to set for request
        @type cookie: cookielib.CookieJar (python 3 - http.cookiejar)
        @param http_basicauth: http authentication, or None
        @type http_basicauth: tuple of (username,password)
        @param headers: http headers
        @type headers: dict
        """
        # Plain attribute storage; no validation is performed here.
        self.ssl_context = ssl_context
        self.debug = debug
        self.cookie = cookie
        self.headers = headers
        self.http_basicauth = http_basicauth
        self.proxies = proxies
        self.no_proxy = no_proxy
def main():
    '''Utility to fetch data using HTTP or HTTPS GET from a specified URL.

    Parses command-line options, builds an SSL context and Configuration,
    then either writes the fetched body to a file (-f) or prints it.
    '''
    parser = OptionParser(usage="%prog [options] url")
    parser.add_option("-c", "--certificate", dest="cert_file", metavar="FILE",
                      default=os.path.expanduser("~/credentials.pem"),
                      help="Certificate file - defaults to $HOME/credentials.pem")
    parser.add_option("-k", "--private-key", dest="key_file", metavar="FILE",
                      default=None,
                      help="Private key file - defaults to the certificate file")
    parser.add_option("-t", "--ca-certificate-dir", dest="ca_dir",
                      metavar="PATH",
                      default=None,
                      help="Trusted CA certificate file directory")
    parser.add_option("-d", "--debug", action="store_true", dest="debug",
                      default=False,
                      help="Print debug information.")
    parser.add_option("-p", "--post-data-file", dest="data_file",
                      metavar="FILE", default=None,
                      help="POST data file")
    parser.add_option("-f", "--fetch", dest="output_file", metavar="FILE",
                      default=None, help="Output file")
    parser.add_option("-n", "--no-verify-peer", action="store_true",
                      dest="no_verify_peer", default=False,
                      help="Skip verification of peer certificate.")
    parser.add_option("-a", "--basicauth", dest="basicauth",
                      metavar="USER:PASSWD",
                      default=None,
                      help="HTTP authentication credentials")
    parser.add_option("--header", action="append", dest="headers",
                      metavar="HEADER: VALUE",
                      help="Add HTTP header to request")
    (options, args) = parser.parse_args()
    # Exactly one positional argument (the URL) is required.
    if len(args) != 1:
        parser.error("Incorrect number of arguments")
    url = args[0]
    if options.debug:
        logging.getLogger().setLevel(logging.DEBUG)
    # For each file/dir option, fall back to None when the path does not
    # exist so the SSL context is built without it.
    if options.key_file and os.path.exists(options.key_file):
        key_file = options.key_file
    else:
        key_file = None
    if options.cert_file and os.path.exists(options.cert_file):
        cert_file = options.cert_file
    else:
        cert_file = None
    if options.ca_dir and os.path.exists(options.ca_dir):
        ca_dir = options.ca_dir
    else:
        ca_dir = None
    verify_peer = not options.no_verify_peer
    # Optional POST body read from file; None means a GET request.
    if options.data_file and os.path.exists(options.data_file):
        data_file = open(options.data_file)
        data = data_file.read()
        data_file.close()
    else:
        data = None
    # "user:password" credentials for HTTP basic auth, split at most once.
    if options.basicauth:
        http_basicauth = options.basicauth.split(':', 1)
    else:
        http_basicauth = None
    # Parse repeated --header "Name: value" options into a dict.
    headers = {}
    if options.headers:
        for h in options.headers:
            key, val = h.split(':', 1)
            headers[key.strip()] = val.lstrip()
    # If a private key file is not specified, the key is assumed to be stored in
    # the certificate file.
    ssl_context = ssl_context_util.make_ssl_context(key_file,
                                                    cert_file,
                                                    None,
                                                    ca_dir,
                                                    verify_peer,
                                                    url)
    config = Configuration(ssl_context,
                           options.debug,
                           http_basicauth=http_basicauth,
                           headers=headers)
    if options.output_file:
        # Write the body to the output file; exit with the (code, message)
        # tuple as the SystemExit payload.
        return_code, return_message = fetch_from_url_to_file(
            url,
            config,
            options.output_file,
            data)[:2]
        raise SystemExit(return_code, return_message)
    else:
        # No output file: print the fetched body to stdout.
        data = fetch_from_url(url, config)
        print(data)
# Module entry point: configure root logging before running the CLI.
if __name__=='__main__':
    logging.basicConfig()
    main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
---
navigation_title: "Terms"
mapped_pages:
- https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-terms-query.html
---
# Terms query [query-dsl-terms-query]
Returns documents that contain one or more **exact** terms in a provided field.
The `terms` query is the same as the [`term` query](/reference/query-languages/query-dsl/query-dsl-term-query.md), except you can search for multiple values. A document will match if it contains at least one of the terms. To search for documents that contain more than one matching term, use the [`terms_set` query](/reference/query-languages/query-dsl/query-dsl-terms-set-query.md).
## Example request [terms-query-ex-request]
The following search returns documents where the `user.id` field contains `kimchy` or `elkbee`.
```console
GET /_search
{
"query": {
"terms": {
"user.id": [ "kimchy", "elkbee" ],
"boost": 1.0
}
}
}
```
## Top-level parameters for `terms` [terms-top-level-params]
`<field>`
: (Optional, object) Field you wish to search.
The value of this parameter is an array of terms you wish to find in the provided field. To return a document, one or more terms must exactly match a field value, including whitespace and capitalization.
By default, {{es}} limits the `terms` query to a maximum of 65,536 terms. You can change this limit using the [`index.max_terms_count`](/reference/elasticsearch/index-settings/index-modules.md#index-max-terms-count) setting.
::::{note}
To use the field values of an existing document as search terms, use the [terms lookup](#query-dsl-terms-lookup) parameters.
::::
`boost`
: (Optional, float) Floating point number used to decrease or increase the [relevance scores](/reference/query-languages/query-dsl/query-filter-context.md#relevance-scores) of a query. Defaults to `1.0`.
You can use the `boost` parameter to adjust relevance scores for searches containing two or more queries.
Boost values are relative to the default value of `1.0`. A boost value between `0` and `1.0` decreases the relevance score. A value greater than `1.0` increases the relevance score.
## Notes [terms-query-notes]
### Highlighting `terms` queries [query-dsl-terms-query-highlighting]
[Highlighting](/reference/elasticsearch/rest-apis/highlighting.md) is best-effort only. {{es}} may not return highlight results for `terms` queries depending on:
* Highlighter type
* Number of terms in the query
### Terms lookup [query-dsl-terms-lookup]
Terms lookup fetches the field values of an existing document. {{es}} then uses those values as search terms. This can be helpful when searching for a large set of terms.
To run a terms lookup, the field’s [`_source`](/reference/elasticsearch/mapping-reference/mapping-source-field.md) must be enabled. You cannot use {{ccs}} to run a terms lookup on a remote index.
::::{note}
By default, {{es}} limits the `terms` query to a maximum of 65,536 terms. This includes terms fetched using terms lookup. You can change this limit using the [`index.max_terms_count`](/reference/elasticsearch/index-settings/index-modules.md#index-max-terms-count) setting.
::::
To reduce network traffic, a terms lookup will fetch the document’s values from a shard on a local data node if possible. If your terms data is not large, consider using an index with a single primary shard that’s fully replicated across all applicable data nodes to minimize network traffic.
To perform a terms lookup, use the following parameters.
#### Terms lookup parameters [query-dsl-terms-lookup-params]
`index`
: (Required, string) Name of the index from which to fetch field values.
`id`
: (Required, string) [ID](/reference/elasticsearch/mapping-reference/mapping-id-field.md) of the document from which to fetch field values.
`path`
: (Required, string) Name of the field from which to fetch field values. {{es}} uses these values as search terms for the query.
If the field values include an array of nested inner objects, you can access those objects using dot notation syntax.
`routing`
: (Optional, string) Custom [routing value](/reference/elasticsearch/mapping-reference/mapping-routing-field.md) of the document from which to fetch term values. If a custom routing value was provided when the document was indexed, this parameter is required.
#### Terms lookup example [query-dsl-terms-lookup-example]
To see how terms lookup works, try the following example.
1. Create an index with a `keyword` field named `color`.
```console
PUT my-index-000001
{
"mappings": {
"properties": {
"color": { "type": "keyword" }
}
}
}
```
2. Index a document with an ID of 1 and values of `["blue", "green"]` in the `color` field.
```console
PUT my-index-000001/_doc/1
{
"color": ["blue", "green"]
}
```
% TEST[continued]
3. Index another document with an ID of 2 and value of `blue` in the `color` field.
```console
PUT my-index-000001/_doc/2
{
"color": "blue"
}
```
% TEST[continued]
4. Use the `terms` query with terms lookup parameters to find documents containing one or more of the same terms as document 2. Include the `pretty` parameter so the response is more readable.
<!--
```console
POST my-index-000001/_refresh
```
% TEST[continued]
-->
```console
GET my-index-000001/_search?pretty
{
"query": {
"terms": {
"color" : {
"index" : "my-index-000001",
"id" : "2",
"path" : "color"
}
}
}
}
```
% TEST[continued]
Because document 2 and document 1 both contain `blue` as a value in the `color` field, {{es}} returns both documents.
```console-result
{
"took" : 17,
"timed_out" : false,
"_shards" : {
"total" : 1,
"successful" : 1,
"skipped" : 0,
"failed" : 0
},
"hits" : {
"total" : {
"value" : 2,
"relation" : "eq"
},
"max_score" : 1.0,
"hits" : [
{
"_index" : "my-index-000001",
"_id" : "1",
"_score" : 1.0,
"_source" : {
"color" : [
"blue",
"green"
]
}
},
{
"_index" : "my-index-000001",
"_id" : "2",
"_score" : 1.0,
"_source" : {
"color" : "blue"
}
}
]
}
}
```
% TESTRESPONSE[s/"took" : 17/"took" : $body.took/]
|
unknown
|
github
|
https://github.com/elastic/elasticsearch
|
docs/reference/query-languages/query-dsl/query-dsl-terms-query.md
|
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Simple not-in-heap bump-pointer traceRegion allocator.
package runtime
import (
"internal/runtime/atomic"
"internal/runtime/sys"
"unsafe"
)
// traceRegionAlloc is a thread-safe region allocator.
// It holds a linked list of traceRegionAllocBlock.
type traceRegionAlloc struct {
	lock     mutex                // serializes installing a new block and pushing to full
	dropping atomic.Bool          // For checking invariants.
	current  atomic.UnsafePointer // *traceRegionAllocBlock
	full     *traceRegionAllocBlock
}

// traceRegionAllocBlock is a block in traceRegionAlloc.
//
// traceRegionAllocBlock is allocated from non-GC'd memory, so it must not
// contain heap pointers. Writes to pointers to traceRegionAllocBlocks do
// not need write barriers.
type traceRegionAllocBlock struct {
	_ sys.NotInHeap
	traceRegionAllocBlockHeader
	data [traceRegionAllocBlockData]byte
}

// traceRegionAllocBlockHeader holds the block's link to the full list and
// off, the bump-pointer offset of the next free byte in data.
type traceRegionAllocBlockHeader struct {
	next *traceRegionAllocBlock
	off  atomic.Uintptr
}

// traceRegionAllocBlockData sizes data so that header+data together
// occupy 64 KiB per block.
const traceRegionAllocBlockData = 64<<10 - unsafe.Sizeof(traceRegionAllocBlockHeader{})
// alloc allocates n-byte block. The block is always aligned to 8 bytes, regardless of platform.
// alloc allocates n-byte block. The block is always aligned to 8 bytes, regardless of platform.
func (a *traceRegionAlloc) alloc(n uintptr) *notInHeap {
	n = alignUp(n, 8)
	if n > traceRegionAllocBlockData {
		throw("traceRegion: alloc too large")
	}
	if a.dropping.Load() {
		throw("traceRegion: alloc with concurrent drop")
	}

	// Try to bump-pointer allocate into the current block.
	block := (*traceRegionAllocBlock)(a.current.Load())
	if block != nil {
		// off.Add returns the new offset, so this allocation occupies
		// data[r-n:r]. If r overshoots the block, the block is full
		// and we fall through to the slow path below.
		r := block.off.Add(n)
		if r <= uintptr(len(block.data)) {
			return (*notInHeap)(unsafe.Pointer(&block.data[r-n]))
		}
	}

	// Try to install a new block.
	var x *notInHeap
	systemstack(func() {
		// Acquire a.lock on the systemstack to avoid stack growth
		// and accidentally entering the tracer again.
		lock(&a.lock)

		// Check block again under the lock. Someone may
		// have gotten here first.
		block = (*traceRegionAllocBlock)(a.current.Load())
		if block != nil {
			r := block.off.Add(n)
			if r <= uintptr(len(block.data)) {
				unlock(&a.lock)
				x = (*notInHeap)(unsafe.Pointer(&block.data[r-n]))
				return
			}
			// Add the existing block to the full list.
			block.next = a.full
			a.full = block
		}

		// Allocate a new block.
		block = (*traceRegionAllocBlock)(sysAlloc(unsafe.Sizeof(traceRegionAllocBlock{}), &memstats.other_sys, "trace arena alloc"))
		if block == nil {
			throw("traceRegion: out of memory")
		}

		// Allocate space for our current request, so we always make
		// progress.
		block.off.Store(n)
		x = (*notInHeap)(unsafe.Pointer(&block.data[0]))

		// Publish the new block.
		a.current.Store(unsafe.Pointer(block))
		unlock(&a.lock)
	})
	return x
}
// drop frees all previously allocated memory and resets the allocator.
//
// drop is not safe to call concurrently with other calls to drop or with calls to alloc. The caller
// must ensure that it is not possible for anything else to be using the same structure.
func (a *traceRegionAlloc) drop() {
	// dropping is purely an invariant check: alloc throws if it observes
	// it set, since drop must not run concurrently with alloc.
	a.dropping.Store(true)
	// Free every block on the full list.
	for a.full != nil {
		block := a.full
		a.full = block.next
		sysFree(unsafe.Pointer(block), unsafe.Sizeof(traceRegionAllocBlock{}), &memstats.other_sys)
	}
	// Free the current block, if any, and reset the allocator.
	if current := a.current.Load(); current != nil {
		sysFree(current, unsafe.Sizeof(traceRegionAllocBlock{}), &memstats.other_sys)
		a.current.Store(nil)
	}
	a.dropping.Store(false)
}
|
go
|
github
|
https://github.com/golang/go
|
src/runtime/traceregion.go
|
"""Various tools used by MIME-reading or MIME-writing programs."""
import os
import rfc822
import tempfile
__all__ = ["Message","choose_boundary","encode","decode","copyliteral",
"copybinary"]
class Message(rfc822.Message):
    """A derived class of rfc822.Message that knows about MIME headers and
    contains some hooks for decoding encoded and multipart messages."""

    def __init__(self, fp, seekable = 1):
        rfc822.Message.__init__(self, fp, seekable)
        # Raw header values; either may be None when the header is absent.
        self.encodingheader = \
                self.getheader('content-transfer-encoding')
        self.typeheader = \
                self.getheader('content-type')
        # Pre-compute type/maintype/subtype and the parameter list.
        self.parsetype()
        self.parseplist()

    def parsetype(self):
        """Split the Content-Type header into type, maintype and subtype,
        stashing the raw parameter text in self.plisttext."""
        str = self.typeheader  # NOTE: shadows the builtin; kept as-is
        if str is None:
            # Default used when no Content-Type header is present.
            str = 'text/plain'
        if ';' in str:
            # Everything from the first ';' on is the parameter list.
            i = str.index(';')
            self.plisttext = str[i:]
            str = str[:i]
        else:
            self.plisttext = ''
        fields = str.split('/')
        for i in range(len(fields)):
            fields[i] = fields[i].strip().lower()
        self.type = '/'.join(fields)
        self.maintype = fields[0]
        self.subtype = '/'.join(fields[1:])

    def parseplist(self):
        """Parse self.plisttext (';'-separated key=value pairs) into
        self.plist, lowercasing parameter names but not values."""
        str = self.plisttext
        self.plist = []
        while str[:1] == ';':
            str = str[1:]
            if ';' in str:
                # XXX Should parse quotes!
                end = str.index(';')
            else:
                end = len(str)
            f = str[:end]
            if '=' in f:
                i = f.index('=')
                f = f[:i].strip().lower() + \
                        '=' + f[i+1:].strip()
            self.plist.append(f.strip())
            str = str[end:]

    def getplist(self):
        """Return the parameter list as a list of 'name=value' strings."""
        return self.plist

    def getparam(self, name):
        """Return the (unquoted) value of the named parameter, or None."""
        name = name.lower() + '='
        n = len(name)
        for p in self.plist:
            if p[:n] == name:
                return rfc822.unquote(p[n:])
        return None

    def getparamnames(self):
        """Return the names of all Content-Type parameters, lowercased."""
        result = []
        for p in self.plist:
            i = p.find('=')
            if i >= 0:
                result.append(p[:i].lower())
        return result

    def getencoding(self):
        """Return the Content-Transfer-Encoding, defaulting to '7bit'."""
        if self.encodingheader is None:
            return '7bit'
        return self.encodingheader.lower()

    def gettype(self):
        """Return the full 'maintype/subtype' string."""
        return self.type

    def getmaintype(self):
        """Return the main content type (part before the '/')."""
        return self.maintype

    def getsubtype(self):
        """Return the subtype (part after the first '/')."""
        return self.subtype
# Utility functions
# -----------------
try:
    import thread
except ImportError:
    # No real threads available; dummy_thread provides a no-op lock.
    import dummy_thread as thread
# Lock guarding the module-wide counter below; the module reference is
# dropped so only the lock object remains.
_counter_lock = thread.allocate_lock()
del thread

_counter = 0  # monotonically increasing counter backing choose_boundary()
def _get_next_counter():
    """Atomically bump the module-wide counter and return its new value."""
    global _counter
    with _counter_lock:
        _counter += 1
        return _counter
_prefix = None  # cached "hostid.uid.pid" prefix, computed on first use

def choose_boundary():
    """Return a string usable as a multipart boundary.

    The string chosen is unique within a single program run, and
    incorporates the user id (if available), process id (if available),
    and current time. So it's very unlikely the returned string appears
    in message text, but there's no guarantee.

    The boundary contains dots so you have to quote it in the header."""
    global _prefix
    import time
    if _prefix is None:
        import socket
        try:
            hostid = socket.gethostbyname(socket.gethostname())
        except socket.gaierror:
            # Host name does not resolve; fall back to loopback.
            hostid = '127.0.0.1'
        try:
            uid = repr(os.getuid())
        except AttributeError:
            # Platforms without os.getuid (e.g. Windows).
            uid = '1'
        try:
            pid = repr(os.getpid())
        except AttributeError:
            pid = '1'
        _prefix = hostid + '.' + uid + '.' + pid
    return "%s.%.3f.%d" % (_prefix, time.time(), _get_next_counter())
# Subroutines for decoding some common content-transfer-types
def decode(input, output, encoding):
    """Decode common content-transfer-encodings (base64, quopri, uuencode).

    input and output are open file-like objects; encoding is the
    lowercased Content-Transfer-Encoding value.  '7bit'/'8bit' are
    identity encodings and are copied through verbatim.  Anything else
    falls back to the external-command table ``decodetab``, and an
    unrecognized encoding raises ValueError.
    """
    if encoding == 'base64':
        import base64
        return base64.decode(input, output)
    if encoding == 'quoted-printable':
        import quopri
        return quopri.decode(input, output)
    if encoding in ('uuencode', 'x-uuencode', 'uue', 'x-uue'):
        import uu
        return uu.decode(input, output)
    if encoding in ('7bit', '8bit'):
        # Identity encodings: just copy the data through.
        return output.write(input.read())
    if encoding in decodetab:
        pipethrough(input, decodetab[encoding], output)
    else:
        # Parenthesized raise form is valid on both Python 2 and 3;
        # the old "raise ValueError, ..." statement is Python-2-only.
        raise ValueError('unknown Content-Transfer-Encoding: %s' % encoding)
def encode(input, output, encoding):
    """Encode common content-transfer-encodings (base64, quopri, uuencode).

    input and output are open file-like objects; encoding names the
    target Content-Transfer-Encoding.  '7bit'/'8bit' are identity
    encodings and are copied through verbatim.  Anything else falls back
    to the external-command table ``encodetab``, and an unrecognized
    encoding raises ValueError.
    """
    if encoding == 'base64':
        import base64
        return base64.encode(input, output)
    if encoding == 'quoted-printable':
        import quopri
        return quopri.encode(input, output, 0)
    if encoding in ('uuencode', 'x-uuencode', 'uue', 'x-uue'):
        import uu
        return uu.encode(input, output)
    if encoding in ('7bit', '8bit'):
        # Identity encodings: just copy the data through.
        return output.write(input.read())
    if encoding in encodetab:
        pipethrough(input, encodetab[encoding], output)
    else:
        # Parenthesized raise form is valid on both Python 2 and 3;
        # the old "raise ValueError, ..." statement is Python-2-only.
        raise ValueError('unknown Content-Transfer-Encoding: %s' % encoding)
# The following is no longer used for standard encodings
# XXX This requires that uudecode and mmencode are in $PATH
# Shell pipeline that rewrites the "begin" line so uudecode writes to a
# fixed temp file, then emits the decoded bytes on stdout.
uudecode_pipe = '''(
TEMP=/tmp/@uu.$$
sed "s%^begin [0-7][0-7]* .*%begin 600 $TEMP%" | uudecode
cat $TEMP
rm $TEMP
)'''

# Encoding name -> shell command that decodes stdin to stdout.
decodetab = {
    'uuencode': uudecode_pipe,
    'x-uuencode': uudecode_pipe,
    'uue': uudecode_pipe,
    'x-uue': uudecode_pipe,
    'quoted-printable': 'mmencode -u -q',
    'base64': 'mmencode -u -b',
}

# Encoding name -> shell command that encodes stdin to stdout.
encodetab = {
    'x-uuencode': 'uuencode tempfile',
    'uuencode': 'uuencode tempfile',
    'x-uue': 'uuencode tempfile',
    'uue': 'uuencode tempfile',
    'quoted-printable': 'mmencode -q',
    'base64': 'mmencode -b',
}
def pipeto(input, command):
    """Copy ``input`` line by line into the standard input of shell ``command``."""
    pipe = os.popen(command, 'w')
    copyliteral(input, pipe)
    pipe.close()
def pipethrough(input, command, output):
    """Filter ``input`` through shell ``command``, writing its stdout to ``output``."""
    # Spool the input to a named temp file so the command can read it by
    # redirection rather than requiring a writable pipe.
    (fd, tempname) = tempfile.mkstemp()
    temp = os.fdopen(fd, 'w')
    copyliteral(input, temp)
    temp.close()
    pipe = os.popen(command + ' <' + tempname, 'r')
    copybinary(pipe, output)
    pipe.close()
    os.unlink(tempname)
def copyliteral(input, output):
    """Copy from input to output, one line at a time, until end of file."""
    line = input.readline()
    while line:
        output.write(line)
        line = input.readline()
def copybinary(input, output):
    """Copy from input to output in fixed-size chunks until end of file."""
    chunk_size = 8192
    chunk = input.read(chunk_size)
    while chunk:
        output.write(chunk)
        chunk = input.read(chunk_size)
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""
Django settings for test_mode project.
Generated by 'django-admin startproject' using Django 1.10.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'hafds2)rn#41%bl9u9$=t0bg3q5gm+hk^zq&y=-0mb=t8#h)jl'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'test_model',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'test_mode.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'test_mode.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
|
unknown
|
codeparrot/codeparrot-clean
| ||
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import base64
import json
import os
import socket
import struct
import time
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleFileNotFound, AnsibleConnectionFailure
from ansible.module_utils._text import to_bytes
from ansible.parsing.utils.jsonify import jsonify
from ansible.plugins.connection import ConnectionBase
from ansible.utils.encrypt import key_for_hostname, keyczar_encrypt, keyczar_decrypt
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
# the chunk size to read and send, assuming mtu 1500 and
# leaving room for base64 (+33%) encoding and header (8 bytes)
# ((1400-8)/4)*3) = 1044
# which leaves room for the TCP/IP header. We set this to a
# multiple of the value to speed up file reads.
CHUNK_SIZE=1044*20
class Connection(ConnectionBase):
    ''' raw socket accelerated connection '''

    transport = 'accelerate'
    has_pipelining = False
    # 'runas' is excluded: this transport does not support it.
    become_methods = frozenset(C.BECOME_METHODS).difference(['runas'])

    def __init__(self, *args, **kwargs):
        super(Connection, self).__init__(*args, **kwargs)
        self.conn = None
        # Per-host key used with keyczar_encrypt/keyczar_decrypt for all
        # traffic exchanged with the accelerate daemon.
        self.key = key_for_hostname(self._play_context.remote_addr)

    def _connect(self):
        ''' activates the connection object '''
        if not self._connected:
            wrong_user = False
            tries = 3
            self.conn = socket.socket()
            self.conn.settimeout(C.ACCELERATE_CONNECT_TIMEOUT)
            display.vvvv("attempting connection to %s via the accelerated port %d" % (self._play_context.remote_addr, self._play_context.accelerate_port), host=self._play_context.remote_addr)
            # Retry the TCP connect a few times before giving up.
            while tries > 0:
                try:
                    self.conn.connect((self._play_context.remote_addr,self._play_context.accelerate_port))
                    break
                except socket.error:
                    display.vvvv("connection to %s failed, retrying..." % self._play_context.remote_addr, host=self._play_context.remote_addr)
                    time.sleep(0.1)
                    tries -= 1
            if tries == 0:
                display.vvv("Could not connect via the accelerated connection, exceeded # of tries", host=self._play_context.remote_addr)
                raise AnsibleConnectionFailure("Failed to connect to %s on the accelerated port %s" % (self._play_context.remote_addr, self._play_context.accelerate_port))
            elif wrong_user:
                display.vvv("Restarting daemon with a different remote_user", host=self._play_context.remote_addr)
                raise AnsibleError("The accelerated daemon was started on the remote with a different user")

            # Switch from the connect timeout to the steady-state timeout.
            self.conn.settimeout(C.ACCELERATE_TIMEOUT)
            if not self.validate_user():
                # the accelerated daemon was started with a
                # different remote_user. The above command
                # should have caused the accelerate daemon to
                # shutdown, so we'll reconnect.
                wrong_user = True

            self._connected = True
        return self

    def send_data(self, data):
        """Send a length-prefixed frame: 8-byte big-endian length + payload."""
        packed_len = struct.pack('!Q',len(data))
        # sendall() returns None on success and raises on error, so callers
        # that test this return value are being defensive.
        return self.conn.sendall(packed_len + data)

    def recv_data(self):
        """Receive one length-prefixed frame; returns None if the peer closed."""
        header_len = 8 # size of a packed unsigned long long
        data = b""
        try:
            display.vvvv("in recv_data(), waiting for the header", host=self._play_context.remote_addr)
            # First read exactly 8 bytes of length header.
            while len(data) < header_len:
                d = self.conn.recv(header_len - len(data))
                if not d:
                    display.vvvv("received nothing, bailing out", host=self._play_context.remote_addr)
                    return None
                data += d
            display.vvvv("got the header, unpacking", host=self._play_context.remote_addr)
            data_len = struct.unpack('!Q',data[:header_len])[0]
            data = data[header_len:]
            display.vvvv("data received so far (expecting %d): %d" % (data_len, len(data)), host=self._play_context.remote_addr)
            # Then read the payload until data_len bytes have arrived.
            while len(data) < data_len:
                d = self.conn.recv(data_len - len(data))
                if not d:
                    display.vvvv("received nothing, bailing out", host=self._play_context.remote_addr)
                    return None
                display.vvvv("received %d bytes" % (len(d)), host=self._play_context.remote_addr)
                data += d
            display.vvvv("received all of the data, returning", host=self._play_context.remote_addr)
            return data
        except socket.timeout:
            raise AnsibleError("timed out while waiting to receive data")

    def validate_user(self):
        '''
        Checks the remote uid of the accelerated daemon vs. the
        one specified for this play and will cause the accel
        daemon to exit if they don't match
        '''
        display.vvvv("sending request for validate_user", host=self._play_context.remote_addr)
        data = dict(
            mode='validate_user',
            username=self._play_context.remote_user,
        )
        data = jsonify(data)
        data = keyczar_encrypt(self.key, data)
        if self.send_data(data):
            raise AnsibleError("Failed to send command to %s" % self._play_context.remote_addr)

        display.vvvv("waiting for validate_user response", host=self._play_context.remote_addr)
        while True:
            # we loop here while waiting for the response, because a
            # long running command may cause us to receive keepalive packets
            # ({"pong":"true"}) rather than the response we want.
            response = self.recv_data()
            if not response:
                raise AnsibleError("Failed to get a response from %s" % self._play_context.remote_addr)
            response = keyczar_decrypt(self.key, response)
            response = json.loads(response)
            if "pong" in response:
                # it's a keepalive, go back to waiting
                display.vvvv("received a keepalive packet", host=self._play_context.remote_addr)
                continue
            else:
                display.vvvv("received the validate_user response: %s" % (response), host=self._play_context.remote_addr)
                break

        if response.get('failed'):
            return False
        else:
            return response.get('rc') == 0

    def exec_command(self, cmd, in_data=None, sudoable=True):
        ''' run a command on the remote host '''

        super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)

        if in_data:
            raise AnsibleError("Internal Error: this module does not support optimized module pipelining")

        display.vvv("EXEC COMMAND %s" % cmd, host=self._play_context.remote_addr)

        data = dict(
            mode='command',
            cmd=cmd,
            executable=C.DEFAULT_EXECUTABLE,
        )
        data = jsonify(data)
        data = keyczar_encrypt(self.key, data)
        if self.send_data(data):
            raise AnsibleError("Failed to send command to %s" % self._play_context.remote_addr)

        while True:
            # we loop here while waiting for the response, because a
            # long running command may cause us to receive keepalive packets
            # ({"pong":"true"}) rather than the response we want.
            response = self.recv_data()
            if not response:
                raise AnsibleError("Failed to get a response from %s" % self._play_context.remote_addr)
            response = keyczar_decrypt(self.key, response)
            response = json.loads(response)
            if "pong" in response:
                # it's a keepalive, go back to waiting
                display.vvvv("received a keepalive packet", host=self._play_context.remote_addr)
                continue
            else:
                display.vvvv("received the response", host=self._play_context.remote_addr)
                break

        return (response.get('rc', None), response.get('stdout', ''), response.get('stderr', ''))

    def put_file(self, in_path, out_path):
        ''' transfer a file from local to remote '''
        display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr)

        in_path = to_bytes(in_path, errors='surrogate_or_strict')

        if not os.path.exists(in_path):
            raise AnsibleFileNotFound("file or module does not exist: %s" % in_path)

        # NOTE: file() is the Python 2 builtin equivalent of open().
        fd = file(in_path, 'rb')
        fstat = os.stat(in_path)
        try:
            display.vvv("PUT file is %d bytes" % fstat.st_size, host=self._play_context.remote_addr)
            last = False
            # Stream the file in CHUNK_SIZE pieces; 'last' marks the final chunk.
            while fd.tell() <= fstat.st_size and not last:
                display.vvvv("file position currently %ld, file size is %ld" % (fd.tell(), fstat.st_size), host=self._play_context.remote_addr)
                data = fd.read(CHUNK_SIZE)
                if fd.tell() >= fstat.st_size:
                    last = True
                data = dict(mode='put', data=base64.b64encode(data), out_path=out_path, last=last)
                if self._play_context.become:
                    data['user'] = self._play_context.become_user
                data = jsonify(data)
                data = keyczar_encrypt(self.key, data)

                if self.send_data(data):
                    raise AnsibleError("failed to send the file to %s" % self._play_context.remote_addr)

                # Each chunk is acknowledged individually by the daemon.
                response = self.recv_data()
                if not response:
                    raise AnsibleError("Failed to get a response from %s" % self._play_context.remote_addr)
                response = keyczar_decrypt(self.key, response)
                response = json.loads(response)

                if response.get('failed',False):
                    raise AnsibleError("failed to put the file in the requested location")
        finally:
            fd.close()
            display.vvvv("waiting for final response after PUT", host=self._play_context.remote_addr)
            response = self.recv_data()
            if not response:
                raise AnsibleError("Failed to get a response from %s" % self._play_context.remote_addr)
            response = keyczar_decrypt(self.key, response)
            response = json.loads(response)

            if response.get('failed',False):
                raise AnsibleError("failed to put the file in the requested location")

    def fetch_file(self, in_path, out_path):
        ''' save a remote file to the specified path '''
        display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr)

        data = dict(mode='fetch', in_path=in_path)
        data = jsonify(data)
        data = keyczar_encrypt(self.key, data)
        if self.send_data(data):
            raise AnsibleError("failed to initiate the file fetch with %s" % self._play_context.remote_addr)

        fh = open(to_bytes(out_path, errors='surrogate_or_strict'), "w")
        try:
            bytes = 0  # NOTE: shadows the builtin; counts bytes written
            while True:
                # Each response carries one base64 chunk; 'last' ends the loop.
                response = self.recv_data()
                if not response:
                    raise AnsibleError("Failed to get a response from %s" % self._play_context.remote_addr)
                response = keyczar_decrypt(self.key, response)
                response = json.loads(response)
                if response.get('failed', False):
                    raise AnsibleError("Error during file fetch, aborting")
                out = base64.b64decode(response['data'])
                fh.write(out)
                bytes += len(out)
                # send an empty response back to signify we
                # received the last chunk without errors
                data = jsonify(dict())
                data = keyczar_encrypt(self.key, data)
                if self.send_data(data):
                    raise AnsibleError("failed to send ack during file fetch")
                if response.get('last', False):
                    break
        finally:
            # we don't currently care about this final response,
            # we just receive it and drop it. It may be used at some
            # point in the future or we may just have the put/fetch
            # operations not send back a final response at all
            response = self.recv_data()
            display.vvv("FETCH wrote %d bytes to %s" % (bytes, out_path), host=self._play_context.remote_addr)
            fh.close()

    def close(self):
        ''' terminate the connection '''
        # Be a good citizen
        try:
            self.conn.close()
        except:
            pass
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Volatility
#
# Authors:
# Mike Auty <mike.auty@gmail.com>
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
import volatility.timefmt as timefmt
import volatility.obj as obj
import volatility.utils as utils
import volatility.commands as commands
#pylint: disable-msg=C0111
class DateTime(commands.Command):
    """A simple example plugin that gets the date/time information from a Windows image"""

    def calculate(self):
        """Calculate and carry out any processing that may take time upon the image"""
        # Load the address space
        addr_space = utils.load_as(self._config)

        # Call a subfunction so that it can be used by other plugins
        return self.get_image_time(addr_space)

    def get_image_time(self, addr_space):
        """Extracts the time and date from the KUSER_SHARED_DATA area

        Returns a dict with 'ImageDatetime' (system time object) and
        'ImageTz' (a timefmt.OffsetTzInfo for the image's timezone bias).
        """
        # Get the Image Datetime
        result = {}

        # Create a VOLATILITY_MAGIC object to look up the location of certain constants
        # Get the KUSER_SHARED_DATA location
        KUSER_SHARED_DATA = obj.VolMagic(addr_space).KUSER_SHARED_DATA.v()

        # Create the _KUSER_SHARED_DATA object at the appropriate offset
        k = obj.Object("_KUSER_SHARED_DATA",
                       offset = KUSER_SHARED_DATA,
                       vm = addr_space)

        # Start reading members from it
        result['ImageDatetime'] = k.SystemTime
        # TimeZoneBias is in 100ns units; divide by 10^7 to get seconds,
        # negated because the bias is subtracted from UTC.
        result['ImageTz'] = timefmt.OffsetTzInfo(-k.TimeZoneBias.as_windows_timestamp() / 10000000)

        # Return any results we got
        return result

    def render_text(self, outfd, data):
        """Renders the calculated data as text to outfd"""
        # Convert the result into a datetime object for display in local and non local format
        dt = data['ImageDatetime'].as_datetime()

        # Display the datetime in UTC as taken from the image
        outfd.write("Image date and time : {0}\n".format(data['ImageDatetime']))
        # Display the datetime taking into account the timezone of the image itself
        outfd.write("Image local date and time : {0}\n".format(timefmt.display_datetime(dt, data['ImageTz'])))
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Compare FSPS model magnitudes and spectrum for cluster DAO69 to data."""

from __future__ import (division, print_function, absolute_import,
                        unicode_literals)

import pyfits
import numpy as np
import matplotlib.pyplot as pl
import fsps

# Measurements of cluster parameters.
tage = 10. ** (8.04 - 9)  # log10(age/yr)=8.04 converted to Gyr
logmass = 4.09            # log10 of cluster mass
dist_mod = 24.5           # distance modulus

# Set up the stellar population model.
sp = fsps.StellarPopulation(imf_type=2, dust_type=1, mwr=3.1, dust2=0.3)

# The measured magnitudes from the literature.
data = {"wfc3_f160w": 16.386,
        "wfc3_f275w": 17.398,
        "wfc_acs_f814w": 17.155}

# There are also a few filters that we have data for but aren't included in
# the standard FSPS install:
# "F110W": 16.877,
# "F336W": 17.349,
# "F475W": 17.762,

# Load the observed spectrum.
f = pyfits.open("DAO69.fits")
obs_spec = np.array(f[0].data, dtype=float)
f.close()
obs_spec /= 5e-20

# The observed wavelength grid in the data is magically this:
obs_lambda = np.arange(0, 4540) * 1.2 + 3700

# Compute the model magnitudes.
# NOTE: dict.items() behaves the same here on Python 2 and also works on
# Python 3; the original .iteritems() is Python-2-only.
for b, v in data.items():
    print(b, v, sp.get_mags(zmet=20, tage=tage, band=b) - 2.5 * logmass
          + dist_mod)

# Compute the model spectrum in ``L_sun / A``.
lam, spec = sp.get_spectrum(zmet=20, tage=tage, peraa=True)
spec *= 3.839e33 * 10. ** (logmass - dist_mod / 2.5)

f = 1.0  # obs_spec[0, obs_lambda < 5000.][-1] / spec[lam < 5000.][-1]
print(obs_spec[0, obs_lambda < 5000.][-1] / spec[lam < 5000.][-1])

pl.loglog(obs_lambda, obs_spec[0], "k")
pl.loglog(lam, spec * f, "r")
pl.xlim(3700, 7000)
# pl.ylim(10 ** 3, 10 ** 4.5)
pl.savefig("spectrum.png")
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Script to generate libyal m4 files."""
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import logging
import os
import sys
from yaldevtools import configuration
from yaldevtools import output_writers
from yaldevtools.source_generators import m4 as m4_source_generator
def Main():
  """The main program function.

  Returns:
    bool: True if successful or False if not.
  """
  parser = argparse.ArgumentParser(description=(
      'Generates a m4 file for a libyal library.'))

  parser.add_argument(
      'configuration_file', action='store', metavar='CONFIGURATION_FILE',
      default='source.conf', help='The source generation configuration file.')

  parser.add_argument(
      '-o', '--output', dest='output_directory', action='store',
      metavar='OUTPUT_DIRECTORY', default=None,
      help='path of the output files to write to.')

  parser.add_argument(
      '-p', '--projects', dest='projects_directory', action='store',
      metavar='PROJECTS_DIRECTORY', default=None,
      help='path of the projects.')

  options = parser.parse_args()

  # Validate the command line arguments before doing any work.
  if not options.configuration_file:
    print('Config file missing.')
    print('')
    parser.print_help()
    print('')
    return False

  if not os.path.exists(options.configuration_file):
    print('No such configuration file: {0:s}.'.format(
        options.configuration_file))
    print('')
    return False

  if options.output_directory and not os.path.exists(options.output_directory):
    print('No such output directory: {0:s}.'.format(options.output_directory))
    print('')
    return False

  logging.basicConfig(
      level=logging.INFO, format='[%(levelname)s] %(message)s')

  project_configuration = configuration.ProjectConfiguration()
  project_configuration.ReadFromFile(options.configuration_file)

  # The libyal directory is two levels up from this script.
  libyal_directory = os.path.dirname(
      os.path.dirname(os.path.abspath(__file__)))

  # Default the projects directory to the parent of the libyal directory.
  projects_directory = (
      options.projects_directory or os.path.dirname(libyal_directory))

  # TODO: generate m4 file
  return False
if __name__ == '__main__':
  # Exit with status 0 on success, 1 on failure.
  if not Main():
    sys.exit(1)
  else:
    sys.exit(0)
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for Copyright Scanner utilities."""
import os
import re
import sys
import unittest
test_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.extend([
os.path.normpath(os.path.join(test_dir, '..', '..', 'tools')),
os.path.join(test_dir),
])
import find_depot_tools
from testing_support.super_mox import SuperMoxTestBase
import copyright_scanner
class FindCopyrightsTest(SuperMoxTestBase):
  """Tests copyright_scanner.FindCopyrights against in-memory file contents."""

  def setUp(self):
    SuperMoxTestBase.setUp(self)
    # Mock input_api exposing only the members copyright_scanner uses.
    self.input_api = self.mox.CreateMockAnything()
    self.input_api.re = re
    self.input_api.os_path = os.path
    self.input_api.os_walk = os.walk

  def ShouldMatchReferenceOutput(self, test_data, expected_output):
    """Assert FindCopyrights yields expected_output for each test datum."""
    for data in test_data:
      # Make ReadFile return the in-memory datum regardless of path args.
      self.input_api.ReadFile = lambda _1, _2: data
      actual_output = copyright_scanner.FindCopyrights(self.input_api, '', [''])
      self.assertEqual(
        expected_output,
        actual_output,
        'Input """\n%s""", expected output: "%s", actual: "%s"' % \
          (data, expected_output, actual_output));

  def testCopyrightedFiles(self):
    # Various copyright notice spellings that should all be recognized.
    test_data = [
      '// (c) 2014 Google Inc.\n//\n// (a) One\n//\n// (b) Two\n//\n',
      'Copyright 2014 Google Inc.\n',
      'Copr. 2014 Google Inc.',
      '\xc2\xa9 2014 Google Inc.',
      'Copyright 2014 Google Inc.'
    ]
    self.ShouldMatchReferenceOutput(test_data, [['2014 Google Inc.']])

  def testGeneratedFiles(self):
    # Generated-file markers take precedence over copyright notices.
    test_data = [
      'ALL CHANGES MADE IN THIS FILE WILL BE LOST\nCopyright 2014 Google\n',
      'GENERATED FILE. DO NOT EDIT\nCopyright 2014 Google\n',
      'GENERATED. DO NOT DELETE THIS FILE.\nCopyright 2014 Google\n',
      'DO NOT EDIT\nCopyright 2014 Google\n',
      'DO NOT DELETE THIS FILE\nCopyright 2014 Google\n',
      'All changes made in this file will be lost\nCopyright 2014 Google\n',
      'Automatically generated file\nCopyright 2014 Google\n',
      'Synthetically generated dummy file\nCopyright 2014 Google\n',
      'Generated data (by gnugnu)\nCopyright 2014 Google\n'
    ]
    self.ShouldMatchReferenceOutput(test_data, [['GENERATED FILE']])

  def testNonCopyrightedFiles(self):
    # Strings that merely mention "copyright" must not be flagged.
    test_data = [
      'std::cout << "Copyright 2014 Google"\n',
      '// Several points can be made:\n//\n// (a) One\n//\n// (b) Two\n'
        '//\n// (c) Three\n//\n',
      'See \'foo\' for copyright information.\n',
      'See \'foo\' for the copyright notice.\n',
      'See \'foo\' for the copyright and other things.\n'
    ]
    self.ShouldMatchReferenceOutput(test_data, [['*No copyright*']])

  def testNonGeneratedFiles(self):
    # Phrases that resemble, but are not, generated-file markers.
    test_data = [
      'This file was prohibited from being generated.\n',
      'Please do not delete our files! They are valuable to us.\n',
      'Manually generated from dice rolls.\n',
      '"""This Python script produces generated data\n"""\n',
      '\'\'\'This Python script produces generated data\n\'\'\'\n'
    ]
    self.ShouldMatchReferenceOutput(test_data, [['*No copyright*']])
class FindFilesTest(SuperMoxTestBase):
  """Tests for copyright_scanner.FindFiles path selection.

  FindFiles takes start paths (plain files or directories to walk) plus a
  list of whitelisted path prefixes, and is expected to return only .cc
  files that are neither under a 'third_party' directory nor under a
  whitelisted prefix.
  """

  def setUp(self):
    SuperMoxTestBase.setUp(self)
    # Minimal fake of the presubmit input_api: FindFiles only reads the
    # `re`, `os_path` (and, per-test, `os_walk`/`isfile`) attributes.
    self.input_api = self.mox.CreateMockAnything()
    self.input_api.re = re
    self.input_api.os_path = os.path

  def testFilesAsStartPaths(self):
    # When every start path is a file (isfile -> True), FindFiles filters
    # the given list directly instead of walking directories.
    join = self.input_api.os_path.join
    self.input_api.os_path.isfile = lambda _: True
    input_files = [
      'a',
      'a.cc',
      'a.txt',
      join('foo', 'a'),
      join('foo', 'a.cc'),
      join('foo', 'a.txt'),
      join('third_party', 'a'),
      join('third_party', 'a.cc'),
      join('third_party', 'a.txt'),
      join('foo', 'third_party', 'a'),
      join('foo', 'third_party', 'a.cc'),
      join('foo', 'third_party', 'a.txt'),
    ]
    root_dir = os.path.sep + 'src'
    # No whitelist: only non-third_party .cc files survive.
    actual = copyright_scanner.FindFiles(
        self.input_api, root_dir, input_files, [''])
    self.assertEqual(['a.cc', join('foo', 'a.cc')], actual)
    # Whitelisting 'third_party' changes nothing — it is excluded anyway.
    actual = copyright_scanner.FindFiles(
        self.input_api, root_dir, input_files, ['third_party'])
    self.assertEqual(['a.cc', join('foo', 'a.cc')], actual)
    # Whitelisting 'foo' removes everything under foo/.
    actual = copyright_scanner.FindFiles(
        self.input_api, root_dir, input_files, ['foo'])
    self.assertEqual(['a.cc'], actual)
    actual = copyright_scanner.FindFiles(
        self.input_api, root_dir, input_files, ['foo', 'third_party'])
    self.assertEqual(['a.cc'], actual)
    # Whitelisting only foo/third_party keeps foo/a.cc itself.
    actual = copyright_scanner.FindFiles(
        self.input_api, root_dir, input_files, [join('foo', 'third_party')])
    self.assertEqual(['a.cc', join('foo', 'a.cc')], actual)

  def testDirAsStartPath(self):
    # When the start path is a directory (isfile -> False), FindFiles walks
    # it via input_api.os_walk; each sub-case fakes a walk rooted at a
    # different subdirectory and checks the filtering of its entries.
    self.input_api.os_path.isfile = lambda _: False
    join = self.input_api.os_path.join
    normpath = self.input_api.os_path.normpath
    root_dir = os.path.sep + 'src'
    scan_from = '.'
    base_path = join(root_dir, scan_from)
    def mock_os_walk(path):
      # Returns an os.walk-compatible callable yielding a single directory
      # `path` (relative to base_path) containing files a, a.cc, a.txt.
      return lambda _: [(join(base_path, path), [''], ['a', 'a.cc', 'a.txt'])]
    # Files at the root: only a.cc is kept.
    self.input_api.os_walk = mock_os_walk('')
    actual = map(normpath, copyright_scanner.FindFiles(
        self.input_api, root_dir, [scan_from], ['']))
    self.assertEqual(['a.cc'], actual)
    # Files under third_party/ are always excluded.
    self.input_api.os_walk = mock_os_walk('third_party')
    actual = map(normpath, copyright_scanner.FindFiles(
        self.input_api, root_dir, [scan_from], ['']))
    self.assertEqual([], actual)
    # Files under foo/ are kept unless 'foo' is whitelisted.
    self.input_api.os_walk = mock_os_walk('foo')
    actual = map(normpath, copyright_scanner.FindFiles(
        self.input_api, root_dir, [scan_from], ['']))
    self.assertEqual([join('foo', 'a.cc')], actual)
    self.input_api.os_walk = mock_os_walk('foo')
    actual = map(normpath, copyright_scanner.FindFiles(
        self.input_api, root_dir, [scan_from], ['foo']))
    self.assertEqual([], actual)
    # A whitelisted prefix also excludes nested directories (foo/bar).
    self.input_api.os_walk = mock_os_walk(join('foo', 'bar'))
    actual = map(normpath, copyright_scanner.FindFiles(
        self.input_api, root_dir, [scan_from], ['foo']))
    self.assertEqual([], actual)
    # A nested third_party is excluded with or without a whitelist entry.
    self.input_api.os_walk = mock_os_walk(join('foo', 'third_party'))
    actual = map(normpath, copyright_scanner.FindFiles(
        self.input_api, root_dir, [scan_from], ['']))
    self.assertEqual([], actual)
    self.input_api.os_walk = mock_os_walk(join('foo', 'third_party'))
    actual = map(normpath, copyright_scanner.FindFiles(
        self.input_api, root_dir, [scan_from], [join('foo', 'third_party')]))
    self.assertEqual([], actual)
class AnalyzeScanResultsTest(SuperMoxTestBase):
  """Tests for copyright_scanner.AnalyzeScanResults."""

  def setUp(self):
    SuperMoxTestBase.setUp(self)
    # AnalyzeScanResults only needs os_path from the input_api.
    self.input_api = self.mox.CreateMockAnything()
    self.input_api.os_path = os.path

  def testAnalyzeScanResults(self):
    # Tests whitelisted vs. current files state logic.
    #
    # Whitelisted - in whitelist, and contains 3rd party code => OK
    # Missing - in whitelist, but doesn't exist
    # Stale - in whitelist, but is clean
    # Unknown - not in whitelist, but contains 3rd party code
    self.input_api.os_path.isfile = lambda x: x != 'Missing'
    # Expected return shape: (to_whitelist, to_delete_from_whitelist_missing,
    # stale_whitelist_entries).
    self.assertEqual(
        (['Unknown'], ['Missing'], ['Stale']),
        copyright_scanner.AnalyzeScanResults(self.input_api, \
            ['Whitelisted', 'Missing', 'Stale'], ['Whitelisted', 'Unknown']))
class ScanAtPresubmitTest(SuperMoxTestBase):
  """Tests for copyright_scanner.ScanAtPresubmit's file selection logic.

  The scanner internals (_DoScanAtPresubmit, whitelist loading) are stubbed
  out; these tests only verify WHICH files are checked and which whitelist
  subset is used, depending on whether the whitelist file itself changed.
  """

  def setUp(self):
    SuperMoxTestBase.setUp(self)
    # Fake presubmit input/output APIs; only the attributes read by
    # ScanAtPresubmit are provided.
    self.input_api = self.mox.CreateMockAnything()
    self.input_api.re = re
    self.input_api.os_path = os.path
    self.output_api = self.mox.CreateMockAnything()

  def tearDown(self):
    # Undo StubOutWithMock replacements on the copyright_scanner module.
    self.mox.UnsetStubs()
    SuperMoxTestBase.tearDown(self)

  class AffectedFileMock(object):
    # Stand-in for the presubmit AffectedFile: exposes only LocalPath()
    # and Action() ('A' added, 'D' deleted, 'M' modified).
    def __init__(self, local_path, action):
      self._local_path = local_path
      self._action = action
    def LocalPath(self):
      return self._local_path
    def Action(self):
      return self._action

  def CreateAffectedFilesFunc(self, paths_and_actions):
    # Builds an input_api.AffectedFiles replacement from a flat
    # [path1, action1, path2, action2, ...] list.
    result = []
    for i in range(0, len(paths_and_actions), 2):
      result.append(ScanAtPresubmitTest.AffectedFileMock(
          paths_and_actions[i], paths_and_actions[i + 1]))
    return lambda: result

  def CreateDoScanAtPresubmitFunc(self):
    # Replaces _DoScanAtPresubmit with a stub that records the whitelist
    # and to-check sets it was invoked with (captured via self).
    self._whitelisted_files = None
    self._files_to_check = None
    def ScanAtPresubmitStub(_, whitelisted, to_check):
      self._whitelisted_files = whitelisted
      self._files_to_check = to_check
      return ([], [], [])
    return ScanAtPresubmitStub

  def GetWhitelistedFiles(self):
    # Sorted for deterministic assertions.
    return sorted(self._whitelisted_files)

  def GetFilesToCheck(self):
    return sorted(self._files_to_check)

  def testWhitelistedUntouched(self):
    # When a change doesn't touch the whitelist file, any updated files
    # (except deleted) must be checked. The whitelist used for analysis
    # must be trimmed to the changed files subset.
    #
    # A_NW.cc - added, not whitelisted => check
    # A_W.cc - added, whitelisted => check, remain on the trimmed whitelist
    # D_NW.cc - deleted, not whitelisted => ignore
    # D_W.cc - deleted and whitelisted => remain on w/l
    # M_NW.cc - modified, not whitelisted => check
    # M_W.cc - modified and whitelisted => check, remain on w/l
    # NM_W.cc - not modified, whitelisted => trim from w/l
    # W - the whitelist file
    self.input_api.AffectedFiles = self.CreateAffectedFilesFunc(
        ['A_NW.cc', 'A', 'A_W.cc', 'A', 'D_NW.cc', 'D', 'D_W.cc', 'D',
         'M_NW.cc', 'M', 'M_W.cc', 'M'])
    self.mox.StubOutWithMock(copyright_scanner, '_GetWhitelistFileName')
    copyright_scanner._GetWhitelistFileName = lambda _: 'W'
    self.mox.StubOutWithMock(copyright_scanner, 'LoadWhitelistedFilesList')
    copyright_scanner.LoadWhitelistedFilesList = \
        lambda _: ['A_W.cc', 'D_W.cc', 'M_W.cc', 'NM_W.cc']
    self.mox.StubOutWithMock(copyright_scanner, '_DoScanAtPresubmit')
    copyright_scanner._DoScanAtPresubmit = self.CreateDoScanAtPresubmitFunc()
    self.mox.ReplayAll()
    copyright_scanner.ScanAtPresubmit(self.input_api, self.output_api)
    self.assertEqual(
        ['A_W.cc', 'D_W.cc', 'M_W.cc'], self.GetWhitelistedFiles())
    self.assertEqual(
        ['A_NW.cc', 'A_W.cc', 'M_NW.cc', 'M_W.cc'], self.GetFilesToCheck())

  def testWhitelistTouched(self):
    # When the whitelist file is touched by the change, all the files listed in
    # it, including deleted entries, must be re-checked. All modified files
    # (including the deleted ones) must be checked as well. The current contents
    # of the whitelist are used for analysis.
    # Whitelist addition or deletion are not considered.
    #
    # All the files from names testWhitelistedUntouched are re-used, but now
    # action for all of them is 'check' (except for the w/l file itself).
    # A_DW.cc - added, deleted from w/l => check
    # D_DW.cc - deleted from repo and w/l => check
    # M_DW.cc - modified, deleted from w/l => check
    self.input_api.AffectedFiles = self.CreateAffectedFilesFunc(
        ['A_DW.cc', 'A', 'A_NW.cc', 'A', 'A_W.cc', 'A',
         'D_DW.cc', 'D', 'D_NW.cc', 'D', 'D_W.cc', 'D',
         'M_DW.cc', 'M', 'M_NW.cc', 'M', 'M_W.cc', 'M', 'W', 'M'])
    self.mox.StubOutWithMock(copyright_scanner, '_GetWhitelistFileName')
    copyright_scanner._GetWhitelistFileName = lambda _: 'W'
    self.mox.StubOutWithMock(copyright_scanner, '_GetDeletedContents')
    def GetDeletedContentsStub(affected_file):
      # _GetDeletedContents must only be asked about the whitelist file.
      self.assertEqual('W', affected_file.LocalPath())
      return ['A_DW.cc', 'D_DW.cc', 'M_DW.cc']
    copyright_scanner._GetDeletedContents = GetDeletedContentsStub
    self.mox.StubOutWithMock(copyright_scanner, 'LoadWhitelistedFilesList')
    copyright_scanner.LoadWhitelistedFilesList = \
        lambda _: ['A_W.cc', 'D_W.cc', 'M_W.cc', 'NM_W.cc']
    self.mox.StubOutWithMock(copyright_scanner, '_DoScanAtPresubmit')
    copyright_scanner._DoScanAtPresubmit = self.CreateDoScanAtPresubmitFunc()
    self.mox.ReplayAll()
    copyright_scanner.ScanAtPresubmit(self.input_api, self.output_api)
    self.assertEqual(
        ['A_W.cc', 'D_W.cc', 'M_W.cc', 'NM_W.cc'], self.GetWhitelistedFiles())
    self.assertEqual(
        ['A_DW.cc', 'A_NW.cc', 'A_W.cc', 'D_DW.cc', 'D_NW.cc', 'D_W.cc',
         'M_DW.cc', 'M_NW.cc', 'M_W.cc', 'NM_W.cc' ], self.GetFilesToCheck())
# Allow running this test module directly from the command line.
if __name__ == '__main__':
  unittest.main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
"""
pygments.lexers.r
~~~~~~~~~~~~~~~~~
Lexers for the R/S languages.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import Lexer, RegexLexer, include, words, do_insertions
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Generic
# Public lexer classes exported by this module.
__all__ = ['RConsoleLexer', 'SLexer', 'RdLexer']

# Matches a single line including its trailing newline.
line_re = re.compile('.*?\n')
class RConsoleLexer(Lexer):
    """
    For R console transcripts or R CMD BATCH output files.

    Prompt lines (starting with '>' or '+') are accumulated into a code
    buffer and highlighted with SLexer; all other lines are emitted as
    Generic.Output.
    """
    name = 'RConsole'
    aliases = ['rconsole', 'rout']
    filenames = ['*.Rout']

    def get_tokens_unprocessed(self, text):
        # Delegate highlighting of the R code portions to SLexer.
        slexer = SLexer(**self.options)

        current_code_block = ''
        insertions = []

        for match in line_re.finditer(text):
            line = match.group()
            if line.startswith('>') or line.startswith('+'):
                # Colorize the prompt as such,
                # then put rest of line into current_code_block
                insertions.append((len(current_code_block),
                                   [(0, Generic.Prompt, line[:2])]))
                current_code_block += line[2:]
            else:
                # We have reached a non-prompt line!
                # If we have stored prompt lines, need to process them first.
                if current_code_block:
                    # Weave together the prompts and highlight code.
                    for item in do_insertions(
                            insertions, slexer.get_tokens_unprocessed(current_code_block)):
                        yield item
                    # Reset vars for next code block.
                    current_code_block = ''
                    insertions = []
                # Now process the actual line itself, this is output from R.
                yield match.start(), Generic.Output, line

        # If we happen to end on a code block with nothing after it, need to
        # process the last code block. This is neither elegant nor DRY so
        # should be changed.
        if current_code_block:
            for item in do_insertions(
                    insertions, slexer.get_tokens_unprocessed(current_code_block)):
                yield item
class SLexer(RegexLexer):
    """
    For S, S-plus, and R source code.

    .. versionadded:: 0.10
    """

    name = 'S'
    aliases = ['splus', 's', 'r']
    filenames = ['*.S', '*.R', '.Rhistory', '.Rprofile', '.Renviron']
    mimetypes = ['text/S-plus', 'text/S', 'text/x-r-source', 'text/x-r',
                 'text/x-R', 'text/x-r-history', 'text/x-r-profile']

    # Identifiers from R's 'base' package, highlighted as Keyword.Pseudo.
    # Fix: the original list contained the typo 'NULLNA_integer_' — a
    # missing comma merging the two distinct R constants 'NULL' and
    # 'NA_integer_' into one bogus name. They are now listed separately.
    builtins_base = (
        'Arg', 'Conj', 'Cstack_info', 'Encoding', 'FALSE',
        'Filter', 'Find', 'I', 'ISOdate', 'ISOdatetime', 'Im', 'Inf',
        'La.svd', 'Map', 'Math.Date', 'Math.POSIXt', 'Math.data.frame',
        'Math.difftime', 'Math.factor', 'Mod', 'NA_character_',
        'NA_complex_', 'NA_real_', 'NCOL', 'NROW', 'NULL', 'NA_integer_', 'NaN',
        'Negate', 'NextMethod', 'Ops.Date', 'Ops.POSIXt', 'Ops.data.frame',
        'Ops.difftime', 'Ops.factor', 'Ops.numeric_version', 'Ops.ordered',
        'Position', 'R.Version', 'R.home', 'R.version', 'R.version.string',
        'RNGkind', 'RNGversion', 'R_system_version', 'Re', 'Recall',
        'Reduce', 'Summary.Date', 'Summary.POSIXct', 'Summary.POSIXlt',
        'Summary.data.frame', 'Summary.difftime', 'Summary.factor',
        'Summary.numeric_version', 'Summary.ordered', 'Sys.Date',
        'Sys.chmod', 'Sys.getenv', 'Sys.getlocale', 'Sys.getpid',
        'Sys.glob', 'Sys.info', 'Sys.localeconv', 'Sys.readlink',
        'Sys.setFileTime', 'Sys.setenv', 'Sys.setlocale', 'Sys.sleep',
        'Sys.time', 'Sys.timezone', 'Sys.umask', 'Sys.unsetenv',
        'Sys.which', 'TRUE', 'UseMethod', 'Vectorize', 'abbreviate', 'abs',
        'acos', 'acosh', 'addNA', 'addTaskCallback', 'agrep', 'alist',
        'all', 'all.equal', 'all.equal.POSIXct', 'all.equal.character',
        'all.equal.default', 'all.equal.factor', 'all.equal.formula',
        'all.equal.language', 'all.equal.list', 'all.equal.numeric',
        'all.equal.raw', 'all.names', 'all.vars', 'any', 'anyDuplicated',
        'anyDuplicated.array', 'anyDuplicated.data.frame',
        'anyDuplicated.default', 'anyDuplicated.matrix', 'aperm',
        'aperm.default', 'aperm.table', 'append', 'apply', 'args',
        'arrayInd', 'as.Date', 'as.Date.POSIXct', 'as.Date.POSIXlt',
        'as.Date.character', 'as.Date.date', 'as.Date.dates',
        'as.Date.default', 'as.Date.factor', 'as.Date.numeric',
        'as.POSIXct', 'as.POSIXct.Date', 'as.POSIXct.POSIXlt',
        'as.POSIXct.date', 'as.POSIXct.dates', 'as.POSIXct.default',
        'as.POSIXct.numeric', 'as.POSIXlt', 'as.POSIXlt.Date',
        'as.POSIXlt.POSIXct', 'as.POSIXlt.character', 'as.POSIXlt.date',
        'as.POSIXlt.dates', 'as.POSIXlt.default', 'as.POSIXlt.factor',
        'as.POSIXlt.numeric', 'as.array', 'as.array.default', 'as.call',
        'as.character', 'as.character.Date', 'as.character.POSIXt',
        'as.character.condition', 'as.character.default',
        'as.character.error', 'as.character.factor', 'as.character.hexmode',
        'as.character.numeric_version', 'as.character.octmode',
        'as.character.srcref', 'as.complex', 'as.data.frame',
        'as.data.frame.AsIs', 'as.data.frame.Date', 'as.data.frame.POSIXct',
        'as.data.frame.POSIXlt', 'as.data.frame.array',
        'as.data.frame.character', 'as.data.frame.complex',
        'as.data.frame.data.frame', 'as.data.frame.default',
        'as.data.frame.difftime', 'as.data.frame.factor',
        'as.data.frame.integer', 'as.data.frame.list',
        'as.data.frame.logical', 'as.data.frame.matrix',
        'as.data.frame.model.matrix', 'as.data.frame.numeric',
        'as.data.frame.numeric_version', 'as.data.frame.ordered',
        'as.data.frame.raw', 'as.data.frame.table', 'as.data.frame.ts',
        'as.data.frame.vector', 'as.difftime', 'as.double',
        'as.double.POSIXlt', 'as.double.difftime', 'as.environment',
        'as.expression', 'as.expression.default', 'as.factor',
        'as.function', 'as.function.default', 'as.hexmode', 'as.integer',
        'as.list', 'as.list.Date', 'as.list.POSIXct', 'as.list.data.frame',
        'as.list.default', 'as.list.environment', 'as.list.factor',
        'as.list.function', 'as.list.numeric_version', 'as.logical',
        'as.logical.factor', 'as.matrix', 'as.matrix.POSIXlt',
        'as.matrix.data.frame', 'as.matrix.default', 'as.matrix.noquote',
        'as.name', 'as.null', 'as.null.default', 'as.numeric',
        'as.numeric_version', 'as.octmode', 'as.ordered',
        'as.package_version', 'as.pairlist', 'as.qr', 'as.raw', 'as.single',
        'as.single.default', 'as.symbol', 'as.table', 'as.table.default',
        'as.vector', 'as.vector.factor', 'asNamespace', 'asS3', 'asS4',
        'asin', 'asinh', 'assign', 'atan', 'atan2', 'atanh',
        'attachNamespace', 'attr', 'attr.all.equal', 'attributes',
        'autoload', 'autoloader', 'backsolve', 'baseenv', 'basename',
        'besselI', 'besselJ', 'besselK', 'besselY', 'beta',
        'bindingIsActive', 'bindingIsLocked', 'bindtextdomain', 'bitwAnd',
        'bitwNot', 'bitwOr', 'bitwShiftL', 'bitwShiftR', 'bitwXor', 'body',
        'bquote', 'browser', 'browserCondition', 'browserSetDebug',
        'browserText', 'builtins', 'by', 'by.data.frame', 'by.default',
        'bzfile', 'c.Date', 'c.POSIXct', 'c.POSIXlt', 'c.noquote',
        'c.numeric_version', 'call', 'callCC', 'capabilities', 'casefold',
        'cat', 'category', 'cbind', 'cbind.data.frame', 'ceiling',
        'char.expand', 'charToRaw', 'charmatch', 'chartr', 'check_tzones',
        'chol', 'chol.default', 'chol2inv', 'choose', 'class',
        'clearPushBack', 'close', 'close.connection', 'close.srcfile',
        'close.srcfilealias', 'closeAllConnections', 'col', 'colMeans',
        'colSums', 'colnames', 'commandArgs', 'comment', 'computeRestarts',
        'conditionCall', 'conditionCall.condition', 'conditionMessage',
        'conditionMessage.condition', 'conflicts', 'contributors', 'cos',
        'cosh', 'crossprod', 'cummax', 'cummin', 'cumprod', 'cumsum', 'cut',
        'cut.Date', 'cut.POSIXt', 'cut.default', 'dQuote', 'data.class',
        'data.matrix', 'date', 'debug', 'debugonce',
        'default.stringsAsFactors', 'delayedAssign', 'deparse', 'det',
        'determinant', 'determinant.matrix', 'dget', 'diag', 'diff',
        'diff.Date', 'diff.POSIXt', 'diff.default', 'difftime', 'digamma',
        'dim', 'dim.data.frame', 'dimnames', 'dimnames.data.frame', 'dir',
        'dir.create', 'dirname', 'do.call', 'dput', 'drop', 'droplevels',
        'droplevels.data.frame', 'droplevels.factor', 'dump', 'duplicated',
        'duplicated.POSIXlt', 'duplicated.array', 'duplicated.data.frame',
        'duplicated.default', 'duplicated.matrix',
        'duplicated.numeric_version', 'dyn.load', 'dyn.unload', 'eapply',
        'eigen', 'else', 'emptyenv', 'enc2native', 'enc2utf8',
        'encodeString', 'enquote', 'env.profile', 'environment',
        'environmentIsLocked', 'environmentName', 'eval', 'eval.parent',
        'evalq', 'exists', 'exp', 'expand.grid', 'expm1', 'expression',
        'factor', 'factorial', 'fifo', 'file', 'file.access', 'file.append',
        'file.choose', 'file.copy', 'file.create', 'file.exists',
        'file.info', 'file.link', 'file.path', 'file.remove', 'file.rename',
        'file.show', 'file.symlink', 'find.package', 'findInterval',
        'findPackageEnv', 'findRestart', 'floor', 'flush',
        'flush.connection', 'force', 'formals', 'format',
        'format.AsIs', 'format.Date', 'format.POSIXct', 'format.POSIXlt',
        'format.data.frame', 'format.default', 'format.difftime',
        'format.factor', 'format.hexmode', 'format.info',
        'format.libraryIQR', 'format.numeric_version', 'format.octmode',
        'format.packageInfo', 'format.pval', 'format.summaryDefault',
        'formatC', 'formatDL', 'forwardsolve', 'gamma', 'gc', 'gc.time',
        'gcinfo', 'gctorture', 'gctorture2', 'get', 'getAllConnections',
        'getCallingDLL', 'getCallingDLLe', 'getConnection',
        'getDLLRegisteredRoutines', 'getDLLRegisteredRoutines.DLLInfo',
        'getDLLRegisteredRoutines.character', 'getElement',
        'getExportedValue', 'getHook', 'getLoadedDLLs', 'getNamespace',
        'getNamespaceExports', 'getNamespaceImports', 'getNamespaceInfo',
        'getNamespaceName', 'getNamespaceUsers', 'getNamespaceVersion',
        'getNativeSymbolInfo', 'getOption', 'getRversion', 'getSrcLines',
        'getTaskCallbackNames', 'geterrmessage', 'gettext', 'gettextf',
        'getwd', 'gl', 'globalenv', 'gregexpr', 'grep', 'grepRaw', 'grepl',
        'gsub', 'gzcon', 'gzfile', 'head', 'iconv', 'iconvlist',
        'icuSetCollate', 'identical', 'identity', 'ifelse', 'importIntoEnv',
        'in', 'inherits', 'intToBits', 'intToUtf8', 'interaction', 'interactive',
        'intersect', 'inverse.rle', 'invisible', 'invokeRestart',
        'invokeRestartInteractively', 'is.R', 'is.array', 'is.atomic',
        'is.call', 'is.character', 'is.complex', 'is.data.frame',
        'is.double', 'is.element', 'is.environment', 'is.expression',
        'is.factor', 'is.finite', 'is.function', 'is.infinite',
        'is.integer', 'is.language', 'is.list', 'is.loaded', 'is.logical',
        'is.matrix', 'is.na', 'is.na.POSIXlt', 'is.na.data.frame',
        'is.na.numeric_version', 'is.name', 'is.nan', 'is.null',
        'is.numeric', 'is.numeric.Date', 'is.numeric.POSIXt',
        'is.numeric.difftime', 'is.numeric_version', 'is.object',
        'is.ordered', 'is.package_version', 'is.pairlist', 'is.primitive',
        'is.qr', 'is.raw', 'is.recursive', 'is.single', 'is.symbol',
        'is.table', 'is.unsorted', 'is.vector', 'isBaseNamespace',
        'isIncomplete', 'isNamespace', 'isOpen', 'isRestart', 'isS4',
        'isSeekable', 'isSymmetric', 'isSymmetric.matrix', 'isTRUE',
        'isatty', 'isdebugged', 'jitter', 'julian', 'julian.Date',
        'julian.POSIXt', 'kappa', 'kappa.default', 'kappa.lm', 'kappa.qr',
        'kronecker', 'l10n_info', 'labels', 'labels.default', 'lapply',
        'lazyLoad', 'lazyLoadDBexec', 'lazyLoadDBfetch', 'lbeta', 'lchoose',
        'length', 'length.POSIXlt', 'letters', 'levels', 'levels.default',
        'lfactorial', 'lgamma', 'library.dynam', 'library.dynam.unload',
        'licence', 'license', 'list.dirs', 'list.files', 'list2env', 'load',
        'loadNamespace', 'loadedNamespaces', 'loadingNamespaceInfo',
        'local', 'lockBinding', 'lockEnvironment', 'log', 'log10', 'log1p',
        'log2', 'logb', 'lower.tri', 'ls', 'make.names', 'make.unique',
        'makeActiveBinding', 'mapply', 'margin.table', 'mat.or.vec',
        'match', 'match.arg', 'match.call', 'match.fun', 'max', 'max.col',
        'mean', 'mean.Date', 'mean.POSIXct', 'mean.POSIXlt', 'mean.default',
        'mean.difftime', 'mem.limits', 'memCompress', 'memDecompress',
        'memory.profile', 'merge', 'merge.data.frame', 'merge.default',
        'message', 'mget', 'min', 'missing', 'mode', 'month.abb',
        'month.name', 'months', 'months.Date', 'months.POSIXt',
        'months.abb', 'months.nameletters', 'names', 'names.POSIXlt',
        'namespaceExport', 'namespaceImport', 'namespaceImportClasses',
        'namespaceImportFrom', 'namespaceImportMethods', 'nargs', 'nchar',
        'ncol', 'new.env', 'ngettext', 'nlevels', 'noquote', 'norm',
        'normalizePath', 'nrow', 'numeric_version', 'nzchar', 'objects',
        'oldClass', 'on.exit', 'open', 'open.connection', 'open.srcfile',
        'open.srcfilealias', 'open.srcfilecopy', 'options', 'order',
        'ordered', 'outer', 'packBits', 'packageEvent',
        'packageHasNamespace', 'packageStartupMessage', 'package_version',
        'pairlist', 'parent.env', 'parent.frame', 'parse',
        'parseNamespaceFile', 'paste', 'paste0', 'path.expand',
        'path.package', 'pipe', 'pmatch', 'pmax', 'pmax.int', 'pmin',
        'pmin.int', 'polyroot', 'pos.to.env', 'pretty', 'pretty.default',
        'prettyNum', 'print', 'print.AsIs', 'print.DLLInfo',
        'print.DLLInfoList', 'print.DLLRegisteredRoutines', 'print.Date',
        'print.NativeRoutineList', 'print.POSIXct', 'print.POSIXlt',
        'print.by', 'print.condition', 'print.connection',
        'print.data.frame', 'print.default', 'print.difftime',
        'print.factor', 'print.function', 'print.hexmode',
        'print.libraryIQR', 'print.listof', 'print.noquote',
        'print.numeric_version', 'print.octmode', 'print.packageInfo',
        'print.proc_time', 'print.restart', 'print.rle',
        'print.simple.list', 'print.srcfile', 'print.srcref',
        'print.summary.table', 'print.summaryDefault', 'print.table',
        'print.warnings', 'prmatrix', 'proc.time', 'prod', 'prop.table',
        'provideDimnames', 'psigamma', 'pushBack', 'pushBackLength', 'q',
        'qr', 'qr.Q', 'qr.R', 'qr.X', 'qr.coef', 'qr.default', 'qr.fitted',
        'qr.qty', 'qr.qy', 'qr.resid', 'qr.solve', 'quarters',
        'quarters.Date', 'quarters.POSIXt', 'quit', 'quote', 'range',
        'range.default', 'rank', 'rapply', 'raw', 'rawConnection',
        'rawConnectionValue', 'rawShift', 'rawToBits', 'rawToChar', 'rbind',
        'rbind.data.frame', 'rcond', 'read.dcf', 'readBin', 'readChar',
        'readLines', 'readRDS', 'readRenviron', 'readline', 'reg.finalizer',
        'regexec', 'regexpr', 'registerS3method', 'registerS3methods',
        'regmatches', 'remove', 'removeTaskCallback', 'rep', 'rep.Date',
        'rep.POSIXct', 'rep.POSIXlt', 'rep.factor', 'rep.int',
        'rep.numeric_version', 'rep_len', 'replace', 'replicate',
        'requireNamespace', 'restartDescription', 'restartFormals',
        'retracemem', 'rev', 'rev.default', 'rle', 'rm', 'round',
        'round.Date', 'round.POSIXt', 'row', 'row.names',
        'row.names.data.frame', 'row.names.default', 'rowMeans', 'rowSums',
        'rownames', 'rowsum', 'rowsum.data.frame', 'rowsum.default',
        'sQuote', 'sample', 'sample.int', 'sapply', 'save', 'save.image',
        'saveRDS', 'scale', 'scale.default', 'scan', 'search',
        'searchpaths', 'seek', 'seek.connection', 'seq', 'seq.Date',
        'seq.POSIXt', 'seq.default', 'seq.int', 'seq_along', 'seq_len',
        'sequence', 'serialize', 'set.seed', 'setHook', 'setNamespaceInfo',
        'setSessionTimeLimit', 'setTimeLimit', 'setdiff', 'setequal',
        'setwd', 'shQuote', 'showConnections', 'sign', 'signalCondition',
        'signif', 'simpleCondition', 'simpleError', 'simpleMessage',
        'simpleWarning', 'simplify2array', 'sin', 'single',
        'sinh', 'sink', 'sink.number', 'slice.index', 'socketConnection',
        'socketSelect', 'solve', 'solve.default', 'solve.qr', 'sort',
        'sort.POSIXlt', 'sort.default', 'sort.int', 'sort.list', 'split',
        'split.Date', 'split.POSIXct', 'split.data.frame', 'split.default',
        'sprintf', 'sqrt', 'srcfile', 'srcfilealias', 'srcfilecopy',
        'srcref', 'standardGeneric', 'stderr', 'stdin', 'stdout', 'stop',
        'stopifnot', 'storage.mode', 'strftime', 'strptime', 'strsplit',
        'strtoi', 'strtrim', 'structure', 'strwrap', 'sub', 'subset',
        'subset.data.frame', 'subset.default', 'subset.matrix',
        'substitute', 'substr', 'substring', 'sum', 'summary',
        'summary.Date', 'summary.POSIXct', 'summary.POSIXlt',
        'summary.connection', 'summary.data.frame', 'summary.default',
        'summary.factor', 'summary.matrix', 'summary.proc_time',
        'summary.srcfile', 'summary.srcref', 'summary.table',
        'suppressMessages', 'suppressPackageStartupMessages',
        'suppressWarnings', 'svd', 'sweep', 'sys.call', 'sys.calls',
        'sys.frame', 'sys.frames', 'sys.function', 'sys.load.image',
        'sys.nframe', 'sys.on.exit', 'sys.parent', 'sys.parents',
        'sys.save.image', 'sys.source', 'sys.status', 'system',
        'system.file', 'system.time', 'system2', 't', 't.data.frame',
        't.default', 'table', 'tabulate', 'tail', 'tan', 'tanh', 'tapply',
        'taskCallbackManager', 'tcrossprod', 'tempdir', 'tempfile',
        'testPlatformEquivalence', 'textConnection', 'textConnectionValue',
        'toString', 'toString.default', 'tolower', 'topenv', 'toupper',
        'trace', 'traceback', 'tracemem', 'tracingState', 'transform',
        'transform.data.frame', 'transform.default', 'trigamma', 'trunc',
        'trunc.Date', 'trunc.POSIXt', 'truncate', 'truncate.connection',
        'try', 'tryCatch', 'typeof', 'unclass', 'undebug', 'union',
        'unique', 'unique.POSIXlt', 'unique.array', 'unique.data.frame',
        'unique.default', 'unique.matrix', 'unique.numeric_version',
        'units', 'units.difftime', 'unix.time', 'unlink', 'unlist',
        'unloadNamespace', 'unlockBinding', 'unname', 'unserialize',
        'unsplit', 'untrace', 'untracemem', 'unz', 'upper.tri', 'url',
        'utf8ToInt', 'vapply', 'version', 'warning', 'warnings', 'weekdays',
        'weekdays.Date', 'weekdays.POSIXt', 'which', 'which.max',
        'which.min', 'with', 'with.default', 'withCallingHandlers',
        'withRestarts', 'withVisible', 'within', 'within.data.frame',
        'within.list', 'write', 'write.dcf', 'writeBin', 'writeChar',
        'writeLines', 'xor', 'xor.hexmode', 'xor.octmode',
        'xpdrows.data.frame', 'xtfrm', 'xtfrm.AsIs', 'xtfrm.Date',
        'xtfrm.POSIXct', 'xtfrm.POSIXlt', 'xtfrm.Surv', 'xtfrm.default',
        'xtfrm.difftime', 'xtfrm.factor', 'xtfrm.numeric_version', 'xzfile',
        'zapsmall'
    )

    tokens = {
        'comments': [
            (r'#.*$', Comment.Single),
        ],
        'valid_name': [
            (r'[a-zA-Z][0-9a-zA-Z\._]*', Text),
            # can begin with ., but not if that is followed by a digit
            (r'\.[a-zA-Z_][0-9a-zA-Z\._]*', Text),
        ],
        'punctuation': [
            (r'\[{1,2}|\]{1,2}|\(|\)|;|,', Punctuation),
        ],
        'keywords': [
            # Built-ins are only highlighted when not followed by a
            # character that would make them part of a longer name or an
            # assignment ('=' or ' ').
            (words(builtins_base, suffix=r'(?![\w\. =])'),
             Keyword.Pseudo),
            (r'(if|else|for|while|repeat|in|next|break|return|switch|function)'
             r'(?![\w\.])',
             Keyword.Reserved),
            (r'(array|category|character|complex|double|function|integer|list|'
             r'logical|matrix|numeric|vector|data.frame|c)'
             r'(?![\w\.])',
             Keyword.Type),
            (r'(library|require|attach|detach|source)'
             r'(?![\w\.])',
             Keyword.Namespace)
        ],
        'operators': [
            (r'<<?-|->>?|-|==|<=|>=|<|>|&&?|!=|\|\|?|\?', Operator),
            (r'\*|\+|\^|/|!|%[^%]*%|=|~|\$|@|:{1,3}', Operator)
        ],
        'builtin_symbols': [
            (r'(NULL|NA(_(integer|real|complex|character)_)?|'
             r'letters|LETTERS|Inf|TRUE|FALSE|NaN|pi|\.\.(\.|[0-9]+))'
             r'(?![0-9a-zA-Z\._])',
             Keyword.Constant),
            (r'(T|F)\b', Name.Builtin.Pseudo),
        ],
        'numbers': [
            # hex number
            (r'0[xX][a-fA-F0-9]+([pP][0-9]+)?[Li]?', Number.Hex),
            # decimal number
            (r'[+-]?([0-9]+(\.[0-9]+)?|\.[0-9]+|\.)([eE][+-]?[0-9]+)?[Li]?',
             Number),
        ],
        'statements': [
            include('comments'),
            # whitespaces
            (r'\s+', Text),
            (r'`.*?`', String.Backtick),
            (r'\'', String, 'string_squote'),
            (r'\"', String, 'string_dquote'),
            include('builtin_symbols'),
            include('numbers'),
            include('keywords'),
            include('punctuation'),
            include('operators'),
            include('valid_name'),
        ],
        'root': [
            include('statements'),
            # blocks:
            (r'\{|\}', Punctuation),
            # (r'\{', Punctuation, 'block'),
            (r'.', Text),
        ],
        # 'block': [
        #    include('statements'),
        #    ('\{', Punctuation, '#push'),
        #    ('\}', Punctuation, '#pop')
        # ],
        'string_squote': [
            (r'([^\'\\]|\\.)*\'', String, '#pop'),
        ],
        'string_dquote': [
            (r'([^"\\]|\\.)*"', String, '#pop'),
        ],
    }

    def analyse_text(text):
        # R's distinctive '<-' assignment (but not '<--') after an
        # identifier/bracket/whitespace is a weak positive signal.
        if re.search(r'[a-z0-9_\])\s]<-(?!-)', text):
            return 0.11
class RdLexer(RegexLexer):
    """
    Pygments Lexer for R documentation (Rd) files

    This is a very minimal implementation, highlighting little more
    than the macros. A description of Rd syntax is found in `Writing R
    Extensions <http://cran.r-project.org/doc/manuals/R-exts.html>`_
    and `Parsing Rd files <developer.r-project.org/parseRd.pdf>`_.

    .. versionadded:: 1.6
    """
    name = 'Rd'
    aliases = ['rd']
    filenames = ['*.Rd']
    mimetypes = ['text/x-r-doc']

    # To account for verbatim / LaTeX-like / and R-like areas
    # would require parsing.
    tokens = {
        'root': [
            # catch escaped brackets and percent sign
            (r'\\[\\{}%]', String.Escape),
            # comments
            (r'%.*$', Comment),
            # special macros with no arguments
            (r'\\(?:cr|l?dots|R|tab)\b', Keyword.Constant),
            # macros
            (r'\\[a-zA-Z]+\b', Keyword),
            # special preprocessor macros
            (r'^\s*#(?:ifn?def|endif).*\b', Comment.Preproc),
            # non-escaped brackets
            (r'[{}]', Name.Builtin),
            # everything else
            (r'[^\\%\n{}]+', Text),
            (r'.', Text),
        ]
    }
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
* Copyright 2002-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.jmx;
import java.net.BindException;
import javax.management.MBeanServer;
import javax.management.MBeanServerFactory;
import javax.management.ObjectName;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.extension.ExtendWith;
import org.junit.jupiter.api.extension.ExtensionContext;
import org.junit.jupiter.api.extension.LifecycleMethodExecutionExceptionHandler;
import org.junit.jupiter.api.extension.TestExecutionExceptionHandler;
import org.opentest4j.TestAbortedException;
import org.springframework.beans.factory.xml.XmlBeanDefinitionReader;
import org.springframework.context.ConfigurableApplicationContext;
import org.springframework.context.support.GenericApplicationContext;
import org.springframework.jmx.AbstractMBeanServerTests.BindExceptionHandler;
import org.springframework.jmx.export.MBeanExporter;
import org.springframework.util.MBeanTestUtils;
import static org.assertj.core.api.Assertions.assertThat;
/**
* <p>If you run into the <em>"Unsupported protocol: jmxmp"</em> error, you will need to
* download the <a href="https://www.oracle.com/technetwork/java/javase/tech/download-jsp-141676.html">JMX
* Remote API 1.0.1_04 Reference Implementation</a> from Oracle and extract
* {@code jmxremote_optional.jar} into your classpath, for example in the {@code lib/ext}
* folder of your JVM.
*
* <p>See also: <a href="https://jira.spring.io/browse/SPR-8093">SPR-8093</a>
*
* @author Rob Harrop
* @author Juergen Hoeller
* @author Sam Brannen
* @author Chris Beams
* @author Stephane Nicoll
*/
@ExtendWith(BindExceptionHandler.class)
public abstract class AbstractMBeanServerTests {

    /** Fresh MBeanServer created for each test in {@link #setUp()}. */
    protected MBeanServer server;

    /**
     * Creates a new MBeanServer and then invokes the subclass hook
     * {@link #onSetUp()}. If the hook fails, the server is released so it
     * does not leak into subsequent tests before the exception is rethrown.
     */
    @BeforeEach
    public final void setUp() throws Exception {
        this.server = MBeanServerFactory.createMBeanServer();
        try {
            onSetUp();
        }
        catch (Exception ex) {
            releaseServer();
            throw ex;
        }
    }

    /** Releases the per-test MBeanServer, then runs the subclass hook. */
    @AfterEach
    protected void tearDown() throws Exception {
        releaseServer();
        onTearDown();
    }

    /**
     * Releases the current server from the MBeanServerFactory and resets
     * cached MBean server state via MBeanTestUtils.
     */
    private void releaseServer() {
        try {
            MBeanServerFactory.releaseMBeanServer(getServer());
        }
        catch (IllegalArgumentException ex) {
            // Tolerate "not in list" (server already released); rethrow
            // anything else. NOTE(review): message-text matching is fragile —
            // confirm against the JMX implementation in use.
            if (!ex.getMessage().contains("not in list")) {
                throw ex;
            }
        }
        MBeanTestUtils.resetMBeanServers();
    }

    /**
     * Loads an application context from the given XML config location,
     * registering this test's MBeanServer as the singleton bean "server"
     * before refresh so bean definitions can reference it.
     */
    protected final ConfigurableApplicationContext loadContext(String configLocation) {
        GenericApplicationContext ctx = new GenericApplicationContext();
        new XmlBeanDefinitionReader(ctx).loadBeanDefinitions(configLocation);
        ctx.getDefaultListableBeanFactory().registerSingleton("server", getServer());
        ctx.refresh();
        return ctx;
    }

    /** Subclass hook invoked after the MBeanServer has been created. */
    protected void onSetUp() throws Exception {
    }

    /** Subclass hook invoked after the MBeanServer has been released. */
    protected void onTearDown() {
    }

    /** Returns the MBeanServer created for the current test. */
    protected final MBeanServer getServer() {
        return this.server;
    }

    /**
     * Start the specified {@link MBeanExporter}.
     */
    protected void start(MBeanExporter exporter) {
        exporter.afterPropertiesSet();
        exporter.afterSingletonsInstantiated();
    }

    /** Asserts that the given ObjectName is registered with the server. */
    protected void assertIsRegistered(String message, ObjectName objectName) {
        assertThat(getServer().isRegistered(objectName)).as(message).isTrue();
    }

    /** Asserts that the given ObjectName is NOT registered with the server. */
    protected void assertIsNotRegistered(String message, ObjectName objectName) {
        assertThat(getServer().isRegistered(objectName)).as(message).isFalse();
    }

    /**
     * JUnit extension that converts a {@link BindException} thrown from a
     * test or a before/after-each method into a {@link TestAbortedException},
     * aborting (rather than failing) the test when the JMX port is taken.
     */
    static class BindExceptionHandler implements TestExecutionExceptionHandler, LifecycleMethodExecutionExceptionHandler {

        @Override
        public void handleTestExecutionException(ExtensionContext context, Throwable throwable) throws Throwable {
            handleBindException(throwable);
        }

        @Override
        public void handleBeforeEachMethodExecutionException(ExtensionContext context, Throwable throwable)
                throws Throwable {
            handleBindException(throwable);
        }

        @Override
        public void handleAfterEachMethodExecutionException(ExtensionContext context, Throwable throwable)
                throws Throwable {
            handleBindException(throwable);
        }

        private void handleBindException(Throwable throwable) throws Throwable {
            // Abort test?
            if (throwable instanceof BindException) {
                throw new TestAbortedException("Failed to bind to MBeanServer", throwable);
            }
            // Else rethrow to conform to the contracts of TestExecutionExceptionHandler and LifecycleMethodExecutionExceptionHandler
            throw throwable;
        }
    }

}
|
java
|
github
|
https://github.com/spring-projects/spring-framework
|
spring-context/src/test/java/org/springframework/jmx/AbstractMBeanServerTests.java
|
from __future__ import division
import numpy as np
from rl.util import *
class Policy(object):
    """Abstract base class for all implemented policies.

    A policy decides which action the agent takes on the environment.
    Concrete subclasses must implement `select_action`; this base class only
    stores a back-reference to the owning agent and exposes empty
    metric/config hooks.

    # Arguments
        agent (rl.core.Agent): Agent used
    """

    def _set_agent(self, agent):
        # Registered by the agent itself so the policy can inspect
        # training state (e.g. `agent.training`, `agent.step`).
        self.agent = agent

    @property
    def metrics_names(self):
        # No metrics by default.
        return []

    @property
    def metrics(self):
        # No metrics by default.
        return []

    def select_action(self, **kwargs):
        # Subclasses define how an action is chosen.
        raise NotImplementedError()

    def get_config(self):
        """Return the configuration of the policy as a dict."""
        return {}
class LinearAnnealedPolicy(Policy):
    """Linearly anneal an attribute of an inner policy over time.

    Computes a threshold value that decreases linearly from `value_max` to
    `value_min` over `nb_steps` training steps, writes it onto the named
    attribute of `inner_policy`, and delegates action selection to that
    inner policy. During testing, `value_test` is used instead.
    """

    def __init__(self, inner_policy, attr, value_max, value_min, value_test, nb_steps):
        if not hasattr(inner_policy, attr):
            raise ValueError('Policy does not have attribute "{}".'.format(attr))

        super(LinearAnnealedPolicy, self).__init__()
        self.inner_policy = inner_policy
        self.attr = attr
        self.value_max = value_max
        self.value_min = value_min
        self.value_test = value_test
        self.nb_steps = nb_steps

    def get_current_value(self):
        """Return the current annealed value.

        # Returns
            Value to use in annealing
        """
        if not self.agent.training:
            return self.value_test
        # Linear schedule f(step) = slope * step + intercept, clipped at value_min.
        slope = -float(self.value_max - self.value_min) / float(self.nb_steps)
        intercept = float(self.value_max)
        return max(self.value_min, slope * float(self.agent.step) + intercept)

    def select_action(self, **kwargs):
        """Update the inner policy's annealed attribute, then delegate to it.

        # Returns
            Action to take (int)
        """
        setattr(self.inner_policy, self.attr, self.get_current_value())
        return self.inner_policy.select_action(**kwargs)

    @property
    def metrics_names(self):
        """Return the names of this policy's metrics (mean of the annealed attr)."""
        return ['mean_{}'.format(self.attr)]

    @property
    def metrics(self):
        """Return the current value of the annealed attribute as a metric."""
        return [getattr(self.inner_policy, self.attr)]

    def get_config(self):
        """Return the configuration of this policy as a dict."""
        config = super(LinearAnnealedPolicy, self).get_config()
        config.update({
            'attr': self.attr,
            'value_max': self.value_max,
            'value_min': self.value_min,
            'value_test': self.value_test,
            'nb_steps': self.nb_steps,
            'inner_policy': get_object_config(self.inner_policy),
        })
        return config
class SoftmaxPolicy(Policy):
    """Sample an action from a given multinomial probability distribution."""

    def select_action(self, nb_actions, probs):
        """Draw one action index according to ``probs``.

        # Arguments
            nb_actions (int): Number of available actions
            probs (np.ndarray): Probability for each action

        # Returns
            The sampled action
        """
        return np.random.choice(range(nb_actions), p=probs)
class EpsGreedyQPolicy(Policy):
    """Epsilon-greedy policy.

    With probability `eps` a uniformly random action is taken; otherwise the
    action with the highest Q value is chosen.
    """

    def __init__(self, eps=.1):
        super(EpsGreedyQPolicy, self).__init__()
        self.eps = eps

    def select_action(self, q_values):
        """Return the selected action.

        # Arguments
            q_values (np.ndarray): List of the estimations of Q for each action

        # Returns
            Selected action
        """
        assert q_values.ndim == 1
        nb_actions = q_values.shape[0]
        explore = np.random.uniform() < self.eps
        return np.random.randint(0, nb_actions) if explore else np.argmax(q_values)

    def get_config(self):
        """Return the configuration of this policy as a dict."""
        config = super(EpsGreedyQPolicy, self).get_config()
        config['eps'] = self.eps
        return config
class GreedyQPolicy(Policy):
    """Greedy policy: always take the action with the highest Q value."""

    def select_action(self, q_values):
        """Return the index of the largest Q value.

        # Arguments
            q_values (np.ndarray): List of the estimations of Q for each action

        # Returns
            Selected action
        """
        assert q_values.ndim == 1
        return np.argmax(q_values)
class BoltzmannQPolicy(Policy):
    """Boltzmann (softmax) Q policy.

    Builds a probability distribution over actions from the Q values (scaled
    by the temperature `tau`) and samples an action from it. The scaled
    values are clipped to `clip` before exponentiation to avoid overflow.
    """

    def __init__(self, tau=1., clip=(-500., 500.)):
        super(BoltzmannQPolicy, self).__init__()
        self.tau = tau
        self.clip = clip

    def select_action(self, q_values):
        """Sample an action from the softmax of the scaled Q values.

        # Arguments
            q_values (np.ndarray): List of the estimations of Q for each action

        # Returns
            Selected action
        """
        assert q_values.ndim == 1
        q_values = q_values.astype('float64')
        nb_actions = q_values.shape[0]

        scaled = np.clip(q_values / self.tau, self.clip[0], self.clip[1])
        exp_values = np.exp(scaled)
        probs = exp_values / np.sum(exp_values)
        return np.random.choice(range(nb_actions), p=probs)

    def get_config(self):
        """Return the configuration of this policy as a dict."""
        config = super(BoltzmannQPolicy, self).get_config()
        config['tau'] = self.tau
        config['clip'] = self.clip
        return config
class MaxBoltzmannQPolicy(Policy):
    """A combination of the eps-greedy and Boltzmann Q policies.

    With probability `eps` an action is sampled from the Boltzmann (softmax)
    distribution over the scaled Q values; otherwise the greedy action is taken.

    Wiering, M.: Explorations in Efficient Reinforcement Learning.
    PhD thesis, University of Amsterdam, Amsterdam (1999)
    https://pure.uva.nl/ws/files/3153478/8461_UBA003000033.pdf
    """

    def __init__(self, eps=.1, tau=1., clip=(-500., 500.)):
        super(MaxBoltzmannQPolicy, self).__init__()
        self.eps = eps
        self.tau = tau
        self.clip = clip

    def select_action(self, q_values):
        """Return the selected action.

        # Arguments
            q_values (np.ndarray): List of the estimations of Q for each action

        # Returns
            Selected action
        """
        assert q_values.ndim == 1
        q_values = q_values.astype('float64')
        nb_actions = q_values.shape[0]

        if np.random.uniform() < self.eps:
            # Boltzmann branch: sample from the softmax of the scaled Q values.
            scaled = np.clip(q_values / self.tau, self.clip[0], self.clip[1])
            exp_values = np.exp(scaled)
            probs = exp_values / np.sum(exp_values)
            action = np.random.choice(range(nb_actions), p=probs)
        else:
            # Greedy branch.
            action = np.argmax(q_values)
        return action

    def get_config(self):
        """Return the configuration of this policy as a dict."""
        config = super(MaxBoltzmannQPolicy, self).get_config()
        config['eps'] = self.eps
        config['tau'] = self.tau
        config['clip'] = self.clip
        return config
class BoltzmannGumbelQPolicy(Policy):
    """Boltzmann-Gumbel exploration (BGE) adapted for Q learning.

    Based on the paper Boltzmann Exploration Done Right
    (https://arxiv.org/pdf/1705.10257.pdf).

    BGE is invariant with respect to the mean of the rewards but not their
    variance. The parameter C, which defaults to 1, can be used to correct for
    this, and should be set to the least upper bound on the standard deviation
    of the rewards.

    BGE is only available for training, not testing. For testing purposes, you
    can achieve approximately the same result as BGE after training for N steps
    on K actions with parameter C by using the BoltzmannQPolicy and setting
    tau = C/sqrt(N/K).
    """

    def __init__(self, C=1.0):
        assert C > 0, "BoltzmannGumbelQPolicy C parameter must be > 0, not " + repr(C)
        super(BoltzmannGumbelQPolicy, self).__init__()
        self.C = C
        # Per-action visit counts; initialized on the first training step.
        self.action_counts = None

    def select_action(self, q_values):
        """Return the selected action.

        # Arguments
            q_values (np.ndarray): List of the estimations of Q for each action

        # Returns
            Selected action
        """
        # BGE cannot be used during testing, since the action_counts from the
        # end of training are not available.
        assert self.agent.training, "BoltzmannGumbelQPolicy should only be used for training, not testing"
        assert q_values.ndim == 1, q_values.ndim
        q_values = q_values.astype('float64')

        # At the start of training, (re)initialize the per-action counts;
        # afterwards they must already exist and match the action space.
        if self.agent.step == 0:
            self.action_counts = np.ones(q_values.shape)
        assert self.action_counts is not None, self.agent.step
        assert self.action_counts.shape == q_values.shape, (self.action_counts.shape, q_values.shape)

        # Perturb each Q value with Gumbel noise scaled by C / sqrt(count),
        # then act greedily on the perturbed values.
        noise_scale = self.C / np.sqrt(self.action_counts)
        gumbel_noise = np.random.gumbel(size=q_values.shape)
        perturbed_q_values = q_values + noise_scale * gumbel_noise
        action = np.argmax(perturbed_q_values)

        self.action_counts[action] += 1
        return action

    def get_config(self):
        """Return the configuration of this policy as a dict."""
        config = super(BoltzmannGumbelQPolicy, self).get_config()
        config['C'] = self.C
        return config
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Source apis layer."""
import json
import os
import sys
from apitools.base.py import exceptions
from googlecloudsdk.calliope import exceptions as base_exceptions
from googlecloudsdk.core import apis
from googlecloudsdk.core import exceptions as core_exceptions
class RepoCreationError(core_exceptions.Error):
  """Unable to create repo."""

  def __init__(self, message):
    super(RepoCreationError, self).__init__(message)
class RepoDeletionError(exceptions.Error):
  """Unable to delete repo."""
  # NOTE(review): subclasses apitools exceptions.Error, while RepoCreationError
  # above uses core_exceptions.Error -- confirm the inconsistency is intended.

  def __init__(self, message):
    super(RepoDeletionError, self).__init__(message)
class RepoNoExistError(exceptions.Error):
  """Repo does not exist."""
  # NOTE(review): also based on apitools exceptions.Error rather than
  # core_exceptions.Error -- confirm intended (see RepoCreationError).

  def __init__(self, message):
    super(RepoNoExistError, self).__init__(message)
# TODO(user): Avoid initializing this at import time.
# Module-level handle to the generated 'source' v1 message classes; used by
# the request-building wrapper classes below.
messages = apis.GetMessagesModule('source', 'v1')
def _NormalizeToSourceAPIPath(path):
"""Fix an OS-native path to conform to the Unix/Source API style.
Args:
path: (string) An OS-native path (e.g. "/foo/bar" on Unix or "foo\bar" on
Windows.
Returns:
(string) The path converted to Unix/Source API style. '\' characters will
be converted to '/' on Windows.
TODO(user) Consider whether it makes sense to strip drive letters.
"""
return path.replace(os.sep, '/')
class NoEndpointException(Exception):
  """Raised when the source API client has not been initialized."""

  def __str__(self):
    return ('Source endpoint not initialized. Source.SetApiEndpoint must be '
            'called before using this module.')
class FileTooBigException(Exception):
  """Raised when file contents exceed the maximum size ModifyWorkspace accepts."""

  def __init__(self, name, size, max_size):
    super(FileTooBigException, self).__init__()
    self.name = name
    self.size = size
    self.max_size = max_size

  def __str__(self):
    template = ('Could not write file "{0}" because it was too large '
                '({1} bytes). Max size is {2} bytes')
    return template.format(self.name, self.size, self.max_size)
def _GetViolationsFromError(error_info):
"""Looks for violations descriptions in error message.
Args:
error_info: json containing error information.
Returns:
List of violations descriptions.
"""
result = ''
details = None
try:
if 'details' in error_info:
details = error_info['details']
for field in details:
if 'fieldViolations' in field:
violations = field['fieldViolations']
for violation in violations:
if 'description' in violation:
result += violation['description'] + '\n'
except (ValueError, TypeError):
pass
return result
# TODO(b/26202997): make this more general to be used by other library code.
def GetHttpErrorMessage(error):
  """Returns a human readable string representation from the http response.

  Args:
    error: HttpException representing the error response.

  Returns:
    A human readable string representation of the error.
  """
  status = error.response.status
  code = error.response.reason
  message = ''
  try:
    data = json.loads(error.content)
  except ValueError:
    # Content was not JSON; fall back to the raw payload.
    data = error.content
  if 'error' in data:
    try:
      error_info = data['error']
      if 'message' in error_info:
        message = error_info['message']
      # Bug fix: this lookup must stay inside the try block. If data is a
      # non-JSON string that merely contains "error", data['error'] raises
      # TypeError and error_info would otherwise be unbound (NameError).
      violations = _GetViolationsFromError(error_info)
      if violations:
        message += '\nProblems:\n' + violations
    except (ValueError, TypeError):
      message = data
  else:
    message = data
  return 'ResponseError: status=[{0}], code=[{1}], message=[{2}]'.format(
      status, code, message)
class Source(object):
  """Base class for source api wrappers."""
  # Shared across all subclasses; configured via SetApiEndpoint /
  # SetResourceParser before any wrapper is used.
  _client = None
  _resource_parser = None

  def _CheckClient(self):
    # Guard used by subclass constructors: the module-level client must be
    # configured before any API call can be made.
    if not self._client:
      raise NoEndpointException()

  @classmethod
  def SetApiEndpoint(cls):
    # Initializes the shared 'source' v1 client for all wrapper classes.
    cls._client = apis.GetClientInstance('source', 'v1')

  @classmethod
  def SetResourceParser(cls, parser):
    cls._resource_parser = parser
class Project(Source):
  """Abstracts source project."""

  def __init__(self, project_id):
    # Fails fast with NoEndpointException if SetApiEndpoint was never called.
    self._CheckClient()
    self._id = project_id

  def ListRepos(self):
    """Returns list of repos."""
    request = messages.SourceProjectsReposListRequest(projectId=self._id)
    try:
      return self._client.projects_repos.List(request).repos
    except exceptions.HttpError as error:
      msg = GetHttpErrorMessage(error)
      unused_type, unused_value, traceback = sys.exc_info()
      # Python 2 three-expression raise: re-raise as a calliope HttpException
      # while preserving the original traceback.
      raise base_exceptions.HttpException, msg, traceback

  def GetRepo(self, repo_name):
    """Finds details on the named repo, if it exists.

    Args:
      repo_name: (string) The name of the repo to look up. Falls back to
        'default' when empty.

    Returns:
      (messages.Repo) The full definition of the repo, as reported by
      the server.
      Returns None if the repo does not exist.
    """
    if not repo_name:
      repo_name = 'default'
    request = messages.SourceProjectsReposGetRequest(
        projectId=self._id, repoName=repo_name)
    try:
      return self._client.projects_repos.Get(request)
    except exceptions.HttpError as e:
      # If the repo does not exist, we get an HTTP 404
      if e.status_code != 404:
        raise e
      return None

  def CreateRepo(self, repo_name, vcs=messages.Repo.VcsValueValuesEnum.GIT):
    """Creates a repo.

    Args:
      repo_name: (string) The name of the repo to create.
      vcs: (messages.Repo.VcsValueValuesEnum) The repo type. Defaults to GIT.

    Returns:
      (messages.Repo) The full definition of the new repo, as reported by
      the server.
    """
    request = messages.Repo(
        projectId=self._id,
        name=repo_name,
        vcs=vcs)
    return self._client.projects_repos.Create(request)

  def DeleteRepo(self, repo_name):
    """Deletes a repo.

    Args:
      repo_name: (string) The name of the repo to delete.
    """
    request = messages.SourceProjectsReposDeleteRequest(
        projectId=self._id,
        repoName=repo_name)
    self._client.projects_repos.Delete(request)
class Repo(Source):
  """Abstracts a source repository.

  TODO(user) Increase coverage of the API.
  """

  def __init__(self, project_id, name=''):
    """Initialize to wrap the given repo in a project.

    Args:
      project_id: (string) The id of the project.
      name: (string) The name of the repo. If not specified, use the default
        repo for the project.
    """
    # Fails fast with NoEndpointException if SetApiEndpoint was never called.
    self._CheckClient()
    if not name:
      name = 'default'
    self._repo_name = name
    self._project_id = project_id

  def ListWorkspaces(self):
    """Request a list of workspaces.

    Yields:
      (Workspace) The list of workspaces.
    """
    # MINIMAL view: only workspace ids/metadata, no file contents.
    request = messages.SourceProjectsReposWorkspacesListRequest(
        projectId=self._project_id, repoName=self._repo_name,
        view=messages.SourceProjectsReposWorkspacesListRequest.
        ViewValueValuesEnum.MINIMAL)
    response = self._client.projects_repos_workspaces.List(request)
    for ws in response.workspaces:
      yield Workspace(self._project_id, ws.id.name, repo_name=self._repo_name,
                      state=ws)

  def GetWorkspace(self, workspace_name):
    """Finds details on the named workspace, if it exists.

    Args:
      workspace_name: (string) The name of the workspace to look up. Falls
        back to 'default' when empty.

    Returns:
      (Workspace) A wrapper around the server's view of the workspace.
    """
    if not workspace_name:
      workspace_name = 'default'
    request = messages.SourceProjectsReposWorkspacesGetRequest(
        projectId=self._project_id, repoName=self._repo_name,
        name=workspace_name)
    ws = self._client.projects_repos_workspaces.Get(request)
    return Workspace(self._project_id, ws.id.name, repo_name=self._repo_name,
                     state=ws)

  def CreateWorkspace(self, workspace_name, alias_name, expected_baseline=None):
    """Create a new workspace in the repo.

    Args:
      workspace_name: (string) The name of the new workspace. Must be unique.
      alias_name: (string) The alias to use as a baseline for the workspace.
        If the alias does not exist, the workspace will have no baseline, and
        when it is commited, this name will be used to create a new movable
        alias referring to the new root revision created by this workspace.
      expected_baseline: (string) The expected current revision ID for the
        alias specified by alias_name. If specified, this value must match the
        alias's current revision ID at the time CreateWorkspace is called.

    Returns:
      (Workspace) The workspace that was created.
    """
    request = messages.SourceProjectsReposWorkspacesCreateRequest(
        projectId=self._project_id, repoName=self._repo_name,
        createWorkspaceRequest=messages.CreateWorkspaceRequest(
            workspace=messages.Workspace(
                id=messages.CloudWorkspaceId(name=workspace_name),
                alias=alias_name,
                baseline=expected_baseline)))
    return Workspace(
        self._project_id, workspace_name, repo_name=self._repo_name,
        state=self._client.projects_repos_workspaces.Create(request))

  def DeleteWorkspace(self, workspace_name, current_snapshot=None):
    """Delete a workspace from the repo.

    Args:
      workspace_name: (string) The name of the workspace to delete.
      current_snapshot: (string) The current snapshot ID of the workspace,
        used to verify that the workspace hasn't changed. If not None, the
        delete will succeed if and only if the snapshot ID of the workspace
        matches this value.
    """
    request = messages.SourceProjectsReposWorkspacesDeleteRequest(
        projectId=self._project_id, repoName=self._repo_name,
        name=workspace_name, currentSnapshotId=current_snapshot)
    self._client.projects_repos_workspaces.Delete(request)
class Workspace(Source):
  """Abstracts a workspace."""

  # Maximum amount of data to buffer/maximum file size. Each modification
  # to the workspace is a single POST request, and anything more than a few
  # hundred KB tends to trigger DEADLINE_EXCEEDED errors. Empirically, 256KB
  # is a good threshold.
  SIZE_THRESHOLD = 256 * 2**10

  def __init__(self, project_id, workspace_name, repo_name='', state=None):
    """Initialize from a workspace message.

    Args:
      project_id: (string) The project ID for the workspace.
      workspace_name: (string) The name of the workspace
      repo_name: (string) The repo containing the workspace. If not specified,
        use the default repo for the project.
      state: (messages.Workspace) Server-supplied workspace information.
        Since this argument usually comes from a call to the server, the repo
        will usually be specified by a uid rather than a name.
    """
    self._CheckClient()
    self._project_id = project_id
    self._repo_name = repo_name
    self._workspace_name = workspace_name
    # Actions queued locally until FlushPendingActions posts them.
    self._pending_actions = []
    self._workspace_state = state
    self._post_callback = None

  def __eq__(self, other):
    # Equality is based on the repr string (workspace name, project, repo).
    return isinstance(other, self.__class__) and str(self) == str(other)

  def __ne__(self, other):
    return not self.__eq__(other)

  def __repr__(self):
    return '<Workspace {0}, Project={1}, Repo={2}>'.format(
        self._workspace_name, self._project_id, self._repo_name)

  @property
  def name(self):
    return self._workspace_name

  def SetPostCallback(self, callback):
    """Sets a notification function to be called when actions are posted.

    Args:
      callback: (lambda(int)) A function to call with the number of actions
        posted to the server after the workspace has been modified.
    """
    self._post_callback = callback

  def FlushPendingActions(self, check_size_threshold=False):
    """Flushes all pending actions.

    Args:
      check_size_threshold: (boolean) If true, check if the total size of the
        contents of all pending actions exceeds SIZE_THRESHOLD
    """
    if not self._pending_actions:
      return
    if check_size_threshold:
      # Only flush once the buffered writes are large enough to be worth a
      # round trip; smaller batches stay queued.
      total = 0
      for a in self._pending_actions:
        if a.writeAction:
          total += len(a.writeAction.contents) + len(a.writeAction.path)
      if total < self.SIZE_THRESHOLD:
        return
    request = messages.SourceProjectsReposWorkspacesModifyWorkspaceRequest(
        projectId=self._project_id, repoName=self._repo_name,
        name=self._workspace_name,
        modifyWorkspaceRequest=messages.ModifyWorkspaceRequest(
            actions=self._pending_actions))
    # The server returns the updated workspace state after the modification.
    self._workspace_state = (
        self._client.projects_repos_workspaces.ModifyWorkspace(request))
    if self._post_callback:
      self._post_callback(len(self._pending_actions))
    self._pending_actions = []

  def WriteFile(self, path, contents,
                mode=messages.WriteAction.ModeValueValuesEnum.NORMAL):
    """Schedule an action to create or modify a file.

    Args:
      path: The path of the file to write.
      contents: The new contents of the file.
      mode: The new mode of the file.

    Raises:
      FileTooBigException: Indicates that the file contents are bigger than the
        maximum size supported by ModifyWorkspace.
    """
    if len(contents) > self.SIZE_THRESHOLD:
      raise FileTooBigException(path, len(contents), self.SIZE_THRESHOLD)
    path = _NormalizeToSourceAPIPath(path)
    self._pending_actions.append(messages.Action(
        writeAction=messages.WriteAction(
            path=path, contents=contents, mode=mode)))
    # Flush opportunistically so a long series of writes doesn't exceed the
    # single-request size limit.
    self.FlushPendingActions(check_size_threshold=True)
|
unknown
|
codeparrot/codeparrot-clean
| ||
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package adapter
import (
"context"
grpc "google.golang.org/grpc"
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
)
// kvs2kvc adapts a pb.KVServer so that it satisfies the pb.KVClient
// interface: each client call is forwarded directly to the embedded server
// implementation, and any grpc.CallOption values are ignored.
type kvs2kvc struct{ kvs pb.KVServer }

// KvServerToKvClient wraps the given KVServer in an in-process KVClient adapter.
func KvServerToKvClient(kvs pb.KVServer) pb.KVClient {
	return &kvs2kvc{kvs}
}

func (c *kvs2kvc) Range(ctx context.Context, in *pb.RangeRequest, opts ...grpc.CallOption) (*pb.RangeResponse, error) {
	return c.kvs.Range(ctx, in)
}

func (c *kvs2kvc) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (*pb.PutResponse, error) {
	return c.kvs.Put(ctx, in)
}

func (c *kvs2kvc) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (*pb.DeleteRangeResponse, error) {
	return c.kvs.DeleteRange(ctx, in)
}

func (c *kvs2kvc) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (*pb.TxnResponse, error) {
	return c.kvs.Txn(ctx, in)
}

func (c *kvs2kvc) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (*pb.CompactionResponse, error) {
	return c.kvs.Compact(ctx, in)
}
|
go
|
github
|
https://github.com/etcd-io/etcd
|
server/proxy/grpcproxy/adapter/kv_client_adapter.go
|
#pyCGM
import sys
import multiprocessing
import os
from math import *
import math
import numpy as np
from pycgmIO import *
# Lowerbody Coordinate System
def pelvisJointCenter(frame):
    """Calculate the pelvis joint center and axis.

    Takes a dictionary mapping marker names to x,y,z positions and computes
    the pelvis origin (midpoint of the anterior ASIS markers), the pelvis
    coordinate axes, and the sacrum (midpoint of the posterior PSI markers).

    Required markers in `frame`: RASI, LASI, RPSI, LPSI
    (each an array-like [x, y, z] position).

    Returns:
        [origin, pelvis_axis, sacrum] where
            origin: midpoint between RASI and LASI,
            pelvis_axis: np.ndarray of the x, y and z axis end points
                (each unit axis translated back to the origin),
            sacrum: midpoint between RPSI and LPSI.

    Example output (abbreviated):
        >>> [array([ 289.27518463,  425.44358826, 1034.95031738]),
             array([[ 289.25243803,  426.43632163, 1034.8321521 ],
                    [ 288.27565385,  425.41858059, 1034.93263017],
                    [ 289.25467091,  425.56129577, 1035.94315379]]),
             array([ 298.60904694,  244.07158661, 1056.64605715])]
    """
    # Get the Pelvis Joint Centre
    #REQUIRED MARKERS:
    # RASI
    # LASI
    # RPSI
    # LPSI
    RASI = frame['RASI']
    LASI = frame['LASI']
    RPSI = frame['RPSI']
    LPSI = frame['LPSI']
    # REQUIRED LANDMARKS:
    # origin
    # sacrum
    # Origin is Midpoint between RASI and LASI
    origin = (RASI+LASI)/2
    # If no sacrum marker, the mean of the posterior markers is used instead
    sacrum = (RPSI+LPSI)/2
    # Intermediate vectors used to build the axes
    # (beta1, beta2, beta3 are arbitrary working names)
    beta1 = origin-sacrum
    beta2 = LASI-RASI
    # Y-axis: normalized right-to-left ASIS vector
    y_axis = beta2/norm3d(beta2)
    # X-axis: computed with a Gram-Schmidt orthogonalization of beta1 against
    # the y-axis (ref. Kadaba 1990) and then normalized.
    beta3_cal = np.dot(beta1,y_axis)
    beta3_cal2 = beta3_cal*y_axis
    beta3 = beta1-beta3_cal2
    x_axis = beta3/norm3d(beta3)
    # Z-axis is the cross product of x_axis and y_axis.
    z_axis = cross(x_axis,y_axis)
    # Translate each unit axis back to the pelvis origin
    y_axis = y_axis+origin
    z_axis = z_axis+origin
    x_axis = x_axis+origin
    pelvis_axis = np.asarray([x_axis,y_axis,z_axis])
    pelvis = [origin,pelvis_axis,sacrum]
    return pelvis
def hipJointCenter(frame,pel_origin,pel_x,pel_y,pel_z,vsk=None):
    """Calculate the left and right hip joint centers.

    Uses the Davis 1991 regression to place each hip joint center relative to
    the pelvis axis.

    Args:
        frame: unused here; kept for interface consistency with the other
            joint-center functions.
        pel_origin: [x, y, z] pelvis origin.
        pel_x, pel_y, pel_z: [x, y, z] end points of the pelvis axes.
        vsk: dict of subject measurements; must contain 'MeanLegLength',
            'R_AsisToTrocanterMeasure', 'L_AsisToTrocanterMeasure' and
            'InterAsisDistance'.

    Returns:
        hip_JC = np.ndarray [[L_hipJC x,y,z], [R_hipJC x,y,z]]
    """
    #Get Global Values
    # Requires
    # pelvis axis
    pel_origin=np.asarray(pel_origin)
    pel_x=np.asarray(pel_x)
    pel_y=np.asarray(pel_y)
    pel_z=np.asarray(pel_z)
    # Model's eigen value
    #
    # LegLength
    # MeanLegLength
    # mm (marker radius)
    # interAsisMeasure
    #Set the variables needed to calculate the joint angle
    #Half of marker size
    mm = 7.0
    MeanLegLength = vsk['MeanLegLength']
    R_AsisToTrocanterMeasure = vsk['R_AsisToTrocanterMeasure']
    L_AsisToTrocanterMeasure = vsk['L_AsisToTrocanterMeasure']
    interAsisMeasure = vsk['InterAsisDistance']
    # Regression constants from Davis 1991; theta and beta are in radians.
    C = ( MeanLegLength * 0.115 ) - 15.3
    theta = 0.500000178813934
    beta = 0.314000427722931
    aa = interAsisMeasure/2.0
    # S = -1 mirrors the medio-lateral offset for the left side.
    S = -1
    # Hip Joint Center Calculation (ref. Davis_1991)
    # Left: Calculate the distance to translate along the pelvis axis
    L_Xh = (-L_AsisToTrocanterMeasure - mm) * cos(beta) + C * cos(theta) * sin(beta)
    L_Yh = S*(C*sin(theta)- aa)
    L_Zh = (-L_AsisToTrocanterMeasure - mm) * sin(beta) - C * cos(theta) * cos(beta)
    # Right: Calculate the distance to translate along the pelvis axis
    R_Xh = (-R_AsisToTrocanterMeasure - mm) * cos(beta) + C * cos(theta) * sin(beta)
    R_Yh = (C*sin(theta)- aa)
    R_Zh = (-R_AsisToTrocanterMeasure - mm) * sin(beta) - C * cos(theta) * cos(beta)
    # get the unit pelvis axis (axis end points relative to the origin)
    pelvis_xaxis = pel_x-pel_origin
    pelvis_yaxis = pel_y-pel_origin
    pelvis_zaxis = pel_z-pel_origin
    # multiply the distance to the unit pelvis axis
    L_hipJCx = pelvis_xaxis*L_Xh
    L_hipJCy = pelvis_yaxis*L_Yh
    L_hipJCz = pelvis_zaxis*L_Zh
    L_hipJC = np.asarray([ L_hipJCx[0]+L_hipJCy[0]+L_hipJCz[0],
                           L_hipJCx[1]+L_hipJCy[1]+L_hipJCz[1],
                           L_hipJCx[2]+L_hipJCy[2]+L_hipJCz[2]])
    R_hipJCx = pelvis_xaxis*R_Xh
    R_hipJCy = pelvis_yaxis*R_Yh
    R_hipJCz = pelvis_zaxis*R_Zh
    R_hipJC = np.asarray([ R_hipJCx[0]+R_hipJCy[0]+R_hipJCz[0],
                           R_hipJCx[1]+R_hipJCy[1]+R_hipJCz[1],
                           R_hipJCx[2]+R_hipJCy[2]+R_hipJCz[2]])
    # Translate the offsets back into the global frame
    L_hipJC = L_hipJC+pel_origin
    R_hipJC = R_hipJC+pel_origin
    hip_JC = np.asarray([L_hipJC,R_hipJC])
    return hip_JC
def hipAxisCenter(l_hip_jc,r_hip_jc,pelvis_axis):
    """Calculate the combined hip axis.

    The shared hip origin is the midpoint of the left and right hip joint
    centers; the pelvis axis directions are then translated to that origin.

    Args:
        l_hip_jc: [x, y, z] left hip joint center.
        r_hip_jc: [x, y, z] right hip joint center.
        pelvis_axis: [origin, [x_axis, y_axis, z_axis], ...] as produced by
            pelvisJointCenter.

    Returns:
        [hipaxis_center, [x_axis, y_axis, z_axis]] where each axis is the
        pelvis axis direction translated to the shared hip center.
    """
    # Shared hip origin: midpoint of the two hip joint centers.
    hipaxis_center = [(r + l) / 2 for r, l in zip(r_hip_jc, l_hip_jc)]

    # Pelvis axis directions relative to the pelvis origin.
    origin = pelvis_axis[0]
    pelvis_x_axis = np.subtract(pelvis_axis[1][0], origin)
    pelvis_y_axis = np.subtract(pelvis_axis[1][1], origin)
    pelvis_z_axis = np.subtract(pelvis_axis[1][2], origin)

    # Translate each direction to the shared hip center.
    x_axis = [pelvis_x_axis[i] + hipaxis_center[i] for i in range(3)]
    y_axis = [pelvis_y_axis[i] + hipaxis_center[i] for i in range(3)]
    z_axis = [pelvis_z_axis[i] + hipaxis_center[i] for i in range(3)]

    return [hipaxis_center, [x_axis, y_axis, z_axis]]
def kneeJointCenter(frame,hip_JC,delta,vsk=None):
    """
    Calculate the knee joint center and axis.

    Locates each knee joint center from the THI and KNE markers plus the
    hip joint centers (via the chord function findJointC), then builds an
    orthogonal knee axis for each side and translates it to the joint center.

    INPUT:  frame  - dict of marker name -> [x,y,z] position;
                     requires 'RTHI','LTHI','RKNE','LKNE'
            hip_JC - array of two hip joint centers, each [x,y,z];
                     index 1 is used as the right hip JC and index 0 as the
                     left (NOTE(review): confirm this ordering against the
                     function that produces hip_JC)
            delta  - not used here; the per-side chord offsets are
                     recomputed from the knee widths in vsk
            vsk    - dict of subject measurements;
                     requires 'RightKneeWidth','LeftKneeWidth'
    OUTPUT: [R, L, axis]
            R, L - right/left knee joint center [x,y,z] (ndarray)
            axis - 2x3x3 ndarray; for each side the x, y, z axis
                   end-points (unit axis vectors translated to the JC)
    MODIFIES: delta is effectively replaced by knee-specific offsets
    -------------------------------------------------------------------
    EXAMPLE:
        i = 1
        frame
        = { 'RTHI': [426.50338745, 262.65310669, 673.66247559],
            'LTHI': [51.93867874, 320.01849365, 723.03186035],
            'RKNE': [416.98687744, 266.22558594, 524.04089355],
            'LKNE': [84.62355804, 286.69122314, 529.39819336],...}
        hip_JC: [[182.57097863, 339.43231855, 935.52900126],
                 [309.38050472, 322.80342417, 937.98979061]]
        delta: 0
        kneeJointCenter(frame,hip_JC,delta,vsk=None)
        >>> [array([364.17774614, 292.17051722, 515.19181496]),
             array([143.55478579, 279.90370346, 524.78408753]),
             array([[[364.64959153, 293.06758353, 515.18513093],
                     [363.29019771, 292.60656648, 515.04309095],
                     [364.04724541, 292.24216264, 516.18067112]],
                    [[143.65611282, 280.88685896, 524.63197541],
                     [142.56434499, 280.01777943, 524.86163553],
                     [143.64837987, 280.04650381, 525.76940383]]])]
    """
    # Chord offset = half the measured knee width + marker radius (7 mm).
    mm = 7.0
    R_kneeWidth = vsk['RightKneeWidth']
    L_kneeWidth = vsk['LeftKneeWidth']
    R_delta = (R_kneeWidth/2.0)+mm
    L_delta = (L_kneeWidth/2.0)+mm
    #REQUIRED MARKERS:
    # RTHI
    # LTHI
    # RKNE
    # LKNE
    # hip_JC
    RTHI = frame['RTHI']
    LTHI = frame['LTHI']
    RKNE = frame['RKNE']
    LKNE = frame['LKNE']
    # NOTE(review): hip_JC[1] is treated as the right hip JC and hip_JC[0]
    # as the left -- verify against the producer of hip_JC.
    R_hip_JC = hip_JC[1]
    L_hip_JC = hip_JC[0]
    # Determine the position of kneeJointCenter using findJointC function
    R = findJointC(RTHI,R_hip_JC,RKNE,R_delta)
    L = findJointC(LTHI,L_hip_JC,LKNE,L_delta)
    # Knee Axis Calculation(ref. Clinical Gait Analysis hand book, Baker2013)
    #Right axis calculation
    thi_kne_R = RTHI-RKNE
    # Z axis is the thigh bone direction, from the knee JC toward the
    # hip JC (normalization happens further below).
    axis_z = R_hip_JC-R
    # X axis is perpendicular to the plane defined by the KJC, HJC and KNE
    # markers, obtained by a cross product (normalized further below).
    axis_x = cross(axis_z,thi_kne_R)
    # Y axis completes the right-handed triad (normalized further below).
    axis_y = cross(axis_z,axis_x)
    Raxis = np.asarray([axis_x,axis_y,axis_z])
    #Left axis calculation
    thi_kne_L = LTHI-LKNE
    # Z axis is the thigh bone direction, from the knee JC toward the hip JC.
    axis_z = L_hip_JC-L
    # X axis: note the cross-product operand order is mirrored relative to
    # the right side so the left axis keeps a consistent orientation.
    axis_x = cross(thi_kne_L,axis_z)
    # Y axis completes the triad.
    axis_y = cross(axis_z,axis_x)
    Laxis = np.asarray([axis_x,axis_y,axis_z])
    # Normalize each axis direction vector.
    R_knee_x_axis = Raxis[0]
    R_knee_x_axis = R_knee_x_axis/norm3d(R_knee_x_axis)
    R_knee_y_axis = Raxis[1]
    R_knee_y_axis = R_knee_y_axis/norm3d(R_knee_y_axis)
    R_knee_z_axis = Raxis[2]
    R_knee_z_axis = R_knee_z_axis/norm3d(R_knee_z_axis)
    L_knee_x_axis = Laxis[0]
    L_knee_x_axis = L_knee_x_axis/norm3d(L_knee_x_axis)
    L_knee_y_axis = Laxis[1]
    L_knee_y_axis = L_knee_y_axis/norm3d(L_knee_y_axis)
    L_knee_z_axis = Laxis[2]
    L_knee_z_axis = L_knee_z_axis/norm3d(L_knee_z_axis)
    # Translate the unit axes to the joint center so each row stores an
    # axis end-point rather than a direction.
    y_axis = R_knee_y_axis+R
    z_axis = R_knee_z_axis+R
    x_axis = R_knee_x_axis+R
    Raxis = np.asarray([x_axis,y_axis,z_axis])
    # Same translation for the left side.
    y_axis = L_knee_y_axis+L
    z_axis = L_knee_z_axis+L
    x_axis = L_knee_x_axis+L
    Laxis = np.asarray([x_axis,y_axis,z_axis])
    axis = np.asarray([Raxis,Laxis])
    return [R,L,axis]
def ankleJointCenter(frame,knee_JC,delta,vsk=None):
    """
    Calculate the ankle joint center and axis.

    Locates each ankle joint center from the TIB and ANK markers plus the
    knee joint centers (via the chord function findJointC), builds an
    orthogonal ankle axis per side, then rotates that axis about its z
    axis by the subject's tibial torsion before translating it to the JC.

    INPUT:  frame   - dict of marker name -> [x,y,z] position;
                      requires 'RTIB','LTIB','RANK','LANK'
            knee_JC - output of kneeJointCenter ([R, L, axis]); only the
                      two joint-center entries (indices 0 and 1) are used
            delta   - not used here; the per-side chord offsets are
                      recomputed from the ankle widths in vsk
            vsk     - dict of subject measurements; requires
                      'RightAnkleWidth','LeftAnkleWidth',
                      'RightTibialTorsion','LeftTibialTorsion'
                      (torsion presumably in degrees -- converted with
                      np.radians below; confirm against the VSK source)
    OUTPUT: [R, L, axis]
            R, L - right/left ankle joint center [x,y,z]
            axis - [Raxis, Laxis]; for each side the x, y, z axis
                   end-points (unit axis vectors translated to the JC)
    MODIFIES: -
    ---------------------------------------------------------------------
    EXAMPLE:
        i = 1
        frame
        = { 'RTIB': [433.97537231, 211.93408203, 273.3008728],
            'LTIB': [50.04016495, 235.90718079, 364.32226562],
            'RANK': [422.77005005, 217.74053955, 92.86152649],
            'LANK': [58.57380676, 208.54806519, 86.16953278],...}
        knee_JC: [array([364.17774614, 292.17051722, 515.19181496]),
                  array([143.55478579, 279.90370346, 524.78408753]),
                  array([[[364.64959153, 293.06758353, 515.18513093],
                          [363.29019771, 292.60656648, 515.04309095],
                          [364.04724541, 292.24216264, 516.18067112]],
                         [[143.65611282, 280.88685896, 524.63197541],
                          [142.56434499, 280.01777943, 524.86163553],
                          [143.64837987, 280.04650381, 525.76940383]]])]
        delta: 0
        ankleJointCenter(frame,knee_JC,delta,vsk=None)
        >>> [array([393.76181608, 247.67829633, 87.73775041]),
             array([98.74901939, 219.46930221, 80.6306816]),
             [[array([394.4817575, 248.37201348, 87.715368]),
               array([393.07114384, 248.39110006, 87.61575574]),
               array([393.69314056, 247.78157916, 88.73002876])],
              [array([98.47494966, 220.42553803, 80.52821783]),
               array([97.79246671, 219.20927275, 80.76255901]),
               array([98.84848169, 219.60345781, 81.61663775])]]]
    """
    # Chord offset = half the measured ankle width + marker radius (7 mm).
    R_ankleWidth = vsk['RightAnkleWidth']
    L_ankleWidth = vsk['LeftAnkleWidth']
    R_torsion = vsk['RightTibialTorsion']
    L_torsion = vsk['LeftTibialTorsion']
    mm = 7.0
    R_delta = ((R_ankleWidth)/2.0)+mm
    L_delta = ((L_ankleWidth)/2.0)+mm
    #REQUIRED MARKERS:
    # tib_R
    # tib_L
    # ank_R
    # ank_L
    # knee_JC
    tib_R = frame['RTIB']
    tib_L = frame['LTIB']
    ank_R = frame['RANK']
    ank_L = frame['LANK']
    knee_JC_R = knee_JC[0]
    knee_JC_L = knee_JC[1]
    # This is Torsioned Tibia and this describe the ankle angles
    # Tibial frontal plane being defined by ANK,TIB and KJC
    # Determine the position of ankleJointCenter using findJointC function
    R = findJointC(tib_R, knee_JC_R, ank_R, R_delta)
    L = findJointC(tib_L, knee_JC_L, ank_L, L_delta)
    # Ankle Axis Calculation(ref. Clinical Gait Analysis hand book, Baker2013)
    #Right axis calculation
    # Z axis is the shank bone direction, from the ankle JC toward the
    # knee JC (normalization happens further below).
    axis_z = knee_JC_R-R
    # X axis is perpendicular to the plane defined by the ANK, TIB and KJC
    # markers, obtained by a cross product.
    # tib_ank_R treats the tibia segment (TIB -> ANK) as rigid.
    tib_ank_R = tib_R-ank_R
    axis_x = cross(axis_z,tib_ank_R)
    # Y axis completes the right-handed triad.
    axis_y = cross(axis_z,axis_x)
    Raxis = [axis_x,axis_y,axis_z]
    #Left axis calculation
    # Z axis is the shank bone direction, from the ankle JC toward the knee JC.
    axis_z = knee_JC_L-L
    # X axis: note the cross-product operand order is mirrored relative to
    # the right side so the left axis keeps a consistent orientation.
    # tib_ank_L treats the tibia segment (TIB -> ANK) as rigid.
    tib_ank_L = tib_L-ank_L
    axis_x = cross(tib_ank_L,axis_z)
    # Y axis completes the triad.
    axis_y = cross(axis_z,axis_x)
    Laxis = [axis_x,axis_y,axis_z]
    # Normalize every axis direction component-wise.
    R_ankle_x_axis = Raxis[0]
    R_ankle_x_axis_div = norm2d(R_ankle_x_axis)
    R_ankle_x_axis = [R_ankle_x_axis[0]/R_ankle_x_axis_div,R_ankle_x_axis[1]/R_ankle_x_axis_div,R_ankle_x_axis[2]/R_ankle_x_axis_div]
    R_ankle_y_axis = Raxis[1]
    R_ankle_y_axis_div = norm2d(R_ankle_y_axis)
    R_ankle_y_axis = [R_ankle_y_axis[0]/R_ankle_y_axis_div,R_ankle_y_axis[1]/R_ankle_y_axis_div,R_ankle_y_axis[2]/R_ankle_y_axis_div]
    R_ankle_z_axis = Raxis[2]
    R_ankle_z_axis_div = norm2d(R_ankle_z_axis)
    R_ankle_z_axis = [R_ankle_z_axis[0]/R_ankle_z_axis_div,R_ankle_z_axis[1]/R_ankle_z_axis_div,R_ankle_z_axis[2]/R_ankle_z_axis_div]
    L_ankle_x_axis = Laxis[0]
    L_ankle_x_axis_div = norm2d(L_ankle_x_axis)
    L_ankle_x_axis = [L_ankle_x_axis[0]/L_ankle_x_axis_div,L_ankle_x_axis[1]/L_ankle_x_axis_div,L_ankle_x_axis[2]/L_ankle_x_axis_div]
    L_ankle_y_axis = Laxis[1]
    L_ankle_y_axis_div = norm2d(L_ankle_y_axis)
    L_ankle_y_axis = [L_ankle_y_axis[0]/L_ankle_y_axis_div,L_ankle_y_axis[1]/L_ankle_y_axis_div,L_ankle_y_axis[2]/L_ankle_y_axis_div]
    L_ankle_z_axis = Laxis[2]
    L_ankle_z_axis_div = norm2d(L_ankle_z_axis)
    L_ankle_z_axis = [L_ankle_z_axis[0]/L_ankle_z_axis_div,L_ankle_z_axis[1]/L_ankle_z_axis_div,L_ankle_z_axis[2]/L_ankle_z_axis_div]
    #Put both axis in array
    Raxis = [R_ankle_x_axis,R_ankle_y_axis,R_ankle_z_axis]
    Laxis = [L_ankle_x_axis,L_ankle_y_axis,L_ankle_z_axis]
    # Rotate each side's x and y axes about the z axis by the tibial
    # torsion angle (explicit 2D rotation written out component-wise).
    R_torsion = np.radians(R_torsion)
    L_torsion = np.radians(L_torsion)
    Raxis = [[math.cos(R_torsion)*Raxis[0][0]-math.sin(R_torsion)*Raxis[1][0],
              math.cos(R_torsion)*Raxis[0][1]-math.sin(R_torsion)*Raxis[1][1],
              math.cos(R_torsion)*Raxis[0][2]-math.sin(R_torsion)*Raxis[1][2]],
             [math.sin(R_torsion)*Raxis[0][0]+math.cos(R_torsion)*Raxis[1][0],
              math.sin(R_torsion)*Raxis[0][1]+math.cos(R_torsion)*Raxis[1][1],
              math.sin(R_torsion)*Raxis[0][2]+math.cos(R_torsion)*Raxis[1][2]],
             [Raxis[2][0],Raxis[2][1],Raxis[2][2]]]
    Laxis = [[math.cos(L_torsion)*Laxis[0][0]-math.sin(L_torsion)*Laxis[1][0],
              math.cos(L_torsion)*Laxis[0][1]-math.sin(L_torsion)*Laxis[1][1],
              math.cos(L_torsion)*Laxis[0][2]-math.sin(L_torsion)*Laxis[1][2]],
             [math.sin(L_torsion)*Laxis[0][0]+math.cos(L_torsion)*Laxis[1][0],
              math.sin(L_torsion)*Laxis[0][1]+math.cos(L_torsion)*Laxis[1][1],
              math.sin(L_torsion)*Laxis[0][2]+math.cos(L_torsion)*Laxis[1][2]],
             [Laxis[2][0],Laxis[2][1],Laxis[2][2]]]
    # Translate the unit axes to the joint center (list + ndarray adds
    # broadcast to ndarrays here).
    x_axis = Raxis[0]+R
    y_axis = Raxis[1]+R
    z_axis = Raxis[2]+R
    Raxis = [x_axis,y_axis,z_axis]
    x_axis = Laxis[0]+L
    y_axis = Laxis[1]+L
    z_axis = Laxis[2]+L
    Laxis = [x_axis,y_axis,z_axis]
    # Both of axis in array.
    axis = [Raxis,Laxis]
    return [R,L,axis]
def footJointCenter(frame,vsk,ankle_JC,knee_JC,delta):
    """
    Calculate the foot joint center and axis.

    Uses the TOE markers as the foot origins, builds an "uncorrected"
    anatomical foot axis (z pointing from TOE to the ankle JC), then
    rotates that axis by the static plantarflexion (beta) and rotation
    (alpha) offsets from the subject measurements to obtain the corrected
    foot axis.

    INPUT:  frame    - dict of marker name -> [x,y,z] position;
                       requires 'RTOE','LTOE'
            vsk      - dict of subject measurements; requires
                       'RightStaticRotOff','RightStaticPlantFlex',
                       'LeftStaticRotOff','LeftStaticPlantFlex'
                       (angles stored in radians)
            ankle_JC - output of ankleJointCenter ([R, L, axis]); the
                       flexion (y) axis end-point of each ankle is read
                       from the axis entry
            knee_JC  - accepted for interface compatibility; not used here
            delta    - accepted for interface compatibility; not used here
    OUTPUT: [R, L, foot_axis]
            R, L      - right/left foot origin (the TOE marker position)
            foot_axis - [R_foot_axis, L_foot_axis]; for each side the
                        x, y, z axis end-points (unit axes translated to
                        the origin)
    MODIFIES: -
    --------------------------------------------------------------------
    EXAMPLE:
        frame = { 'RHEE': [374.01257324, 181.57929993, 49.50960922],
                  'LHEE': [105.30126953, 180.2130127, 47.15660858],
                  'RTOE': [442.81997681, 381.62280273, 42.66047668],
                  'LTOE': [39.43652725, 382.44522095, 41.78911591],...}
        ankle_JC: [array([393.76181608, 247.67829633, 87.73775041]),
                   array([98.74901939, 219.46930221, 80.6306816]),
                   [[array([394.4817575, 248.37201348, 87.715368]),
                     array([393.07114384, 248.39110006, 87.61575574]),
                     array([393.69314056, 247.78157916, 88.73002876])],
                    [array([98.47494966, 220.42553803, 80.52821783]),
                     array([97.79246671, 219.20927275, 80.76255901]),
                     array([98.84848169, 219.60345781, 81.61663775])]]]
        footJointCenter(frame,vsk,ankle_JC,knee_JC,delta)
        >>> [array([442.81997681, 381.62280273, 42.66047668]),
             array([39.43652725, 382.44522095, 41.78911591]),
             [[[442.88815408948221, 381.7646059422284, 43.648020966284719],
               [441.87135392672275, 381.93856951438391, 42.680625439845173],
               [442.51100028681969, 380.68462194642137, 42.816522573058428]],
              [[39.507852120747259, 382.67891585204035, 42.75880629687082],
               [38.49231838166678, 385.14765969549836, 41.930278614215709],
               [39.758058544512153, 381.51956226668784, 41.98854919067994]]]]
    """
    #REQUIRED MARKERS:
    # RTOE
    # LTOE
    TOE_R = frame["RTOE"]
    TOE_L = frame["LTOE"]
    # Ankle joint centers and ankle flexion (y) axis end-points come from
    # ankleJointCenter's output.
    ankle_JC_R = ankle_JC[0]
    ankle_JC_L = ankle_JC[1]
    ankle_flexion_R = ankle_JC[2][0][1]
    ankle_flexion_L = ankle_JC[2][1][1]
    # Toe axis's origin is the marker position of TOE.
    R = TOE_R
    L = TOE_L
    # --- Uncorrected anatomical foot axis ---
    ankle_JC_R = [ankle_JC_R[0],ankle_JC_R[1],ankle_JC_R[2]]
    ankle_JC_L = [ankle_JC_L[0],ankle_JC_L[1],ankle_JC_L[2]]
    # Right
    # z axis points from the TOE marker to the ankle JC, normalized.
    R_axis_z = [ankle_JC_R[0]-TOE_R[0],ankle_JC_R[1]-TOE_R[1],ankle_JC_R[2]-TOE_R[2]]
    R_axis_z_div = norm2d(R_axis_z)
    R_axis_z = [R_axis_z[0]/R_axis_z_div,R_axis_z[1]/R_axis_z_div,R_axis_z[2]/R_axis_z_div]
    # Ankle flexion axis direction, normalized.
    y_flex_R = [ankle_flexion_R[0]-ankle_JC_R[0],ankle_flexion_R[1]-ankle_JC_R[1],ankle_flexion_R[2]-ankle_JC_R[2]]
    y_flex_R_div = norm2d(y_flex_R)
    y_flex_R = [y_flex_R[0]/y_flex_R_div,y_flex_R[1]/y_flex_R_div,y_flex_R[2]/y_flex_R_div]
    # x axis is perpendicular to the flexion axis and z axis.
    R_axis_x = cross(y_flex_R,R_axis_z)
    R_axis_x_div = norm2d(R_axis_x)
    R_axis_x = [R_axis_x[0]/R_axis_x_div,R_axis_x[1]/R_axis_x_div,R_axis_x[2]/R_axis_x_div]
    # y axis completes the right-handed triad.
    R_axis_y = cross(R_axis_z,R_axis_x)
    R_axis_y_div = norm2d(R_axis_y)
    R_axis_y = [R_axis_y[0]/R_axis_y_div,R_axis_y[1]/R_axis_y_div,R_axis_y[2]/R_axis_y_div]
    R_foot_axis = [R_axis_x,R_axis_y,R_axis_z]
    # Left (same construction as the right side)
    L_axis_z = [ankle_JC_L[0]-TOE_L[0],ankle_JC_L[1]-TOE_L[1],ankle_JC_L[2]-TOE_L[2]]
    L_axis_z_div = norm2d(L_axis_z)
    L_axis_z = [L_axis_z[0]/L_axis_z_div,L_axis_z[1]/L_axis_z_div,L_axis_z[2]/L_axis_z_div]
    y_flex_L = [ankle_flexion_L[0]-ankle_JC_L[0],ankle_flexion_L[1]-ankle_JC_L[1],ankle_flexion_L[2]-ankle_JC_L[2]]
    y_flex_L_div = norm2d(y_flex_L)
    y_flex_L = [y_flex_L[0]/y_flex_L_div,y_flex_L[1]/y_flex_L_div,y_flex_L[2]/y_flex_L_div]
    L_axis_x = cross(y_flex_L,L_axis_z)
    L_axis_x_div = norm2d(L_axis_x)
    L_axis_x = [L_axis_x[0]/L_axis_x_div,L_axis_x[1]/L_axis_x_div,L_axis_x[2]/L_axis_x_div]
    L_axis_y = cross(L_axis_z,L_axis_x)
    L_axis_y_div = norm2d(L_axis_y)
    L_axis_y = [L_axis_y[0]/L_axis_y_div,L_axis_y[1]/L_axis_y_div,L_axis_y[2]/L_axis_y_div]
    L_foot_axis = [L_axis_x,L_axis_y,L_axis_z]
    # --- Apply static offset angles to the uncorrected axes ---
    # Offsets are stored in radians in the subject measurements.
    R_alpha = vsk['RightStaticRotOff']
    R_beta = vsk['RightStaticPlantFlex']
    L_alpha = vsk['LeftStaticRotOff']
    L_beta = vsk['LeftStaticPlantFlex']
    # Round to 5 decimal places of a degree before converting back to
    # radians (matches the precision the offsets were stored with).
    R_alpha = np.around(math.degrees(R_alpha),decimals=5)
    R_beta = np.around(math.degrees(R_beta),decimals=5)
    L_alpha = np.around(math.degrees(L_alpha),decimals=5)
    L_beta = np.around(math.degrees(L_beta),decimals=5)
    # NOTE(review): only the right rotation offset is negated -- this
    # mirrors the rotation direction between sides; confirm intended.
    R_alpha = -math.radians(R_alpha)
    R_beta = math.radians(R_beta)
    L_alpha = math.radians(L_alpha)
    L_beta = math.radians(L_beta)
    R_axis = [[(R_foot_axis[0][0]),(R_foot_axis[0][1]),(R_foot_axis[0][2])],
              [(R_foot_axis[1][0]),(R_foot_axis[1][1]),(R_foot_axis[1][2])],
              [(R_foot_axis[2][0]),(R_foot_axis[2][1]),(R_foot_axis[2][2])]]
    L_axis = [[(L_foot_axis[0][0]),(L_foot_axis[0][1]),(L_foot_axis[0][2])],
              [(L_foot_axis[1][0]),(L_foot_axis[1][1]),(L_foot_axis[1][2])],
              [(L_foot_axis[2][0]),(L_foot_axis[2][1]),(L_foot_axis[2][2])]]
    # Rotate about the y axis by beta (plantarflexion offset) first.
    # right
    R_rotmat = [[(math.cos(R_beta)*R_axis[0][0]+math.sin(R_beta)*R_axis[2][0]),
                 (math.cos(R_beta)*R_axis[0][1]+math.sin(R_beta)*R_axis[2][1]),
                 (math.cos(R_beta)*R_axis[0][2]+math.sin(R_beta)*R_axis[2][2])],
                [R_axis[1][0],R_axis[1][1],R_axis[1][2]],
                [(-1*math.sin(R_beta)*R_axis[0][0]+math.cos(R_beta)*R_axis[2][0]),
                 (-1*math.sin(R_beta)*R_axis[0][1]+math.cos(R_beta)*R_axis[2][1]),
                 (-1*math.sin(R_beta)*R_axis[0][2]+math.cos(R_beta)*R_axis[2][2])]]
    # left
    L_rotmat = [[(math.cos(L_beta)*L_axis[0][0]+math.sin(L_beta)*L_axis[2][0]),
                 (math.cos(L_beta)*L_axis[0][1]+math.sin(L_beta)*L_axis[2][1]),
                 (math.cos(L_beta)*L_axis[0][2]+math.sin(L_beta)*L_axis[2][2])],
                [L_axis[1][0],L_axis[1][1],L_axis[1][2]],
                [(-1*math.sin(L_beta)*L_axis[0][0]+math.cos(L_beta)*L_axis[2][0]),
                 (-1*math.sin(L_beta)*L_axis[0][1]+math.cos(L_beta)*L_axis[2][1]),
                 (-1*math.sin(L_beta)*L_axis[0][2]+math.cos(L_beta)*L_axis[2][2])]]
    # Then rotate about the x axis by alpha (rotation offset).
    # right
    R_rotmat = [[R_rotmat[0][0],R_rotmat[0][1],R_rotmat[0][2]],
                [(math.cos(R_alpha)*R_rotmat[1][0]-math.sin(R_alpha)*R_rotmat[2][0]),
                 (math.cos(R_alpha)*R_rotmat[1][1]-math.sin(R_alpha)*R_rotmat[2][1]),
                 (math.cos(R_alpha)*R_rotmat[1][2]-math.sin(R_alpha)*R_rotmat[2][2])],
                [(math.sin(R_alpha)*R_rotmat[1][0]+math.cos(R_alpha)*R_rotmat[2][0]),
                 (math.sin(R_alpha)*R_rotmat[1][1]+math.cos(R_alpha)*R_rotmat[2][1]),
                 (math.sin(R_alpha)*R_rotmat[1][2]+math.cos(R_alpha)*R_rotmat[2][2])]]
    # left
    L_rotmat = [[L_rotmat[0][0],L_rotmat[0][1],L_rotmat[0][2]],
                [(math.cos(L_alpha)*L_rotmat[1][0]-math.sin(L_alpha)*L_rotmat[2][0]),
                 (math.cos(L_alpha)*L_rotmat[1][1]-math.sin(L_alpha)*L_rotmat[2][1]),
                 (math.cos(L_alpha)*L_rotmat[1][2]-math.sin(L_alpha)*L_rotmat[2][2])],
                [(math.sin(L_alpha)*L_rotmat[1][0]+math.cos(L_alpha)*L_rotmat[2][0]),
                 (math.sin(L_alpha)*L_rotmat[1][1]+math.cos(L_alpha)*L_rotmat[2][1]),
                 (math.sin(L_alpha)*L_rotmat[1][2]+math.cos(L_alpha)*L_rotmat[2][2])]]
    # Bring each x,y,z axis from the rotated matrices.
    R_axis_x = R_rotmat[0]
    R_axis_y = R_rotmat[1]
    R_axis_z = R_rotmat[2]
    L_axis_x = L_rotmat[0]
    L_axis_y = L_rotmat[1]
    L_axis_z = L_rotmat[2]
    # Translate the unit axes to each foot origin.
    R_axis_x = [R_axis_x[0]+R[0],R_axis_x[1]+R[1],R_axis_x[2]+R[2]]
    R_axis_y = [R_axis_y[0]+R[0],R_axis_y[1]+R[1],R_axis_y[2]+R[2]]
    R_axis_z = [R_axis_z[0]+R[0],R_axis_z[1]+R[1],R_axis_z[2]+R[2]]
    R_foot_axis = [R_axis_x,R_axis_y,R_axis_z]
    L_axis_x = [L_axis_x[0]+L[0],L_axis_x[1]+L[1],L_axis_x[2]+L[2]]
    L_axis_y = [L_axis_y[0]+L[0],L_axis_y[1]+L[1],L_axis_y[2]+L[2]]
    L_axis_z = [L_axis_z[0]+L[0],L_axis_z[1]+L[1],L_axis_z[2]+L[2]]
    L_foot_axis = [L_axis_x,L_axis_y,L_axis_z]
    foot_axis = [R_foot_axis,L_foot_axis]
    return [R,L,foot_axis]
# Upperbody Coordinate System
def headJC(frame,vsk=None):
    """
    Calculate the head joint center and axis.

    Builds an orthogonal head axis from the four head markers and rotates
    it about its y axis by the subject's head offset angle.

    INPUT:  frame - dict of marker name -> [x,y,z] position;
                    requires 'LFHD','RFHD','LBHD','RBHD'
            vsk   - dict of subject measurements; requires 'HeadOffset'
    OUTPUT: [head_axis, origin]
            head_axis - [x_axis, y_axis, z_axis] end-points (unit axes
                        translated to the origin)
            origin    - midpoint of the two front head markers
    MODIFIES: -
    ---------------------------------------------------------------------------
    EXAMPLE:
        frame = {'RFHD': [325.82983398, 402.55450439, 1722.49816895],
                 'LFHD': [184.55158997, 409.68713379, 1721.34289551],
                 'RBHD': [304.39898682, 242.91339111, 1694.97497559],
                 'LBHD': [197.8621521, 251.28889465, 1696.90197754], ...}
        headJC(frame,vsk=None)
        >>> [[[255.21590217746564, 407.10741939149585, 1722.0817317995723],
              [254.19105385179665, 406.146809183757, 1721.9176771191715],
              [255.18370553356357, 405.959746549898, 1722.9074499262838]],
             [255.19071197509766, 406.12081909179687, 1721.9205322265625]]
    """
    # The rotation is applied with the negated stored offset.
    offset_angle = -1 * vsk['HeadOffset']

    lfhd = frame['LFHD']
    rfhd = frame['RFHD']
    lbhd = frame['LBHD']
    rbhd = frame['RBHD']

    def midpoint(p, q):
        # Component-wise midpoint of two 3D points.
        return [(p[i] + q[i]) / 2.0 for i in range(3)]

    def unit(v):
        # Normalize a 3-vector using the module-level norm2d helper.
        mag = norm2d(v)
        return [v[0] / mag, v[1] / mag, v[2] / mag]

    # Midpoints of the marker pairs define the sides of the head.
    front = midpoint(lfhd, rfhd)
    back = midpoint(lbhd, rbhd)
    left = midpoint(lfhd, lbhd)
    right = midpoint(rfhd, rbhd)
    origin = front

    # Primary x direction is back -> front, secondary y is right -> left.
    x_vec = unit([front[i] - back[i] for i in range(3)])
    y_vec = unit([left[i] - right[i] for i in range(3)])
    z_vec = unit(cross(x_vec, y_vec))
    # Re-orthogonalize so the triad is mutually perpendicular.
    y_vec = unit(cross(z_vec, x_vec))
    x_vec = unit(cross(y_vec, z_vec))

    # Rotate x and z about the y axis by the head offset angle.
    cos_a = math.cos(offset_angle)
    sin_a = math.sin(offset_angle)
    x_rot = [x_vec[i] * cos_a + z_vec[i] * sin_a for i in range(3)]
    y_rot = [y_vec[0], y_vec[1], y_vec[2]]
    z_rot = [x_vec[i] * -1 * sin_a + z_vec[i] * cos_a for i in range(3)]

    # Translate the unit axes to the origin (front of the head).
    x_axis = [x_rot[i] + origin[i] for i in range(3)]
    y_axis = [y_rot[i] + origin[i] for i in range(3)]
    z_axis = [z_rot[i] + origin[i] for i in range(3)]

    return [[x_axis, y_axis, z_axis], origin]
def thoraxJC(frame):
    """
    Calculate the thorax joint center and axis.

    Builds an orthogonal thorax axis from the CLAV, C7, STRN and T10
    markers and shifts the origin off the CLAV marker by half the marker
    diameter along the x axis.

    INPUT:  frame - dict of marker name -> [x,y,z] position;
                    requires 'CLAV','C7','STRN','T10'
    OUTPUT: [thorax_axis, origin]
            thorax_axis - [x_axis, y_axis, z_axis] end-points (unit axes
                          translated to the origin)
            origin      - CLAV moved back along x by the marker radius
    MODIFIES: -
    ---------------------------------------------------------------------------
    EXAMPLE:
        frame = {'C7': [256.78051758, 371.28042603, 1459.70300293],
                 'T10': [228.64323425, 192.32041931, 1279.6418457],
                 'CLAV': [256.78051758, 371.28042603, 1459.70300293],
                 'STRN': [251.67492676, 414.10391235, 1292.08508301], ...}
        thoraxJC(frame)
        >>> [[[256.23991128535846, 365.30496976939753, 1459.662169500559],
              [257.1435863244796, 364.21960599061947, 1459.5889787129829],
              [256.08430536580352, 354.32180498523223, 1458.6575930699294]],
             [256.14981023656401, 364.30906039339868, 1459.6553639290375]]
    """
    # Half of the physical marker diameter (14 mm), used to move the
    # origin from the marker center onto the body.
    marker_size = (14.0) /2.0
    # Get the marker positions used for joint calculation.
    CLAV = frame['CLAV']
    C7 = frame['C7']
    STRN = frame['STRN']
    T10 = frame['T10']
    # Temporary origin since the origin will be moved at the end.
    origin = CLAV
    # Midpoints of the upper/lower and front/back marker pairs.
    upper = [(CLAV[0]+C7[0])/2.0,(CLAV[1]+C7[1])/2.0,(CLAV[2]+C7[2])/2.0]
    lower = [(STRN[0]+T10[0])/2.0,(STRN[1]+T10[1])/2.0,(STRN[2]+T10[2])/2.0]
    front = [(CLAV[0]+STRN[0])/2.0,(CLAV[1]+STRN[1])/2.0,(CLAV[2]+STRN[2])/2.0]
    back = [(T10[0]+C7[0])/2.0,(T10[1]+C7[1])/2.0,(T10[2]+C7[2])/2.0]
    # Primary axis Z points down (upper midpoint -> lower midpoint).
    z_direc = [lower[0]-upper[0],lower[1]-upper[1],lower[2]-upper[2]]
    z_vec = z_direc/norm3d(z_direc)
    # Secondary axis X points from back to front.
    x_direc = [front[0]-back[0],front[1]-back[1],front[2]-back[2]]
    x_vec = x_direc/norm3d(x_direc)
    # Re-orthogonalize the triad with cross products so the axes are
    # mutually perpendicular.
    y_direc = cross(z_vec,x_vec)
    y_vec = y_direc/norm3d(y_direc)
    x_direc = cross(y_vec,z_vec)
    x_vec = x_direc/norm3d(x_direc)
    z_direc = cross(x_vec,y_vec)
    z_vec = z_direc/norm3d(z_direc)
    # Move the origin back along the x axis by the marker radius.
    offset = [x_vec[0]*marker_size,x_vec[1]*marker_size,x_vec[2]*marker_size]
    origin = [CLAV[0]-offset[0],CLAV[1]-offset[1],CLAV[2]-offset[2]]
    # Attach all the axes to the origin.
    x_axis = [x_vec[0]+origin[0],x_vec[1]+origin[1],x_vec[2]+origin[2]]
    y_axis = [y_vec[0]+origin[0],y_vec[1]+origin[1],y_vec[2]+origin[2]]
    z_axis = [z_vec[0]+origin[0],z_vec[1]+origin[1],z_vec[2]+origin[2]]
    thorax_axis = [x_axis,y_axis,z_axis]
    return [thorax_axis,origin]
def findwandmarker(frame,thorax):
    """
    Compute the virtual wand markers used to calculate the clavicle.

    INPUT:  frame  - dict of marker name -> [x,y,z] position;
                     requires 'RSHO','LSHO'
            thorax - output of thoraxJC: [thorax_axis, origin]
    OUTPUT: [R_wand, L_wand] - right/left wand marker [x,y,z], each one
            unit away from the thorax origin, perpendicular to the plane
            spanned by the thorax x axis and the shoulder direction
    MODIFIES: -
    ---------------------------------------------------------------------------
    EXAMPLE:
        frame : {'RSHO': [428.88496562, 270.552948, 1500.73010254],
                 'LSHO': [68.24668121, 269.01049805, 1510.1072998], ...}
        thorax : [[[256.23991128535846, 365.30496976939753, 1459.662169500559],
                   [257.1435863244796, 364.21960599061947, 1459.5889787129829],
                   [256.08430536580352, 354.32180498523223, 1458.6575930699294]],
                  [256.14981023656401, 364.30906039339868, 1459.6553639290375]]
        findwandmarker(frame,thorax)
        >>> [[255.92550222678443, 364.32269504976051, 1460.6297868417887],
             [256.42380097331767, 364.27770361353487, 1460.6165849382387]]
    """
    origin = thorax[1]
    x_axis_end = thorax[0][0]

    # Required markers: RSHO, LSHO.
    r_sho = frame['RSHO']
    l_sho = frame['LSHO']

    def unit(v):
        # Normalize with the module-level norm3d helper.
        return v / norm3d(v)

    # Thorax x axis expressed as a unit direction from the thorax origin.
    x_dir = unit([x_axis_end[0] - origin[0],
                  x_axis_end[1] - origin[1],
                  x_axis_end[2] - origin[2]])

    # Unit directions from the thorax origin to each shoulder marker.
    r_sho_dir = unit([r_sho[0] - origin[0], r_sho[1] - origin[1], r_sho[2] - origin[2]])
    l_sho_dir = unit([l_sho[0] - origin[0], l_sho[1] - origin[1], l_sho[2] - origin[2]])

    # Wand direction is perpendicular to the shoulder direction and the
    # thorax x axis; the operand order is mirrored between sides.
    r_wand_dir = unit(cross(r_sho_dir, x_dir))
    l_wand_dir = unit(cross(x_dir, l_sho_dir))

    # Place each wand marker one unit away from the thorax origin.
    r_wand = [origin[0] + r_wand_dir[0],
              origin[1] + r_wand_dir[1],
              origin[2] + r_wand_dir[2]]
    l_wand = [origin[0] + l_wand_dir[0],
              origin[1] + l_wand_dir[1],
              origin[2] + l_wand_dir[2]]

    return [r_wand, l_wand]
def findshoulderJC(frame,thorax,wand,vsk=None):
    """
    Locate the right and left shoulder joint centers.

    INPUT:  frame  - dict of marker name -> [x,y,z] position;
                     requires 'RSHO','LSHO'
            thorax - output of thoraxJC: [thorax_axis, origin]
            wand   - [R_wand, L_wand] from findwandmarker
            vsk    - dict of subject measurements;
                     requires 'RightShoulderOffset','LeftShoulderOffset'
    OUTPUT: [R_Sho_JC, L_Sho_JC] - right/left shoulder joint center [x,y,z]
    MODIFIES: -
    ---------------------------------------------------------------------------
    EXAMPLE:
        frame : {'RSHO': [428.88496562, 270.552948, 1500.73010254],
                 'LSHO': [68.24668121, 269.01049805, 1510.1072998], ...}
        thorax : [[[256.23991128535846, 365.30496976939753, 1459.662169500559],
                   [257.1435863244796, 364.21960599061947, 1459.5889787129829],
                   [256.08430536580352, 354.32180498523223, 1458.6575930699294]],
                  [256.14981023656401, 364.30906039339868, 1459.6553639290375]]
        wand : [[255.92550222678443, 364.32269504976051, 1460.6297868417887],
                [256.42380097331767, 364.27770361353487, 1460.6165849382387]]
        findshoulderJC(frame,thorax,wand,vsk=None)
        >>> [array([429.66951995, 275.06718615, 1453.953978131]),
             array([64.51952734, 274.93442161, 1463.6313334])]
    """
    # Marker radius padding added to the measured shoulder offsets.
    marker_radius_mm = 7.0
    origin = thorax[1]

    # Per-side chord offsets from the subject measurement file.
    r_offset = vsk['RightShoulderOffset'] + marker_radius_mm
    l_offset = vsk['LeftShoulderOffset'] + marker_radius_mm

    # Required markers: RSHO, LSHO.
    r_wand, l_wand = wand[0], wand[1]

    # Chord-function solve: the JC lies the offset distance from the SHO
    # marker within the plane of wand marker, thorax origin and SHO.
    right_jc = findJointC(r_wand, origin, frame['RSHO'], r_offset)
    left_jc = findJointC(l_wand, origin, frame['LSHO'], l_offset)

    return [right_jc, left_jc]
def shoulderAxisCalc(frame,thorax,shoulderJC,wand):
    """
    Build the right and left shoulder (clavicle) coordinate axes.

    Parameters
    ----------
    frame : dict
        Marker positions (unused here; kept for a consistent signature
        with the other axis functions).
    thorax : list
        Thorax axis data; thorax[1] is the thorax origin [x, y, z].
    shoulderJC : list
        [R_shoulder_JC, L_shoulder_JC] joint center positions.
    wand : list
        [R_wand, L_wand] virtual wand marker positions.

    Returns
    -------
    list
        [shoulderJC, [R_axis, L_axis]] where each axis is
        [x_axis, y_axis, z_axis] end points attached to its joint center.
    """
    thorax_origin = thorax[1]

    def unit(v):
        # Normalize a 3-vector; norm3d returns the Euclidean length.
        return v / norm3d(v)

    def build_axis(joint_center, y_seed):
        # z points from the joint center toward the thorax origin; x and y
        # are made orthogonal to it via two cross products.
        z_dir = unit([thorax_origin[0] - joint_center[0],
                      thorax_origin[1] - joint_center[1],
                      thorax_origin[2] - joint_center[2]])
        x_dir = unit(cross(y_seed, z_dir))
        y_dir = unit(cross(z_dir, x_dir))
        # Attach each unit direction to the joint center.
        return [[x_dir[0] + joint_center[0], x_dir[1] + joint_center[1], x_dir[2] + joint_center[2]],
                [y_dir[0] + joint_center[0], y_dir[1] + joint_center[1], y_dir[2] + joint_center[2]],
                [z_dir[0] + joint_center[0], z_dir[1] + joint_center[1], z_dir[2] + joint_center[2]]]

    # Unit directions from the thorax origin toward each wand marker.
    R_wand = wand[0]
    L_wand = wand[1]
    R_wand_dir = unit([R_wand[0] - thorax_origin[0],
                       R_wand[1] - thorax_origin[1],
                       R_wand[2] - thorax_origin[2]])
    L_wand_dir = unit([L_wand[0] - thorax_origin[0],
                       L_wand[1] - thorax_origin[1],
                       L_wand[2] - thorax_origin[2]])

    # The right side seeds its y axis with the negated wand direction;
    # the left side uses the wand direction as-is.
    R_axis = build_axis(shoulderJC[0],
                        [R_wand_dir[0] * -1, R_wand_dir[1] * -1, R_wand_dir[2] * -1])
    L_axis = build_axis(shoulderJC[1], L_wand_dir)

    return [shoulderJC, [R_axis, L_axis]]
def elbowJointCenter(frame,thorax,shoulderJC,wand,vsk=None):
    """
    Calculate the elbow joint centers/axes (humerus) and the wrist joint
    centers used to orient them.

    Parameters
    ----------
    frame : dict
        Marker name -> [x, y, z]; requires RSHO, LSHO, RELB, LELB,
        RWRA, RWRB, LWRA, LWRB.
    thorax : list
        Thorax axis data: [[x_axis, y_axis, z_axis], origin].
    shoulderJC : list
        [R_shoulder_JC, L_shoulder_JC] joint center positions.
    wand : list
        [R_wand, L_wand] virtual wand markers (unused here; kept for a
        consistent signature).
    vsk : dict
        Subject measurements; requires RightElbowWidth, LeftElbowWidth,
        RightWristWidth, LeftWristWidth.

    Returns
    -------
    list
        [origin, axis, wrist_O] where
        origin  = [REJC, LEJC] elbow joint centers,
        axis    = [R_axis, L_axis], each [x_axis, y_axis, z_axis] end
                  points attached to the elbow joint center,
        wrist_O = [RWJC, LWJC] wrist joint centers.
    """
    def _unit(v):
        # Normalize a 3-vector; norm2d returns the Euclidean length.
        d = norm2d(v)
        return [v[0]/d, v[1]/d, v[2]/d]

    RSHO = frame['RSHO']
    LSHO = frame['LSHO']
    RELB = frame['RELB']
    LELB = frame['LELB']
    RWRA = frame['RWRA']
    RWRB = frame['RWRB']
    LWRA = frame['LWRA']
    LWRB = frame['LWRB']

    # Elbow width is negated on the right so both deltas push the joint
    # center medially; 7 mm accounts for the marker radius.
    R_elbowwidth = vsk['RightElbowWidth'] * -1
    L_elbowwidth = vsk['LeftElbowWidth']
    mm = 7.0
    R_delta = (R_elbowwidth/2.0) - mm
    L_delta = (L_elbowwidth/2.0) + mm

    # Virtual wrist markers: midpoint of each wrist marker pair.
    RWRI = [(RWRA[0]+RWRB[0])/2.0,(RWRA[1]+RWRB[1])/2.0,(RWRA[2]+RWRB[2])/2.0]
    LWRI = [(LWRA[0]+LWRB[0])/2.0,(LWRA[1]+LWRB[1])/2.0,(LWRA[2]+LWRB[2])/2.0]

    # Provisional humerus axes. NOTE: these values are superseded by the
    # recombined axes computed below from the final joint centers; they
    # are retained for parity with the reference implementation.
    tho_y_axis = np.subtract(thorax[0][1],thorax[1])
    # Shoulder markers shifted along the thorax y axis, made elbow-relative.
    R_sho_mod = [(RSHO[0]-R_delta*tho_y_axis[0]-RELB[0]),
                 (RSHO[1]-R_delta*tho_y_axis[1]-RELB[1]),
                 (RSHO[2]-R_delta*tho_y_axis[2]-RELB[2])]
    L_sho_mod = [(LSHO[0]+L_delta*tho_y_axis[0]-LELB[0]),
                 (LSHO[1]+L_delta*tho_y_axis[1]-LELB[1]),
                 (LSHO[2]+L_delta*tho_y_axis[2]-LELB[2])]

    # right axis
    z_axis = _unit(R_sho_mod)
    # this is the reference axis (elbow toward wrist)
    x_axis = _unit(np.subtract(RWRI,RELB))
    y_axis = _unit(cross(z_axis,x_axis))
    x_axis = _unit(cross(y_axis,z_axis))
    R_axis = [x_axis,y_axis,z_axis]

    # left axis — mirror of the right branch.
    # BUG FIX: the previous code subtracted LELB from L_sho_mod a second
    # time (L_sho_mod is already elbow-relative) and used L_sho_mod as the
    # x reference instead of the wrist-to-elbow vector.
    z_axis = _unit(L_sho_mod)
    # this is the reference axis (elbow toward wrist)
    x_axis = _unit(np.subtract(LWRI,LELB))
    y_axis = _unit(cross(z_axis,x_axis))
    x_axis = _unit(cross(y_axis,z_axis))
    L_axis = [x_axis,y_axis,z_axis]

    RSJC = shoulderJC[0]
    LSJC = shoulderJC[1]

    # Construction vectors: the normal of the shoulder/elbow/wrist plane,
    # extended 500 mm from the elbow marker, seeds findJointC.
    R_con_1 = _unit(np.subtract(RSJC,RELB))
    R_con_2 = _unit(np.subtract(RWRI,RELB))
    R_cons_vec = _unit(cross(R_con_1,R_con_2))
    R_cons_vec = [R_cons_vec[0]*500+RELB[0],R_cons_vec[1]*500+RELB[1],R_cons_vec[2]*500+RELB[2]]

    L_con_1 = _unit(np.subtract(LSJC,LELB))
    L_con_2 = _unit(np.subtract(LWRI,LELB))
    L_cons_vec = _unit(cross(L_con_1,L_con_2))
    L_cons_vec = [L_cons_vec[0]*500+LELB[0],L_cons_vec[1]*500+LELB[1],L_cons_vec[2]*500+LELB[2]]

    REJC = findJointC(R_cons_vec,RSJC,RELB,R_delta)
    LEJC = findJointC(L_cons_vec,LSJC,LELB,L_delta)

    # Radius (forearm) axes — needed to place the wrist joint centers.
    # right
    x_axis = _unit(np.subtract(RWRA,RWRB))
    z_axis = _unit(np.subtract(REJC,RWRI))
    y_axis = _unit(cross(z_axis,x_axis))
    x_axis = _unit(cross(y_axis,z_axis))
    R_radius = [x_axis,y_axis,z_axis]
    # left
    x_axis = _unit(np.subtract(LWRA,LWRB))
    z_axis = _unit(np.subtract(LEJC,LWRI))
    y_axis = _unit(cross(z_axis,x_axis))
    x_axis = _unit(cross(y_axis,z_axis))
    L_radius = [x_axis,y_axis,z_axis]

    # Wrist joint centers: offset from the wrist midpoint along the radius
    # y axis by half the wrist width plus the marker radius.
    R_wristThickness = vsk['RightWristWidth'] / 2 + mm
    L_wristThickness = vsk['LeftWristWidth'] / 2 + mm
    RWJC = [RWRI[0]+R_wristThickness*R_radius[1][0],RWRI[1]+R_wristThickness*R_radius[1][1],RWRI[2]+R_wristThickness*R_radius[1][2]]
    LWJC = [LWRI[0]-L_wristThickness*L_radius[1][0],LWRI[1]-L_wristThickness*L_radius[1][1],LWRI[2]-L_wristThickness*L_radius[1][2]]

    # Recombine the humerus axes from the final joint centers.
    # right
    z_axis = _unit(np.subtract(RSJC,REJC))
    x_axis = _unit(np.subtract(RWJC,REJC))
    y_axis = _unit(cross(x_axis,z_axis))
    x_axis = _unit(cross(y_axis,z_axis))
    # attach each calculated elbow axis to the elbow joint center
    x_axis = [x_axis[0]+REJC[0],x_axis[1]+REJC[1],x_axis[2]+REJC[2]]
    y_axis = [y_axis[0]+REJC[0],y_axis[1]+REJC[1],y_axis[2]+REJC[2]]
    z_axis = [z_axis[0]+REJC[0],z_axis[1]+REJC[1],z_axis[2]+REJC[2]]
    R_axis = [x_axis,y_axis,z_axis]
    # left
    z_axis = _unit(np.subtract(LSJC,LEJC))
    x_axis = _unit(np.subtract(LWJC,LEJC))
    y_axis = _unit(cross(x_axis,z_axis))
    x_axis = _unit(cross(y_axis,z_axis))
    # attach each calculated elbow axis to the elbow joint center
    x_axis = [x_axis[0]+LEJC[0],x_axis[1]+LEJC[1],x_axis[2]+LEJC[2]]
    y_axis = [y_axis[0]+LEJC[0],y_axis[1]+LEJC[1],y_axis[2]+LEJC[2]]
    z_axis = [z_axis[0]+LEJC[0],z_axis[1]+LEJC[1],z_axis[2]+LEJC[2]]
    L_axis = [x_axis,y_axis,z_axis]

    axis = [R_axis,L_axis]
    origin = [REJC,LEJC]
    wrist_O = [RWJC,LWJC]
    return [origin,axis,wrist_O]
def wristJointCenter(frame,shoulderJC,wand,elbowJC):
    """
    Calculate the wrist joint axes (radius).

    Parameters
    ----------
    frame : dict
        Marker positions (unused here; kept for a consistent signature).
    shoulderJC : list
        Shoulder joint centers (unused here; kept for a consistent
        signature).
    wand : list
        Virtual wand markers (unused here; kept for a consistent
        signature).
    elbowJC : list
        [elbow origins, elbow axes, wrist joint centers] as returned by
        elbowJointCenter().

    Returns
    -------
    list
        [origin, axis] where origin = [RWJC, LWJC] and
        axis = [R_axis, L_axis], each axis being [x, y, z] end points
        attached to the wrist joint center.
    """
    def radius_axis(ejc, elbow_axis, wjc):
        # y axis: the elbow flexion axis (the elbow y end point made
        # relative to the elbow joint center).
        flex = [elbow_axis[1][0] - ejc[0],
                elbow_axis[1][1] - ejc[1],
                elbow_axis[1][2] - ejc[2]]
        y = flex / norm3d(flex)
        # z axis: from the wrist joint center toward the elbow center.
        z = np.subtract(ejc, wjc)
        z = z / norm3d(z)
        x = cross(y, z)
        x = x / norm3d(x)
        # Re-orthogonalize z against the final x and y.
        z = cross(x, y)
        z = z / norm3d(z)
        # Attach all axes to the wrist joint center.
        return [[x[0] + wjc[0], x[1] + wjc[1], x[2] + wjc[2]],
                [y[0] + wjc[0], y[1] + wjc[1], y[2] + wjc[2]],
                [z[0] + wjc[0], z[1] + wjc[1], z[2] + wjc[2]]]

    # Elbow joint centers, elbow axes, and wrist joint centers.
    REJC = elbowJC[0][0]
    LEJC = elbowJC[0][1]
    RWJC = elbowJC[2][0]
    LWJC = elbowJC[2][1]

    R_axis = radius_axis(REJC, elbowJC[1][0], RWJC)
    L_axis = radius_axis(LEJC, elbowJC[1][1], LWJC)

    return [[RWJC, LWJC], [R_axis, L_axis]]
def handJointCenter(frame,elbowJC,wristJC,vsk=None):
    """
    Calculate the hand joint centers and axes.

    Parameters
    ----------
    frame : dict
        Marker name -> [x, y, z]; requires RWRA, RWRB, LWRA, LWRB,
        RFIN, LFIN.
    elbowJC : list
        Elbow joint data (unused here; kept for a consistent signature).
    wristJC : list
        Wrist joint data; wristJC[0] holds [RWJC, LWJC].
    vsk : dict
        Subject measurements; requires RightHandThickness and
        LeftHandThickness.

    Returns
    -------
    list
        [origin, axis] where origin = [RHND, LHND] hand joint centers and
        axis = [R_axis, L_axis], each [x, y, z] end points attached to
        the hand joint center.
    """
    def hand_axis(hnd, wjc, y_seed):
        # z axis: from the hand joint center toward the wrist joint center.
        z = [wjc[0] - hnd[0], wjc[1] - hnd[1], wjc[2] - hnd[2]]
        d = norm2d(z)
        z = [z[0]/d, z[1]/d, z[2]/d]
        y = y_seed
        d = norm2d(y)
        y = [y[0]/d, y[1]/d, y[2]/d]
        x = cross(y, z)
        d = norm2d(x)
        x = [x[0]/d, x[1]/d, x[2]/d]
        # Re-orthogonalize y against the final z and x.
        y = cross(z, x)
        d = norm2d(y)
        y = [y[0]/d, y[1]/d, y[2]/d]
        # Attach the axes to the hand joint center (the origin).
        return [[x[0] + hnd[0], x[1] + hnd[1], x[2] + hnd[2]],
                [y[0] + hnd[0], y[1] + hnd[1], y[2] + hnd[2]],
                [z[0] + hnd[0], z[1] + hnd[1], z[2] + hnd[2]]]

    RWRA = frame['RWRA']
    RWRB = frame['RWRB']
    LWRA = frame['LWRA']
    LWRB = frame['LWRB']
    RFIN = frame['RFIN']
    LFIN = frame['LFIN']

    # Virtual wrist markers: midpoint of each wrist marker pair.
    RWRI = [(RWRA[0]+RWRB[0])/2.0,(RWRA[1]+RWRB[1])/2.0,(RWRA[2]+RWRB[2])/2.0]
    LWRI = [(LWRA[0]+LWRB[0])/2.0,(LWRA[1]+LWRB[1])/2.0,(LWRA[2]+LWRB[2])/2.0]

    RWJC = wristJC[0][0]
    LWJC = wristJC[0][1]

    # Offset from the finger marker to the joint center: half the hand
    # thickness plus 7 mm for the marker radius.
    mm = 7.0
    R_delta = vsk['RightHandThickness']/2 + mm
    L_delta = vsk['LeftHandThickness']/2 + mm

    LHND = findJointC(LWRI, LWJC, LFIN, L_delta)
    RHND = findJointC(RWRI, RWJC, RFIN, R_delta)

    # The y seeds point laterally: the left uses LWRA->LWRI, the right
    # uses RWRI->RWRA (mirrored handedness).
    L_axis = hand_axis(LHND, LWJC,
                       [LWRI[0]-LWRA[0], LWRI[1]-LWRA[1], LWRI[2]-LWRA[2]])
    R_axis = hand_axis(RHND, RWJC,
                       [RWRA[0]-RWRI[0], RWRA[1]-RWRI[1], RWRA[2]-RWRI[2]])

    return [[RHND, LHND], [R_axis, L_axis]]
def findJointC(a, b, c, delta):
    """
    Calculate a joint center from three coplanar markers.

    The joint center lies in the plane of markers a, b, c. The vector
    (b - c) is rotated about the plane normal (Rodrigues' rotation
    formula) by twice the angle whose cosine is delta/|b - c|, rescaled
    to half the length of (b - c), and placed at the midpoint of b and c.

    Parameters
    ----------
    a, b, c : array_like
        Marker [x, y, z] positions, all in the same plane.
    delta : float
        Distance from the marker to the joint center; must satisfy
        delta <= |b - c| or math.acos raises ValueError.

    Returns
    -------
    numpy.ndarray
        Joint center [x, y, z].

    Example
    -------
    a = [468.14532471, 325.09780884, 673.12591553]
    b = [355.90861996, 365.38260964, 940.6974861]
    c = [452.35180664, 329.0609436, 524.77893066]
    delta = 59.5
    -> approximately [396.26807934, 347.78080454, 518.62778789]
    """
    a = np.asarray(a, dtype=float)
    b = np.asarray(b, dtype=float)
    c = np.asarray(c, dtype=float)

    # Two in-plane vectors; their cross product is the rotation axis.
    v1 = a - c
    v2 = b - c
    v3 = np.cross(v1, v2)
    v3 = v3 / np.linalg.norm(v3)

    # Midpoint of b and c; half the length of (b - c) is the radius at
    # which the rotated vector is placed.
    m = (b + c) / 2.0
    length = np.linalg.norm(b - m)

    # Rotation angle derived from the marker-to-joint-center distance.
    theta = math.acos(delta / np.linalg.norm(v2))
    cs = math.cos(theta * 2)
    sn = math.sin(theta * 2)
    ux, uy, uz = v3

    # Rodrigues' rotation matrix about the unit axis v3.
    # (np.array replaces the deprecated np.matrix of the original.)
    rot = np.array([
        [cs + ux**2.0*(1.0-cs),  ux*uy*(1.0-cs) - uz*sn, ux*uz*(1.0-cs) + uy*sn],
        [uy*ux*(1.0-cs) + uz*sn, cs + uy**2.0*(1.0-cs),  uy*uz*(1.0-cs) - ux*sn],
        [uz*ux*(1.0-cs) - uy*sn, uz*uy*(1.0-cs) + ux*sn, cs + uz**2.0*(1.0-cs)],
    ])

    # Rotate v2, rescale to the half-length, and offset from the midpoint.
    r = rot.dot(v2)
    r = r * length / np.linalg.norm(r)
    return r + m
def cross(a, b):
    """Return the cross product of the 3-vectors a and b as a list."""
    ax, ay, az = a[0], a[1], a[2]
    bx, by, bz = b[0], b[1], b[2]
    return [ay * bz - az * by,
            az * bx - ax * bz,
            ax * by - ay * bx]
def getPelangle(axisP,axisD):
    """
    Pelvis angle calculation using a Y-X-Z rotation order.

    Parameters
    ----------
    axisP, axisD : list
        3x3 matrices of unit axis vectors [x, y, z] for the proximal and
        distal segments.

    Returns
    -------
    list
        [alpha, beta, gamma] in degrees: alpha is the abduction angle,
        beta the flexion angle, gamma the rotation angle.
    """
    def dot(u, v):
        # Plain 3-vector dot product.
        return u[0]*v[0] + u[1]*v[1] + u[2]*v[2]

    xP, yP, zP = axisP[0], axisP[1], axisP[2]
    xD, yD, zD = axisD[0], axisD[1], axisD[2]

    # Flexion: distal z against proximal y, over its in-plane magnitude.
    beta = np.arctan2(dot(zD, yP),
                      np.sqrt(pow(dot(zD, xP), 2) + pow(dot(zD, zP), 2)))
    alpha = np.arctan2(dot(zD, xP), dot(zD, zP))
    gamma = np.arctan2(dot(xD, yP), dot(yD, yP))

    # Convert radians to degrees.
    return [180.0 * alpha / pi, 180.0 * beta / pi, 180.0 * gamma / pi]
def getHeadangle(axisP,axisD):
    """
    Head angle calculation using a Y-X-Z rotation order.

    Parameters
    ----------
    axisP, axisD : list
        3x3 matrices of unit axis vectors [x, y, z] for the proximal and
        distal (head) segments.

    Returns
    -------
    list
        [alpha, beta, gamma] in degrees, with head-specific sign and
        wrap-around corrections applied.
    """
    def dot(u, v):
        # Plain 3-vector dot product.
        return u[0]*v[0] + u[1]*v[1] + u[2]*v[2]

    xP, yP, zP = axisP[0], axisP[1], axisP[2]
    xD, yD, zD = axisD[0], axisD[1], axisD[2]

    # NOTE: the original computed an arcsin-based alpha here that was
    # unconditionally overwritten below; that dead code has been removed.
    # beta is the flexion angle, gamma the rotation angle.
    beta = np.arctan2(dot(zD, yP),
                      np.sqrt(pow(dot(xD, yP), 2) + pow(dot(yD, yP), 2)))
    alpha = np.arctan2(-1 * dot(zD, xP), dot(zD, zP))
    gamma = np.arctan2(-1 * dot(xD, yP), dot(yD, yP))

    alpha = 180.0 * alpha / pi
    beta = 180.0 * beta / pi
    gamma = 180.0 * gamma / pi

    # Head-specific conventions: flexion is negated, abduction is mirrored
    # into the 0-360 range, rotation is wrapped away from +/-180.
    beta = -1 * beta
    if alpha < 0:
        alpha = alpha * -1
    else:
        if 0 < alpha < 180:
            alpha = 180 + (180 - alpha)
    if gamma > 90.0:
        if gamma > 120:
            gamma = (gamma - 180) * -1
        else:
            gamma = (gamma + 180) * -1
    else:
        if gamma < 0:
            gamma = (gamma + 180) * -1
        else:
            gamma = (gamma * -1) - 180.0
    return [alpha, beta, gamma]
def getangle_sho(axisP,axisD):
    """
    Shoulder angle calculation using an inverse Euler XYZ rotation order;
    angles are returned in degrees.

    Parameters
    ----------
    axisP, axisD : list
        3x3 matrices of unit axis vectors [x, y, z] for the proximal and
        distal segments.

    Returns
    -------
    list
        [alpha, beta, gamma] in degrees. Per the original convention:
        beta is flexion/extension, gamma is adduction/abduction, alpha is
        internal/external rotation.
    """
    def dot(u, v):
        # Plain 3-vector dot product.
        return u[0]*v[0] + u[1]*v[1] + u[2]*v[2]

    xP, yP, zP = axisP[0], axisP[1], axisP[2]
    xD, yD, zD = axisD[0], axisD[1], axisD[2]

    alpha = np.arcsin(dot(zD, xP))
    beta = np.arctan2(-1 * dot(zD, yP), dot(zD, zP))
    gamma = np.arctan2(-1 * dot(yD, xP), dot(xD, xP))

    # Convert radians to degrees.
    return [180.0 * alpha / pi, 180.0 * beta / pi, 180.0 * gamma / pi]
def getangle_spi(axisP,axisD):
    """
    Spine angle calculation using an inverse Euler XZX rotation order;
    angles are returned in degrees.

    Parameters
    ----------
    axisP, axisD : list
        3x3 matrices of unit axis vectors [x, y, z] for the proximal and
        distal segments.

    Returns
    -------
    list
        [beta, gamma, alpha] in degrees.
    """
    def dot(u, v):
        # Plain 3-vector dot product.
        return u[0]*v[0] + u[1]*v[1] + u[2]*v[2]

    xP, yP, zP = axisP[0], axisP[1], axisP[2]
    xD, yD, zD = axisD[0], axisD[1], axisD[2]

    alpha = np.arcsin(dot(yD, zP))
    # The remaining two angles are scaled by cos(alpha); near alpha =
    # +/-90 degrees this approaches gimbal lock.
    gamma = np.arcsin(-1 * dot(yD, xP) / np.cos(alpha))
    beta = np.arcsin(-1 * dot(xD, zP) / np.cos(alpha))

    # Convert radians to degrees.
    return [180.0 * beta / pi, 180.0 * gamma / pi, 180.0 * alpha / pi]
def getangle(axisP, axisD):
    """Normal joint angle calculation (inverse Euler, YXZ order).

    Parameters
    ----------
    axisP, axisD : 3x3 array-like
        Rows are the unit x, y and z axis vectors of the proximal and
        distal segment frames.

    Returns
    -------
    list of float
        Three angles in degrees, ordered ``[beta, alpha, gamma]``
        (flexion, abduction, rotation).

    Because ``arcsin`` is only defined on [-1, 1], alpha stays NaN when
    the projection falls outside that range; the quadrant check below
    then takes the out-of-range branch (NaN comparisons are False).
    """
    def _dot(u, w):
        # Plain 3-vector dot product; works on lists and arrays alike.
        return u[0] * w[0] + u[1] * w[1] + u[2] * w[2]

    # alpha is the abduction angle.
    ang = -_dot(axisD[2], axisP[1])
    alpha = np.nan
    if -1 <= ang <= 1:
        alpha = np.arcsin(ang)

    # beta is the flexion angle, gamma the rotation angle; their signs
    # depend on whether alpha lies strictly inside (-pi/2, pi/2).
    if -1.57079633 < alpha < 1.57079633:
        beta = np.arctan2(_dot(axisD[2], axisP[0]), _dot(axisD[2], axisP[2]))
        gamma = np.arctan2(_dot(axisD[1], axisP[1]), _dot(axisD[0], axisP[1]))
    else:
        beta = np.arctan2(-_dot(axisD[2], axisP[0]), _dot(axisD[2], axisP[2]))
        gamma = np.arctan2(-_dot(axisD[1], axisP[1]), _dot(axisD[0], axisP[1]))

    return [180.0 * beta / math.pi,
            180.0 * alpha / math.pi,
            180.0 * gamma / math.pi]
def norm2d(v):
    """Euclidean norm of a 3-component vector, returned as a plain float.

    Returns ``np.nan`` (instead of raising) when ``v`` is malformed —
    not indexable, too short, or non-numeric — preserving the original
    best-effort contract.

    Fix: the bare ``except`` was narrowed so that system exceptions such
    as KeyboardInterrupt/SystemExit are no longer silently swallowed.
    """
    try:
        return sqrt(v[0] * v[0] + v[1] * v[1] + v[2] * v[2])
    except (TypeError, IndexError, KeyError, ValueError):
        return np.nan
def norm3d(v):
    """Euclidean norm of a 3-component vector, as a 0-d numpy array.

    Returns ``np.nan`` (instead of raising) when ``v`` is malformed —
    not indexable, too short, or non-numeric — preserving the original
    best-effort contract.

    Fix: the bare ``except`` was narrowed so that system exceptions such
    as KeyboardInterrupt/SystemExit are no longer silently swallowed.
    """
    try:
        return np.asarray(sqrt(v[0] * v[0] + v[1] * v[1] + v[2] * v[2]))
    except (TypeError, IndexError, KeyError, ValueError):
        return np.nan
def normDiv(v):
    # NOTE(review): despite the name, this returns v divided by the SQUARE of
    # its norm: v is normalised once inside the try-block, then each
    # component is divided by the norm AGAIN in the return statement.
    # Callers appear to depend on this quirk, so behaviour is kept as-is;
    # confirm against call sites before "fixing".
    try:
        vec = sqrt((v[0]*v[0]+v[1]*v[1]+v[2]*v[2]))
        v = [v[0]/vec,v[1]/vec,v[2]/vec]
    except:
        # Best-effort fallback: vec becomes NaN so the return below yields
        # NaN components — but it can still raise if v is not indexable.
        vec = np.nan
    return [v[0]/vec,v[1]/vec,v[2]/vec]
def matrixmult(A, B):
    """Multiply two matrices given as nested lists.

    ``A`` is n x m and ``B`` is m x p; the product ``C = A @ B`` is
    returned as an n x p nested list.

    Bug fix: the result matrix was previously allocated with transposed
    dimensions (``len(B[0])`` rows of ``len(A)`` columns), which raised
    IndexError — or produced a mis-shaped result — whenever the product
    was not square. It is now allocated as ``len(A)`` x ``len(B[0])``.
    """
    C = [[0 for _ in range(len(B[0]))] for _ in range(len(A))]
    for i in range(len(A)):
        for j in range(len(B[0])):
            for k in range(len(B)):
                C[i][j] += A[i][k] * B[k][j]
    return C
def _segment_axes(axis_form, center_form):
    """Return the 3x3 matrix of a segment's axis direction vectors.

    Each row is one axis endpoint minus the segment origin, i.e. the
    input expected by the getangle* family of functions.
    """
    return np.vstack([np.subtract(axis_form[0], center_form),
                      np.subtract(axis_form[1], center_form),
                      np.subtract(axis_form[2], center_form)])


def _flatten_segment(center_form, axis_form):
    """Flatten one segment's origin and x/y/z axis endpoints into the
    12 scalars stored in the output vector: ox,oy,oz, xx,xy,xz, yx,yy,yz,
    zx,zy,zz."""
    return [center_form[0], center_form[1], center_form[2],
            axis_form[0][0], axis_form[0][1], axis_form[0][2],
            axis_form[1][0], axis_form[1][1], axis_form[1][2],
            axis_form[2][0], axis_form[2][1], axis_form[2][2]]


def JointAngleCalc(frame, vsk):
    """Calculates the Plug-in-Gait joint angles and segment axes for one frame.

    Computes pelvis/hip/knee/ankle/foot/head/thorax/neck/spine/shoulder/
    elbow/wrist angles and every segment's axis, returning them as a single
    flat float64 vector:

    - 19 joint angles x 3 components (x, y, z) = 57 values, followed by
    - 18 segments x 12 values each (origin xyz, then the x, y and z axis
      endpoints) in the fixed order: pelvis, hip, R/L knee, R/L ankle,
      R/L foot, head, thorax, R/L clavicle, R/L humerus, R/L radius,
      R/L hand.

    Each single-sided axis is ``[origin, [[x axis],[y axis],[z axis]]]``;
    bilateral joint-center results carry right/left origins and axes as
    documented in the respective *JointCenter functions.

    Refactor: the original ~400 lines of per-segment unpacking were
    replaced by ``_flatten_segment``; the stored values and their order
    are unchanged.

    :param frame: dict of marker positions for one frame of motion data.
    :param vsk: subject measurement dict/object passed through to the
        joint-center functions.
    :return: np.ndarray of float64, angles followed by segment axes.
    """
    # Global (lab) reference frame used for the absolute angles.
    global_axis_form = [[0, 1, 0], [-1, 0, 0], [0, 0, 1]]
    global_Axis = _segment_axes(global_axis_form, [0, 0, 0])

    # ----- pelvis (absolute) -------------------------------------------
    pelvis_axis = pelvisJointCenter(frame)
    pelvis_center = pelvis_axis[0]
    pelvis_form = pelvis_axis[1]
    pelvis_Axis_mod = _segment_axes(pelvis_form, pelvis_center)
    global_pelvis_angle = getangle(global_Axis, pelvis_Axis_mod)
    pelx = global_pelvis_angle[0]
    pely = global_pelvis_angle[1] * -1
    pelz = global_pelvis_angle[2] * -1 + 90

    # ----- hip ----------------------------------------------------------
    hip_JC = hipJointCenter(frame, pelvis_axis[0], pelvis_axis[1][0],
                            pelvis_axis[1][1], pelvis_axis[1][2], vsk=vsk)
    hip_axis = hipAxisCenter(hip_JC[0], hip_JC[1], pelvis_axis)
    knee_JC = kneeJointCenter(frame, hip_JC, 0, vsk=vsk)
    hip_center = hip_axis[0]
    hip_form = hip_axis[1]
    rknee_center = knee_JC[0]
    lknee_center = knee_JC[1]
    rknee_form = knee_JC[2][0]
    lknee_form = knee_JC[2][1]
    hip_Axis = _segment_axes(hip_form, hip_center)
    R_knee_Axis = _segment_axes(rknee_form, rknee_center)
    L_knee_Axis = _segment_axes(lknee_form, lknee_center)
    R_pelvis_knee_angle = getangle(hip_Axis, R_knee_Axis)
    L_pelvis_knee_angle = getangle(hip_Axis, L_knee_Axis)
    rhipx = R_pelvis_knee_angle[0] * -1
    rhipy = R_pelvis_knee_angle[1]
    rhipz = R_pelvis_knee_angle[2] * -1 + 90
    lhipx = L_pelvis_knee_angle[0] * -1
    lhipy = L_pelvis_knee_angle[1] * -1
    lhipz = L_pelvis_knee_angle[2] - 90

    # ----- knee ---------------------------------------------------------
    ankle_JC = ankleJointCenter(frame, knee_JC, 0, vsk=vsk)
    rank_center = ankle_JC[0]
    lank_center = ankle_JC[1]
    rank_form = ankle_JC[2][0]
    lank_form = ankle_JC[2][1]
    R_ankle_Axis = _segment_axes(rank_form, rank_center)
    L_ankle_Axis = _segment_axes(lank_form, lank_center)
    R_knee_ankle_angle = getangle(R_knee_Axis, R_ankle_Axis)
    L_knee_ankle_angle = getangle(L_knee_Axis, L_ankle_Axis)
    rkneex = R_knee_ankle_angle[0]
    rkneey = R_knee_ankle_angle[1]
    rkneez = R_knee_ankle_angle[2] * -1 + 90
    lkneex = L_knee_ankle_angle[0]
    lkneey = L_knee_ankle_angle[1] * -1
    lkneez = L_knee_ankle_angle[2] - 90

    # ----- ankle (foot relative to shank) -------------------------------
    offset = 0
    foot_JC = footJointCenter(frame, vsk, ankle_JC, knee_JC, offset)
    rfoot_center = foot_JC[0]
    lfoot_center = foot_JC[1]
    rfoot_form = foot_JC[2][0]
    lfoot_form = foot_JC[2][1]
    R_foot_Axis = _segment_axes(rfoot_form, rfoot_center)
    L_foot_Axis = _segment_axes(lfoot_form, lfoot_center)
    R_ankle_foot_angle = getangle(R_ankle_Axis, R_foot_Axis)
    L_ankle_foot_angle = getangle(L_ankle_Axis, L_foot_Axis)
    ranklex = R_ankle_foot_angle[0] * -1 - 90
    rankley = R_ankle_foot_angle[2] * -1 + 90
    ranklez = R_ankle_foot_angle[1]
    lanklex = L_ankle_foot_angle[0] * -1 - 90
    lankley = L_ankle_foot_angle[2] - 90
    lanklez = L_ankle_foot_angle[1] * -1

    # ----- absolute foot (foot progress) angle --------------------------
    R_global_foot_angle = getangle(global_Axis, R_foot_Axis)
    L_global_foot_angle = getangle(global_Axis, L_foot_Axis)
    rfootx = R_global_foot_angle[0]
    rfooty = R_global_foot_angle[2] - 90
    rfootz = R_global_foot_angle[1]
    lfootx = L_global_foot_angle[0]
    lfooty = (L_global_foot_angle[2] - 90) * -1
    lfootz = L_global_foot_angle[1] * -1

    # ----- head (absolute) ----------------------------------------------
    # NOTE: headJC/thoraxJC return [axes, origin] — the reverse of the
    # pelvis/hip convention above.
    head_axis = headJC(frame, vsk=vsk)
    head_form = head_axis[0]
    head_center = head_axis[1]
    head_Axis_mod = _segment_axes(head_form, head_center)
    global_head_angle = getHeadangle(global_Axis, head_Axis_mod)
    headx = global_head_angle[0] * -1
    if headx < -180:
        headx = headx + 360
    heady = global_head_angle[1] * -1
    headz = global_head_angle[2] + 180
    if headz < -180:
        # Preserved from the original implementation: this subtracts 360,
        # pushing the value further below -180 rather than wrapping it
        # back into range — confirm before changing.
        headz = headz - 360

    # ----- thorax (absolute) --------------------------------------------
    thorax_axis = thoraxJC(frame)
    thorax_form = thorax_axis[0]
    thorax_center = thorax_axis[1]
    thorax_Axis_mod = _segment_axes(thorax_form, thorax_center)
    global_thorax_angle = getangle(global_Axis, thorax_Axis_mod)
    # Fold the flexion component into the +/-180 range.
    if global_thorax_angle[0] > 0:
        global_thorax_angle[0] = global_thorax_angle[0] - 180
    elif global_thorax_angle[0] < 0:
        global_thorax_angle[0] = global_thorax_angle[0] + 180
    thox = global_thorax_angle[0]
    thoy = global_thorax_angle[1]
    thoz = global_thorax_angle[2] + 90

    # ----- neck (head relative to thorax) -------------------------------
    head_thorax_angle = getHeadangle(head_Axis_mod, thorax_Axis_mod)
    neckx = (head_thorax_angle[0] - 180) * -1
    necky = head_thorax_angle[1]
    neckz = head_thorax_angle[2] * -1

    # ----- spine (thorax relative to pelvis) ----------------------------
    pel_tho_angle = getangle_spi(pelvis_Axis_mod, thorax_Axis_mod)
    spix = pel_tho_angle[0]
    spiy = pel_tho_angle[2] * -1
    spiz = pel_tho_angle[1]

    # ----- shoulder -----------------------------------------------------
    wand = findwandmarker(frame, thorax_axis)
    shoulder_JC = findshoulderJC(frame, thorax_axis, wand, vsk=vsk)
    shoulder_axis = shoulderAxisCalc(frame, thorax_axis, shoulder_JC, wand)
    humerus_JC = elbowJointCenter(frame, thorax_axis, shoulder_JC, wand,
                                  vsk=vsk)
    rclav_form = shoulder_axis[1][0]
    lclav_form = shoulder_axis[1][1]
    rclav_center = shoulder_axis[0][0]
    lclav_center = shoulder_axis[0][1]
    rhum_form = humerus_JC[1][0]
    lhum_form = humerus_JC[1][1]
    rhum_center = humerus_JC[0][0]
    lhum_center = humerus_JC[0][1]
    R_humerus_Axis_mod = _segment_axes(rhum_form, rhum_center)
    L_humerus_Axis_mod = _segment_axes(lhum_form, lhum_center)
    R_thorax_shoulder_angle = getangle_sho(thorax_Axis_mod, R_humerus_Axis_mod)
    L_thorax_shoulder_angle = getangle_sho(thorax_Axis_mod, L_humerus_Axis_mod)
    # Sign/offset corrections preserved verbatim from the original code.
    if R_thorax_shoulder_angle[2] < 0:
        R_thorax_shoulder_angle[2] = R_thorax_shoulder_angle[2] + 180
    elif R_thorax_shoulder_angle[2] > 0:
        R_thorax_shoulder_angle[2] = R_thorax_shoulder_angle[2] - 180
    if R_thorax_shoulder_angle[1] > 0:
        R_thorax_shoulder_angle[1] = R_thorax_shoulder_angle[1] - 180
    elif R_thorax_shoulder_angle[1] < 0:
        R_thorax_shoulder_angle[1] = R_thorax_shoulder_angle[1] * -1 - 180
    if L_thorax_shoulder_angle[1] < 0:
        L_thorax_shoulder_angle[1] = L_thorax_shoulder_angle[1] + 180
    elif L_thorax_shoulder_angle[1] > 0:
        L_thorax_shoulder_angle[1] = L_thorax_shoulder_angle[1] - 180
    rshox = R_thorax_shoulder_angle[0] * -1
    rshoy = R_thorax_shoulder_angle[1] * -1
    rshoz = R_thorax_shoulder_angle[2]
    lshox = L_thorax_shoulder_angle[0] * -1
    lshoy = L_thorax_shoulder_angle[1]
    lshoz = (L_thorax_shoulder_angle[2] - 180) * -1
    if lshoz > 180:
        lshoz = lshoz - 360

    # ----- elbow --------------------------------------------------------
    radius_JC = wristJointCenter(frame, shoulder_JC, wand, humerus_JC)
    rrad_form = radius_JC[1][0]
    lrad_form = radius_JC[1][1]
    rrad_center = radius_JC[0][0]
    lrad_center = radius_JC[0][1]
    R_radius_Axis_mod = _segment_axes(rrad_form, rrad_center)
    L_radius_Axis_mod = _segment_axes(lrad_form, lrad_center)
    R_humerus_radius_angle = getangle(R_humerus_Axis_mod, R_radius_Axis_mod)
    L_humerus_radius_angle = getangle(L_humerus_Axis_mod, L_radius_Axis_mod)
    relbx = R_humerus_radius_angle[0]
    relby = R_humerus_radius_angle[1]
    relbz = R_humerus_radius_angle[2] - 90.0
    lelbx = L_humerus_radius_angle[0]
    lelby = L_humerus_radius_angle[1]
    lelbz = L_humerus_radius_angle[2] - 90.0

    # ----- wrist --------------------------------------------------------
    hand_JC = handJointCenter(frame, humerus_JC, radius_JC, vsk=vsk)
    rhand_form = hand_JC[1][0]
    lhand_form = hand_JC[1][1]
    rhand_center = hand_JC[0][0]
    lhand_center = hand_JC[0][1]
    R_hand_Axis_mod = _segment_axes(rhand_form, rhand_center)
    L_hand_Axis_mod = _segment_axes(lhand_form, lhand_center)
    R_radius_hand_angle = getangle(R_radius_Axis_mod, R_hand_Axis_mod)
    L_radius_hand_angle = getangle(L_radius_Axis_mod, L_hand_Axis_mod)
    rwrtx = R_radius_hand_angle[0]
    rwrty = R_radius_hand_angle[1]
    rwrtz = R_radius_hand_angle[2] * -1 + 90
    lwrtx = L_radius_hand_angle[0]
    lwrty = L_radius_hand_angle[1] * -1
    lwrtz = L_radius_hand_angle[2] - 90
    if lwrtz < -180:
        lwrtz = lwrtz + 360

    # ----- assemble output: 19 angles (x,y,z each)... -------------------
    r = [
        pelx, pely, pelz,
        rhipx, rhipy, rhipz,
        lhipx, lhipy, lhipz,
        rkneex, rkneey, rkneez,
        lkneex, lkneey, lkneez,
        ranklex, rankley, ranklez,
        lanklex, lankley, lanklez,
        rfootx, rfooty, rfootz,
        lfootx, lfooty, lfootz,
        headx, heady, headz,
        thox, thoy, thoz,
        neckx, necky, neckz,
        spix, spiy, spiz,
        rshox, rshoy, rshoz,
        lshox, lshoy, lshoz,
        relbx, relby, relbz,
        lelbx, lelby, lelbz,
        rwrtx, rwrty, rwrtz,
        lwrtx, lwrty, lwrtz,
    ]
    # ...followed by 12 values per segment (origin + x/y/z axis endpoints)
    # in the fixed order expected by downstream consumers.
    for center, form in [
            (pelvis_center, pelvis_form),
            (hip_center, hip_form),
            (rknee_center, rknee_form),
            (lknee_center, lknee_form),
            (rank_center, rank_form),
            (lank_center, lank_form),
            (rfoot_center, rfoot_form),
            (lfoot_center, lfoot_form),
            (head_center, head_form),
            (thorax_center, thorax_form),
            (rclav_center, rclav_form),
            (lclav_center, lclav_form),
            (rhum_center, rhum_form),
            (lhum_center, lhum_form),
            (rrad_center, rrad_form),
            (lrad_center, lrad_form),
            (rhand_center, rhand_form),
            (lhand_center, lhand_form)]:
        r.extend(_flatten_segment(center, form))

    return np.array(r, dtype=np.float64)
|
unknown
|
codeparrot/codeparrot-clean
| ||
#
#
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""Remote API resource implementations.
PUT or POST?
============
According to RFC2616 the main difference between PUT and POST is that
POST can create new resources but PUT can only create the resource the
URI was pointing to on the PUT request.
In the context of this module POST on ``/2/instances`` to change an existing
entity is legitimate, while PUT would not be. PUT creates a new entity (e.g. a
new instance) with a name specified in the request.
Quoting from RFC2616, section 9.6::
The fundamental difference between the POST and PUT requests is reflected in
the different meaning of the Request-URI. The URI in a POST request
identifies the resource that will handle the enclosed entity. That resource
might be a data-accepting process, a gateway to some other protocol, or a
separate entity that accepts annotations. In contrast, the URI in a PUT
request identifies the entity enclosed with the request -- the user agent
knows what URI is intended and the server MUST NOT attempt to apply the
request to some other resource. If the server desires that the request be
applied to a different URI, it MUST send a 301 (Moved Permanently) response;
the user agent MAY then make its own decision regarding whether or not to
redirect the request.
So when adding new methods, if they are operating on the URI entity itself,
PUT should be preferred over POST.
"""
# pylint: disable=C0103
# C0103: Invalid name, since the R_* names are not conforming
from ganeti import opcodes
from ganeti import objects
from ganeti import http
from ganeti import constants
from ganeti import cli
from ganeti import rapi
from ganeti import ht
from ganeti import compat
from ganeti import ssconf
from ganeti.rapi import baserlib
# Fields shared by nearly every resource; appended to each specific list.
_COMMON_FIELDS = ["ctime", "mtime", "uuid", "serial_no", "tags"]

# Queryable fields for instance resources (/2/instances).
I_FIELDS = ["name", "admin_state", "os",
            "pnode", "snodes",
            "disk_template",
            "nic.ips", "nic.macs", "nic.modes",
            "nic.links", "nic.networks", "nic.networks.names", "nic.bridges",
            "network_port",
            "disk.sizes", "disk_usage",
            "beparams", "hvparams",
            "oper_state", "oper_ram", "oper_vcpus", "status",
            "custom_hvparams", "custom_beparams", "custom_nicparams",
            ] + _COMMON_FIELDS

# Queryable fields for node resources (/2/nodes).
N_FIELDS = ["name", "offline", "master_candidate", "drained",
            "dtotal", "dfree",
            "mtotal", "mnode", "mfree",
            "pinst_cnt", "sinst_cnt",
            "ctotal", "cnodes", "csockets",
            "pip", "sip", "role",
            "pinst_list", "sinst_list",
            "master_capable", "vm_capable",
            "ndparams",
            "group.uuid",
            ] + _COMMON_FIELDS

# Queryable fields for network resources (/2/networks).
NET_FIELDS = ["name", "network", "gateway",
              "network6", "gateway6",
              "mac_prefix",
              "free_count", "reserved_count",
              "map", "group_list", "inst_list",
              "external_reservations",
              ] + _COMMON_FIELDS

# Queryable fields for node-group resources (/2/groups).
G_FIELDS = [
  "alloc_policy",
  "name",
  "node_cnt",
  "node_list",
  "ipolicy",
  "custom_ipolicy",
  "diskparams",
  "custom_diskparams",
  "ndparams",
  "custom_ndparams",
  ] + _COMMON_FIELDS

# Job fields returned by bulk job queries (/2/jobs?bulk=1).
J_FIELDS_BULK = [
  "id", "ops", "status", "summary",
  "opstatus",
  "received_ts", "start_ts", "end_ts",
  ]

# Full set of job fields for single-job queries (/2/jobs/[job_id]).
J_FIELDS = J_FIELDS_BULK + [
  "oplog",
  "opresult",
  ]

# External (URL-visible) names for the node roles.
_NR_DRAINED = "drained"
_NR_MASTER_CANDIDATE = "master-candidate"
_NR_MASTER = "master"
_NR_OFFLINE = "offline"
_NR_REGULAR = "regular"

# Maps internal node-role constants to their external names.
_NR_MAP = {
  constants.NR_MASTER: _NR_MASTER,
  constants.NR_MCANDIDATE: _NR_MASTER_CANDIDATE,
  constants.NR_DRAINED: _NR_DRAINED,
  constants.NR_OFFLINE: _NR_OFFLINE,
  constants.NR_REGULAR: _NR_REGULAR,
  }

# Every known node role must have an external name.
assert frozenset(_NR_MAP.keys()) == constants.NR_ALL

# Request data version field
_REQ_DATA_VERSION = "__version__"

# Feature string for instance creation request data version 1
_INST_CREATE_REQV1 = "instance-create-reqv1"

# Feature string for instance reinstall request version 1
_INST_REINSTALL_REQV1 = "instance-reinstall-reqv1"

# Feature string for node migration version 1
_NODE_MIGRATE_REQV1 = "node-migrate-reqv1"

# Feature string for node evacuation with LU-generated jobs
_NODE_EVAC_RES1 = "node-evac-res1"

# All optional features advertised via /2/features.
ALL_FEATURES = compat.UniqueFrozenset([
  _INST_CREATE_REQV1,
  _INST_REINSTALL_REQV1,
  _NODE_MIGRATE_REQV1,
  _NODE_EVAC_RES1,
  ])

# Timeout for /2/jobs/[job_id]/wait. Gives job up to 10 seconds to change.
_WFJC_TIMEOUT = 10
# FIXME: For compatibility we update the beparams/memory field. Needs to be
# removed in Ganeti 2.8
def _UpdateBeparams(inst):
  """Mirrors the maxmem backend parameter into the legacy memory field.

  @param inst: Inst dict
  @return: Updated inst dict (modified in place)

  """
  inst["beparams"][constants.BE_MEMORY] = inst["beparams"][constants.BE_MAXMEM]
  return inst
class R_root(baserlib.ResourceBase):
  """/ resource.

  """
  @staticmethod
  def GET():
    """Supported for legacy reasons.

    """
    # Old clients probe the root URI; an empty (None) body is returned.
    return None
class R_2(R_root):
  """/2 resource.

  """
  # Inherits the legacy GET handler from R_root; exists so that /2 is a
  # valid URI prefix for the version-2 API.
class R_version(baserlib.ResourceBase):
  """/version resource.

  This resource should be used to determine the remote API version and
  to adapt clients accordingly.

  """
  @staticmethod
  def GET():
    """Returns the remote API version.

    """
    # A bare integer; clients compare it against the versions they support.
    return constants.RAPI_VERSION
class R_2_info(baserlib.OpcodeResource):
  """/2/info resource.

  """
  GET_OPCODE = opcodes.OpClusterQuery

  def GET(self):
    """Returns cluster information.

    """
    # Query-only client is sufficient; no job is submitted.
    return self.GetClient(query=True).QueryClusterInfo()
class R_2_features(baserlib.ResourceBase):
  """/2/features resource.

  """
  @staticmethod
  def GET():
    """Returns the list of optional RAPI features implemented.

    """
    # Materialise the frozenset into a JSON-serialisable list.
    return [feature for feature in ALL_FEATURES]
class R_2_os(baserlib.OpcodeResource):
  """/2/os resource.

  """
  GET_OPCODE = opcodes.OpOsDiagnose

  def GET(self):
    """Return a list of all OSes.

    Can return error 500 in case of a problem.

    Example: ["debian-etch"]

    """
    cl = self.GetClient()
    op = opcodes.OpOsDiagnose(output_fields=["name", "variants"], names=[])
    job_id = self.SubmitJob([op], cl=cl)
    # Poll with a custom feedback function so status updates go to the log
    # instead of being printed.
    diagnose_data = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)[0]
    if not isinstance(diagnose_data, list):
      raise http.HttpBadGateway(message="Can't get OS list")
    os_names = []
    for (name, variants) in diagnose_data:
      os_names.extend(cli.CalculateOSNames(name, variants))
    return os_names
class R_2_redist_config(baserlib.OpcodeResource):
  """/2/redistribute-config resource.

  PUT submits an OpClusterRedistConf job; the request body is the opcode input.

  """
  PUT_OPCODE = opcodes.OpClusterRedistConf
class R_2_cluster_modify(baserlib.OpcodeResource):
  """/2/modify resource.

  PUT submits an OpClusterSetParams job; the request body is the opcode input.

  """
  PUT_OPCODE = opcodes.OpClusterSetParams
class R_2_jobs(baserlib.ResourceBase):
  """/2/jobs resource.

  """
  def GET(self):
    """Returns a dictionary of jobs.

    In bulk mode the full J_FIELDS_BULK data is returned per job;
    otherwise only a list of id/uri mappings.

    @return: a dictionary with jobs id and uri.

    """
    client = self.GetClient(query=True)
    if self.useBulk():
      bulkdata = client.QueryJobs(None, J_FIELDS_BULK)
      return baserlib.MapBulkFields(bulkdata, J_FIELDS_BULK)
    # Non-bulk: list job IDs only and turn them into URI entries
    jobdata = [compat.fst(row) for row in client.QueryJobs(None, ["id"])]
    return baserlib.BuildUriList(jobdata, "/2/jobs/%s",
                                 uri_fields=("id", "uri"))
class R_2_jobs_id(baserlib.ResourceBase):
  """/2/jobs/[job_id] resource.

  """
  def GET(self):
    """Returns a job status.

    @return: a dictionary with job parameters.
        The result includes:
            - id: job ID as a number
            - status: current job status as a string
            - ops: involved OpCodes as a list of dictionaries for each
              opcodes in the job
            - opstatus: OpCodes status as a list
            - opresult: OpCodes results as a list of lists

    """
    job_id = self.items[0]
    # QueryJobs returns None for unknown job IDs
    result = self.GetClient(query=True).QueryJobs([job_id, ], J_FIELDS)[0]
    if result is None:
      raise http.HttpNotFound()
    return baserlib.MapFields(J_FIELDS, result)
  def DELETE(self):
    """Cancel not-yet-started job.

    @return: result of the master daemon's CancelJob call

    """
    job_id = self.items[0]
    result = self.GetClient().CancelJob(job_id)
    return result
class R_2_jobs_id_wait(baserlib.ResourceBase):
  """/2/jobs/[job_id]/wait resource.

  """
  # WaitForJobChange provides access to sensitive information and blocks
  # machine resources (it's a blocking RAPI call), hence restricting access.
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
  def GET(self):
    """Waits for job changes.

    Body parameters: "fields" (required list), "previous_job_info" and
    "previous_log_serial" (both optional).  Blocks up to C{_WFJC_TIMEOUT}
    seconds; returns C{None} when nothing changed in that window.

    """
    job_id = self.items[0]
    fields = self.getBodyParameter("fields")
    prev_job_info = self.getBodyParameter("previous_job_info", None)
    prev_log_serial = self.getBodyParameter("previous_log_serial", None)
    # Validate parameter types before issuing the blocking call
    if not isinstance(fields, list):
      raise http.HttpBadRequest("The 'fields' parameter should be a list")
    if not (prev_job_info is None or isinstance(prev_job_info, list)):
      raise http.HttpBadRequest("The 'previous_job_info' parameter should"
                                " be a list")
    if not (prev_log_serial is None or
            isinstance(prev_log_serial, (int, long))):
      raise http.HttpBadRequest("The 'previous_log_serial' parameter should"
                                " be a number")
    client = self.GetClient()
    result = client.WaitForJobChangeOnce(job_id, fields,
                                         prev_job_info, prev_log_serial,
                                         timeout=_WFJC_TIMEOUT)
    if not result:
      raise http.HttpNotFound()
    if result == constants.JOB_NOTCHANGED:
      # No changes
      return None
    (job_info, log_entries) = result
    return {
      "job_info": job_info,
      "log_entries": log_entries,
      }
class R_2_nodes(baserlib.OpcodeResource):
  """/2/nodes resource.

  """
  GET_OPCODE = opcodes.OpNodeQuery
  def GET(self):
    """Returns a list of all nodes.

    In bulk mode full N_FIELDS data is returned; otherwise an id/uri list.

    """
    client = self.GetClient(query=False)
    if self.useBulk():
      bulkdata = client.QueryNodes([], N_FIELDS, False)
      return baserlib.MapBulkFields(bulkdata, N_FIELDS)
    else:
      nodesdata = client.QueryNodes([], ["name"], False)
      nodeslist = [row[0] for row in nodesdata]
      return baserlib.BuildUriList(nodeslist, "/2/nodes/%s",
                                   uri_fields=("id", "uri"))
class R_2_nodes_name(baserlib.OpcodeResource):
  """/2/nodes/[node_name] resource.

  """
  GET_OPCODE = opcodes.OpNodeQuery
  def GET(self):
    """Send information about a node.

    @return: mapping of N_FIELDS to the queried node's values

    """
    node_name = self.items[0]
    client = self.GetClient(query=False)
    # HandleItemQueryErrors turns "unknown item" errors into HTTP 404
    result = baserlib.HandleItemQueryErrors(client.QueryNodes,
                                            names=[node_name], fields=N_FIELDS,
                                            use_locking=self.useLocking())
    return baserlib.MapFields(N_FIELDS, result[0])
class R_2_nodes_name_powercycle(baserlib.OpcodeResource):
  """/2/nodes/[node_name]/powercycle resource.

  """
  POST_OPCODE = opcodes.OpNodePowercycle
  def GetPostOpInput(self):
    """Tries to powercycle a node.

    @return: (body, static overrides) tuple for L{opcodes.OpNodePowercycle}

    """
    return (self.request_body, {
      "node_name": self.items[0],
      "force": self.useForce(),
      })
class R_2_nodes_name_role(baserlib.OpcodeResource):
  """/2/nodes/[node_name]/role resource.

  """
  PUT_OPCODE = opcodes.OpNodeSetParams
  def GET(self):
    """Returns the current node role.

    @return: Node role

    """
    node_name = self.items[0]
    client = self.GetClient(query=True)
    result = client.QueryNodes(names=[node_name], fields=["role"],
                               use_locking=self.useLocking())
    return _NR_MAP[result[0][0]]
  def GetPutOpInput(self):
    """Sets the node role.

    The request body must be one of the _NR_* role strings; anything
    else is rejected with HTTP 400.

    """
    baserlib.CheckType(self.request_body, basestring, "Body contents")
    role = self.request_body
    # Per-role (master_candidate, offline, drained) flags; None means
    # "leave this flag untouched" in OpNodeSetParams
    role_flags = {
      _NR_REGULAR: (False, False, False),
      _NR_MASTER_CANDIDATE: (True, None, None),
      _NR_DRAINED: (None, None, True),
      _NR_OFFLINE: (None, True, None),
      }
    try:
      (candidate, offline, drained) = role_flags[role]
    except KeyError:
      raise http.HttpBadRequest("Can't set '%s' role" % role)
    assert len(self.items) == 1
    return ({}, {
      "node_name": self.items[0],
      "master_candidate": candidate,
      "offline": offline,
      "drained": drained,
      "force": self.useForce(),
      "auto_promote": bool(self._checkIntVariable("auto-promote", default=0)),
      })
class R_2_nodes_name_evacuate(baserlib.OpcodeResource):
  """/2/nodes/[node_name]/evacuate resource.

  """
  POST_OPCODE = opcodes.OpNodeEvacuate
  def GetPostOpInput(self):
    """Evacuate all instances off a node.

    @return: (body, static overrides) tuple for L{opcodes.OpNodeEvacuate}

    """
    return (self.request_body, {
      "node_name": self.items[0],
      "dry_run": self.dryRun(),
      })
class R_2_nodes_name_migrate(baserlib.OpcodeResource):
  """/2/nodes/[node_name]/migrate resource.

  """
  POST_OPCODE = opcodes.OpNodeMigrate
  def GetPostOpInput(self):
    """Migrate all primary instances from a node.

    Query arguments take precedence over the body for backwards
    compatibility; "live" and "mode" are mutually exclusive.

    """
    if self.queryargs:
      # Support old-style requests
      if "live" in self.queryargs and "mode" in self.queryargs:
        raise http.HttpBadRequest("Only one of 'live' and 'mode' should"
                                  " be passed")
      if "live" in self.queryargs:
        # Legacy boolean flag mapped onto the newer migration-mode values
        if self._checkIntVariable("live", default=1):
          mode = constants.HT_MIGRATION_LIVE
        else:
          mode = constants.HT_MIGRATION_NONLIVE
      else:
        mode = self._checkStringVariable("mode", default=None)
      data = {
        "mode": mode,
        }
    else:
      data = self.request_body
    return (data, {
      "node_name": self.items[0],
      })
class R_2_nodes_name_modify(baserlib.OpcodeResource):
  """/2/nodes/[node_name]/modify resource.

  """
  POST_OPCODE = opcodes.OpNodeSetParams
  def GetPostOpInput(self):
    """Changes parameters of a node.

    @return: (body, static overrides) tuple for L{opcodes.OpNodeSetParams}

    """
    assert len(self.items) == 1
    return (self.request_body, {
      "node_name": self.items[0],
      })
class R_2_nodes_name_storage(baserlib.OpcodeResource):
  """/2/nodes/[node_name]/storage resource.

  """
  # LUNodeQueryStorage acquires locks, hence restricting access to GET
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
  GET_OPCODE = opcodes.OpNodeQueryStorage
  def GetGetOpInput(self):
    """List storage available on a node.

    Requires the "output_fields" query argument (comma-separated list);
    "storage_type" is optional.

    """
    storage_type = self._checkStringVariable("storage_type", None)
    output_fields = self._checkStringVariable("output_fields", None)
    if not output_fields:
      raise http.HttpBadRequest("Missing the required 'output_fields'"
                                " parameter")
    return ({}, {
      "nodes": [self.items[0]],
      "storage_type": storage_type,
      "output_fields": output_fields.split(","),
      })
class R_2_nodes_name_storage_modify(baserlib.OpcodeResource):
  """/2/nodes/[node_name]/storage/modify resource.

  """
  PUT_OPCODE = opcodes.OpNodeModifyStorage
  def GetPutOpInput(self):
    """Modifies a storage volume on a node.

    Requires the "name" query argument; "allocatable" (0/1, default 1)
    is translated into the SF_ALLOCATABLE change.

    """
    storage_type = self._checkStringVariable("storage_type", None)
    name = self._checkStringVariable("name", None)
    if not name:
      raise http.HttpBadRequest("Missing the required 'name'"
                                " parameter")
    changes = {}
    if "allocatable" in self.queryargs:
      changes[constants.SF_ALLOCATABLE] = \
        bool(self._checkIntVariable("allocatable", default=1))
    return ({}, {
      "node_name": self.items[0],
      "storage_type": storage_type,
      "name": name,
      "changes": changes,
      })
class R_2_nodes_name_storage_repair(baserlib.OpcodeResource):
  """/2/nodes/[node_name]/storage/repair resource.

  """
  PUT_OPCODE = opcodes.OpRepairNodeStorage
  def GetPutOpInput(self):
    """Repairs a storage volume on a node.

    Requires the "name" query argument; "storage_type" is optional.

    """
    storage_type = self._checkStringVariable("storage_type", None)
    name = self._checkStringVariable("name", None)
    if not name:
      raise http.HttpBadRequest("Missing the required 'name'"
                                " parameter")
    return ({}, {
      "node_name": self.items[0],
      "storage_type": storage_type,
      "name": name,
      })
class R_2_networks(baserlib.OpcodeResource):
  """/2/networks resource.

  """
  GET_OPCODE = opcodes.OpNetworkQuery
  POST_OPCODE = opcodes.OpNetworkAdd
  # Body key "name" is renamed to the opcode's "network_name" parameter
  POST_RENAME = {
    "name": "network_name",
    }
  def GetPostOpInput(self):
    """Create a network.

    @return: (body, static overrides) tuple for L{opcodes.OpNetworkAdd}

    """
    assert not self.items
    return (self.request_body, {
      "dry_run": self.dryRun(),
      })
  def GET(self):
    """Returns a list of all networks.

    In bulk mode full NET_FIELDS data is returned; otherwise a name/uri list.

    """
    client = self.GetClient()
    if self.useBulk():
      bulkdata = client.QueryNetworks([], NET_FIELDS, False)
      return baserlib.MapBulkFields(bulkdata, NET_FIELDS)
    else:
      data = client.QueryNetworks([], ["name"], False)
      networknames = [row[0] for row in data]
      return baserlib.BuildUriList(networknames, "/2/networks/%s",
                                   uri_fields=("name", "uri"))
class R_2_networks_name(baserlib.OpcodeResource):
  """/2/networks/[network_name] resource.

  """
  DELETE_OPCODE = opcodes.OpNetworkRemove
  def GET(self):
    """Send information about a network.

    @return: mapping of NET_FIELDS to the queried network's values

    """
    network_name = self.items[0]
    client = self.GetClient()
    result = baserlib.HandleItemQueryErrors(client.QueryNetworks,
                                            names=[network_name],
                                            fields=NET_FIELDS,
                                            use_locking=self.useLocking())
    return baserlib.MapFields(NET_FIELDS, result[0])
  def GetDeleteOpInput(self):
    """Delete a network.

    @return: (body, static overrides) tuple for L{opcodes.OpNetworkRemove}

    """
    assert len(self.items) == 1
    return (self.request_body, {
      "network_name": self.items[0],
      "dry_run": self.dryRun(),
      })
class R_2_networks_name_connect(baserlib.OpcodeResource):
  """/2/networks/[network_name]/connect resource.

  """
  PUT_OPCODE = opcodes.OpNetworkConnect
  def GetPutOpInput(self):
    """Connects a network to a node group.

    @return: (body, static overrides) tuple for L{opcodes.OpNetworkConnect}

    """
    assert self.items
    return (self.request_body, {
      "network_name": self.items[0],
      "dry_run": self.dryRun(),
      })
class R_2_networks_name_disconnect(baserlib.OpcodeResource):
  """/2/networks/[network_name]/disconnect resource.

  """
  PUT_OPCODE = opcodes.OpNetworkDisconnect
  def GetPutOpInput(self):
    """Disconnects a network from a node group.

    @return: (body, static overrides) tuple for L{opcodes.OpNetworkDisconnect}

    """
    assert self.items
    return (self.request_body, {
      "network_name": self.items[0],
      "dry_run": self.dryRun(),
      })
class R_2_networks_name_modify(baserlib.OpcodeResource):
  """/2/networks/[network_name]/modify resource.

  """
  PUT_OPCODE = opcodes.OpNetworkSetParams
  def GetPutOpInput(self):
    """Changes some parameters of network.

    @return: (body, static overrides) tuple for L{opcodes.OpNetworkSetParams}

    """
    assert self.items
    return (self.request_body, {
      "network_name": self.items[0],
      })
class R_2_groups(baserlib.OpcodeResource):
  """/2/groups resource.

  """
  GET_OPCODE = opcodes.OpGroupQuery
  POST_OPCODE = opcodes.OpGroupAdd
  # Body key "name" is renamed to the opcode's "group_name" parameter
  POST_RENAME = {
    "name": "group_name",
    }
  def GetPostOpInput(self):
    """Create a node group.

    @return: (body, static overrides) tuple for L{opcodes.OpGroupAdd}

    """
    assert not self.items
    return (self.request_body, {
      "dry_run": self.dryRun(),
      })
  def GET(self):
    """Returns a list of all node groups.

    In bulk mode full G_FIELDS data is returned; otherwise a name/uri list.

    """
    client = self.GetClient(query=True)
    if self.useBulk():
      bulkdata = client.QueryGroups([], G_FIELDS, False)
      return baserlib.MapBulkFields(bulkdata, G_FIELDS)
    else:
      data = client.QueryGroups([], ["name"], False)
      groupnames = [row[0] for row in data]
      return baserlib.BuildUriList(groupnames, "/2/groups/%s",
                                   uri_fields=("name", "uri"))
class R_2_groups_name(baserlib.OpcodeResource):
  """/2/groups/[group_name] resource.

  """
  DELETE_OPCODE = opcodes.OpGroupRemove
  def GET(self):
    """Send information about a node group.

    @return: mapping of G_FIELDS to the queried group's values

    """
    group_name = self.items[0]
    client = self.GetClient(query=True)
    result = baserlib.HandleItemQueryErrors(client.QueryGroups,
                                            names=[group_name], fields=G_FIELDS,
                                            use_locking=self.useLocking())
    return baserlib.MapFields(G_FIELDS, result[0])
  def GetDeleteOpInput(self):
    """Delete a node group.

    @return: (body, static overrides) tuple for L{opcodes.OpGroupRemove}

    """
    assert len(self.items) == 1
    return ({}, {
      "group_name": self.items[0],
      "dry_run": self.dryRun(),
      })
class R_2_groups_name_modify(baserlib.OpcodeResource):
  """/2/groups/[group_name]/modify resource.

  """
  PUT_OPCODE = opcodes.OpGroupSetParams
  def GetPutOpInput(self):
    """Changes some parameters of node group.

    @return: (body, static overrides) tuple for L{opcodes.OpGroupSetParams}

    """
    assert self.items
    return (self.request_body, {
      "group_name": self.items[0],
      })
class R_2_groups_name_rename(baserlib.OpcodeResource):
  """/2/groups/[group_name]/rename resource.

  """
  PUT_OPCODE = opcodes.OpGroupRename
  def GetPutOpInput(self):
    """Changes the name of a node group.

    @return: (body, static overrides) tuple for L{opcodes.OpGroupRename}

    """
    assert len(self.items) == 1
    return (self.request_body, {
      "group_name": self.items[0],
      "dry_run": self.dryRun(),
      })
class R_2_groups_name_assign_nodes(baserlib.OpcodeResource):
  """/2/groups/[group_name]/assign-nodes resource.

  """
  PUT_OPCODE = opcodes.OpGroupAssignNodes
  def GetPutOpInput(self):
    """Assigns nodes to a group.

    @return: (body, static overrides) tuple for L{opcodes.OpGroupAssignNodes}

    """
    assert len(self.items) == 1
    return (self.request_body, {
      "group_name": self.items[0],
      "dry_run": self.dryRun(),
      "force": self.useForce(),
      })
class R_2_instances(baserlib.OpcodeResource):
  """/2/instances resource.

  """
  GET_OPCODE = opcodes.OpInstanceQuery
  POST_OPCODE = opcodes.OpInstanceCreate
  # Body keys renamed to the opcode's parameter names
  POST_RENAME = {
    "os": "os_type",
    "name": "instance_name",
    }
  def GET(self):
    """Returns a list of all available instances.

    In bulk mode full I_FIELDS data (with the legacy beparams/memory
    field added) is returned; otherwise an id/uri list.

    """
    client = self.GetClient()
    use_locking = self.useLocking()
    if self.useBulk():
      bulkdata = client.QueryInstances([], I_FIELDS, use_locking)
      return map(_UpdateBeparams, baserlib.MapBulkFields(bulkdata, I_FIELDS))
    else:
      instancesdata = client.QueryInstances([], ["name"], use_locking)
      instanceslist = [row[0] for row in instancesdata]
      return baserlib.BuildUriList(instanceslist, "/2/instances/%s",
                                   uri_fields=("id", "uri"))
  def GetPostOpInput(self):
    """Create an instance.

    Only request data version 1 is accepted; version 0 requests are
    explicitly rejected.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")
    # Default to request data version 0
    data_version = self.getBodyParameter(_REQ_DATA_VERSION, 0)
    if data_version == 0:
      raise http.HttpBadRequest("Instance creation request version 0 is no"
                                " longer supported")
    elif data_version != 1:
      raise http.HttpBadRequest("Unsupported request data version %s" %
                                data_version)
    data = self.request_body.copy()
    # Remove "__version__"
    data.pop(_REQ_DATA_VERSION, None)
    return (data, {
      "dry_run": self.dryRun(),
      })
class R_2_instances_multi_alloc(baserlib.OpcodeResource):
  """/2/instances-multi-alloc resource.

  """
  POST_OPCODE = opcodes.OpInstanceMultiAlloc
  def GetPostOpInput(self):
    """Try to allocate multiple instances.

    Each entry of the required "instances" list gets the create opcode's
    OP_ID injected before being passed to the multi-alloc opcode.

    @return: A dict with submitted jobs, allocatable instances and failed
             allocations

    """
    if "instances" not in self.request_body:
      raise http.HttpBadRequest("Request is missing required 'instances' field"
                                " in body")
    op_id = {
      "OP_ID": self.POST_OPCODE.OP_ID,  # pylint: disable=E1101
      }
    body = objects.FillDict(self.request_body, {
      "instances": [objects.FillDict(inst, op_id)
                    for inst in self.request_body["instances"]],
      })
    return (body, {
      "dry_run": self.dryRun(),
      })
class R_2_instances_name(baserlib.OpcodeResource):
  """/2/instances/[instance_name] resource.

  """
  GET_OPCODE = opcodes.OpInstanceQuery
  DELETE_OPCODE = opcodes.OpInstanceRemove
  def GET(self):
    """Send information about an instance.

    @return: mapping of I_FIELDS (with the legacy beparams/memory field)

    """
    client = self.GetClient()
    instance_name = self.items[0]
    result = baserlib.HandleItemQueryErrors(client.QueryInstances,
                                            names=[instance_name],
                                            fields=I_FIELDS,
                                            use_locking=self.useLocking())
    return _UpdateBeparams(baserlib.MapFields(I_FIELDS, result[0]))
  def GetDeleteOpInput(self):
    """Delete an instance.

    @return: (body, static overrides) tuple for L{opcodes.OpInstanceRemove}

    """
    assert len(self.items) == 1
    return ({}, {
      "instance_name": self.items[0],
      "ignore_failures": False,
      "dry_run": self.dryRun(),
      })
class R_2_instances_name_info(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/info resource.

  """
  GET_OPCODE = opcodes.OpInstanceQueryData
  def GetGetOpInput(self):
    """Request detailed instance information.

    The "static" query argument (0/1, default 0) requests configuration
    data only, without contacting nodes.

    """
    assert len(self.items) == 1
    return ({}, {
      "instances": [self.items[0]],
      "static": bool(self._checkIntVariable("static", default=0)),
      })
class R_2_instances_name_reboot(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/reboot resource.

  Implements an instance reboot.

  """
  POST_OPCODE = opcodes.OpInstanceReboot
  def GetPostOpInput(self):
    """Reboot an instance.

    The URI takes type=[hard|soft|full] and
    ignore_secondaries=[False|True] parameters.

    """
    return ({}, {
      "instance_name": self.items[0],
      # Default reboot type is "hard" when none is given
      "reboot_type":
        self.queryargs.get("type", [constants.INSTANCE_REBOOT_HARD])[0],
      "ignore_secondaries": bool(self._checkIntVariable("ignore_secondaries")),
      "dry_run": self.dryRun(),
      })
class R_2_instances_name_startup(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/startup resource.

  Implements an instance startup.

  """
  PUT_OPCODE = opcodes.OpInstanceStartup
  def GetPutOpInput(self):
    """Startup an instance.

    The URI takes force=[False|True] parameter to start the instance
    even if secondary disks are failing.

    """
    return ({}, {
      "instance_name": self.items[0],
      "force": self.useForce(),
      "dry_run": self.dryRun(),
      "no_remember": bool(self._checkIntVariable("no_remember")),
      })
class R_2_instances_name_shutdown(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/shutdown resource.

  Implements an instance shutdown.

  """
  PUT_OPCODE = opcodes.OpInstanceShutdown
  def GetPutOpInput(self):
    """Shutdown an instance.

    @return: (body, static overrides) tuple for L{opcodes.OpInstanceShutdown}

    """
    return (self.request_body, {
      "instance_name": self.items[0],
      "no_remember": bool(self._checkIntVariable("no_remember")),
      "dry_run": self.dryRun(),
      })
def _ParseInstanceReinstallRequest(name, data):
  """Parses a request for reinstalling an instance.

  Builds the opcode sequence: shutdown, reinstall, and (unless "start"
  is false) startup.

  @param name: instance name
  @param data: request body dict with optional "os", "start", "osparams"
  @return: list of opcodes to submit as one job

  """
  if not isinstance(data, dict):
    raise http.HttpBadRequest("Invalid body contents, not a dictionary")
  ostype = baserlib.CheckParameter(data, "os", default=None)
  start = baserlib.CheckParameter(data, "start", exptype=bool,
                                  default=True)
  osparams = baserlib.CheckParameter(data, "osparams", default=None)
  ops = [
    opcodes.OpInstanceShutdown(instance_name=name),
    opcodes.OpInstanceReinstall(instance_name=name, os_type=ostype,
                                osparams=osparams),
    ]
  if start:
    ops.append(opcodes.OpInstanceStartup(instance_name=name, force=False))
  return ops
class R_2_instances_name_reinstall(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/reinstall resource.

  Implements an instance reinstall.

  """
  POST_OPCODE = opcodes.OpInstanceReinstall
  def POST(self):
    """Reinstall an instance.

    The URI takes os=name and nostartup=[0|1] optional
    parameters. By default, the instance will be started
    automatically.

    """
    if self.request_body:
      # Body and query parameters are mutually exclusive
      if self.queryargs:
        raise http.HttpBadRequest("Can't combine query and body parameters")
      body = self.request_body
    elif self.queryargs:
      # Legacy interface, do not modify/extend
      body = {
        "os": self._checkStringVariable("os"),
        "start": not self._checkIntVariable("nostartup"),
        }
    else:
      body = {}
    ops = _ParseInstanceReinstallRequest(self.items[0], body)
    return self.SubmitJob(ops)
class R_2_instances_name_replace_disks(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/replace-disks resource.

  """
  POST_OPCODE = opcodes.OpInstanceReplaceDisks
  def GetPostOpInput(self):
    """Replaces disks on an instance.

    Takes parameters from the request body, or from the legacy query
    arguments (remote_node, mode, disks, iallocator) when no body was
    sent.  The "disks" value may be a list of integers or a legacy
    comma-separated string such as "1,2,3".

    @return: (body, static overrides) tuple for
        L{opcodes.OpInstanceReplaceDisks}

    """
    static = {
      "instance_name": self.items[0],
      }
    if self.request_body:
      data = self.request_body
    elif self.queryargs:
      # Legacy interface, do not modify/extend
      data = {
        "remote_node": self._checkStringVariable("remote_node", default=None),
        "mode": self._checkStringVariable("mode", default=None),
        "disks": self._checkStringVariable("disks", default=None),
        "iallocator": self._checkStringVariable("iallocator", default=None),
        }
    else:
      data = {}
    # Parse disks
    try:
      raw_disks = data.pop("disks")
    except KeyError:
      pass
    else:
      if raw_disks:
        if ht.TListOf(ht.TInt)(raw_disks):  # pylint: disable=E1102
          data["disks"] = raw_disks
        else:
          # Backwards compatibility for strings of the format "1, 2, 3"
          try:
            data["disks"] = [int(part) for part in raw_disks.split(",")]
          # "except X, err" is Python 2-only syntax; "as err" is equivalent
          # and also valid on Python 2.6+ and Python 3
          except (TypeError, ValueError) as err:
            raise http.HttpBadRequest("Invalid disk index passed: %s" % err)
    return (data, static)
class R_2_instances_name_activate_disks(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/activate-disks resource.

  """
  PUT_OPCODE = opcodes.OpInstanceActivateDisks
  def GetPutOpInput(self):
    """Activate disks for an instance.

    The URI might contain ignore_size to ignore current recorded size.

    """
    return ({}, {
      "instance_name": self.items[0],
      "ignore_size": bool(self._checkIntVariable("ignore_size")),
      })
class R_2_instances_name_deactivate_disks(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/deactivate-disks resource.

  """
  PUT_OPCODE = opcodes.OpInstanceDeactivateDisks
  def GetPutOpInput(self):
    """Deactivate disks for an instance.

    @return: (body, static) tuple for L{opcodes.OpInstanceDeactivateDisks}

    """
    return ({}, {
      "instance_name": self.items[0],
      })
class R_2_instances_name_recreate_disks(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/recreate-disks resource.

  """
  POST_OPCODE = opcodes.OpInstanceRecreateDisks
  def GetPostOpInput(self):
    """Recreate disks for an instance.

    @return: (body, static) tuple for L{opcodes.OpInstanceRecreateDisks}

    """
    return ({}, {
      "instance_name": self.items[0],
      })
class R_2_instances_name_prepare_export(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/prepare-export resource.

  """
  PUT_OPCODE = opcodes.OpBackupPrepare
  def GetPutOpInput(self):
    """Prepares an export for an instance.

    Requires the "mode" query argument.

    """
    return ({}, {
      "instance_name": self.items[0],
      "mode": self._checkStringVariable("mode"),
      })
class R_2_instances_name_export(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/export resource.

  """
  PUT_OPCODE = opcodes.OpBackupExport
  # Body key "destination" is renamed to the opcode's "target_node"
  PUT_RENAME = {
    "destination": "target_node",
    }
  def GetPutOpInput(self):
    """Exports an instance.

    @return: (body, static overrides) tuple for L{opcodes.OpBackupExport}

    """
    return (self.request_body, {
      "instance_name": self.items[0],
      })
class R_2_instances_name_migrate(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/migrate resource.

  """
  PUT_OPCODE = opcodes.OpInstanceMigrate
  def GetPutOpInput(self):
    """Migrates an instance.

    @return: (body, static overrides) tuple for L{opcodes.OpInstanceMigrate}

    """
    return (self.request_body, {
      "instance_name": self.items[0],
      })
class R_2_instances_name_failover(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/failover resource.

  """
  PUT_OPCODE = opcodes.OpInstanceFailover
  def GetPutOpInput(self):
    """Does a failover of an instance.

    @return: (body, static overrides) tuple for L{opcodes.OpInstanceFailover}

    """
    return (self.request_body, {
      "instance_name": self.items[0],
      })
class R_2_instances_name_rename(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/rename resource.

  """
  PUT_OPCODE = opcodes.OpInstanceRename
  def GetPutOpInput(self):
    """Changes the name of an instance.

    @return: (body, static overrides) tuple for L{opcodes.OpInstanceRename}

    """
    return (self.request_body, {
      "instance_name": self.items[0],
      })
class R_2_instances_name_modify(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/modify resource.

  """
  PUT_OPCODE = opcodes.OpInstanceSetParams
  def GetPutOpInput(self):
    """Changes parameters of an instance.

    @return: (body, static overrides) tuple for L{opcodes.OpInstanceSetParams}

    """
    return (self.request_body, {
      "instance_name": self.items[0],
      })
class R_2_instances_name_disk_grow(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/disk/[disk_index]/grow resource.

  """
  POST_OPCODE = opcodes.OpInstanceGrowDisk
  def GetPostOpInput(self):
    """Increases the size of an instance disk.

    The disk index is the second URI item and must parse as an integer.

    """
    return (self.request_body, {
      "instance_name": self.items[0],
      "disk": int(self.items[1]),
      })
class R_2_instances_name_console(baserlib.ResourceBase):
  """/2/instances/[instance_name]/console resource.

  """
  # Console data is sensitive, hence requiring full access
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE, rapi.RAPI_ACCESS_READ]
  GET_OPCODE = opcodes.OpInstanceConsole
  def GET(self):
    """Request information for connecting to instance's console.

    @return: Serialized instance console description, see
             L{objects.InstanceConsole}

    """
    client = self.GetClient()
    ((console, ), ) = client.QueryInstances([self.items[0]], ["console"], False)
    if console is None:
      raise http.HttpServiceUnavailable("Instance console unavailable")
    assert isinstance(console, dict)
    return console
def _GetQueryFields(args):
  """Tries to extract C{fields} query parameter.

  Only the first occurrence of the parameter is used; its value is
  split on commas via L{_SplitQueryFields}.

  @type args: dictionary
  @rtype: list of string
  @raise http.HttpBadRequest: When parameter can't be found

  """
  try:
    fields = args["fields"]
  except KeyError:
    raise http.HttpBadRequest("Missing 'fields' query argument")
  return _SplitQueryFields(fields[0])
def _SplitQueryFields(fields):
"""Splits fields as given for a query request.
@type fields: string
@rtype: list of string
"""
return [i.strip() for i in fields.split(",")]
class R_2_query(baserlib.ResourceBase):
  """/2/query/[resource] resource.

  """
  # Results might contain sensitive information
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE, rapi.RAPI_ACCESS_READ]
  PUT_ACCESS = GET_ACCESS
  GET_OPCODE = opcodes.OpQuery
  PUT_OPCODE = opcodes.OpQuery
  def _Query(self, fields, qfilter):
    # Shared helper for GET and PUT: run the query and serialize the result
    return self.GetClient().Query(self.items[0], fields, qfilter).ToDict()
  def GET(self):
    """Returns resource information.

    @return: Query result, see L{objects.QueryResponse}

    """
    return self._Query(_GetQueryFields(self.queryargs), None)
  def PUT(self):
    """Submits job querying for resources.

    Fields come from the body when present, otherwise from the query
    string; the filter may be given as "qfilter" or legacy "filter".

    @return: Query result, see L{objects.QueryResponse}

    """
    body = self.request_body
    baserlib.CheckType(body, dict, "Body contents")
    try:
      fields = body["fields"]
    except KeyError:
      fields = _GetQueryFields(self.queryargs)
    qfilter = body.get("qfilter", None)
    # TODO: remove this after 2.7
    if qfilter is None:
      qfilter = body.get("filter", None)
    return self._Query(fields, qfilter)
class R_2_query_fields(baserlib.ResourceBase):
  """/2/query/[resource]/fields resource.

  """
  GET_OPCODE = opcodes.OpQueryFields
  def GET(self):
    """Retrieves list of available fields for a resource.

    Without a "fields" query argument all fields are returned.

    @return: List of serialized L{objects.QueryFieldDefinition}

    """
    try:
      raw_fields = self.queryargs["fields"]
    except KeyError:
      fields = None
    else:
      fields = _SplitQueryFields(raw_fields[0])
    return self.GetClient().QueryFields(self.items[0], fields).ToDict()
class _R_Tags(baserlib.OpcodeResource):
  """Quasiclass for tagging resources.

  Manages tags. When inheriting this class you must define the
  TAG_LEVEL for it.

  """
  TAG_LEVEL = None
  GET_OPCODE = opcodes.OpTagsGet
  PUT_OPCODE = opcodes.OpTagsSet
  DELETE_OPCODE = opcodes.OpTagsDel
  def __init__(self, items, queryargs, req, **kwargs):
    """A tag resource constructor.

    We have to override the default to sort out cluster naming case.

    """
    baserlib.OpcodeResource.__init__(self, items, queryargs, req, **kwargs)
    # Cluster tags have no item name; all other levels take it from the URI
    if self.TAG_LEVEL == constants.TAG_CLUSTER:
      self.name = None
    else:
      self.name = items[0]
  def GET(self):
    """Returns a list of tags.

    Example: ["tag1", "tag2", "tag3"]

    """
    kind = self.TAG_LEVEL
    # NOTE(review): "tags" is only assigned in the branches below; a
    # subclass with an unexpected TAG_LEVEL would hit a NameError here --
    # presumably all subclasses use one of the handled levels.
    if kind in (constants.TAG_INSTANCE,
                constants.TAG_NODEGROUP,
                constants.TAG_NODE):
      if not self.name:
        raise http.HttpBadRequest("Missing name on tag request")
      cl = self.GetClient(query=True)
      tags = list(cl.QueryTags(kind, self.name))
    elif kind == constants.TAG_CLUSTER:
      assert not self.name
      # TODO: Use query API?
      ssc = ssconf.SimpleStore()
      tags = ssc.GetClusterTags()
    return list(tags)
  def GetPutOpInput(self):
    """Add a set of tags.

    The request as a list of strings should be PUT to this URI. And
    you'll have back a job id.

    """
    return ({}, {
      "kind": self.TAG_LEVEL,
      "name": self.name,
      "tags": self.queryargs.get("tag", []),
      "dry_run": self.dryRun(),
      })
  def GetDeleteOpInput(self):
    """Delete a tag.

    In order to delete a set of tags, the DELETE
    request should be addressed to URI like:
    /tags?tag=[tag]&tag=[tag]

    """
    # Re-use code
    return self.GetPutOpInput()
class R_2_instances_name_tags(_R_Tags):
  """ /2/instances/[instance_name]/tags resource.

  Manages per-instance tags.

  """
  TAG_LEVEL = constants.TAG_INSTANCE
class R_2_nodes_name_tags(_R_Tags):
  """ /2/nodes/[node_name]/tags resource.

  Manages per-node tags.

  """
  TAG_LEVEL = constants.TAG_NODE
class R_2_groups_name_tags(_R_Tags):
  """ /2/groups/[group_name]/tags resource.

  Manages per-nodegroup tags.

  """
  TAG_LEVEL = constants.TAG_NODEGROUP
class R_2_networks_name_tags(_R_Tags):
  """ /2/networks/[network_name]/tags resource.

  Manages per-network tags.

  """
  TAG_LEVEL = constants.TAG_NETWORK
class R_2_tags(_R_Tags):
  """ /2/tags resource.

  Manages cluster tags.

  """
  TAG_LEVEL = constants.TAG_CLUSTER
|
unknown
|
codeparrot/codeparrot-clean
| ||
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/clock/nxp,lpc1850-cgu.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: NXP LPC1850 Clock Generation Unit (CGU)
description: >
The CGU generates multiple independent clocks for the core and the
peripheral blocks of the LPC18xx. Each independent clock is called
a base clock and itself is one of the inputs to the two Clock
Control Units (CCUs) which control the branch clocks to the
individual peripherals.
The CGU selects the inputs to the clock generators from multiple
clock sources, controls the clock generation, and routes the outputs
of the clock generators through the clock source bus to the output
stages. Each output stage provides an independent clock source and
corresponds to one of the base clocks for the LPC18xx.
Above text taken from NXP LPC1850 User Manual.
maintainers:
- Frank Li <Frank.Li@nxp.com>
properties:
compatible:
const: nxp,lpc1850-cgu
reg:
maxItems: 1
'#clock-cells':
const: 1
description: |
Which base clocks that are available on the CGU depends on the
specific LPC part. Base clocks are numbered from 0 to 27.
Number: Name: Description:
0 BASE_SAFE_CLK Base safe clock (always on) for WWDT
1 BASE_USB0_CLK Base clock for USB0
2 BASE_PERIPH_CLK Base clock for Cortex-M0SUB subsystem,
SPI, and SGPIO
3 BASE_USB1_CLK Base clock for USB1
4 BASE_CPU_CLK System base clock for ARM Cortex-M core
and APB peripheral blocks #0 and #2
5 BASE_SPIFI_CLK Base clock for SPIFI
6 BASE_SPI_CLK Base clock for SPI
7 BASE_PHY_RX_CLK Base clock for Ethernet PHY Receive clock
8 BASE_PHY_TX_CLK Base clock for Ethernet PHY Transmit clock
9 BASE_APB1_CLK Base clock for APB peripheral block # 1
10 BASE_APB3_CLK Base clock for APB peripheral block # 3
11 BASE_LCD_CLK Base clock for LCD
12 BASE_ADCHS_CLK Base clock for ADCHS
13 BASE_SDIO_CLK Base clock for SD/MMC
14 BASE_SSP0_CLK Base clock for SSP0
15 BASE_SSP1_CLK Base clock for SSP1
16 BASE_UART0_CLK Base clock for UART0
17 BASE_UART1_CLK Base clock for UART1
18 BASE_UART2_CLK Base clock for UART2
19 BASE_UART3_CLK Base clock for UART3
20 BASE_OUT_CLK Base clock for CLKOUT pin
24-21 - Reserved
25 BASE_AUDIO_CLK Base clock for audio system (I2S)
26 BASE_CGU_OUT0_CLK Base clock for CGU_OUT0 clock output
27 BASE_CGU_OUT1_CLK Base clock for CGU_OUT1 clock output
BASE_PERIPH_CLK and BASE_SPI_CLK are only available on LPC43xx.
BASE_ADCHS_CLK is only available on LPC4370.
clocks:
maxItems: 5
clock-indices:
minItems: 1
maxItems: 28
clock-output-names:
minItems: 1
maxItems: 28
required:
- compatible
- reg
- clocks
- '#clock-cells'
additionalProperties: false
examples:
- |
clock-controller@40050000 {
compatible = "nxp,lpc1850-cgu";
reg = <0x40050000 0x1000>;
#clock-cells = <1>;
clocks = <&xtal>, <&creg_clk 1>, <&enet_rx_clk>, <&enet_tx_clk>, <&gp_clkin>;
};
|
unknown
|
github
|
https://github.com/torvalds/linux
|
Documentation/devicetree/bindings/clock/nxp,lpc1850-cgu.yaml
|
#!/usr/bin/env python
# encoding: utf-8
################################################################################
#
# RMG - Reaction Mechanism Generator
#
# Copyright (c) 2002-2009 Prof. William H. Green (whgreen@mit.edu) and the
# RMG Team (rmg_dev@mit.edu)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
"""
This module contains helper functionality for writing CanTherm output files.
"""
import ast
################################################################################
class PrettifyVisitor(ast.NodeVisitor):
    """
    A class for traversing an abstract syntax tree to assemble a prettier
    version of the code used to create the tree. Used by the :func:`prettify`
    function.

    The most recently assembled text is stored on the ``string`` attribute,
    so a caller can invoke :meth:`visit` on a parsed snippet and then read
    the result from ``visitor.string``.
    """
    def __init__(self, level=0, indent=4):
        # `level` is the current indentation depth; `indent` is the number
        # of spaces per indentation level.
        self.string = ''
        self.level = level
        self.indent = indent
    def visit_Call(self, node):
        """
        Return a pretty representation of the class or function call
        represented by `node`.
        """
        # First try to fit the whole call on one line; fall back to one
        # keyword argument per line if that would exceed 80 columns.
        keywords = []
        for keyword in node.keywords:
            keywords.append('{0}={1}'.format(keyword.arg, self.visit(keyword.value)))
        result = '{0}({1})'.format(node.func.id, ', '.join(keywords))
        if len(result) > 80:
            result = node.func.id + '(\n'
            self.level += 1
            for keyword in node.keywords:
                result += '{2}{0} = {1},\n'.format(keyword.arg, self.visit(keyword.value), ' ' * (self.level * self.indent))
            self.level -= 1
            result += ' ' * (self.level * self.indent) + ')'
        self.string = result
        return result
    def visit_List(self, node):
        """
        Return a pretty representation of the list represented by `node`.
        """
        if any([not isinstance(e, (ast.Str, ast.Num)) for e in node.elts]):
            # Split elements onto multiple lines
            result = '[\n'
            self.level += 1
            for element in node.elts:
                result += '{1}{0},\n'.format(self.visit(element), ' ' * (self.level * self.indent))
            self.level -= 1
            result += '{0}]'.format(' ' * (self.level * self.indent))
            # Restore self.string, which the nested visits above clobbered,
            # so a top-level multi-line list yields the full result.
            self.string = result
            return result
        else:
            # Keep elements on one line
            result = '[{0}]'.format(', '.join([self.visit(e) for e in node.elts]))
            self.string = result
            return result
    def visit_Tuple(self, node):
        """
        Return a pretty representation of the tuple represented by `node`.
        """
        # If the tuple represents a quantity, keep it on one line
        # (heuristic: a number or list of numbers followed by a unit string).
        isQuantity = True
        if len(node.elts) == 0 or not isinstance(node.elts[0], (ast.Num,ast.List)) or (
            isinstance(node.elts[0], ast.List) and any([not isinstance(e, ast.Num) for e in node.elts[0].elts])):
            isQuantity = False
        elif len(node.elts) < 2 or not isinstance(node.elts[1], ast.Str):
            isQuantity = False
        if not isQuantity:
            # Split elements onto multiple lines
            result = '(\n'
            self.level += 1
            for element in node.elts:
                result += '{1}{0},\n'.format(self.visit(element), ' ' * (self.level * self.indent))
            self.level -= 1
            result += '{0})'.format(' ' * (self.level * self.indent))
            # Restore self.string after the nested visits (see visit_List).
            self.string = result
            return result
        else:
            # Keep elements on one line
            result = '({0})'.format(', '.join([self.visit(e) for e in node.elts]))
            self.string = result
            return result
    def visit_Dict(self, node):
        """
        Return a pretty representation of the dict represented by `node`.
        """
        if any([not isinstance(e, (ast.Str, ast.Num)) for e in node.keys]) or any([not isinstance(e, (ast.Str, ast.Num)) for e in node.values]):
            # Split elements onto multiple lines
            result = '{\n'
            self.level += 1
            for key, value in zip(node.keys, node.values):
                result += '{2}{0}: {1},\n'.format(self.visit(key), self.visit(value), ' ' * (self.level * self.indent))
            self.level -= 1
            result += '{0}}}'.format(' ' * (self.level * self.indent))
            self.string = result
            return result
        else:
            # Keep elements on one line
            result = '{{{0}}}'.format(', '.join(['{0}: {1}'.format(self.visit(key), self.visit(value)) for key, value in zip(node.keys, node.values)]))
            self.string = result
            return result
    def visit_Str(self, node):
        """
        Return a pretty representation of the string represented by `node`.
        """
        result = repr(node.s)
        self.string = result
        return result
    def visit_Num(self, node):
        """
        Return a pretty representation of the number represented by `node`.
        """
        result = '{0:g}'.format(node.n)
        self.string = result
        return result
def prettify(string, indent=4):
    """
    Return a "pretty" version of the given `string`, a snippet of Python code
    such as the representation of an object or a function call. Tuples, lists,
    and dicts (including parameter lists) are split onto multiple lines and
    indented by `indent` spaces per level for readability.
    """
    # Parse the snippet into an abstract syntax tree, walk it with a
    # PrettifyVisitor, and return the formatted text it accumulated.
    tree = ast.parse(string)
    visitor = PrettifyVisitor(indent=indent)
    visitor.visit(tree)
    return visitor.string
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright 2015, Thales Services SAS
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from openstack_dashboard.api import neutron as api
from openstack_dashboard import policy
from horizon import tables
class AddRouterRoute(policy.PolicyTargetMixin, tables.LinkAction):
    """Table action opening the "Add Static Route" modal for a router."""
    name = "create"
    verbose_name = _("Add Static Route")
    url = "horizon:project:routers:addrouterroute"
    classes = ("ajax-modal",)
    icon = "plus"
    policy_rules = (("network", "update_router"),)

    def get_link_url(self, datum=None):
        # The add-route form is always scoped to the router shown in the table.
        return reverse(self.url, args=(self.table.kwargs['router_id'],))
class RemoveRouterRoute(policy.PolicyTargetMixin, tables.DeleteAction):
    """Table action deleting one or more static routes from a router."""
    failure_url = 'horizon:project:routers:detail'
    policy_rules = (("network", "update_router"),)

    @staticmethod
    def action_present(count):
        # Button label, pluralized by the number of selected rows.
        return ungettext_lazy(
            u"Delete Static Route",
            u"Delete Static Routes",
            count
        )

    @staticmethod
    def action_past(count):
        # Past-tense confirmation message, pluralized likewise.
        return ungettext_lazy(
            u"Deleted Static Route",
            u"Deleted Static Routes",
            count
        )

    def delete(self, request, obj_id):
        # Neutron's API removes routes in batches; we pass a one-element list.
        router_id = self.table.kwargs['router_id']
        api.router_static_route_remove(request, router_id, [obj_id])
class ExtraRoutesTable(tables.DataTable):
    """Table listing a router's extra (static) routes."""
    # Column declaration order defines display order.
    destination = tables.Column("destination",
                                verbose_name=_("Destination CIDR"))
    nexthop = tables.Column("nexthop", verbose_name=_("Next Hop"))

    def get_object_display(self, datum):
        """Display ExtraRoutes when deleted."""
        label = super(ExtraRoutesTable, self).get_object_display(datum)
        return label or (datum.destination + " -> " + datum.nexthop)

    class Meta(object):
        name = "extra_routes"
        verbose_name = _("Static Routes")
        table_actions = (AddRouterRoute, RemoveRouterRoute)
        row_actions = (RemoveRouterRoute, )
|
unknown
|
codeparrot/codeparrot-clean
| ||
pr: 140069
summary: "Aggs: Fix class cast exceptions in pipeline aggs"
area: Aggregations
type: bug
issues:
- 137624
- 136173
|
unknown
|
github
|
https://github.com/elastic/elasticsearch
|
docs/changelog/140069.yaml
|
/*
* Copyright 2002-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.beans.factory.support;
import java.lang.reflect.Method;
import java.util.Properties;
import org.jspecify.annotations.Nullable;
import org.springframework.beans.factory.BeanFactory;
import org.springframework.beans.factory.BeanFactoryAware;
import org.springframework.beans.factory.FactoryBean;
import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.beans.factory.config.BeanDefinitionHolder;
import org.springframework.beans.factory.config.ConfigurableListableBeanFactory;
import org.springframework.beans.factory.config.DependencyDescriptor;
import org.springframework.core.ResolvableType;
import org.springframework.util.ClassUtils;
/**
* Basic {@link AutowireCandidateResolver} that performs a full generic type
* match with the candidate's type if the dependency is declared as a generic type
* (for example, {@code Repository<Customer>}).
*
* <p>This is the base class for
* {@link org.springframework.beans.factory.annotation.QualifierAnnotationAutowireCandidateResolver},
* providing an implementation for all non-annotation-based resolution steps at this level.
*
* @author Juergen Hoeller
* @since 4.0
*/
public class GenericTypeAwareAutowireCandidateResolver extends SimpleAutowireCandidateResolver
		implements BeanFactoryAware, Cloneable {

	private @Nullable BeanFactory beanFactory;

	@Override
	public void setBeanFactory(BeanFactory beanFactory) {
		this.beanFactory = beanFactory;
	}

	/**
	 * Return the {@link BeanFactory} that this resolver operates against,
	 * or {@code null} if none has been set yet.
	 */
	protected final @Nullable BeanFactory getBeanFactory() {
		return this.beanFactory;
	}

	/**
	 * Determine whether the given bean definition qualifies as an autowire
	 * candidate for the given dependency: first applies the plain candidate
	 * checks from the superclass, then performs a full generic type match.
	 * @see #checkGenericTypeMatch
	 */
	@Override
	public boolean isAutowireCandidate(BeanDefinitionHolder bdHolder, DependencyDescriptor descriptor) {
		if (!super.isAutowireCandidate(bdHolder, descriptor)) {
			// If explicitly false, do not proceed with any other checks...
			return false;
		}
		return checkGenericTypeMatch(bdHolder, descriptor);
	}

	/**
	 * Match the given dependency type with its generic type information against the given
	 * candidate bean definition.
	 * @param bdHolder the candidate bean definition (plus bean name) to check
	 * @param descriptor the descriptor for the target dependency
	 * @return {@code true} if the candidate's target type matches the (generic)
	 * dependency type — or if no target type could be resolved at all, in which
	 * case the candidate is leniently accepted
	 */
	@SuppressWarnings("NullAway") // Dataflow analysis limitation
	protected boolean checkGenericTypeMatch(BeanDefinitionHolder bdHolder, DependencyDescriptor descriptor) {
		ResolvableType dependencyType = descriptor.getResolvableType();
		if (dependencyType.getType() instanceof Class) {
			// No generic type -> we know it's a Class type-match, so no need to check again.
			return true;
		}
		ResolvableType targetType = null;
		boolean cacheType = false;
		RootBeanDefinition rbd = null;
		if (bdHolder.getBeanDefinition() instanceof RootBeanDefinition rootBeanDef) {
			rbd = rootBeanDef;
		}
		if (rbd != null) {
			targetType = rbd.targetType;
			if (targetType == null) {
				// Not pre-resolved yet -> remember to cache whatever we resolve below.
				cacheType = true;
				// First, check factory method return type, if applicable
				targetType = getReturnTypeForFactoryMethod(rbd, descriptor);
				if (targetType == null) {
					// Next, try the decorated definition (for example, a scoped proxy's target).
					RootBeanDefinition dbd = getResolvedDecoratedDefinition(rbd);
					if (dbd != null) {
						targetType = dbd.targetType;
						if (targetType == null) {
							targetType = getReturnTypeForFactoryMethod(dbd, descriptor);
						}
					}
				}
			}
		}
		if (targetType == null) {
			// Regular case: straight bean instance, with BeanFactory available.
			if (this.beanFactory != null) {
				Class<?> beanType = this.beanFactory.getType(bdHolder.getBeanName());
				if (beanType != null) {
					targetType = ResolvableType.forClass(ClassUtils.getUserClass(beanType));
				}
			}
			// Fallback: no BeanFactory set, or no type resolvable through it
			// -> best-effort match against the target class if applicable.
			if (targetType == null && rbd != null && rbd.hasBeanClass() && rbd.getFactoryMethodName() == null) {
				Class<?> beanClass = rbd.getBeanClass();
				if (!FactoryBean.class.isAssignableFrom(beanClass)) {
					targetType = ResolvableType.forClass(ClassUtils.getUserClass(beanClass));
				}
			}
		}
		if (targetType == null) {
			// Leniently accept candidates whose type cannot be determined at all.
			return true;
		}
		if (cacheType) {
			// Cache the resolved target type on the definition for subsequent checks.
			rbd.targetType = targetType;
		}
		// Pre-declared target type: In case of a generic FactoryBean type,
		// unwrap nested generic type when matching a non-FactoryBean type.
		Class<?> targetClass = targetType.resolve();
		if (targetClass != null && FactoryBean.class.isAssignableFrom(targetClass)) {
			Class<?> classToMatch = dependencyType.resolve();
			if (classToMatch != null && !FactoryBean.class.isAssignableFrom(classToMatch) &&
					!classToMatch.isAssignableFrom(targetClass)) {
				targetType = targetType.getGeneric();
				if (descriptor.fallbackMatchAllowed()) {
					// Matching the Class-based type determination for FactoryBean
					// objects in the lazy-determination getType code path above.
					targetType = ResolvableType.forClass(targetType.resolve());
				}
			}
		}
		if (descriptor.fallbackMatchAllowed()) {
			// Fallback matches allow unresolvable generics, for example, plain HashMap to Map<String,String>;
			// and pragmatically also java.util.Properties to any Map (since despite formally being a
			// Map<Object,Object>, java.util.Properties is usually perceived as a Map<String,String>).
			if (targetType.hasUnresolvableGenerics()) {
				return dependencyType.isAssignableFromResolvedPart(targetType);
			}
			else if (targetType.resolve() == Properties.class) {
				return true;
			}
		}
		// Full check for complex generic type match...
		return dependencyType.isAssignableFrom(targetType);
	}

	/**
	 * Resolve the decorated target definition of the given bean definition, if any,
	 * as a merged {@code RootBeanDefinition} obtained through the containing
	 * {@code ConfigurableListableBeanFactory}.
	 * @return the merged decorated definition, or {@code null} if not present
	 * or not resolvable in the current BeanFactory
	 */
	protected @Nullable RootBeanDefinition getResolvedDecoratedDefinition(RootBeanDefinition rbd) {
		BeanDefinitionHolder decDef = rbd.getDecoratedDefinition();
		if (decDef != null && this.beanFactory instanceof ConfigurableListableBeanFactory clbf) {
			if (clbf.containsBeanDefinition(decDef.getBeanName())) {
				BeanDefinition dbd = clbf.getMergedBeanDefinition(decDef.getBeanName());
				if (dbd instanceof RootBeanDefinition rootBeanDef) {
					return rootBeanDef;
				}
			}
		}
		return null;
	}

	/**
	 * Determine the declared return type of the factory method used to create
	 * the given bean definition, if it is expressive enough for the dependency
	 * at hand.
	 * @return the factory method return type, or {@code null} if not available
	 * or not assignable to the dependency's declared type
	 */
	protected @Nullable ResolvableType getReturnTypeForFactoryMethod(RootBeanDefinition rbd, DependencyDescriptor descriptor) {
		// Should typically be set for any kind of factory method, since the BeanFactory
		// pre-resolves them before reaching out to the AutowireCandidateResolver...
		ResolvableType returnType = rbd.factoryMethodReturnType;
		if (returnType == null) {
			Method factoryMethod = rbd.getResolvedFactoryMethod();
			if (factoryMethod != null) {
				returnType = ResolvableType.forMethodReturnType(factoryMethod);
			}
		}
		if (returnType != null) {
			Class<?> resolvedClass = returnType.resolve();
			if (resolvedClass != null && descriptor.getDependencyType().isAssignableFrom(resolvedClass)) {
				// Only use factory method metadata if the return type is actually expressive enough
				// for our dependency. Otherwise, the returned instance type may have matched instead
				// in case of a singleton instance having been registered with the container already.
				return returnType;
			}
		}
		return null;
	}

	/**
	 * This implementation clones all instance fields through standard
	 * {@link Cloneable} support, allowing for subsequent reconfiguration
	 * of the cloned instance through a fresh {@link #setBeanFactory} call.
	 * @see #clone()
	 */
	@Override
	public AutowireCandidateResolver cloneIfNecessary() {
		try {
			return (AutowireCandidateResolver) clone();
		}
		catch (CloneNotSupportedException ex) {
			throw new IllegalStateException(ex);
		}
	}
}
|
java
|
github
|
https://github.com/spring-projects/spring-framework
|
spring-beans/src/main/java/org/springframework/beans/factory/support/GenericTypeAwareAutowireCandidateResolver.java
|
import threading
from urllib import quote, getproxies
from urlparse import urlparse
import os.path
import time
import traceback
from couchpotato.core.event import fireEvent, addEvent
from couchpotato.core.helpers.encoding import ss, toSafeString, \
toUnicode, sp
from couchpotato.core.helpers.variable import md5, isLocalIP, scanForPassword, tryInt, getIdentifier, \
randomString
from couchpotato.core.logger import CPLog
from couchpotato.environment import Env
import requests
from requests.packages.urllib3 import Timeout
from requests.packages.urllib3.exceptions import MaxRetryError
from tornado import template
log = CPLog(__name__)
class Plugin(object):
    """
    Base class for CouchPotato plugins.

    Bundles common plumbing for all plugins: settings access (:meth:`conf`),
    template rendering, file/directory helpers, a throttled HTTP client with
    per-host failure tracking (:meth:`urlopen`), response caching
    (:meth:`getCache`/:meth:`setCache`), release-name helpers, shutdown
    bookkeeping and simple named locks.
    """

    # Optional display-name override used by getName()/setName().
    _class_name = None
    # Optional dict of {index_name: index_class} registered on database setup.
    _database = None
    plugin_path = None

    # Settings key consulted by isEnabled().
    enabled_option = 'enabled'

    _needs_shutdown = False
    # List of '<plugin>.<handler>' markers for calls currently in flight.
    _running = None

    # Named RLocks for acquireLock()/releaseLock().
    # NOTE(review): class-level dict, so lock state is shared across all
    # plugin instances -- presumably intentional; confirm before changing.
    _locks = {}

    user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:45.0) Gecko/20100101 Firefox/45.0'

    # Per-host HTTP bookkeeping (also class-level/shared): last request time,
    # queue of URLs awaiting their throttling slot, minimum seconds between
    # calls, consecutive-failure counts, and temporary-disable timestamps.
    http_last_use = {}
    http_last_use_queue = {}
    http_time_between_calls = 0
    http_failed_request = {}
    http_failed_disabled = {}

    def __new__(cls, *args, **kwargs):
        # Every plugin registers itself automatically on instantiation.
        new_plugin = super(Plugin, cls).__new__(cls)
        new_plugin.registerPlugin()
        return new_plugin

    def registerPlugin(self):
        """Hook this plugin into the global event system."""
        addEvent('app.do_shutdown', self.doShutdown)
        addEvent('plugin.running', self.isRunning)
        self._running = []
        # Setup database
        if self._database:
            addEvent('database.setup', self.databaseSetup)

    def databaseSetup(self):
        """Register each declared database index with the database layer."""
        for index_name in self._database:
            klass = self._database[index_name]
            fireEvent('database.setup_index', index_name, klass)

    def conf(self, attr, value = None, default = None, section = None):
        """Read (or, when `value` is given, write) a setting for this plugin.

        The settings section defaults to the lowercased plugin name (the part
        before any ':').
        """
        class_name = self.getName().lower().split(':')[0].lower()
        return Env.setting(attr, section = section if section else class_name, value = value, default = default)

    def deleteConf(self, attr):
        """Delete a setting from this plugin's settings section."""
        return Env._settings.delete(attr, section = self.getName().lower().split(':')[0].lower())

    def getName(self):
        # Explicit name override wins; otherwise fall back to the class name.
        return self._class_name or self.__class__.__name__

    def setName(self, name):
        self._class_name = name

    def renderTemplate(self, parent_file, templ, **params):
        """Render a tornado template located next to `parent_file`."""
        t = template.Template(open(os.path.join(os.path.dirname(parent_file), templ), 'r').read())
        return t.generate(**params)

    def createFile(self, path, content, binary = False):
        """Write `content` to `path`, creating parent directories as needed.

        `content` may be a requests Response object (streamed to a ``.tmp``
        file, then renamed into place) or a plain string/bytes payload. On
        write failure the partial file is removed.
        """
        path = sp(path)
        self.makeDir(os.path.dirname(path))
        if os.path.exists(path):
            log.debug('%s already exists, overwriting file with new version', path)
        write_type = 'w+' if not binary else 'w+b'
        # Stream file using response object
        if isinstance(content, requests.models.Response):
            # Write file to temp
            with open('%s.tmp' % path, write_type) as f:
                for chunk in content.iter_content(chunk_size = 1048576):
                    if chunk: # filter out keep-alive new chunks
                        f.write(chunk)
                        f.flush()
            # Rename to destination
            os.rename('%s.tmp' % path, path)
        else:
            try:
                f = open(path, write_type)
                f.write(content)
                f.close()
                try:
                    os.chmod(path, Env.getPermission('file'))
                except:
                    log.error('Failed writing permission to file "%s": %s', (path, traceback.format_exc()))
            except:
                # Clean up the partially-written file on failure.
                log.error('Unable to write file "%s": %s', (path, traceback.format_exc()))
                if os.path.isfile(path):
                    os.remove(path)

    def makeDir(self, path):
        """Create `path` (with parents) if missing; return True on success."""
        path = sp(path)
        try:
            if not os.path.isdir(path):
                os.makedirs(path, Env.getPermission('folder'))
                os.chmod(path, Env.getPermission('folder'))
            return True
        except Exception as e:
            log.error('Unable to create folder "%s": %s', (path, e))
        return False

    def deleteEmptyFolder(self, folder, show_error = True, only_clean = None):
        """Remove empty subdirectories of `folder` (bottom-up), then `folder`
        itself if it ended up empty.

        When `only_clean` is given, only the named subdirectories are walked.
        """
        folder = sp(folder)
        for item in os.listdir(folder):
            full_folder = sp(os.path.join(folder, item))
            if not only_clean or (item in only_clean and os.path.isdir(full_folder)):
                for subfolder, dirs, files in os.walk(full_folder, topdown = False):
                    try:
                        # rmdir only succeeds on empty directories, which is
                        # exactly the behavior we want here.
                        os.rmdir(subfolder)
                    except:
                        if show_error:
                            log.info2('Couldn\'t remove directory %s: %s', (subfolder, traceback.format_exc()))
        try:
            os.rmdir(folder)
        except:
            if show_error:
                log.error('Couldn\'t remove empty directory %s: %s', (folder, traceback.format_exc()))

    # http request
    def urlopen(self, url, timeout = 30, data = None, headers = None, files = None, show_error = True, stream = False):
        """Fetch `url` through the shared HTTP opener.

        Applies default headers, optional proxy settings, per-host throttling
        (:meth:`wait`) and failure tracking: a host with many consecutive
        failures is skipped for 15 minutes. Returns the response content (or
        the Response object when `stream` is True); returns '' for a disabled
        host when `show_error` is True, raises otherwise.
        """
        url = quote(ss(url), safe = "%/:=&?~#+!$,;'@()*[]")
        if not headers: headers = {}
        if not data: data = {}
        # Fill in some headers
        parsed_url = urlparse(url)
        host = '%s%s' % (parsed_url.hostname, (':' + str(parsed_url.port) if parsed_url.port else ''))
        headers['Referer'] = headers.get('Referer', '%s://%s' % (parsed_url.scheme, host))
        headers['Host'] = headers.get('Host', None)
        headers['User-Agent'] = headers.get('User-Agent', self.user_agent)
        headers['Accept-encoding'] = headers.get('Accept-encoding', 'gzip')
        headers['Connection'] = headers.get('Connection', 'keep-alive')
        headers['Cache-Control'] = headers.get('Cache-Control', 'max-age=0')
        use_proxy = Env.setting('use_proxy')
        proxy_url = None
        if use_proxy:
            proxy_server = Env.setting('proxy_server')
            proxy_username = Env.setting('proxy_username')
            proxy_password = Env.setting('proxy_password')
            if proxy_server:
                # Embed credentials in the proxy URL when a username is set.
                loc = "{0}:{1}@{2}".format(proxy_username, proxy_password, proxy_server) if proxy_username else proxy_server
                proxy_url = {
                    "http": "http://"+loc,
                    "https": "https://"+loc,
                }
            else:
                # No explicit proxy configured: fall back to environment proxies.
                proxy_url = getproxies()
        r = Env.get('http_opener')
        # Don't try for failed requests
        if self.http_failed_disabled.get(host, 0) > 0:
            # Host was disabled after repeated failures; honor a 15-minute cooldown.
            if self.http_failed_disabled[host] > (time.time() - 900):
                log.info2('Disabled calls to %s for 15 minutes because so many failed requests.', host)
                if not show_error:
                    raise Exception('Disabled calls to %s for 15 minutes because so many failed requests' % host)
                else:
                    return ''
            else:
                # Cooldown expired: clear the failure bookkeeping for this host.
                del self.http_failed_request[host]
                del self.http_failed_disabled[host]
        self.wait(host, url)
        status_code = None
        try:
            kwargs = {
                'headers': headers,
                'data': data if len(data) > 0 else None,
                'timeout': timeout,
                'files': files,
                'verify': False, #verify_ssl, Disable for now as to many wrongly implemented certificates..
                'stream': stream,
                'proxies': proxy_url,
            }
            method = 'post' if len(data) > 0 or files else 'get'
            log.info('Opening url: %s %s, data: %s', (method, url, [x for x in data.keys()] if isinstance(data, dict) else 'with data'))
            response = r.request(method, url, **kwargs)
            status_code = response.status_code
            if response.status_code == requests.codes.ok:
                data = response if stream else response.content
            else:
                response.raise_for_status()
            self.http_failed_request[host] = 0
        except (IOError, MaxRetryError, Timeout):
            if show_error:
                log.error('Failed opening url in %s: %s %s', (self.getName(), url, traceback.format_exc(0)))
            # Save failed requests by hosts
            try:
                # To many requests
                if status_code in [429]:
                    self.http_failed_request[host] = 1
                    self.http_failed_disabled[host] = time.time()
                if not self.http_failed_request.get(host):
                    self.http_failed_request[host] = 1
                else:
                    self.http_failed_request[host] += 1
                # Disable temporarily
                if self.http_failed_request[host] > 5 and not isLocalIP(host):
                    self.http_failed_disabled[host] = time.time()
            except:
                log.debug('Failed logging failed requests for %s: %s', (url, traceback.format_exc()))
            # Re-raise the original error after recording the failure.
            raise
        self.http_last_use[host] = time.time()
        return data

    def wait(self, host = '', url = ''):
        """Block until this request's turn for `host`, honoring the plugin's
        http_time_between_calls throttle. No-op when the throttle is 0.
        """
        if self.http_time_between_calls == 0:
            return
        try:
            if host not in self.http_last_use_queue:
                self.http_last_use_queue[host] = []
            self.http_last_use_queue[host].append(url)
            while True and not self.shuttingDown():
                wait = (self.http_last_use.get(host, 0) - time.time()) + self.http_time_between_calls
                # Only the URL at the head of the queue may proceed.
                if self.http_last_use_queue[host][0] != url:
                    time.sleep(.1)
                    continue
                if wait > 0:
                    log.debug('Waiting for %s, %d seconds', (self.getName(), max(1, wait)))
                    time.sleep(min(wait, 30))
                else:
                    # Our turn: pop ourselves off the queue and record the time.
                    self.http_last_use_queue[host] = self.http_last_use_queue[host][1:]
                    self.http_last_use[host] = time.time()
                    break
        except:
            log.error('Failed handling waiting call: %s', traceback.format_exc())
            time.sleep(self.http_time_between_calls)

    def beforeCall(self, handler):
        # Mark this handler as running (see isRunning).
        self.isRunning('%s.%s' % (self.getName(), handler.__name__))

    def afterCall(self, handler):
        # Mark this handler as finished.
        self.isRunning('%s.%s' % (self.getName(), handler.__name__), False)

    def doShutdown(self, *args, **kwargs):
        """Event hook: flag the plugin for shutdown."""
        self.shuttingDown(True)
        return True

    def shuttingDown(self, value = None):
        """Get (no argument) or set the shutdown flag."""
        if value is None:
            return self._needs_shutdown
        self._needs_shutdown = value

    def isRunning(self, value = None, boolean = True):
        """Track running handlers: with no argument, return the list of
        running markers; otherwise add (`boolean` True) or remove the marker.
        """
        if value is None:
            return self._running
        if boolean:
            self._running.append(value)
        else:
            try:
                self._running.remove(value)
            except:
                log.error("Something went wrong when finishing the plugin function. Could not find the 'is_running' key")

    def getCache(self, cache_key, url = None, **kwargs):
        """Return the cached value for `cache_key`, fetching `url` (and
        caching the result) on a miss.

        Caching is skipped for requests with POST data or file uploads.
        Returns '' when the fetch fails and `show_error` is truthy.
        """
        use_cache = not len(kwargs.get('data', {})) > 0 and not kwargs.get('files')
        if use_cache:
            cache_key_md5 = md5(cache_key)
            cache = Env.get('cache').get(cache_key_md5)
            if cache:
                if not Env.get('dev'): log.debug('Getting cache %s', cache_key)
                return cache
        if url:
            try:
                # `cache_timeout` is consumed here; it must not leak into urlopen().
                cache_timeout = 300
                if 'cache_timeout' in kwargs:
                    cache_timeout = kwargs.get('cache_timeout')
                    del kwargs['cache_timeout']
                data = self.urlopen(url, **kwargs)
                if data and cache_timeout > 0 and use_cache:
                    self.setCache(cache_key, data, timeout = cache_timeout)
                return data
            except:
                if not kwargs.get('show_error', True):
                    raise
                log.debug('Failed getting cache: %s', (traceback.format_exc(0)))
                return ''

    def setCache(self, cache_key, value, timeout = 300):
        """Store `value` in the shared cache under (the md5 of) `cache_key`."""
        cache_key_md5 = md5(cache_key)
        log.debug('Setting cache %s', cache_key)
        Env.get('cache').set(cache_key_md5, value, timeout)
        return value

    def createNzbName(self, data, media, unique_tag = False):
        """Build a safe download name from release data, appending the cp()
        identifier tag and any password embedded in or supplied with the name.
        """
        release_name = data.get('name')
        tag = self.cpTag(media, unique_tag = unique_tag)
        # Check if password is filename
        name_password = scanForPassword(data.get('name'))
        if name_password:
            release_name, password = name_password
            tag += '{{%s}}' % password
        elif data.get('password'):
            tag += '{{%s}}' % data.get('password')
        max_length = 127 - len(tag) # Some filesystems don't support 128+ long filenames
        return '%s%s' % (toSafeString(toUnicode(release_name)[:max_length]), tag)

    def createFileName(self, data, filedata, media, unique_tag = False):
        """Build a filename for the payload; NZB payloads that don't look like
        XML are presumed to be rar archives.
        """
        name = self.createNzbName(data, media, unique_tag = unique_tag)
        if data.get('protocol') == 'nzb' and 'DOCTYPE nzb' not in filedata and '</nzb>' not in filedata:
            return '%s.%s' % (name, 'rar')
        return '%s.%s' % (name, data.get('protocol'))

    def cpTag(self, media, unique_tag = False):
        """Return the '.cp(<identifier>[, <random>])' marker for `media`, or
        '' when the renamer is disabled and no unique tag was requested.
        """
        tag = ''
        if Env.setting('enabled', 'renamer') or unique_tag:
            identifier = getIdentifier(media) or ''
            unique_tag = ', ' + randomString() if unique_tag else ''
            tag = '.cp('
            tag += identifier
            tag += ', ' if unique_tag and identifier else ''
            tag += randomString() if unique_tag else ''
            tag += ')'
        # A bare '.cp()' (7 chars or fewer) carries no information; drop it.
        return tag if len(tag) > 7 else ''

    def checkFilesChanged(self, files, unchanged_for = 60):
        """Check whether any of `files` changed within the last
        `unchanged_for` seconds (or disappeared mid-check).

        Returns (seconds_since_change, human_time_string) when a file is too
        new, or (False, None) when all files are settled.
        """
        now = time.time()
        file_too_new = False
        file_time = []
        for cur_file in files:
            # File got removed while checking
            if not os.path.isfile(cur_file):
                file_too_new = now
                break
            # File has changed in last 60 seconds
            file_time = self.getFileTimes(cur_file)
            for t in file_time:
                if t > now - unchanged_for:
                    file_too_new = tryInt(time.time() - t)
                    break
            if file_too_new:
                break
        if file_too_new:
            try:
                time_string = time.ctime(file_time[0])
            except:
                try:
                    time_string = time.ctime(file_time[1])
                except:
                    time_string = 'unknown'
            return file_too_new, time_string
        return False, None

    def getFileTimes(self, file_path):
        # [mtime, ctime] -- ctime is skipped (0) on POSIX, where it is not a
        # creation time.
        return [os.path.getmtime(file_path), os.path.getctime(file_path) if os.name != 'posix' else 0]

    def isDisabled(self):
        return not self.isEnabled()

    def isEnabled(self):
        # Missing setting (None) counts as enabled.
        return self.conf(self.enabled_option) or self.conf(self.enabled_option) is None

    def acquireLock(self, key):
        """Acquire (creating on first use) the named re-entrant lock."""
        lock = self._locks.get(key)
        if not lock:
            self._locks[key] = threading.RLock()
        log.debug('Acquiring lock: %s', key)
        self._locks.get(key).acquire()

    def releaseLock(self, key):
        """Release the named lock, if it exists."""
        lock = self._locks.get(key)
        if lock:
            log.debug('Releasing lock: %s', key)
            self._locks.get(key).release()
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""
Form generation utilities for App Engine's new ``ndb.Model`` class.
The goal of ``model_form()`` is to provide a clean, explicit and predictable
way to create forms based on ``ndb.Model`` classes. No juggling or black
magic should be necessary to generate a form for models, and to add custom
non-model related fields: ``model_form()`` simply generates a form class
that can be used as it is, or that can be extended directly or even be used
to create other forms using ``model_form()``.
Example usage:
.. code-block:: python
from google.appengine.ext import ndb
from wtforms.ext.appengine.ndb import model_form
# Define an example model and add a record.
class Contact(ndb.Model):
name = ndb.StringProperty(required=True)
city = ndb.StringProperty()
age = ndb.IntegerProperty(required=True)
is_admin = ndb.BooleanProperty(default=False)
new_entity = Contact(key_name='test', name='Test Name', age=17)
new_entity.put()
# Generate a form based on the model.
ContactForm = model_form(Contact)
# Get a form populated with entity data.
entity = Contact.get_by_key_name('test')
form = ContactForm(obj=entity)
Properties from the model can be excluded from the generated form, or it can
include just a set of properties. For example:
.. code-block:: python
# Generate a form based on the model, excluding 'city' and 'is_admin'.
ContactForm = model_form(Contact, exclude=('city', 'is_admin'))
# or...
# Generate a form based on the model, only including 'name' and 'age'.
ContactForm = model_form(Contact, only=('name', 'age'))
The form can be generated setting field arguments:
.. code-block:: python
ContactForm = model_form(Contact, only=('name', 'age'), field_args={
'name': {
'label': 'Full name',
'description': 'Your name',
},
'age': {
'label': 'Age',
'validators': [validators.NumberRange(min=14, max=99)],
}
})
The class returned by ``model_form()`` can be used as a base class for forms
mixing non-model fields and/or other model forms. For example:
.. code-block:: python
# Generate a form based on the model.
BaseContactForm = model_form(Contact)
# Generate a form based on other model.
ExtraContactForm = model_form(MyOtherModel)
class ContactForm(BaseContactForm):
# Add an extra, non-model related field.
subscribe_to_news = f.BooleanField()
# Add the other model form as a subform.
extra = f.FormField(ExtraContactForm)
The class returned by ``model_form()`` can also extend an existing form
class:
.. code-block:: python
class BaseContactForm(Form):
# Add an extra, non-model related field.
subscribe_to_news = f.BooleanField()
# Generate a form based on the model.
ContactForm = model_form(Contact, base_class=BaseContactForm)
"""
from wtforms import Form, validators, fields as f
from wtforms.compat import string_types
from wtforms.ext.appengine.fields import GeoPtPropertyField, KeyPropertyField, StringListPropertyField, IntegerListPropertyField
def get_TextField(kwargs):
    """
    Build a ``TextField``, applying the ``ndb.StringProperty`` length limit
    of 500 bytes via a length validator.
    """
    length_limit = validators.length(max=500)
    kwargs['validators'].append(length_limit)
    return f.TextField(**kwargs)
def get_IntegerField(kwargs):
    """
    Build an ``IntegerField``, constraining input to the signed 64-bit range
    supported by ``ndb.IntegerProperty``.
    """
    int64_range = validators.NumberRange(min=-0x8000000000000000,
                                         max=0x7fffffffffffffff)
    kwargs['validators'].append(int64_range)
    return f.IntegerField(**kwargs)
class ModelConverterBase(object):
    """
    Base class for converters that map ``ndb`` model properties to WTForms
    fields.  Subclasses define ``convert_<PropertyTypeName>`` methods, which
    are collected by name into ``self.converters`` at construction time.
    """
    def __init__(self, converters=None):
        """
        Constructs the converter, setting the converter callables.
        :param converters:
            A dictionary of converter callables for each property type. The
            callable must accept the arguments (model, prop, kwargs).
        """
        # NOTE(review): the `converters` argument is currently ignored; the
        # registry is always rebuilt from the convert_* methods -- confirm
        # whether callers rely on passing custom converters here.
        self.converters = {}
        for name in dir(self):
            if not name.startswith('convert_'):
                continue
            # Strip the 'convert_' prefix to get the property type name key.
            self.converters[name[8:]] = getattr(self, name)
    def convert(self, model, prop, field_args):
        """
        Returns a form field for a single model property.
        :param model:
            The ``db.Model`` class that contains the property.
        :param prop:
            The model property: a ``db.Property`` instance.
        :param field_args:
            Optional keyword arguments to construct the field.
        """
        prop_type_name = type(prop).__name__
        # Check for generic property
        if(prop_type_name == "GenericProperty"):
            # Try to get type from field args
            generic_type = field_args.get("type")
            if generic_type:
                prop_type_name = field_args.get("type")
            # If no type is found, the generic property uses string set in convert_GenericProperty
        # Default field kwargs derived from the property's metadata; any
        # caller-supplied field_args override these below.
        kwargs = {
            'label': prop._code_name.replace('_', ' ').title(),
            'default': prop._default,
            'validators': [],
        }
        if field_args:
            kwargs.update(field_args)
        # NOTE(review): NO_AUTO_REQUIRED is expected to be provided by a
        # subclass (it is not defined on this base class) -- confirm.
        if prop._required and prop_type_name not in self.NO_AUTO_REQUIRED:
            kwargs['validators'].append(validators.required())
        # Explicit choices in field_args win over the property's own choices.
        if kwargs.get('choices', None):
            # Use choices in a select field.
            kwargs['choices'] = [(v, v) for v in kwargs.get('choices')]
            return f.SelectField(**kwargs)
        if prop._choices:
            # Use choices in a select field.
            kwargs['choices'] = [(v, v) for v in prop._choices]
            return f.SelectField(**kwargs)
        else:
            # Dispatch to the registered convert_* method for this property
            # type, falling back to fallback_converter (subclass-provided).
            converter = self.converters.get(prop_type_name, None)
            if converter is not None:
                return converter(model, prop, kwargs)
            else:
                return self.fallback_converter(model, prop, kwargs)
class ModelConverter(ModelConverterBase):
    """
    Converts properties from a ``ndb.Model`` class to form fields.

    Default conversions between properties and fields (table corrected to
    match the implementation below):

    +=========================+====================+=============+==================+
    | Property subclass       | Field subclass     | datatype    | notes            |
    +=========================+====================+=============+==================+
    | StringProperty          | TextField          | unicode     | list field if    |
    |                         |                    |             | repeated         |
    +-------------------------+--------------------+-------------+------------------+
    | BooleanProperty         | BooleanField       | bool        |                  |
    +-------------------------+--------------------+-------------+------------------+
    | IntegerProperty         | IntegerField       | int or long | list field if    |
    |                         |                    |             | repeated         |
    +-------------------------+--------------------+-------------+------------------+
    | FloatProperty           | FloatField         | float       |                  |
    +-------------------------+--------------------+-------------+------------------+
    | DateTimeProperty        | DateTimeField      | datetime    | skipped if       |
    |                         |                    |             | auto_now[_add]   |
    +-------------------------+--------------------+-------------+------------------+
    | DateProperty            | DateField          | date        | skipped if       |
    |                         |                    |             | auto_now[_add]   |
    +-------------------------+--------------------+-------------+------------------+
    | TimeProperty            | DateTimeField      | time        | skipped if       |
    |                         |                    |             | auto_now[_add]   |
    +-------------------------+--------------------+-------------+------------------+
    | TextProperty            | TextAreaField      | unicode     |                  |
    +-------------------------+--------------------+-------------+------------------+
    | GeoPtProperty           | GeoPtPropertyField | db.GeoPt    |                  |
    +-------------------------+--------------------+-------------+------------------+
    | KeyProperty             | KeyPropertyField   | ndb.Key     |                  |
    +-------------------------+--------------------+-------------+------------------+
    | BlobKeyProperty         | FileField          | ndb.BlobKey |                  |
    +-------------------------+--------------------+-------------+------------------+
    | UserProperty            | None               | users.User  | always skipped   |
    +-------------------------+--------------------+-------------+------------------+
    | StructuredProperty      | None               | ndb.Model   | always skipped   |
    +-------------------------+--------------------+-------------+------------------+
    | LocalStructuredProperty | None               | ndb.Model   | always skipped   |
    +-------------------------+--------------------+-------------+------------------+
    | JsonProperty            | None               | unicode     | always skipped   |
    +-------------------------+--------------------+-------------+------------------+
    | PickleProperty          | None               | bytedata    | always skipped   |
    +-------------------------+--------------------+-------------+------------------+
    | GenericProperty         | TextField          | generic     | type may be set  |
    |                         |                    |             | via field_args   |
    +-------------------------+--------------------+-------------+------------------+
    | ComputedProperty        | None               |             | always skipped   |
    +=========================+====================+=============+==================+
    """

    # Don't automatically add a required validator for these properties
    NO_AUTO_REQUIRED = frozenset(['ListProperty', 'StringListProperty', 'BooleanProperty'])

    def convert_StringProperty(self, model, prop, kwargs):
        """Returns a form field for a ``ndb.StringProperty``."""
        if prop._repeated:
            return StringListPropertyField(**kwargs)
        # ndb.StringProperty values are limited to 500 characters.
        kwargs['validators'].append(validators.length(max=500))
        return get_TextField(kwargs)

    def convert_BooleanProperty(self, model, prop, kwargs):
        """Returns a form field for a ``ndb.BooleanProperty``."""
        return f.BooleanField(**kwargs)

    def convert_IntegerProperty(self, model, prop, kwargs):
        """Returns a form field for a ``ndb.IntegerProperty``."""
        if prop._repeated:
            return IntegerListPropertyField(**kwargs)
        return get_IntegerField(kwargs)

    def convert_FloatProperty(self, model, prop, kwargs):
        """Returns a form field for a ``ndb.FloatProperty``."""
        return f.FloatField(**kwargs)

    def convert_DateTimeProperty(self, model, prop, kwargs):
        """Returns a form field for a ``ndb.DateTimeProperty``.

        Auto-updated timestamps are not user-editable, so they are skipped.
        """
        if prop._auto_now or prop._auto_now_add:
            return None
        return f.DateTimeField(format='%Y-%m-%d %H:%M:%S', **kwargs)

    def convert_DateProperty(self, model, prop, kwargs):
        """Returns a form field for a ``ndb.DateProperty``.

        Auto-updated dates are not user-editable, so they are skipped.
        """
        if prop._auto_now or prop._auto_now_add:
            return None
        return f.DateField(format='%Y-%m-%d', **kwargs)

    def convert_TimeProperty(self, model, prop, kwargs):
        """Returns a form field for a ``ndb.TimeProperty``.

        Auto-updated times are not user-editable, so they are skipped.
        """
        if prop._auto_now or prop._auto_now_add:
            return None
        return f.DateTimeField(format='%H:%M:%S', **kwargs)

    def convert_RepeatedProperty(self, model, prop, kwargs):
        """Skips generic repeated properties (no form field generated)."""
        return None

    def convert_UserProperty(self, model, prop, kwargs):
        """Skips ``ndb.UserProperty`` (no form field generated)."""
        return None

    def convert_StructuredProperty(self, model, prop, kwargs):
        """Skips ``ndb.StructuredProperty`` (no form field generated)."""
        return None

    def convert_LocalStructuredProperty(self, model, prop, kwargs):
        """Skips ``ndb.LocalStructuredProperty`` (no form field generated)."""
        return None

    def convert_JsonProperty(self, model, prop, kwargs):
        """Skips ``ndb.JsonProperty`` (no form field generated)."""
        return None

    def convert_PickleProperty(self, model, prop, kwargs):
        """Skips ``ndb.PickleProperty`` (no form field generated)."""
        return None

    def convert_GenericProperty(self, model, prop, kwargs):
        """Returns a text form field for a ``ndb.GenericProperty``.

        Used as the default when no explicit 'type' is given in field_args.
        """
        kwargs['validators'].append(validators.length(max=500))
        return get_TextField(kwargs)

    def convert_BlobKeyProperty(self, model, prop, kwargs):
        """Returns a form field for a ``ndb.BlobKeyProperty``."""
        return f.FileField(**kwargs)

    def convert_TextProperty(self, model, prop, kwargs):
        """Returns a form field for a ``ndb.TextProperty``."""
        return f.TextAreaField(**kwargs)

    def convert_ComputedProperty(self, model, prop, kwargs):
        """Skips ``ndb.ComputedProperty`` (derived values are read-only)."""
        return None

    def convert_GeoPtProperty(self, model, prop, kwargs):
        """Returns a form field for a ``ndb.GeoPtProperty``."""
        return GeoPtPropertyField(**kwargs)

    def convert_KeyProperty(self, model, prop, kwargs):
        """Returns a form field for a ``ndb.KeyProperty``.

        Resolves the referenced model class from the property's kind when
        the caller did not pass one in explicitly; a string kind is
        resolved against the model's own module.
        """
        if 'reference_class' not in kwargs:
            try:
                reference_class = prop._kind
            except AttributeError:
                reference_class = prop._reference_class
            if isinstance(reference_class, string_types):
                # reference class is a string, try to retrieve the model object.
                mod = __import__(model.__module__, None, None, [reference_class], 0)
                reference_class = getattr(mod, reference_class)
            kwargs['reference_class'] = reference_class
        kwargs.setdefault('allow_blank', not prop._required)
        return KeyPropertyField(**kwargs)
def model_fields(model, only=None, exclude=None, field_args=None,
                 converter=None):
    """
    Extracts and returns a dictionary of form fields for a given
    ``db.Model`` class.

    :param model:
        The ``db.Model`` class to extract fields from.
    :param only:
        An optional iterable with the property names that should be included
        in the form. Only these properties will have fields.
    :param exclude:
        An optional iterable with the property names that should be excluded
        from the form. All other properties will have fields.
    :param field_args:
        An optional dictionary of field names mapping to keyword arguments
        used to construct each field object.
    :param converter:
        A converter to generate the fields based on the model properties. If
        not set, ``ModelConverter`` is used.
    """
    converter = converter or ModelConverter()
    field_args = field_args or {}

    # Order properties by declaration order on the model.
    props = model._properties
    ordered = sorted(props.items(), key=lambda item: item[1]._creation_counter)
    field_names = [name for name, _prop in ordered]

    if only:
        # Keep the order requested by the caller.
        known = set(field_names)
        field_names = [name for name in only if name in known]
    elif exclude:
        dropped = set(exclude)
        field_names = [name for name in field_names if name not in dropped]

    # Convert every remaining property; converters may return None to skip.
    field_dict = {}
    for name in field_names:
        field = converter.convert(model, props[name], field_args.get(name))
        if field is not None:
            field_dict[name] = field
    return field_dict
def model_form(model, base_class=Form, only=None, exclude=None, field_args=None,
               converter=None):
    """
    Creates and returns a dynamic ``wtforms.Form`` class for a given
    ``ndb.Model`` class. The form class can be used as it is or serve as a
    base for extended form classes, which can then mix non-model related
    fields, subforms with other model forms, among other possibilities.

    :param model:
        The ``ndb.Model`` class to generate a form for.
    :param base_class:
        Base form class to extend from. Must be a ``wtforms.Form`` subclass.
    :param only:
        An optional iterable with the property names that should be included
        in the form. Only these properties will have fields.
    :param exclude:
        An optional iterable with the property names that should be excluded
        from the form. All other properties will have fields.
    :param field_args:
        An optional dictionary of field names mapping to keyword arguments
        used to construct each field object.
    :param converter:
        A converter to generate the fields based on the model properties. If
        not set, ``ModelConverter`` is used.
    """
    # Build the field dictionary, then create the form class dynamically
    # with those fields as class attributes.
    fields = model_fields(model, only, exclude, field_args, converter)
    form_name = model._get_kind() + 'Form'
    return type(form_name, (base_class,), fields)
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
* Copyright 2012-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.boot.gradle.docs;
/**
 * Shared constants for locating the Gradle plugin documentation example
 * snippets used by the docs tests.
 *
 * @author Phillip Webb
 */
final class Examples {

	// Root directory (relative to the project) of the Antora example
	// sources for the Gradle plugin documentation.
	static final String DIR = "src/docs/antora/modules/gradle-plugin/examples/";

	private Examples() {
		// Utility holder: not instantiable.
	}

}
|
java
|
github
|
https://github.com/spring-projects/spring-boot
|
build-plugin/spring-boot-gradle-plugin/src/test/java/org/springframework/boot/gradle/docs/Examples.java
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import traceback
from telemetry import page as page_module
from telemetry.page import page_set
from telemetry.results import base_test_results_unittest
from telemetry.results import gtest_progress_reporter
from telemetry.results import page_test_results
from telemetry.unittest_util import simple_mock
from telemetry.value import failure
from telemetry.value import skip
def _MakePageSet():
  """Builds the four-page PageSet fixture shared by the tests below."""
  story_set = page_set.PageSet(file_path=os.path.dirname(__file__))
  for url in ('http://www.foo.com/', 'http://www.bar.com/',
              'http://www.baz.com/', 'http://www.roz.com/'):
    story_set.AddUserStory(page_module.Page(url, story_set, story_set.base_dir))
  return story_set
class GTestProgressReporterTest(
    base_test_results_unittest.BaseTestResultsUnittest):
  """Checks the gtest-style console output of GTestProgressReporter."""

  def setUp(self):
    super(GTestProgressReporterTest, self).setUp()
    # Replace the reporter's timer so elapsed times are deterministic.
    self._mock_timer = simple_mock.MockTimer(gtest_progress_reporter)
    self._output_stream = base_test_results_unittest.TestOutputStream()
    self._reporter = gtest_progress_reporter.GTestProgressReporter(
        self._output_stream)

  def tearDown(self):
    self._mock_timer.Restore()

  def testSingleSuccessPage(self):
    test_page_set = _MakePageSet()
    results = page_test_results.PageTestResults(
        progress_reporter=self._reporter)
    results.WillRunPage(test_page_set.pages[0])
    self._mock_timer.SetTime(0.007)
    results.DidRunPage(test_page_set.pages[0])
    results.PrintSummary()
    expected = ('[ RUN ] http://www.foo.com/\n'
                '[ OK ] http://www.foo.com/ (7 ms)\n'
                '[ PASSED ] 1 test.\n\n')
    self.assertEquals(expected, ''.join(self._output_stream.output_data))

  def testSingleFailedPage(self):
    test_page_set = _MakePageSet()
    results = page_test_results.PageTestResults(
        progress_reporter=self._reporter)
    results.WillRunPage(test_page_set.pages[0])
    exc_info = self.CreateException()
    results.AddValue(failure.FailureValue(test_page_set.pages[0], exc_info))
    results.DidRunPage(test_page_set.pages[0])
    results.PrintSummary()
    exception_trace = ''.join(traceback.format_exception(*exc_info))
    expected = ('[ RUN ] http://www.foo.com/\n'
                '%s\n'
                '[ FAILED ] http://www.foo.com/ (0 ms)\n'
                '[ PASSED ] 0 tests.\n'
                '[ FAILED ] 1 test, listed below:\n'
                '[ FAILED ] http://www.foo.com/\n\n'
                '1 FAILED TEST\n\n' % exception_trace)
    self.assertEquals(expected, ''.join(self._output_stream.output_data))

  def testSingleSkippedPage(self):
    test_page_set = _MakePageSet()
    results = page_test_results.PageTestResults(
        progress_reporter=self._reporter)
    results.WillRunPage(test_page_set.pages[0])
    self._mock_timer.SetTime(0.007)
    results.AddValue(skip.SkipValue(test_page_set.pages[0],
                                    'Page skipped for testing reason'))
    results.DidRunPage(test_page_set.pages[0])
    results.PrintSummary()
    expected = ('[ RUN ] http://www.foo.com/\n'
                '===== SKIPPING TEST http://www.foo.com/:'
                ' Page skipped for testing reason =====\n'
                '[ OK ] http://www.foo.com/ (7 ms)\n'
                '[ PASSED ] 1 test.\n\n')
    self.assertEquals(expected, ''.join(self._output_stream.output_data))

  def testPassAndFailedPages(self):
    test_page_set = _MakePageSet()
    results = page_test_results.PageTestResults(
        progress_reporter=self._reporter)
    exc_info = self.CreateException()
    results.WillRunPage(test_page_set.pages[0])
    self._mock_timer.SetTime(0.007)
    results.DidRunPage(test_page_set.pages[0])
    results.WillRunPage(test_page_set.pages[1])
    self._mock_timer.SetTime(0.009)
    results.AddValue(failure.FailureValue(test_page_set.pages[1], exc_info))
    results.DidRunPage(test_page_set.pages[1])
    results.WillRunPage(test_page_set.pages[2])
    self._mock_timer.SetTime(0.015)
    results.AddValue(failure.FailureValue(test_page_set.pages[2], exc_info))
    results.DidRunPage(test_page_set.pages[2])
    results.WillRunPage(test_page_set.pages[3])
    self._mock_timer.SetTime(0.020)
    results.DidRunPage(test_page_set.pages[3])
    results.PrintSummary()
    exception_trace = ''.join(traceback.format_exception(*exc_info))
    expected = ('[ RUN ] http://www.foo.com/\n'
                '[ OK ] http://www.foo.com/ (7 ms)\n'
                '[ RUN ] http://www.bar.com/\n'
                '%s\n'
                '[ FAILED ] http://www.bar.com/ (2 ms)\n'
                '[ RUN ] http://www.baz.com/\n'
                '%s\n'
                '[ FAILED ] http://www.baz.com/ (6 ms)\n'
                '[ RUN ] http://www.roz.com/\n'
                '[ OK ] http://www.roz.com/ (5 ms)\n'
                '[ PASSED ] 2 tests.\n'
                '[ FAILED ] 2 tests, listed below:\n'
                '[ FAILED ] http://www.bar.com/\n'
                '[ FAILED ] http://www.baz.com/\n\n'
                '2 FAILED TESTS\n\n' % (exception_trace, exception_trace))
    self.assertEquals(expected, ''.join(self._output_stream.output_data))

  def testStreamingResults(self):
    test_page_set = _MakePageSet()
    results = page_test_results.PageTestResults(
        progress_reporter=self._reporter)
    exc_info = self.CreateException()
    results.WillRunPage(test_page_set.pages[0])
    self._mock_timer.SetTime(0.007)
    results.DidRunPage(test_page_set.pages[0])
    expected = ('[ RUN ] http://www.foo.com/\n'
                '[ OK ] http://www.foo.com/ (7 ms)\n')
    self.assertEquals(expected, ''.join(self._output_stream.output_data))
    results.WillRunPage(test_page_set.pages[1])
    self._mock_timer.SetTime(0.009)
    exception_trace = ''.join(traceback.format_exception(*exc_info))
    results.AddValue(failure.FailureValue(test_page_set.pages[1], exc_info))
    results.DidRunPage(test_page_set.pages[1])
    expected = ('[ RUN ] http://www.foo.com/\n'
                '[ OK ] http://www.foo.com/ (7 ms)\n'
                '[ RUN ] http://www.bar.com/\n'
                '%s\n'
                '[ FAILED ] http://www.bar.com/ (2 ms)\n' % exception_trace)
    # Bug fix: this second expected output was built but never verified.
    self.assertEquals(expected, ''.join(self._output_stream.output_data))

  def testOutputSkipInformation(self):
    test_page_set = _MakePageSet()
    # Recreate the reporter with the skipped-tests summary enabled.
    self._reporter = gtest_progress_reporter.GTestProgressReporter(
        self._output_stream, output_skipped_tests_summary=True)
    results = page_test_results.PageTestResults(
        progress_reporter=self._reporter)
    results.WillRunPage(test_page_set.pages[0])
    self._mock_timer.SetTime(0.007)
    results.AddValue(skip.SkipValue(test_page_set.pages[0],
                                    'Page skipped for testing reason'))
    results.DidRunPage(test_page_set.pages[0])
    results.PrintSummary()
    expected = ('[ RUN ] http://www.foo.com/\n'
                '===== SKIPPING TEST http://www.foo.com/:'
                ' Page skipped for testing reason =====\n'
                '[ OK ] http://www.foo.com/ (7 ms)\n'
                '[ PASSED ] 1 test.\n'
                '\n'
                'Skipped pages:\n'
                'http://www.foo.com/\n'
                '\n')
    self.assertEquals(expected, ''.join(self._output_stream.output_data))
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2017, René Moser <mail@renemoser.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: vultr_dns_record
short_description: Manages DNS records on Vultr.
description:
- Create, update and remove DNS records.
version_added: "2.5"
author: "René Moser (@resmo)"
options:
name:
description:
- The record name (subrecord).
default: ""
aliases: [ subrecord ]
domain:
description:
- The domain the record is related to.
required: true
record_type:
description:
- Type of the record.
default: A
choices:
- A
- AAAA
- CNAME
- MX
- SRV
- CAA
- TXT
- NS
- SSHFP
aliases: [ type ]
data:
description:
- Data of the record.
- Required if C(state=present) or C(multiple=yes).
ttl:
description:
- TTL of the record.
default: 300
multiple:
description:
- Whether to use more than one record with similar C(name) including no name and C(record_type).
- Only allowed for a few record types, e.g. C(record_type=A), C(record_type=NS) or C(record_type=MX).
- C(data) will not be updated, instead it is used as a key to find existing records.
default: no
type: bool
priority:
description:
- Priority of the record.
default: 0
state:
description:
- State of the DNS record.
default: present
choices: [ present, absent ]
extends_documentation_fragment: vultr
'''
EXAMPLES = '''
- name: Ensure an A record exists
vultr_dns_record:
name: www
domain: example.com
data: 10.10.10.10
ttl: 3600
- name: Ensure a second A record exists for round robin LB
vultr_dns_record:
name: www
domain: example.com
data: 10.10.10.11
ttl: 60
multiple: yes
- name: Ensure a CNAME record exists
vultr_dns_record:
name: web
record_type: CNAME
domain: example.com
data: www.example.com
- name: Ensure MX record exists
vultr_dns_record:
record_type: MX
domain: example.com
data: "{{ item.data }}"
priority: "{{ item.priority }}"
multiple: yes
with_items:
- { data: mx1.example.com, priority: 10 }
- { data: mx2.example.com, priority: 10 }
- { data: mx3.example.com, priority: 20 }
- name: Ensure a record is absent
local_action:
module: vultr_dns_record
name: www
domain: example.com
state: absent
- name: Ensure MX record is absent in case multiple exists
vultr_dns_record:
record_type: MX
domain: example.com
data: mx1.example.com
multiple: yes
state: absent
'''
# Documented return values. Bug fix: the record_type entry was a copy-paste
# of the name entry (description "The name of the DNS record.", sample "web").
RETURN = '''
---
vultr_api:
  description: Response from Vultr API with a few additions/modification
  returned: success
  type: complex
  contains:
    api_account:
      description: Account used in the ini file to select the key
      returned: success
      type: string
      sample: default
    api_timeout:
      description: Timeout used for the API requests
      returned: success
      type: int
      sample: 60
vultr_dns_record:
  description: Response from Vultr API
  returned: success
  type: complex
  contains:
    id:
      description: The ID of the DNS record.
      returned: success
      type: int
      sample: 1265277
    name:
      description: The name of the DNS record.
      returned: success
      type: string
      sample: web
    record_type:
      description: The type of the DNS record.
      returned: success
      type: string
      sample: CNAME
    data:
      description: Data of the DNS record.
      returned: success
      type: string
      sample: 10.10.10.10
    domain:
      description: Domain the DNS record is related to.
      returned: success
      type: string
      sample: example.com
    priority:
      description: Priority of the DNS record.
      returned: success
      type: int
      sample: 10
    ttl:
      description: Time to live of the DNS record.
      returned: success
      type: int
      sample: 300
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vultr import (
Vultr,
vultr_argument_spec,
)
# DNS record types accepted by the Vultr API for the record_type option.
RECORD_TYPES = [
    'A', 'AAAA', 'CNAME', 'MX', 'TXT', 'NS', 'SRV', 'CAA', 'SSHFP',
]
class AnsibleVultrDnsRecord(Vultr):
    """Implements create/update/delete of a single Vultr DNS record."""

    def __init__(self, module):
        super(AnsibleVultrDnsRecord, self).__init__(module, "vultr_dns_record")
        # Maps Vultr API response keys to this module's result keys;
        # 'key' renames a field (e.g. RECORDID -> id).
        self.returns = {
            'RECORDID': dict(key='id'),
            'name': dict(),
            'record': dict(),
            'priority': dict(),
            'data': dict(),
            'type': dict(key='record_type'),
            'ttl': dict(),
        }

    def get_record(self):
        """Return the matching existing record dict, or {} if none exists.

        Records are matched on record_type and name.  With multiple=yes,
        'data' is used as an extra key to select one of several similar
        records; without it, more than one match is a hard failure.
        """
        records = self.api_query(path="/v1/dns/records?domain=%s" % self.module.params.get('domain'))
        multiple = self.module.params.get('multiple')
        data = self.module.params.get('data')
        name = self.module.params.get('name')
        record_type = self.module.params.get('record_type')
        result = {}
        for record in records or []:
            if record.get('type') != record_type:
                continue
            if record.get('name') == name:
                if not multiple:
                    if result:
                        self.module.fail_json(msg="More than one record with record_type=%s and name=%s params. "
                                                  "Use multiple=yes for more than one record." % (record_type, name))
                    else:
                        result = record
                elif record.get('data') == data:
                    return record
        return result

    def present_record(self):
        """Ensure the record exists: create it or update it as needed."""
        record = self.get_record()
        if not record:
            record = self._create_record(record)
        else:
            record = self._update_record(record)
        return record

    def _create_record(self, record):
        # Creates the record (skipped in check mode) and records a diff.
        self.result['changed'] = True
        data = {
            'name': self.module.params.get('name'),
            'domain': self.module.params.get('domain'),
            'data': self.module.params.get('data'),
            'type': self.module.params.get('record_type'),
            'priority': self.module.params.get('priority'),
            'ttl': self.module.params.get('ttl'),
        }
        self.result['diff']['before'] = {}
        self.result['diff']['after'] = data
        if not self.module.check_mode:
            self.api_query(
                path="/v1/dns/create_record",
                method="POST",
                data=data
            )
            # Re-read so the returned record includes API-assigned fields.
            record = self.get_record()
        return record

    def _update_record(self, record):
        # Updates the record if any submitted field differs from the API's
        # current value.
        data = {
            'RECORDID': record['RECORDID'],
            'name': self.module.params.get('name'),
            'domain': self.module.params.get('domain'),
            'data': self.module.params.get('data'),
            'type': self.module.params.get('record_type'),
            'priority': self.module.params.get('priority'),
            'ttl': self.module.params.get('ttl'),
        }
        # NOTE(review): comparison is by !=; if the API returns numeric
        # fields as strings this could report spurious changes -- confirm
        # against actual API responses.
        has_changed = [k for k in data if k in record and data[k] != record[k]]
        if has_changed:
            self.result['changed'] = True
            self.result['diff']['before'] = record
            self.result['diff']['after'] = record.copy()
            self.result['diff']['after'].update(data)
            if not self.module.check_mode:
                self.api_query(
                    path="/v1/dns/update_record",
                    method="POST",
                    data=data
                )
                record = self.get_record()
        return record

    def absent_record(self):
        """Ensure the record does not exist: delete it if found."""
        record = self.get_record()
        if record:
            self.result['changed'] = True
            data = {
                'RECORDID': record['RECORDID'],
                'domain': self.module.params.get('domain'),
            }
            self.result['diff']['before'] = record
            self.result['diff']['after'] = {}
            if not self.module.check_mode:
                self.api_query(
                    path="/v1/dns/delete_record",
                    method="POST",
                    data=data
                )
        return record
def main():
    """Ansible entry point: declare arguments and dispatch on state."""
    argument_spec = vultr_argument_spec()
    argument_spec.update(
        domain=dict(required=True),
        name=dict(default="", aliases=['subrecord']),
        state=dict(choices=['present', 'absent'], default='present'),
        ttl=dict(type='int', default=300),
        record_type=dict(choices=RECORD_TYPES, default='A', aliases=['type']),
        multiple=dict(type='bool', default=False),
        priority=dict(type='int', default=0),
        data=dict(),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_if=[
            ('state', 'present', ['data']),
            ('multiple', True, ['data']),
        ],
        supports_check_mode=True,
    )

    vultr_record = AnsibleVultrDnsRecord(module)
    state = module.params.get('state')
    if state == "absent":
        record = vultr_record.absent_record()
    else:
        record = vultr_record.present_record()

    module.exit_json(**vultr_record.get_result(record))


if __name__ == '__main__':
    main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Python test set -- math module
# XXXX Should not do tests around zero only
from test.test_support import run_unittest, verbose
import unittest
import math
import os
import sys
import random
# Tolerance used by ftest() comparisons, plus the special float values.
eps = 1E-05
NAN = float('nan')
INF = float('inf')
NINF = float('-inf')

# Locate the file of test values next to this script, whether it is run
# directly (argv[0]) or imported (__file__).
file = sys.argv[0] if __name__ == '__main__' else __file__
test_dir = os.path.dirname(file) or os.curdir
test_file = os.path.join(test_dir, 'cmath_testcases.txt')
def parse_testfile(fname):
    """Parse a file with test values.

    Empty lines or lines starting with -- are ignored.  Every other line
    has the shape ``id fn arg_real arg_imag -> exp_real exp_imag [flags]``
    and is yielded as the tuple
    (id, fn, arg_real, arg_imag, exp_real, exp_imag, flags).
    """
    with open(fname) as fp:
        for line in fp:
            # Skip comment lines and blank lines.
            if line.startswith('--') or not line.strip():
                continue
            lhs, rhs = line.split('->')
            case_id, fn, arg_real, arg_imag = lhs.split()
            rhs_pieces = rhs.split()
            exp_real, exp_imag = rhs_pieces[:2]
            flags = rhs_pieces[2:]
            yield (case_id, fn,
                   float(arg_real), float(arg_imag),
                   float(exp_real), float(exp_imag),
                   flags)
class MathTests(unittest.TestCase):
    def ftest(self, name, value, expected):
        # Approximate-equality helper: fail if value differs from expected
        # by more than the module-level tolerance ``eps``.
        # NOTE(review): abs(value-expected) is unsuitable when either side
        # is inf/nan -- callers appear to pass only finite expected values.
        if abs(value-expected) > eps:
            # Use %r instead of %f so the error message
            # displays full precision. Otherwise discrepancies
            # in the last few bits will lead to very confusing
            # error messages
            self.fail('%s returned %r, expected %r' %
                      (name, value, expected))
    def testConstants(self):
        # pi and e to the precision that ftest's eps tolerance can verify.
        self.ftest('pi', math.pi, 3.1415926)
        self.ftest('e', math.e, 2.7182818)

    def testAcos(self):
        self.assertRaises(TypeError, math.acos)
        self.ftest('acos(-1)', math.acos(-1), math.pi)
        self.ftest('acos(0)', math.acos(0), math.pi/2)
        self.ftest('acos(1)', math.acos(1), 0)
        # acos is only defined on [-1, 1]; infinities are out of domain.
        self.assertRaises(ValueError, math.acos, INF)
        self.assertRaises(ValueError, math.acos, NINF)
        self.assert_(math.isnan(math.acos(NAN)))

    def testAcosh(self):
        self.assertRaises(TypeError, math.acosh)
        self.ftest('acosh(1)', math.acosh(1), 0)
        self.ftest('acosh(2)', math.acosh(2), 1.3169578969248168)
        # acosh is only defined for arguments >= 1.
        self.assertRaises(ValueError, math.acosh, 0)
        self.assertRaises(ValueError, math.acosh, -1)
        self.assertEquals(math.acosh(INF), INF)
        self.assertRaises(ValueError, math.acosh, NINF)
        self.assert_(math.isnan(math.acosh(NAN)))

    def testAsin(self):
        self.assertRaises(TypeError, math.asin)
        self.ftest('asin(-1)', math.asin(-1), -math.pi/2)
        self.ftest('asin(0)', math.asin(0), 0)
        self.ftest('asin(1)', math.asin(1), math.pi/2)
        # asin is only defined on [-1, 1]; infinities are out of domain.
        self.assertRaises(ValueError, math.asin, INF)
        self.assertRaises(ValueError, math.asin, NINF)
        self.assert_(math.isnan(math.asin(NAN)))

    def testAsinh(self):
        self.assertRaises(TypeError, math.asinh)
        self.ftest('asinh(0)', math.asinh(0), 0)
        self.ftest('asinh(1)', math.asinh(1), 0.88137358701954305)
        self.ftest('asinh(-1)', math.asinh(-1), -0.88137358701954305)
        # asinh is defined on the whole real line, including infinities.
        self.assertEquals(math.asinh(INF), INF)
        self.assertEquals(math.asinh(NINF), NINF)
        self.assert_(math.isnan(math.asinh(NAN)))

    def testAtan(self):
        self.assertRaises(TypeError, math.atan)
        self.ftest('atan(-1)', math.atan(-1), -math.pi/4)
        self.ftest('atan(0)', math.atan(0), 0)
        self.ftest('atan(1)', math.atan(1), math.pi/4)
        # atan saturates at +/- pi/2 for infinite arguments.
        self.ftest('atan(inf)', math.atan(INF), math.pi/2)
        self.ftest('atan(-inf)', math.atan(NINF), -math.pi/2)
        self.assert_(math.isnan(math.atan(NAN)))
def testAtanh(self):
self.assertRaises(TypeError, math.atan)
self.ftest('atanh(0)', math.atanh(0), 0)
self.ftest('atanh(0.5)', math.atanh(0.5), 0.54930614433405489)
self.ftest('atanh(-0.5)', math.atanh(-0.5), -0.54930614433405489)
self.assertRaises(ValueError, math.atanh, 1)
self.assertRaises(ValueError, math.atanh, -1)
self.assertRaises(ValueError, math.atanh, INF)
self.assertRaises(ValueError, math.atanh, NINF)
self.assert_(math.isnan(math.atanh(NAN)))
    def testAtan2(self):
        # Exhaustive sign/special-value coverage of atan2(y, x), organised
        # by the first argument.  IEEE 754 specifies a result for every
        # combination except when either argument is NaN.
        self.assertRaises(TypeError, math.atan2)
        self.ftest('atan2(-1, 0)', math.atan2(-1, 0), -math.pi/2)
        self.ftest('atan2(-1, 1)', math.atan2(-1, 1), -math.pi/4)
        self.ftest('atan2(0, 1)', math.atan2(0, 1), 0)
        self.ftest('atan2(1, 1)', math.atan2(1, 1), math.pi/4)
        self.ftest('atan2(1, 0)', math.atan2(1, 0), math.pi/2)

        # math.atan2(0, x)
        self.ftest('atan2(0., -inf)', math.atan2(0., NINF), math.pi)
        self.ftest('atan2(0., -2.3)', math.atan2(0., -2.3), math.pi)
        self.ftest('atan2(0., -0.)', math.atan2(0., -0.), math.pi)
        self.assertEqual(math.atan2(0., 0.), 0.)
        self.assertEqual(math.atan2(0., 2.3), 0.)
        self.assertEqual(math.atan2(0., INF), 0.)
        self.assert_(math.isnan(math.atan2(0., NAN)))
        # math.atan2(-0, x) -- negative zero selects the -pi branch.
        self.ftest('atan2(-0., -inf)', math.atan2(-0., NINF), -math.pi)
        self.ftest('atan2(-0., -2.3)', math.atan2(-0., -2.3), -math.pi)
        self.ftest('atan2(-0., -0.)', math.atan2(-0., -0.), -math.pi)
        self.assertEqual(math.atan2(-0., 0.), -0.)
        self.assertEqual(math.atan2(-0., 2.3), -0.)
        self.assertEqual(math.atan2(-0., INF), -0.)
        self.assert_(math.isnan(math.atan2(-0., NAN)))
        # math.atan2(INF, x)
        self.ftest('atan2(inf, -inf)', math.atan2(INF, NINF), math.pi*3/4)
        self.ftest('atan2(inf, -2.3)', math.atan2(INF, -2.3), math.pi/2)
        self.ftest('atan2(inf, -0.)', math.atan2(INF, -0.0), math.pi/2)
        self.ftest('atan2(inf, 0.)', math.atan2(INF, 0.0), math.pi/2)
        self.ftest('atan2(inf, 2.3)', math.atan2(INF, 2.3), math.pi/2)
        self.ftest('atan2(inf, inf)', math.atan2(INF, INF), math.pi/4)
        self.assert_(math.isnan(math.atan2(INF, NAN)))
        # math.atan2(NINF, x)
        self.ftest('atan2(-inf, -inf)', math.atan2(NINF, NINF), -math.pi*3/4)
        self.ftest('atan2(-inf, -2.3)', math.atan2(NINF, -2.3), -math.pi/2)
        self.ftest('atan2(-inf, -0.)', math.atan2(NINF, -0.0), -math.pi/2)
        self.ftest('atan2(-inf, 0.)', math.atan2(NINF, 0.0), -math.pi/2)
        self.ftest('atan2(-inf, 2.3)', math.atan2(NINF, 2.3), -math.pi/2)
        self.ftest('atan2(-inf, inf)', math.atan2(NINF, INF), -math.pi/4)
        self.assert_(math.isnan(math.atan2(NINF, NAN)))
        # math.atan2(+finite, x)
        self.ftest('atan2(2.3, -inf)', math.atan2(2.3, NINF), math.pi)
        self.ftest('atan2(2.3, -0.)', math.atan2(2.3, -0.), math.pi/2)
        self.ftest('atan2(2.3, 0.)', math.atan2(2.3, 0.), math.pi/2)
        self.assertEqual(math.atan2(2.3, INF), 0.)
        self.assert_(math.isnan(math.atan2(2.3, NAN)))
        # math.atan2(-finite, x)
        self.ftest('atan2(-2.3, -inf)', math.atan2(-2.3, NINF), -math.pi)
        self.ftest('atan2(-2.3, -0.)', math.atan2(-2.3, -0.), -math.pi/2)
        self.ftest('atan2(-2.3, 0.)', math.atan2(-2.3, 0.), -math.pi/2)
        self.assertEqual(math.atan2(-2.3, INF), -0.)
        self.assert_(math.isnan(math.atan2(-2.3, NAN)))
        # math.atan2(NAN, x) -- NaN in either position propagates.
        self.assert_(math.isnan(math.atan2(NAN, NINF)))
        self.assert_(math.isnan(math.atan2(NAN, -2.3)))
        self.assert_(math.isnan(math.atan2(NAN, -0.)))
        self.assert_(math.isnan(math.atan2(NAN, 0.)))
        self.assert_(math.isnan(math.atan2(NAN, 2.3)))
        self.assert_(math.isnan(math.atan2(NAN, INF)))
        self.assert_(math.isnan(math.atan2(NAN, NAN)))
    def testCeil(self):
        # NOTE(review): the 1L literal below makes this file Python 2 only.
        self.assertRaises(TypeError, math.ceil)
        # These types will be int in py3k.
        self.assertEquals(float, type(math.ceil(1)))
        self.assertEquals(float, type(math.ceil(1L)))
        self.assertEquals(float, type(math.ceil(1.0)))
        self.ftest('ceil(0.5)', math.ceil(0.5), 1)
        self.ftest('ceil(1.0)', math.ceil(1.0), 1)
        self.ftest('ceil(1.5)', math.ceil(1.5), 2)
        self.ftest('ceil(-0.5)', math.ceil(-0.5), 0)
        self.ftest('ceil(-1.0)', math.ceil(-1.0), -1)
        self.ftest('ceil(-1.5)', math.ceil(-1.5), -1)
        # Infinities and NaN pass through unchanged.
        self.assertEquals(math.ceil(INF), INF)
        self.assertEquals(math.ceil(NINF), NINF)
        self.assert_(math.isnan(math.ceil(NAN)))

        # An object convertible via __float__ is accepted...
        class TestCeil(object):
            def __float__(self):
                return 41.3
        # ...one with no conversion protocol is not.
        class TestNoCeil(object):
            pass
        self.ftest('ceil(TestCeil())', math.ceil(TestCeil()), 42)
        self.assertRaises(TypeError, math.ceil, TestNoCeil())

        # An instance-level __ceil__ attribute is ignored by math.ceil here.
        t = TestNoCeil()
        t.__ceil__ = lambda *args: args
        self.assertRaises(TypeError, math.ceil, t)
        self.assertRaises(TypeError, math.ceil, t, 0)
# Only define this version of testCopysign on IEEE-754 platforms, where the
# sign of zero is observable.
# NOTE(review): a second plain `def testCopysign` appears later in this
# class and silently replaces this one, so this IEEE version may never
# run -- consider renaming one of the two.
if float.__getformat__("double").startswith("IEEE"):
    def testCopysign(self):
        """copysign transfers the sign bit, including signed zeros and infs.

        Uses the bare name ``copysign`` -- presumably imported from math
        earlier in the file; TODO confirm.
        """
        self.assertRaises(TypeError, math.copysign)
        # copysign should let us distinguish signs of zeros
        self.assertEquals(copysign(1., 0.), 1.)
        self.assertEquals(copysign(1., -0.), -1.)
        self.assertEquals(copysign(INF, 0.), INF)
        self.assertEquals(copysign(INF, -0.), NINF)
        self.assertEquals(copysign(NINF, 0.), INF)
        self.assertEquals(copysign(NINF, -0.), NINF)
        # and of infinities
        self.assertEquals(copysign(1., INF), 1.)
        self.assertEquals(copysign(1., NINF), -1.)
        self.assertEquals(copysign(INF, INF), INF)
        self.assertEquals(copysign(INF, NINF), NINF)
        self.assertEquals(copysign(NINF, INF), INF)
        self.assertEquals(copysign(NINF, NINF), NINF)
        # A NaN magnitude stays a NaN whatever the sign source is.
        self.assert_(math.isnan(copysign(NAN, 1.)))
        self.assert_(math.isnan(copysign(NAN, INF)))
        self.assert_(math.isnan(copysign(NAN, NINF)))
        self.assert_(math.isnan(copysign(NAN, NAN)))
        # copysign(INF, NAN) may be INF or it may be NINF, since
        # we don't know whether the sign bit of NAN is set on any
        # given platform.
        self.assert_(math.isinf(copysign(INF, NAN)))
        # similarly, copysign(2., NAN) could be 2. or -2.
        self.assertEquals(abs(copysign(2., NAN)), 2.)
def testCos(self):
    """Check math.cos at quarter-turn points and for special values."""
    self.assertRaises(TypeError, math.cos)
    # Exact quarter-turn arguments with known cosine values.
    for label, arg, expected in (('cos(-pi/2)', -math.pi/2, 0),
                                 ('cos(0)', 0, 1),
                                 ('cos(pi/2)', math.pi/2, 0),
                                 ('cos(pi)', math.pi, -1)):
        self.ftest(label, math.cos(arg), expected)
    # Platforms differ: cos(+/-inf) either returns NaN or raises ValueError.
    try:
        self.assert_(math.isnan(math.cos(INF)))
        self.assert_(math.isnan(math.cos(NINF)))
    except ValueError:
        self.assertRaises(ValueError, math.cos, INF)
        self.assertRaises(ValueError, math.cos, NINF)
    self.assert_(math.isnan(math.cos(NAN)))
def testCosh(self):
    """Check math.cosh identities and special-value behavior."""
    self.assertRaises(TypeError, math.cosh)
    self.ftest('cosh(0)', math.cosh(0), 1)
    # Identity cosh(2x) = 2*cosh(x)**2 - 1 (thanks to Lambert).
    self.ftest('cosh(2)-2*cosh(1)**2', math.cosh(2) - 2 * math.cosh(1) ** 2, -1)
    # cosh is even and unbounded: both infinities map to +inf.
    self.assertEquals(math.cosh(INF), INF)
    self.assertEquals(math.cosh(NINF), INF)
    self.assert_(math.isnan(math.cosh(NAN)))
def testDegrees(self):
    """math.degrees converts radians to degrees."""
    self.assertRaises(TypeError, math.degrees)
    conversions = (('degrees(pi)', math.pi, 180.0),
                   ('degrees(pi/2)', math.pi / 2, 90.0),
                   ('degrees(-pi/4)', -math.pi / 4, -45.0))
    for label, radians, expected in conversions:
        self.ftest(label, math.degrees(radians), expected)
def testExp(self):
    """Check math.exp at a few exact points and for special values."""
    self.assertRaises(TypeError, math.exp)
    for label, arg, expected in (('exp(-1)', -1, 1 / math.e),
                                 ('exp(0)', 0, 1),
                                 ('exp(1)', 1, math.e)):
        self.ftest(label, math.exp(arg), expected)
    # exp grows to +inf at +inf and decays to +0 at -inf.
    self.assertEquals(math.exp(INF), INF)
    self.assertEquals(math.exp(NINF), 0.)
    self.assert_(math.isnan(math.exp(NAN)))
def testFabs(self):
    """math.fabs returns the absolute value as a float."""
    self.assertRaises(TypeError, math.fabs)
    for value in (-1, 0, 1):
        self.ftest('fabs(%d)' % value, math.fabs(value), abs(value))
def testFactorial(self):
    """math.factorial against a pure-Python reference product."""

    def fact(n):
        # Reference implementation: the product 1*2*...*n.
        result = 1
        for i in range(1, int(n) + 1):
            result *= i
        return result

    values = range(10) + [50, 100, 500]
    random.shuffle(values)
    # Bug fix: iterate over the shuffled `values` (which includes the large
    # inputs 50, 100 and 500) instead of only range(10); previously `values`
    # was built and shuffled but never used, leaving the large inputs
    # untested.
    for x in values:
        # Each input is exercised as int, long and (integral) float.
        for cast in (int, long, float):
            self.assertEqual(math.factorial(cast(x)), fact(x),
                             (x, fact(x), math.factorial(x)))
    # Negative and non-integral arguments are domain errors.
    self.assertRaises(ValueError, math.factorial, -1)
    self.assertRaises(ValueError, math.factorial, math.pi)
def testFloor(self):
self.assertRaises(TypeError, math.floor)
# These types will be int in py3k.
self.assertEquals(float, type(math.floor(1)))
self.assertEquals(float, type(math.floor(1L)))
self.assertEquals(float, type(math.floor(1.0)))
self.ftest('floor(0.5)', math.floor(0.5), 0)
self.ftest('floor(1.0)', math.floor(1.0), 1)
self.ftest('floor(1.5)', math.floor(1.5), 1)
self.ftest('floor(-0.5)', math.floor(-0.5), -1)
self.ftest('floor(-1.0)', math.floor(-1.0), -1)
self.ftest('floor(-1.5)', math.floor(-1.5), -2)
# pow() relies on floor() to check for integers
# This fails on some platforms - so check it here
self.ftest('floor(1.23e167)', math.floor(1.23e167), 1.23e167)
self.ftest('floor(-1.23e167)', math.floor(-1.23e167), -1.23e167)
self.assertEquals(math.ceil(INF), INF)
self.assertEquals(math.ceil(NINF), NINF)
self.assert_(math.isnan(math.floor(NAN)))
class TestFloor(object):
def __float__(self):
return 42.3
class TestNoFloor(object):
pass
self.ftest('floor(TestFloor())', math.floor(TestFloor()), 42)
self.assertRaises(TypeError, math.floor, TestNoFloor())
t = TestNoFloor()
t.__floor__ = lambda *args: args
self.assertRaises(TypeError, math.floor, t)
self.assertRaises(TypeError, math.floor, t, 0)
def testFmod(self):
    """math.fmod: C-style remainder; the result's sign follows the dividend."""
    self.assertRaises(TypeError, math.fmod)
    self.ftest('fmod(10,1)', math.fmod(10,1), 0)
    self.ftest('fmod(10,0.5)', math.fmod(10,0.5), 0)
    self.ftest('fmod(10,1.5)', math.fmod(10,1.5), 1)
    self.ftest('fmod(-10,1)', math.fmod(-10,1), 0)
    self.ftest('fmod(-10,0.5)', math.fmod(-10,0.5), 0)
    self.ftest('fmod(-10,1.5)', math.fmod(-10,1.5), -1)
    # A NaN in either operand propagates.
    self.assert_(math.isnan(math.fmod(NAN, 1.)))
    self.assert_(math.isnan(math.fmod(1., NAN)))
    self.assert_(math.isnan(math.fmod(NAN, NAN)))
    # Zero divisor or infinite dividend is a domain error.
    self.assertRaises(ValueError, math.fmod, 1., 0.)
    self.assertRaises(ValueError, math.fmod, INF, 1.)
    self.assertRaises(ValueError, math.fmod, NINF, 1.)
    self.assertRaises(ValueError, math.fmod, INF, 0.)
    # A finite dividend with an infinite divisor is returned unchanged.
    self.assertEquals(math.fmod(3.0, INF), 3.0)
    self.assertEquals(math.fmod(-3.0, INF), -3.0)
    self.assertEquals(math.fmod(3.0, NINF), 3.0)
    self.assertEquals(math.fmod(-3.0, NINF), -3.0)
    self.assertEquals(math.fmod(0.0, 3.0), 0.0)
    self.assertEquals(math.fmod(0.0, NINF), 0.0)
def testFrexp(self):
    """math.frexp splits a float into a (mantissa, exponent) pair."""
    self.assertRaises(TypeError, math.frexp)

    def check(name, got, expected):
        # Compare an actual (mantissa, exponent) pair to the expected pair;
        # the mantissa comparison is tolerance-based, the exponent exact.
        mant, exp = got
        emant, eexp = expected
        if abs(mant - emant) > eps or exp != eexp:
            self.fail('%s returned %r, expected %r' % (name, got, expected))

    check('frexp(-1)', math.frexp(-1), (-0.5, 1))
    check('frexp(0)', math.frexp(0), (0, 0))
    check('frexp(1)', math.frexp(1), (0.5, 1))
    check('frexp(2)', math.frexp(2), (0.5, 2))
    # The mantissa slot carries the special value for inf/-inf/NaN.
    self.assertEquals(math.frexp(INF)[0], INF)
    self.assertEquals(math.frexp(NINF)[0], NINF)
    self.assert_(math.isnan(math.frexp(NAN)[0]))
def testFsum(self):
    """Exercise math.fsum against an exact integer-based reference summation."""
    # math.fsum relies on exact rounding for correct operation.
    # There's a known problem with IA32 floating-point that causes
    # inexact rounding in some situations, and will cause the
    # math.fsum tests below to fail; see issue #2937. On non IEEE
    # 754 platforms, and on IEEE 754 platforms that exhibit the
    # problem described in issue #2937, we simply skip the whole
    # test.
    if not float.__getformat__("double").startswith("IEEE"):
        return
    # on IEEE 754 compliant machines, both of the expressions
    # below should round to 10000000000000002.0.
    if 1e16+2.0 != 1e16+2.9999:
        return
    # Python version of math.fsum, for comparison. Uses a
    # different algorithm based on frexp, ldexp and integer
    # arithmetic.
    from sys import float_info
    mant_dig = float_info.mant_dig
    etiny = float_info.min_exp - mant_dig

    def msum(iterable):
        """Full precision summation. Compute sum(iterable) without any
        intermediate accumulation of error. Based on the 'lsum' function
        at http://code.activestate.com/recipes/393090/
        """
        # Accumulate the exact sum as an integer mantissa/exponent pair.
        tmant, texp = 0, 0
        for x in iterable:
            mant, exp = math.frexp(x)
            mant, exp = int(math.ldexp(mant, mant_dig)), exp - mant_dig
            if texp > exp:
                tmant <<= texp-exp
                texp = exp
            else:
                mant <<= exp-texp
            tmant += mant
        # Round tmant * 2**texp to a float. The original recipe
        # used float(str(tmant)) * 2.0**texp for this, but that's
        # a little unsafe because str -> float conversion can't be
        # relied upon to do correct rounding on all platforms.
        tail = max(len(bin(abs(tmant)))-2 - mant_dig, etiny - texp)
        if tail > 0:
            # Round-half-to-even on the bits being dropped.
            h = 1 << (tail-1)
            tmant = tmant // (2*h) + bool(tmant & h and tmant & 3*h-1)
            texp += tail
        return math.ldexp(tmant, texp)

    # Hand-picked cases with exactly representable expected sums.
    test_values = [
        ([], 0.0),
        ([0.0], 0.0),
        ([1e100, 1.0, -1e100, 1e-100, 1e50, -1.0, -1e50], 1e-100),
        ([2.0**53, -0.5, -2.0**-54], 2.0**53-1.0),
        ([2.0**53, 1.0, 2.0**-100], 2.0**53+2.0),
        ([2.0**53+10.0, 1.0, 2.0**-100], 2.0**53+12.0),
        ([2.0**53-4.0, 0.5, 2.0**-54], 2.0**53-3.0),
        ([1./n for n in range(1, 1001)],
         float.fromhex('0x1.df11f45f4e61ap+2')),
        ([(-1.)**n/n for n in range(1, 1001)],
         float.fromhex('-0x1.62a2af1bd3624p-1')),
        ([1.7**(i+1)-1.7**i for i in range(1000)] + [-1.7**1000], -1.0),
        ([1e16, 1., 1e-16], 10000000000000002.0),
        ([1e16-2., 1.-2.**-53, -(1e16-2.), -(1.-2.**-53)], 0.0),
        # exercise code for resizing partials array
        ([2.**n - 2.**(n+50) + 2.**(n+52) for n in range(-1074, 972, 2)] +
         [-2.**1022],
         float.fromhex('0x1.5555555555555p+970')),
    ]
    for i, (vals, expected) in enumerate(test_values):
        try:
            actual = math.fsum(vals)
        except OverflowError:
            self.fail("test %d failed: got OverflowError, expected %r "
                      "for math.fsum(%.100r)" % (i, expected, vals))
        except ValueError:
            self.fail("test %d failed: got ValueError, expected %r "
                      "for math.fsum(%.100r)" % (i, expected, vals))
        self.assertEqual(actual, expected)

    # Randomized comparison against the exact reference summation.
    from random import random, gauss, shuffle
    for j in xrange(1000):
        vals = [7, 1e100, -7, -1e100, -9e-20, 8e-20] * 10
        s = 0
        for i in xrange(200):
            v = gauss(0, random()) ** 7 - s
            s += v
            vals.append(v)
        shuffle(vals)
        # NOTE(review): msum(vals) is computed twice here; the first
        # result `s` is unused by the assertion.
        s = msum(vals)
        self.assertEqual(msum(vals), math.fsum(vals))
def testHypot(self):
    """math.hypot: Euclidean norm, with IEEE special-value rules."""
    self.assertRaises(TypeError, math.hypot)
    self.ftest('hypot(0,0)', math.hypot(0, 0), 0)
    self.ftest('hypot(3,4)', math.hypot(3, 4), 5)
    # An infinite coordinate dominates, even when paired with a NaN.
    for a, b in ((NAN, INF), (INF, NAN), (NAN, NINF), (NINF, NAN)):
        self.assertEqual(math.hypot(a, b), INF)
    # A NaN paired with a finite coordinate propagates.
    self.assert_(math.isnan(math.hypot(1.0, NAN)))
    self.assert_(math.isnan(math.hypot(NAN, -2.0)))
def testLdexp(self):
    """math.ldexp(x, n) == x * 2**n, with overflow/underflow handling."""
    self.assertRaises(TypeError, math.ldexp)
    self.ftest('ldexp(0,1)', math.ldexp(0,1), 0)
    self.ftest('ldexp(1,1)', math.ldexp(1,1), 2)
    self.ftest('ldexp(1,-1)', math.ldexp(1,-1), 0.5)
    self.ftest('ldexp(-1,1)', math.ldexp(-1,1), -2)
    # Overflow raises; underflow silently returns a signed zero.
    self.assertRaises(OverflowError, math.ldexp, 1., 1000000)
    self.assertRaises(OverflowError, math.ldexp, -1., 1000000)
    self.assertEquals(math.ldexp(1., -1000000), 0.)
    self.assertEquals(math.ldexp(-1., -1000000), -0.)
    self.assertEquals(math.ldexp(INF, 30), INF)
    self.assertEquals(math.ldexp(NINF, -213), NINF)
    self.assert_(math.isnan(math.ldexp(NAN, 0)))
    # large second argument
    for n in [10**5, 10L**5, 10**10, 10L**10, 10**20, 10**40]:
        # Huge negative exponents underflow finite x to a signed zero,
        # while infinities and NaNs are unaffected...
        self.assertEquals(math.ldexp(INF, -n), INF)
        self.assertEquals(math.ldexp(NINF, -n), NINF)
        self.assertEquals(math.ldexp(1., -n), 0.)
        self.assertEquals(math.ldexp(-1., -n), -0.)
        self.assertEquals(math.ldexp(0., -n), 0.)
        self.assertEquals(math.ldexp(-0., -n), -0.)
        self.assert_(math.isnan(math.ldexp(NAN, -n)))
        # ...and huge positive exponents overflow for nonzero finite x.
        self.assertRaises(OverflowError, math.ldexp, 1., n)
        self.assertRaises(OverflowError, math.ldexp, -1., n)
        self.assertEquals(math.ldexp(0., n), 0.)
        self.assertEquals(math.ldexp(-0., n), -0.)
        self.assertEquals(math.ldexp(INF, n), INF)
        self.assertEquals(math.ldexp(NINF, n), NINF)
        self.assert_(math.isnan(math.ldexp(NAN, n)))
def testLog(self):
    """math.log: natural log, the optional-base form, and special values."""
    self.assertRaises(TypeError, math.log)
    self.ftest('log(1/e)', math.log(1/math.e), -1)
    self.ftest('log(1)', math.log(1), 0)
    self.ftest('log(e)', math.log(math.e), 1)
    # Two-argument form, including huge integer arguments.
    self.ftest('log(32,2)', math.log(32,2), 5)
    self.ftest('log(10**40, 10)', math.log(10**40, 10), 40)
    self.ftest('log(10**40, 10**20)', math.log(10**40, 10**20), 2)
    self.assertEquals(math.log(INF), INF)
    # Negative arguments, including -inf, are domain errors.
    self.assertRaises(ValueError, math.log, NINF)
    self.assert_(math.isnan(math.log(NAN)))
def testLog1p(self):
    """math.log1p: log(1+x), and its behavior for special/huge inputs."""
    self.assertRaises(TypeError, math.log1p)
    self.ftest('log1p(1/e -1)', math.log1p(1/math.e-1), -1)
    self.ftest('log1p(0)', math.log1p(0), 0)
    self.ftest('log1p(e-1)', math.log1p(math.e-1), 1)
    self.ftest('log1p(1)', math.log1p(1), math.log(2))
    self.assertEquals(math.log1p(INF), INF)
    # Arguments below -1 (here -inf) are domain errors.
    self.assertRaises(ValueError, math.log1p, NINF)
    self.assert_(math.isnan(math.log1p(NAN)))
    # Huge integer arguments are accepted and agree with the float
    # conversion; 62.383246250395075 is log(1 + 2**90).
    n= 2**90
    self.assertAlmostEquals(math.log1p(n), 62.383246250395075)
    self.assertAlmostEquals(math.log1p(n), math.log1p(float(n)))
def testLog10(self):
    """math.log10: base-10 logarithm and special values."""
    self.assertRaises(TypeError, math.log10)
    self.ftest('log10(0.1)', math.log10(0.1), -1)
    self.ftest('log10(1)', math.log10(1), 0)
    self.ftest('log10(10)', math.log10(10), 1)
    # Bug fix: this previously asserted math.log(INF) (copy/paste from
    # testLog); math.log10 is the function under test here.
    self.assertEquals(math.log10(INF), INF)
    # Negative arguments, including -inf, are domain errors.
    self.assertRaises(ValueError, math.log10, NINF)
    self.assert_(math.isnan(math.log10(NAN)))
def testModf(self):
    """math.modf splits a float into (fractional, integral) parts."""
    self.assertRaises(TypeError, math.modf)
    def testmodf(name, (v1, v2), (e1, e2)):
        # NOTE(review): the integral part (v2) is checked for any nonzero
        # difference rather than against eps; modf's integral part should
        # be exact, so this looks deliberately strict -- confirm.
        if abs(v1-e1) > eps or abs(v2-e2):
            self.fail('%s returned %r, expected %r'%\
                      (name, (v1,v2), (e1,e2)))
    testmodf('modf(1.5)', math.modf(1.5), (0.5, 1.0))
    testmodf('modf(-1.5)', math.modf(-1.5), (-0.5, -1.0))
    # Infinities keep a (signed) zero fractional part.
    self.assertEquals(math.modf(INF), (0.0, INF))
    self.assertEquals(math.modf(NINF), (-0.0, NINF))
    # Both components of modf(NaN) are NaN.
    modf_nan = math.modf(NAN)
    self.assert_(math.isnan(modf_nan[0]))
    self.assert_(math.isnan(modf_nan[1]))
def testPow(self):
    """Exhaustive IEEE-754 special-value checks for math.pow."""
    self.assertRaises(TypeError, math.pow)
    self.ftest('pow(0,1)', math.pow(0,1), 0)
    self.ftest('pow(1,0)', math.pow(1,0), 1)
    self.ftest('pow(2,1)', math.pow(2,1), 2)
    self.ftest('pow(2,-1)', math.pow(2,-1), 0.5)
    self.assertEqual(math.pow(INF, 1), INF)
    self.assertEqual(math.pow(NINF, 1), NINF)
    # pow(1, anything) is 1, even for infinite or NaN exponents.
    self.assertEqual((math.pow(1, INF)), 1.)
    self.assertEqual((math.pow(1, NINF)), 1.)
    self.assert_(math.isnan(math.pow(NAN, 1)))
    self.assert_(math.isnan(math.pow(2, NAN)))
    self.assert_(math.isnan(math.pow(0, NAN)))
    self.assertEqual(math.pow(1, NAN), 1)
    # pow(0., x)
    self.assertEqual(math.pow(0., INF), 0.)
    self.assertEqual(math.pow(0., 3.), 0.)
    self.assertEqual(math.pow(0., 2.3), 0.)
    self.assertEqual(math.pow(0., 2.), 0.)
    self.assertEqual(math.pow(0., 0.), 1.)
    self.assertEqual(math.pow(0., -0.), 1.)
    # Negative exponents of zero are domain errors (division by zero).
    self.assertRaises(ValueError, math.pow, 0., -2.)
    self.assertRaises(ValueError, math.pow, 0., -2.3)
    self.assertRaises(ValueError, math.pow, 0., -3.)
    self.assertRaises(ValueError, math.pow, 0., NINF)
    self.assert_(math.isnan(math.pow(0., NAN)))
    # pow(INF, x)
    self.assertEqual(math.pow(INF, INF), INF)
    self.assertEqual(math.pow(INF, 3.), INF)
    self.assertEqual(math.pow(INF, 2.3), INF)
    self.assertEqual(math.pow(INF, 2.), INF)
    self.assertEqual(math.pow(INF, 0.), 1.)
    self.assertEqual(math.pow(INF, -0.), 1.)
    self.assertEqual(math.pow(INF, -2.), 0.)
    self.assertEqual(math.pow(INF, -2.3), 0.)
    self.assertEqual(math.pow(INF, -3.), 0.)
    self.assertEqual(math.pow(INF, NINF), 0.)
    self.assert_(math.isnan(math.pow(INF, NAN)))
    # pow(-0., x)
    self.assertEqual(math.pow(-0., INF), 0.)
    # Odd integer exponents preserve the sign of the zero.
    self.assertEqual(math.pow(-0., 3.), -0.)
    self.assertEqual(math.pow(-0., 2.3), 0.)
    self.assertEqual(math.pow(-0., 2.), 0.)
    self.assertEqual(math.pow(-0., 0.), 1.)
    self.assertEqual(math.pow(-0., -0.), 1.)
    self.assertRaises(ValueError, math.pow, -0., -2.)
    self.assertRaises(ValueError, math.pow, -0., -2.3)
    self.assertRaises(ValueError, math.pow, -0., -3.)
    self.assertRaises(ValueError, math.pow, -0., NINF)
    self.assert_(math.isnan(math.pow(-0., NAN)))
    # pow(NINF, x)
    self.assertEqual(math.pow(NINF, INF), INF)
    self.assertEqual(math.pow(NINF, 3.), NINF)
    self.assertEqual(math.pow(NINF, 2.3), INF)
    self.assertEqual(math.pow(NINF, 2.), INF)
    self.assertEqual(math.pow(NINF, 0.), 1.)
    self.assertEqual(math.pow(NINF, -0.), 1.)
    self.assertEqual(math.pow(NINF, -2.), 0.)
    self.assertEqual(math.pow(NINF, -2.3), 0.)
    self.assertEqual(math.pow(NINF, -3.), -0.)
    self.assertEqual(math.pow(NINF, NINF), 0.)
    self.assert_(math.isnan(math.pow(NINF, NAN)))
    # pow(-1, x)
    self.assertEqual(math.pow(-1., INF), 1.)
    self.assertEqual(math.pow(-1., 3.), -1.)
    self.assertRaises(ValueError, math.pow, -1., 2.3)
    self.assertEqual(math.pow(-1., 2.), 1.)
    self.assertEqual(math.pow(-1., 0.), 1.)
    self.assertEqual(math.pow(-1., -0.), 1.)
    self.assertEqual(math.pow(-1., -2.), 1.)
    self.assertRaises(ValueError, math.pow, -1., -2.3)
    self.assertEqual(math.pow(-1., -3.), -1.)
    self.assertEqual(math.pow(-1., NINF), 1.)
    self.assert_(math.isnan(math.pow(-1., NAN)))
    # pow(1, x)
    self.assertEqual(math.pow(1., INF), 1.)
    self.assertEqual(math.pow(1., 3.), 1.)
    self.assertEqual(math.pow(1., 2.3), 1.)
    self.assertEqual(math.pow(1., 2.), 1.)
    self.assertEqual(math.pow(1., 0.), 1.)
    self.assertEqual(math.pow(1., -0.), 1.)
    self.assertEqual(math.pow(1., -2.), 1.)
    self.assertEqual(math.pow(1., -2.3), 1.)
    self.assertEqual(math.pow(1., -3.), 1.)
    self.assertEqual(math.pow(1., NINF), 1.)
    self.assertEqual(math.pow(1., NAN), 1.)
    # pow(x, 0) should be 1 for any x
    self.assertEqual(math.pow(2.3, 0.), 1.)
    self.assertEqual(math.pow(-2.3, 0.), 1.)
    self.assertEqual(math.pow(NAN, 0.), 1.)
    self.assertEqual(math.pow(2.3, -0.), 1.)
    self.assertEqual(math.pow(-2.3, -0.), 1.)
    self.assertEqual(math.pow(NAN, -0.), 1.)
    # pow(x, y) is invalid if x is negative and y is not integral
    self.assertRaises(ValueError, math.pow, -1., 2.3)
    self.assertRaises(ValueError, math.pow, -15., -3.1)
    # pow(x, NINF)
    self.assertEqual(math.pow(1.9, NINF), 0.)
    self.assertEqual(math.pow(1.1, NINF), 0.)
    self.assertEqual(math.pow(0.9, NINF), INF)
    self.assertEqual(math.pow(0.1, NINF), INF)
    self.assertEqual(math.pow(-0.1, NINF), INF)
    self.assertEqual(math.pow(-0.9, NINF), INF)
    self.assertEqual(math.pow(-1.1, NINF), 0.)
    self.assertEqual(math.pow(-1.9, NINF), 0.)
    # pow(x, INF)
    self.assertEqual(math.pow(1.9, INF), INF)
    self.assertEqual(math.pow(1.1, INF), INF)
    self.assertEqual(math.pow(0.9, INF), 0.)
    self.assertEqual(math.pow(0.1, INF), 0.)
    self.assertEqual(math.pow(-0.1, INF), 0.)
    self.assertEqual(math.pow(-0.9, INF), 0.)
    self.assertEqual(math.pow(-1.1, INF), INF)
    self.assertEqual(math.pow(-1.9, INF), INF)
    # pow(x, y) should work for x negative, y an integer
    self.ftest('(-2.)**3.', math.pow(-2.0, 3.0), -8.0)
    self.ftest('(-2.)**2.', math.pow(-2.0, 2.0), 4.0)
    self.ftest('(-2.)**1.', math.pow(-2.0, 1.0), -2.0)
    self.ftest('(-2.)**0.', math.pow(-2.0, 0.0), 1.0)
    self.ftest('(-2.)**-0.', math.pow(-2.0, -0.0), 1.0)
    self.ftest('(-2.)**-1.', math.pow(-2.0, -1.0), -0.5)
    self.ftest('(-2.)**-2.', math.pow(-2.0, -2.0), 0.25)
    self.ftest('(-2.)**-3.', math.pow(-2.0, -3.0), -0.125)
    self.assertRaises(ValueError, math.pow, -2.0, -0.5)
    self.assertRaises(ValueError, math.pow, -2.0, 0.5)
    # the following tests have been commented out since they don't
    # really belong here: the implementation of ** for floats is
    # independent of the implementation of math.pow
    #self.assertEqual(1**NAN, 1)
    #self.assertEqual(1**INF, 1)
    #self.assertEqual(1**NINF, 1)
    #self.assertEqual(1**0, 1)
    #self.assertEqual(1.**NAN, 1)
    #self.assertEqual(1.**INF, 1)
    #self.assertEqual(1.**NINF, 1)
    #self.assertEqual(1.**0, 1)
def testRadians(self):
    """math.radians converts degrees to radians."""
    self.assertRaises(TypeError, math.radians)
    cases = (('radians(180)', 180, math.pi),
             ('radians(90)', 90, math.pi / 2),
             ('radians(-45)', -45, -math.pi / 4))
    for label, degrees, expected in cases:
        self.ftest(label, math.radians(degrees), expected)
def testSin(self):
    """Check math.sin at key points and for special values."""
    self.assertRaises(TypeError, math.sin)
    for label, arg, expected in (('sin(0)', 0, 0),
                                 ('sin(pi/2)', math.pi/2, 1),
                                 ('sin(-pi/2)', -math.pi/2, -1)):
        self.ftest(label, math.sin(arg), expected)
    # Platforms differ: sin(+/-inf) either returns NaN or raises ValueError.
    try:
        self.assert_(math.isnan(math.sin(INF)))
        self.assert_(math.isnan(math.sin(NINF)))
    except ValueError:
        self.assertRaises(ValueError, math.sin, INF)
        self.assertRaises(ValueError, math.sin, NINF)
    self.assert_(math.isnan(math.sin(NAN)))
def testSinh(self):
    """math.sinh: odd, unbounded; identity and special-value checks."""
    self.assertRaises(TypeError, math.sinh)
    self.ftest('sinh(0)', math.sinh(0), 0)
    # Identity sinh(x)**2 - cosh(x)**2 == -1; sinh is an odd function.
    self.ftest('sinh(1)**2-cosh(1)**2', math.sinh(1)**2-math.cosh(1)**2, -1)
    self.ftest('sinh(1)+sinh(-1)', math.sinh(1)+math.sinh(-1), 0)
    self.assertEquals(math.sinh(INF), INF)
    self.assertEquals(math.sinh(NINF), NINF)
    self.assert_(math.isnan(math.sinh(NAN)))
def testSqrt(self):
    """math.sqrt on perfect squares and special values."""
    self.assertRaises(TypeError, math.sqrt)
    for arg, root in ((0, 0), (1, 1), (4, 2)):
        self.ftest('sqrt(%d)' % arg, math.sqrt(arg), root)
    self.assertEquals(math.sqrt(INF), INF)
    # Negative arguments (here -inf) are outside the domain.
    self.assertRaises(ValueError, math.sqrt, NINF)
    self.assert_(math.isnan(math.sqrt(NAN)))
def testTan(self):
    """Check math.tan at key points and for special values."""
    self.assertRaises(TypeError, math.tan)
    self.ftest('tan(0)', math.tan(0), 0)
    self.ftest('tan(pi/4)', math.tan(math.pi/4), 1)
    self.ftest('tan(-pi/4)', math.tan(-math.pi/4), -1)
    # Platforms differ: tan(+/-inf) either returns NaN or raises ValueError.
    try:
        self.assert_(math.isnan(math.tan(INF)))
        self.assert_(math.isnan(math.tan(NINF)))
    # Fix: catch only ValueError (matching testCos/testSin) instead of a
    # bare `except:`, which would have silently masked unrelated errors
    # such as a failed isnan assertion.
    except ValueError:
        self.assertRaises(ValueError, math.tan, INF)
        self.assertRaises(ValueError, math.tan, NINF)
    self.assert_(math.isnan(math.tan(NAN)))
def testTanh(self):
    """math.tanh: odd function saturating at +/-1, and signed-zero handling."""
    self.assertRaises(TypeError, math.tanh)
    self.ftest('tanh(0)', math.tanh(0), 0)
    self.ftest('tanh(1)+tanh(-1)', math.tanh(1)+math.tanh(-1), 0)
    self.ftest('tanh(inf)', math.tanh(INF), 1)
    self.ftest('tanh(-inf)', math.tanh(NINF), -1)
    self.assert_(math.isnan(math.tanh(NAN)))
    # check that tanh(-0.) == -0. on IEEE 754 systems
    if float.__getformat__("double").startswith("IEEE"):
        self.assertEqual(math.tanh(-0.), -0.)
        # Compare sign bits explicitly, since -0. == 0. numerically.
        self.assertEqual(math.copysign(1., math.tanh(-0.)),
                         math.copysign(1., -0.))
def test_trunc(self):
    """math.trunc truncates toward zero and delegates to __trunc__."""
    self.assertEqual(math.trunc(1), 1)
    self.assertEqual(math.trunc(-1), -1)
    # trunc returns a real int (unlike floor/ceil in 2.x).
    self.assertEqual(type(math.trunc(1)), int)
    self.assertEqual(type(math.trunc(1.5)), int)
    self.assertEqual(math.trunc(1.5), 1)
    self.assertEqual(math.trunc(-1.5), -1)
    self.assertEqual(math.trunc(1.999999), 1)
    self.assertEqual(math.trunc(-1.999999), -1)
    self.assertEqual(math.trunc(-0.999999), -0)
    self.assertEqual(math.trunc(-100.999), -100)

    class TestTrunc(object):
        # Objects advertise truncation via __trunc__.
        def __trunc__(self):
            return 23

    class TestNoTrunc(object):
        pass

    self.assertEqual(math.trunc(TestTrunc()), 23)
    self.assertRaises(TypeError, math.trunc)
    self.assertRaises(TypeError, math.trunc, 1, 2)
    # XXX: This is not ideal, but see the comment in math_trunc().
    self.assertRaises(AttributeError, math.trunc, TestNoTrunc())
    t = TestNoTrunc()
    # Unlike __ceil__/__floor__ above, a __trunc__ set on the instance IS
    # honored by this implementation (the lambda's args tuple is returned).
    t.__trunc__ = lambda *args: args
    self.assertEquals((), math.trunc(t))
    self.assertRaises(TypeError, math.trunc, t, 0)
def testCopysign(self):
    """Basic sign-transfer checks for math.copysign on finite values.

    NOTE(review): this redefines testCopysign and silently replaces the
    IEEE-754 signed-zero/infinity version defined earlier in the class;
    consider renaming one of the two so that both actually run.
    """
    self.assertEqual(math.copysign(1, 42), 1.0)
    self.assertEqual(math.copysign(0., 42), 0.0)
    self.assertEqual(math.copysign(1., -42), -1.0)
    self.assertEqual(math.copysign(3, 0.), 3.0)
    self.assertEqual(math.copysign(4., -0.), -4.0)
def testIsnan(self):
    """math.isnan is true only for NaNs."""
    self.assert_(math.isnan(float("nan")))
    # inf * 0 is a standard way of manufacturing a NaN.
    self.assert_(math.isnan(float("inf") * 0.))
    for not_a_nan in (float("inf"), 0., 1.):
        self.failIf(math.isnan(not_a_nan))
def testIsinf(self):
    """math.isinf is true only for the two infinities."""
    self.assert_(math.isinf(float("inf")))
    self.assert_(math.isinf(float("-inf")))
    # Literals that overflow a double evaluate to infinities.
    self.assert_(math.isinf(1E400))
    self.assert_(math.isinf(-1E400))
    for finite_or_nan in (float("nan"), 0., 1.):
        self.failIf(math.isinf(finite_or_nan))
# RED_FLAG 16-Oct-2000 Tim
# While 2.0 is more consistent about exceptions than previous releases, it
# still fails this part of the test on some platforms. For now, we only
# *run* test_exceptions() in verbose mode, so that this isn't normally
# tested.
# NOTE(review): `verbose` is presumably imported from test.test_support
# earlier in the file -- confirm.
if verbose:
    def test_exceptions(self):
        """Spot-check libm error reporting: underflow, overflow, domain."""
        try:
            x = math.exp(-1000000000)
        except:
            # mathmodule.c is failing to weed out underflows from libm, or
            # we've got an fp format with huge dynamic range
            self.fail("underflowing exp() should not have raised "
                      "an exception")
        if x != 0:
            self.fail("underflowing exp() should have returned 0")
        # If this fails, probably using a strict IEEE-754 conforming libm, and x
        # is +Inf afterwards. But Python wants overflows detected by default.
        try:
            x = math.exp(1000000000)
        except OverflowError:
            pass
        else:
            self.fail("overflowing exp() didn't trigger OverflowError")
        # If this fails, it could be a puzzle. One odd possibility is that
        # mathmodule.c's macros are getting confused while comparing
        # Inf (HUGE_VAL) to a NaN, and artificially setting errno to ERANGE
        # as a result (and so raising OverflowError instead).
        try:
            x = math.sqrt(-1.0)
        except ValueError:
            pass
        else:
            self.fail("sqrt(-1) didn't raise ValueError")
def test_testfile(self):
    """Run the shared real-function test vectors against the math module.

    `parse_testfile` and `test_file` are presumably defined earlier in the
    file (TODO confirm); each record carries a complex input/output pair
    plus accuracy flags.
    """
    # The expected values assume IEEE-754 doubles.
    if not float.__getformat__("double").startswith("IEEE"):
        return
    for id, fn, ar, ai, er, ei, flags in parse_testfile(test_file):
        # Skip if either the input or result is complex, or if
        # flags is nonempty
        if ai != 0. or ei != 0. or flags:
            continue
        if fn in ['rect', 'polar']:
            # no real versions of rect, polar
            continue
        func = getattr(math, fn)
        try:
            result = func(ar)
        except ValueError:
            message = ("Unexpected ValueError in " +
                       "test %s:%s(%r)\n" % (id, fn, ar))
            self.fail(message)
        except OverflowError:
            message = ("Unexpected OverflowError in " +
                       "test %s:%s(%r)\n" % (id, fn, ar))
            self.fail(message)
        self.ftest("%s:%s(%r)" % (id, fn, ar), result, er)
def test_main():
    """Build and run the math test suite plus the ieee754 doctest file."""
    from doctest import DocFileSuite
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(MathTests))
    # ieee754.txt holds doctests exercising IEEE-754 special values.
    suite.addTest(DocFileSuite("ieee754.txt"))
    run_unittest(suite)
# Run the suite directly when executed as a script.
if __name__ == '__main__':
    test_main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
import unittest
from pizco.protocol import Protocol
class TestProtocol(unittest.TestCase):
    """Round-trip and tamper-detection tests for pizco's wire Protocol."""

    def test_protocol(self):
        # Without an hmac key: format/parse round-trip and explicit
        # msgid/sender checks.
        prot = Protocol()
        self.assertRaises(ValueError, prot.parse, [])
        msg = prot.format('friend', 'bla', 'here goes the content')
        sender, topic, content, msgid = prot.parse(msg)
        self.assertEqual(sender, 'friend')
        self.assertEqual(topic, 'bla')
        self.assertEqual(content, 'here goes the content')
        real_id = msg[1]
        # Tampering with the message id is caught via the check_* arguments.
        msg[1] = 'newid'.encode('utf-8')
        self.assertRaises(ValueError, prot.parse, msg, check_msgid='wrong id')
        self.assertRaises(ValueError, prot.parse, msg, check_sender='another')
        msg[-1] = 'fake signature'.encode('utf-8')
        msg[1] = real_id
        # NOTE(review): the next three asserts re-check variables parsed
        # BEFORE the signature was faked; a fresh `prot.parse(msg)` call
        # appears to be missing here (presumably to show that a bad
        # signature is tolerated when no hmac key is set) -- confirm
        # against pizco.protocol.
        self.assertEqual(sender, 'friend')
        self.assertEqual(topic, 'bla')
        self.assertEqual(content, 'here goes the content')

    def test_protocol_key(self):
        # With an hmac key, the signature is enforced on parse.
        prot = Protocol(hmac_key='have a key')
        msg = prot.format('friend', 'bla', 'here goes the content')
        sender, topic, content, msgid = prot.parse(msg)
        self.assertEqual(sender, 'friend')
        self.assertEqual(topic, 'bla')
        self.assertEqual(content, 'here goes the content')
        real_id = msg[1]
        msg[1] = 'newid'.encode('utf-8')
        self.assertRaises(ValueError, prot.parse, msg, check_msgid='wrong id')
        self.assertRaises(ValueError, prot.parse, msg, check_sender='another')
        # A forged signature must be rejected even with the real message id.
        msg[-1] = 'fake signature'.encode('utf-8')
        msg[1] = real_id
        self.assertRaises(ValueError, prot.parse, msg)
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
#ifndef REFS_H
#define REFS_H
#include "commit.h"
#include "repository.h"
#include "repo-settings.h"
struct fsck_options;
struct object_id;
struct ref_store;
struct strbuf;
struct string_list;
struct string_list_item;
struct worktree;
enum ref_storage_format ref_storage_format_by_name(const char *name);
const char *ref_storage_format_to_name(enum ref_storage_format ref_storage_format);
/*
 * Error codes reported by reference-transaction operations. All values
 * are negative so they can be returned alongside a 0-for-success
 * convention.
 */
enum ref_transaction_error {
	/* Default error code */
	REF_TRANSACTION_ERROR_GENERIC = -1,
	/* Ref name conflict like A vs A/B */
	REF_TRANSACTION_ERROR_NAME_CONFLICT = -2,
	/* Ref to be created already exists */
	REF_TRANSACTION_ERROR_CREATE_EXISTS = -3,
	/* ref expected but doesn't exist */
	REF_TRANSACTION_ERROR_NONEXISTENT_REF = -4,
	/* Provided old_oid or old_target of reference doesn't match actual */
	REF_TRANSACTION_ERROR_INCORRECT_OLD_VALUE = -5,
	/* Provided new_oid or new_target is invalid */
	REF_TRANSACTION_ERROR_INVALID_NEW_VALUE = -6,
	/* Expected ref to be symref, but is a regular ref */
	REF_TRANSACTION_ERROR_EXPECTED_SYMREF = -7,
	/* Cannot create ref due to case-insensitive filesystem */
	REF_TRANSACTION_ERROR_CASE_CONFLICT = -8,
};
/*
* Resolve a reference, recursively following symbolic references.
*
* Return the name of the non-symbolic reference that ultimately pointed
* at the resolved object name. The return value, if not NULL, is a
* pointer into either a static buffer or the input ref.
*
* If oid is non-NULL, store the referred-to object's name in it.
*
* If the reference cannot be resolved to an object, the behavior
* depends on the RESOLVE_REF_READING flag:
*
* - If RESOLVE_REF_READING is set, return NULL.
*
* - If RESOLVE_REF_READING is not set, clear oid and return the name of
* the last reference name in the chain, which will either be a non-symbolic
* reference or an undefined reference. If this is a prelude to
* "writing" to the ref, the return value is the name of the ref
* that will actually be created or changed.
*
* If the RESOLVE_REF_NO_RECURSE flag is passed, only resolves one
* level of symbolic reference. The value stored in oid for a symbolic
* reference will always be null_oid in this case, and the return
* value is the reference that the symref refers to directly.
*
* If flags is non-NULL, set the value that it points to the
* combination of REF_ISPACKED (if the reference was found among the
* packed references), REF_ISSYMREF (if the initial reference was a
* symbolic reference), REF_BAD_NAME (if the reference name is ill
* formed --- see RESOLVE_REF_ALLOW_BAD_NAME below), and REF_ISBROKEN
* (if the ref is malformed or has a bad name). See refs.h for more detail
* on each flag.
*
* If ref is not a properly-formatted, normalized reference, return
* NULL. If more than MAXDEPTH recursive symbolic lookups are needed,
* give up and return NULL.
*
* RESOLVE_REF_ALLOW_BAD_NAME allows resolving refs even when their
* name is invalid according to git-check-ref-format(1). If the name
* is bad then the value stored in oid will be null_oid and the two
* flags REF_ISBROKEN and REF_BAD_NAME will be set.
*
* Even with RESOLVE_REF_ALLOW_BAD_NAME, names that escape the refs/
* directory and do not consist of all caps and underscores cannot be
* resolved. The function returns NULL for such ref names.
* Caps and underscores refers to the pseudorefs, such as HEAD,
* FETCH_HEAD and friends, that all live outside of the refs/ directory.
*/
#define RESOLVE_REF_READING 0x01
#define RESOLVE_REF_NO_RECURSE 0x02
#define RESOLVE_REF_ALLOW_BAD_NAME 0x04
const char *refs_resolve_ref_unsafe(struct ref_store *refs,
const char *refname,
int resolve_flags,
struct object_id *oid,
int *flags);
char *refs_resolve_refdup(struct ref_store *refs,
const char *refname, int resolve_flags,
struct object_id *oid, int *flags);
int refs_read_ref_full(struct ref_store *refs, const char *refname,
int resolve_flags, struct object_id *oid, int *flags);
int refs_read_ref(struct ref_store *refs, const char *refname, struct object_id *oid);
#define NOT_A_SYMREF -2
/*
* Read the symbolic ref named "refname" and write its immediate referent into
* the provided buffer. Referent is left empty if "refname" is not a symbolic
* ref. It does not resolve the symbolic reference recursively in case the
* target is also a symbolic ref.
*
* Returns 0 on success, -2 if the "refname" is not a symbolic ref,
* -1 otherwise.
*/
int refs_read_symbolic_ref(struct ref_store *ref_store, const char *refname,
struct strbuf *referent);
/*
* Return 0 if a reference named refname could be created without
* conflicting with the name of an existing reference. Otherwise,
* return a negative value and write an explanation to err. If extras
* is non-NULL, it is a list of additional refnames with which refname
* is not allowed to conflict. If skip is non-NULL, ignore potential
* conflicts with refs in skip (e.g., because they are scheduled for
* deletion in the same operation). Behavior is undefined if the same
* name is listed in both extras and skip.
*
* Two reference names conflict if one of them exactly matches the
* leading components of the other; e.g., "foo/bar" conflicts with
* both "foo" and with "foo/bar/baz" but not with "foo/bar" or
* "foo/barbados".
*
 * If `initial_transaction` is non-zero, then all collision checks with
 * pre-existing refs are skipped.
*
* extras and skip must be sorted.
*/
enum ref_transaction_error refs_verify_refname_available(struct ref_store *refs,
const char *refname,
const struct string_list *extras,
const struct string_list *skip,
unsigned int initial_transaction,
struct strbuf *err);
int refs_ref_exists(struct ref_store *refs, const char *refname);
int should_autocreate_reflog(enum log_refs_config log_all_ref_updates,
const char *refname);
int is_branch(const char *refname);
#define REF_STORE_CREATE_ON_DISK_IS_WORKTREE (1 << 0)
int ref_store_create_on_disk(struct ref_store *refs, int flags, struct strbuf *err);
/*
* Release all memory and resources associated with the ref store.
*/
void ref_store_release(struct ref_store *ref_store);
/*
* Remove the ref store from disk. This deletes all associated data.
*/
int ref_store_remove_on_disk(struct ref_store *refs, struct strbuf *err);
/*
* Return the peeled value of the oid currently being iterated via
* for_each_ref(), etc. This is equivalent to calling:
*
* peel_object(r, oid, &peeled);
*
* with the "oid" value given to the each_ref_fn callback, except
* that some ref storage may be able to answer the query without
* actually loading the object in memory.
*/
int peel_iterated_oid(struct repository *r,
const struct object_id *base, struct object_id *peeled);
/**
* Resolve refname in the nested "gitlink" repository in the specified
* submodule (which must be non-NULL). If the resolution is
* successful, return 0 and set oid to the name of the object;
* otherwise, return a non-zero value.
*/
int repo_resolve_gitlink_ref(struct repository *r,
const char *submodule, const char *refname,
struct object_id *oid);
/*
* Return true iff abbrev_name is a possible abbreviation for
* full_name according to the rules defined by ref_rev_parse_rules in
* refs.c.
*/
int refname_match(const char *abbrev_name, const char *full_name);
/*
* Given a 'prefix' expand it by the rules in 'ref_rev_parse_rules' and add
* the results to 'prefixes'
*/
struct strvec;
void expand_ref_prefix(struct strvec *prefixes, const char *prefix);
int expand_ref(struct repository *r, const char *str, int len, struct object_id *oid, char **ref);
int repo_dwim_ref(struct repository *r, const char *str, int len,
struct object_id *oid, char **ref, int nonfatal_dangling_mark);
int repo_dwim_log(struct repository *r, const char *str, int len, struct object_id *oid, char **ref);
/*
* Retrieves the default branch name for newly-initialized repositories.
*
* The return value is an allocated string.
*/
char *repo_default_branch_name(struct repository *r, int quiet);
/*
* Copy "name" to "sb", expanding any special @-marks as handled by
* repo_interpret_branch_name(). The result is a non-qualified branch name
* (so "foo" or "origin/master" instead of "refs/heads/foo" or
* "refs/remotes/origin/master").
*
* Note that the resulting name may not be a syntactically valid refname.
*
* If "allowed" is non-zero, restrict the set of allowed expansions. See
* repo_interpret_branch_name() for details.
*/
void copy_branchname(struct strbuf *sb, const char *name,
unsigned allowed);
/*
* Like copy_branchname() above, but confirm that the result is
* syntactically valid to be used as a local branch name in refs/heads/.
*
* The return value is "0" if the result is valid, and "-1" otherwise.
*/
int check_branch_ref(struct strbuf *sb, const char *name);
/*
* Similar for a tag name in refs/tags/.
*
* The return value is "0" if the result is valid, and "-1" otherwise.
*/
int check_tag_ref(struct strbuf *sb, const char *name);
/*
* A ref_transaction represents a collection of reference updates that
* should succeed or fail together.
*
* Calling sequence
* ----------------
*
* - Allocate and initialize a `struct ref_transaction` by calling
* `ref_transaction_begin()`.
*
* - Specify the intended ref updates by calling one or more of the
* following functions:
* - `ref_transaction_update()`
* - `ref_transaction_create()`
* - `ref_transaction_delete()`
* - `ref_transaction_verify()`
*
* - Then either:
*
* - Optionally call `ref_transaction_prepare()` to prepare the
* transaction. This locks all references, checks preconditions,
* etc. but doesn't finalize anything. If this step fails, the
* transaction has been closed and can only be freed. If this step
* succeeds, then `ref_transaction_commit()` is almost certain to
* succeed. However, you can still call `ref_transaction_abort()`
* if you decide not to commit the transaction after all.
*
* - Call `ref_transaction_commit()` to execute the transaction,
* make the changes permanent, and release all locks. If you
* haven't already called `ref_transaction_prepare()`, then
* `ref_transaction_commit()` calls it for you.
*
* Or
*
* - Call `ref_transaction_begin()` with REF_TRANSACTION_FLAG_INITIAL if the
* ref database is known to be empty and have no other writers (e.g. during
* clone). This is likely to be much faster than without the flag.
*
* - Then finally, call `ref_transaction_free()` to free the
* `ref_transaction` data structure.
*
* At any time before calling `ref_transaction_commit()`, you can call
* `ref_transaction_abort()` to abort the transaction, rollback any
* locks, and free any associated resources (including the
* `ref_transaction` data structure).
*
* Putting it all together, a complete reference update looks like
*
* struct ref_transaction *transaction;
* struct strbuf err = STRBUF_INIT;
* int ret = 0;
*
* transaction = ref_store_transaction_begin(refs, 0, &err);
* if (!transaction ||
* ref_transaction_update(...) ||
* ref_transaction_create(...) ||
* ...etc... ||
* ref_transaction_commit(transaction, &err)) {
* error("%s", err.buf);
* ret = -1;
* }
* ref_transaction_free(transaction);
* strbuf_release(&err);
* return ret;
*
* Error handling
* --------------
*
* On error, transaction functions append a message about what
* went wrong to the 'err' argument. The message mentions what
* ref was being updated (if any) when the error occurred so it
* can be passed to 'die' or 'error' as-is.
*
* The message is appended to err without first clearing err.
* err will not be '\n' terminated.
*
* Caveats
* -------
*
* Note that no locks are taken, and no refs are read, until
* `ref_transaction_prepare()` or `ref_transaction_commit()` is
* called. So, for example, `ref_transaction_verify()` won't report a
* verification failure until the commit is attempted.
*/
struct ref_transaction;
/*
* Bit values set in the flags argument passed to each_ref_fn() and
* stored in ref_iterator::flags. Other bits are for internal use
* only:
*/
enum reference_status {
/* Reference is a symbolic reference. */
REF_ISSYMREF = (1 << 0),
/* Reference is a packed reference. */
REF_ISPACKED = (1 << 1),
/*
* Reference cannot be resolved to an object name: dangling symbolic
* reference (directly or indirectly), corrupt reference file,
* reference exists but name is bad, or symbolic reference refers to
* ill-formatted reference name.
*/
REF_ISBROKEN = (1 << 2),
/*
* Reference name is not well formed.
*
* See git-check-ref-format(1) for the definition of well formed ref names.
*/
REF_BAD_NAME = (1 << 3),
};
/*
 * A reference passed to `for_each_ref()`-style callbacks. The memory
 * backing this struct (including the strings and object IDs it points
 * to) is only guaranteed to remain valid for the duration of a single
 * callback invocation; copy anything you need to keep.
 */
struct reference {
	/* The fully-qualified name of the reference. */
	const char *name;
	/* The target of a symbolic ref. `NULL` for direct references. */
	const char *target;
	/*
	 * The object ID of a reference. Either the direct object ID or the
	 * resolved object ID in the case of a symbolic ref. May be the zero
	 * object ID in case the symbolic ref cannot be resolved.
	 */
	const struct object_id *oid;
	/*
	 * An optional peeled object ID. This field _may_ be set for tags in
	 * case the peeled value is present in the backend. Please refer to
	 * `reference_get_peeled_oid()`.
	 */
	const struct object_id *peeled_oid;
	/* A bitfield of `enum reference_status` flags. */
	unsigned flags;
};
/*
* Peel the tag to a non-tag commit. If present, this uses the peeled object ID
* exposed by the reference backend. Otherwise, the object is peeled via the
* object database, which is less efficient.
*
* Return `0` if the reference could be peeled, a negative error code
* otherwise.
*/
int reference_get_peeled_oid(struct repository *repo,
const struct reference *ref,
struct object_id *peeled_oid);
/*
* The signature for the callback function for the for_each_*()
* functions below. The memory pointed to by the `struct reference`
* argument is only guaranteed to be valid for the duration of a
* single callback invocation.
*/
typedef int each_ref_fn(const struct reference *ref, void *cb_data);
/*
* The following functions invoke the specified callback function for
* each reference indicated. If the function ever returns a nonzero
* value, stop the iteration and return that value. Please note that
* it is not safe to modify references while an iteration is in
* progress, unless the same callback function invocation that
* modifies the reference also returns a nonzero value to immediately
* stop the iteration. Returned references are sorted.
*/
int refs_head_ref(struct ref_store *refs,
each_ref_fn fn, void *cb_data);
int refs_for_each_ref(struct ref_store *refs,
each_ref_fn fn, void *cb_data);
int refs_for_each_ref_in(struct ref_store *refs, const char *prefix,
each_ref_fn fn, void *cb_data);
int refs_for_each_tag_ref(struct ref_store *refs,
each_ref_fn fn, void *cb_data);
int refs_for_each_branch_ref(struct ref_store *refs,
each_ref_fn fn, void *cb_data);
int refs_for_each_remote_ref(struct ref_store *refs,
each_ref_fn fn, void *cb_data);
int refs_for_each_replace_ref(struct ref_store *refs,
each_ref_fn fn, void *cb_data);
/*
* references matching any pattern in "exclude_patterns" are omitted from the
* result set on a best-effort basis.
*/
int refs_for_each_fullref_in(struct ref_store *refs, const char *prefix,
const char **exclude_patterns,
each_ref_fn fn, void *cb_data);
/**
* iterate all refs in "patterns" by partitioning patterns into disjoint sets
* and iterating the longest-common prefix of each set.
*
* references matching any pattern in "exclude_patterns" are omitted from the
* result set on a best-effort basis.
*
* callers should be prepared to ignore references that they did not ask for.
*/
int refs_for_each_fullref_in_prefixes(struct ref_store *refs,
const char *namespace,
const char **patterns,
const char **exclude_patterns,
each_ref_fn fn, void *cb_data);
/* iterates all refs that match the specified glob pattern. */
int refs_for_each_glob_ref(struct ref_store *refs, each_ref_fn fn,
const char *pattern, void *cb_data);
int refs_for_each_glob_ref_in(struct ref_store *refs, each_ref_fn fn,
const char *pattern, const char *prefix, void *cb_data);
int refs_head_ref_namespaced(struct ref_store *refs, each_ref_fn fn, void *cb_data);
/*
* references matching any pattern in "exclude_patterns" are omitted from the
* result set on a best-effort basis.
*/
int refs_for_each_namespaced_ref(struct ref_store *refs,
const char **exclude_patterns,
each_ref_fn fn, void *cb_data);
/* can be used to learn about broken ref and symref */
int refs_for_each_rawref(struct ref_store *refs, each_ref_fn fn, void *cb_data);
int refs_for_each_rawref_in(struct ref_store *refs, const char *prefix,
each_ref_fn fn, void *cb_data);
/*
* Iterates over all refs including root refs, i.e. pseudorefs and HEAD.
*/
int refs_for_each_include_root_refs(struct ref_store *refs, each_ref_fn fn,
void *cb_data);
/*
* Normalizes partial refs to their fully qualified form.
* Will prepend <prefix> to the <pattern> if it doesn't start with 'refs/'.
* <prefix> will default to 'refs/' if NULL.
*
* item.string will be set to the result.
* item.util will be set to NULL if <pattern> contains glob characters, or
* non-NULL if it doesn't.
*/
void normalize_glob_ref(struct string_list_item *item, const char *prefix,
const char *pattern);
/*
 * Scan `pattern` for glob metacharacters ('?', '*' or '['). Return a
 * pointer to the first such character, or NULL if the pattern contains
 * none (i.e. it is a literal refname).
 */
static inline const char *has_glob_specials(const char *pattern)
{
	static const char glob_special_chars[] = "?*[";

	return strpbrk(pattern, glob_special_chars);
}
void refs_warn_dangling_symrefs(struct ref_store *refs, FILE *fp,
const char *indent, int dry_run,
const struct string_list *refnames);
/*
* Flags for controlling behaviour of refs_optimize()
* REFS_OPTIMIZE_PRUNE: Prune loose refs after packing
* REFS_OPTIMIZE_AUTO: Pack refs on a best effort basis. The heuristics and end
* result are decided by the ref backend. Backends may ignore
* this flag and fall back to a normal repack.
*/
#define REFS_OPTIMIZE_PRUNE (1 << 0)
#define REFS_OPTIMIZE_AUTO (1 << 1)
/* Options controlling refs_optimize(); see the flag comments above. */
struct refs_optimize_opts {
	/* Bitmask of the REFS_OPTIMIZE_* flags defined above. */
	unsigned int flags;
	/*
	 * NOTE(review): presumably refs matching these exclusions are left
	 * out of the optimization — confirm against the backend code.
	 */
	struct ref_exclusions *exclusions;
	/*
	 * NOTE(review): presumably an explicit list of refs to include in
	 * the optimization — confirm against the backend code.
	 */
	struct string_list *includes;
};
/*
* Optimize the ref store. The exact behavior is up to the backend.
* For the files backend, this is equivalent to packing refs.
*/
int refs_optimize(struct ref_store *refs, struct refs_optimize_opts *opts);
/*
* Check if refs backend can be optimized by calling 'refs_optimize'.
*/
int refs_optimize_required(struct ref_store *ref_store,
struct refs_optimize_opts *opts,
bool *required);
/*
* Setup reflog before using. Fill in err and return -1 on failure.
*/
int refs_create_reflog(struct ref_store *refs, const char *refname,
struct strbuf *err);
/**
* Reads log for the value of ref during at_time (in which case "cnt" should be
* negative) or the reflog "cnt" entries from the top (in which case "at_time"
* should be 0).
*
* If we found the reflog entry in question, returns 0 (and details of the
* entry can be found in the out-parameters).
*
* If we ran out of reflog entries, the out-parameters are filled with the
* details of the oldest entry we did find, and the function returns 1. Note
* that there is one important special case here! If the reflog was empty
* and the caller asked for the 0-th cnt, we will return "1" but leave the
* "oid" field untouched.
**/
int read_ref_at(struct ref_store *refs,
const char *refname, unsigned int flags,
timestamp_t at_time, int cnt,
struct object_id *oid, char **msg,
timestamp_t *cutoff_time, int *cutoff_tz, int *cutoff_cnt);
/** Check if a particular reflog exists */
int refs_reflog_exists(struct ref_store *refs, const char *refname);
/*
* Delete the specified reference. If old_oid is non-NULL, then
* verify that the current value of the reference is old_oid before
* deleting it. If old_oid is NULL, delete the reference if it
* exists, regardless of its old value. It is an error for old_oid to
* be null_oid. msg and flags are passed through to
* ref_transaction_delete().
*/
int refs_delete_ref(struct ref_store *refs, const char *msg,
const char *refname,
const struct object_id *old_oid,
unsigned int flags);
/*
* Delete the specified references. If there are any problems, emit
* errors but attempt to keep going (i.e., the deletes are not done in
* an all-or-nothing transaction). msg and flags are passed through to
* ref_transaction_delete().
*/
int refs_delete_refs(struct ref_store *refs, const char *msg,
struct string_list *refnames, unsigned int flags);
/** Delete a reflog */
int refs_delete_reflog(struct ref_store *refs, const char *refname);
/*
* Callback to process a reflog entry found by the iteration functions (see
* below).
*
* The committer parameter is a single string, in the form
* "$GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL>" (without double quotes).
*
* The timestamp parameter gives the time when entry was created as the number
* of seconds since the UNIX epoch.
*
* The tz parameter gives the timezone offset for the user who created
* the reflog entry, and its value gives a positive or negative offset
* from UTC. Its absolute value is formed by multiplying the hour
* part by 100 and adding the minute part. For example, 1 hour ahead
* of UTC, CET == "+0100", is represented as positive one hundred (not
* positive sixty).
*
* The msg parameter is a single complete line; a reflog message given
* to refs_delete_ref, refs_update_ref, etc. is returned to the
 * callback normalized---each run of whitespace is squashed into a
* single whitespace, trailing whitespace, if exists, is trimmed, and
* then a single LF is added at the end.
*
* The cb_data is a caller-supplied pointer given to the iterator
* functions.
*/
typedef int each_reflog_ent_fn(const char *refname,
struct object_id *old_oid,
struct object_id *new_oid,
const char *committer,
timestamp_t timestamp,
int tz, const char *msg,
void *cb_data);
/* Iterate over reflog entries in the log for `refname`. */
/* oldest entry first */
int refs_for_each_reflog_ent(struct ref_store *refs, const char *refname,
each_reflog_ent_fn fn, void *cb_data);
/* youngest entry first */
int refs_for_each_reflog_ent_reverse(struct ref_store *refs,
const char *refname,
each_reflog_ent_fn fn,
void *cb_data);
/*
* The signature for the callback function for the refs_for_each_reflog()
* functions below. The memory pointed to by the refname argument is only
* guaranteed to be valid for the duration of a single callback invocation.
*/
typedef int each_reflog_fn(const char *refname, void *cb_data);
/*
* Calls the specified function for each reflog file until it returns nonzero,
* and returns the value. Reflog file order is unspecified.
*/
int refs_for_each_reflog(struct ref_store *refs, each_reflog_fn fn, void *cb_data);
#define REFNAME_ALLOW_ONELEVEL 1
#define REFNAME_REFSPEC_PATTERN 2
/*
* Return 0 iff refname has the correct format for a refname according
* to the rules described in Documentation/git-check-ref-format.adoc.
* If REFNAME_ALLOW_ONELEVEL is set in flags, then accept one-level
* reference names. If REFNAME_REFSPEC_PATTERN is set in flags, then
* allow a single "*" wildcard character in the refspec. No leading or
* repeated slashes are accepted.
*/
int check_refname_format(const char *refname, int flags);
struct fsck_ref_report;
/*
* Perform generic checks for a specific direct ref. This function is
 * expected to be called by the ref backends for every direct ref.
*/
int refs_fsck_ref(struct ref_store *refs, struct fsck_options *o,
struct fsck_ref_report *report,
const char *refname, const struct object_id *oid);
/*
* Perform generic checks for a specific symref target. This function is
* expected to be called by the ref backends for every symbolic ref.
*/
int refs_fsck_symref(struct ref_store *refs, struct fsck_options *o,
struct fsck_ref_report *report,
const char *refname, const char *target);
/*
* Check the reference database for consistency. Return 0 if refs and
* reflogs are consistent, and non-zero otherwise. The errors will be
* written to stderr.
*/
int refs_fsck(struct ref_store *refs, struct fsck_options *o,
struct worktree *wt);
/*
* Apply the rules from check_refname_format, but mutate the result until it
* is acceptable, and place the result in "out".
*/
void sanitize_refname_component(const char *refname, struct strbuf *out);
const char *prettify_refname(const char *refname);
char *refs_shorten_unambiguous_ref(struct ref_store *refs,
const char *refname, int strict);
/** rename ref, return 0 on success **/
int refs_rename_ref(struct ref_store *refs, const char *oldref,
const char *newref, const char *logmsg);
/** copy ref, return 0 on success **/
int refs_copy_existing_ref(struct ref_store *refs, const char *oldref,
const char *newref, const char *logmsg);
int refs_update_symref(struct ref_store *refs, const char *refname,
const char *target, const char *logmsg);
int refs_update_symref_extended(struct ref_store *refs, const char *refname,
const char *target, const char *logmsg,
struct strbuf *referent, int create_only);
/*
 * How a failed ref update should be reported; passed as the `onerr`
 * argument of refs_update_ref().
 */
enum action_on_err {
	/* Report the failure as a non-fatal error message. */
	UPDATE_REFS_MSG_ON_ERR,
	/* Die with an error message on failure. */
	UPDATE_REFS_DIE_ON_ERR,
	/* Fail quietly, without emitting a message. */
	UPDATE_REFS_QUIET_ON_ERR
};
enum ref_transaction_flag {
/*
* The ref transaction is part of the initial creation of the ref store
* and can thus assume that the ref store is completely empty. This
* allows the backend to perform the transaction more efficiently by
* skipping certain checks.
*
* It is a bug to set this flag when there might be other processes
* accessing the repository or if there are existing references that
* might conflict with the ones being created. All old_oid values must
* either be absent or null_oid.
*/
REF_TRANSACTION_FLAG_INITIAL = (1 << 0),
/*
* The transaction mechanism by default fails all updates if any conflict
* is detected. This flag allows transactions to partially apply updates
* while rejecting updates which do not match the expected state.
*/
REF_TRANSACTION_ALLOW_FAILURE = (1 << 1),
};
/*
* Begin a reference transaction. The reference transaction must
* be freed by calling ref_transaction_free().
*/
struct ref_transaction *ref_store_transaction_begin(struct ref_store *refs,
unsigned int flags,
struct strbuf *err);
/*
* Reference transaction updates
*
* The following four functions add a reference check or update to a
* ref_transaction. They have some common similar parameters:
*
* transaction -- a pointer to an open ref_transaction, obtained
* from ref_transaction_begin().
*
* refname -- the name of the reference to be affected.
*
* new_oid -- the object ID that should be set to be the new value
* of the reference. Some functions allow this parameter to be
* NULL, meaning that the reference is not changed, or
* null_oid, meaning that the reference should be deleted. A
* copy of this value is made in the transaction.
*
* old_oid -- the object ID that the reference must have before
* the update. Some functions allow this parameter to be NULL,
* meaning that the old value of the reference is not checked,
* or null_oid, meaning that the reference must not exist
* before the update. A copy of this value is made in the
* transaction.
*
* new_target -- the target reference that the reference will be
* updated to point to. If the reference is a regular reference,
* it will be converted to a symbolic reference. Cannot be set
* together with `new_oid`. A copy of this value is made in the
* transaction.
*
* old_target -- the reference that the reference must be pointing to.
 * Cannot be set together with `old_oid`. A copy of this value is
* made in the transaction.
*
* flags -- flags affecting the update, passed to
* update_ref_lock(). Possible flags: REF_NO_DEREF,
* REF_FORCE_CREATE_REFLOG. See those constants for more
* information.
*
* msg -- a message describing the change (for the reflog).
*
* err -- a strbuf for receiving a description of any error that
* might have occurred.
*
* The functions make internal copies of refname and msg, so the
* caller retains ownership of these parameters.
*
* The functions return 0 on success and non-zero on failure. A
* failure means that the transaction as a whole has failed and needs
* to be rolled back.
*/
/*
* The following flags can be passed to ref_transaction_update() etc.
* Internally, they are stored in `ref_update::flags`, along with some
* internal flags.
*/
/*
* Act on the ref directly; i.e., without dereferencing symbolic refs.
* If this flag is not specified, then symbolic references are
* dereferenced and the update is applied to the referent.
*/
#define REF_NO_DEREF (1 << 0)
/*
* Force the creation of a reflog for this reference, even if it
* didn't previously have a reflog.
*/
#define REF_FORCE_CREATE_REFLOG (1 << 1)
/*
* Blindly write an object_id. This is useful for testing data corruption
* scenarios.
*/
#define REF_SKIP_OID_VERIFICATION (1 << 10)
/*
* Skip verifying refname. This is useful for testing data corruption scenarios.
*/
#define REF_SKIP_REFNAME_VERIFICATION (1 << 11)
/*
* Skip creation of a reflog entry, even if it would have otherwise been
* created.
*/
#define REF_SKIP_CREATE_REFLOG (1 << 12)
/*
* When writing a REF_LOG_ONLY record, use the old and new object IDs provided
* in the update instead of resolving the old object ID. The caller must also
* set both REF_HAVE_OLD and REF_HAVE_NEW.
*/
#define REF_LOG_USE_PROVIDED_OIDS (1 << 13)
/*
* Bitmask of all of the flags that are allowed to be passed in to
* ref_transaction_update() and friends:
*/
#define REF_TRANSACTION_UPDATE_ALLOWED_FLAGS \
(REF_NO_DEREF | REF_FORCE_CREATE_REFLOG | REF_SKIP_OID_VERIFICATION | \
REF_SKIP_REFNAME_VERIFICATION | REF_SKIP_CREATE_REFLOG | REF_LOG_USE_PROVIDED_OIDS)
/*
* Add a reference update to transaction. `new_oid` is the value that
* the reference should have after the update, or `null_oid` if it
* should be deleted. If `new_oid` is NULL, then the reference is not
* changed at all. `old_oid` is the value that the reference must have
* before the update, or `null_oid` if it must not have existed
* beforehand. The old value is checked after the lock is taken to
* prevent races. If the old value doesn't agree with old_oid, the
* whole transaction fails. If old_oid is NULL, then the previous
* value is not checked. If `old_target` is not NULL, treat the reference
* as a symbolic ref and validate that its target before the update is
* `old_target`. If the `new_target` is not NULL, then the reference
* will be updated to a symbolic ref which targets `new_target`.
* Together, these allow us to update between regular refs and symrefs.
*
* See the above comment "Reference transaction updates" for more
* information.
*/
int ref_transaction_update(struct ref_transaction *transaction,
const char *refname,
const struct object_id *new_oid,
const struct object_id *old_oid,
const char *new_target,
const char *old_target,
unsigned int flags, const char *msg,
struct strbuf *err);
/*
* Similar to `ref_transaction_update`, but this function is only for adding
* a reflog update. Supports providing custom committer information. The index
 * field can be utilized to order updates as desired. When set to zero, the
* updates default to being ordered by refname.
*/
int ref_transaction_update_reflog(struct ref_transaction *transaction,
const char *refname,
const struct object_id *new_oid,
const struct object_id *old_oid,
const char *committer_info,
const char *msg,
uint64_t index,
struct strbuf *err);
/*
* Add a reference creation to transaction. new_oid is the value that
* the reference should have after the update; it must not be
* null_oid. It is verified that the reference does not exist
* already.
*
* See the above comment "Reference transaction updates" for more
* information.
*/
int ref_transaction_create(struct ref_transaction *transaction,
const char *refname,
const struct object_id *new_oid,
const char *new_target,
unsigned int flags, const char *msg,
struct strbuf *err);
/*
* Add a reference deletion to transaction. If old_oid is non-NULL,
* then it holds the value that the reference should have had before
* the update (which must not be null_oid).
*
* See the above comment "Reference transaction updates" for more
* information.
*/
int ref_transaction_delete(struct ref_transaction *transaction,
const char *refname,
const struct object_id *old_oid,
const char *old_target,
unsigned int flags,
const char *msg,
struct strbuf *err);
/*
* Verify, within a transaction, that refname has the value old_oid,
* or, if old_oid is null_oid, then verify that the reference
* doesn't exist. old_oid must be non-NULL.
*
* See the above comment "Reference transaction updates" for more
* information.
*/
int ref_transaction_verify(struct ref_transaction *transaction,
const char *refname,
const struct object_id *old_oid,
const char *old_target,
unsigned int flags,
struct strbuf *err);
/*
* Perform the preparatory stages of committing `transaction`. Acquire
* any needed locks, check preconditions, etc.; basically, do as much
* as possible to ensure that the transaction will be able to go
* through, stopping just short of making any irrevocable or
* user-visible changes. The updates that this function prepares can
* be finished up by calling `ref_transaction_commit()` or rolled back
* by calling `ref_transaction_abort()`.
*
* On success, return 0 and leave the transaction in "prepared" state.
* On failure, abort the transaction, write an error message to `err`,
* and return one of the `TRANSACTION_*` constants.
*
* Callers who don't need such fine-grained control over committing
* reference transactions should just call `ref_transaction_commit()`.
*/
int ref_transaction_prepare(struct ref_transaction *transaction,
struct strbuf *err);
/*
* Commit all of the changes that have been queued in transaction, as
* atomically as possible. On success, return 0 and leave the
* transaction in "closed" state. On failure, roll back the
* transaction, write an error message to `err`, and return one of the
* `TRANSACTION_*` constants
*/
int ref_transaction_commit(struct ref_transaction *transaction,
struct strbuf *err);
/*
* Abort `transaction`, which has been begun and possibly prepared,
* but not yet committed.
*/
int ref_transaction_abort(struct ref_transaction *transaction,
struct strbuf *err);
/*
* Execute the given callback function for each of the reference updates which
* have been queued in the given transaction. `old_oid` and `new_oid` may be
* `NULL` pointers depending on whether the update has these object IDs set or
* not.
*/
typedef void ref_transaction_for_each_queued_update_fn(const char *refname,
const struct object_id *old_oid,
const struct object_id *new_oid,
void *cb_data);
void ref_transaction_for_each_queued_update(struct ref_transaction *transaction,
ref_transaction_for_each_queued_update_fn cb,
void *cb_data);
/*
* Execute the given callback function for each of the reference updates which
* have been rejected in the given transaction.
*/
typedef void ref_transaction_for_each_rejected_update_fn(const char *refname,
const struct object_id *old_oid,
const struct object_id *new_oid,
const char *old_target,
const char *new_target,
enum ref_transaction_error err,
const char *details,
void *cb_data);
void ref_transaction_for_each_rejected_update(struct ref_transaction *transaction,
ref_transaction_for_each_rejected_update_fn cb,
void *cb_data);
/*
* Translate errors to human readable error messages.
*/
const char *ref_transaction_error_msg(enum ref_transaction_error err);
/*
* Free `*transaction` and all associated data.
*/
void ref_transaction_free(struct ref_transaction *transaction);
/**
* Lock, update, and unlock a single reference. This function
* basically does a transaction containing a single call to
* ref_transaction_update(). The parameters to this function have the
* same meaning as the corresponding parameters to
* ref_transaction_update(). Handle errors as requested by the `onerr`
* argument.
*/
int refs_update_ref(struct ref_store *refs, const char *msg, const char *refname,
const struct object_id *new_oid, const struct object_id *old_oid,
unsigned int flags, enum action_on_err onerr);
int parse_hide_refs_config(const char *var, const char *value, const char *,
struct strvec *);
/*
* Check whether a ref is hidden. If no namespace is set, both the first and
* the second parameter point to the full ref name. If a namespace is set and
* the ref is inside that namespace, the first parameter is a pointer to the
* name of the ref with the namespace prefix removed. If a namespace is set and
* the ref is outside that namespace, the first parameter is NULL. The second
* parameter always points to the full ref name.
*/
int ref_is_hidden(const char *, const char *, const struct strvec *);
/*
* Returns an array of patterns to use as excluded_patterns, if none of the
* hidden references use the token '!' or '^'.
*/
const char **hidden_refs_to_excludes(const struct strvec *hide_refs);
/*
* Prefix all exclude patterns with the namespace, if any. This is required
* because exclude patterns apply to the stripped reference name, not the full
* reference name with the namespace.
*/
const char **get_namespaced_exclude_patterns(const char **exclude_patterns,
const char *namespace,
struct strvec *out);
/* Is this a per-worktree ref living in the refs/ namespace? */
int is_per_worktree_ref(const char *refname);
/* Describes how a refname relates to worktrees */
enum ref_worktree_type {
REF_WORKTREE_CURRENT, /* implicitly per worktree, eg. HEAD or
refs/bisect/SOMETHING */
REF_WORKTREE_MAIN, /* explicitly in main worktree, eg.
main-worktree/HEAD */
REF_WORKTREE_OTHER, /* explicitly in named worktree, eg.
worktrees/bla/HEAD */
REF_WORKTREE_SHARED, /* the default, eg. refs/heads/main */
};
/*
* Parse a `maybe_worktree_ref` as a ref that possibly refers to a worktree ref
* (ie. either REFNAME, main-worktree/REFNAME or worktree/WORKTREE/REFNAME). It
* returns what kind of ref was found, and in case of REF_WORKTREE_OTHER, the
* worktree name is returned in `worktree_name` (pointing into
* `maybe_worktree_ref`) and `worktree_name_length`. The bare refname (the
* refname stripped of prefixes) is returned in `bare_refname`. The
* `worktree_name`, `worktree_name_length` and `bare_refname` arguments may be
* NULL.
*/
enum ref_worktree_type parse_worktree_ref(const char *maybe_worktree_ref,
const char **worktree_name,
int *worktree_name_length,
const char **bare_refname);
/* Flags accepted by refs_reflog_expire(); see its comment below. */
enum expire_reflog_flags {
	/* Report what would be expired without modifying anything. */
	EXPIRE_REFLOGS_DRY_RUN = 1 << 0,
	/*
	 * NOTE(review): presumably updates the ref itself after expiry —
	 * confirm at the call sites of refs_reflog_expire().
	 */
	EXPIRE_REFLOGS_UPDATE_REF = 1 << 1,
	/*
	 * NOTE(review): presumably rewrites surviving entries so their
	 * old-oid chain stays contiguous after pruning — confirm.
	 */
	EXPIRE_REFLOGS_REWRITE = 1 << 2,
};
/*
* The following interface is used for reflog expiration. The caller
* calls refs_reflog_expire(), supplying it with three callback functions,
* of the following types. The callback functions define the
* expiration policy that is desired.
*
* reflog_expiry_prepare_fn -- Called once after the reference is
* locked. Called with the OID of the locked reference.
*
* reflog_expiry_should_prune_fn -- Called once for each entry in the
* existing reflog. It should return true iff that entry should be
* pruned.
*
* reflog_expiry_cleanup_fn -- Called once before the reference is
* unlocked again.
*/
typedef void reflog_expiry_prepare_fn(const char *refname,
const struct object_id *oid,
void *cb_data);
typedef int reflog_expiry_should_prune_fn(struct object_id *ooid,
struct object_id *noid,
const char *email,
timestamp_t timestamp, int tz,
const char *message, void *cb_data);
typedef void reflog_expiry_cleanup_fn(void *cb_data);
/*
* Expire reflog entries for the specified reference.
* flags is a combination of the constants in
* enum expire_reflog_flags. The three function pointers are described
* above. On success, return zero.
*/
int refs_reflog_expire(struct ref_store *refs,
const char *refname,
unsigned int flags,
reflog_expiry_prepare_fn prepare_fn,
reflog_expiry_should_prune_fn should_prune_fn,
reflog_expiry_cleanup_fn cleanup_fn,
void *policy_cb_data);
struct ref_store *get_main_ref_store(struct repository *r);
/**
* Submodules
* ----------
*
* If you want to iterate the refs of a submodule you first need to add the
* submodules object database. You can do this by a code-snippet like
* this:
*
* const char *path = "path/to/submodule"
* if (add_submodule_odb(path))
* die("Error submodule '%s' not populated.", path);
*
* `add_submodule_odb()` will return zero on success. If you
* do not do this you will get an error for each ref that it does not point
* to a valid object.
*
* Note: As a side-effect of this you cannot safely assume that all
* objects you lookup are available in superproject. All submodule objects
* will be available the same way as the superprojects objects.
*
* Example:
* --------
*
* ----
* static int handle_remote_ref(const char *refname,
* const unsigned char *sha1, int flags, void *cb_data)
* {
* struct strbuf *output = cb_data;
* strbuf_addf(output, "%s\n", refname);
* return 0;
* }
*
*/
/*
* Return the ref_store instance for the specified submodule. For the
* main repository, use submodule==NULL; such a call cannot fail. For
* a submodule, the submodule must exist and be a nonbare repository,
* otherwise return NULL. If the requested reference store has not yet
* been initialized, initialize it first.
*
* For backwards compatibility, submodule=="" is treated the same as
* submodule==NULL.
*/
struct ref_store *repo_get_submodule_ref_store(struct repository *repo,
const char *submodule);
struct ref_store *get_worktree_ref_store(const struct worktree *wt);
/*
* Some of the names specified by refs have special meaning to Git.
* Organize these namespaces in a common 'ref_namespace' array for
* reference from multiple places in the codebase.
*/
struct ref_namespace_info {
const char *ref;
enum decoration_type decoration;
/*
* If 'exact' is true, then we must match the 'ref' exactly.
* Otherwise, use a prefix match.
*
* 'ref_updated' is for internal use. It represents whether the
* 'ref' value was replaced from its original literal version.
*/
unsigned exact:1,
ref_updated:1;
};
/*
 * Indices into the ref_namespace[] array declared below; one entry per
 * special-purpose ref namespace. Entries may be modified at runtime via
 * update_ref_namespace().
 */
enum ref_namespace {
	NAMESPACE_HEAD,
	NAMESPACE_BRANCHES,
	NAMESPACE_TAGS,
	NAMESPACE_REMOTE_REFS,
	NAMESPACE_STASH,
	NAMESPACE_REPLACE,
	NAMESPACE_NOTES,
	NAMESPACE_PREFETCH,
	NAMESPACE_REWRITTEN,
	/* Must be last: used to size the ref_namespace[] array. */
	NAMESPACE__COUNT
};
/* See refs.c for the contents of this array. */
extern struct ref_namespace_info ref_namespace[NAMESPACE__COUNT];
/*
* Some ref namespaces can be modified by config values or environment
* variables. Modify a namespace as specified by its ref_namespace key.
*/
void update_ref_namespace(enum ref_namespace namespace, char *ref);
/*
* Check whether the provided name names a root reference. This function only
* performs a syntactic check.
*
* A root ref is a reference that lives in the root of the reference hierarchy.
* These references must conform to special syntax:
*
* - Their name must be all-uppercase or underscores ("_").
*
* - Their name must end with "_HEAD". As a special rule, "HEAD" is a root
* ref, as well.
*
* - Their name may not contain a slash.
*
* There is a special set of irregular root refs that exist due to historic
* reasons, only. This list shall not be expanded in the future:
*
* - AUTO_MERGE
*
* - BISECT_EXPECTED_REV
*
* - NOTES_MERGE_PARTIAL
*
* - NOTES_MERGE_REF
*
* - MERGE_AUTOSTASH
*/
int is_root_ref(const char *refname);
/*
* Pseudorefs are refs that have different semantics compared to
* "normal" refs. These refs can thus not be stored in the ref backend,
* but must always be accessed via the filesystem. The following refs
* are pseudorefs:
*
* - FETCH_HEAD may contain multiple object IDs, and each one of them
* carries additional metadata like where it came from.
*
* - MERGE_HEAD may contain multiple object IDs when merging multiple
* heads.
*
* Reading, writing or deleting references must consistently go either
* through the filesystem (pseudorefs) or through the reference
* backend (normal ones).
*/
int is_pseudo_ref(const char *refname);
/*
* The following flags can be passed to `repo_migrate_ref_storage_format()`:
*
* - REPO_MIGRATE_REF_STORAGE_FORMAT_DRYRUN: perform a dry-run migration
* without touching the main repository. The result will be written into a
* temporary ref storage directory.
*
* - REPO_MIGRATE_REF_STORAGE_FORMAT_SKIP_REFLOG: skip migration of reflogs.
*/
#define REPO_MIGRATE_REF_STORAGE_FORMAT_DRYRUN (1 << 0)
#define REPO_MIGRATE_REF_STORAGE_FORMAT_SKIP_REFLOG (1 << 1)
/*
* Migrate the ref storage format used by the repository to the
* specified one.
*/
int repo_migrate_ref_storage_format(struct repository *repo,
enum ref_storage_format format,
unsigned int flags,
struct strbuf *err);
/*
* Reference iterators
*
* A reference iterator encapsulates the state of an in-progress
* iteration over references. Create an instance of `struct
* ref_iterator` via one of the functions in this module.
*
* A freshly-created ref_iterator doesn't yet point at a reference. To
* advance the iterator, call ref_iterator_advance(). If successful,
* this sets the iterator's refname, oid, and flags fields to describe
* the next reference and returns ITER_OK. The data pointed at by
* refname and oid belong to the iterator; if you want to retain them
* after calling ref_iterator_advance() again or calling
* ref_iterator_free(), you must make a copy. When the iteration has
* been exhausted, ref_iterator_advance() releases any resources
* associated with the iteration, frees the ref_iterator object, and
* returns ITER_DONE. If you want to abort the iteration early, call
* ref_iterator_free(), which also frees the ref_iterator object and
* any associated resources. If there was an internal error advancing
* to the next entry, ref_iterator_advance() aborts the iteration,
* frees the ref_iterator, and returns ITER_ERROR.
*
* Putting it all together, a typical iteration looks like this:
*
* int ok;
* struct ref_iterator *iter = ...;
*
* while ((ok = ref_iterator_advance(iter)) == ITER_OK) {
* if (want_to_stop_iteration()) {
* ok = ITER_DONE;
* break;
* }
*
* // Access information about the current reference:
* if (!(iter->flags & REF_ISSYMREF))
* printf("%s is %s\n", iter->refname, oid_to_hex(iter->oid));
* }
*
* if (ok != ITER_DONE)
* handle_error();
* ref_iterator_free(iter);
*/
struct ref_iterator;
/*
* These flags are passed to refs_ref_iterator_begin() (and do_for_each_ref(),
* which feeds it).
*/
enum do_for_each_ref_flags {
/*
* Include broken references in a do_for_each_ref*() iteration, which
* would normally be omitted. This includes both refs that point to
* missing objects (a true repository corruption), ones with illegal
* names (which we prefer not to expose to callers), as well as
* dangling symbolic refs (i.e., those that point to a non-existent
* ref; this is not a corruption, but as they have no valid oid, we
* omit them from normal iteration results).
*/
DO_FOR_EACH_INCLUDE_BROKEN = (1 << 0),
/*
* Only include per-worktree refs in a do_for_each_ref*() iteration.
* Normally this will be used with a files ref_store, since that's
* where all reference backends will presumably store their
* per-worktree refs.
*/
DO_FOR_EACH_PER_WORKTREE_ONLY = (1 << 1),
/*
* Omit dangling symrefs from output; this only has an effect with
* INCLUDE_BROKEN, since they are otherwise not included at all.
*/
DO_FOR_EACH_OMIT_DANGLING_SYMREFS = (1 << 2),
/*
* Include root refs i.e. HEAD and pseudorefs along with the regular
* refs.
*/
DO_FOR_EACH_INCLUDE_ROOT_REFS = (1 << 3),
};
/*
* Return an iterator that goes over each reference in `refs` for
* which the refname begins with prefix. If trim is non-zero, then
* trim that many characters off the beginning of each refname.
* The output is ordered by refname.
*/
struct ref_iterator *refs_ref_iterator_begin(
struct ref_store *refs,
const char *prefix, const char **exclude_patterns,
int trim, enum do_for_each_ref_flags flags);
/*
* Advance the iterator to the first or next item and return ITER_OK.
* If the iteration is exhausted, free the resources associated with
* the ref_iterator and return ITER_DONE. On errors, free the iterator
* resources and return ITER_ERROR. It is a bug to use ref_iterator or
* call this function again after it has returned ITER_DONE or
* ITER_ERROR.
*/
int ref_iterator_advance(struct ref_iterator *ref_iterator);
enum ref_iterator_seek_flag {
/*
* When the REF_ITERATOR_SEEK_SET_PREFIX flag is set, the iterator's prefix is
* updated to match the provided string, affecting all subsequent iterations. If
* not, the iterator seeks to the specified reference and clears any previously
* set prefix.
*/
REF_ITERATOR_SEEK_SET_PREFIX = (1 << 0),
};
/*
* Seek the iterator to the first reference matching the given seek string.
* The seek string is matched as a literal string, without regard for path
* separators. If seek is NULL or the empty string, seek the iterator to the
* first reference again.
*
* This function is expected to behave as if a new ref iterator has been
* created, but allows reuse of existing iterators for optimization.
*
* Returns 0 on success, a negative error code otherwise.
*/
int ref_iterator_seek(struct ref_iterator *ref_iterator, const char *refname,
unsigned int flags);
/* Free the reference iterator and any associated resources. */
void ref_iterator_free(struct ref_iterator *ref_iterator);
/*
* The common backend for the for_each_*ref* functions. Call fn for
* each reference in iter. If the iterator itself ever returns
* ITER_ERROR, return -1. If fn ever returns a non-zero value, stop
* the iteration and return that value. Otherwise, return 0. In any
* case, free the iterator when done. This function is basically an
* adapter between the callback style of reference iteration and the
* iterator style.
*/
int do_for_each_ref_iterator(struct ref_iterator *iter,
each_ref_fn fn, void *cb_data);
#endif /* REFS_H */
|
c
|
github
|
https://github.com/git/git
|
refs.h
|
#
# Module providing the `Process` class which emulates `threading.Thread`
#
# multiprocessing/process.py
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of author nor the names of any contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
__all__ = ['Process', 'current_process', 'active_children']
#
# Imports
#
import os
import sys
import signal
import itertools
#
#
#
# Remember the absolute path of the process's original working directory;
# None if it cannot be determined (e.g. the directory was deleted).
try:
    ORIGINAL_DIR = os.path.abspath(os.getcwd())
except OSError:
    ORIGINAL_DIR = None
#
# Public functions
#
def current_process():
    '''
    Return process object representing the current process
    '''
    # `_current_process` is rebound to the child's Process object inside
    # Process._bootstrap(), so this is correct in both parent and child.
    return _current_process
def active_children():
    '''
    Return list of process objects corresponding to live child processes
    '''
    # Reap children that have already terminated so they are not reported
    # as active; returns a snapshot copy of the child set.
    _cleanup()
    return list(_current_process._children)
#
#
#
def _cleanup():
    '''Discard children of the current process that have already finished.'''
    children = _current_process._children
    # Iterate over a snapshot since we mutate the set while scanning it.
    for child in list(children):
        if child._popen.poll() is not None:
            children.discard(child)
#
# The `Process` class
#
class Process(object):
    '''
    Process objects represent activity that is run in a separate process

    The class is analogous to `threading.Thread`.
    '''
    # Hook allowing tests/subclasses to substitute the Popen implementation;
    # when None, the platform default from .forking is used (see start()).
    _Popen = None

    def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):
        # `group` exists only for threading.Thread signature compatibility.
        assert group is None, 'group argument must be None for now'
        # Python 2 iterator protocol (`.next()`), consistent with the rest
        # of this module.
        count = _current_process._counter.next()
        # Identity is a tuple of child indices rooted at the main process,
        # e.g. (2, 1) is the first child of the main process's second child.
        self._identity = _current_process._identity + (count,)
        # Inherit auth key, daemon flag and temp dir from the creating process.
        self._authkey = _current_process._authkey
        self._daemonic = _current_process._daemonic
        self._tempdir = _current_process._tempdir
        self._parent_pid = os.getpid()
        self._popen = None          # set by start(); None means "not started"
        self._target = target
        self._args = tuple(args)
        self._kwargs = dict(kwargs)  # copy guards against the mutable default
        self._name = name or type(self).__name__ + '-' + \
                     ':'.join(str(i) for i in self._identity)

    def run(self):
        '''
        Method to be run in sub-process; can be overridden in sub-class
        '''
        if self._target:
            self._target(*self._args, **self._kwargs)

    def start(self):
        '''
        Start child process
        '''
        assert self._popen is None, 'cannot start a process twice'
        assert self._parent_pid == os.getpid(), \
               'can only start a process object created by current process'
        assert not _current_process._daemonic, \
               'daemonic processes are not allowed to have children'
        # Reap any already-finished children before adding a new one.
        _cleanup()
        if self._Popen is not None:
            Popen = self._Popen
        else:
            from .forking import Popen
        self._popen = Popen(self)
        _current_process._children.add(self)

    def terminate(self):
        '''
        Terminate process; sends SIGTERM signal or uses TerminateProcess()
        '''
        self._popen.terminate()

    def join(self, timeout=None):
        '''
        Wait until child process terminates
        '''
        assert self._parent_pid == os.getpid(), 'can only join a child process'
        assert self._popen is not None, 'can only join a started process'
        res = self._popen.wait(timeout)
        # wait() returning non-None means the child exited; forget it.
        if res is not None:
            _current_process._children.discard(self)

    def is_alive(self):
        '''
        Return whether process is alive
        '''
        if self is _current_process:
            return True
        assert self._parent_pid == os.getpid(), 'can only test a child process'
        if self._popen is None:
            # Never started.
            return False
        # poll() refreshes returncode as a side effect.
        self._popen.poll()
        return self._popen.returncode is None

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, name):
        # `basestring` is Python 2 (covers str and unicode).
        assert isinstance(name, basestring), 'name must be a string'
        self._name = name

    @property
    def daemon(self):
        '''
        Return whether process is a daemon
        '''
        return self._daemonic

    @daemon.setter
    def daemon(self, daemonic):
        '''
        Set whether process is a daemon
        '''
        # Daemon status can only change before the child is spawned.
        assert self._popen is None, 'process has already started'
        self._daemonic = daemonic

    @property
    def authkey(self):
        return self._authkey

    @authkey.setter
    def authkey(self, authkey):
        '''
        Set authorization key of process
        '''
        self._authkey = AuthenticationString(authkey)

    @property
    def exitcode(self):
        '''
        Return exit code of process or `None` if it has yet to stop
        '''
        if self._popen is None:
            # Not started: self._popen is None, which doubles as the
            # "no exit code yet" return value.
            return self._popen
        return self._popen.poll()

    @property
    def ident(self):
        '''
        Return identifier (PID) of process or `None` if it has yet to start
        '''
        if self is _current_process:
            return os.getpid()
        else:
            # None if not yet started, otherwise the child's pid.
            return self._popen and self._popen.pid

    pid = ident

    def __repr__(self):
        if self is _current_process:
            status = 'started'
        elif self._parent_pid != os.getpid():
            # Object observed from a process other than its creator (e.g.
            # after pickling); its state cannot be queried here.
            status = 'unknown'
        elif self._popen is None:
            status = 'initial'
        else:
            if self._popen.poll() is not None:
                status = self.exitcode
            else:
                status = 'started'
        if type(status) is int:
            # Numeric exit status: 0 is a clean stop; negative values map
            # to signal names via _exitcode_to_name.
            if status == 0:
                status = 'stopped'
            else:
                status = 'stopped[%s]' % _exitcode_to_name.get(status, status)
        return '<%s(%s, %s%s)>' % (type(self).__name__, self._name,
                                   status, self._daemonic and ' daemon' or '')

    ##

    def _bootstrap(self):
        # Entry point executed inside the freshly spawned child process.
        # Returns the exit code the child should terminate with.
        from . import util
        global _current_process
        try:
            # Fresh bookkeeping state: the child has no children of its own yet.
            self._children = set()
            self._counter = itertools.count(1)
            try:
                # Detach from the parent's stdin; best-effort only.
                sys.stdin.close()
                sys.stdin = open(os.devnull)
            except (OSError, ValueError):
                pass
            # From the child's perspective, it *is* the current process.
            _current_process = self
            # Finalizers registered in the parent must not run in the child.
            util._finalizer_registry.clear()
            util._run_after_forkers()
            util.info('child process calling self.run()')
            try:
                self.run()
                exitcode = 0
            finally:
                util._exit_function()
        except SystemExit, e:
            # Translate SystemExit into an integer exit code, mirroring the
            # interpreter's own behavior (message written to stderr -> 1).
            if not e.args:
                exitcode = 1
            elif type(e.args[0]) is int:
                exitcode = e.args[0]
            else:
                sys.stderr.write(e.args[0] + '\n')
                sys.stderr.flush()
                exitcode = 1
        except:
            # Any other exception: report the traceback and exit non-zero.
            exitcode = 1
            import traceback
            sys.stderr.write('Process %s:\n' % self.name)
            sys.stderr.flush()
            traceback.print_exc()
        util.info('process exiting with exitcode %d' % exitcode)
        return exitcode
#
# We subclass bytes to avoid accidental transmission of auth keys over network
#
class AuthenticationString(bytes):
    # Subclassing bytes lets us restrict pickling: an auth key may only be
    # serialized while a child process is being spawned, never in a context
    # where it could end up travelling over a network connection.
    def __reduce__(self):
        from .forking import Popen
        if not Popen.thread_is_spawning():
            raise TypeError(
                'Pickling an AuthenticationString object is '
                'disallowed for security reasons'
                )
        return AuthenticationString, (bytes(self),)
#
# Create object representing the main process
#
class _MainProcess(Process):
    # Represents the main process itself. Overrides Process.__init__
    # entirely, since there is no parent process to inherit state from.
    def __init__(self):
        self._identity = ()                 # root of the identity tuple tree
        self._daemonic = False
        self._name = 'MainProcess'
        self._parent_pid = None             # the main process has no parent
        self._popen = None
        self._counter = itertools.count(1)  # numbers direct child processes
        self._children = set()
        # Fresh random auth key for this process tree.
        self._authkey = AuthenticationString(os.urandom(32))
        self._tempdir = None

_current_process = _MainProcess()
# Only the singleton instance is needed; drop the class name.
del _MainProcess
#
# Give names to some return codes
#
_exitcode_to_name = {}
for name, signum in signal.__dict__.items():
if name[:3]=='SIG' and '_' not in name:
_exitcode_to_name[-signum] = name
|
unknown
|
codeparrot/codeparrot-clean
| ||
<?php declare(strict_types=1);

/*
 * This file is part of Composer.
 *
 * (c) Nils Adermann <naderman@naderman.de>
 *     Jordi Boggiano <j.boggiano@seld.be>
 *
 * For the full copyright and license information, please view the LICENSE
 * file that was distributed with this source code.
 */

// Bootstrap Composer's console application for the test suite.
require __DIR__ . '/../vendor/autoload.php';

// Flag that lets Composer internals detect they are running under tests.
Composer\Util\Platform::putEnv('COMPOSER_TESTS_ARE_RUNNING', '1');

// The Application instance is handed back to the including script.
return new Composer\Console\Application();
|
php
|
github
|
https://github.com/composer/composer
|
tests/console-application.php
|
'use strict';
const common = require('../common.js');
const querystring = require('querystring');
const searchParams = common.searchParams;

// Benchmark comparing legacy querystring.stringify() against WHATWG
// URLSearchParams#toString() across the fixtures from ../common.js.
const bench = common.createBenchmark(main, {
  searchParam: Object.keys(searchParams),
  method: ['legacy', 'whatwg'],
  n: [1e6],
});
function useLegacy(n, input, prop) {
  // Parse the raw query string once; only stringification is timed.
  const parsed = querystring.parse(input);
  // Warm-up call so one-time setup costs stay outside the timed region.
  querystring.stringify(parsed);
  bench.start();
  let remaining = n;
  while (remaining--) {
    querystring.stringify(parsed);
  }
  bench.end(n);
}
function useWHATWG(n, param, prop) {
  // Construct the URLSearchParams once; only serialization is timed.
  const params = new URLSearchParams(param);
  // Warm-up call outside the timed region.
  params.toString();
  bench.start();
  let remaining = n;
  while (remaining--) {
    params.toString();
  }
  bench.end(n);
}
function main({ searchParam, n, method }) {
  // Look up the fixture for the requested search-parameter type.
  const param = searchParams[searchParam];
  if (!param) {
    throw new Error(`Unknown search parameter type "${searchParam}"`);
  }
  // Dispatch to the requested implementation.
  if (method === 'legacy') {
    useLegacy(n, param);
  } else if (method === 'whatwg') {
    useWHATWG(n, param);
  } else {
    throw new Error(`Unknown method ${method}`);
  }
}
|
javascript
|
github
|
https://github.com/nodejs/node
|
benchmark/url/legacy-vs-whatwg-url-searchparams-serialize.js
|
from __future__ import annotations
from datetime import (
datetime,
timedelta,
tzinfo,
)
from typing import (
TYPE_CHECKING,
Self,
TypeVar,
cast,
overload,
)
import warnings
import numpy as np
from pandas._config import using_string_dtype
from pandas._config.config import get_option
from pandas._libs import (
lib,
tslib,
)
from pandas._libs.tslibs import (
BaseOffset,
NaT,
NaTType,
Resolution,
Timestamp,
astype_overflowsafe,
fields,
get_resolution,
get_supported_dtype,
get_unit_from_dtype,
ints_to_pydatetime,
is_date_array_normalized,
is_supported_dtype,
is_unitless,
normalize_i8_timestamps,
timezones,
to_offset,
tz_convert_from_utc,
tzconversion,
)
from pandas._libs.tslibs.dtypes import abbrev_to_npy_unit
from pandas.errors import PerformanceWarning
from pandas.util._decorators import set_module
from pandas.util._exceptions import find_stack_level
from pandas.util._validators import validate_inclusive
from pandas.core.dtypes.common import (
DT64NS_DTYPE,
INT64_DTYPE,
is_bool_dtype,
is_float_dtype,
is_string_dtype,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import (
DatetimeTZDtype,
ExtensionDtype,
PeriodDtype,
)
from pandas.core.dtypes.missing import isna
from pandas.core.arrays import datetimelike as dtl
from pandas.core.arrays._ranges import generate_regular_range
import pandas.core.common as com
from pandas.tseries.frequencies import get_period_alias
from pandas.tseries.offsets import (
Day,
Tick,
)
if TYPE_CHECKING:
from collections.abc import (
Callable,
Generator,
Iterator,
)
from pandas._typing import (
ArrayLike,
DateTimeErrorChoices,
DtypeObj,
IntervalClosedType,
TimeAmbiguous,
TimeNonexistent,
TimeUnit,
npt,
)
from pandas import (
DataFrame,
Timedelta,
)
from pandas.core.arrays import PeriodArray
_TimestampNoneT1 = TypeVar("_TimestampNoneT1", Timestamp, None)
_TimestampNoneT2 = TypeVar("_TimestampNoneT2", Timestamp, None)
_ITER_CHUNKSIZE = 10_000
@overload
def tz_to_dtype(tz: tzinfo, unit: TimeUnit = ...) -> DatetimeTZDtype: ...
@overload
def tz_to_dtype(tz: None, unit: TimeUnit = ...) -> np.dtype[np.datetime64]: ...
def tz_to_dtype(
    tz: tzinfo | None, unit: TimeUnit = "ns"
) -> np.dtype[np.datetime64] | DatetimeTZDtype:
    """
    Return the dtype describing datetime data with the given timezone.

    Parameters
    ----------
    tz : tzinfo or None
        Timezone of the data; ``None`` means tz-naive.
    unit : str, default "ns"
        Resolution of the datetime64 values.

    Returns
    -------
    np.dtype or DatetimeTZDtype
        A plain ``M8[unit]`` numpy dtype when ``tz`` is ``None``, otherwise
        a ``DatetimeTZDtype`` carrying both the unit and the timezone.
    """
    if tz is not None:
        return DatetimeTZDtype(tz=tz, unit=unit)
    return np.dtype(f"M8[{unit}]")
def _field_accessor(name: str, field: str, docstring: str | None = None):
    # Build a read-only property that extracts `field` (e.g. "year",
    # "is_month_start") from the array's local-time timestamp values.
    def f(self):
        values = self._local_timestamps()
        if field in self._bool_ops:
            result: np.ndarray
            if field.endswith(("start", "end")):
                # is_*_start / is_*_end need the frequency to decide where a
                # period boundary falls; default anchor month is December.
                freq = self.freq
                month_kw = 12
                if freq:
                    kwds = freq.kwds
                    month_kw = kwds.get("startingMonth", kwds.get("month", month_kw))
                if freq is not None:
                    freq_name = freq.name
                else:
                    freq_name = None
                result = fields.get_start_end_field(
                    values, field, freq_name, month_kw, reso=self._creso
                )
            else:
                result = fields.get_date_field(values, field, reso=self._creso)
            # these return a boolean by-definition
            return result
        result = fields.get_date_field(values, field, reso=self._creso)
        # Non-boolean fields: mask missing entries, which requires converting
        # the integer result to float64 so NaN can be represented.
        result = self._maybe_mask_results(result, fill_value=None, convert="float64")
        return result
    # Present the generated function under the public accessor's name/doc.
    f.__name__ = name
    f.__doc__ = docstring
    return property(f)
@set_module("pandas.arrays")
class DatetimeArray(dtl.TimelikeOps, dtl.DatelikeOps):
"""
Pandas ExtensionArray for tz-naive or tz-aware datetime data.
.. warning::
DatetimeArray is currently experimental, and its API may change
without warning. In particular, :attr:`DatetimeArray.dtype` is
expected to change to always be an instance of an ``ExtensionDtype``
subclass.
Parameters
----------
data : Series, Index, DatetimeArray, ndarray
The datetime data.
For DatetimeArray `values` (or a Series or Index boxing one),
`dtype` and `freq` will be extracted from `values`.
dtype : numpy.dtype or DatetimeTZDtype
Note that the only NumPy dtype allowed is 'datetime64[ns]'.
freq : str or Offset, optional
The frequency.
copy : bool, default False
Whether to copy the underlying array of values.
Attributes
----------
None
Methods
-------
None
See Also
--------
DatetimeIndex : Immutable Index for datetime-like data.
Series : One-dimensional labeled array capable of holding datetime-like data.
Timestamp : Pandas replacement for python datetime.datetime object.
to_datetime : Convert argument to datetime.
period_range : Return a fixed frequency PeriodIndex.
Examples
--------
>>> pd.arrays.DatetimeArray._from_sequence(
... pd.DatetimeIndex(["2023-01-01", "2023-01-02"], freq="D")
... )
<DatetimeArray>
['2023-01-01 00:00:00', '2023-01-02 00:00:00']
Length: 2, dtype: datetime64[us]
"""
_typ = "datetimearray"
_recognized_scalars = (datetime, np.datetime64)
_is_recognized_dtype: Callable[[DtypeObj], bool] = lambda x: lib.is_np_dtype(
x, "M"
) or isinstance(x, DatetimeTZDtype)
_infer_matches = ("datetime", "datetime64", "date")
@property
def _internal_fill_value(self) -> np.datetime64:
return np.datetime64("NaT", self.unit)
@property
def _scalar_type(self) -> type[Timestamp]:
return Timestamp
# define my properties & methods for delegation
_bool_ops: list[str] = [
"is_month_start",
"is_month_end",
"is_quarter_start",
"is_quarter_end",
"is_year_start",
"is_year_end",
"is_leap_year",
]
_field_ops: list[str] = [
"year",
"month",
"day",
"hour",
"minute",
"second",
"weekday",
"dayofweek",
"day_of_week",
"dayofyear",
"day_of_year",
"quarter",
"days_in_month",
"daysinmonth",
"microsecond",
"nanosecond",
]
_other_ops: list[str] = ["date", "time", "timetz"]
_datetimelike_ops: list[str] = (
_field_ops + _bool_ops + _other_ops + ["unit", "freq", "tz"]
)
_datetimelike_methods: list[str] = [
"to_period",
"tz_localize",
"tz_convert",
"normalize",
"strftime",
"round",
"floor",
"ceil",
"month_name",
"day_name",
"as_unit",
]
# ndim is inherited from ExtensionArray, must exist to ensure
# Timestamp.__richcmp__(DateTimeArray) operates pointwise
# ensure that operations with numpy arrays defer to our implementation
__array_priority__ = 1000
# -----------------------------------------------------------------
# Constructors
_dtype: np.dtype[np.datetime64] | DatetimeTZDtype
_freq: BaseOffset | None = None
@classmethod
def _validate_dtype(cls, values, dtype):
# used in TimeLikeOps.__init__
dtype = _validate_dt64_dtype(dtype)
_validate_dt64_dtype(values.dtype)
if isinstance(dtype, np.dtype):
if values.dtype != dtype:
raise ValueError("Values resolution does not match dtype.")
else:
vunit = np.datetime_data(values.dtype)[0]
if vunit != dtype.unit:
raise ValueError("Values resolution does not match dtype.")
return dtype
# error: Signature of "_simple_new" incompatible with supertype "NDArrayBacked"
@classmethod
def _simple_new( # type: ignore[override]
cls,
values: npt.NDArray[np.datetime64],
freq: BaseOffset | None = None,
dtype: np.dtype[np.datetime64] | DatetimeTZDtype = DT64NS_DTYPE,
) -> Self:
assert isinstance(values, np.ndarray)
assert dtype.kind == "M"
if isinstance(dtype, np.dtype):
assert dtype == values.dtype
assert not is_unitless(dtype)
else:
# DatetimeTZDtype. If we have e.g. DatetimeTZDtype[us, UTC],
# then values.dtype should be M8[us].
assert dtype._creso == get_unit_from_dtype(values.dtype)
result = super()._simple_new(values, dtype)
result._freq = freq
return result
@classmethod
def _from_sequence(cls, scalars, *, dtype=None, copy: bool = False) -> Self:
return cls._from_sequence_not_strict(scalars, dtype=dtype, copy=copy)
@classmethod
def _from_sequence_not_strict(
cls,
data,
*,
dtype=None,
copy: bool = False,
tz=lib.no_default,
freq: str | BaseOffset | lib.NoDefault | None = lib.no_default,
dayfirst: bool = False,
yearfirst: bool = False,
ambiguous: TimeAmbiguous = "raise",
) -> Self:
"""
A non-strict version of _from_sequence, called from DatetimeIndex.__new__.
"""
# if the user either explicitly passes tz=None or a tz-naive dtype, we
# disallows inferring a tz.
explicit_tz_none = tz is None
if tz is lib.no_default:
tz = None
else:
tz = timezones.maybe_get_tz(tz)
dtype = _validate_dt64_dtype(dtype)
# if dtype has an embedded tz, capture it
tz = _validate_tz_from_dtype(dtype, tz, explicit_tz_none)
unit = None
if dtype is not None:
unit = dtl.dtype_to_unit(dtype)
data, copy = dtl.ensure_arraylike_for_datetimelike(
data, copy, cls_name="DatetimeArray"
)
inferred_freq = None
if isinstance(data, DatetimeArray):
inferred_freq = data.freq
subarr, tz = _sequence_to_dt64(
data,
copy=copy,
tz=tz,
dayfirst=dayfirst,
yearfirst=yearfirst,
ambiguous=ambiguous,
out_unit=unit,
)
# We have to call this again after possibly inferring a tz above
_validate_tz_from_dtype(dtype, tz, explicit_tz_none)
if tz is not None and explicit_tz_none:
raise ValueError(
"Passed data is timezone-aware, incompatible with 'tz=None'. "
"Use obj.tz_localize(None) instead."
)
data_unit = np.datetime_data(subarr.dtype)[0]
data_unit = cast("TimeUnit", data_unit)
data_dtype = tz_to_dtype(tz, data_unit)
result = cls._simple_new(subarr, freq=inferred_freq, dtype=data_dtype)
if unit is not None and unit != result.unit:
# If unit was specified in user-passed dtype, cast to it here
# error: Argument 1 to "as_unit" of "TimelikeOps" has
# incompatible type "str"; expected "Literal['s', 'ms', 'us', 'ns']"
# [arg-type]
result = result.as_unit(unit) # type: ignore[arg-type]
validate_kwds = {"ambiguous": ambiguous}
result._maybe_pin_freq(freq, validate_kwds)
return result
@classmethod
def _generate_range(
cls,
start,
end,
periods: int | None,
freq,
tz=None,
normalize: bool = False,
ambiguous: TimeAmbiguous = "raise",
nonexistent: TimeNonexistent = "raise",
inclusive: IntervalClosedType = "both",
*,
unit: TimeUnit = "ns",
) -> Self:
periods = dtl.validate_periods(periods)
if freq is None and any(x is None for x in [periods, start, end]):
raise ValueError("Must provide freq argument if no data is supplied")
if com.count_not_none(start, end, periods, freq) != 3:
raise ValueError(
"Of the four parameters: start, end, periods, "
"and freq, exactly three must be specified"
)
freq = to_offset(freq)
if start is not None:
start = Timestamp(start)
if end is not None:
end = Timestamp(end)
if start is NaT or end is NaT:
raise ValueError("Neither `start` nor `end` can be NaT")
if unit is not None:
if unit not in ["s", "ms", "us", "ns"]:
raise ValueError("'unit' must be one of 's', 'ms', 'us', 'ns'")
else:
unit = "ns"
if start is not None:
start = start.as_unit(unit, round_ok=False)
if end is not None:
end = end.as_unit(unit, round_ok=False)
left_inclusive, right_inclusive = validate_inclusive(inclusive)
start, end = _maybe_normalize_endpoints(start, end, normalize)
tz = _infer_tz_from_endpoints(start, end, tz)
if tz is not None:
# Localize the start and end arguments
start = _maybe_localize_point(start, freq, tz, ambiguous, nonexistent)
end = _maybe_localize_point(end, freq, tz, ambiguous, nonexistent)
if freq is not None:
# Offset handling:
# Ticks (fixed-duration like hours/minutes): keep tz; do absolute-time math.
# Other calendar offsets: drop tz; do naive wall time; localize once later
# so `ambiguous`/`nonexistent` are applied correctly.
if not isinstance(freq, Tick):
if start is not None and start.tz is not None:
start = start.tz_localize(None)
if end is not None and end.tz is not None:
end = end.tz_localize(None)
if isinstance(freq, (Tick, Day)):
i8values = generate_regular_range(start, end, periods, freq, unit=unit)
else:
xdr = _generate_range(
start=start, end=end, periods=periods, offset=freq, unit=unit
)
i8values = np.array([x._value for x in xdr], dtype=np.int64)
endpoint_tz = start.tz if start is not None else end.tz
if tz is not None and endpoint_tz is None:
if not timezones.is_utc(tz):
# short-circuit tz_localize_to_utc which would make
# an unnecessary copy with UTC but be a no-op.
creso = abbrev_to_npy_unit(unit)
i8values = tzconversion.tz_localize_to_utc(
i8values,
tz,
ambiguous=ambiguous,
nonexistent=nonexistent,
creso=creso,
)
# i8values is localized datetime64 array -> have to convert
# start/end as well to compare
if start is not None:
start = start.tz_localize(tz, ambiguous, nonexistent)
if end is not None:
end = end.tz_localize(tz, ambiguous, nonexistent)
else:
# Create a linearly spaced date_range in local time
# Nanosecond-granularity timestamps aren't always correctly
# representable with doubles, so we limit the range that we
# pass to np.linspace as much as possible
periods = cast(int, periods)
i8values = (
np.linspace(0, end._value - start._value, periods, dtype="int64")
+ start._value
)
if i8values.dtype != "i8":
# 2022-01-09 I (brock) am not sure if it is possible for this
# to overflow and cast to e.g. f8, but if it does we need to cast
i8values = i8values.astype("i8")
if start == end:
if not left_inclusive and not right_inclusive:
i8values = i8values[1:-1]
else:
start_i8 = Timestamp(start)._value
end_i8 = Timestamp(end)._value
if not left_inclusive or not right_inclusive:
if not left_inclusive and len(i8values) and i8values[0] == start_i8:
i8values = i8values[1:]
if not right_inclusive and len(i8values) and i8values[-1] == end_i8:
i8values = i8values[:-1]
dt64_values = i8values.view(f"datetime64[{unit}]")
dtype = tz_to_dtype(tz, unit=unit)
return cls._simple_new(dt64_values, freq=freq, dtype=dtype)
# -----------------------------------------------------------------
# DatetimeLike Interface
    def _unbox_scalar(self, value) -> np.datetime64:
        # Convert a Timestamp (or NaT) scalar into a raw np.datetime64 in
        # this array's unit, suitable for storage in the backing ndarray.
        if not isinstance(value, self._scalar_type) and value is not NaT:
            raise ValueError("'value' should be a Timestamp.")
        # Raises if value's tz-awareness conflicts with this array's.
        self._check_compatible_with(value)
        if value is NaT:
            # NaT._value is the iNaT sentinel; reinterpret it in our unit.
            return np.datetime64(value._value, self.unit)
        else:
            # round_ok=False: refuse silently-lossy conversion to a coarser unit.
            return value.as_unit(self.unit, round_ok=False).asm8
def _scalar_from_string(self, value) -> Timestamp | NaTType:
return Timestamp(value, tz=self.tz)
def _check_compatible_with(self, other) -> None:
if other is NaT:
return
self._assert_tzawareness_compat(other)
# -----------------------------------------------------------------
# Descriptive Properties
def _box_func(self, x: np.datetime64) -> Timestamp | NaTType:
# GH#42228
value = x.view("i8")
ts = Timestamp._from_value_and_reso(value, reso=self._creso, tz=self.tz)
return ts
    @property
    # error: Return type "Union[dtype, DatetimeTZDtype]" of "dtype"
    # incompatible with return type "ExtensionDtype" in supertype
    # "ExtensionArray"
    def dtype(self) -> np.dtype[np.datetime64] | DatetimeTZDtype: # type: ignore[override]
        """
        The dtype for the DatetimeArray.

        .. warning::

           A future version of pandas will change dtype to never be a
           ``numpy.dtype``. Instead, :attr:`DatetimeArray.dtype` will
           always be an instance of an ``ExtensionDtype`` subclass.

        Returns
        -------
        numpy.dtype or DatetimeTZDtype
            If the values are tz-naive, then ``np.dtype('datetime64[ns]')``
            is returned.

            If the values are tz-aware, then the ``DatetimeTZDtype``
            is returned.
        """
        return self._dtype
@property
    def tz(self) -> tzinfo | None:
        """
        Return the timezone.

        This property returns the timezone information associated with the
        datetime data. If the data is timezone-naive (i.e. has no timezone
        information), it returns None.

        Returns
        -------
        zoneinfo.ZoneInfo, datetime.tzinfo, pytz.tzinfo.BaseTZInfo, dateutil.tz.tz.tzfile, or None
            Returns None when the array is tz-naive.

        See Also
        --------
        DatetimeIndex.tz_localize : Localize tz-naive DatetimeIndex to a
            given time zone, or remove timezone from a tz-aware DatetimeIndex.
        DatetimeIndex.tz_convert : Convert tz-aware DatetimeIndex from
            one time zone to another.

        Examples
        --------
        For Series:

        >>> s = pd.Series(["1/1/2020 10:00:00+00:00", "2/1/2020 11:00:00+00:00"])
        >>> s = pd.to_datetime(s)
        >>> s
        0   2020-01-01 10:00:00+00:00
        1   2020-02-01 11:00:00+00:00
        dtype: datetime64[us, UTC]
        >>> s.dt.tz
        datetime.timezone.utc

        For DatetimeIndex:

        >>> idx = pd.DatetimeIndex(
        ...     ["1/1/2020 10:00:00+00:00", "2/1/2020 11:00:00+00:00"]
        ... )
        >>> idx.tz
        datetime.timezone.utc
        """ # noqa: E501
        # GH 18595
        # tz lives on the dtype (DatetimeTZDtype) for aware data; plain
        # np.dtype has no "tz" attribute, so naive data yields None.
        return getattr(self.dtype, "tz", None)
@tz.setter
def tz(self, value):
# GH 3746: Prevent localizing or converting the index by setting tz
raise AttributeError(
"Cannot directly set timezone. Use tz_localize() "
"or tz_convert() as appropriate"
)
    @property
    def tzinfo(self) -> tzinfo | None:
        """
        Alias for tz attribute
        """
        # Mirrors datetime.datetime.tzinfo; None for tz-naive data.
        return self.tz
    @property  # NB: override with cache_readonly in immutable subclasses
    def is_normalized(self) -> bool:
        """
        Returns True if all of the dates are at midnight ("no time")
        """
        # Checked in local (wall) time for tz-aware data, at this
        # array's resolution.
        return is_date_array_normalized(self.asi8, self.tz, reso=self._creso)
    @property  # NB: override with cache_readonly in immutable subclasses
    def _resolution_obj(self) -> Resolution:
        # Finest Resolution (day/hour/.../nanosecond) needed to represent
        # the data, computed from the local wall times.
        return get_resolution(self.asi8, self.tz, reso=self._creso)
# ----------------------------------------------------------------
# Array-Like / EA-Interface Methods
def __array__(self, dtype=None, copy=None) -> np.ndarray:
if dtype is None and self.tz:
# The default for tz-aware is object, to preserve tz info
dtype = object
return super().__array__(dtype=dtype, copy=copy)
    def __iter__(self) -> Iterator:
        """
        Return an iterator over the boxed values

        Yields
        ------
        tstamp : Timestamp
        """
        if self.ndim > 1:
            # 2D case: yield each row (itself an array), not scalars.
            for i in range(len(self)):
                yield self[i]
        else:
            # convert in chunks of 10k for efficiency
            data = self.asi8
            length = len(self)
            chunksize = _ITER_CHUNKSIZE
            chunks = (length // chunksize) + 1
            for i in range(chunks):
                start_i = i * chunksize
                # min() clamps the final (possibly partial) chunk.
                end_i = min((i + 1) * chunksize, length)
                # Box the i8 slice as Timestamps with our tz/resolution.
                converted = ints_to_pydatetime(
                    data[start_i:end_i],
                    tz=self.tz,
                    box="timestamp",
                    reso=self._creso,
                )
                yield from converted
    def astype(self, dtype, copy: bool = True):
        """
        Cast to the given dtype.

        Handles datetime64 unit changes, rejects tz-aware <-> tz-naive
        conversions (which require tz_localize/tz_convert), and converts
        PeriodDtype via to_period; everything else is delegated to
        ``DatetimeLikeArrayMixin.astype``.
        """
        # We handle
        # --> datetime
        # --> period
        # DatetimeLikeArrayMixin Super handles the rest.
        dtype = pandas_dtype(dtype)
        if dtype == self.dtype:
            # No-op cast; honor the copy flag.
            if copy:
                return self.copy()
            return self
        elif isinstance(dtype, ExtensionDtype):
            if not isinstance(dtype, DatetimeTZDtype):
                # e.g. Sparse[datetime64[ns]]
                return super().astype(dtype, copy=copy)
            elif self.tz is None:
                # pre-2.0 this did self.tz_localize(dtype.tz), which did not match
                # the Series behavior which did
                # values.tz_localize("UTC").tz_convert(dtype.tz)
                raise TypeError(
                    "Cannot use .astype to convert from timezone-naive dtype to "
                    "timezone-aware dtype. Use obj.tz_localize instead or "
                    "series.dt.tz_localize instead"
                )
            else:
                # tzaware unit conversion e.g. datetime64[s, UTC]
                np_dtype = np.dtype(dtype.str)
                # astype_overflowsafe raises instead of silently wrapping
                # on out-of-bounds values.
                res_values = astype_overflowsafe(self._ndarray, np_dtype, copy=copy)
                return type(self)._simple_new(res_values, dtype=dtype, freq=self.freq)
        elif (
            self.tz is None
            and lib.is_np_dtype(dtype, "M")
            and not is_unitless(dtype)
            and is_supported_dtype(dtype)
        ):
            # unit conversion e.g. datetime64[s]
            res_values = astype_overflowsafe(self._ndarray, dtype, copy=True)
            return type(self)._simple_new(res_values, dtype=res_values.dtype)
            # TODO: preserve freq?
        elif self.tz is not None and lib.is_np_dtype(dtype, "M"):
            # pre-2.0 behavior for DTA/DTI was
            # values.tz_convert("UTC").tz_localize(None), which did not match
            # the Series behavior
            raise TypeError(
                "Cannot use .astype to convert from timezone-aware dtype to "
                "timezone-naive dtype. Use obj.tz_localize(None) or "
                "obj.tz_convert('UTC').tz_localize(None) instead."
            )
        elif (
            self.tz is None
            and lib.is_np_dtype(dtype, "M")
            and dtype != self.dtype
            and is_unitless(dtype)
        ):
            # Bare "datetime64" with no unit is ambiguous; require one.
            raise TypeError(
                "Casting to unit-less dtype 'datetime64' is not supported. "
                "Pass e.g. 'datetime64[ns]' instead."
            )
        elif isinstance(dtype, PeriodDtype):
            return self.to_period(freq=dtype.freq)
        return dtl.DatetimeLikeArrayMixin.astype(self, dtype, copy)
# -----------------------------------------------------------------
# Rendering Methods
def _format_native_types(
self, *, na_rep: str | float = "NaT", date_format=None, **kwargs
) -> npt.NDArray[np.object_]:
if date_format is None and self._is_dates_only:
# Only dates and no timezone: provide a default format
date_format = "%Y-%m-%d"
return tslib.format_array_from_datetime(
self.asi8, tz=self.tz, format=date_format, na_rep=na_rep, reso=self._creso
)
# -----------------------------------------------------------------
# Comparison Methods
    def _assert_tzawareness_compat(self, other) -> None:
        # adapted from _Timestamp._assert_tzawareness_compat
        # Raise TypeError when comparing tz-naive with tz-aware data.
        other_tz = getattr(other, "tzinfo", None)
        other_dtype = getattr(other, "dtype", None)
        if isinstance(other_dtype, DatetimeTZDtype):
            # Get tzinfo from Series dtype
            other_tz = other.dtype.tz
        if other is NaT:
            # pd.NaT quacks both aware and naive
            pass
        elif self.tz is None:
            if other_tz is not None:
                raise TypeError(
                    "Cannot compare tz-naive and tz-aware datetime-like objects."
                )
        elif other_tz is None:
            raise TypeError(
                "Cannot compare tz-naive and tz-aware datetime-like objects"
            )
# -----------------------------------------------------------------
# Arithmetic Methods
    def _add_offset(self, offset: BaseOffset) -> Self:
        # Apply a non-Tick DateOffset elementwise. Tick (fixed-duration)
        # offsets are handled by the timedelta-addition path instead.
        assert not isinstance(offset, Tick)
        if self.tz is not None:
            # DateOffsets operate on wall times; strip the tz and
            # re-localize at the end.
            values = self.tz_localize(None)
        else:
            values = self
        try:
            # Fast vectorized path, if the offset implements it.
            res_values = offset._apply_array(values._ndarray)
            if res_values.dtype.kind == "i":
                # error: Argument 1 to "view" of "ndarray" has
                # incompatible type
                # "dtype[datetime64[date | int | None]] | DatetimeTZDtype";
                # expected "dtype[Any] | _HasDType[dtype[Any]]" [arg-type]
                res_values = res_values.view(values.dtype)  # type: ignore[arg-type]
        except NotImplementedError:
            # Slow fallback: apply the offset object-by-object.
            if get_option("performance_warnings"):
                warnings.warn(
                    "Non-vectorized DateOffset being applied to Series or "
                    "DatetimeIndex.",
                    PerformanceWarning,
                    stacklevel=find_stack_level(),
                )
            res_values = self.astype("O") + offset
            result = type(self)._from_sequence(res_values, dtype=self.dtype)
        else:
            result = type(self)._simple_new(res_values, dtype=res_values.dtype)
            if offset.normalize:
                result = result.normalize()
                # After normalizing, the original freq no longer holds.
                result._freq = None
            if self.tz is not None:
                result = result.tz_localize(self.tz)
        return result
# -----------------------------------------------------------------
# Timezone Conversion and Localization Methods
def _local_timestamps(self) -> npt.NDArray[np.int64]:
"""
Convert to an i8 (unix-like nanosecond timestamp) representation
while keeping the local timezone and not using UTC.
This is used to calculate time-of-day information as if the timestamps
were timezone-naive.
"""
if self.tz is None or timezones.is_utc(self.tz):
# Avoid the copy that would be made in tzconversion
return self.asi8
return tz_convert_from_utc(self.asi8, self.tz, reso=self._creso)
    def tz_convert(self, tz) -> Self:
        """
        Convert tz-aware Datetime Array/Index from one time zone to another.

        This method converts datetime values from their current timezone to a
        different timezone. The underlying UTC time remains the same, only the
        local time representation changes to reflect the target timezone.

        Parameters
        ----------
        tz : str, zoneinfo.ZoneInfo, pytz.timezone, dateutil.tz.tzfile, datetime.tzinfo or None
            Time zone for time. Corresponding timestamps would be converted
            to this time zone of the Datetime Array/Index. A `tz` of None will
            convert to UTC and remove the timezone information.

        Returns
        -------
        Array or Index
            Datetime Array/Index with target `tz`.

        Raises
        ------
        TypeError
            If Datetime Array/Index is tz-naive.

        See Also
        --------
        DatetimeIndex.tz : A timezone that has a variable offset from UTC.
        DatetimeIndex.tz_localize : Localize tz-naive DatetimeIndex to a
            given time zone, or remove timezone from a tz-aware DatetimeIndex.

        Examples
        --------
        With the `tz` parameter, we can change the DatetimeIndex
        to other time zones:

        >>> dti = pd.date_range(
        ...     start="2014-08-01 09:00", freq="h", periods=3, tz="Europe/Berlin"
        ... )
        >>> dti
        DatetimeIndex(['2014-08-01 09:00:00+02:00',
                       '2014-08-01 10:00:00+02:00',
                       '2014-08-01 11:00:00+02:00'],
                      dtype='datetime64[us, Europe/Berlin]', freq='h')
        >>> dti.tz_convert("US/Central")
        DatetimeIndex(['2014-08-01 02:00:00-05:00',
                       '2014-08-01 03:00:00-05:00',
                       '2014-08-01 04:00:00-05:00'],
                      dtype='datetime64[us, US/Central]', freq='h')

        With the ``tz=None``, we can remove the timezone (after converting
        to UTC if necessary):

        >>> dti = pd.date_range(
        ...     start="2014-08-01 09:00", freq="h", periods=3, tz="Europe/Berlin"
        ... )
        >>> dti
        DatetimeIndex(['2014-08-01 09:00:00+02:00',
                       '2014-08-01 10:00:00+02:00',
                       '2014-08-01 11:00:00+02:00'],
                      dtype='datetime64[us, Europe/Berlin]', freq='h')
        >>> dti.tz_convert(None)
        DatetimeIndex(['2014-08-01 07:00:00',
                       '2014-08-01 08:00:00',
                       '2014-08-01 09:00:00'],
                      dtype='datetime64[us]', freq='h')
        """ # noqa: E501
        tz = timezones.maybe_get_tz(tz)
        if self.tz is None:
            # tz naive, use tz_localize
            raise TypeError(
                "Cannot convert tz-naive timestamps, use tz_localize to localize"
            )
        # No conversion since timestamps are all UTC to begin with
        # (only the dtype's tz changes; the stored i8 values are reused).
        dtype = tz_to_dtype(tz, unit=self.unit)
        new_freq = None
        if isinstance(self.freq, Tick):
            # Tick freqs (fixed durations) survive a tz change; calendar
            # freqs may not, so they are dropped.
            new_freq = self.freq
        return self._simple_new(self._ndarray, dtype=dtype, freq=new_freq)
    @dtl.ravel_compat
    def tz_localize(
        self,
        tz,
        ambiguous: TimeAmbiguous = "raise",
        nonexistent: TimeNonexistent = "raise",
    ) -> Self:
        """
        Localize tz-naive Datetime Array/Index to tz-aware Datetime Array/Index.

        This method takes a time zone (tz) naive Datetime Array/Index object
        and makes this time zone aware. It does not move the time to another
        time zone.

        This method can also be used to do the inverse -- to create a time
        zone unaware object from an aware object. To that end, pass `tz=None`.

        Parameters
        ----------
        tz : str, zoneinfo.ZoneInfo, pytz.timezone, dateutil.tz.tzfile, datetime.tzinfo or None
            Time zone to convert timestamps to. Passing ``None`` will
            remove the time zone information preserving local time.
        ambiguous : 'infer', 'NaT', bool array, default 'raise'
            When clocks moved backward due to DST, ambiguous times may arise.
            For example in Central European Time (UTC+01), when going from
            03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at
            00:30:00 UTC and at 01:30:00 UTC. In such a situation, the
            `ambiguous` parameter dictates how ambiguous times should be
            handled.

            - 'infer' will attempt to infer fall dst-transition hours based on
              order
            - bool-ndarray where True signifies a DST time, False signifies a
              non-DST time (note that this flag is only applicable for
              ambiguous times)
            - 'NaT' will return NaT where there are ambiguous times
            - 'raise' will raise a ValueError if there are ambiguous
              times.
        nonexistent : 'shift_forward', 'shift_backward, 'NaT', timedelta, \
default 'raise'
            A nonexistent time does not exist in a particular timezone
            where clocks moved forward due to DST.

            - 'shift_forward' will shift the nonexistent time forward to the
              closest existing time
            - 'shift_backward' will shift the nonexistent time backward to the
              closest existing time
            - 'NaT' will return NaT where there are nonexistent times
            - timedelta objects will shift nonexistent times by the timedelta
            - 'raise' will raise a ValueError if there are
              nonexistent times.

        Returns
        -------
        Same type as self
            Array/Index converted to the specified time zone.

        Raises
        ------
        TypeError
            If the Datetime Array/Index is tz-aware and tz is not None.

        See Also
        --------
        DatetimeIndex.tz_convert : Convert tz-aware DatetimeIndex from
            one time zone to another.

        Examples
        --------
        >>> tz_naive = pd.date_range('2018-03-01 09:00', periods=3)
        >>> tz_naive
        DatetimeIndex(['2018-03-01 09:00:00', '2018-03-02 09:00:00',
                       '2018-03-03 09:00:00'],
                      dtype='datetime64[us]', freq='D')

        Localize DatetimeIndex in US/Eastern time zone:

        >>> tz_aware = tz_naive.tz_localize(tz='US/Eastern')
        >>> tz_aware
        DatetimeIndex(['2018-03-01 09:00:00-05:00',
                       '2018-03-02 09:00:00-05:00',
                       '2018-03-03 09:00:00-05:00'],
                      dtype='datetime64[us, US/Eastern]', freq=None)

        With the ``tz=None``, we can remove the time zone information
        while keeping the local time (not converted to UTC):

        >>> tz_aware.tz_localize(None)
        DatetimeIndex(['2018-03-01 09:00:00', '2018-03-02 09:00:00',
                       '2018-03-03 09:00:00'],
                      dtype='datetime64[us]', freq=None)

        Be careful with DST changes. When there is sequential data, pandas can
        infer the DST time:

        >>> s = pd.to_datetime(pd.Series(['2018-10-28 01:30:00',
        ...                               '2018-10-28 02:00:00',
        ...                               '2018-10-28 02:30:00',
        ...                               '2018-10-28 02:00:00',
        ...                               '2018-10-28 02:30:00',
        ...                               '2018-10-28 03:00:00',
        ...                               '2018-10-28 03:30:00']))
        >>> s.dt.tz_localize('CET', ambiguous='infer')
        0   2018-10-28 01:30:00+02:00
        1   2018-10-28 02:00:00+02:00
        2   2018-10-28 02:30:00+02:00
        3   2018-10-28 02:00:00+01:00
        4   2018-10-28 02:30:00+01:00
        5   2018-10-28 03:00:00+01:00
        6   2018-10-28 03:30:00+01:00
        dtype: datetime64[us, CET]

        In some cases, inferring the DST is impossible. In such cases, you can
        pass an ndarray to the ambiguous parameter to set the DST explicitly

        >>> s = pd.to_datetime(pd.Series(['2018-10-28 01:20:00',
        ...                               '2018-10-28 02:36:00',
        ...                               '2018-10-28 03:46:00']))
        >>> s.dt.tz_localize('CET', ambiguous=np.array([True, True, False]))
        0   2018-10-28 01:20:00+02:00
        1   2018-10-28 02:36:00+02:00
        2   2018-10-28 03:46:00+01:00
        dtype: datetime64[us, CET]

        If the DST transition causes nonexistent times, you can shift these
        dates forward or backwards with a timedelta object or `'shift_forward'`
        or `'shift_backwards'`.

        >>> s = pd.to_datetime(pd.Series(['2015-03-29 02:30:00',
        ...                               '2015-03-29 03:30:00'], dtype="M8[ns]"))
        >>> s.dt.tz_localize('Europe/Warsaw', nonexistent='shift_forward')
        0   2015-03-29 03:00:00+02:00
        1   2015-03-29 03:30:00+02:00
        dtype: datetime64[ns, Europe/Warsaw]

        >>> s.dt.tz_localize('Europe/Warsaw', nonexistent='shift_backward')
        0   2015-03-29 01:59:59.999999999+01:00
        1   2015-03-29 03:30:00+02:00
        dtype: datetime64[ns, Europe/Warsaw]

        >>> s.dt.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta('1h'))
        0   2015-03-29 03:30:00+02:00
        1   2015-03-29 03:30:00+02:00
        dtype: datetime64[ns, Europe/Warsaw]
        """ # noqa: E501
        # Validate `nonexistent` up front so both branches get a clear error.
        nonexistent_options = ("raise", "NaT", "shift_forward", "shift_backward")
        if nonexistent not in nonexistent_options and not isinstance(
            nonexistent, timedelta
        ):
            raise ValueError(
                "The nonexistent argument must be one of 'raise', "
                "'NaT', 'shift_forward', 'shift_backward' or "
                "a timedelta object"
            )
        if self.tz is not None:
            # Already tz-aware: only tz=None (strip tz, keep wall time) is valid.
            if tz is None:
                new_dates = tz_convert_from_utc(self.asi8, self.tz, reso=self._creso)
            else:
                raise TypeError("Already tz-aware, use tz_convert to convert.")
        else:
            tz = timezones.maybe_get_tz(tz)
            # Convert to UTC
            new_dates = tzconversion.tz_localize_to_utc(
                self.asi8,
                tz,
                ambiguous=ambiguous,
                nonexistent=nonexistent,
                creso=self._creso,
            )
        new_dates_dt64 = new_dates.view(f"M8[{self.unit}]")
        dtype = tz_to_dtype(tz, unit=self.unit)
        freq = None
        if timezones.is_utc(tz) or (len(self) == 1 and not isna(new_dates_dt64[0])):
            # we can preserve freq
            # TODO: Also for fixed-offsets
            freq = self.freq
        elif tz is None and self.tz is None:
            # no-op
            freq = self.freq
        return self._simple_new(new_dates_dt64, dtype=dtype, freq=freq)
# ----------------------------------------------------------------
# Conversion Methods - Vectorized analogues of Timestamp methods
    def to_pydatetime(self) -> npt.NDArray[np.object_]:
        """
        Return an ndarray of ``datetime.datetime`` objects.

        Returns
        -------
        numpy.ndarray
            An ndarray of ``datetime.datetime`` objects.

        See Also
        --------
        DatetimeIndex.to_julian_date : Converts Datetime Array to float64 ndarray
            of Julian Dates.

        Examples
        --------
        >>> idx = pd.date_range("2018-02-27", periods=3)
        >>> idx.to_pydatetime()
        array([datetime.datetime(2018, 2, 27, 0, 0),
               datetime.datetime(2018, 2, 28, 0, 0),
               datetime.datetime(2018, 3, 1, 0, 0)], dtype=object)
        """
        # Boxes each i8 value as a stdlib datetime, carrying tz if aware.
        return ints_to_pydatetime(self.asi8, tz=self.tz, reso=self._creso)
    def normalize(self) -> Self:
        """
        Convert times to midnight.

        The time component of the date-time is converted to midnight i.e.
        00:00:00. This is useful in cases, when the time does not matter.
        Length is unaltered. The timezones are unaffected.

        This method is available on Series with datetime values under
        the ``.dt`` accessor, and directly on Datetime Array/Index.

        Returns
        -------
        DatetimeArray, DatetimeIndex or Series
            The same type as the original data. Series will have the same
            name and index. DatetimeIndex will have the same name.

        See Also
        --------
        floor : Floor the datetimes to the specified freq.
        ceil : Ceil the datetimes to the specified freq.
        round : Round the datetimes to the specified freq.

        Examples
        --------
        >>> idx = pd.date_range(
        ...     start="2014-08-01 10:00", freq="h", periods=3, tz="Asia/Calcutta"
        ... )
        >>> idx
        DatetimeIndex(['2014-08-01 10:00:00+05:30',
                       '2014-08-01 11:00:00+05:30',
                       '2014-08-01 12:00:00+05:30'],
                      dtype='datetime64[us, Asia/Calcutta]', freq='h')
        >>> idx.normalize()
        DatetimeIndex(['2014-08-01 00:00:00+05:30',
                       '2014-08-01 00:00:00+05:30',
                       '2014-08-01 00:00:00+05:30'],
                      dtype='datetime64[us, Asia/Calcutta]', freq=None)
        """
        # Truncate to midnight in *local* time (tz-aware data normalizes
        # against wall times, not UTC).
        new_values = normalize_i8_timestamps(self.asi8, self.tz, reso=self._creso)
        dt64_values = new_values.view(self._ndarray.dtype)
        dta = type(self)._simple_new(dt64_values, dtype=dt64_values.dtype)
        # Normalizing may break the original freq, so re-infer it.
        dta = dta._with_freq("infer")
        if self.tz is not None:
            dta = dta.tz_localize(self.tz)
        return dta
    def to_period(self, freq=None) -> PeriodArray:
        """
        Cast to PeriodArray/PeriodIndex at a particular frequency.

        Converts DatetimeArray/Index to PeriodArray/PeriodIndex.

        Parameters
        ----------
        freq : str or Period, optional
            One of pandas' :ref:`period aliases <timeseries.period_aliases>`
            or a Period object. Will be inferred by default.

        Returns
        -------
        PeriodArray/PeriodIndex
            Immutable ndarray holding ordinal values at a particular frequency.

        Raises
        ------
        ValueError
            When converting a DatetimeArray/Index with non-regular values,
            so that a frequency cannot be inferred.

        See Also
        --------
        PeriodIndex: Immutable ndarray holding ordinal values.
        DatetimeIndex.to_pydatetime: Return DatetimeIndex as object.

        Examples
        --------
        >>> df = pd.DataFrame(
        ...     {"y": [1, 2, 3]},
        ...     index=pd.to_datetime(
        ...         [
        ...             "2000-03-31 00:00:00",
        ...             "2000-05-31 00:00:00",
        ...             "2000-08-31 00:00:00",
        ...         ]
        ...     ),
        ... )
        >>> df.index.to_period("M")
        PeriodIndex(['2000-03', '2000-05', '2000-08'],
                    dtype='period[M]')

        Infer the daily frequency

        >>> idx = pd.date_range("2017-01-01", periods=2)
        >>> idx.to_period()
        PeriodIndex(['2017-01-01', '2017-01-02'],
                    dtype='period[D]')
        """
        from pandas.core.arrays import PeriodArray
        if self.tz is not None:
            # Periods have no timezone; warn that the tz is discarded.
            warnings.warn(
                "Converting to PeriodArray/Index representation "
                "will drop timezone information.",
                UserWarning,
                stacklevel=find_stack_level(),
            )
        if freq is None:
            # Fall back to the set freq, then to an inferred one.
            freq = self.freqstr or self.inferred_freq
            if isinstance(self.freq, BaseOffset) and hasattr(
                self.freq, "_period_dtype_code"
            ):
                freq = PeriodDtype(self.freq)._freqstr
            if freq is None:
                raise ValueError(
                    "You must pass a freq argument as current index has none."
                )
            res = get_period_alias(freq)
            # https://github.com/pandas-dev/pandas/issues/33358
            if res is None:
                res = freq
            freq = res
        return PeriodArray._from_datetime64(self._ndarray, freq, tz=self.tz)
# -----------------------------------------------------------------
# Properties - Vectorized Timestamp Properties/Methods
    def month_name(self, locale=None) -> npt.NDArray[np.object_]:
        """
        Return the month names with specified locale.

        This method returns the full name of the month (e.g., "January", "February")
        for each datetime value in the Series/Index. The names can be localized
        to different languages using the locale parameter.

        Parameters
        ----------
        locale : str, optional
            Locale determining the language in which to return the month name.
            Default is English locale (``'en_US.utf8'``). Use the command
            ``locale -a`` on your terminal on Unix systems to find your locale
            language code.

        Returns
        -------
        Series or Index
            Series or Index of month names.

        See Also
        --------
        DatetimeIndex.day_name : Return the day names with specified locale.

        Examples
        --------
        >>> s = pd.Series(pd.date_range(start="2018-01", freq="ME", periods=3))
        >>> s
        0   2018-01-31
        1   2018-02-28
        2   2018-03-31
        dtype: datetime64[us]
        >>> s.dt.month_name()
        0     January
        1    February
        2       March
        dtype: str

        >>> idx = pd.date_range(start="2018-01", freq="ME", periods=3)
        >>> idx
        DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31'],
                      dtype='datetime64[us]', freq='ME')
        >>> idx.month_name()
        Index(['January', 'February', 'March'], dtype='str')

        Using the ``locale`` parameter you can set a different locale language,
        for example: ``idx.month_name(locale='pt_BR.utf8')`` will return month
        names in Brazilian Portuguese language.

        >>> idx = pd.date_range(start="2018-01", freq="ME", periods=3)
        >>> idx
        DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31'],
                      dtype='datetime64[us]', freq='ME')
        >>> idx.month_name(locale="pt_BR.utf8")  # doctest: +SKIP
        Index(['Janeiro', 'Fevereiro', 'Março'], dtype='str')
        """
        # Use local wall times so names reflect the array's timezone.
        values = self._local_timestamps()
        result = fields.get_date_name_field(
            values, "month_name", locale=locale, reso=self._creso
        )
        # NaT slots become None in the object result.
        result = self._maybe_mask_results(result, fill_value=None)
        if using_string_dtype():
            # When the string-dtype option is enabled, return a StringArray
            # (with np.nan as the missing sentinel) instead of object ndarray.
            from pandas import (
                StringDtype,
                array as pd_array,
            )
            return pd_array(result, dtype=StringDtype(na_value=np.nan))  # type: ignore[return-value]
        return result
    def day_name(self, locale=None) -> npt.NDArray[np.object_]:
        """
        Return the day names with specified locale.

        This method returns the full name of the day of the week (e.g., "Monday",
        "Tuesday") for each datetime value in the Series/Index. The names can be
        localized to different languages using the locale parameter.

        Parameters
        ----------
        locale : str, optional
            Locale determining the language in which to return the day name.
            Default is English locale (``'en_US.utf8'``). Use the command
            ``locale -a`` on your terminal on Unix systems to find your locale
            language code.

        Returns
        -------
        Series or Index
            Series or Index of day names.

        See Also
        --------
        DatetimeIndex.month_name : Return the month names with specified locale.

        Examples
        --------
        >>> s = pd.Series(pd.date_range(start="2018-01-01", freq="D", periods=3))
        >>> s
        0   2018-01-01
        1   2018-01-02
        2   2018-01-03
        dtype: datetime64[us]
        >>> s.dt.day_name()
        0       Monday
        1      Tuesday
        2    Wednesday
        dtype: str

        >>> idx = pd.date_range(start="2018-01-01", freq="D", periods=3)
        >>> idx
        DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03'],
                      dtype='datetime64[us]', freq='D')
        >>> idx.day_name()
        Index(['Monday', 'Tuesday', 'Wednesday'], dtype='str')

        Using the ``locale`` parameter you can set a different locale language,
        for example: ``idx.day_name(locale='pt_BR.utf8')`` will return day
        names in Brazilian Portuguese language.

        >>> idx = pd.date_range(start="2018-01-01", freq="D", periods=3)
        >>> idx
        DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03'],
                      dtype='datetime64[us]', freq='D')
        >>> idx.day_name(locale="pt_BR.utf8")  # doctest: +SKIP
        Index(['Segunda', 'Terça', 'Quarta'], dtype='str')
        """
        # Use local wall times so names reflect the array's timezone.
        values = self._local_timestamps()
        result = fields.get_date_name_field(
            values, "day_name", locale=locale, reso=self._creso
        )
        # NaT slots become None in the object result.
        result = self._maybe_mask_results(result, fill_value=None)
        if using_string_dtype():
            # TODO: no tests that check for dtype of result as of 2024-08-15
            from pandas import (
                StringDtype,
                array as pd_array,
            )
            return pd_array(result, dtype=StringDtype(na_value=np.nan))  # type: ignore[return-value]
        return result
    @property
    def time(self) -> npt.NDArray[np.object_]:
        """
        Returns numpy array of :class:`datetime.time` objects.

        The time part of the Timestamps.

        See Also
        --------
        DatetimeIndex.timetz : Returns numpy array of :class:`datetime.time`
            objects with timezones. The time part of the Timestamps.
        DatetimeIndex.date : Returns numpy array of python :class:`datetime.date`
            objects. Namely, the date part of Timestamps without time and timezone
            information.

        Examples
        --------
        For Series:

        >>> s = pd.Series(["1/1/2020 10:00:00+00:00", "2/1/2020 11:00:00+00:00"])
        >>> s = pd.to_datetime(s)
        >>> s
        0   2020-01-01 10:00:00+00:00
        1   2020-02-01 11:00:00+00:00
        dtype: datetime64[us, UTC]
        >>> s.dt.time
        0    10:00:00
        1    11:00:00
        dtype: object

        For DatetimeIndex:

        >>> idx = pd.DatetimeIndex(
        ...     ["1/1/2020 10:00:00+00:00", "2/1/2020 11:00:00+00:00"]
        ... )
        >>> idx.time
        array([datetime.time(10, 0), datetime.time(11, 0)], dtype=object)
        """
        # If the Timestamps have a timezone that is not UTC,
        # convert them into their i8 representation while
        # keeping their timezone and not using UTC
        timestamps = self._local_timestamps()
        # box="time" yields naive datetime.time objects (tz already applied).
        return ints_to_pydatetime(timestamps, box="time", reso=self._creso)
    @property
    def timetz(self) -> npt.NDArray[np.object_]:
        """
        Returns numpy array of :class:`datetime.time` objects with timezones.

        The time part of the Timestamps.

        See Also
        --------
        DatetimeIndex.time : Returns numpy array of :class:`datetime.time` objects.
            The time part of the Timestamps.
        DatetimeIndex.tz : Return the timezone.

        Examples
        --------
        For Series:

        >>> s = pd.Series(["1/1/2020 10:00:00+00:00", "2/1/2020 11:00:00+00:00"])
        >>> s = pd.to_datetime(s)
        >>> s
        0   2020-01-01 10:00:00+00:00
        1   2020-02-01 11:00:00+00:00
        dtype: datetime64[us, UTC]
        >>> s.dt.timetz
        0    10:00:00+00:00
        1    11:00:00+00:00
        dtype: object

        For DatetimeIndex:

        >>> idx = pd.DatetimeIndex(
        ...     ["1/1/2020 10:00:00+00:00", "2/1/2020 11:00:00+00:00"]
        ... )
        >>> idx.timetz
        array([datetime.time(10, 0, tzinfo=datetime.timezone.utc),
        datetime.time(11, 0, tzinfo=datetime.timezone.utc)], dtype=object)
        """
        # Unlike .time, pass tz so each datetime.time keeps its tzinfo.
        return ints_to_pydatetime(self.asi8, self.tz, box="time", reso=self._creso)
    @property
    def date(self) -> npt.NDArray[np.object_]:
        """
        Returns numpy array of python :class:`datetime.date` objects.

        Namely, the date part of Timestamps without time and
        timezone information.

        See Also
        --------
        DatetimeIndex.time : Returns numpy array of :class:`datetime.time` objects.
            The time part of the Timestamps.
        DatetimeIndex.year : The year of the datetime.
        DatetimeIndex.month : The month as January=1, December=12.
        DatetimeIndex.day : The day of the datetime.

        Examples
        --------
        For Series:

        >>> s = pd.Series(["1/1/2020 10:00:00+00:00", "2/1/2020 11:00:00+00:00"])
        >>> s = pd.to_datetime(s)
        >>> s
        0   2020-01-01 10:00:00+00:00
        1   2020-02-01 11:00:00+00:00
        dtype: datetime64[us, UTC]
        >>> s.dt.date
        0    2020-01-01
        1    2020-02-01
        dtype: object

        For DatetimeIndex:

        >>> idx = pd.DatetimeIndex(
        ...     ["1/1/2020 10:00:00+00:00", "2/1/2020 11:00:00+00:00"]
        ... )
        >>> idx.date
        array([datetime.date(2020, 1, 1), datetime.date(2020, 2, 1)], dtype=object)
        """
        # If the Timestamps have a timezone that is not UTC,
        # convert them into their i8 representation while
        # keeping their timezone and not using UTC
        timestamps = self._local_timestamps()
        # box="date" drops both the time-of-day and the tz.
        return ints_to_pydatetime(timestamps, box="date", reso=self._creso)
    def isocalendar(self) -> DataFrame:
        """
        Calculate year, week, and day according to the ISO 8601 standard.

        The ISO 8601 standard defines the first week of the year as the week
        containing the first Thursday. This method returns a DataFrame with
        columns for the ISO year, ISO week number, and ISO day of week.

        Returns
        -------
        DataFrame
            With columns year, week and day.

        See Also
        --------
        Timestamp.isocalendar : Function return a 3-tuple containing ISO year,
            week number, and weekday for the given Timestamp object.
        datetime.date.isocalendar : Return a named tuple object with
            three components: year, week and weekday.

        Examples
        --------
        >>> idx = pd.date_range(start="2019-12-29", freq="D", periods=4)
        >>> idx.isocalendar()
                    year  week  day
        2019-12-29  2019    52    7
        2019-12-30  2020     1    1
        2019-12-31  2020     1    2
        2020-01-01  2020     1    3
        >>> idx.isocalendar().week
        2019-12-29    52
        2019-12-30     1
        2019-12-31     1
        2020-01-01     1
        Freq: D, Name: week, dtype: UInt32
        """
        from pandas import DataFrame
        # Local wall times, so tz-aware data is interpreted in its own tz.
        values = self._local_timestamps()
        sarray = fields.build_isocalendar_sarray(values, reso=self._creso)
        iso_calendar_df = DataFrame(
            sarray, columns=["year", "week", "day"], dtype="UInt32"
        )
        if self._hasna:
            # Propagate missing values: NaT rows become <NA> in the
            # nullable UInt32 columns.
            iso_calendar_df.iloc[self._isnan] = None
        return iso_calendar_df
# The following class attributes are generated by `_field_accessor`;
# each one presumably becomes a vectorized read-only property extracting
# the named datetime field (the string argument is used as its docstring)
# — TODO confirm against the `_field_accessor` definition elsewhere in
# this file.

# Calendar year component (examples below show dtype int32).
year = _field_accessor(
"year",
"Y",
"""
The year of the datetime.
This attribute returns the year component of each datetime value
in the Series/Index. The returned values are integers representing
the calendar year.
See Also
--------
DatetimeIndex.month: The month as January=1, December=12.
DatetimeIndex.day: The day of the datetime.
Examples
--------
>>> datetime_series = pd.Series(
... pd.date_range("2000-01-01", periods=3, freq="YE")
... )
>>> datetime_series
0 2000-12-31
1 2001-12-31
2 2002-12-31
dtype: datetime64[us]
>>> datetime_series.dt.year
0 2000
1 2001
2 2002
dtype: int32
""",
)
# Month component, numbered January=1 through December=12.
month = _field_accessor(
"month",
"M",
"""
The month as January=1, December=12.
This attribute returns the month component of each datetime value
in the Series/Index. Months are numbered from 1 (January) through
12 (December).
See Also
--------
DatetimeIndex.year: The year of the datetime.
DatetimeIndex.day: The day of the datetime.
Examples
--------
>>> datetime_series = pd.Series(
... pd.date_range("2000-01-01", periods=3, freq="ME")
... )
>>> datetime_series
0 2000-01-31
1 2000-02-29
2 2000-03-31
dtype: datetime64[us]
>>> datetime_series.dt.month
0 1
1 2
2 3
dtype: int32
""",
)
# Day-of-month component (1-31).
day = _field_accessor(
"day",
"D",
"""
The day of the datetime.
This attribute returns the day of the month component of each
datetime value in the Series/Index. Values range from 1 to 31,
depending on the month.
See Also
--------
DatetimeIndex.year: The year of the datetime.
DatetimeIndex.month: The month as January=1, December=12.
DatetimeIndex.hour: The hours of the datetime.
Examples
--------
>>> datetime_series = pd.Series(
... pd.date_range("2000-01-01", periods=3, freq="D")
... )
>>> datetime_series
0 2000-01-01
1 2000-01-02
2 2000-01-03
dtype: datetime64[us]
>>> datetime_series.dt.day
0 1
1 2
2 3
dtype: int32
""",
)
# Hour component in 24-hour format (0-23).
hour = _field_accessor(
"hour",
"h",
"""
The hours of the datetime.
This attribute returns the hour component of each datetime value
in the Series/Index. Values range from 0 to 23, representing the
hour of the day in 24-hour format.
See Also
--------
DatetimeIndex.day: The day of the datetime.
DatetimeIndex.minute: The minutes of the datetime.
DatetimeIndex.second: The seconds of the datetime.
Examples
--------
>>> datetime_series = pd.Series(
... pd.date_range("2000-01-01", periods=3, freq="h")
... )
>>> datetime_series
0 2000-01-01 00:00:00
1 2000-01-01 01:00:00
2 2000-01-01 02:00:00
dtype: datetime64[us]
>>> datetime_series.dt.hour
0 0
1 1
2 2
dtype: int32
""",
)
# Minute component (0-59).
minute = _field_accessor(
"minute",
"m",
"""
The minutes of the datetime.
This attribute returns the minute component of each datetime value
in the Series/Index. Values range from 0 to 59.
See Also
--------
DatetimeIndex.hour: The hours of the datetime.
DatetimeIndex.second: The seconds of the datetime.
Examples
--------
>>> datetime_series = pd.Series(
... pd.date_range("2000-01-01", periods=3, freq="min")
... )
>>> datetime_series
0 2000-01-01 00:00:00
1 2000-01-01 00:01:00
2 2000-01-01 00:02:00
dtype: datetime64[us]
>>> datetime_series.dt.minute
0 0
1 1
2 2
dtype: int32
""",
)
# Second component (0-59).
second = _field_accessor(
"second",
"s",
"""
The seconds of the datetime.
This attribute returns the second component of each datetime value
in the Series/Index. Values range from 0 to 59.
See Also
--------
DatetimeIndex.minute: The minutes of the datetime.
DatetimeIndex.microsecond: The microseconds of the datetime.
DatetimeIndex.nanosecond: The nanoseconds of the datetime.
Examples
--------
>>> datetime_series = pd.Series(
... pd.date_range("2000-01-01", periods=3, freq="s")
... )
>>> datetime_series
0 2000-01-01 00:00:00
1 2000-01-01 00:00:01
2 2000-01-01 00:00:02
dtype: datetime64[us]
>>> datetime_series.dt.second
0 0
1 1
2 2
dtype: int32
""",
)
# Microsecond component (0-999999).
microsecond = _field_accessor(
"microsecond",
"us",
"""
The microseconds of the datetime.
This attribute returns the microsecond component of each datetime
value in the Series/Index. Values range from 0 to 999999
(one microsecond is one millionth of a second).
See Also
--------
DatetimeIndex.second: The seconds of the datetime.
DatetimeIndex.nanosecond: The nanoseconds of the datetime.
Examples
--------
>>> datetime_series = pd.Series(
... pd.date_range("2000-01-01", periods=3, freq="us")
... )
>>> datetime_series
0 2000-01-01 00:00:00.000000
1 2000-01-01 00:00:00.000001
2 2000-01-01 00:00:00.000002
dtype: datetime64[us]
>>> datetime_series.dt.microsecond
0 0
1 1
2 2
dtype: int32
""",
)
# Nanosecond component (0-999).
nanosecond = _field_accessor(
"nanosecond",
"ns",
"""
The nanoseconds of the datetime.
This attribute returns the nanosecond component of each datetime
value in the Series/Index. Values range from 0 to 999
(one nanosecond is one billionth of a second).
See Also
--------
DatetimeIndex.second: The seconds of the datetime.
DatetimeIndex.microsecond: The microseconds of the datetime.
Examples
--------
>>> datetime_series = pd.Series(
... pd.date_range("2000-01-01", periods=3, freq="ns")
... )
>>> datetime_series
0 2000-01-01 00:00:00.000000000
1 2000-01-01 00:00:00.000000001
2 2000-01-01 00:00:00.000000002
dtype: datetime64[ns]
>>> datetime_series.dt.nanosecond
0 0
1 1
2 2
dtype: int32
""",
)
# Shared docstring for the three equivalent day-of-week accessors below.
_dayofweek_doc = """
The day of the week with Monday=0, Sunday=6.
Return the day of the week. It is assumed the week starts on
Monday, which is denoted by 0 and ends on Sunday which is denoted
by 6. This method is available on both Series with datetime
values (using the `dt` accessor) or DatetimeIndex.
Returns
-------
Series or Index
Containing integers indicating the day number.
See Also
--------
Series.dt.dayofweek : Alias.
Series.dt.weekday : Alias.
Series.dt.day_name : Returns the name of the day of the week.
Examples
--------
>>> s = pd.date_range('2016-12-31', '2017-01-08', freq='D').to_series()
>>> s.dt.dayofweek
2016-12-31 5
2017-01-01 6
2017-01-02 0
2017-01-03 1
2017-01-04 2
2017-01-05 3
2017-01-06 4
2017-01-07 5
2017-01-08 6
Freq: D, dtype: int32
"""
day_of_week = _field_accessor("day_of_week", "dow", _dayofweek_doc)
# `dayofweek` and `weekday` are aliases bound to the very same
# accessor object as `day_of_week`.
dayofweek = day_of_week
weekday = day_of_week
# Ordinal day of the year, 1-365 (366 in leap years).
day_of_year = _field_accessor(
"dayofyear",
"doy",
"""
The ordinal day of the year.
This attribute returns the day of the year for each datetime value
in the Series/Index. Values range from 1 to 365 (or 366 for leap years).
See Also
--------
DatetimeIndex.dayofweek : The day of the week with Monday=0, Sunday=6.
DatetimeIndex.day : The day of the datetime.
Examples
--------
For Series:
>>> s = pd.Series(["1/1/2020 10:00:00+00:00", "2/1/2020 11:00:00+00:00"])
>>> s = pd.to_datetime(s)
>>> s
0 2020-01-01 10:00:00+00:00
1 2020-02-01 11:00:00+00:00
dtype: datetime64[us, UTC]
>>> s.dt.dayofyear
0 1
1 32
dtype: int32
For DatetimeIndex:
>>> idx = pd.DatetimeIndex(["1/1/2020 10:00:00+00:00",
... "2/1/2020 11:00:00+00:00"])
>>> idx.dayofyear
Index([1, 32], dtype='int32')
""",
)
# Alias bound to the same accessor object.
dayofyear = day_of_year
# Calendar quarter (1-4): Q1 = Jan-Mar, ..., Q4 = Oct-Dec.
quarter = _field_accessor(
"quarter",
"q",
"""
The quarter of the date.
This attribute returns the quarter of the year for each datetime value
in the Series/Index. Quarter 1 includes January through March, quarter 2
includes April through June, quarter 3 includes July through September,
and quarter 4 includes October through December.
See Also
--------
DatetimeIndex.snap : Snap time stamps to nearest occurring frequency.
DatetimeIndex.time : Returns numpy array of datetime.time objects.
The time part of the Timestamps.
Examples
--------
For Series:
>>> s = pd.Series(["1/1/2020 10:00:00+00:00", "4/1/2020 11:00:00+00:00"])
>>> s = pd.to_datetime(s)
>>> s
0 2020-01-01 10:00:00+00:00
1 2020-04-01 11:00:00+00:00
dtype: datetime64[us, UTC]
>>> s.dt.quarter
0 1
1 2
dtype: int32
For DatetimeIndex:
>>> idx = pd.DatetimeIndex(["1/1/2020 10:00:00+00:00",
... "2/1/2020 11:00:00+00:00"])
>>> idx.quarter
Index([1, 1], dtype='int32')
""",
)
# Total days in each timestamp's month (leap-year aware).
days_in_month = _field_accessor(
"days_in_month",
"dim",
"""
The number of days in the month.
This attribute returns the total number of days in the month for each
datetime value in the Series/Index. The value depends on the month and
whether the year is a leap year (e.g., February has 29 days in a leap year).
See Also
--------
Series.dt.day : Return the day of the month.
Series.dt.is_month_end : Return a boolean indicating if the
date is the last day of the month.
Series.dt.is_month_start : Return a boolean indicating if the
date is the first day of the month.
Series.dt.month : Return the month as January=1 through December=12.
Examples
--------
>>> s = pd.Series(["1/1/2020 10:00:00+00:00", "2/1/2020 11:00:00+00:00"])
>>> s = pd.to_datetime(s)
>>> s
0 2020-01-01 10:00:00+00:00
1 2020-02-01 11:00:00+00:00
dtype: datetime64[us, UTC]
>>> s.dt.daysinmonth
0 31
1 29
dtype: int32
""",
)
# Alias bound to the same accessor object.
daysinmonth = days_in_month
# Template docstring shared by `is_month_start` / `is_month_end`;
# `{first_or_last}` is filled in via str.format below.
_is_month_doc = """
Indicates whether the date is the {first_or_last} day of the month.
This boolean attribute evaluates to True if the date falls on the
{first_or_last} day of a calendar month, and False otherwise.
Returns
-------
Series or array
For Series, returns a Series with boolean values.
For DatetimeIndex, returns a boolean array.
See Also
--------
is_month_start : Return a boolean indicating whether the date
is the first day of the month.
is_month_end : Return a boolean indicating whether the date
is the last day of the month.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on DatetimeIndex.
>>> s = pd.Series(pd.date_range("2018-02-27", periods=3))
>>> s
0 2018-02-27
1 2018-02-28
2 2018-03-01
dtype: datetime64[us]
>>> s.dt.is_month_start
0 False
1 False
2 True
dtype: bool
>>> s.dt.is_month_end
0 False
1 True
2 False
dtype: bool
>>> idx = pd.date_range("2018-02-27", periods=3)
>>> idx.is_month_start
array([False, False, True])
>>> idx.is_month_end
array([False, True, False])
"""
is_month_start = _field_accessor(
"is_month_start", "is_month_start", _is_month_doc.format(first_or_last="first")
)
is_month_end = _field_accessor(
"is_month_end", "is_month_end", _is_month_doc.format(first_or_last="last")
)
# Boolean accessor: True where the date is the first day of a quarter.
is_quarter_start = _field_accessor(
"is_quarter_start",
"is_quarter_start",
"""
Indicator for whether the date is the first day of a quarter.
This boolean attribute evaluates to True if the date falls on the
first day of a calendar quarter (January 1, April 1, July 1, or
October 1), and False otherwise.
Returns
-------
is_quarter_start : Series or DatetimeIndex
The same type as the original data with boolean values. Series will
have the same name and index. DatetimeIndex will have the same
name.
See Also
--------
quarter : Return the quarter of the date.
is_quarter_end : Similar property for indicating the quarter end.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on DatetimeIndex.
>>> df = pd.DataFrame({'dates': pd.date_range("2017-03-30",
... periods=4)})
>>> df.assign(quarter=df.dates.dt.quarter,
... is_quarter_start=df.dates.dt.is_quarter_start)
dates quarter is_quarter_start
0 2017-03-30 1 False
1 2017-03-31 1 False
2 2017-04-01 2 True
3 2017-04-02 2 False
>>> idx = pd.date_range('2017-03-30', periods=4)
>>> idx
DatetimeIndex(['2017-03-30', '2017-03-31', '2017-04-01', '2017-04-02'],
dtype='datetime64[us]', freq='D')
>>> idx.is_quarter_start
array([False, False, True, False])
""",
)
# Boolean accessor: True where the date is the last day of a quarter.
is_quarter_end = _field_accessor(
"is_quarter_end",
"is_quarter_end",
"""
Indicator for whether the date is the last day of a quarter.
This boolean attribute evaluates to True if the date falls on the
last day of a calendar quarter (March 31, June 30, September 30, or
December 31), and False otherwise.
Returns
-------
is_quarter_end : Series or DatetimeIndex
The same type as the original data with boolean values. Series will
have the same name and index. DatetimeIndex will have the same
name.
See Also
--------
quarter : Return the quarter of the date.
is_quarter_start : Similar property indicating the quarter start.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on DatetimeIndex.
>>> df = pd.DataFrame({'dates': pd.date_range("2017-03-30",
... periods=4)})
>>> df.assign(quarter=df.dates.dt.quarter,
... is_quarter_end=df.dates.dt.is_quarter_end)
dates quarter is_quarter_end
0 2017-03-30 1 False
1 2017-03-31 1 True
2 2017-04-01 2 False
3 2017-04-02 2 False
>>> idx = pd.date_range('2017-03-30', periods=4)
>>> idx
DatetimeIndex(['2017-03-30', '2017-03-31', '2017-04-01', '2017-04-02'],
dtype='datetime64[us]', freq='D')
>>> idx.is_quarter_end
array([False, True, False, False])
""",
)
# Boolean accessor: True where the date is January 1st.
is_year_start = _field_accessor(
"is_year_start",
"is_year_start",
"""
Indicate whether the date is the first day of a year.
This boolean attribute evaluates to True if the date is January 1st,
and False otherwise.
Returns
-------
Series or DatetimeIndex
The same type as the original data with boolean values. Series will
have the same name and index. DatetimeIndex will have the same
name.
See Also
--------
is_year_end : Similar property indicating the last day of the year.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on DatetimeIndex.
>>> dates = pd.Series(pd.date_range("2017-12-30", periods=3))
>>> dates
0 2017-12-30
1 2017-12-31
2 2018-01-01
dtype: datetime64[us]
>>> dates.dt.is_year_start
0 False
1 False
2 True
dtype: bool
>>> idx = pd.date_range("2017-12-30", periods=3)
>>> idx
DatetimeIndex(['2017-12-30', '2017-12-31', '2018-01-01'],
dtype='datetime64[us]', freq='D')
>>> idx.is_year_start
array([False, False, True])
This method, when applied to Series with datetime values under
the ``.dt`` accessor, will lose information about Business offsets.
>>> dates = pd.Series(pd.date_range("2020-10-30", periods=4, freq="BYS"))
>>> dates
0 2021-01-01
1 2022-01-03
2 2023-01-02
3 2024-01-01
dtype: datetime64[us]
>>> dates.dt.is_year_start
0 True
1 False
2 False
3 True
dtype: bool
>>> idx = pd.date_range("2020-10-30", periods=4, freq="BYS")
>>> idx
DatetimeIndex(['2021-01-01', '2022-01-03', '2023-01-02', '2024-01-01'],
dtype='datetime64[us]', freq='BYS-JAN')
>>> idx.is_year_start
array([ True, True, True, True])
""",
)
# Boolean accessor: True where the date is December 31st.
is_year_end = _field_accessor(
"is_year_end",
"is_year_end",
"""
Indicate whether the date is the last day of the year.
This boolean attribute evaluates to True if the date is December 31st,
and False otherwise.
Returns
-------
Series or DatetimeIndex
The same type as the original data with boolean values. Series will
have the same name and index. DatetimeIndex will have the same
name.
See Also
--------
is_year_start : Similar property indicating the start of the year.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on DatetimeIndex.
>>> dates = pd.Series(pd.date_range("2017-12-30", periods=3))
>>> dates
0 2017-12-30
1 2017-12-31
2 2018-01-01
dtype: datetime64[us]
>>> dates.dt.is_year_end
0 False
1 True
2 False
dtype: bool
>>> idx = pd.date_range("2017-12-30", periods=3)
>>> idx
DatetimeIndex(['2017-12-30', '2017-12-31', '2018-01-01'],
dtype='datetime64[us]', freq='D')
>>> idx.is_year_end
array([False, True, False])
""",
)
# Boolean accessor: True where the timestamp's year is a leap year
# (divisible by 4, excluding centuries not divisible by 400).
is_leap_year = _field_accessor(
"is_leap_year",
"is_leap_year",
"""
Boolean indicator if the date belongs to a leap year.
A leap year is a year, which has 366 days (instead of 365) including
29th of February as an intercalary day.
Leap years are years which are multiples of four with the exception
of years divisible by 100 but not by 400.
Returns
-------
Series or ndarray
Booleans indicating if dates belong to a leap year.
See Also
--------
DatetimeIndex.is_year_end : Indicate whether the date is the
last day of the year.
DatetimeIndex.is_year_start : Indicate whether the date is the first
day of a year.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on DatetimeIndex.
>>> idx = pd.date_range("2012-01-01", "2015-01-01", freq="YE")
>>> idx
DatetimeIndex(['2012-12-31', '2013-12-31', '2014-12-31'],
dtype='datetime64[us]', freq='YE-DEC')
>>> idx.is_leap_year
array([ True, False, False])
>>> dates_series = pd.Series(idx)
>>> dates_series
0 2012-12-31
1 2013-12-31
2 2014-12-31
dtype: datetime64[us]
>>> dates_series.dt.is_leap_year
0 True
1 False
2 False
dtype: bool
""",
)
def to_julian_date(self) -> npt.NDArray[np.float64]:
    """
    Convert the underlying Timestamps to Julian Dates.

    A Julian Date is the (possibly fractional) number of days elapsed
    since noon on January 1, 4713 BC; see
    https://en.wikipedia.org/wiki/Julian_day.

    Returns
    -------
    ndarray or Index
        Float values representing each date as a Julian Date.

    See Also
    --------
    Timestamp.to_julian_date : Equivalent method on ``Timestamp`` objects.

    Examples
    --------
    >>> idx = pd.DatetimeIndex(["2028-08-12 00:54", "2028-08-12 02:06"])
    >>> idx.to_julian_date()
    Index([2461995.5375, 2461995.5875], dtype='float64')
    """
    # Algorithm: http://mysite.verizon.net/aesir_research/date/jdalg2.htm
    yr = np.asarray(self.year)
    mo = np.asarray(self.month)
    dy = np.asarray(self.day)

    # Treat Jan/Feb as months 13/14 of the previous year, as the
    # algorithm requires.
    before_march = mo < 3
    yr[before_march] -= 1
    mo[before_march] += 12

    # Fraction of a day contributed by the sub-day components.
    day_fraction = (
        self.hour
        + self.minute / 60
        + self.second / 3600
        + self.microsecond / 3600 / 10**6
        + self.nanosecond / 3600 / 10**9
    ) / 24

    return (
        dy
        + np.trunc((153 * mo - 457) / 5)
        + 365 * yr
        + np.floor(yr / 4)
        - np.floor(yr / 100)
        + np.floor(yr / 400)
        + 1_721_118.5
        + day_fraction
    )
# -----------------------------------------------------------------
# Reductions
def _reduce(
    self, name: str, *, skipna: bool = True, keepdims: bool = False, **kwargs
):
    """
    Dispatch a named reduction, re-wrapping ndarray results when
    ``keepdims`` is requested.

    ``std`` of datetime data is a duration, so its result is wrapped
    in a TimedeltaArray; every other reduction keeps this dtype.
    """
    result = super()._reduce(name, skipna=skipna, keepdims=keepdims, **kwargs)
    if not (keepdims and isinstance(result, np.ndarray)):
        return result
    if name == "std":
        # A spread of datetimes is a timedelta quantity.
        from pandas.core.arrays import TimedeltaArray

        return TimedeltaArray._from_sequence(result)
    return self._from_sequence(result, dtype=self.dtype)
def std(
    self,
    axis=None,
    dtype=None,
    out=None,
    ddof: int = 1,
    keepdims: bool = False,
    skipna: bool = True,
) -> Timedelta:
    """
    Return sample standard deviation over requested axis.

    Normalized by `N-1` by default. This can be changed using ``ddof``.

    Parameters
    ----------
    axis : int, optional
        Axis for the function to be applied on. For :class:`pandas.Series`
        this parameter is unused and defaults to ``None``.
    dtype : dtype, optional, default None
        Type to use in computing the standard deviation. For arrays of
        integer type the default is float64, for arrays of float types
        it is the same as the array type.
    out : ndarray, optional, default None
        Alternative output array in which to place the result. It must
        have the same shape as the expected output but the type (of the
        calculated values) will be cast if necessary.
    ddof : int, default 1
        Degrees of Freedom. The divisor used in calculations is `N - ddof`,
        where `N` represents the number of elements.
    keepdims : bool, optional
        If True, the reduced axes are left in the result as dimensions
        with size one, so the result broadcasts correctly against the
        input array.
    skipna : bool, default True
        Exclude NA/null values. If an entire row/column is ``NA``, the
        result will be ``NA``.

    Returns
    -------
    Timedelta
        Standard deviation over requested axis.

    See Also
    --------
    numpy.ndarray.std : Returns the standard deviation of the array
        elements along given axis.
    Series.std : Return sample standard deviation over requested axis.

    Examples
    --------
    >>> idx = pd.date_range("2001-01-01 00:00", periods=3)
    >>> idx.std()
    Timedelta('1 days 00:00:00')
    """
    # std is translation-invariant: std(self) == std(self - Timestamp(0)).
    # Exploit that by viewing the underlying datetime64 data as
    # timedelta64 of the same resolution (no copy) and deferring to
    # TimedeltaArray.std.
    from pandas.core.arrays import TimedeltaArray

    td64_name = self._ndarray.dtype.name.replace("datetime64", "timedelta64")
    td64_dtype = np.dtype(td64_name)
    as_td = TimedeltaArray._simple_new(self._ndarray.view(td64_dtype), dtype=td64_dtype)
    return as_td.std(axis=axis, out=out, ddof=ddof, keepdims=keepdims, skipna=skipna)
# -------------------------------------------------------------------
# Constructor Helpers
def _sequence_to_dt64(
    data: ArrayLike,
    *,
    copy: bool = False,
    tz: tzinfo | None = None,
    dayfirst: bool = False,
    yearfirst: bool = False,
    ambiguous: TimeAmbiguous = "raise",
    out_unit: str | None = None,
) -> tuple[np.ndarray, tzinfo | None]:
    """
    Convert an array-like of datetime-like values to a datetime64 ndarray.

    Parameters
    ----------
    data : np.ndarray or ExtensionArray
        dtl.ensure_arraylike_for_datetimelike has already been called.
    copy : bool, default False
    tz : tzinfo or None, default None
    dayfirst : bool, default False
    yearfirst : bool, default False
    ambiguous : str, bool, or arraylike, default 'raise'
        See pandas._libs.tslibs.tzconversion.tz_localize_to_utc.
    out_unit : str or None, default None
        Desired output resolution.

    Returns
    -------
    result : numpy.ndarray
        The sequence converted to a numpy array with dtype ``datetime64[unit]``.
        Where `unit` is "ns" unless specified otherwise by `out_unit`.
    tz : tzinfo or None
        Either the user-provided tzinfo or one inferred from the data.

    Raises
    ------
    TypeError : PeriodDType data is passed
    """
    # By this point we are assured to have either a numpy array or Index.
    # maybe_convert_dtype normalizes float/bool/extension dtypes and
    # rejects timedelta/Period input.
    data, copy = maybe_convert_dtype(data, copy, tz=tz)
    data_dtype = getattr(data, "dtype", None)

    # Target dtype: ns resolution unless the caller asked for another unit.
    out_dtype = DT64NS_DTYPE
    if out_unit is not None:
        out_dtype = np.dtype(f"M8[{out_unit}]")

    if data_dtype == object or is_string_dtype(data_dtype):
        # TODO: We do not have tests specific to string-dtypes,
        #  also complex or categorical or other extension
        data = cast(np.ndarray, data)
        copy = False
        if lib.infer_dtype(data, skipna=False) == "integer":
            # Much more performant than going through array_to_datetime;
            # falls through to the epoch-timestamp branch below.
            data = data.astype(np.int64)
        elif tz is not None and ambiguous == "raise":
            # Parse strings/objects directly into the requested timezone.
            obj_data = np.asarray(data, dtype=object)
            result = tslib.array_to_datetime_with_tz(
                obj_data,
                tz=tz,
                dayfirst=dayfirst,
                yearfirst=yearfirst,
                creso=abbrev_to_npy_unit(out_unit),
            )
            return result, tz
        else:
            # General object parsing; may infer a timezone from the data.
            converted, inferred_tz = objects_to_datetime64(
                data,
                dayfirst=dayfirst,
                yearfirst=yearfirst,
                allow_object=False,
                out_unit=out_unit,
            )
            copy = False
            if tz and inferred_tz:
                # two timezones: convert to intended from base UTC repr
                # GH#42505 by convention, these are _already_ UTC
                result = converted
            elif inferred_tz:
                tz = inferred_tz
                result = converted
            else:
                # tz-naive values: localize (and cast to a supported unit).
                result, _ = _construct_from_dt64_naive(
                    converted, tz=tz, copy=copy, ambiguous=ambiguous
                )
            return result, tz

        # Only the integer branch reaches here; refresh the dtype.
        data_dtype = data.dtype

    # `data` may have originally been a Categorical[datetime64[ns, tz]],
    # so we need to handle these types.
    if isinstance(data_dtype, DatetimeTZDtype):
        # DatetimeArray -> ndarray
        data = cast(DatetimeArray, data)
        tz = _maybe_infer_tz(tz, data.tz)
        result = data._ndarray

    elif lib.is_np_dtype(data_dtype, "M"):
        # tz-naive DatetimeArray or ndarray[datetime64]
        if isinstance(data, DatetimeArray):
            data = data._ndarray
        data = cast(np.ndarray, data)
        result, copy = _construct_from_dt64_naive(
            data, tz=tz, copy=copy, ambiguous=ambiguous
        )

    else:
        # must be integer dtype otherwise
        # assume this data are epoch timestamps
        if data.dtype != INT64_DTYPE:
            data = data.astype(np.int64, copy=False)
            copy = False
        data = cast(np.ndarray, data)
        result = data.view(out_dtype)

    if copy:
        result = result.copy()

    # Postconditions: a supported-resolution datetime64 ndarray.
    assert isinstance(result, np.ndarray), type(result)
    assert result.dtype.kind == "M"
    assert result.dtype != "M8"
    assert is_supported_dtype(result.dtype)
    return result, tz
def _construct_from_dt64_naive(
    data: np.ndarray, *, tz: tzinfo | None, copy: bool, ambiguous: TimeAmbiguous
) -> tuple[np.ndarray, bool]:
    """
    Convert datetime64 data to a supported dtype, localizing if necessary.

    Parameters
    ----------
    data : np.ndarray
        tz-naive datetime64 values; caller ensures the dtype kind is "M".
    tz : tzinfo or None
        If given, values are interpreted as wall times in ``tz`` and
        converted to their UTC representation.
    copy : bool
        Whether the caller still wants a defensive copy; cleared whenever
        a conversion here has already produced a fresh array.
    ambiguous : str, bool, or arraylike
        Passed through to tz_localize_to_utc.

    Returns
    -------
    tuple of (converted ndarray, remaining ``copy`` flag).
    """
    # Caller is responsible for ensuring
    #  lib.is_np_dtype(data.dtype)
    new_dtype = data.dtype
    if not is_supported_dtype(new_dtype):
        # Cast to the nearest supported unit, generally "s"
        new_dtype = get_supported_dtype(new_dtype)
        data = astype_overflowsafe(data, dtype=new_dtype, copy=False)
        copy = False

    if data.dtype.byteorder == ">":
        # TODO: better way to handle this? non-copying alternative?
        #  without this, test_constructor_datetime64_bigendian fails
        data = data.astype(data.dtype.newbyteorder("<"))
        new_dtype = data.dtype
        copy = False

    if tz is not None:
        # Convert tz-naive to UTC
        # TODO: if tz is UTC, are there situations where we *don't* want a
        #  copy? tz_localize_to_utc always makes one.
        # tz_localize_to_utc operates on 1-D i8 values, so flatten first
        # and restore the shape afterwards.
        shape = data.shape
        if data.ndim > 1:
            data = data.ravel()

        data_unit = get_unit_from_dtype(new_dtype)
        data = tzconversion.tz_localize_to_utc(
            data.view("i8"), tz, ambiguous=ambiguous, creso=data_unit
        )
        data = data.view(new_dtype)
        data = data.reshape(shape)

    assert data.dtype == new_dtype, data.dtype
    result = data
    return result, copy
def objects_to_datetime64(
data: np.ndarray,
dayfirst,
yearfirst,
utc: bool = False,
errors: DateTimeErrorChoices = "raise",
allow_object: bool = False,
out_unit: str | None = None,
) -> tuple[np.ndarray, tzinfo | None]:
"""
Convert data to array of timestamps.
Parameters
----------
data : np.ndarray[object]
dayfirst : bool
yearfirst : bool
utc : bool, default False
Whether to convert/localize timestamps to UTC.
errors : {'raise', 'coerce'}
allow_object : bool
Whether to return an object-dtype ndarray instead of raising if the
data contains more than one timezone.
out_unit : str or None, default None
None indicates we should do resolution inference.
Returns
-------
result : ndarray
np.datetime64[out_unit] if returned values represent wall times or UTC
timestamps.
object if mixed timezones
inferred_tz : tzinfo or None
If not None, then the datetime64 values in `result` denote UTC timestamps.
Raises
------
ValueError : if data cannot be converted to datetimes
TypeError : When a type cannot be converted to datetime
"""
assert errors in ["raise", "coerce"]
# if str-dtype, convert
data = np.asarray(data, dtype=np.object_)
result, tz_parsed = tslib.array_to_datetime(
data,
errors=errors,
utc=utc,
dayfirst=dayfirst,
yearfirst=yearfirst,
creso=abbrev_to_npy_unit(out_unit),
)
if tz_parsed is not None:
# We can take a shortcut since the datetime64 numpy array
# is in UTC
return result, tz_parsed
elif result.dtype.kind == "M":
return result, tz_parsed
elif result.dtype == object:
# GH#23675 when called via `pd.to_datetime`, returning an object-dtype
# array is allowed. When called via `pd.DatetimeIndex`, we can
# only accept datetime64 dtype, so raise TypeError if object-dtype
# is returned, as that indicates the values can be recognized as
# datetimes but they have conflicting timezones/awareness
if allow_object:
return result, tz_parsed
raise TypeError("DatetimeIndex has mixed timezones")
else: # pragma: no cover
# GH#23675 this TypeError should never be hit, whereas the TypeError
# in the object-dtype branch above is reachable.
raise TypeError(result)
def maybe_convert_dtype(data, copy: bool, tz: tzinfo | None = None):
    """
    Convert data based on dtype conventions, issuing
    errors where appropriate.

    Parameters
    ----------
    data : np.ndarray or pd.Index
    copy : bool
    tz : tzinfo or None, default None

    Returns
    -------
    data : np.ndarray or pd.Index
    copy : bool

    Raises
    ------
    TypeError : PeriodDType data is passed
    """
    if not hasattr(data, "dtype"):
        # e.g. collections.deque — nothing dtype-based to convert.
        return data, copy

    dtype = data.dtype
    if is_float_dtype(dtype):
        # pre-2.0 we treated these as wall-times, inconsistent with ints
        # GH#23675, GH#45573 deprecated to treat symmetrically with integer
        # dtypes. Note: data.astype(np.int64) fails ARM tests, see
        # https://github.com/pandas-dev/pandas/issues/49468.
        data = data.astype(DT64NS_DTYPE).view("i8")
        copy = False
    elif lib.is_np_dtype(dtype, "m") or is_bool_dtype(dtype):
        # GH#29794 enforcing deprecation introduced in GH#23539
        raise TypeError(f"dtype {data.dtype} cannot be converted to datetime64[ns]")
    elif isinstance(dtype, PeriodDtype):
        # Note: without explicitly raising here, PeriodIndex
        # test_setops.test_join_does_not_recur fails
        raise TypeError(
            "Passing PeriodDtype data is invalid. Use `data.to_timestamp()` instead"
        )
    elif isinstance(dtype, ExtensionDtype) and not isinstance(dtype, DatetimeTZDtype):
        # TODO: We have no tests for these
        data = np.array(data, dtype=np.object_)
        copy = False

    return data, copy
# -------------------------------------------------------------------
# Validation and Inference
def _maybe_infer_tz(tz: tzinfo | None, inferred_tz: tzinfo | None) -> tzinfo | None:
"""
If a timezone is inferred from data, check that it is compatible with
the user-provided timezone, if any.
Parameters
----------
tz : tzinfo or None
inferred_tz : tzinfo or None
Returns
-------
tz : tzinfo or None
Raises
------
TypeError : if both timezones are present but do not match
"""
if tz is None:
tz = inferred_tz
elif inferred_tz is None:
pass
elif not timezones.tz_compare(tz, inferred_tz):
raise TypeError(
f"data is already tz-aware {inferred_tz}, unable to set specified tz: {tz}"
)
return tz
def _validate_dt64_dtype(dtype):
    """
    Check that a dtype, if passed, represents either a numpy datetime64[ns]
    dtype or a pandas DatetimeTZDtype.

    Parameters
    ----------
    dtype : object

    Returns
    -------
    dtype : None, numpy.dtype, or DatetimeTZDtype

    Raises
    ------
    ValueError : invalid dtype

    Notes
    -----
    Unlike _validate_tz_from_dtype, this does _not_ allow non-existent
    tz errors to go through
    """
    if dtype is None:
        return dtype

    dtype = pandas_dtype(dtype)
    if dtype == np.dtype("M8"):
        # no precision, disallowed GH#24806
        raise ValueError(
            "Passing in 'datetime64' dtype with no precision is not allowed. "
            "Please pass in 'datetime64[ns]' instead."
        )

    # A numpy dtype must be datetime64 at a supported resolution;
    # anything else must be a DatetimeTZDtype.
    bad_np_dtype = isinstance(dtype, np.dtype) and (
        dtype.kind != "M" or not is_supported_dtype(dtype)
    )
    if bad_np_dtype or not isinstance(dtype, (np.dtype, DatetimeTZDtype)):
        raise ValueError(
            f"Unexpected value for 'dtype': '{dtype}'. "
            "Must be 'datetime64[s]', 'datetime64[ms]', 'datetime64[us]', "
            "'datetime64[ns]' or DatetimeTZDtype'."
        )

    if getattr(dtype, "tz", None):
        # https://github.com/pandas-dev/pandas/issues/18595
        # Ensure that we have a standard timezone for pytz objects.
        # Without this, things like adding an array of timedeltas and
        # a tz-aware Timestamp (with a tz specific to its datetime) will
        # be incorrect(ish?) for the array as a whole
        dtype = cast(DatetimeTZDtype, dtype)
        dtype = DatetimeTZDtype(unit=dtype.unit, tz=timezones.tz_standardize(dtype.tz))
    return dtype
def _validate_tz_from_dtype(
dtype, tz: tzinfo | None, explicit_tz_none: bool = False
) -> tzinfo | None:
"""
If the given dtype is a DatetimeTZDtype, extract the implied
tzinfo object from it and check that it does not conflict with the given
tz.
Parameters
----------
dtype : dtype, str
tz : None, tzinfo
explicit_tz_none : bool, default False
Whether tz=None was passed explicitly, as opposed to lib.no_default.
Returns
-------
tz : consensus tzinfo
Raises
------
ValueError : on tzinfo mismatch
"""
if dtype is not None:
if isinstance(dtype, str):
try:
dtype = DatetimeTZDtype.construct_from_string(dtype)
except TypeError:
# Things like `datetime64[ns]`, which is OK for the
# constructors, but also nonsense, which should be validated
# but not by us. We *do* allow non-existent tz errors to
# go through
pass
dtz = getattr(dtype, "tz", None)
if dtz is not None:
if tz is not None and not timezones.tz_compare(tz, dtz):
raise ValueError("cannot supply both a tz and a dtype with a tz")
if explicit_tz_none:
raise ValueError("Cannot pass both a timezone-aware dtype and tz=None")
tz = dtz
if tz is not None and lib.is_np_dtype(dtype, "M"):
# We also need to check for the case where the user passed a
# tz-naive dtype (i.e. datetime64[ns])
if tz is not None and not timezones.tz_compare(tz, dtz):
raise ValueError(
"cannot supply both a tz and a "
"timezone-naive dtype (i.e. datetime64[ns])"
)
return tz
def _infer_tz_from_endpoints(
    start: Timestamp, end: Timestamp, tz: tzinfo | None
) -> tzinfo | None:
    """
    Infer a timezone from the ``start``/``end`` endpoints when ``tz`` is
    not explicitly given; if more than one input provides a timezone,
    require that they all agree.

    Parameters
    ----------
    start : Timestamp
    end : Timestamp
    tz : tzinfo or None

    Returns
    -------
    tz : tzinfo or None

    Raises
    ------
    TypeError : if start and end timezones do not agree
    """
    try:
        inferred_tz = timezones.infer_tzinfo(start, end)
    except AssertionError as err:
        # infer_tzinfo raises AssertionError if passed mismatched timezones
        raise TypeError(
            "Start and end cannot both be tz-aware with different timezones"
        ) from err

    inferred_tz = timezones.maybe_get_tz(inferred_tz)
    tz = timezones.maybe_get_tz(tz)

    # Prefer the explicit tz, but it must match whatever was inferred.
    if inferred_tz is None:
        return tz
    if tz is None:
        return inferred_tz
    if not timezones.tz_compare(inferred_tz, tz):
        raise AssertionError("Inferred time zone not equal to passed time zone")
    return tz
def _maybe_normalize_endpoints(
    start: _TimestampNoneT1, end: _TimestampNoneT2, normalize: bool
) -> tuple[_TimestampNoneT1, _TimestampNoneT2]:
    """Snap non-None endpoints to midnight when ``normalize`` is True."""
    if not normalize:
        return start, end
    if start is not None:
        start = start.normalize()
    if end is not None:
        end = end.normalize()
    return start, end
def _maybe_localize_point(
    ts: Timestamp | None, freq, tz, ambiguous, nonexistent
) -> Timestamp | None:
    """
    Localize a start or end Timestamp to the timezone of the corresponding
    start or end Timestamp.

    Parameters
    ----------
    ts : start or end Timestamp to potentially localize
    freq : Tick, DateOffset, or None
    tz : str, timezone object or None
    ambiguous: str, localization behavior for ambiguous times
    nonexistent: str, localization behavior for nonexistent times

    Returns
    -------
    ts : Timestamp
    """
    # Only localize when: 1) freq is a Timedelta-like frequency (Tick), or
    # 2) freq is None, i.e. generating a linspaced range.
    if ts is None or ts.tzinfo is not None:
        return ts
    # Note: We can't ambiguous='infer' a singular ambiguous time; however,
    # we have historically defaulted ambiguous=False
    if ambiguous == "infer":
        ambiguous = False
    target_tz = tz if (freq is None or isinstance(freq, Tick)) else None
    return ts.tz_localize(tz=target_tz, ambiguous=ambiguous, nonexistent=nonexistent)
def _generate_range(
    start: Timestamp | None,
    end: Timestamp | None,
    periods: int | None,
    offset: BaseOffset,
    *,
    unit: TimeUnit,
) -> Generator[Timestamp]:
    """
    Generates a sequence of dates corresponding to the specified time
    offset. Similar to dateutil.rrule except uses pandas DateOffset
    objects to represent time increments.

    Parameters
    ----------
    start : Timestamp or None
    end : Timestamp or None
    periods : int or None
    offset : DateOffset
    unit : str

    Notes
    -----
    * This method is faster for generating weekdays than dateutil.rrule
    * At least two of (start, end, periods) must be specified.
    * If both start and end are specified, the returned dates will
      satisfy start <= date <= end.

    Returns
    -------
    dates : generator object
    """
    offset = to_offset(offset)

    # Timestamp(None) yields NaT; fold NaT back to None so the bound is
    # treated as "unspecified" below.
    # Argument 1 to "Timestamp" has incompatible type "Optional[Timestamp]";
    # expected "Union[integer[Any], float, str, date, datetime64]"
    start = Timestamp(start)  # type: ignore[arg-type]
    if start is not NaT:
        start = start.as_unit(unit)
    else:
        start = None

    # Argument 1 to "Timestamp" has incompatible type "Optional[Timestamp]";
    # expected "Union[integer[Any], float, str, date, datetime64]"
    end = Timestamp(end)  # type: ignore[arg-type]
    if end is not NaT:
        end = end.as_unit(unit)
    else:
        end = None

    if start and not offset.is_on_offset(start):
        # Snap start onto the offset grid, in the direction of iteration.
        # Incompatible types in assignment (expression has type "datetime",
        # variable has type "Optional[Timestamp]")
        # GH #56147 account for negative direction and range bounds
        if offset.n >= 0:
            start = offset.rollforward(start)  # type: ignore[assignment]
        else:
            start = offset.rollback(start)  # type: ignore[assignment]

    # An empty forward range (end before start, no period count): emit nothing.
    # Unsupported operand types for < ("Timestamp" and "None")
    if periods is None and end < start and offset.n >= 0:  # type: ignore[operator]
        end = None
        periods = 0

    # Derive the missing bound from `periods` (at least two of
    # start/end/periods are specified, per the Notes above).
    if end is None:
        # error: No overload variant of "__radd__" of "BaseOffset" matches
        # argument type "None"
        end = start + (periods - 1) * offset  # type: ignore[operator]

    if start is None:
        # error: No overload variant of "__radd__" of "BaseOffset" matches
        # argument type "None"
        start = end - (periods - 1) * offset  # type: ignore[operator]

    start = cast(Timestamp, start)
    end = cast(Timestamp, end)

    cur = start
    if offset.n >= 0:
        # Forward iteration: yield start <= cur <= end.
        while cur <= end:
            yield cur

            if cur == end:
                # GH#24252 avoid overflows by not performing the addition
                # in offset.apply unless we have to
                break

            # faster than cur + offset
            next_date = offset._apply(cur)
            next_date = next_date.as_unit(unit)
            if next_date <= cur:
                raise ValueError(f"Offset {offset} did not increment date")
            cur = next_date
    else:
        # Backward iteration: yield start >= cur >= end.
        while cur >= end:
            yield cur

            if cur == end:
                # GH#24252 avoid overflows by not performing the addition
                # in offset.apply unless we have to
                break

            # faster than cur + offset
            next_date = offset._apply(cur)
            next_date = next_date.as_unit(unit)
            if next_date >= cur:
                raise ValueError(f"Offset {offset} did not decrement date")
            cur = next_date
|
python
|
github
|
https://github.com/pandas-dev/pandas
|
pandas/core/arrays/datetimes.py
|
import sys

# Python 2/3 compatibility: alias ``input`` to ``raw_input`` on Python 2 so
# it always returns the raw string (Python 3's ``input`` already does).
try: input = raw_input
except NameError: pass  # Python 3: raw_input does not exist; nothing to do
class bcolors:
    """ANSI escape sequences used to colour and style terminal output."""

    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'


# Colour names (looked up upper-cased by write()) mapped to their escapes.
colors = dict(
    GREEN=bcolors.OKGREEN,
    BLUE=bcolors.OKBLUE,
    MAGENTA=bcolors.HEADER,
    PURPLE=bcolors.HEADER,
    YELLOW=bcolors.WARNING,
    RED=bcolors.FAIL,
    NONE=bcolors.ENDC,
)

# Text-attribute names (looked up upper-cased by write()).
attribs = dict(
    BOLD=bcolors.BOLD,
    UNDERLINE=bcolors.UNDERLINE,
)
def exit_cond(x):
    """Default exit predicate: True when *x* is one of the quit keywords.

    A ``def`` instead of a name-bound lambda (PEP 8 E731); it remains a
    plain module-level callable so set_exit_cond() can still rebind it.
    """
    return x in {'q', 'quit', 'leave', 'exit'}
def set_exit_cond(condition):
    """Replace the module-wide exit predicate with *condition*.

    *condition* must be a callable taking the raw input string and
    returning truthy when the user wants to quit.
    """
    global exit_cond
    exit_cond = condition
def get_char(s, char_list):
    """Prompt with *s* until the user types a member of *char_list*.

    Returns the accepted string, or None when the exit predicate matches.
    """
    while True:
        answer = input(s)
        if exit_cond(answer):
            return None
        if answer in char_list:
            return answer
def get_number(s, max_val=None):
    """Prompt with *s* until the user enters an int (<= *max_val* if given).

    Returns the integer, or None when the exit predicate matches the input.
    Out-of-range values silently re-prompt, as before.
    """
    while True:
        string = input(s)
        if exit_cond(string):
            return None
        try:
            val = int(string)
        except ValueError:
            # Narrowed from a bare ``except:``, which also swallowed
            # KeyboardInterrupt/SystemExit and any error raised by
            # input() or exit_cond() itself.
            print('Not a number. Try again')
            continue
        if max_val is None or val <= max_val:
            return val
def get_string(s):
    """Prompt once with *s*; return the reply, or None on an exit keyword."""
    reply = input(s)
    return None if exit_cond(reply) else reply
def get_word(s):
    """Prompt once with *s*; return False on an exit keyword, else True.

    NOTE(review): the typed word itself is discarded — callers only learn
    whether the user asked to quit.
    """
    return not exit_cond(input(s))
def ask_addition_question(m, n):
    """Ask 'm + n = ?' with up to three attempts.

    Returns 1 on a correct answer, 0 after three wrong answers, and -1
    when the user asked to quit.
    """
    for _attempt in range(3):
        result = get_number(str(m) + ' + ' + str(n) + ' = ')
        if result is None:  # `is None`, not `== None` (PEP 8)
            return -1
        if result == (m + n):
            print('Correct !')
            return 1
        print('Wrong. try again!')
    return 0
def ask_multiplication_question(m, n):
    """Ask 'm x n = ?' with up to three attempts.

    Returns 1 on a correct answer, 0 after three wrong answers, and -1
    when the user asked to quit.
    """
    for _attempt in range(3):
        result = get_number(str(m) + ' x ' + str(n) + ' = ')
        if result is None:  # `is None`, not `== None` (PEP 8)
            return -1
        if result == (m * n):
            print('Correct !')
            return 1
        print('Wrong. try again!')
    return 0
def ask_subtraction_question(m, n):
    """Ask 'm - n = ?' (larger operand first) with up to three attempts.

    Returns 1 on a correct answer, 0 after three wrong answers, and -1
    when the user asked to quit.
    """
    if m < n:
        # Keep the answer non-negative. The original re-swapped inside the
        # loop on every attempt, which was a no-op after the first pass.
        m, n = n, m
    for _attempt in range(3):
        result = get_number(str(m) + ' - ' + str(n) + ' = ')
        if result is None:  # `is None`, not `== None` (PEP 8)
            return -1
        if result == (m - n):
            print('Correct !')
            return 1
        print('Wrong. try again!')
    return 0
def ask_word_question(word):
    """Show *word* (padded with spaces) for reading practice.

    Returns False when the user asked to quit, True otherwise.
    """
    return get_word(' {0} '.format(word))
def write(text, color=None, *attrib):
    """Print *text*, optionally coloured and styled with ANSI escapes.

    Parameters
    ----------
    text : str, the message to print.
    color : str or None, a (case-insensitive) key of ``colors``.
    *attrib : str, (case-insensitive) keys of ``attribs``, e.g. 'bold'.

    Unknown colour/attribute names raise KeyError, as before.
    """
    prefix = ''
    suffix = ''  # fixed typo: was `sufix`
    if color is not None:  # idiomatic `is not None` (was `not color is None`)
        prefix += colors[color.upper()]
    for at in attrib:
        prefix += attribs[at.upper()]
    if prefix:
        # Reset styling after the text so subsequent output is unaffected.
        suffix = colors['NONE']
    print(prefix + text + suffix)
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python
#
# Copyright (c) 2015 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Liu, Yun <yunx.liu@intel.com>
import unittest
import os
import comm
from xml.etree import ElementTree
import json
class TestCrosswalkApptoolsFunctions(unittest.TestCase):
    def test_update_name(self):
        """Build the app after setting a manifest name containing a space."""
        comm.setUp()
        comm.create(self)
        os.chdir('org.xwalk.test')
        # Same path was spelled out twice before; factor it once.
        manifest_path = comm.ConstPath + "/../tools/org.xwalk.test/app/manifest.json"
        # ``with`` guarantees the handles are closed. The original passed an
        # anonymous open(...) straight to json.dump and never closed it, so
        # the updated manifest was not reliably flushed to disk before the
        # build step read it back.
        with open(manifest_path, "r") as jsonfile:
            jsonDict = json.loads(jsonfile.read())
        jsonDict["name"] = "org.exa mple.test"
        with open(manifest_path, "w") as jsonfile:
            json.dump(jsonDict, jsonfile)
        buildcmd = comm.HOST_PREFIX + comm.PackTools + "crosswalk-app build"
        comm.build(self, buildcmd)
        comm.clear("org.xwalk.test")
if __name__ == '__main__':
    # Allow running this test module directly from the command line.
    unittest.main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- test-case-name: twisted.test.test_tcp -*-
# Copyright (c) 2001-2009 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Various asynchronous TCP/IP classes.
End users shouldn't use this module directly - use the reactor APIs instead.
Maintainer: Itamar Shtull-Trauring
"""
# System Imports
import os
import types
import socket
import sys
import operator
from zope.interface import implements, classImplements
try:
from OpenSSL import SSL
except ImportError:
SSL = None
from twisted.python.runtime import platformType
if platformType == 'win32':
# no such thing as WSAEPERM or error code 10001 according to winsock.h or MSDN
EPERM = object()
from errno import WSAEINVAL as EINVAL
from errno import WSAEWOULDBLOCK as EWOULDBLOCK
from errno import WSAEINPROGRESS as EINPROGRESS
from errno import WSAEALREADY as EALREADY
from errno import WSAECONNRESET as ECONNRESET
from errno import WSAEISCONN as EISCONN
from errno import WSAENOTCONN as ENOTCONN
from errno import WSAEINTR as EINTR
from errno import WSAENOBUFS as ENOBUFS
from errno import WSAEMFILE as EMFILE
# No such thing as WSAENFILE, either.
ENFILE = object()
# Nor ENOMEM
ENOMEM = object()
EAGAIN = EWOULDBLOCK
from errno import WSAECONNRESET as ECONNABORTED
from twisted.python.win32 import formatError as strerror
else:
from errno import EPERM
from errno import EINVAL
from errno import EWOULDBLOCK
from errno import EINPROGRESS
from errno import EALREADY
from errno import ECONNRESET
from errno import EISCONN
from errno import ENOTCONN
from errno import EINTR
from errno import ENOBUFS
from errno import EMFILE
from errno import ENFILE
from errno import ENOMEM
from errno import EAGAIN
from errno import ECONNABORTED
from os import strerror
from errno import errorcode
# Twisted Imports
from twisted.internet import defer, base, address, fdesc
from twisted.internet.task import deferLater
from twisted.python import log, failure, reflect
from twisted.python.util import unsignedID
from twisted.internet.error import CannotListenError
from twisted.internet import abstract, main, interfaces, error
class _SocketCloser:
_socketShutdownMethod = 'shutdown'
def _closeSocket(self):
# socket.close() doesn't *really* close if there's another reference
# to it in the TCP/IP stack, e.g. if it was was inherited by a
# subprocess. And we really do want to close the connection. So we
# use shutdown() instead, and then close() in order to release the
# filedescriptor.
skt = self.socket
try:
getattr(skt, self._socketShutdownMethod)(2)
except socket.error:
pass
try:
skt.close()
except socket.error:
pass
class _TLSMixin:
    """
    Mixin that makes a L{Connection} speak TLS through a pyOpenSSL
    C{SSL.Connection} socket.

    OpenSSL may need to read before it can write (and vice versa) during
    handshakes/renegotiation; the C{writeBlockedOnRead} /
    C{readBlockedOnWrite} flags track that state and temporarily override
    the read/write interest the user asked for, which is remembered in
    C{_userWantRead} / C{_userWantWrite} and restored by _resetReadWrite.
    """
    # Shut down the *underlying* raw socket, not the SSL wrapper.
    _socketShutdownMethod = 'sock_shutdown'

    writeBlockedOnRead = 0
    readBlockedOnWrite = 0
    _userWantRead = _userWantWrite = True

    def getPeerCertificate(self):
        """Return the peer's certificate from the SSL connection, if any."""
        return self.socket.get_peer_certificate()

    def doRead(self):
        """Read TLS data, translating OpenSSL want-read/want-write signals."""
        if self.disconnected:
            # See the comment in the similar check in doWrite below.
            # Additionally, in order for anything other than returning
            # CONNECTION_DONE here to make sense, it will probably be necessary
            # to implement a way to switch back to TCP from TLS (actually, if
            # we did something other than return CONNECTION_DONE, that would be
            # a big part of implementing that feature). In other words, the
            # expectation is that doRead will be called when self.disconnected
            # is True only when the connection has been lost. It's possible
            # that the other end could stop speaking TLS and then send us some
            # non-TLS data. We'll end up ignoring that data and dropping the
            # connection. There's no unit tests for this check in the cases
            # where it makes a difference. The test suite only hits this
            # codepath when it would have otherwise hit the SSL.ZeroReturnError
            # exception handler below, which has exactly the same behavior as
            # this conditional. Maybe that's the only case that can ever be
            # triggered, I'm not sure. -exarkun
            return main.CONNECTION_DONE
        if self.writeBlockedOnRead:
            # The read OpenSSL was waiting for has arrived; restore the
            # user-requested read/write interest.
            self.writeBlockedOnRead = 0
            self._resetReadWrite()
        try:
            return Connection.doRead(self)
        except SSL.ZeroReturnError:
            # Clean TLS close from the peer.
            return main.CONNECTION_DONE
        except SSL.WantReadError:
            return
        except SSL.WantWriteError:
            # OpenSSL needs to write before it can read again.
            self.readBlockedOnWrite = 1
            Connection.startWriting(self)
            Connection.stopReading(self)
            return
        except SSL.SysCallError, (retval, desc):
            if ((retval == -1 and desc == 'Unexpected EOF')
                or retval > 0):
                return main.CONNECTION_LOST
            log.err()
            return main.CONNECTION_LOST
        except SSL.Error, e:
            return e

    def doWrite(self):
        """Write TLS data, handling the deferred-disconnect states first."""
        # Retry disconnecting
        if self.disconnected:
            # This case is triggered when "disconnected" is set to True by a
            # call to _postLoseConnection from FileDescriptor.doWrite (to which
            # we upcall at the end of this overridden version of that API). It
            # means that while, as far as any protocol connected to this
            # transport is concerned, the connection no longer exists, the
            # connection *does* actually still exist. Instead of closing the
            # connection in the overridden _postLoseConnection, we probably
            # tried (and failed) to send a TLS close alert. The TCP connection
            # is still up and we're waiting for the socket to become writeable
            # enough for the TLS close alert to actually be sendable. Only
            # then will the connection actually be torn down. -exarkun
            return self._postLoseConnection()
        if self._writeDisconnected:
            return self._closeWriteConnection()

        if self.readBlockedOnWrite:
            # The write OpenSSL was waiting for is now possible; restore the
            # user-requested read/write interest.
            self.readBlockedOnWrite = 0
            self._resetReadWrite()
        return Connection.doWrite(self)

    def writeSomeData(self, data):
        """Write through the SSL layer, mapping SSL errors to 0/lost."""
        try:
            return Connection.writeSomeData(self, data)
        except SSL.WantWriteError:
            return 0
        except SSL.WantReadError:
            # OpenSSL needs to read before it can write again.
            self.writeBlockedOnRead = 1
            Connection.stopWriting(self)
            Connection.startReading(self)
            return 0
        except SSL.ZeroReturnError:
            return main.CONNECTION_LOST
        except SSL.SysCallError, e:
            if e[0] == -1 and data == "":
                # errors when writing empty strings are expected
                # and can be ignored
                return 0
            else:
                return main.CONNECTION_LOST
        except SSL.Error, e:
            return e

    def _postLoseConnection(self):
        """
        Gets called after loseConnection(), after buffered data is sent.

        We try to send an SSL shutdown alert, but if it doesn't work, retry
        when the socket is writable.
        """
        # Here, set "disconnected" to True to trick higher levels into thinking
        # the connection is really gone. It's not, and we're not going to
        # close it yet. Instead, we'll try to send a TLS close alert to shut
        # down the TLS connection cleanly. Only after we actually get the
        # close alert into the socket will we disconnect the underlying TCP
        # connection.
        self.disconnected = True
        if hasattr(self.socket, 'set_shutdown'):
            # If possible, mark the state of the TLS connection as having
            # already received a TLS close alert from the peer. Why do
            # this???
            self.socket.set_shutdown(SSL.RECEIVED_SHUTDOWN)
        return self._sendCloseAlert()

    def _sendCloseAlert(self):
        """Try to send the TLS close-notify alert; see the comments below."""
        # Okay, *THIS* is a bit complicated.

        # Basically, the issue is, OpenSSL seems to not actually return
        # errors from SSL_shutdown. Therefore, the only way to
        # determine if the close notification has been sent is by
        # SSL_shutdown returning "done". However, it will not claim it's
        # done until it's both sent *and* received a shutdown notification.

        # I don't actually want to wait for a received shutdown
        # notification, though, so, I have to set RECEIVED_SHUTDOWN
        # before calling shutdown. Then, it'll return True once it's
        # *SENT* the shutdown.

        # However, RECEIVED_SHUTDOWN can't be left set, because then
        # reads will fail, breaking half close.

        # Also, since shutdown doesn't report errors, an empty write call is
        # done first, to try to detect if the connection has gone away.
        # (*NOT* an SSL_write call, because that fails once you've called
        # shutdown)
        try:
            os.write(self.socket.fileno(), '')
        except OSError, se:
            if se.args[0] in (EINTR, EWOULDBLOCK, ENOBUFS):
                return 0
            # Write error, socket gone
            return main.CONNECTION_LOST

        try:
            if hasattr(self.socket, 'set_shutdown'):
                laststate = self.socket.get_shutdown()
                self.socket.set_shutdown(laststate | SSL.RECEIVED_SHUTDOWN)
                done = self.socket.shutdown()
                if not (laststate & SSL.RECEIVED_SHUTDOWN):
                    self.socket.set_shutdown(SSL.SENT_SHUTDOWN)
            else:
                #warnings.warn("SSL connection shutdown possibly unreliable, "
                #              "please upgrade to ver 0.XX", category=UserWarning)
                self.socket.shutdown()
                done = True
        except SSL.Error, e:
            return e

        if done:
            self.stopWriting()
            # Note that this is tested for by identity below.
            return main.CONNECTION_DONE
        else:
            # For some reason, the close alert wasn't sent. Start writing
            # again so that we'll get another chance to send it.
            self.startWriting()
            # On Linux, select will sometimes not report a closed file
            # descriptor in the write set (in particular, it seems that if a
            # send() fails with EPIPE, the socket will not appear in the write
            # set). The shutdown call above (which calls down to SSL_shutdown)
            # may have swallowed a write error. Therefore, also start reading
            # so that if the socket is closed we will notice. This doesn't
            # seem to be a problem for poll (because poll reports errors
            # separately) or with select on BSD (presumably because, unlike
            # Linux, it doesn't implement select in terms of poll and then map
            # POLLHUP to select's in fd_set).
            self.startReading()
            return None

    def _closeWriteConnection(self):
        """Half-close: only finish once the close alert actually went out."""
        result = self._sendCloseAlert()

        if result is main.CONNECTION_DONE:
            return Connection._closeWriteConnection(self)

        return result

    def startReading(self):
        """Record the user's wish to read; defer while OpenSSL is blocked."""
        self._userWantRead = True
        if not self.readBlockedOnWrite:
            return Connection.startReading(self)

    def stopReading(self):
        """Record the user's wish to stop reading; defer while blocked."""
        self._userWantRead = False
        if not self.writeBlockedOnRead:
            return Connection.stopReading(self)

    def startWriting(self):
        """Record the user's wish to write; defer while OpenSSL is blocked."""
        self._userWantWrite = True
        if not self.writeBlockedOnRead:
            return Connection.startWriting(self)

    def stopWriting(self):
        """Record the user's wish to stop writing; defer while blocked."""
        self._userWantWrite = False
        if not self.readBlockedOnWrite:
            return Connection.stopWriting(self)

    def _resetReadWrite(self):
        # After changing readBlockedOnWrite or writeBlockedOnRead,
        # call this to reset the state to what the user requested.
        if self._userWantWrite:
            self.startWriting()
        else:
            self.stopWriting()

        if self._userWantRead:
            self.startReading()
        else:
            self.stopReading()
class _TLSDelayed(object):
"""
State tracking record for TLS startup parameters. Used to remember how
TLS should be started when starting it is delayed to wait for the output
buffer to be flushed.
@ivar bufferedData: A C{list} which contains all the data which was
written to the transport after an attempt to start TLS was made but
before the buffers outstanding at that time could be flushed and TLS
could really be started. This is appended to by the transport's
write and writeSequence methods until it is possible to actually
start TLS, then it is written to the TLS-enabled transport.
@ivar context: An SSL context factory object to use to start TLS.
@ivar extra: An extra argument to pass to the transport's C{startTLS}
method.
"""
def __init__(self, bufferedData, context, extra):
self.bufferedData = bufferedData
self.context = context
self.extra = extra
def _getTLSClass(klass, _existing={}):
    """Return (and memoize) the TLS-enabled subclass of *klass*.

    The mutable default dict is deliberate: it acts as a per-process cache
    so each Connection subclass gets exactly one TLS variant.
    """
    try:
        return _existing[klass]
    except KeyError:
        class TLSConnection(_TLSMixin, klass):
            implements(interfaces.ISSLTransport)
        _existing[klass] = TLSConnection
        return TLSConnection
class Connection(abstract.FileDescriptor, _SocketCloser):
    """
    Superclass of all socket-based FileDescriptors.

    This is an abstract superclass of all objects which represent a TCP/IP
    connection based socket.

    @ivar logstr: prefix used when logging events related to this connection.
    @type logstr: C{str}
    """

    implements(interfaces.ITCPTransport, interfaces.ISystemHandle)

    # Flag flipped to 1 by _startTLS once the transport switches to TLS.
    TLS = 0

    def __init__(self, skt, protocol, reactor=None):
        abstract.FileDescriptor.__init__(self, reactor=reactor)
        self.socket = skt
        # Non-blocking: the reactor multiplexes reads/writes on this socket.
        self.socket.setblocking(0)
        self.fileno = skt.fileno
        self.protocol = protocol

    if SSL:
        # TLS support is only compiled in when pyOpenSSL is importable.
        _tlsWaiting = None
        def startTLS(self, ctx, extra):
            assert not self.TLS
            if self.dataBuffer or self._tempDataBuffer:
                # pre-TLS bytes are still being written. Starting TLS now
                # will do the wrong thing. Instead, mark that we're trying
                # to go into the TLS state.
                self._tlsWaiting = _TLSDelayed([], ctx, extra)
                return False

            self.stopReading()
            self.stopWriting()
            self._startTLS()
            # Wrap the raw socket in an SSL.Connection from here on.
            self.socket = SSL.Connection(ctx.getContext(), self.socket)
            self.fileno = self.socket.fileno
            self.startReading()
            return True

        def _startTLS(self):
            self.TLS = 1
            # Swap in the memoized TLS subclass of this exact class.
            self.__class__ = _getTLSClass(self.__class__)

        def write(self, bytes):
            if self._tlsWaiting is not None:
                # TLS startup is pending; buffer until it completes.
                self._tlsWaiting.bufferedData.append(bytes)
            else:
                abstract.FileDescriptor.write(self, bytes)

        def writeSequence(self, iovec):
            if self._tlsWaiting is not None:
                # TLS startup is pending; buffer until it completes.
                self._tlsWaiting.bufferedData.extend(iovec)
            else:
                abstract.FileDescriptor.writeSequence(self, iovec)

        def doWrite(self):
            result = abstract.FileDescriptor.doWrite(self)
            if self._tlsWaiting is not None:
                if not self.dataBuffer and not self._tempDataBuffer:
                    # Pre-TLS buffers have drained: start TLS now and
                    # replay everything buffered in the meantime.
                    waiting = self._tlsWaiting
                    self._tlsWaiting = None
                    self.startTLS(waiting.context, waiting.extra)
                    self.writeSequence(waiting.bufferedData)
            return result

    def getHandle(self):
        """Return the socket for this connection."""
        return self.socket

    def doRead(self):
        """Calls self.protocol.dataReceived with all available data.

        This reads up to self.bufferSize bytes of data from its socket, then
        calls self.dataReceived(data) to process it. If the connection is not
        lost through an error in the physical recv(), this function will return
        the result of the dataReceived call.
        """
        try:
            data = self.socket.recv(self.bufferSize)
        except socket.error, se:
            if se.args[0] == EWOULDBLOCK:
                # Nothing to read right now; try again on the next select.
                return
            else:
                return main.CONNECTION_LOST
        if not data:
            # Zero-byte recv means the peer closed the connection.
            return main.CONNECTION_DONE
        return self.protocol.dataReceived(data)

    def writeSomeData(self, data):
        """
        Write as much as possible of the given data to this TCP connection.

        This sends up to C{self.SEND_LIMIT} bytes from C{data}. If the
        connection is lost, an exception is returned. Otherwise, the number
        of bytes successfully written is returned.
        """
        try:
            # Limit length of buffer to try to send, because some OSes are too
            # stupid to do so themselves (ahem windows)
            return self.socket.send(buffer(data, 0, self.SEND_LIMIT))
        except socket.error, se:
            if se.args[0] == EINTR:
                # Interrupted by a signal: retry immediately.
                return self.writeSomeData(data)
            elif se.args[0] in (EWOULDBLOCK, ENOBUFS):
                return 0
            else:
                return main.CONNECTION_LOST

    def _closeWriteConnection(self):
        try:
            getattr(self.socket, self._socketShutdownMethod)(1)
        except socket.error:
            pass
        p = interfaces.IHalfCloseableProtocol(self.protocol, None)
        if p:
            try:
                p.writeConnectionLost()
            except:
                f = failure.Failure()
                log.err()
                self.connectionLost(f)

    def readConnectionLost(self, reason):
        p = interfaces.IHalfCloseableProtocol(self.protocol, None)
        if p:
            try:
                p.readConnectionLost()
            except:
                log.err()
                self.connectionLost(failure.Failure())
        else:
            self.connectionLost(reason)

    def connectionLost(self, reason):
        """See abstract.FileDescriptor.connectionLost().
        """
        abstract.FileDescriptor.connectionLost(self, reason)
        self._closeSocket()
        protocol = self.protocol
        # Break reference cycles before notifying the protocol.
        del self.protocol
        del self.socket
        del self.fileno
        protocol.connectionLost(reason)

    logstr = "Uninitialized"

    def logPrefix(self):
        """Return the prefix to log with when I own the logging thread.
        """
        return self.logstr

    def getTcpNoDelay(self):
        return operator.truth(self.socket.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY))

    def setTcpNoDelay(self, enabled):
        self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, enabled)

    def getTcpKeepAlive(self):
        return operator.truth(self.socket.getsockopt(socket.SOL_SOCKET,
                                                     socket.SO_KEEPALIVE))

    def setTcpKeepAlive(self, enabled):
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, enabled)
if SSL:
    # pyOpenSSL is available: advertise that Connection can switch to TLS.
    classImplements(Connection, interfaces.ITLSTransport)
class BaseClient(Connection):
    """A base class for client TCP (and similar) sockets.
    """
    addressFamily = socket.AF_INET
    socketType = socket.SOCK_STREAM

    def _finishInit(self, whenDone, skt, error, reactor):
        """Called by base classes to continue to next stage of initialization."""
        if whenDone:
            Connection.__init__(self, skt, None, reactor)
            # Until the connection completes, readable/writable events both
            # drive the connect state machine rather than real I/O.
            self.doWrite = self.doConnect
            self.doRead = self.doConnect
            reactor.callLater(0, whenDone)
        else:
            reactor.callLater(0, self.failIfNotConnected, error)

    def startTLS(self, ctx, client=1):
        if Connection.startTLS(self, ctx, client):
            if client:
                self.socket.set_connect_state()
            else:
                self.socket.set_accept_state()

    def stopConnecting(self):
        """Stop attempt to connect."""
        self.failIfNotConnected(error.UserError())

    def failIfNotConnected(self, err):
        """
        Generic method called when the attempts to connect failed. It basically
        cleans everything it can: call connectionFailed, stop read and write,
        delete socket related members.
        """
        if (self.connected or self.disconnected or
            not hasattr(self, "connector")):
            # Already connected, already torn down, or already failed.
            return

        self.connector.connectionFailed(failure.Failure(err))
        if hasattr(self, "reactor"):
            # this doesn't happen if we failed in __init__
            self.stopReading()
            self.stopWriting()
            del self.connector

        try:
            self._closeSocket()
        except AttributeError:
            pass
        else:
            del self.socket, self.fileno

    def createInternetSocket(self):
        """(internal) Create a non-blocking socket using
        self.addressFamily, self.socketType.
        """
        s = socket.socket(self.addressFamily, self.socketType)
        s.setblocking(0)
        # Don't leak the descriptor into spawned child processes.
        fdesc._setCloseOnExec(s.fileno())
        return s

    def resolveAddress(self):
        if abstract.isIPAddress(self.addr[0]):
            # Already a literal IP: skip the (asynchronous) DNS lookup.
            self._setRealAddress(self.addr[0])
        else:
            d = self.reactor.resolve(self.addr[0])
            d.addCallbacks(self._setRealAddress, self.failIfNotConnected)

    def _setRealAddress(self, address):
        self.realAddress = (address, self.addr[1])
        self.doConnect()

    def doConnect(self):
        """I connect the socket.

        Then, call the protocol's makeConnection, and start waiting for data.
        """
        if not hasattr(self, "connector"):
            # this happens when connection failed but doConnect
            # was scheduled via a callLater in self._finishInit
            return

        err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
        if err:
            self.failIfNotConnected(error.getConnectError((err, strerror(err))))
            return

        # doConnect gets called twice. The first time we actually need to
        # start the connection attempt. The second time we don't really
        # want to (SO_ERROR above will have taken care of any errors, and if
        # it reported none, the mere fact that doConnect was called again is
        # sufficient to indicate that the connection has succeeded), but it
        # is not /particularly/ detrimental to do so. This should get
        # cleaned up some day, though.
        try:
            connectResult = self.socket.connect_ex(self.realAddress)
        except socket.error, se:
            connectResult = se.args[0]
        if connectResult:
            if connectResult == EISCONN:
                pass
            # on Windows EINVAL means sometimes that we should keep trying:
            # http://msdn.microsoft.com/library/default.asp?url=/library/en-us/winsock/winsock/connect_2.asp
            elif ((connectResult in (EWOULDBLOCK, EINPROGRESS, EALREADY)) or
                  (connectResult == EINVAL and platformType == "win32")):
                self.startReading()
                self.startWriting()
                return
            else:
                self.failIfNotConnected(error.getConnectError((connectResult, strerror(connectResult))))
                return

        # If I have reached this point without raising or returning, that means
        # that the socket is connected.
        del self.doWrite
        del self.doRead
        # we first stop and then start, to reset any references to the old doRead
        self.stopReading()
        self.stopWriting()
        self._connectDone()

    def _connectDone(self):
        self.protocol = self.connector.buildProtocol(self.getPeer())
        self.connected = 1
        self.logstr = self.protocol.__class__.__name__ + ",client"
        self.startReading()
        self.protocol.makeConnection(self)

    def connectionLost(self, reason):
        if not self.connected:
            self.failIfNotConnected(error.ConnectError(string=reason))
        else:
            Connection.connectionLost(self, reason)
            self.connector.connectionLost(reason)
class Client(BaseClient):
    """A TCP client."""

    def __init__(self, host, port, bindAddress, connector, reactor=None):
        # BaseClient.__init__ is invoked later
        self.connector = connector
        self.addr = (host, port)

        whenDone = self.resolveAddress
        err = None
        skt = None

        try:
            skt = self.createInternetSocket()
        except socket.error, se:
            err = error.ConnectBindError(se[0], se[1])
            # A None whenDone tells _finishInit to report the failure.
            whenDone = None
        if whenDone and bindAddress is not None:
            try:
                skt.bind(bindAddress)
            except socket.error, se:
                err = error.ConnectBindError(se[0], se[1])
                whenDone = None
        self._finishInit(whenDone, skt, err, reactor)

    def getHost(self):
        """Returns an IPv4Address.

        This indicates the address from which I am connecting.
        """
        return address.IPv4Address('TCP', *(self.socket.getsockname() + ('INET',)))

    def getPeer(self):
        """Returns an IPv4Address.

        This indicates the address that I am connected to.
        """
        return address.IPv4Address('TCP', *(self.realAddress + ('INET',)))

    def __repr__(self):
        s = '<%s to %s at %x>' % (self.__class__, self.addr, unsignedID(self))
        return s
class Server(Connection):
    """
    Serverside socket-stream connection class.

    A transport for a socket that arrived via an accept() on a listening
    server port.
    """

    def __init__(self, sock, protocol, client, server, sessionno, reactor):
        """
        Server(sock, protocol, client, server, sessionno)

        Initialize with an accepted socket, a protocol, the peer's
        (host, port) pair, the owning Port instance and a session number.
        """
        Connection.__init__(self, sock, protocol, reactor)
        self.server = server
        self.client = client
        self.sessionno = sessionno
        self.hostname = client[0]
        protoName = self.protocol.__class__.__name__
        self.logstr = "%s,%s,%s" % (protoName, sessionno, self.hostname)
        self.repstr = "<%s #%s on %s>" % (protoName, self.sessionno,
                                          self.server._realPortNumber)
        self.startReading()
        self.connected = 1

    def __repr__(self):
        """Return the representation string computed at accept time."""
        return self.repstr

    def startTLS(self, ctx, server=1):
        if not Connection.startTLS(self, ctx, server):
            return
        if server:
            self.socket.set_accept_state()
        else:
            self.socket.set_connect_state()

    def getHost(self):
        """Return an IPv4Address describing the server side of this link."""
        local = self.socket.getsockname() + ('INET',)
        return address.IPv4Address('TCP', *local)

    def getPeer(self):
        """Return an IPv4Address describing the connected client."""
        remote = self.client + ('INET',)
        return address.IPv4Address('TCP', *remote)
class Port(base.BasePort, _SocketCloser):
    """
    A TCP server port, listening for connections.
    When a connection is accepted, this will call a factory's buildProtocol
    with the incoming address as an argument, according to the specification
    described in L{twisted.internet.interfaces.IProtocolFactory}.
    If you wish to change the sort of transport that will be used, the
    C{transport} attribute will be called with the signature expected for
    C{Server.__init__}, so it can be replaced.
    @ivar deferred: a deferred created when L{stopListening} is called, and
        that will fire when connection is lost. This is not to be used it
        directly: prefer the deferred returned by L{stopListening} instead.
    @type deferred: L{defer.Deferred}
    @ivar disconnecting: flag indicating that the L{stopListening} method has
        been called and that no connections should be accepted anymore.
    @type disconnecting: C{bool}
    @ivar connected: flag set once the listen has successfully been called on
        the socket.
    @type connected: C{bool}
    """
    implements(interfaces.IListeningPort)
    addressFamily = socket.AF_INET
    socketType = socket.SOCK_STREAM
    transport = Server
    sessionno = 0
    interface = ''
    backlog = 50
    # Actual port number being listened on, only set to a non-None
    # value when we are actually listening.
    _realPortNumber = None
    def __init__(self, port, factory, backlog=50, interface='', reactor=None):
        """Initialize with a numeric port to listen on.
        """
        base.BasePort.__init__(self, reactor=reactor)
        self.port = port
        self.factory = factory
        self.backlog = backlog
        self.interface = interface
    def __repr__(self):
        # When listening, show the real port the OS assigned (relevant when
        # port 0 was requested); otherwise show a "not listening" marker.
        if self._realPortNumber is not None:
            return "<%s of %s on %s>" % (self.__class__, self.factory.__class__,
                                         self._realPortNumber)
        else:
            return "<%s of %s (not listening)>" % (self.__class__, self.factory.__class__)
    def createInternetSocket(self):
        """Create the listening socket, enabling address reuse on POSIX."""
        s = base.BasePort.createInternetSocket(self)
        if platformType == "posix" and sys.platform != "cygwin":
            # Allow the port to be rebound quickly after a restart.
            s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        return s
    def startListening(self):
        """Create and bind my socket, and begin listening on it.
        This is called on unserialization, and must be called after creating a
        server to begin listening on the specified port.
        """
        try:
            skt = self.createInternetSocket()
            skt.bind((self.interface, self.port))
        except socket.error, le:
            raise CannotListenError, (self.interface, self.port, le)
        # Make sure that if we listened on port 0, we update that to
        # reflect what the OS actually assigned us.
        self._realPortNumber = skt.getsockname()[1]
        log.msg("%s starting on %s" % (self.factory.__class__, self._realPortNumber))
        # The order of the next 6 lines is kind of bizarre. If no one
        # can explain it, perhaps we should re-arrange them.
        self.factory.doStart()
        skt.listen(self.backlog)
        self.connected = True
        self.socket = skt
        self.fileno = self.socket.fileno
        self.numberAccepts = 100
        self.startReading()
    def _buildAddr(self, (host, port)):
        # Wrap an accept()ed (host, port) pair in an address object.
        return address._ServerFactoryIPv4Address('TCP', host, port)
    def doRead(self):
        """Called when my socket is ready for reading.
        This accepts a connection and calls self.protocol() to handle the
        wire-level protocol.
        """
        try:
            if platformType == "posix":
                numAccepts = self.numberAccepts
            else:
                # win32 event loop breaks if we do more than one accept()
                # in an iteration of the event loop.
                numAccepts = 1
            for i in range(numAccepts):
                # we need this so we can deal with a factory's buildProtocol
                # calling our loseConnection
                if self.disconnecting:
                    return
                try:
                    skt, addr = self.socket.accept()
                except socket.error, e:
                    if e.args[0] in (EWOULDBLOCK, EAGAIN):
                        # No more pending connections; remember how many we
                        # managed so the batch size self-tunes.
                        self.numberAccepts = i
                        break
                    elif e.args[0] == EPERM:
                        # Netfilter on Linux may have rejected the
                        # connection, but we get told to try to accept()
                        # anyway.
                        continue
                    elif e.args[0] in (EMFILE, ENOBUFS, ENFILE, ENOMEM, ECONNABORTED):
                        # Linux gives EMFILE when a process is not allowed
                        # to allocate any more file descriptors. *BSD and
                        # Win32 give (WSA)ENOBUFS. Linux can also give
                        # ENFILE if the system is out of inodes, or ENOMEM
                        # if there is insufficient memory to allocate a new
                        # dentry. ECONNABORTED is documented as possible on
                        # both Linux and Windows, but it is not clear
                        # whether there are actually any circumstances under
                        # which it can happen (one might expect it to be
                        # possible if a client sends a FIN or RST after the
                        # server sends a SYN|ACK but before application code
                        # calls accept(2), however at least on Linux this
                        # _seems_ to be short-circuited by syncookies.
                        log.msg("Could not accept new connection (%s)" % (
                            errorcode[e.args[0]],))
                        break
                    raise
                fdesc._setCloseOnExec(skt.fileno())
                protocol = self.factory.buildProtocol(self._buildAddr(addr))
                if protocol is None:
                    skt.close()
                    continue
                s = self.sessionno
                self.sessionno = s+1
                transport = self.transport(skt, protocol, addr, self, s, self.reactor)
                transport = self._preMakeConnection(transport)
                protocol.makeConnection(transport)
            else:
                # The whole batch was consumed without hitting EWOULDBLOCK;
                # grow the batch size for the next reactor iteration.
                self.numberAccepts = self.numberAccepts+20
        except:
            # Note that in TLS mode, this will possibly catch SSL.Errors
            # raised by self.socket.accept()
            #
            # There is no "except SSL.Error:" above because SSL may be
            # None if there is no SSL support. In any case, all the
            # "except SSL.Error:" suite would probably do is log.deferr()
            # and return, so handling it here works just as well.
            log.deferr()
    def _preMakeConnection(self, transport):
        # Hook for subclasses to wrap or replace the transport before
        # protocol.makeConnection is called.
        return transport
    def loseConnection(self, connDone=failure.Failure(main.CONNECTION_DONE)):
        """
        Stop accepting connections on this port.
        This will shut down the socket and call self.connectionLost(). It
        returns a deferred which will fire successfully when the port is
        actually closed, or with a failure if an error occurs shutting down.
        """
        self.disconnecting = True
        self.stopReading()
        if self.connected:
            self.deferred = deferLater(
                self.reactor, 0, self.connectionLost, connDone)
            return self.deferred
    # IListeningPort.stopListening is the public name for loseConnection.
    stopListening = loseConnection
    def connectionLost(self, reason):
        """
        Cleans up the socket.
        """
        log.msg('(Port %s Closed)' % self._realPortNumber)
        self._realPortNumber = None
        base.BasePort.connectionLost(self, reason)
        self.connected = False
        self._closeSocket()
        del self.socket
        del self.fileno
        try:
            self.factory.doStop()
        finally:
            self.disconnecting = False
    def logPrefix(self):
        """Returns the name of my class, to prefix log entries with.
        """
        return reflect.qual(self.factory.__class__)
    def getHost(self):
        """Returns an IPv4Address.
        This indicates the server's address.
        """
        return address.IPv4Address('TCP', *(self.socket.getsockname() + ('INET',)))
class Connector(base.BaseConnector):
    """TCP connector: resolves string service names to numeric ports and
    builds Client transports on demand (legacy Python 2 code)."""
    def __init__(self, host, port, factory, timeout, bindAddress, reactor=None):
        self.host = host
        if isinstance(port, types.StringTypes):
            # A service name such as 'http' was given; resolve it to the
            # numeric TCP port via the system services database.
            try:
                port = socket.getservbyname(port, 'tcp')
            except socket.error, e:
                raise error.ServiceNameUnknownError(string="%s (%r)" % (e, port))
        self.port = port
        self.bindAddress = bindAddress
        base.BaseConnector.__init__(self, factory, timeout, reactor)
    def _makeTransport(self):
        # Called by BaseConnector whenever a new connection attempt starts.
        return Client(self.host, self.port, self.bindAddress, self, self.reactor)
    def getDestination(self):
        """Return the IPv4Address this connector targets."""
        return address.IPv4Address('TCP', self.host, self.port, 'INET')
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import json
import os
from unittest import mock
import pytest
from airflow.api_fastapi.app import AUTH_MANAGER_FASTAPI_APP_PREFIX
from airflow.api_fastapi.auth.managers.models.resource_details import AccessView
from airflow.api_fastapi.auth.managers.simple.user import SimpleAuthManagerUser
from airflow.api_fastapi.common.types import MenuItem
from tests_common.test_utils.config import conf_vars
class TestSimpleAuthManager:
    """Unit tests for Airflow's SimpleAuthManager.

    The ``auth_manager`` argument is a pytest fixture (presumably defined in
    a conftest — TODO confirm) providing a fresh SimpleAuthManager whose
    generated-password file lives in a temporary location.
    """
    def test_get_users(self, auth_manager):
        # Users are parsed from the "username:role" comma-separated config.
        with conf_vars(
            {
                ("core", "simple_auth_manager_users"): "test1:viewer,test2:viewer",
            }
        ):
            users = auth_manager.get_users()
        assert users == [{"role": "viewer", "username": "test1"}, {"role": "viewer", "username": "test2"}]
    @pytest.mark.parametrize(
        ("file_content", "expected"),
        [
            ("{}", {}),
            ("", {}),
            ('{"test1": "test1"}', {"test1": "test1"}),
            ('{"test1": "test1", "test2": "test2"}', {"test1": "test1", "test2": "test2"}),
        ],
    )
    def test_get_passwords(self, auth_manager, file_content, expected):
        # Passwords are read back from the generated-password JSON file;
        # an empty file must behave like an empty mapping.
        with conf_vars(
            {
                ("core", "simple_auth_manager_users"): "test1:viewer,test2:viewer",
            }
        ):
            with open(auth_manager.get_generated_password_file(), "w") as file:
                file.write(file_content)
            passwords = auth_manager.get_passwords()
            assert passwords == expected
    def test_init_with_default_user(self, auth_manager):
        # With no users configured, init() still creates one default entry.
        auth_manager.init()
        with open(auth_manager.get_generated_password_file()) as file:
            passwords_str = file.read().strip()
            user_passwords_from_file = json.loads(passwords_str)
        assert len(user_passwords_from_file) == 1
    def test_init_with_users(self, auth_manager):
        # init() generates one password per configured user.
        with conf_vars(
            {
                ("core", "simple_auth_manager_users"): "test1:viewer,test2:viewer",
            }
        ):
            auth_manager.init()
        with open(auth_manager.get_generated_password_file()) as file:
            passwords_str = file.read().strip()
            user_passwords_from_file = json.loads(passwords_str)
        assert len(user_passwords_from_file) == 2
    @pytest.mark.parametrize(
        ("file_content", "expected"),
        [
            ({"test1": "test1"}, {"test1": "test1"}),
            ({"test2": "test2", "test3": "test3"}, {"test1": mock.ANY, "test2": "test2", "test3": "test3"}),
        ],
    )
    def test_init_with_users_with_password(self, auth_manager, file_content, expected):
        # Existing passwords are preserved; missing configured users get a
        # freshly generated password (hence mock.ANY above).
        with conf_vars(
            {
                ("core", "simple_auth_manager_users"): "test1:viewer",
            }
        ):
            with open(auth_manager.get_generated_password_file(), "w") as file:
                file.write(json.dumps(file_content) + "\n")
            auth_manager.init()
            with open(auth_manager.get_generated_password_file()) as file:
                passwords_str = file.read().strip()
                user_passwords_from_file = json.loads(passwords_str)
            assert user_passwords_from_file == expected
    def test_init_with_all_admins(self, auth_manager):
        # In "all admins" mode there is no password file at all.
        with conf_vars({("core", "simple_auth_manager_all_admins"): "true"}):
            auth_manager.init()
        assert not os.path.exists(auth_manager.get_generated_password_file())
    def test_get_url_login(self, auth_manager):
        result = auth_manager.get_url_login()
        assert result == AUTH_MANAGER_FASTAPI_APP_PREFIX + "/login"
    def test_get_url_login_with_all_admins(self, auth_manager):
        # "all admins" mode skips the login form in favor of the token login.
        with conf_vars({("core", "simple_auth_manager_all_admins"): "true"}):
            result = auth_manager.get_url_login()
        assert result == AUTH_MANAGER_FASTAPI_APP_PREFIX + "/token/login"
    def test_deserialize_user(self, auth_manager):
        result = auth_manager.deserialize_user({"sub": "test", "role": "admin"})
        assert result.username == "test"
        assert result.role == "admin"
    def test_serialize_user(self, auth_manager):
        # serialize/deserialize must round-trip through the {"sub", "role"} dict.
        user = SimpleAuthManagerUser(username="test", role="admin")
        result = auth_manager.serialize_user(user)
        assert result == {"sub": "test", "role": "admin"}
    @pytest.mark.parametrize(
        "api",
        [
            "is_authorized_configuration",
            "is_authorized_connection",
            "is_authorized_dag",
            "is_authorized_asset",
            "is_authorized_asset_alias",
            "is_authorized_pool",
            "is_authorized_variable",
        ],
    )
    @pytest.mark.parametrize(
        ("role", "method", "result"),
        [
            ("ADMIN", "GET", True),
            ("ADMIN", "DELETE", True),
            ("VIEWER", "POST", False),
            ("VIEWER", "PUT", False),
            ("VIEWER", "DELETE", False),
        ],
    )
    def test_is_authorized_methods(self, auth_manager, api, role, method, result):
        # ADMIN may do anything; VIEWER may not mutate.
        assert (
            getattr(auth_manager, api)(method=method, user=SimpleAuthManagerUser(username="test", role=role))
            is result
        )
    @pytest.mark.parametrize(
        ("api", "kwargs"),
        [
            ("is_authorized_view", {"access_view": AccessView.CLUSTER_ACTIVITY}),
            (
                "is_authorized_custom_view",
                {
                    "method": "GET",
                    "resource_name": "test",
                },
            ),
        ],
    )
    @pytest.mark.parametrize(
        ("role", "result"),
        [
            ("ADMIN", True),
            ("VIEWER", True),
            ("USER", True),
            ("OP", True),
        ],
    )
    def test_is_authorized_view_methods(self, auth_manager, api, kwargs, role, result):
        # Views are readable by every role.
        assert (
            getattr(auth_manager, api)(**kwargs, user=SimpleAuthManagerUser(username="test", role=role))
            is result
        )
    @pytest.mark.parametrize(
        "api",
        [
            "is_authorized_configuration",
            "is_authorized_connection",
            "is_authorized_asset",
            "is_authorized_asset_alias",
            "is_authorized_pool",
            "is_authorized_variable",
        ],
    )
    @pytest.mark.parametrize(
        ("role", "method", "result"),
        [
            ("ADMIN", "GET", True),
            ("OP", "DELETE", True),
            ("USER", "DELETE", False),
            ("VIEWER", "PUT", False),
        ],
    )
    def test_is_authorized_methods_op_role_required(self, auth_manager, api, role, method, result):
        # Mutations on these resources require at least the OP role.
        assert (
            getattr(auth_manager, api)(method=method, user=SimpleAuthManagerUser(username="test", role=role))
            is result
        )
    @pytest.mark.parametrize(
        "api",
        ["is_authorized_dag"],
    )
    @pytest.mark.parametrize(
        ("role", "method", "result"),
        [
            ("ADMIN", "GET", True),
            ("OP", "DELETE", True),
            ("USER", "GET", True),
            ("USER", "DELETE", True),
            ("VIEWER", "PUT", False),
        ],
    )
    def test_is_authorized_methods_user_role_required(self, auth_manager, api, role, method, result):
        # DAG mutations are allowed from the USER role upwards.
        assert (
            getattr(auth_manager, api)(method=method, user=SimpleAuthManagerUser(username="test", role=role))
            is result
        )
    @pytest.mark.parametrize(
        "api",
        [
            "is_authorized_dag",
            "is_authorized_asset",
            "is_authorized_asset_alias",
            "is_authorized_pool",
        ],
    )
    @pytest.mark.parametrize(
        ("role", "method", "result"),
        [
            ("ADMIN", "GET", True),
            ("VIEWER", "GET", True),
            ("OP", "GET", True),
            ("USER", "GET", True),
            ("VIEWER", "POST", False),
        ],
    )
    def test_is_authorized_methods_viewer_role_required_for_get(
        self, auth_manager, api, role, method, result
    ):
        # GET on these resources only requires the VIEWER role.
        assert (
            getattr(auth_manager, api)(method=method, user=SimpleAuthManagerUser(username="test", role=role))
            is result
        )
    def test_is_authorized_team(self, auth_manager):
        result = auth_manager.is_authorized_team(
            method="GET", user=SimpleAuthManagerUser(username="test", role=None)
        )
        assert result is True
    def test_filter_authorized_menu_items(self, auth_manager):
        # The simple auth manager filters nothing from the menu.
        items = [MenuItem.ASSETS]
        results = auth_manager.filter_authorized_menu_items(
            items, user=SimpleAuthManagerUser(username="test", role=None)
        )
        assert results == items
    @pytest.mark.parametrize(
        ("all_admins", "user_id", "assigned_users", "expected"),
        [
            # When simple_auth_manager_all_admins=True, any user should be allowed
            (True, "user1", {"user2"}, True),
            (True, "user2", {"user2"}, True),
            (True, "admin", {"test_user"}, True),
            # When simple_auth_manager_all_admins=False, user must be in assigned_users
            (False, "user1", {"user1"}, True),
            (False, "user2", {"user1"}, False),
            (False, "admin", {"test_user"}, False),
            # When no assigned_users, allow access
            (False, "user1", set(), True),
        ],
    )
    def test_is_authorized_hitl_task(self, auth_manager, all_admins, user_id, assigned_users, expected):
        """Test is_authorized_hitl_task method with different configurations."""
        with conf_vars({("core", "simple_auth_manager_all_admins"): str(all_admins)}):
            user = SimpleAuthManagerUser(username=user_id, role="user")
            result = auth_manager.is_authorized_hitl_task(assigned_users=assigned_users, user=user)
            assert result == expected
|
python
|
github
|
https://github.com/apache/airflow
|
airflow-core/tests/unit/api_fastapi/auth/managers/simple/test_simple_auth_manager.py
|
// Copyright 2025 The Cockroach Authors.
//
// Use of this software is governed by the CockroachDB Software License
// included in the /LICENSE file.
package base_test
import (
"testing"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
)
func TestJoinListType(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// Each case feeds a raw --join value through JoinListType.Set and then
	// checks either the rendered flag string or the expected error pattern.
	cases := []struct {
		input    string
		expected string
		errRE    string
	}{
		{"", "", "no address specified in --join"},
		{":", "--join=:" + base.DefaultPort, ""},
		{"a", "--join=a:" + base.DefaultPort, ""},
		{"a,b", "--join=a:" + base.DefaultPort + " --join=b:" + base.DefaultPort, ""},
		{"a,,b", "--join=a:" + base.DefaultPort + " --join=b:" + base.DefaultPort, ""},
		{",a", "--join=a:" + base.DefaultPort, ""},
		{"a,", "--join=a:" + base.DefaultPort, ""},
		{"a:123,b", "--join=a:123 --join=b:" + base.DefaultPort, ""},
		{"[::1]:123,b", "--join=[::1]:123 --join=b:" + base.DefaultPort, ""},
		{"[::1,b", "", `address \[::1: missing ']' in address`},
	}
	for _, tc := range cases {
		t.Run(tc.input, func(t *testing.T) {
			var list base.JoinListType
			setErr := list.Set(tc.input)
			if !testutils.IsError(setErr, tc.errRE) {
				t.Fatalf("error: expected %q, got: %+v", tc.errRE, setErr)
			}
			if tc.errRE != "" {
				// Error cases have nothing further to verify.
				return
			}
			if got := list.String(); got != tc.expected {
				t.Errorf("expected: %q, got: %q", tc.expected, got)
			}
		})
	}
}
|
go
|
github
|
https://github.com/cockroachdb/cockroach
|
pkg/base/join_list_test.go
|
from nose.tools import assert_equal, assert_true, assert_false, assert_raises
import networkx as nx
from networkx.algorithms.connectivity import (minimum_st_edge_cut,
minimum_st_node_cut)
from networkx.algorithms.flow import (edmonds_karp, preflow_push,
shortest_augmenting_path)
from networkx.utils import arbitrary_element
# All max-flow backends must yield equivalent cut results; every test below
# is repeated for each of them.
flow_funcs = [edmonds_karp, preflow_push, shortest_augmenting_path]
# Template used to report which flow function a failing assertion ran with.
msg = "Assertion failed in function: {0}"
# Tests for node and edge cutsets
def _generate_no_biconnected(max_attempts=50):
    """Yield random graphs that are connected but not biconnected.

    Samples G(100, 0.0575) graphs; each suitable graph resets the attempt
    counter. Raises Exception after ``max_attempts`` consecutive failures
    instead of looping forever.
    """
    attempts = 0
    while True:
        G = nx.fast_gnp_random_graph(100, 0.0575)
        if nx.is_connected(G) and not nx.is_biconnected(G):
            attempts = 0
            yield G
        else:
            if attempts >= max_attempts:
                # Bug fix: the original formatted the message twice —
                # "Tried %d times" % attempts produced a string with no
                # remaining placeholder, and the subsequent
                # "msg % max_attempts" raised TypeError instead of the
                # intended Exception. Format exactly once.
                raise Exception(
                    "Tried %d times: no suitable Graph." % max_attempts)
            else:
                attempts += 1
def test_articulation_points():
    """A minimum node cut of size one must be an articulation point."""
    graphs = _generate_no_biconnected()
    for flow_func in flow_funcs:
        for _ in range(3):
            G = next(graphs)
            cut = nx.minimum_node_cut(G, flow_func=flow_func)
            assert_true(len(cut) == 1, msg=msg.format(flow_func.__name__))
            assert_true(cut.pop() in set(nx.articulation_points(G)),
                        msg=msg.format(flow_func.__name__))
def test_brandes_erlebach_book():
    # Figure 1 chapter 7: Connectivity
    # http://www.informatik.uni-augsburg.de/thi/personen/kammer/Graph_Connectivity.pdf
    # Checks known s-t and global edge/node cuts on the book's example graph.
    G = nx.Graph()
    G.add_edges_from([(1, 2), (1, 3), (1, 4), (1, 5), (2, 3), (2, 6), (3, 4),
                      (3, 6), (4, 6), (4, 7), (5, 7), (6, 8), (6, 9), (7, 8),
                      (7, 10), (8, 11), (9, 10), (9, 11), (10, 11)])
    for flow_func in flow_funcs:
        kwargs = dict(flow_func=flow_func)
        # edge cutsets
        assert_equal(3, len(nx.minimum_edge_cut(G, 1, 11, **kwargs)),
                     msg=msg.format(flow_func.__name__))
        edge_cut = nx.minimum_edge_cut(G, **kwargs)
        # Node 5 has only two edges
        assert_equal(2, len(edge_cut), msg=msg.format(flow_func.__name__))
        H = G.copy()
        H.remove_edges_from(edge_cut)
        # Removing a minimum edge cut must disconnect the graph.
        assert_false(nx.is_connected(H), msg=msg.format(flow_func.__name__))
        # node cuts
        assert_equal(set([6, 7]), minimum_st_node_cut(G, 1, 11, **kwargs),
                     msg=msg.format(flow_func.__name__))
        assert_equal(set([6, 7]), nx.minimum_node_cut(G, 1, 11, **kwargs),
                     msg=msg.format(flow_func.__name__))
        node_cut = nx.minimum_node_cut(G, **kwargs)
        assert_equal(2, len(node_cut), msg=msg.format(flow_func.__name__))
        H = G.copy()
        H.remove_nodes_from(node_cut)
        assert_false(nx.is_connected(H), msg=msg.format(flow_func.__name__))
def test_white_harary_paper():
    # Figure 1b white and harary (2001)
    # http://eclectic.ss.uci.edu/~drwhite/sm-w23.PDF
    # A graph with high adhesion (edge connectivity) and low cohesion
    # (node connectivity)
    G = nx.disjoint_union(nx.complete_graph(4), nx.complete_graph(4))
    G.remove_node(7)
    for i in range(4,7):
        G.add_edge(0,i)
    G = nx.disjoint_union(G, nx.complete_graph(4))
    G.remove_node(G.order()-1)
    for i in range(7,10):
        G.add_edge(0,i)
    for flow_func in flow_funcs:
        kwargs = dict(flow_func=flow_func)
        # edge cuts: 3 edges must be removed to disconnect
        edge_cut = nx.minimum_edge_cut(G, **kwargs)
        assert_equal(3, len(edge_cut), msg=msg.format(flow_func.__name__))
        H = G.copy()
        H.remove_edges_from(edge_cut)
        assert_false(nx.is_connected(H), msg=msg.format(flow_func.__name__))
        # node cuts: node 0 alone is the articulation point
        node_cut = nx.minimum_node_cut(G, **kwargs)
        assert_equal(set([0]), node_cut, msg=msg.format(flow_func.__name__))
        H = G.copy()
        H.remove_nodes_from(node_cut)
        assert_false(nx.is_connected(H), msg=msg.format(flow_func.__name__))
def test_petersen_cutset():
    """The Petersen graph has node and edge connectivity 3."""
    G = nx.petersen_graph()
    for flow_func in flow_funcs:
        kwargs = dict(flow_func=flow_func)
        err = msg.format(flow_func.__name__)
        # Edge cutset: size 3, and removing it disconnects the graph.
        edge_cut = nx.minimum_edge_cut(G, **kwargs)
        assert_equal(3, len(edge_cut), msg=err)
        H = G.copy()
        H.remove_edges_from(edge_cut)
        assert_false(nx.is_connected(H), msg=err)
        # Node cutset: same size, same disconnection property.
        node_cut = nx.minimum_node_cut(G, **kwargs)
        assert_equal(3, len(node_cut), msg=err)
        H = G.copy()
        H.remove_nodes_from(node_cut)
        assert_false(nx.is_connected(H), msg=err)
def test_octahedral_cutset():
    """The octahedral graph has node and edge connectivity 4."""
    G = nx.octahedral_graph()
    for flow_func in flow_funcs:
        kwargs = dict(flow_func=flow_func)
        err = msg.format(flow_func.__name__)
        # Edge cutset: size 4, removing it disconnects the graph.
        edge_cut = nx.minimum_edge_cut(G, **kwargs)
        assert_equal(4, len(edge_cut), msg=err)
        H = G.copy()
        H.remove_edges_from(edge_cut)
        assert_false(nx.is_connected(H), msg=err)
        # Node cutset: same size, same disconnection property.
        node_cut = nx.minimum_node_cut(G, **kwargs)
        assert_equal(4, len(node_cut), msg=err)
        H = G.copy()
        H.remove_nodes_from(node_cut)
        assert_false(nx.is_connected(H), msg=err)
def test_icosahedral_cutset():
    """The icosahedral graph has node and edge connectivity 5."""
    G = nx.icosahedral_graph()
    for flow_func in flow_funcs:
        kwargs = dict(flow_func=flow_func)
        err = msg.format(flow_func.__name__)
        # Edge cutset: size 5, removing it disconnects the graph.
        edge_cut = nx.minimum_edge_cut(G, **kwargs)
        assert_equal(5, len(edge_cut), msg=err)
        H = G.copy()
        H.remove_edges_from(edge_cut)
        assert_false(nx.is_connected(H), msg=err)
        # Node cutset: same size, same disconnection property.
        node_cut = nx.minimum_node_cut(G, **kwargs)
        assert_equal(5, len(node_cut), msg=err)
        H = G.copy()
        H.remove_nodes_from(node_cut)
        assert_false(nx.is_connected(H), msg=err)
def test_node_cutset_exception():
    """A disconnected input graph must raise NetworkXError."""
    G = nx.Graph([(1, 2), (3, 4)])
    for flow_func in flow_funcs:
        assert_raises(nx.NetworkXError, nx.minimum_node_cut, G,
                      flow_func=flow_func)
def test_node_cutset_random_graphs():
    """On random connected graphs, |minimum node cut| equals the node
    connectivity, and removing the cut disconnects the graph."""
    for flow_func in flow_funcs:
        for _ in range(3):
            G = nx.fast_gnp_random_graph(50, 0.25)
            if not nx.is_connected(G):
                # Stitch all components to an anchor so the graph is connected.
                components = iter(nx.connected_components(G))
                anchor = arbitrary_element(next(components))
                G.add_edges_from((anchor, arbitrary_element(comp))
                                 for comp in components)
            cutset = nx.minimum_node_cut(G, flow_func=flow_func)
            assert_equal(nx.node_connectivity(G), len(cutset),
                         msg=msg.format(flow_func.__name__))
            G.remove_nodes_from(cutset)
            assert_false(nx.is_connected(G), msg=msg.format(flow_func.__name__))
def test_edge_cutset_random_graphs():
    """On random connected graphs, |minimum edge cut| equals the edge
    connectivity, and removing the cut disconnects the graph."""
    for flow_func in flow_funcs:
        for _ in range(3):
            G = nx.fast_gnp_random_graph(50, 0.25)
            if not nx.is_connected(G):
                # Stitch all components to an anchor so the graph is connected.
                components = iter(nx.connected_components(G))
                anchor = arbitrary_element(next(components))
                G.add_edges_from((anchor, arbitrary_element(comp))
                                 for comp in components)
            cutset = nx.minimum_edge_cut(G, flow_func=flow_func)
            assert_equal(nx.edge_connectivity(G), len(cutset),
                         msg=msg.format(flow_func.__name__))
            G.remove_edges_from(cutset)
            assert_false(nx.is_connected(G), msg=msg.format(flow_func.__name__))
def test_empty_graphs():
    """Cut computations on empty (di)graphs are a pointless concept."""
    for empty in (nx.Graph(), nx.DiGraph()):
        for interface_func in (nx.minimum_node_cut, nx.minimum_edge_cut):
            for flow_func in flow_funcs:
                assert_raises(nx.NetworkXPointlessConcept, interface_func,
                              empty, flow_func=flow_func)
def test_unbounded():
    """In K5 every s-t edge cut between distinct nodes has size 4."""
    G = nx.complete_graph(5)
    for flow_func in flow_funcs:
        cut = minimum_st_edge_cut(G, 1, 4, flow_func=flow_func)
        assert_equal(4, len(cut))
def test_missing_source():
    """A source node absent from the graph must raise NetworkXError."""
    G = nx.path_graph(4)
    for interface_func in (nx.minimum_edge_cut, nx.minimum_node_cut):
        for flow_func in flow_funcs:
            assert_raises(nx.NetworkXError, interface_func, G, 10, 1,
                          flow_func=flow_func)
def test_missing_target():
    """A target node absent from the graph must raise NetworkXError."""
    G = nx.path_graph(4)
    for interface_func in (nx.minimum_edge_cut, nx.minimum_node_cut):
        for flow_func in flow_funcs:
            assert_raises(nx.NetworkXError, interface_func, G, 1, 10,
                          flow_func=flow_func)
def test_not_weakly_connected():
    """A digraph that is not weakly connected must raise NetworkXError."""
    G = nx.DiGraph()
    nx.add_path(G, [1, 2, 3])
    nx.add_path(G, [4, 5])
    for interface_func in (nx.minimum_edge_cut, nx.minimum_node_cut):
        for flow_func in flow_funcs:
            assert_raises(nx.NetworkXError, interface_func, G,
                          flow_func=flow_func)
def test_not_connected():
    """An undirected graph that is not connected must raise NetworkXError."""
    G = nx.Graph()
    nx.add_path(G, [1, 2, 3])
    nx.add_path(G, [4, 5])
    for interface_func in (nx.minimum_edge_cut, nx.minimum_node_cut):
        for flow_func in flow_funcs:
            assert_raises(nx.NetworkXError, interface_func, G,
                          flow_func=flow_func)
def tests_min_cut_complete():
    """K5 has both node and edge connectivity 4."""
    G = nx.complete_graph(5)
    for interface_func in (nx.minimum_edge_cut, nx.minimum_node_cut):
        for flow_func in flow_funcs:
            assert_equal(4, len(interface_func(G, flow_func=flow_func)))
def tests_min_cut_complete_directed():
    """The directed K5 also has node and edge connectivity 4."""
    G = nx.complete_graph(5).to_directed()
    for interface_func in (nx.minimum_edge_cut, nx.minimum_node_cut):
        for flow_func in flow_funcs:
            assert_equal(4, len(interface_func(G, flow_func=flow_func)))
def tests_minimum_st_node_cut():
    """Adjacent source and target admit no interior node cut."""
    G = nx.Graph()
    G.add_nodes_from([0, 1, 2, 3, 7, 8, 11, 12])
    G.add_edges_from([(7, 11), (1, 11), (1, 12), (12, 8), (0, 1)])
    nodelist = minimum_st_node_cut(G, 7, 11)
    assert nodelist == []
def test_invalid_auxiliary():
    """Passing a plain graph as the auxiliary digraph must raise."""
    G = nx.complete_graph(5)
    assert_raises(nx.NetworkXError, minimum_st_node_cut, G, 0, 3,
                  auxiliary=G)
def test_interface_only_source():
    """Providing only a source (no target) must raise NetworkXError."""
    G = nx.complete_graph(5)
    for interface_func in (nx.minimum_node_cut, nx.minimum_edge_cut):
        assert_raises(nx.NetworkXError, interface_func, G, s=0)
def test_interface_only_target():
    """Providing only a target (no source) must raise NetworkXError."""
    G = nx.complete_graph(5)
    for interface_func in (nx.minimum_node_cut, nx.minimum_edge_cut):
        assert_raises(nx.NetworkXError, interface_func, G, t=3)
|
unknown
|
codeparrot/codeparrot-clean
| ||
import sys

import numpy as np

import maskgen
from maskgen.image_wrap import ImageWrapper
from maskgen_coco import createMaskImageWithParams
"""
Selects a Mask from Coco presegmented images
"""
def transform(img, source, target, **kwargs):
    """Select a mask from COCO pre-segmented data and save it to ``target``.

    Bug fix: the original body called ``np.asarray`` but the module never
    imported numpy, so every invocation died with NameError (import added
    at the top of the file).

    :param img: source image (PIL-compatible), converted to an array for
        the COCO mask builder
    :param source: source file name passed through to the mask builder
    :param target: destination file name for the mask image
    :param kwargs: optional 'area.lower.bound' / 'area.upper.bound'
        pixel-area limits plus the COCO parameters consumed by
        createMaskImageWithParams
    :return: ({'subject': annotation}, None) per the maskgen plugin contract
    """
    # Missing bounds default to the widest possible range (Python 2 maxint).
    area_constraints = (int(kwargs['area.lower.bound']) if 'area.lower.bound' in kwargs else 0,
                        int(kwargs['area.upper.bound']) if 'area.upper.bound' in kwargs else sys.maxint)
    annotation, mask = createMaskImageWithParams(np.asarray(img), source, kwargs,
                                                 areaConstraint=area_constraints)
    ImageWrapper(mask).save(target)
    return {'subject': annotation}, None
def operation():
    """Return the maskgen operation descriptor for this plugin.

    Declares the operation name/category, the plugin arguments (the COCO
    data, its index, and optional area bounds), and the supported
    image->image transition.
    """
    return {'name': 'SelectRegion',
            'category': 'Select',
            'software': 'maskgen',
            # Record only the short version prefix of the running maskgen.
            'version': maskgen.__version__[0:6],
            'arguments': {
                'coco': {
                    "type": "str",
                    "description": "Coco Object."
                },
                'coco.index': {
                    "type": "str",
                    "description": "Coco file->id Dictionary"
                },
                'area.lower.bound': {
                    "type": "int[0:100000000000]",
                    "description": "lower bound on area of segment in pixels"
                },
                'area.upper.bound': {
                    "type": "int[0:100000000000]",
                    "description": "upper bound on area of segment in pixels"
                }
            },
            'description': 'Create a limited selection in a donor image. The provided inputmask is placed as the alpha channel of the result image',
            'transitions': [
                'image.image'
            ]
            }
def suffix():
    """File extension of the mask image written by transform()."""
    return '.png'
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""Admin module"""
import os
import math
from datetime import datetime
import copy
import discord
from discord.ext import commands
from modules.utils import checks
from modules.utils import utils
def is_owner_or_moderator(ctx):
    """Returns true if the author is the bot's owner or a moderator on the server"""
    # Checked in order: bot owner, server owner, then the per-server
    # moderator list stored on the bot (legacy discord.py uses
    # message.server rather than message.guild).
    return ctx.message.author.id == ctx.bot.owner_id \
        or ctx.message.author.id == ctx.message.server.owner.id \
        or (ctx.message.server.id in ctx.bot.moderators \
        and ctx.message.author.id in ctx.bot.moderators[ctx.message.server.id])
COLORS = { \
"Unban": 255 * math.pow(16, 2), \
"Warning": 250 * math.pow(16, 4) + 219 * math.pow(16, 2) + 24, \
"Kick": 243 * math.pow(16, 4) + 111 * math.pow(16, 2) + 40, \
"Ban": 255 * math.pow(16, 4), \
"b1nzy ban": 1 \
}
class Log:
    """A class representing a log element (warning, kick, ban, ...)."""
    # Monotonically increasing case number shared by all Log instances;
    # restored from disk by Admin.load_servers_config.
    id_counter = 1
    def __init__(self, log_type: str, member_id: str, \
                 responsible_id: str, reason: str, date: str):
        """Init function"""
        #pylint: disable=too-many-arguments
        self.type = log_type
        self.user_id = member_id
        self.responsible_id = responsible_id
        self.reason = reason
        self.date = date
        # Assign the next case number and advance the shared counter.
        self.log_id = Log.id_counter
        Log.id_counter += 1
    def get_save(self):
        """Returns a dict representing a log element"""
        # Note: the target user id is stored by the caller as the dict key,
        # so it is intentionally absent here.
        data = {}
        data["type"] = self.type
        data["responsible"] = self.responsible_id
        data["reason"] = self.reason
        data["date"] = self.date
        data["id"] = self.log_id
        return data
    def get_embed(self, bot):
        """Returns an embed corresponding to the log"""
        embed = discord.Embed()
        # Resolve the target user; they may no longer share a server with
        # the bot, in which case a placeholder title is used.
        user = discord.utils.find(lambda u: u.id == self.user_id, \
            bot.get_all_members())
        if user:
            embed.title = user.name + "#" + user.discriminator + " (" + user.id + ")"
            embed.set_thumbnail(url=user.avatar_url)
        else:
            embed.title = "Unknown member (" + self.user_id + ")"
        # Same resolution for the responsible moderator.
        responsible = discord.utils.find(lambda u: u.id == self.responsible_id, \
            bot.get_all_members())
        if responsible:
            embed.add_field(name="Responsible", \
                value=responsible.name + "#" + responsible.discriminator + \
                " (" + responsible.id + ")", inline=False)
        else:
            embed.add_field(name="Responsible", \
                value="Uknown responsible (" + self.responsible_id + ")", inline=False)
        # Dates are persisted in this exact format; see Admin.save_servers_config.
        embed.timestamp = datetime.strptime(self.date, "%d/%m/%Y %H:%M:%S")
        embed.colour = discord.Colour(value=COLORS[self.type])
        embed.set_author(name="Case #" + str(self.log_id))
        embed.add_field(name="Reason", value=self.reason, inline=False)
        return embed
class Admin:
"""Admin module"""
#pylint: disable=too-many-public-methods
    def load_moderators(self):
        """Loads the moderators from disk, creating an empty file (and the
        data/admin directory) on first run."""
        if not os.path.exists(self.bot.moderators_file_path):
            if not os.path.isdir("data/admin"):
                os.makedirs("data/admin")
            utils.save_json(self.bot.moderators, self.bot.moderators_file_path)
        else:
            self.bot.moderators = utils.load_json(self.bot.moderators_file_path)
    def save_moderators(self):
        """Saves the moderators mapping to its JSON file."""
        utils.save_json(self.bot.moderators, self.bot.moderators_file_path)
def load_servers_config(self):
"""Loads the configuration"""
#pylint: disable=too-many-nested-blocks
if not os.path.exists(self.servers_config_file_path):
if not os.path.isdir("data/admin"):
os.makedirs("data/admin")
self.servers_config["id counter"] = 1
self.servers_config["servers"] = {}
Log.id_counter = 1
utils.save_json(self.servers_config, self.servers_config_file_path)
else:
data = utils.load_json(self.servers_config_file_path)
for server in data["servers"]:
if "log channel" in data["servers"][server]:
data["servers"][server]["log channel"] = discord.utils.find(lambda c, s=server:\
c.id == data["servers"][s]["log channel"], self.bot.get_all_channels())
if "logs" in data["servers"][server]:
logs = {}
known_admins = {}
for user_id in data["servers"][server]["logs"]:
member = discord.utils.find(lambda m, u=user_id: m.id == u, \
self.bot.get_all_members())
logs[user_id] = []
for log in data["servers"][server]["logs"][user_id]:
if log["responsible"] not in known_admins:
responsible = discord.utils.find(lambda r, l=log: \
r.id == l["responsible"], self.bot.get_all_members())
known_admins[log["responsible"]] = responsible
else:
responsible = known_admins[log["responsible"]]
logs[user_id].append(Log(log_type=log["type"],
member_id=member.id if member else "UNKNOWN",
responsible_id=responsible.id,
reason=log["reason"],
date=log["date"]))
data["servers"][server]["logs"] = logs
Log.id_counter = data["id counter"]
self.servers_config = data
    def save_servers_config(self):
        """Saves the configuration, converting live objects back to ids.

        Works on a deep copy so the in-memory config keeps its channel
        objects and Log instances.
        """
        self.servers_config["id counter"] = Log.id_counter
        data = copy.deepcopy(self.servers_config)
        for server in data["servers"]:
            if "log channel" in data["servers"][server]:
                # Persist only the channel id, not the channel object.
                data["servers"][server]["log channel"] = data["servers"][server]["log channel"].id
            if "logs" in data["servers"][server]:
                logs = {}
                for user_id in data["servers"][server]["logs"]:
                    logs[user_id] = []
                    for log in data["servers"][server]["logs"][user_id]:
                        logs[user_id].append(log.get_save())
                data["servers"][server]["logs"] = logs
        utils.save_json(data, self.servers_config_file_path)
    def load_b1nzy_banlist(self):
        """Loads the b1nzy banlist from disk, creating an empty file (and
        the data/admin directory) on first run."""
        if not os.path.exists(self.b1nzy_banlist_path):
            if not os.path.isdir("data/admin"):
                os.makedirs("data/admin")
            utils.save_json(self.b1nzy_banlist, self.b1nzy_banlist_path)
        else:
            self.b1nzy_banlist = utils.load_json(self.b1nzy_banlist_path)
def save_b1nzy_banlist(self):
        """Persists the per-server b1nzy banlist dict as JSON."""
        utils.save_json(self.b1nzy_banlist, self.b1nzy_banlist_path)
def __init__(self, bot):
        """Initializes the Admin cog and loads its persisted state from disk."""
        self.bot = bot
        # Per-server configuration: log channel + moderation logs.
        self.servers_config = {}
        self.servers_config_file_path = "data/admin/servers_config.json"
        self.load_servers_config()
        # Moderator IDs are stored on the bot object so permission checks
        # elsewhere can reach them.
        self.bot.moderators = {}
        self.bot.moderators_file_path = "data/admin/moderators.json"
        self.load_moderators()
        # Per-server lists of user IDs to ban as soon as they (re)join.
        self.b1nzy_banlist = {}
        self.b1nzy_banlist_path = "data/admin/b1nzy_banlist.json"
        self.load_b1nzy_banlist()
async def send_log(self, server: discord.Server, log: Log):
        """Sends an embed corresponding to the log in the server's log channel.

        Parameters:
            server: The server whose configured log channel receives the embed.
            log: The Log entry to render and send.

        Delivery failures are reported to the server owner (missing
        permissions / deleted channel); other HTTP errors are ignored.
        """
        # Nothing to do if the server has no log channel configured.
        if server.id not in self.servers_config["servers"]:
            return
        if "log channel" not in self.servers_config["servers"][server.id]:
            return
        embed = log.get_embed(self.bot)
        channel = self.servers_config["servers"][server.id]["log channel"]
        try:
            await self.bot.send_message(destination=channel, embed=embed)
        except discord.Forbidden:
            await self.bot.send_message(destination=server.owner, content=\
                "I'm not allowed to send embeds in the log channel (#" + \
                channel.name + "). Please change my permissions.")
        except discord.NotFound:
            # Fix: message said "doesn't exists".
            await self.bot.send_message(destination=server.owner, content=\
                "I'm not allowed to send embeds in the log channel because " + \
                "it doesn't exist anymore. Please set another log channel " + \
                "using the `[p]set_log_channel` command.")
        except (discord.HTTPException, discord.InvalidArgument):
            # Best effort only: delivery problems must not crash the
            # moderation command that produced the log.
            pass
@commands.command()
@checks.is_owner()
async def add_blacklist(self, user: discord.Member):
        """Adds an user to the bot's blacklist
        Parameters:
            user: The user you want to add to the bot's blacklist.
        Example: [p]add_blacklist @AVeryMeanUser"""
        # Guard clause: refuse duplicates.
        if user.id in self.bot.blacklist:
            await self.bot.say(user.name + "#" + user.discriminator + " (" + \
                               user.id + ") is already blacklisted.")
            return
        self.bot.blacklist.append(user.id)
        utils.save_json(self.bot.blacklist, self.bot.blacklist_file_path)
        await self.bot.say("Done.")
@commands.command()
@checks.is_owner()
async def add_blacklist_id(self, user_id: str):
        """Adds an user to the bot's blacklist using his ID
        Parameters:
            user_id: The ID of the user you want to add to the bot's blacklist.
        Example: [p]add_blacklist_id 346654353341546499"""
        # Guard clause: refuse duplicates.
        if user_id in self.bot.blacklist:
            await self.bot.say("This ID is already in the blacklist.")
            return
        self.bot.blacklist.append(user_id)
        utils.save_json(self.bot.blacklist, self.bot.blacklist_file_path)
        await self.bot.say("Done.")
@commands.command()
@checks.is_owner()
async def rem_blacklist(self, user: discord.Member):
        """Removes an user from the bot's blacklist
        Parameters:
            user: The user you want to remove from the bot's blacklist.
        Example: [p]rem_blacklist @AGoodGuyUnfairlyBlacklisted"""
        # Guard clause: nothing to remove.
        if user.id not in self.bot.blacklist:
            await self.bot.say("This user wasn't even blacklisted.")
            return
        self.bot.blacklist.remove(user.id)
        utils.save_json(self.bot.blacklist, self.bot.blacklist_file_path)
        await self.bot.say("Done.")
@commands.command()
@checks.is_owner()
async def rem_blacklist_id(self, user_id: str):
        """Removes an user from the bot's blacklist using his ID
        Parameters:
            user_id: The ID of the user you want to remove from the bot's blacklist.
        Example: [p]rem_blacklist_id 346654353341546499"""
        if user_id in self.bot.blacklist:
            self.bot.blacklist.remove(user_id)
            utils.save_json(self.bot.blacklist, self.bot.blacklist_file_path)
            await self.bot.say("Done.")
        else:
            await self.bot.say("This ID wasn't even in the blacklist.")
@commands.command(pass_context=True)
@checks.is_owner_or_server_owner()
async def add_moderator(self, ctx, member: discord.Member):
        """Adds a moderator for the server
        Parameters:
            member: The member that will become a moderator.
        Example: [p]add_moderator @Beafantles"""
        # Create the server entry on first use, then check for duplicates.
        mods = self.bot.moderators.setdefault(ctx.message.server.id, [])
        if member.id in mods:
            await self.bot.say(member.name + "#" + member.discriminator + \
                               " is already a moderator on this server.")
            return
        mods.append(member.id)
        self.save_moderators()
        await self.bot.say("Done.")
@commands.command(pass_context=True)
@checks.is_owner_or_server_owner()
async def add_moderator_id(self, ctx, member_id: str):
        """Adds a moderator for the server using his ID
        Parameters:
            member_id: The ID of the member that will become a moderator.
        Example: [p]add_moderator_id 151661401411289088"""
        member = discord.utils.find(lambda m: m.id == member_id, \
                                    ctx.message.server.members)
        # Guard clause: the ID must belong to someone on this server.
        if not member:
            await self.bot.say("There's no member with such ID on this server.")
            return
        mods = self.bot.moderators.setdefault(ctx.message.server.id, [])
        if member.id in mods:
            await self.bot.say(member.name + "#" + member.discriminator + \
                               " is already a moderator on this server.")
            return
        mods.append(member.id)
        self.save_moderators()
        await self.bot.say("Done.")
@commands.command(pass_context=True)
@checks.is_owner_or_server_owner()
async def rem_moderator(self, ctx, moderator: discord.Member):
        """Removes a moderator
        Parameters:
            moderator: The moderator you want to revoke.
        Example: [p]rem_moderator @Kazutsuki"""
        server_mods = self.bot.moderators.get(ctx.message.server.id)
        if server_mods is None:
            await self.bot.say("There's no moderators on this server.")
            return
        if moderator.id not in server_mods:
            await self.bot.say(moderator.name + "#" + moderator.discriminator + \
                               " wasn't even a moderator on this server.")
            return
        server_mods.remove(moderator.id)
        # Drop the server entry entirely once its last moderator is gone.
        if not server_mods:
            del self.bot.moderators[ctx.message.server.id]
        self.save_moderators()
        await self.bot.say("Done.")
@commands.command(pass_context=True)
@checks.is_owner_or_server_owner()
async def rem_moderator_id(self, ctx, moderator_id: str):
        """Removes a moderator
        Parameters:
            moderator_id: The ID of the moderator you want to revoke.
        Example: [p]rem_moderator_id 118473388262948869"""
        moderator = discord.utils.find(lambda m: m.id == moderator_id, \
                                       ctx.message.server.members)
        # Guard clause: the ID must belong to someone on this server.
        if not moderator:
            await self.bot.say("There's no member with such ID on this server.")
            return
        server_mods = self.bot.moderators.get(ctx.message.server.id)
        if server_mods is None:
            await self.bot.say("There's no moderators on this server.")
            return
        if moderator.id not in server_mods:
            await self.bot.say(moderator.name + "#" + moderator.discriminator + \
                               " wasn't even a moderator on this server.")
            return
        server_mods.remove(moderator.id)
        # Drop the server entry entirely once its last moderator is gone.
        if not server_mods:
            del self.bot.moderators[ctx.message.server.id]
        self.save_moderators()
        await self.bot.say("Done.")
@commands.command(pass_context=True)
@checks.is_owner_or_server_owner()
async def list_moderators(self, ctx):
        """Lists all the moderators on this server"""
        if ctx.message.server.id not in self.bot.moderators:
            await self.bot.say("There's no moderators on this server.")
            return
        msg = "```Markdown\nModerators on this server\n========================\n\n"
        has_unknown = False
        for i, mod_id in enumerate(self.bot.moderators[ctx.message.server.id], 1):
            moderator = discord.utils.find(lambda m, mod=mod_id: m.id == mod, \
                                           ctx.message.server.members)
            if moderator:
                entry = moderator.name + "#" + moderator.discriminator + " (" + \
                        moderator.id + ")"
            else:
                # The stored ID no longer matches anyone on the server.
                entry = "Unknown moderator"
                has_unknown = True
            msg += str(i) + ". " + entry + "\n"
        msg += "```"
        if has_unknown:
            msg += "`Unknown` means that this moderator isn't on this server anymore."
        await self.bot.say(msg)
@commands.command()
@checks.is_owner()
async def list_blacklist(self):
        """Lists all the blacklisted users"""
        if not self.bot.blacklist:
            await self.bot.say("There is no blacklisted users.")
            return
        msg = "```Markdown\nList of blacklisted users:\n=================\n\n"
        has_unknown = False
        for i, user_id in enumerate(self.bot.blacklist, 1):
            user = discord.utils.find(lambda u, u_id=user_id: u.id == u_id, \
                                      self.bot.get_all_members())
            if user:
                msg += str(i) + ". " + user.name + "#" + user.discriminator + " (" + user.id + ")\n"
            else:
                # The bot shares no server with this user, so it can't resolve them.
                has_unknown = True
                msg += str(i) + ". UNKNOWN USER\n"
        msg += "```"
        if has_unknown:
            msg += "\n`UNKNOWN USER` means that this user hasn't any server in " + \
                   "common with the bot."
        await self.bot.say(msg)
@commands.command(pass_context=True)
@checks.is_owner_or_server_owner()
async def set_log_channel(self, ctx, channel: discord.Channel=None):
        """Sets the log channel for warn / mute / kick / ban commands
        Parameters:
            channel: The channel you want to set for the logs.
                     Leaving this blank will remove the channel for logs.
        Example: [p]set_log_channel #logs-channel
                 [p]set_log_channel"""
        # The bot's own Member object is needed to evaluate channel permissions.
        bot = discord.utils.find(lambda m: m.id == self.bot.user.id, \
                                 ctx.message.server.members)
        if bot:
            if channel:
                permissions = channel.permissions_for(bot)
                if permissions.send_messages:
                    # Store the channel object itself; save_servers_config()
                    # flattens it to its ID when persisting.
                    if ctx.message.server.id in self.servers_config["servers"]:
                        self.servers_config["servers"][ctx.message.server.id]["log channel"] = channel #pylint: disable=line-too-long
                    else:
                        self.servers_config["servers"][ctx.message.server.id] = {"log channel": channel} #pylint: disable=line-too-long
                    self.save_servers_config()
                    await self.bot.say("Done. :ok_hand:")
                else:
                    await self.bot.say("I'm not allowed to send messages there.\n" + \
                                       "(Missing permissions)")
            else:
                # No channel given: unset the log channel if one was configured.
                if ctx.message.server.id in self.servers_config["servers"]:
                    del self.servers_config["servers"][ctx.message.server.id]["log channel"]
                    self.save_servers_config()
                await self.bot.say("Done! :ok_hand:")
@commands.command(pass_context=True)
@checks.custom(is_owner_or_moderator)
async def show_log_channel(self, ctx):
        """Shows the log channel of the server"""
        server_conf = self.servers_config["servers"].get(ctx.message.server.id, {})
        if "log channel" in server_conf:
            channel = server_conf["log channel"]
            await self.bot.say("Log channel: <#" + channel.id + ">")
        else:
            await self.bot.say("There's no log channel set on this server.")
@commands.command(pass_context=True)
@checks.custom(is_owner_or_moderator)
async def warn(self, ctx, member: discord.Member, *reason):
        """Warns a member
        Parameters:
            member: The member you want to warn.
            *reason: The reason of the warning.
        Example: [p]warn @BagGuy Rude words.
                 [p]warn @AnotherBadGuy"""
        reason = " ".join(reason)
        log = Log(log_type="Warning", member_id=member.id, \
                  responsible_id=ctx.message.author.id, reason=reason, \
                  date=datetime.now().strftime("%d/%m/%Y %H:%M:%S"))
        # Create the nested server -> "logs" -> member structure on demand.
        server_conf = self.servers_config["servers"].setdefault(ctx.message.server.id, {})
        server_conf.setdefault("logs", {}).setdefault(member.id, []).append(log)
        self.save_servers_config()
        await self.send_log(server=ctx.message.server, log=log)
        await self.bot.say("Done.")
@commands.command(pass_context=True)
@checks.custom(is_owner_or_moderator)
async def warn_id(self, ctx, member_id: str, *reason):
        """Warns a member
        Parameters:
            member_id: The ID of the member you want to warn.
            *reason: The reason of the warning.
        Example: [p]warn 346654353341546499 Rude words.
                 [p]warn 346654353341546499"""
        member = discord.utils.find(lambda m: m.id == member_id, \
                                    ctx.message.server.members)
        # Guard clause: the ID must belong to someone on this server.
        if not member:
            await self.bot.say("There's no member with such ID on this server.")
            return
        reason = " ".join(reason)
        log = Log(log_type="Warning", member_id=member.id, \
                  responsible_id=ctx.message.author.id, reason=reason, \
                  date=datetime.now().strftime("%d/%m/%Y %H:%M:%S"))
        # Create the nested server -> "logs" -> member structure on demand.
        server_conf = self.servers_config["servers"].setdefault(ctx.message.server.id, {})
        server_conf.setdefault("logs", {}).setdefault(member.id, []).append(log)
        self.save_servers_config()
        await self.send_log(server=ctx.message.server, log=log)
        await self.bot.say("Done.")
@commands.command(pass_context=True)
@checks.custom(is_owner_or_moderator)
async def kick(self, ctx, member: discord.Member, *reason):
        """Kicks a member
        Parameters:
            member: The member you want to kick.
            *reason: The reason of the kick.
        Example: [p]kick @ABadGuy He was too rude.
                 [p]kick @AnotherBadGuy"""
        try:
            await self.bot.kick(member)
            reason = " ".join(reason)
            log = Log(log_type="Kick", member_id=member.id, responsible_id=ctx.message.author.id, \
                      reason=reason, date=datetime.now().strftime("%d/%m/%Y %H:%M:%S"))
            # Create the nested server -> "logs" -> member structure on demand.
            server_conf = self.servers_config["servers"].setdefault(ctx.message.server.id, {})
            server_conf.setdefault("logs", {}).setdefault(member.id, []).append(log)
            self.save_servers_config()
            await self.send_log(server=ctx.message.server, log=log)
            await self.bot.say("Done.")
        except discord.Forbidden:
            await self.bot.say("I'm not allowed to do that.\n" + \
                               "(Missing permissions)")
@commands.command(pass_context=True)
@checks.custom(is_owner_or_moderator)
async def kick_id(self, ctx, member_id: str, *reason):
        """Kicks a member using his ID
        Parameters:
            member_id: The ID of member you want to kick.
            *reason: The reason of the kick.
        Example: [p]kick_id 346654353341546499 Bad guy.
                 [p]kick_id 346654353341546499"""
        try:
            member = discord.utils.find(lambda m: m.id == member_id, \
                                        ctx.message.server.members)
            if member:
                await self.bot.kick(member)
                reason = " ".join(reason)
                log = Log(log_type="Kick", member_id=member.id, \
                          responsible_id=ctx.message.author.id, \
                          reason=reason, date=datetime.now().strftime("%d/%m/%Y %H:%M:%S"))
                # Create the nested server -> "logs" -> member structure on demand.
                server_conf = self.servers_config["servers"].setdefault(ctx.message.server.id, {})
                server_conf.setdefault("logs", {}).setdefault(member.id, []).append(log)
                self.save_servers_config()
                await self.send_log(server=ctx.message.server, log=log)
                await self.bot.say("Done.")
            else:
                await self.bot.say("There's no member with such ID " + \
                                   "in this server.")
        except discord.Forbidden:
            await self.bot.say("I'm not allowed to do that.\n" + \
                               "(Missing permissions)")
@commands.command(pass_context=True)
@checks.custom(is_owner_or_moderator)
async def ban(self, ctx, member: discord.Member, days="0", *reason):
        """Bans a member
        Parameters:
            member: The member you want to ban from the server.
            days: The number of days worth of messages to delete from the member in the server.
                  Default value: 0 (which means that no messages from the member will be deleted).
                  Note: The minimum is 0 and the maximum is 7.
            *reason: The reason of the ban.
        Example: [p]ban @AMeanMember 3 He was very mean!
                 [p]ban @AnotherMeanMember 4
                 [p]ban @AnotherMeanMember Spaming cute cat pics
                 [p]ban @AnotherMeanMember"""
        # If `days` doesn't parse as an int, the caller skipped it and the
        # value is actually the first word of the reason.
        try:
            days = int(days)
            to_add = ""
        except ValueError:
            to_add = days
            days = 0
        if 0 <= days <= 7:
            try:
                await self.bot.ban(member, days)
                # Fix: keep a space between the word consumed by `days` and
                # the rest of the reason (was `to_add + " ".join(reason)`,
                # producing e.g. "Spamingcute cat pics").
                reason = " ".join(word for word in (to_add,) + reason if word)
                log = Log(log_type="Ban", member_id=member.id, \
                          responsible_id=ctx.message.author.id, \
                          reason=reason, date=datetime.now().strftime("%d/%m/%Y %H:%M:%S"))
                # Create the nested server -> "logs" -> member structure on demand.
                server_conf = self.servers_config["servers"].setdefault(ctx.message.server.id, {})
                server_conf.setdefault("logs", {}).setdefault(member.id, []).append(log)
                self.save_servers_config()
                await self.send_log(server=ctx.message.server, log=log)
                await self.bot.say("Done.")
            except discord.Forbidden:
                await self.bot.say("I'm not allowed to do that.\n" + \
                                   "(Missing permissions)")
        else:
            await self.bot.say("Incorrect days value.\n" + \
                               "The minimum is 0 and the maximum is 7.")
@commands.command(pass_context=True)
@checks.custom(is_owner_or_moderator)
async def ban_id(self, ctx, member_id: str, days="0", *reason):
        """Bans a member using his ID
        Parameters:
            member_id: The ID of the member you want to ban from the server.
            days: The number of days worth of messages to delete from the member in the server.
                  Default value: 0 (which means that no messages from the member will be deleted).
                  Note: The minimum is 0 and the maximum is 7.
            *reason: The reason of the ban.
        Example: [p]ban_id 346654353341546499 3 Bad guy.
                 [p]ban_id 346654353341546499 He shouldn't be here.
                 [p]ban_id 346654353341546499 4.
                 [p]ban_id 346654353341546499"""
        # If `days` doesn't parse as an int, the caller skipped it and the
        # value is actually the first word of the reason.
        try:
            days = int(days)
            to_add = ""
        except ValueError:
            to_add = days
            days = 0
        if 0 <= days <= 7:
            member = discord.utils.find(lambda m: m.id == member_id, \
                                        ctx.message.server.members)
            if member:
                try:
                    await self.bot.ban(member, days)
                    # Fix: keep a space between the word consumed by `days`
                    # and the rest of the reason (was `to_add + " ".join(reason)`).
                    reason = " ".join(word for word in (to_add,) + reason if word)
                    log = Log(log_type="Ban", member_id=member.id, \
                              responsible_id=ctx.message.author.id, \
                              reason=reason, date=datetime.now().strftime("%d/%m/%Y %H:%M:%S"))
                    # Create the nested server -> "logs" -> member structure on demand.
                    server_conf = self.servers_config["servers"].setdefault(ctx.message.server.id, {})
                    server_conf.setdefault("logs", {}).setdefault(member.id, []).append(log)
                    self.save_servers_config()
                    await self.send_log(server=ctx.message.server, log=log)
                    await self.bot.say("Done.")
                except discord.Forbidden:
                    await self.bot.say("I'm not allowed to do that.\n" + \
                                       "(Missing permissions)")
            else:
                await self.bot.say("There's no member with such ID on this server.\n" + \
                                   "You may be interested in the `[p]b1nzy_ban` command.")
        else:
            await self.bot.say("Incorrect days value.\n" + \
                               "The minimum is 0 and the maximum is 7.")
@commands.command(pass_context=True)
@checks.custom(is_owner_or_moderator)
async def b1nzy_ban(self, ctx, member_id: str, *reason):
        """Bans a member even if he's not on the server, using his ID
        Parameters:
            member_id: The ID of the member you want to ban.
            *reason: The reason of the ban.
        Example: [p]b1nzy_ban 346654353341546499 Bad guy.
                 [p]b1nzy_ban 346654353341546499
        Note: How does it works?
              If the member left the server to avoid the ban hammer, you
              can still use this command on him. He actually wouldn't be
              banned but as soon as he would come to the server again, he
              would be instantanely banned"""
        member = discord.utils.find(lambda m: m.id == member_id, \
                                    ctx.message.server.members)
        reason = " ".join(reason)
        if member:
            # The member is still on the server: ban immediately.
            try:
                await self.bot.ban(member)
            except discord.Forbidden:
                await self.bot.say("I'm not allowed to do that.\n" + \
                                   "(Missing permissions)")
                # Fix: previously fell through and still recorded a log and
                # said "Done." even though the ban failed.
                return
        else:
            # Member already left: queue the ID so check_new_comers() bans
            # them as soon as they rejoin.
            if ctx.message.server.id not in self.b1nzy_banlist:
                self.b1nzy_banlist[ctx.message.server.id] = []
            if member_id not in self.b1nzy_banlist[ctx.message.server.id]:
                self.b1nzy_banlist[ctx.message.server.id].append(member_id)
                self.save_b1nzy_banlist()
            else:
                await self.bot.say("This user was already in the b1nzy banlist.")
                return
        log = Log(log_type="b1nzy ban", member_id=member_id, responsible_id=ctx.message.author.id, \
                  reason=reason, date=datetime.now().strftime("%d/%m/%Y %H:%M:%S"))
        # Create the nested server -> "logs" -> member structure on demand.
        server_conf = self.servers_config["servers"].setdefault(ctx.message.server.id, {})
        server_conf.setdefault("logs", {}).setdefault(member_id, []).append(log)
        self.save_servers_config()
        await self.send_log(server=ctx.message.server, log=log)
        await self.bot.say("Done.")
@commands.command(pass_context=True)
@checks.custom(is_owner_or_moderator)
async def unban(self, ctx, member: str, *reason):
        """Unbans a member
        Parameters:
            member: The member you want to unban from this server.
                    The format of this argument is Username#discriminator (see example).
            *reason: The reason of the unban.
        Example: [p]unban I'm not mean after all#1234
                 [p]unban AnInnocent#1234 He wasn't that mean."""
        member_info = member.split("#")
        if len(member_info) == 2:
            try:
                banned_members = await self.bot.get_bans(ctx.message.server)
                # Fix: `member` was the raw "Name#discrim" string, which is
                # always truthy, so an unmatched name used to be passed
                # straight to unban(). Track the match separately instead.
                target = None
                for banned in banned_members:
                    if banned.name == member_info[0] \
                       and banned.discriminator == member_info[1]:
                        target = banned
                        break
                if target:
                    await self.bot.unban(ctx.message.server, target)
                    reason = " ".join(reason)
                    log = Log(log_type="Unban", member_id=target.id, \
                              responsible_id=ctx.message.author.id, \
                              reason=reason, date=datetime.now().strftime("%d/%m/%Y %H:%M:%S"))
                    # Create the nested server -> "logs" -> member structure on demand.
                    server_conf = self.servers_config["servers"].setdefault(ctx.message.server.id, {})
                    server_conf.setdefault("logs", {}).setdefault(target.id, []).append(log)
                    self.save_servers_config()
                    await self.send_log(server=ctx.message.server, log=log)
                    await self.bot.say("Done.")
                else:
                    await self.bot.say("This user wasn't even banned here.")
            except discord.Forbidden:
                # Fix: message previously lacked the closing parenthesis.
                await self.bot.say("I'm not allowed to do that.\n" + \
                                   "(Missing permissions)")
        else:
            await self.bot.say("Incorrect format. Please check `[p]help unban`.")
@commands.command(pass_context=True)
@checks.custom(is_owner_or_moderator)
async def unban_id(self, ctx, member_id: str, *reason):
        """Unbans a member using his ID
        Parameters:
            member_id: The ID of the member you want to unban from this server.
            *reason: The reason of the unban.
        Example: [p]unban_id 151661401411289088 I shouldn't have banned him, he's too cool.
                 [p]unban_id 151661401411289088"""
        try:
            banned = await self.bot.get_bans(ctx.message.server)
            member = discord.utils.find(lambda u: u.id == member_id, banned)
            if member:
                await self.bot.unban(ctx.message.server, member)
                reason = " ".join(reason)
                log = Log(log_type="Unban", member_id=member.id, \
                          responsible_id=ctx.message.author.id, \
                          reason=reason, date=datetime.now().strftime("%d/%m/%Y %H:%M:%S"))
                # Create the nested server -> "logs" -> member structure on demand.
                server_conf = self.servers_config["servers"].setdefault(ctx.message.server.id, {})
                server_conf.setdefault("logs", {}).setdefault(member.id, []).append(log)
                self.save_servers_config()
                await self.send_log(server=ctx.message.server, log=log)
                await self.bot.say("Done.")
            else:
                await self.bot.say("This user wasn't even banned here.")
        except discord.Forbidden:
            # Fix: message previously lacked the closing parenthesis.
            await self.bot.say("I'm not allowed to do that.\n" + \
                               "(Missing permissions)")
@commands.command(pass_context=True)
@checks.custom(is_owner_or_moderator)
async def list_logs(self, ctx, member: discord.Member):
        """Lists all the logs for a member of the server
        Parameters:
            member: The member you want to get the logs from.
        Example: [p]list_logs @Beafantles"""
        if ctx.message.server.id in self.servers_config["servers"] \
           and "logs" in self.servers_config["servers"][ctx.message.server.id] \
           and member.id in self.servers_config["servers"][ctx.message.server.id]["logs"]:
            msg = "```Markdown\nLogs for " + member.name + "#" + member.discriminator + \
                  "\n========================\n\n"
            i = 1
            for log in self.servers_config["servers"][ctx.message.server.id]["logs"][member.id]:
                # NOTE(review): entries are constructed with responsible_id=...,
                # yet this reads log.responsible.* — presumably the Log class
                # resolves the member object itself. TODO confirm against Log.
                msg += str(i) + ". " + log.type + "\n"
                msg += "\tCase#" + str(log.log_id) + "\n"
                msg += "\tResponsible: " + log.responsible.name + "#" + log.responsible.discriminator + " (" + log.responsible.id + ")\n" #pylint: disable=line-too-long
                msg += "\tReason: " + log.reason + "\n"
                msg += "\tDate: " + log.date + "\n\n"
                i += 1
            msg += "```"
            await self.bot.say(msg)
        else:
            await self.bot.say("No logs found for " + member.name + "#" + \
                               member.discriminator + " in this server.")
@commands.command(pass_context=True)
@checks.custom(is_owner_or_moderator)
async def list_logs_id(self, ctx, member_id: str):
        """Lists all the logs for a member of the server using his ID
        Parameters:
            member_id: The ID of the member you want to get the logs from.
        Example: [p]list_logs_id 151661401411289088"""
        member = discord.utils.find(lambda m: m.id == member_id, \
                                    self.bot.get_all_members())
        if member:
            # Fix: the original tested `ctx.message.server.id in
            # self.servers_config` (missing the "servers" level used
            # everywhere else, incl. list_logs), so logs were never found.
            server_conf = self.servers_config["servers"].get(ctx.message.server.id, {})
            logs = server_conf.get("logs", {})
            if member.id in logs:
                msg = "```Markdown\nLogs for " + member.name + "#" + member.discriminator + \
                      "\n========================\n\n"
                i = 1
                for log in logs[member.id]:
                    msg += str(i) + ". " + log.type + "\n"
                    msg += "\tCase#" + str(log.log_id) + "\n"
                    msg += "\tResponsible: " + log.responsible.name + "#" + log.responsible.discriminator + " (" + log.responsible.id + ")\n" #pylint: disable=line-too-long
                    msg += "\tReason: " + log.reason + "\n"
                    msg += "\tDate: " + log.date + "\n\n"
                    i += 1
                msg += "```"
                await self.bot.say(msg)
            else:
                await self.bot.say("No logs found for " + member.name + "#" + \
                                   member.discriminator + " in this server.")
        else:
            await self.bot.say("There's no member with such ID on this server.")
async def check_new_comers(self, member):
        """Checks if a new comer is in the b1nzy banlist and bans them if so.

        Registered as an `on_member_join` listener in setup().
        """
        if member.server.id in self.b1nzy_banlist:
            if member.id in self.b1nzy_banlist[member.server.id]:
                try:
                    await self.bot.ban(member)
                    # Ban applied: drop the ID, and the server entry if empty.
                    self.b1nzy_banlist[member.server.id].remove(member.id)
                    if not self.b1nzy_banlist[member.server.id]:
                        del self.b1nzy_banlist[member.server.id]
                    self.save_b1nzy_banlist()
                except discord.Forbidden:
                    await self.bot.send_message(member.server.owner, \
                        "Couldn't ban " + member.name + "#" + member.discriminator + \
                        " (" + member.id + ") who's in the b1nzy banlist --> missing permissions")
                except discord.HTTPException:
                    # Best effort: other API errors are silently ignored.
                    pass
def setup(bot):
    """Entry point used by discord.py's extension loader to add this cog."""
    mod = Admin(bot)
    # Ban-on-join enforcement for the b1nzy banlist.
    bot.add_listener(mod.check_new_comers, "on_member_join")
    bot.add_cog(mod)
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = """
---
module: net_l2_interface
version_added: "2.4"
author: "Ganesh Nalawade (@ganeshrn)"
short_description: Manage Layer-2 interface on network devices
description:
- This module provides declarative management of Layer-2 interface
on network devices.
options:
name:
description:
- Name of the interface excluding any logical unit number.
aggregate:
description:
- List of Layer-2 interface definitions.
mode:
description:
- Mode in which interface needs to be configured.
default: access
choices: ['access', 'trunk']
access_vlan:
description:
- Configure given VLAN in access port.
trunk_vlans:
description:
- List of VLANs to be configured in trunk port.
native_vlan:
description:
- Native VLAN to be configured in trunk port.
trunk_allowed_vlans:
description:
- List of allowed VLAN's in a given trunk port.
state:
description:
- State of the Layer-2 Interface configuration.
default: present
choices: ['present', 'absent',]
"""
EXAMPLES = """
- name: configure Layer-2 interface
net_l2_interface:
name: gigabitethernet0/0/1
mode: access
access_vlan: 30
- name: remove Layer-2 interface configuration
net_l2_interface:
name: gigabitethernet0/0/1
state: absent
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always, except for the platforms that use Netconf transport to manage the device.
type: list
sample:
- interface gigabitethernet0/0/1
- switchport mode access
- switchport access vlan 30
"""
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""
This module implements the Residue function and related tools for working
with residues.
"""
from __future__ import print_function, division
from sympy import sympify
from sympy.utilities.timeutils import timethis
@timethis('residue')
def residue(expr, x, x0):
    """
    Finds the residue of ``expr`` at the point x=x0.
    The residue is defined as the coefficient of 1/(x-x0) in the power series
    expansion about x=x0.
    Examples
    ========
    >>> from sympy import Symbol, residue, sin
    >>> x = Symbol("x")
    >>> residue(1/x, x, 0)
    1
    >>> residue(1/x**2, x, 0)
    0
    >>> residue(2/sin(x), x, 0)
    2
    This function is essential for the Residue Theorem [1].
    References
    ==========
    1. http://en.wikipedia.org/wiki/Residue_theorem
    """
    # The current implementation uses series expansion to
    # calculate it. A more general implementation is explained in
    # the section 5.6 of the Bronstein's book {M. Bronstein:
    # Symbolic Integration I, Springer Verlag (2005)}. For purely
    # rational functions, the algorithm is much easier. See
    # sections 2.4, 2.5, and 2.7 (this section actually gives an
    # algorithm for computing any Laurent series coefficient for
    # a rational function). The theory in section 2.4 will help to
    # understand why the resultant works in the general algorithm.
    # For the definition of a resultant, see section 1.4 (and any
    # previous sections for more review).
    from sympy import collect, Mul, Order, S
    expr = sympify(expr)
    if x0 != 0:
        # Shift so that the expansion point becomes x = 0.
        expr = expr.subs(x, x + x0)
    # Expand with increasing order until the series (minus its O-term) is
    # usable, i.e. the remaining O-exponent is non-negative.
    for n in [0, 1, 2, 4, 8, 16, 32]:
        if n == 0:
            s = expr.series(x, n=0)
        else:
            s = expr.nseries(x, n=n)
        if s.has(Order) and s.removeO() == 0:
            # bug in nseries
            continue
        if not s.has(Order) or s.getn() >= 0:
            break
    if s.has(Order) and s.getn() < 0:
        raise NotImplementedError('Bug in nseries?')
    # Group the expansion by powers of x so each term can be inspected.
    s = collect(s.removeO(), x)
    if s.is_Add:
        args = s.args
    else:
        args = [s]
    # Sum the coefficients of the 1/x terms; everything else contributes
    # nothing to the residue.
    res = S(0)
    for arg in args:
        c, m = arg.as_coeff_mul(x)
        m = Mul(*m)
        if not (m == 1 or m == x or (m.is_Pow and m.exp.is_Integer)):
            raise NotImplementedError('term of unexpected form: %s' % m)
        if m == 1/x:
            res += c
    return res
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import oscar.models.fields.autoslugfield
import django.db.models.deletion
import oscar.models.fields
from django.conf import settings
class Migration(migrations.Migration):
    """Initial schema migration for the ``order`` app.

    Auto-generated (Django ``makemigrations`` style): it creates the
    order-related models (addresses, lines, prices, events, discounts,
    notes) first, then adds the cross-model foreign keys via ``AddField``
    once every model exists.
    """

    # Apps whose initial state this migration builds on; the swappable
    # dependency resolves to whatever AUTH_USER_MODEL is configured.
    dependencies = [
        ('partner', '0001_initial'),
        ('customer', '0001_initial'),
        ('address', '0001_initial'),
        ('basket', '0002_auto_20140827_1705'),
        ('catalogue', '0001_initial'),
        ('sites', '0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='BillingAddress',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(verbose_name='Title', max_length=64, blank=True, choices=[('Mr', 'Mr'), ('Miss', 'Miss'), ('Mrs', 'Mrs'), ('Ms', 'Ms'), ('Dr', 'Dr')])),
                ('first_name', models.CharField(max_length=255, verbose_name='First name', blank=True)),
                ('last_name', models.CharField(max_length=255, verbose_name='Last name', blank=True)),
                ('line1', models.CharField(max_length=255, verbose_name='First line of address')),
                ('line2', models.CharField(max_length=255, verbose_name='Second line of address', blank=True)),
                ('line3', models.CharField(max_length=255, verbose_name='Third line of address', blank=True)),
                ('line4', models.CharField(max_length=255, verbose_name='City', blank=True)),
                ('state', models.CharField(max_length=255, verbose_name='State/County', blank=True)),
                ('postcode', oscar.models.fields.UppercaseCharField(max_length=64, verbose_name='Post/Zip-code', blank=True)),
                ('search_text', models.TextField(editable=False, verbose_name='Search text - used only for searching addresses')),
                ('country', models.ForeignKey(verbose_name='Country', to='address.Country')),
            ],
            options={
                'verbose_name_plural': 'Billing addresses',
                'verbose_name': 'Billing address',
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='CommunicationEvent',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date_created', models.DateTimeField(auto_now_add=True, verbose_name='Date')),
                ('event_type', models.ForeignKey(verbose_name='Event Type', to='customer.CommunicationEventType')),
            ],
            options={
                'ordering': ['-date_created'],
                'verbose_name_plural': 'Communication Events',
                'verbose_name': 'Communication Event',
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Line',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('partner_name', models.CharField(max_length=128, verbose_name='Partner name', blank=True)),
                ('partner_sku', models.CharField(max_length=128, verbose_name='Partner SKU')),
                ('partner_line_reference', models.CharField(verbose_name='Partner reference', max_length=128, help_text='This is the item number that the partner uses within their system', blank=True)),
                ('partner_line_notes', models.TextField(verbose_name='Partner Notes', blank=True)),
                ('title', models.CharField(max_length=255, verbose_name='Title')),
                ('upc', models.CharField(verbose_name='UPC', max_length=128, blank=True, null=True)),
                ('quantity', models.PositiveIntegerField(default=1, verbose_name='Quantity')),
                ('line_price_incl_tax', models.DecimalField(max_digits=12, decimal_places=2, verbose_name='Price (inc. tax)')),
                ('line_price_excl_tax', models.DecimalField(max_digits=12, decimal_places=2, verbose_name='Price (excl. tax)')),
                ('line_price_before_discounts_incl_tax', models.DecimalField(max_digits=12, decimal_places=2, verbose_name='Price before discounts (inc. tax)')),
                ('line_price_before_discounts_excl_tax', models.DecimalField(max_digits=12, decimal_places=2, verbose_name='Price before discounts (excl. tax)')),
                ('unit_cost_price', models.DecimalField(max_digits=12, decimal_places=2, blank=True, verbose_name='Unit Cost Price', null=True)),
                ('unit_price_incl_tax', models.DecimalField(max_digits=12, decimal_places=2, blank=True, verbose_name='Unit Price (inc. tax)', null=True)),
                ('unit_price_excl_tax', models.DecimalField(max_digits=12, decimal_places=2, blank=True, verbose_name='Unit Price (excl. tax)', null=True)),
                ('unit_retail_price', models.DecimalField(max_digits=12, decimal_places=2, blank=True, verbose_name='Unit Retail Price', null=True)),
                ('status', models.CharField(max_length=255, verbose_name='Status', blank=True)),
                ('est_dispatch_date', models.DateField(blank=True, verbose_name='Estimated Dispatch Date', null=True)),
            ],
            options={
                'verbose_name_plural': 'Order Lines',
                'verbose_name': 'Order Line',
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='LineAttribute',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('type', models.CharField(max_length=128, verbose_name='Type')),
                ('value', models.CharField(max_length=255, verbose_name='Value')),
                ('line', models.ForeignKey(verbose_name='Line', related_name='attributes', to='order.Line')),
                ('option', models.ForeignKey(verbose_name='Option', on_delete=django.db.models.deletion.SET_NULL, related_name='line_attributes', to='catalogue.Option', null=True)),
            ],
            options={
                'verbose_name_plural': 'Line Attributes',
                'verbose_name': 'Line Attribute',
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='LinePrice',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('quantity', models.PositiveIntegerField(default=1, verbose_name='Quantity')),
                ('price_incl_tax', models.DecimalField(max_digits=12, decimal_places=2, verbose_name='Price (inc. tax)')),
                ('price_excl_tax', models.DecimalField(max_digits=12, decimal_places=2, verbose_name='Price (excl. tax)')),
                # NOTE(review): 'Shiping' is a typo for 'Shipping'. Editing this
                # historical migration would change its recorded state, so fix
                # the label in the model plus a follow-up data-less migration.
                ('shipping_incl_tax', models.DecimalField(default=0, max_digits=12, decimal_places=2, verbose_name='Shiping (inc. tax)')),
                ('shipping_excl_tax', models.DecimalField(default=0, max_digits=12, decimal_places=2, verbose_name='Shipping (excl. tax)')),
                ('line', models.ForeignKey(verbose_name='Line', related_name='prices', to='order.Line')),
            ],
            options={
                'ordering': ('id',),
                'verbose_name_plural': 'Line Prices',
                'verbose_name': 'Line Price',
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Order',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('number', models.CharField(max_length=128, unique=True, db_index=True, verbose_name='Order number')),
                ('currency', models.CharField(default='GBP', max_length=12, verbose_name='Currency')),
                ('total_incl_tax', models.DecimalField(max_digits=12, decimal_places=2, verbose_name='Order total (inc. tax)')),
                ('total_excl_tax', models.DecimalField(max_digits=12, decimal_places=2, verbose_name='Order total (excl. tax)')),
                ('shipping_incl_tax', models.DecimalField(default=0, max_digits=12, decimal_places=2, verbose_name='Shipping charge (inc. tax)')),
                ('shipping_excl_tax', models.DecimalField(default=0, max_digits=12, decimal_places=2, verbose_name='Shipping charge (excl. tax)')),
                ('shipping_method', models.CharField(max_length=128, verbose_name='Shipping method', blank=True)),
                ('shipping_code', models.CharField(default='', max_length=128, blank=True)),
                ('status', models.CharField(max_length=100, verbose_name='Status', blank=True)),
                ('guest_email', models.EmailField(max_length=75, verbose_name='Guest email address', blank=True)),
                ('date_placed', models.DateTimeField(auto_now_add=True, db_index=True)),
                ('basket', models.ForeignKey(null=True, verbose_name='Basket', on_delete=django.db.models.deletion.SET_NULL, to='basket.Basket', blank=True)),
                ('billing_address', models.ForeignKey(null=True, verbose_name='Billing Address', on_delete=django.db.models.deletion.SET_NULL, to='order.BillingAddress', blank=True)),
            ],
            options={
                'ordering': ['-date_placed'],
                'verbose_name_plural': 'Orders',
                'verbose_name': 'Order',
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='OrderDiscount',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('category', models.CharField(default='Basket', max_length=64, verbose_name='Discount category', choices=[('Basket', 'Basket'), ('Shipping', 'Shipping'), ('Deferred', 'Deferred')])),
                ('offer_id', models.PositiveIntegerField(blank=True, verbose_name='Offer ID', null=True)),
                ('offer_name', models.CharField(max_length=128, db_index=True, verbose_name='Offer name', blank=True)),
                ('voucher_id', models.PositiveIntegerField(blank=True, verbose_name='Voucher ID', null=True)),
                ('voucher_code', models.CharField(max_length=128, db_index=True, verbose_name='Code', blank=True)),
                ('frequency', models.PositiveIntegerField(verbose_name='Frequency', null=True)),
                ('amount', models.DecimalField(default=0, max_digits=12, decimal_places=2, verbose_name='Amount')),
                ('message', models.TextField(blank=True)),
                ('order', models.ForeignKey(verbose_name='Order', related_name='discounts', to='order.Order')),
            ],
            options={
                'verbose_name_plural': 'Order Discounts',
                'verbose_name': 'Order Discount',
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='OrderNote',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('note_type', models.CharField(max_length=128, verbose_name='Note Type', blank=True)),
                ('message', models.TextField(verbose_name='Message')),
                ('date_created', models.DateTimeField(auto_now_add=True, verbose_name='Date Created')),
                ('date_updated', models.DateTimeField(auto_now=True, verbose_name='Date Updated')),
                ('order', models.ForeignKey(verbose_name='Order', related_name='notes', to='order.Order')),
                ('user', models.ForeignKey(verbose_name='User', to=settings.AUTH_USER_MODEL, null=True)),
            ],
            options={
                'verbose_name_plural': 'Order Notes',
                'verbose_name': 'Order Note',
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='PaymentEvent',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('amount', models.DecimalField(max_digits=12, decimal_places=2, verbose_name='Amount')),
                ('reference', models.CharField(max_length=128, verbose_name='Reference', blank=True)),
                ('date_created', models.DateTimeField(auto_now_add=True, verbose_name='Date created')),
            ],
            options={
                'ordering': ['-date_created'],
                'verbose_name_plural': 'Payment Events',
                'verbose_name': 'Payment Event',
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='PaymentEventQuantity',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('quantity', models.PositiveIntegerField(verbose_name='Quantity')),
                ('event', models.ForeignKey(verbose_name='Event', related_name='line_quantities', to='order.PaymentEvent')),
                ('line', models.ForeignKey(verbose_name='Line', related_name='payment_event_quantities', to='order.Line')),
            ],
            options={
                'verbose_name_plural': 'Payment Event Quantities',
                'verbose_name': 'Payment Event Quantity',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='PaymentEventType',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(unique=True, max_length=128, verbose_name='Name')),
                ('code', oscar.models.fields.autoslugfield.AutoSlugField(populate_from='name', unique=True, verbose_name='Code', max_length=128, editable=False, blank=True)),
            ],
            options={
                'ordering': ('name',),
                'verbose_name_plural': 'Payment Event Types',
                'verbose_name': 'Payment Event Type',
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='ShippingAddress',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(verbose_name='Title', max_length=64, blank=True, choices=[('Mr', 'Mr'), ('Miss', 'Miss'), ('Mrs', 'Mrs'), ('Ms', 'Ms'), ('Dr', 'Dr')])),
                ('first_name', models.CharField(max_length=255, verbose_name='First name', blank=True)),
                ('last_name', models.CharField(max_length=255, verbose_name='Last name', blank=True)),
                ('line1', models.CharField(max_length=255, verbose_name='First line of address')),
                ('line2', models.CharField(max_length=255, verbose_name='Second line of address', blank=True)),
                ('line3', models.CharField(max_length=255, verbose_name='Third line of address', blank=True)),
                ('line4', models.CharField(max_length=255, verbose_name='City', blank=True)),
                ('state', models.CharField(max_length=255, verbose_name='State/County', blank=True)),
                ('postcode', oscar.models.fields.UppercaseCharField(max_length=64, verbose_name='Post/Zip-code', blank=True)),
                ('search_text', models.TextField(editable=False, verbose_name='Search text - used only for searching addresses')),
                ('phone_number', oscar.models.fields.PhoneNumberField(verbose_name='Phone number', help_text='In case we need to call you about your order', blank=True)),
                ('notes', models.TextField(verbose_name='Instructions', help_text='Tell us anything we should know when delivering your order.', blank=True)),
                ('country', models.ForeignKey(verbose_name='Country', to='address.Country')),
            ],
            options={
                'verbose_name_plural': 'Shipping addresses',
                'verbose_name': 'Shipping address',
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='ShippingEvent',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('notes', models.TextField(verbose_name='Event notes', help_text='This could be the dispatch reference, or a tracking number', blank=True)),
                ('date_created', models.DateTimeField(auto_now_add=True, verbose_name='Date Created')),
            ],
            options={
                'ordering': ['-date_created'],
                'verbose_name_plural': 'Shipping Events',
                'verbose_name': 'Shipping Event',
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='ShippingEventQuantity',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('quantity', models.PositiveIntegerField(verbose_name='Quantity')),
                ('event', models.ForeignKey(verbose_name='Event', related_name='line_quantities', to='order.ShippingEvent')),
                ('line', models.ForeignKey(verbose_name='Line', related_name='shipping_event_quantities', to='order.Line')),
            ],
            options={
                'verbose_name_plural': 'Shipping Event Quantities',
                'verbose_name': 'Shipping Event Quantity',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='ShippingEventType',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(unique=True, max_length=255, verbose_name='Name')),
                ('code', oscar.models.fields.autoslugfield.AutoSlugField(populate_from='name', unique=True, verbose_name='Code', max_length=128, editable=False, blank=True)),
            ],
            options={
                'ordering': ('name',),
                'verbose_name_plural': 'Shipping Event Types',
                'verbose_name': 'Shipping Event Type',
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        # A shipping event may touch each order line at most once.
        migrations.AlterUniqueTogether(
            name='shippingeventquantity',
            unique_together=set([('event', 'line')]),
        ),
        migrations.AddField(
            model_name='shippingevent',
            name='event_type',
            field=models.ForeignKey(verbose_name='Event Type', to='order.ShippingEventType'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='shippingevent',
            name='lines',
            field=models.ManyToManyField(related_name='shipping_events', verbose_name='Lines', to='order.Line', through='order.ShippingEventQuantity'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='shippingevent',
            name='order',
            field=models.ForeignKey(verbose_name='Order', related_name='shipping_events', to='order.Order'),
            preserve_default=True,
        ),
        # Likewise, a payment event may touch each order line at most once.
        migrations.AlterUniqueTogether(
            name='paymenteventquantity',
            unique_together=set([('event', 'line')]),
        ),
        migrations.AddField(
            model_name='paymentevent',
            name='event_type',
            field=models.ForeignKey(verbose_name='Event Type', to='order.PaymentEventType'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='paymentevent',
            name='lines',
            field=models.ManyToManyField(through='order.PaymentEventQuantity', verbose_name='Lines', to='order.Line'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='paymentevent',
            name='order',
            field=models.ForeignKey(verbose_name='Order', related_name='payment_events', to='order.Order'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='paymentevent',
            name='shipping_event',
            field=models.ForeignKey(related_name='payment_events', to='order.ShippingEvent', null=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='order',
            name='shipping_address',
            field=models.ForeignKey(null=True, verbose_name='Shipping Address', on_delete=django.db.models.deletion.SET_NULL, to='order.ShippingAddress', blank=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='order',
            name='site',
            field=models.ForeignKey(verbose_name='Site', on_delete=django.db.models.deletion.SET_NULL, to='sites.Site', null=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='order',
            name='user',
            field=models.ForeignKey(null=True, verbose_name='User', on_delete=django.db.models.deletion.SET_NULL, related_name='orders', to=settings.AUTH_USER_MODEL, blank=True),
            preserve_default=True,
        ),
        # NOTE(review): verbose_name='Option' looks like a copy/paste error
        # (this is the LinePrice -> Order FK, so 'Order' was presumably
        # intended). Confirm and correct via the model + a follow-up
        # migration rather than by editing this historical one.
        migrations.AddField(
            model_name='lineprice',
            name='order',
            field=models.ForeignKey(verbose_name='Option', related_name='line_prices', to='order.Order'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='line',
            name='order',
            field=models.ForeignKey(verbose_name='Order', related_name='lines', to='order.Order'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='line',
            name='partner',
            field=models.ForeignKey(null=True, verbose_name='Partner', on_delete=django.db.models.deletion.SET_NULL, related_name='order_lines', to='partner.Partner', blank=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='line',
            name='product',
            field=models.ForeignKey(null=True, verbose_name='Product', on_delete=django.db.models.deletion.SET_NULL, to='catalogue.Product', blank=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='line',
            name='stockrecord',
            field=models.ForeignKey(null=True, verbose_name='Stock record', on_delete=django.db.models.deletion.SET_NULL, to='partner.StockRecord', blank=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='communicationevent',
            name='order',
            field=models.ForeignKey(verbose_name='Order', related_name='communication_events', to='order.Order'),
            preserve_default=True,
        ),
    ]
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
import warnings
from .decorators import apply_defaults as _apply_defaults
def apply_defaults(func):
    """Deprecated alias for :func:`airflow.utils.decorators.apply_defaults`.

    Emits a PendingDeprecationWarning attributed to the decorated function's
    own definition site, then delegates to the real decorator.
    """
    warnings.warn_explicit(
        """
        You are importing apply_defaults from airflow.utils which
        will be deprecated in a future version.
        Please use :
        from airflow.utils.decorators import apply_defaults
        """,
        category=PendingDeprecationWarning,
        # Attribute the warning to where `func` is defined (the importer's
        # code), not to this shim module.
        filename=func.__code__.co_filename,
        lineno=func.__code__.co_firstlineno + 1
    )
    return _apply_defaults(func)
|
unknown
|
codeparrot/codeparrot-clean
| ||
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/Dispatch.h>
#include <ATen/Parallel.h>
#include <ATen/TensorMeta.h>
#include <ATen/native/TriangularOpsUtils.h>
#include <ATen/TensorSubclassLikeUtils.h>
#include <c10/util/irange.h>
#include <c10/util/Exception.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/arange.h>
#include <ATen/ops/empty_like.h>
#include <ATen/ops/trace_backward_native.h>
#include <ATen/ops/tril_native.h>
#include <ATen/ops/triu_native.h>
#include <ATen/ops/zeros.h>
#endif
namespace at::meta {

// Structured-kernel meta function for tril: validates the input rank and
// allocates an output with the same sizes/options. The diagonal offset `k`
// does not affect the output shape.
TORCH_META_FUNC(tril)(const Tensor& self, int64_t k) {
  TORCH_CHECK(self.dim() >= 2, "tril: input tensor must have at least 2 dimensions")
  set_output_raw_strided(0, self.sizes(), {}, self.options());
}

// Structured-kernel meta function for triu: same contract as tril above.
TORCH_META_FUNC(triu)(const Tensor& self, int64_t k) {
  TORCH_CHECK(self.dim() >= 2, "triu: input tensor must have at least 2 dimensions")
  set_output_raw_strided(0, self.sizes(), {}, self.options());
}

} // namespace at::meta
namespace at::native {
namespace {
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triu/tril ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Computes triu/tril for a single n-by-m matrix.
//
// `result`/`self` point at the matrix data; the *_row_stride/*_col_stride
// arguments are element strides, so non-contiguous layouts are supported.
// `k` is the diagonal offset (as in torch.triu/torch.tril) and `upper`
// selects triu (zero strictly below the k-th diagonal) vs tril (zero
// strictly above it). When `inplace` is true, `result` aliases `self` and
// only the zeroed region is written; otherwise the kept region is copied
// over from `self` as well.
template <typename scalar_t>
void apply_triu_tril_single(
    scalar_t* result,
    const scalar_t* self,
    bool inplace,
    int64_t k,
    int64_t n,
    int64_t m,
    int64_t res_row_stride,
    int64_t res_col_stride,
    int64_t self_row_stride,
    int64_t self_col_stride,
    bool upper) {
  constexpr int64_t zero = 0;
  k = std::clamp(k, -n, m); // Clamp k to [-n, m] to prevent i + k arithmetic overflow, especially if k approaches INT64_MAX/INT64_MIN.
  if (upper) {
    // triu: for row i, zero columns [0, i+k), then (out-of-place only) copy
    // the kept columns [max(0, i+k), m) from self. Rows run in parallel.
    parallel_for(0, n, 0, [&](int64_t start, int64_t end) {
      for (int64_t i : c10::irange(start, end)) {
        for (int64_t j = 0; j < std::min(m, i + k); j++) {
          result[i * res_row_stride + j * res_col_stride] = static_cast<scalar_t>(0);
        }
        if (!inplace) { // copy the rest of the self if not inplace
          for (int64_t j = std::max(zero, i + k); j < m; j++) {
            result[i * res_row_stride + j * res_col_stride] = c10::load(&self[i * self_row_stride + j * self_col_stride]);
          }
        }
      }
    });
  } else {
    // tril: for row i, zero columns [i+k+1, m), then (out-of-place only)
    // copy the kept columns [0, min(m, i+k+1)) from self.
    parallel_for(0, n, 0, [&](int64_t start, int64_t end) {
      for (int64_t i : c10::irange(start, end)) {
        for (int64_t j = std::max(zero, i + k + 1); j < m; j++) {
          result[i * res_row_stride + j * res_col_stride] = static_cast<scalar_t>(0);
        }
        if (!inplace) { // copy the rest of the self if not inplace
          for (int64_t j = zero; j < std::min(m, i + k + 1); j++) {
            result[i * res_row_stride + j * res_col_stride] = c10::load(&self[i * self_row_stride + j * self_col_stride]);
          }
        }
      }
    });
  }
}
// Applies apply_triu_tril_single to every matrix of a (possibly batched)
// tensor, with batches processed in parallel. The last two dimensions are
// the matrix; any leading dimensions form the batch.
template <typename scalar_t>
void apply_triu_tril(const Tensor& result, const Tensor& self, bool inplace, int64_t k, bool upper) {
  auto n = self.size(-2);
  auto m = self.size(-1);
  auto self_data = self.const_data_ptr<scalar_t>();
  // Per-matrix (batch) stride; fall back to 1 for 2-dim or zero-stride cases.
  auto self_stride = (self.dim() > 2 && self.stride(-3) > 0) ? self.stride(-3) : 1;
  auto batchsize = batchCountTrilTriu(result);
  auto self_row_stride = self.stride(-2);
  auto self_col_stride = self.stride(-1);
  auto result_data = result.data_ptr<scalar_t>();
  int64_t result_stride = 0, result_row_stride = 0, result_col_stride = 0;
  if (result_data != self_data) {
    result_stride = (result.dim() > 2 && result.stride(-3) > 0) ? result.stride(-3) : 1;
    result_row_stride = result.stride(-2);
    result_col_stride = result.stride(-1);
  } else {
    // result aliases self (in-place), so reuse self's strides.
    result_stride = self_stride;
    result_row_stride = self_row_stride;
    result_col_stride = self_col_stride;
  }
  parallel_for(0, batchsize, 0, [&](int64_t start, int64_t end) {
    for (const auto b : c10::irange(start, end)) {
      const scalar_t* self_batch = &self_data[b * self_stride];
      scalar_t* result_batch = &result_data[b * result_stride];
      apply_triu_tril_single<scalar_t>(
          result_batch,
          self_batch,
          inplace,
          k,
          n,
          m,
          result_row_stride,
          result_col_stride,
          self_row_stride,
          self_col_stride,
          upper);
    }
  });
}
// Compile-time tag types that parameterize compute_triu_tril with the op
// name (used for dtype-dispatch error messages) and the triu/tril direction.
struct UpperTriangle {
  static constexpr const char* op_name = "triu";
  static constexpr bool upper = true;
};

struct LowerTriangle {
  static constexpr const char *op_name = "tril";
  static constexpr bool upper = false;
};
// Shared CPU implementation behind tril_cpu/triu_cpu: handles the empty
// tensor fast-path, normalizes the batch layout, dispatches on dtype, and
// copies back when an in-place call needed a contiguous scratch buffer.
template <typename Triangle>
void compute_triu_tril(const Tensor& self, int64_t k, const Tensor &result) {
  if (self.numel() == 0) {
    return;
  }
  bool inplace_op = self.is_same(result);
  // inplace_update stays true only if `self` is already batch-contiguous so
  // the kernel can write straight into it; otherwise self_c is a contiguous
  // copy and the kernel must run out-of-place.
  bool inplace_update = false;
  Tensor self_c;
  std::tie(inplace_update, self_c) = checkTrilTriuBatchContiguous(self, inplace_op);
  Tensor result_c;
  if (inplace_op && !inplace_update) {
    // In-place requested but layout unsuitable: compute into scratch storage.
    result_c = at::empty_like(result, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
  } else {
    result_c = result;
  }
  AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND4(
      ScalarType::ComplexHalf,
      ScalarType::BFloat16,
      ScalarType::Half,
      ScalarType::Bool,
      self.scalar_type(),
      Triangle::op_name,
      [&]{
        apply_triu_tril<scalar_t>(result_c, self_c, inplace_op && inplace_update, k, Triangle::upper);
      });
  if (inplace_op && !inplace_update) {
    // Copy the scratch result back to honor the in-place semantics.
    result.copy_(result_c);
  }
}
} // namespace
// Structured-kernel CPU entry points; shape validation and output allocation
// were already done by the meta functions.
TORCH_IMPL_FUNC(tril_cpu)(const Tensor& self, int64_t k, const Tensor &result) {
  compute_triu_tril<LowerTriangle>(self, k, result);
}

TORCH_IMPL_FUNC(triu_cpu)(const Tensor& self, int64_t k, const Tensor &result) {
  compute_triu_tril<UpperTriangle>(self, k, result);
}
// Backward of torch.trace: scatters the scalar `grad` onto the main diagonal
// of a zero matrix with the original (rows, cols) shape.
Tensor trace_backward_symint(const Tensor& grad, c10::SymIntArrayRef sizes) {
  TORCH_CHECK(sizes.size() == 2, "expected matrix input");
  auto grad_input = at::zeros_symint(sizes[0] * sizes[1], grad.options());
  // Flattened positions of the main diagonal: 0, cols+1, 2*(cols+1), ...
  auto indices = at::arange(0, grad_input.numel(), sizes[1] + 1, grad.options().dtype(at::kLong));
  // for composite compliance, use out-of-place variant of
  // `index_fill` if grad tensor is a Tensor Subclass.
  if (isTensorSubclassLike(grad)) {
    grad_input = grad_input.index_fill(0, indices, grad);
  } else {
    grad_input.index_fill_(0, indices, grad);
  }
  return grad_input.view_symint(sizes);
}
} // namespace at::native
|
cpp
|
github
|
https://github.com/pytorch/pytorch
|
aten/src/ATen/native/TriangularOps.cpp
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Varun Chopra (@chopraaa) <v@chopraaa.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
module: win_format
version_added: '2.8'
short_description: Formats an existing volume or a new volume on an existing partition on Windows
description:
- The M(win_format) module formats an existing volume or a new volume on an existing partition on Windows
options:
drive_letter:
description:
- Used to specify the drive letter of the volume to be formatted.
type: str
path:
description:
- Used to specify the path to the volume to be formatted.
type: str
label:
description:
- Used to specify the label of the volume to be formatted.
type: str
new_label:
description:
- Used to specify the new file system label of the formatted volume.
type: str
file_system:
description:
- Used to specify the file system to be used when formatting the target volume.
type: str
choices: [ ntfs, refs, exfat, fat32, fat ]
allocation_unit_size:
description:
- Specifies the cluster size to use when formatting the volume.
- If no cluster size is specified when you format a partition, defaults are selected based on
the size of the partition.
- This value must be a multiple of the physical sector size of the disk.
type: int
large_frs:
description:
- Specifies that large File Record System (FRS) should be used.
type: bool
compress:
description:
- Enable compression on the resulting NTFS volume.
- NTFS compression is not supported where I(allocation_unit_size) is more than 4096.
type: bool
integrity_streams:
description:
- Enable integrity streams on the resulting ReFS volume.
type: bool
full:
description:
- A full format writes to every sector of the disk, takes much longer to perform than the
default (quick) format, and is not recommended on storage that is thinly provisioned.
- Specify C(true) for full format.
type: bool
force:
description:
- Specify if formatting should be forced for volumes that are not created from new partitions
or if the source and target file system are different.
type: bool
notes:
- Microsoft Windows Server 2012 or Microsoft Windows 8 or newer is required to use this module. To check if your system is compatible, see
U(https://docs.microsoft.com/en-us/windows/desktop/sysinfo/operating-system-version).
- One of three parameters (I(drive_letter), I(path) and I(label)) are mandatory to identify the target
volume but more than one cannot be specified at the same time.
- This module is idempotent if I(force) is not specified and file system labels remain preserved.
- For more information, see U(https://docs.microsoft.com/en-us/previous-versions/windows/desktop/stormgmt/format-msft-volume)
seealso:
- module: win_disk_facts
- module: win_partition
author:
- Varun Chopra (@chopraaa) <v@chopraaa.com>
'''
EXAMPLES = r'''
- name: Create a partition with drive letter D and size 5 GiB
win_partition:
drive_letter: D
partition_size: 5 GiB
disk_number: 1
- name: Full format the newly created partition as NTFS and label it
win_format:
drive_letter: D
file_system: NTFS
new_label: Formatted
full: True
'''
RETURN = r'''
#
'''
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.ElectionType;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.internals.KafkaFutureImpl;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
/**
* The result of {@link Admin#electLeaders(ElectionType, Set, ElectLeadersOptions)}
*
* The API of this class is evolving, see {@link Admin} for details.
*/
public final class ElectLeadersResult {

    private final KafkaFuture<Map<TopicPartition, Optional<Throwable>>> perPartitionOutcome;

    ElectLeadersResult(KafkaFuture<Map<TopicPartition, Optional<Throwable>>> perPartitionOutcome) {
        this.perPartitionOutcome = perPartitionOutcome;
    }

    /**
     * <p>Get a future for the topic partitions for which a leader election was attempted.
     * If the election succeeded then the value for a topic partition will be the empty Optional.
     * Otherwise the election failed and the Optional will be set with the error.</p>
     */
    public KafkaFuture<Map<TopicPartition, Optional<Throwable>>> partitions() {
        return perPartitionOutcome;
    }

    /**
     * Return a future which succeeds if all the topic elections succeed.
     */
    public KafkaFuture<Void> all() {
        final KafkaFutureImpl<Void> aggregate = new KafkaFutureImpl<>();
        partitions().whenComplete((outcomes, error) -> {
            if (error != null) {
                // The election request itself failed; propagate the failure.
                aggregate.completeExceptionally(error);
                return;
            }
            // Succeed only if no individual partition election reported an error.
            Optional<Throwable> firstFailure = outcomes.values().stream()
                .filter(Optional::isPresent)
                .findFirst()
                .orElse(Optional.empty());
            if (firstFailure.isPresent()) {
                aggregate.completeExceptionally(firstFailure.get());
            } else {
                aggregate.complete(null);
            }
        });
        return aggregate;
    }
}
|
java
|
github
|
https://github.com/apache/kafka
|
clients/src/main/java/org/apache/kafka/clients/admin/ElectLeadersResult.java
|
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from ..._models import BaseModel
from .audio_transcription import AudioTranscription
from .noise_reduction_type import NoiseReductionType
from .realtime_audio_formats import RealtimeAudioFormats
from .realtime_audio_input_turn_detection import RealtimeAudioInputTurnDetection
__all__ = ["RealtimeAudioConfigInput", "NoiseReduction"]
# Auto-generated pydantic model (see the Stainless/OpenAPI header at the top of
# this file); do not hand-edit the field set, it mirrors the API spec.
class NoiseReduction(BaseModel):
    """Configuration for input audio noise reduction.
    This can be set to `null` to turn off.
    Noise reduction filters audio added to the input audio buffer before it is sent to VAD and the model.
    Filtering the audio can improve VAD and turn detection accuracy (reducing false positives) and model performance by improving perception of the input audio.
    """
    # Which noise-reduction profile to apply; None means the server default.
    type: Optional[NoiseReductionType] = None
    """Type of noise reduction.
    `near_field` is for close-talking microphones such as headphones, `far_field` is
    for far-field microphones such as laptop or conference room microphones.
    """
class RealtimeAudioConfigInput(BaseModel):
    """Input-audio configuration for a realtime session: format, noise reduction,
    transcription, and turn detection. All fields are optional; None defers to
    the server-side default. Auto-generated from the OpenAPI spec.
    """
    format: Optional[RealtimeAudioFormats] = None
    """The format of the input audio."""
    noise_reduction: Optional[NoiseReduction] = None
    """Configuration for input audio noise reduction.
    This can be set to `null` to turn off. Noise reduction filters audio added to
    the input audio buffer before it is sent to VAD and the model. Filtering the
    audio can improve VAD and turn detection accuracy (reducing false positives) and
    model performance by improving perception of the input audio.
    """
    transcription: Optional[AudioTranscription] = None
    """
    Configuration for input audio transcription, defaults to off and can be set to
    `null` to turn off once on. Input audio transcription is not native to the
    model, since the model consumes audio directly. Transcription runs
    asynchronously through
    [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription)
    and should be treated as guidance of input audio content rather than precisely
    what the model heard. The client can optionally set the language and prompt for
    transcription, these offer additional guidance to the transcription service.
    """
    turn_detection: Optional[RealtimeAudioInputTurnDetection] = None
    """Configuration for turn detection, ether Server VAD or Semantic VAD.
    This can be set to `null` to turn off, in which case the client must manually
    trigger model response.
    Server VAD means that the model will detect the start and end of speech based on
    audio volume and respond at the end of user speech.
    Semantic VAD is more advanced and uses a turn detection model (in conjunction
    with VAD) to semantically estimate whether the user has finished speaking, then
    dynamically sets a timeout based on this probability. For example, if user audio
    trails off with "uhhm", the model will score a low probability of turn end and
    wait longer for the user to continue speaking. This can be useful for more
    natural conversations, but may have a higher latency.
    """
|
python
|
github
|
https://github.com/openai/openai-python
|
src/openai/types/realtime/realtime_audio_config_input.py
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2048: Alignak contrib team, see AUTHORS.txt file for contributors
#
# This file is part of Alignak contrib project.
#
# Alignak is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Alignak is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Alignak. If not, see <http://www.gnu.org/licenses/>.
"""
This module is an Alignak Broker module that collects the `monitoring_log` broks to send
them to a Python logger configured in the module configuration file
"""
import os
import json
import time
import queue
import logging
from logging import Formatter
from logging.handlers import TimedRotatingFileHandler
from logging.config import dictConfig as logger_dictConfig
import psutil
from alignak.stats import Stats
from alignak.basemodule import BaseModule
from alignak_backend_client.client import Backend, BackendException
from alignak_module_logs.logevent import LogEvent
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
for handler in logger.parent.handlers:
if isinstance(handler, logging.StreamHandler):
logger.parent.removeHandler(handler)
# pylint: disable=invalid-name
properties = {
'daemons': ['broker'],
'type': 'logs',
'external': True,
'phases': ['running'],
}
class UTCFormatter(logging.Formatter):
    """This logging formatter converts the log date/time to UTC"""
    # logging.Formatter calls `converter` to turn the record's timestamp into a
    # struct_time; pointing it at time.gmtime makes all rendered dates UTC.
    converter = time.gmtime
def get_instance(mod_conf):
    """Return a module instance for the modules manager

    Factory entry point called by the Alignak modules manager.

    :param mod_conf: the module properties as defined globally in this file
    :return: a new MonitoringLogsCollector built from the given configuration
    """
    # logger.info("Give an instance of %s for alias: %s",
    #             mod_conf.python_name, mod_conf.module_alias)
    return MonitoringLogsCollector(mod_conf)
class MonitoringLogsCollector(BaseModule):
    """Monitoring logs module main class

    Collects the `monitoring_log` broks, writes them to a dedicated Python
    logger and, when an Alignak backend is configured, posts the corresponding
    history events to the backend.
    """
    def __init__(self, mod_conf):
        # pylint: disable=global-statement
        """Module initialization

        mod_conf is a dictionary that contains:
        - all the variables declared in the module configuration file
        - a 'properties' value that is the module properties as defined globally in this file

        :param mod_conf: module configuration file as a dictionary
        """
        BaseModule.__init__(self, mod_conf)

        # pylint: disable=global-statement
        global logger
        logger = logging.getLogger('alignak.module.%s' % self.alias)
        # Do not change log level for this module ...
        # logger.setLevel(getattr(mod_conf, 'log_level', logging.INFO))

        logger.debug("inner properties: %s", self.__dict__)
        logger.debug("received configuration: %s", mod_conf.__dict__)

        # Internal logger for the monitoring logs
        self.logger = None
        self.loop_count = 0

        # Self daemon monitoring (cpu, memory), enabled through an environment variable
        self.daemon_monitoring = False
        self.daemon_monitoring_period = 10
        if 'ALIGNAK_DAEMON_MONITORING' in os.environ:
            self.daemon_monitoring = True
            try:
                self.daemon_monitoring_period = \
                    int(os.environ.get('ALIGNAK_DAEMON_MONITORING', '10'))
            except ValueError:  # pragma: no cover, simple protection
                pass
        if self.daemon_monitoring:
            print("Module self monitoring is enabled, reporting every %d loop count."
                  % self.daemon_monitoring_period)

        # Logger configuration file - the environment variable takes precedence
        # over the module configuration
        self.logger_configuration = os.getenv('ALIGNAK_MONITORING_LOGS_CFG', None)
        if not self.logger_configuration:
            self.logger_configuration = getattr(mod_conf, 'logger_configuration', None)
        if self.logger_configuration and self.logger_configuration != \
                os.path.abspath(self.logger_configuration):
            self.logger_configuration = os.path.abspath(self.logger_configuration)

        # Logger default parameters (used if logger_configuration is not defined)
        self.default_configuration = True
        self.log_logger_name = getattr(mod_conf, 'log_logger_name', 'monitoring-logs')
        self.log_dir = getattr(mod_conf, 'log_dir', '/tmp')
        # "ALIGNAKLOG" is a placeholder that may be left unexpanded in the
        # configuration; fall back to /tmp in that case
        if "ALIGNAKLOG" in self.log_dir:
            self.log_dir = '/tmp'
        self.log_file = getattr(mod_conf, 'log_file', 'monitoring-logs.log')
        self.log_filename = os.path.join(self.log_dir, self.log_file)
        self.log_rotation_when = getattr(mod_conf, 'log_rotation_when', 'midnight')
        self.log_rotation_interval = int(getattr(mod_conf, 'log_rotation_interval', '1'))
        self.log_rotation_count = int(getattr(mod_conf, 'log_rotation_count', '365'))
        self.log_level = getattr(mod_conf, 'log_level', 'INFO')
        self.log_level = getattr(logging, self.log_level, None)
        # Bug fix: the attribute name used to be 'log_format ' (trailing space),
        # so a configured log_format was silently ignored and the default was
        # always used.
        self.log_format = getattr(mod_conf, 'log_format',
                                  '[%(created)i] %(levelname)s: %(message)s')
        self.log_date = getattr(mod_conf, 'log_date', '%Y-%m-%d %H:%M:%S %Z')

        if not self.logger_configuration and not self.log_dir and not self.log_file:
            logger.info("The logging feature is disabled")
        else:
            if self.logger_configuration:
                logger.info("logger configuration defined in %s", self.logger_configuration)
                self.default_configuration = False
                if not os.path.exists(self.logger_configuration):
                    self.default_configuration = True
                    logger.warning("defined logger configuration file (%s) does not exist! "
                                   "Using default configuration.", self.logger_configuration)
            if self.default_configuration:
                logger.info("logger default configuration:")
                logger.info(" - rotating logs in %s", self.log_filename)
                logger.info(" - log level: %s", self.log_level)
                logger.info(" - rotation every %d %s, keeping %s files",
                            self.log_rotation_interval, self.log_rotation_when,
                            self.log_rotation_count)
            self.setup_logging()

        # StatsD / Graphite reporting configuration. The *_enabled options may
        # be provided either as a string flag ('0'/'1') or as a boolean.
        stats_host = getattr(mod_conf, 'statsd_host', 'localhost')
        stats_port = int(getattr(mod_conf, 'statsd_port', '8125'))
        stats_prefix = getattr(mod_conf, 'statsd_prefix', 'alignak')
        statsd_enabled = (getattr(mod_conf, 'statsd_enabled', '0') != '0')
        if isinstance(getattr(mod_conf, 'statsd_enabled', '0'), bool):
            statsd_enabled = getattr(mod_conf, 'statsd_enabled')
        graphite_enabled = (getattr(mod_conf, 'graphite_enabled', '0') != '0')
        if isinstance(getattr(mod_conf, 'graphite_enabled', '0'), bool):
            graphite_enabled = getattr(mod_conf, 'graphite_enabled')
        logger.info("StatsD configuration: %s:%s, prefix: %s, enabled: %s, graphite: %s",
                    stats_host, stats_port, stats_prefix, statsd_enabled, graphite_enabled)

        self.statsmgr = Stats()
        # Configure our Stats manager
        if not graphite_enabled:
            self.statsmgr.register(self.alias, 'module',
                                   statsd_host=stats_host, statsd_port=stats_port,
                                   statsd_prefix=stats_prefix, statsd_enabled=statsd_enabled)
        else:
            self.statsmgr.connect(self.alias, 'module',
                                  host=stats_host, port=stats_port,
                                  prefix=stats_prefix, enabled=True)

        # Alignak Backend part
        # ---
        self.backend_available = False
        self.backend_connected = False
        self.backend_url = getattr(mod_conf, 'alignak_backend', '')
        if self.backend_url:
            logger.info("Alignak backend endpoint: %s", self.backend_url)

            self.client_processes = int(getattr(mod_conf, 'client_processes', '1'))
            logger.info("Number of processes used by backend client: %s", self.client_processes)

            self.backend_connected = False
            self.backend_connection_retry_planned = 0
            try:
                self.backend_connection_retry_delay = int(getattr(mod_conf,
                                                                  'backend_connection_retry_delay',
                                                                  '10'))
            except ValueError:
                self.backend_connection_retry_delay = 10
            self.backend_errors_count = 0

            self.backend_username = getattr(mod_conf, 'username', '')
            self.backend_password = getattr(mod_conf, 'password', '')
            self.backend_generate = getattr(mod_conf, 'allowgeneratetoken', False)
            self.backend_token = getattr(mod_conf, 'token', '')
            self.backend = Backend(self.backend_url, self.client_processes)

            if not self.backend.token and not self.backend_username:
                logger.warning("No Alignak backend credentials configured (empty token and "
                               "empty username). "
                               "The requested backend connection will not be available")
                self.backend_url = ''
            else:
                # Log in to the backend
                self.logged_in = False
                self.backend_connected = self.backend_connection()
                self.backend_available = self.backend_connected

                # Get the default realm
                self.default_realm = self.get_default_realm()
        else:
            logger.warning('Alignak Backend is not configured. '
                           'Some module features will not be available.')

    def init(self):
        """Handle this module "post" init ; just before it'll be started.

        Like just open necessaries file(s), database(s),
        or whatever the module will need.

        :return: None
        """
        return True

    def setup_logging(self):
        """Setup logging configuration

        Either installs the default TimedRotatingFileHandler or loads the
        dictConfig JSON file declared in `logger_configuration`.

        :return: none
        """
        self.logger = logging.getLogger(self.log_logger_name)

        if self.default_configuration:
            # Set logger level
            self.logger.setLevel(self.log_level)

            logger.debug("Logger (default) handlers: %s", self.logger.handlers)
            if not self.logger.handlers:
                print("Log dir: %s" % self.log_dir)
                print("Log filename: %s" % self.log_filename)
                file_handler = TimedRotatingFileHandler(self.log_filename.replace("ALIGNAKLOG",
                                                                                  self.log_dir),
                                                        when=self.log_rotation_when,
                                                        interval=self.log_rotation_interval,
                                                        backupCount=self.log_rotation_count)
                file_handler.setFormatter(Formatter(self.log_format, self.log_date))
                self.logger.addHandler(file_handler)
                logger.debug("Logger (default), added a TimedRotatingFileHandler")
        else:
            try:
                with open(self.logger_configuration, 'rt') as my_logger_configuration_file:
                    config = json.load(my_logger_configuration_file)
                    # Update the declared log file names with the log directory
                    for hdlr in config['handlers']:
                        if 'filename' in config['handlers'][hdlr]:
                            config['handlers'][hdlr]['filename'] = \
                                config['handlers'][hdlr]['filename'].replace("ALIGNAKLOG",
                                                                             self.log_dir)
                logger_dictConfig(config)
            except ValueError as exp:
                logger.error("Logger configuration file is not parsable correctly!")
                logger.exception(exp)

    def backend_connection(self):
        """Backend connection to check live state update is allowed

        Logs in and then checks that the configured user account has the
        `can_update_livestate` right.

        :return: True/False
        """
        if self.backend_login():
            self.get_default_realm()

            try:
                start = time.time()
                params = {'where': '{"token":"%s"}' % self.backend.token}
                users = self.backend.get('user', params)
                self.statsmgr.counter('backend-get.user', 1)
                self.statsmgr.timer('backend-get-time.user', time.time() - start)
            except BackendException as exp:
                logger.warning("Error on backend when retrieving user information: %s", exp)
            else:
                try:
                    for item in users['_items']:
                        self.logged_in = item['can_update_livestate']
                    return self.logged_in
                except Exception as exp:
                    logger.error("Can't get the user information in the backend response: %s", exp)

        logger.error("Configured user account is not allowed for this module")
        return False

    def backend_login(self):
        """Log in to the backend

        Uses the configured token when available, else the username/password
        credentials.

        :return: bool
        """
        generate = 'enabled'
        if not self.backend_generate:
            generate = 'disabled'

        if self.backend_token:
            # We have a token, don't ask for a new one
            self.backend.token = self.backend_token
            connected = True  # Not really yet, but assume yes
        else:
            if not self.backend_username or not self.backend_password:
                logger.error("No user or password supplied, and no default token defined. "
                             "Can't connect to backend")
                connected = False
            else:
                try:
                    start = time.time()
                    connected = self.backend.login(self.backend_username, self.backend_password,
                                                   generate)
                    self.statsmgr.counter('backend-login', 1)
                    self.statsmgr.timer('backend-login-time', time.time() - start)
                except BackendException as exp:
                    logger.error("Error on backend login: %s", exp)
                    connected = False

        return connected

    def get_default_realm(self):
        """
        Retrieves the default top level realm for the connected user

        :return: str or None
        """
        default_realm = None
        if self.backend_connected:
            try:
                start = time.time()
                result = self.backend.get('/realm', {'max_results': 1, 'sort': '_level'})
                self.statsmgr.counter('backend-get.realm', 1)
                self.statsmgr.timer('backend-get-time.realm', time.time() - start)
            except BackendException as exp:
                logger.warning("Error on backend when retrieving default realm: %s", exp)
            else:
                try:
                    default_realm = result['_items'][0]['_id']
                except Exception as exp:
                    logger.error("Can't get the default realm in the backend response: %s", exp)
        return default_realm

    def do_loop_turn(self):  # pragma: no cover
        """This function is present because of an abstract function in the BaseModule class"""
        logger.info("In loop")
        time.sleep(1)

    def manage_brok(self, brok):
        """We got the data to manage

        Writes the monitoring log to the internal logger and, when the event is
        recognized, posts a history record to the Alignak backend.

        :param brok: Brok object
        :type brok: object
        :return: False if a backend post error happens
        """
        # Ignore all except 'monitoring_log' broks...
        if brok.type not in ['monitoring_log']:
            return False

        level = brok.data['level'].lower()
        if level not in ['debug', 'info', 'warning', 'error', 'critical']:
            return False

        logger.debug("Got monitoring log brok: %s", brok)

        # Send to configured logger
        if self.logger:
            message = brok.data['message']
            message = message.replace('\r', '\\r')
            message = message.replace('\n', '\\n')
            func = getattr(self.logger, level)
            func(message)

        if not self.backend_url:
            return False

        # Bug fix: the comparison used to be `int(time.time() > planned)` which
        # only worked because a bool is truthy; compare the timestamps directly.
        if not self.backend_connected and time.time() > self.backend_connection_retry_planned:
            self.backend_connected = self.backend_connection()
        if not self.backend_connected:
            logger.error("Alignak backend connection is not available. Ignoring event.")
            return False

        # Try to get a monitoring event
        try:
            event = LogEvent(('[%s] ' % int(time.time())) + brok.data['message'])
            if event.valid:
                # -------------------------------------------
                # Add an history event
                self.statsmgr.counter('monitoring-event-get.%s' % event.event_type, 1)
                data = self._history_data(event, brok)

                if data:
                    try:
                        logger.debug("Posting history data: %s", data)
                        start = time.time()
                        self.backend.post('history', data)
                        self.statsmgr.counter('monitoring-event-stored.%s' % event.event_type, 1)
                        self.statsmgr.timer('backend-post-time.history', time.time() - start)
                    except BackendException as exp:
                        logger.exception("Exception: %s", exp)
                        logger.error("Exception response: %s", exp.response)
                        return False
                else:
                    self.statsmgr.counter('monitoring-event-ignored.%s' % event.event_type, 1)
                    logger.debug("Monitoring event not stored in the backend: %s",
                                 brok.data['message'])
            else:
                logger.warning("No monitoring event detected from: %s", brok.data['message'])
        except ValueError:
            logger.warning("Unable to decode a monitoring event from: %s", brok.data['message'])

        return True

    @staticmethod
    def _history_data(event, brok):
        """Build the backend 'history' payload for a parsed monitoring event.

        :param event: a valid LogEvent parsed from the brok message
        :param brok: the originating brok (its raw message is stored)
        :return: dict payload, or an empty dict for unhandled event types
        """
        data = {}
        if event.event_type == 'TIMEPERIOD':
            data = {
                "host_name": 'n/a',
                "service_name": 'n/a',
                "user_name": "Alignak",
                "type": "monitoring.timeperiod_transition",
                "message": brok.data['message'],
            }
        if event.event_type == 'NOTIFICATION':
            data = {
                "host_name": event.data['hostname'],
                "service_name": event.data['service_desc'] or 'n/a',
                "user_name": "Alignak",
                "type": "monitoring.notification",
                "message": brok.data['message'],
            }
        if event.event_type == 'ALERT':
            data = {
                "host_name": event.data['hostname'],
                "service_name": event.data['service_desc'] or 'n/a',
                "user_name": "Alignak",
                "type": "monitoring.alert",
                "message": brok.data['message'],
            }
        if event.event_type == 'DOWNTIME':
            downtime_type = "monitoring.downtime_start"
            if event.data['state'] == 'STOPPED':
                downtime_type = "monitoring.downtime_end"
            if event.data['state'] == 'CANCELLED':
                downtime_type = "monitoring.downtime_cancelled"
            data = {
                "host_name": event.data['hostname'],
                "service_name": event.data['service_desc'] or 'n/a',
                "user_name": "Alignak",
                "type": downtime_type,
                "message": brok.data['message'],
            }
        if event.event_type == 'FLAPPING':
            flapping_type = "monitoring.flapping_start"
            if event.data['state'] == 'STOPPED':
                flapping_type = "monitoring.flapping_stop"
            data = {
                "host_name": event.data['hostname'],
                "service_name": event.data['service_desc'] or 'n/a',
                "user_name": "Alignak",
                "type": flapping_type,
                "message": brok.data['message'],
            }
        if event.event_type == 'COMMENT':
            data = {
                "host_name": event.data['hostname'],
                "service_name": event.data['service_desc'] or 'n/a',
                "user_name": event.data['author'] or 'Alignak',
                "type": "webui.comment",
                "message": event.data['comment'],
            }
        return data

    def main(self):
        """Main loop of the process

        This module is an "external" module

        :return:
        """
        # Set the OS process title
        self.set_proctitle(self.alias)
        self.set_exit_handler()

        logger.info("starting...")

        # Increased on each loop turn
        self.loop_count = 0
        while not self.interrupted:
            # Increment loop count
            self.loop_count += 1

            try:
                queue_size = self.to_q.qsize()
                if queue_size:
                    logger.debug("queue length: %s", queue_size)
                    self.statsmgr.gauge('queue-size', queue_size)
                message = self.to_q.get_nowait()
                start = time.time()
                for brok in message:
                    # Prepare and manage each brok in the queue message
                    brok.prepare()
                    self.manage_brok(brok)
                logger.debug("time to manage %s broks (%d secs)", len(message), time.time() - start)
                self.statsmgr.timer('managed-broks-time', time.time() - start)
            except queue.Empty:
                # logger.debug("No message in the module queue")
                time.sleep(0.1)

            if self.daemon_monitoring and (self.loop_count
                                           % self.daemon_monitoring_period == 1):
                perfdatas = []
                my_process = psutil.Process()
                with my_process.oneshot():
                    perfdatas.append("num_threads=%d" % my_process.num_threads())
                    self.statsmgr.counter("num_threads", my_process.num_threads())
                    # perfdatas.append("num_ctx_switches=%d" % my_process.num_ctx_switches())
                    perfdatas.append("num_fds=%d" % my_process.num_fds())
                    # perfdatas.append("num_handles=%d" % my_process.num_handles())
                    perfdatas.append("create_time=%d" % my_process.create_time())
                    perfdatas.append("cpu_num=%d" % my_process.cpu_num())
                    self.statsmgr.counter("cpu_num", my_process.cpu_num())
                    perfdatas.append("cpu_usable=%d" % len(my_process.cpu_affinity()))
                    self.statsmgr.counter("cpu_usable", len(my_process.cpu_affinity()))
                    perfdatas.append("cpu_percent=%.2f%%" % my_process.cpu_percent())
                    self.statsmgr.counter("cpu_percent", my_process.cpu_percent())

                    cpu_times_percent = my_process.cpu_times()
                    for key in cpu_times_percent._fields:
                        perfdatas.append("cpu_%s_time=%.2fs"
                                         % (key, getattr(cpu_times_percent, key)))
                        self.statsmgr.counter("cpu_%s_time" % key,
                                              getattr(cpu_times_percent, key))

                    memory = my_process.memory_full_info()
                    for key in memory._fields:
                        perfdatas.append("mem_%s=%db" % (key, getattr(memory, key)))
                        self.statsmgr.counter("mem_%s" % key, getattr(memory, key))

                    logger.debug("Daemon %s (%s), pid=%s, ppid=%s, status=%s, cpu/memory|%s",
                                 self.name, my_process.name(),
                                 my_process.pid, my_process.ppid(),
                                 my_process.status(), " ".join(perfdatas))

        logger.info("stopping...")

        # Properly close all the Python logging stuff
        # See: http://stackoverflow.com/questions/24816456/python-logging-wont-shutdown
        logging.shutdown()

        logger.info("stopped")
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright (C) 2014, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova import db
from nova.objects import virtual_interface as vif_obj
from nova.tests.objects import test_objects
# Canned DB row for a virtual interface, matching the columns returned by the
# nova.db virtual_interface_* APIs; shared by every test case below.
fake_vif = {
    'created_at': None,
    'updated_at': None,
    'deleted_at': None,
    'deleted': 0,
    'id': 1,
    'address': '00:00:00:00:00:00',
    'network_id': 123,
    'instance_uuid': 'fake-uuid',
    'uuid': 'fake-uuid-2',
}
class _TestVirtualInterface(object):
    # Mixin holding the shared VirtualInterface test logic; it is combined with
    # the _LocalTest / _RemoteTest base classes below to run the same tests
    # against both object implementations.
    @staticmethod
    def _compare(test, db, obj):
        # NOTE: the `db` parameter shadows the module-level `db` import; here it
        # is the expected dict of field values, not the database API.
        for field, value in db.items():
            test.assertEqual(db[field], obj[field])
    def test_get_by_id(self):
        # get_by_id should return an object mirroring the DB row.
        with mock.patch.object(db, 'virtual_interface_get') as get:
            get.return_value = fake_vif
            vif = vif_obj.VirtualInterface.get_by_id(self.context, 1)
            self._compare(self, fake_vif, vif)
    def test_get_by_uuid(self):
        with mock.patch.object(db, 'virtual_interface_get_by_uuid') as get:
            get.return_value = fake_vif
            vif = vif_obj.VirtualInterface.get_by_uuid(self.context,
                                                       'fake-uuid-2')
            self._compare(self, fake_vif, vif)
    def test_get_by_address(self):
        with mock.patch.object(db, 'virtual_interface_get_by_address') as get:
            get.return_value = fake_vif
            vif = vif_obj.VirtualInterface.get_by_address(self.context,
                                                          '00:00:00:00:00:00')
            self._compare(self, fake_vif, vif)
    def test_get_by_instance_and_network(self):
        with mock.patch.object(db,
                'virtual_interface_get_by_instance_and_network') as get:
            get.return_value = fake_vif
            vif = vif_obj.VirtualInterface.get_by_instance_and_network(
                    self.context, 'fake-uuid', 123)
            self._compare(self, fake_vif, vif)
    def test_create(self):
        # create() should push the set fields to the DB layer, attach the
        # context, and hydrate the object from the returned row.
        vif = vif_obj.VirtualInterface()
        vif.address = '00:00:00:00:00:00'
        vif.network_id = 123
        vif.instance_uuid = 'fake-uuid'
        vif.uuid = 'fake-uuid-2'
        with mock.patch.object(db, 'virtual_interface_create') as create:
            create.return_value = fake_vif
            vif.create(self.context)
        self.assertEqual(self.context, vif._context)
        # _context is cleared so _compare's field-by-field check is not
        # confused by the context attribute.
        vif._context = None
        self._compare(self, fake_vif, vif)
    def test_delete_by_instance_uuid(self):
        with mock.patch.object(db,
                'virtual_interface_delete_by_instance') as delete:
            vif_obj.VirtualInterface.delete_by_instance_uuid(self.context,
                                                             'fake-uuid')
            delete.assert_called_with(self.context, 'fake-uuid')
# Runs the shared VirtualInterface tests against the local (in-process)
# object implementation.
class TestVirtualInterfaceObject(test_objects._LocalTest,
                                 _TestVirtualInterface):
    pass
# Runs the shared VirtualInterface tests through the remote (RPC-backed)
# object implementation.
class TestRemoteVirtualInterfaceObject(test_objects._RemoteTest,
                                       _TestVirtualInterface):
    pass
class _TestVirtualInterfaceList(object):
    # Mixin with the VirtualInterfaceList test logic, mixed into the local and
    # remote test cases below.
    def test_get_all(self):
        with mock.patch.object(db, 'virtual_interface_get_all') as get:
            get.return_value = [fake_vif]
            vifs = vif_obj.VirtualInterfaceList.get_all(self.context)
            self.assertEqual(1, len(vifs))
            _TestVirtualInterface._compare(self, fake_vif, vifs[0])
    def test_get_by_instance_uuid(self):
        with mock.patch.object(db, 'virtual_interface_get_by_instance') as get:
            get.return_value = [fake_vif]
            vifs = vif_obj.VirtualInterfaceList.get_by_instance_uuid(
                    self.context, 'fake-uuid')
            self.assertEqual(1, len(vifs))
            _TestVirtualInterface._compare(self, fake_vif, vifs[0])
# Local (in-process) variant of the list tests.
class TestVirtualInterfaceList(test_objects._LocalTest,
                               _TestVirtualInterfaceList):
    pass
# Remote (RPC-backed) variant of the list tests.
class TestRemoteVirtualInterfaceList(test_objects._RemoteTest,
                                     _TestVirtualInterfaceList):
    pass
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Simple Training CLI.
"""
import argparse
import os
from contextlib import ExitStack
from typing import Optional, List, Tuple
from . import arguments
from . import constants as C
from . import data_io
from . import inference
from . import model
from . import scoring
from . import utils
from . import vocab
from .log import setup_main_logger
from .output_handler import get_output_handler
from .utils import check_condition
# Temporary logger, the real one (logging to a file probably, will be created in the main function)
logger = setup_main_logger(__name__, file_logging=False, console=True)
def main():
    """CLI entry point: parse the scoring arguments and run :func:`score`."""
    params = arguments.ConfigArgumentParser(description='Score data with an existing model.')
    arguments.add_score_cli_args(params)
    args = params.parse_args()
    score(args)
def get_data_iters_and_vocabs(args: argparse.Namespace,
                              model_folder: Optional[str]) -> Tuple['data_io.BaseParallelSampleIter',
                                                                    'data_io.DataConfig',
                                                                    List[vocab.Vocab], vocab.Vocab, model.ModelConfig]:
    """
    Loads the data iterators and vocabularies.

    Bucketing and permutation are disabled and underfilled buckets are
    zero-filled so that scores come out one-for-one, in input order.

    :param args: Arguments as returned by argparse.
    :param model_folder: Model folder containing the config and vocabularies.
    :return: A tuple of (data iterator, data config, source vocabularies,
        target vocabulary, model config).
    """
    model_config = model.SockeyeModel.load_config(os.path.join(args.model, C.CONFIG_NAME))
    # Fall back to the maximum lengths the model was trained with unless the
    # user overrides them on the command line.
    if args.max_seq_len is None:
        max_seq_len_source = model_config.config_data.max_seq_len_source
        max_seq_len_target = model_config.config_data.max_seq_len_target
    else:
        max_seq_len_source, max_seq_len_target = args.max_seq_len
    # Negative device ids mean "acquire that many devices automatically".
    batch_num_devices = 1 if args.use_cpu else sum(-di if di < 0 else 1 for di in args.device_ids)
    batch_by_words = args.batch_type == C.BATCH_TYPE_WORD
    # Load the existing vocabs created when starting the training run.
    source_vocabs = vocab.load_source_vocabs(model_folder)
    target_vocab = vocab.load_target_vocab(model_folder)
    sources = [args.source] + args.source_factors
    sources = [str(os.path.abspath(source)) for source in sources]
    train_iter, _, config_data, data_info = data_io.get_training_data_iters(
        sources=sources,
        target=os.path.abspath(args.target),
        validation_sources=None,
        validation_target=None,
        source_vocabs=source_vocabs,
        target_vocab=target_vocab,
        source_vocab_paths=[None],
        target_vocab_path=None,
        shared_vocab=False,
        batch_size=args.batch_size,
        batch_by_words=batch_by_words,
        batch_num_devices=batch_num_devices,
        fill_up=C.FILL_UP_ZEROS,
        permute=False,
        max_seq_len_source=max_seq_len_source,
        max_seq_len_target=max_seq_len_target,
        bucketing=False,
        bucket_width=args.bucket_width,
        allow_empty=True)
    return train_iter, config_data, source_vocabs, target_vocab, model_config
def score(args: argparse.Namespace):
    """Score parallel data with an existing model and emit the scores through
    the configured output handler.

    :param args: Arguments as returned by argparse.
    """
    global logger
    logger = setup_main_logger(__name__, file_logging=False)
    utils.log_basic_info(args)
    with ExitStack() as exit_stack:
        context = utils.determine_context(device_ids=args.device_ids,
                                          use_cpu=args.use_cpu,
                                          disable_device_locking=args.disable_device_locking,
                                          lock_dir=args.lock_dir,
                                          exit_stack=exit_stack)
        if args.batch_type == C.BATCH_TYPE_SENTENCE:
            check_condition(args.batch_size % len(context) == 0, "When using multiple devices the batch size must be "
                                                                "divisible by the number of devices. Choose a batch "
                                                                "size that is a multiple of %d." % len(context))
        logger.info("Scoring Device(s): %s", ", ".join(str(c) for c in context))
        # This call has a number of different parameters compared to training which reflect our need to get scores
        # one-for-one and in the same order as the input data.
        # To enable code reuse, we stuff the `args` parameter with some values.
        # Bucketing and permuting need to be turned off in order to preserve the ordering of sentences.
        # The 'zeros' fill_up strategy fills underfilled buckets with zeros which can then be used to find the last item.
        # Finally, 'resume_training' needs to be set to True because it causes the model to be loaded instead of initialized.
        args.no_bucketing = True
        args.fill_up = 'zeros'
        args.bucket_width = 10
        score_iter, config_data, source_vocabs, target_vocab, model_config = get_data_iters_and_vocabs(
            args=args,
            model_folder=args.model)
        scoring_model = scoring.ScoringModel(config=model_config,
                                             model_dir=args.model,
                                             context=context,
                                             provide_data=score_iter.provide_data,
                                             provide_label=score_iter.provide_label,
                                             default_bucket_key=score_iter.default_bucket_key,
                                             score_type=args.score_type,
                                             bucketing=False,
                                             length_penalty=inference.LengthPenalty(alpha=args.length_penalty_alpha,
                                                                                    beta=args.length_penalty_beta),
                                             softmax_temperature=args.softmax_temperature)
        scorer = scoring.Scorer(scoring_model, source_vocabs, target_vocab)
        scorer.score(score_iter=score_iter,
                     output_handler=get_output_handler(output_type=args.output_type,
                                                       output_fname=args.output))
        # Warn when over-long sentences were dropped: the output is then no
        # longer line-aligned with the input.
        if config_data.data_statistics.num_discarded != 0:
            num_discarded = config_data.data_statistics.num_discarded
            logger.warning('Warning: %d %s longer than %s %s skipped. '
                           'As a result, the output won\'t be parallel with the input. '
                           'Increase the maximum length (--max-seq-len M:N) or trim your training data.',
                           num_discarded,
                           utils.inflect('sentence', num_discarded),
                           args.max_seq_len,
                           utils.inflect('was', num_discarded))
# Allow running this module directly as a script.
if __name__ == "__main__":
    main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
from __future__ import unicode_literals
import warnings
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.views import shortcut
from django.contrib.sites.shortcuts import get_current_site
from django.db.utils import IntegrityError, OperationalError, ProgrammingError
from django.http import Http404, HttpRequest
from django.test import TestCase, mock, override_settings
from django.utils import six
from .models import (
ConcreteModel, FooWithBrokenAbsoluteUrl, FooWithoutUrl, FooWithUrl,
ProxyModel,
)
class ContentTypesTests(TestCase):
    """Tests for the contenttypes framework: ContentTypeManager caching,
    get_for_model()/get_for_models() with proxy and deferred models, the
    admin "view on site" shortcut view, and stale/legacy ContentType rows.
    """
    def setUp(self):
        # Start every test with a cold ContentType cache so the
        # assertNumQueries counts below are deterministic.
        ContentType.objects.clear_cache()
    def tearDown(self):
        # Don't leak cached ContentTypes into other test cases.
        ContentType.objects.clear_cache()
    def test_lookup_cache(self):
        """
        Make sure that the content type cache (see ContentTypeManager)
        works correctly. Lookups for a particular content type -- by model, ID
        or natural key -- should hit the database only on the first lookup.
        """
        # At this point, a lookup for a ContentType should hit the DB
        with self.assertNumQueries(1):
            ContentType.objects.get_for_model(ContentType)
        # A second hit, though, won't hit the DB, nor will a lookup by ID
        # or natural key
        with self.assertNumQueries(0):
            ct = ContentType.objects.get_for_model(ContentType)
        with self.assertNumQueries(0):
            ContentType.objects.get_for_id(ct.id)
        with self.assertNumQueries(0):
            ContentType.objects.get_by_natural_key('contenttypes',
                                                   'contenttype')
        # Once we clear the cache, another lookup will again hit the DB
        ContentType.objects.clear_cache()
        with self.assertNumQueries(1):
            ContentType.objects.get_for_model(ContentType)
        # The same should happen with a lookup by natural key
        ContentType.objects.clear_cache()
        with self.assertNumQueries(1):
            ContentType.objects.get_by_natural_key('contenttypes',
                                                   'contenttype')
        # And a second hit shouldn't hit the DB
        with self.assertNumQueries(0):
            ContentType.objects.get_by_natural_key('contenttypes',
                                                   'contenttype')
    def test_get_for_models_empty_cache(self):
        # Empty cache.
        with self.assertNumQueries(1):
            cts = ContentType.objects.get_for_models(ContentType, FooWithUrl)
        self.assertEqual(cts, {
            ContentType: ContentType.objects.get_for_model(ContentType),
            FooWithUrl: ContentType.objects.get_for_model(FooWithUrl),
        })
    def test_get_for_models_partial_cache(self):
        # Partial cache
        ContentType.objects.get_for_model(ContentType)
        with self.assertNumQueries(1):
            cts = ContentType.objects.get_for_models(ContentType, FooWithUrl)
        self.assertEqual(cts, {
            ContentType: ContentType.objects.get_for_model(ContentType),
            FooWithUrl: ContentType.objects.get_for_model(FooWithUrl),
        })
    def test_get_for_models_full_cache(self):
        # Full cache
        ContentType.objects.get_for_model(ContentType)
        ContentType.objects.get_for_model(FooWithUrl)
        with self.assertNumQueries(0):
            cts = ContentType.objects.get_for_models(ContentType, FooWithUrl)
        self.assertEqual(cts, {
            ContentType: ContentType.objects.get_for_model(ContentType),
            FooWithUrl: ContentType.objects.get_for_model(FooWithUrl),
        })
    def test_get_for_concrete_model(self):
        """
        Make sure the `for_concrete_model` kwarg correctly works
        with concrete, proxy and deferred models
        """
        concrete_model_ct = ContentType.objects.get_for_model(ConcreteModel)
        self.assertEqual(concrete_model_ct,
                         ContentType.objects.get_for_model(ProxyModel))
        self.assertEqual(concrete_model_ct,
                         ContentType.objects.get_for_model(ConcreteModel,
                                                           for_concrete_model=False))
        proxy_model_ct = ContentType.objects.get_for_model(ProxyModel,
                                                           for_concrete_model=False)
        self.assertNotEqual(concrete_model_ct, proxy_model_ct)
        # Make sure deferred model are correctly handled
        ConcreteModel.objects.create(name="Concrete")
        # .only('pk') produces a deferred subclass at runtime.
        DeferredConcreteModel = ConcreteModel.objects.only('pk').get().__class__
        DeferredProxyModel = ProxyModel.objects.only('pk').get().__class__
        self.assertEqual(concrete_model_ct,
                         ContentType.objects.get_for_model(DeferredConcreteModel))
        self.assertEqual(concrete_model_ct,
                         ContentType.objects.get_for_model(DeferredConcreteModel,
                                                           for_concrete_model=False))
        self.assertEqual(concrete_model_ct,
                         ContentType.objects.get_for_model(DeferredProxyModel))
        self.assertEqual(proxy_model_ct,
                         ContentType.objects.get_for_model(DeferredProxyModel,
                                                           for_concrete_model=False))
    def test_get_for_concrete_models(self):
        """
        Make sure the `for_concrete_models` kwarg correctly works
        with concrete, proxy and deferred models.
        """
        concrete_model_ct = ContentType.objects.get_for_model(ConcreteModel)
        cts = ContentType.objects.get_for_models(ConcreteModel, ProxyModel)
        self.assertEqual(cts, {
            ConcreteModel: concrete_model_ct,
            ProxyModel: concrete_model_ct,
        })
        proxy_model_ct = ContentType.objects.get_for_model(ProxyModel,
                                                           for_concrete_model=False)
        cts = ContentType.objects.get_for_models(ConcreteModel, ProxyModel,
                                                 for_concrete_models=False)
        self.assertEqual(cts, {
            ConcreteModel: concrete_model_ct,
            ProxyModel: proxy_model_ct,
        })
        # Make sure deferred model are correctly handled
        ConcreteModel.objects.create(name="Concrete")
        DeferredConcreteModel = ConcreteModel.objects.only('pk').get().__class__
        DeferredProxyModel = ProxyModel.objects.only('pk').get().__class__
        cts = ContentType.objects.get_for_models(DeferredConcreteModel,
                                                 DeferredProxyModel)
        self.assertEqual(cts, {
            DeferredConcreteModel: concrete_model_ct,
            DeferredProxyModel: concrete_model_ct,
        })
        cts = ContentType.objects.get_for_models(DeferredConcreteModel,
                                                 DeferredProxyModel,
                                                 for_concrete_models=False)
        self.assertEqual(cts, {
            DeferredConcreteModel: concrete_model_ct,
            DeferredProxyModel: proxy_model_ct,
        })
    @override_settings(ALLOWED_HOSTS=['example.com'])
    def test_shortcut_view(self):
        """
        Check that the shortcut view (used for the admin "view on site"
        functionality) returns a complete URL regardless of whether the sites
        framework is installed
        """
        request = HttpRequest()
        request.META = {
            "SERVER_NAME": "Example.com",
            "SERVER_PORT": "80",
        }
        user_ct = ContentType.objects.get_for_model(FooWithUrl)
        obj = FooWithUrl.objects.create(name="john")
        with self.modify_settings(INSTALLED_APPS={'append': 'django.contrib.sites'}):
            response = shortcut(request, user_ct.id, obj.id)
            # NOTE(review): relies on the private HttpResponse._headers
            # attribute layout of this Django version.
            self.assertEqual("http://%s/users/john/" % get_current_site(request).domain,
                             response._headers.get("location")[1])
        with self.modify_settings(INSTALLED_APPS={'remove': 'django.contrib.sites'}):
            response = shortcut(request, user_ct.id, obj.id)
            self.assertEqual("http://Example.com/users/john/",
                             response._headers.get("location")[1])
    def test_shortcut_view_without_get_absolute_url(self):
        """
        Check that the shortcut view (used for the admin "view on site"
        functionality) returns 404 when get_absolute_url is not defined.
        """
        request = HttpRequest()
        request.META = {
            "SERVER_NAME": "Example.com",
            "SERVER_PORT": "80",
        }
        user_ct = ContentType.objects.get_for_model(FooWithoutUrl)
        obj = FooWithoutUrl.objects.create(name="john")
        self.assertRaises(Http404, shortcut, request, user_ct.id, obj.id)
    def test_shortcut_view_with_broken_get_absolute_url(self):
        """
        Check that the shortcut view does not catch an AttributeError raised
        by the model's get_absolute_url method.
        Refs #8997.
        """
        request = HttpRequest()
        request.META = {
            "SERVER_NAME": "Example.com",
            "SERVER_PORT": "80",
        }
        user_ct = ContentType.objects.get_for_model(FooWithBrokenAbsoluteUrl)
        obj = FooWithBrokenAbsoluteUrl.objects.create(name="john")
        self.assertRaises(AttributeError, shortcut, request, user_ct.id, obj.id)
    def test_missing_model(self):
        """
        Ensures that displaying content types in admin (or anywhere) doesn't
        break on leftover content type records in the DB for which no model
        is defined anymore.
        """
        ct = ContentType.objects.create(
            app_label='contenttypes',
            model='OldModel',
        )
        self.assertEqual(six.text_type(ct), 'OldModel')
        self.assertIsNone(ct.model_class())
        # Make sure stale ContentTypes can be fetched like any other object.
        # Before Django 1.6 this caused a NoneType error in the caching mechanism.
        # Instead, just return the ContentType object and let the app detect stale states.
        ct_fetched = ContentType.objects.get_for_id(ct.pk)
        self.assertIsNone(ct_fetched.model_class())
    def test_name_deprecation(self):
        """
        ContentType.name has been removed. Test that a warning is emitted when
        creating a ContentType with a `name`, but the creation should not fail.
        """
        with warnings.catch_warnings(record=True) as warns:
            warnings.simplefilter('always')
            ContentType.objects.create(
                name='Name',
                app_label='contenttypes',
                model='OldModel',
            )
        self.assertEqual(len(warns), 1)
        self.assertEqual(
            str(warns[0].message),
            "ContentType.name field doesn't exist any longer. Please remove it from your code."
        )
        self.assertTrue(ContentType.objects.filter(model='OldModel').exists())
    @mock.patch('django.contrib.contenttypes.models.ContentTypeManager.get_or_create')
    @mock.patch('django.contrib.contenttypes.models.ContentTypeManager.get')
    def test_message_if_get_for_model_fails(self, mocked_get, mocked_get_or_create):
        """
        Check that `RuntimeError` with nice error message is raised if
        `get_for_model` fails because of database errors.
        """
        def _test_message(mocked_method):
            # Each of the three DB error classes must be translated into the
            # same friendly RuntimeError.
            for ExceptionClass in (IntegrityError, OperationalError, ProgrammingError):
                mocked_method.side_effect = ExceptionClass
                with self.assertRaisesMessage(
                    RuntimeError,
                    "Error creating new content types. Please make sure contenttypes "
                    "is migrated before trying to migrate apps individually."
                ):
                    ContentType.objects.get_for_model(ContentType)
        _test_message(mocked_get)
        mocked_get.side_effect = ContentType.DoesNotExist
        _test_message(mocked_get_or_create)
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""Helper methods for CORS and CSRF checks. """
import contextlib
import logging
import urlparse
from django.conf import settings
# Module-level logger named after this module.
log = logging.getLogger(__name__)
def is_cross_domain_request_allowed(request):
    """Check whether we should allow the cross-domain request.
    We allow a cross-domain request only if:
    1) The request is made securely and the referer has "https://" as the protocol.
    2) The referer domain has been whitelisted.
    Arguments:
        request (HttpRequest)
    Returns:
        bool
    """
    referer = request.META.get('HTTP_REFERER')
    referer_parts = urlparse.urlparse(referer) if referer else None
    referer_hostname = referer_parts.hostname if referer_parts is not None else None
    # Use CORS_ALLOW_INSECURE *only* for development and testing environments;
    # it should never be enabled in production.
    if not getattr(settings, 'CORS_ALLOW_INSECURE', False):
        if not request.is_secure():
            log.debug(
                u"Request is not secure, so we cannot send the CSRF token. "
                u"For testing purposes, you can disable this check by setting "
                u"`CORS_ALLOW_INSECURE` to True in the settings"
            )
            return False
        if not referer:
            log.debug(u"No referer provided over a secure connection, so we cannot check the protocol.")
            return False
        if referer_parts.scheme != 'https':
            # Bug fix: the original call had a '%s' placeholder but never
            # passed the referer, so the literal '%s' was logged. Pass the
            # referer as a lazy logging argument.
            log.debug(u"Referer '%s' must have the scheme 'https'", referer)
            return False
    domain_is_whitelisted = (
        getattr(settings, 'CORS_ORIGIN_ALLOW_ALL', False) or
        referer_hostname in getattr(settings, 'CORS_ORIGIN_WHITELIST', [])
    )
    if not domain_is_whitelisted:
        if referer_hostname is None:
            # If no referer is specified, we can't check if it's a cross-domain
            # request or not.
            log.debug(u"Referrer hostname is `None`, so it is not on the whitelist.")
        elif referer_hostname != request.get_host():
            log.info(
                (
                    u"Domain '%s' is not on the cross domain whitelist. "
                    u"Add the domain to `CORS_ORIGIN_WHITELIST` or set "
                    u"`CORS_ORIGIN_ALLOW_ALL` to True in the settings."
                ), referer_hostname
            )
        else:
            log.debug(
                (
                    u"Domain '%s' is the same as the hostname in the request, "
                    u"so we are not going to treat it as a cross-domain request."
                ), referer_hostname
            )
        return False
    return True
@contextlib.contextmanager
def skip_cross_domain_referer_check(request):
    """Temporarily disable the cross-domain CSRF referer check.

    Django's CSRF middleware only performs its referer check when the
    request was made over a secure connection, so while this context is
    active ``request.is_secure()`` is patched to report False, which makes
    the middleware skip the check.  The original method is restored on
    exit, even if the wrapped block raises.
    """
    original_is_secure = request.is_secure
    request.is_secure = lambda: False
    try:
        yield
    finally:
        request.is_secure = original_is_secure
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""Constants for the homematic component."""
# Integration domain.
DOMAIN = "homematic"
# Discovery identifiers, one per platform this integration dispatches to.
DISCOVER_SWITCHES = "homematic.switch"
DISCOVER_LIGHTS = "homematic.light"
DISCOVER_SENSORS = "homematic.sensor"
DISCOVER_BINARY_SENSORS = "homematic.binary_sensor"
DISCOVER_COVER = "homematic.cover"
DISCOVER_CLIMATE = "homematic.climate"
DISCOVER_LOCKS = "homematic.locks"
DISCOVER_BATTERY = "homematic.battery"
# Attribute keys used in discovery payloads, events and service calls.
ATTR_DISCOVER_DEVICES = "devices"
ATTR_PARAM = "param"
ATTR_CHANNEL = "channel"
ATTR_ADDRESS = "address"
ATTR_VALUE = "value"
ATTR_VALUE_TYPE = "value_type"
ATTR_INTERFACE = "interface"
ATTR_ERRORCODE = "error"
ATTR_MESSAGE = "message"
ATTR_TIME = "time"
ATTR_UNIQUE_ID = "unique_id"
ATTR_PARAMSET_KEY = "paramset_key"
ATTR_PARAMSET = "paramset"
ATTR_DISCOVERY_TYPE = "discovery_type"
ATTR_LOW_BAT = "LOW_BAT"
ATTR_LOWBAT = "LOWBAT"
# Event types fired on the event bus.
EVENT_KEYPRESS = "homematic.keypress"
EVENT_IMPULSE = "homematic.impulse"
EVENT_ERROR = "homematic.error"
# Service names registered by this integration.
SERVICE_VIRTUALKEY = "virtualkey"
SERVICE_RECONNECT = "reconnect"
SERVICE_SET_VARIABLE_VALUE = "set_variable_value"
SERVICE_SET_DEVICE_VALUE = "set_device_value"
SERVICE_SET_INSTALL_MODE = "set_install_mode"
SERVICE_PUT_PARAMSET = "put_paramset"
# Map of discovery type -> device type names handled by that platform
# (presumably pyhomematic device class names — confirm against the library).
HM_DEVICE_TYPES = {
    DISCOVER_SWITCHES: [
        "Switch",
        "SwitchPowermeter",
        "IOSwitch",
        "IPSwitch",
        "RFSiren",
        "IPSwitchPowermeter",
        "HMWIOSwitch",
        "Rain",
        "EcoLogic",
        "IPKeySwitchPowermeter",
        "IPGarage",
        "IPKeySwitch",
        "IPKeySwitchLevel",
        "IPMultiIO",
    ],
    DISCOVER_LIGHTS: [
        "Dimmer",
        "KeyDimmer",
        "IPKeyDimmer",
        "IPDimmer",
        "ColorEffectLight",
        "IPKeySwitchLevel",
    ],
    DISCOVER_SENSORS: [
        "SwitchPowermeter",
        "Motion",
        "MotionV2",
        "RemoteMotion",
        "MotionIP",
        "ThermostatWall",
        "AreaThermostat",
        "RotaryHandleSensor",
        "WaterSensor",
        "PowermeterGas",
        "LuxSensor",
        "WeatherSensor",
        "WeatherStation",
        "ThermostatWall2",
        "TemperatureDiffSensor",
        "TemperatureSensor",
        "CO2Sensor",
        "IPSwitchPowermeter",
        "HMWIOSwitch",
        "FillingLevel",
        "ValveDrive",
        "EcoLogic",
        "IPThermostatWall",
        "IPSmoke",
        "RFSiren",
        "PresenceIP",
        "IPAreaThermostat",
        "IPWeatherSensor",
        "RotaryHandleSensorIP",
        "IPPassageSensor",
        "IPKeySwitchPowermeter",
        "IPThermostatWall230V",
        "IPWeatherSensorPlus",
        "IPWeatherSensorBasic",
        "IPBrightnessSensor",
        "IPGarage",
        "UniversalSensor",
        "MotionIPV2",
        "IPMultiIO",
        "IPThermostatWall2",
    ],
    DISCOVER_CLIMATE: [
        "Thermostat",
        "ThermostatWall",
        "MAXThermostat",
        "ThermostatWall2",
        "MAXWallThermostat",
        "IPThermostat",
        "IPThermostatWall",
        "ThermostatGroup",
        "IPThermostatWall230V",
        "IPThermostatWall2",
    ],
    DISCOVER_BINARY_SENSORS: [
        "ShutterContact",
        "Smoke",
        "SmokeV2",
        "Motion",
        "MotionV2",
        "MotionIP",
        "RemoteMotion",
        "WeatherSensor",
        "TiltSensor",
        "IPShutterContact",
        "HMWIOSwitch",
        "MaxShutterContact",
        "Rain",
        "WiredSensor",
        "PresenceIP",
        "IPWeatherSensor",
        "IPPassageSensor",
        "SmartwareMotion",
        "IPWeatherSensorPlus",
        "MotionIPV2",
        "WaterIP",
        "IPMultiIO",
        "TiltIP",
        "IPShutterContactSabotage",
        "IPContact",
    ],
    DISCOVER_COVER: ["Blind", "KeyBlind", "IPKeyBlind", "IPKeyBlindTilt"],
    DISCOVER_LOCKS: ["KeyMatic"],
}
# Parameter nodes skipped during discovery, with per-device-type exceptions.
HM_IGNORE_DISCOVERY_NODE = ["ACTUAL_TEMPERATURE", "ACTUAL_HUMIDITY"]
HM_IGNORE_DISCOVERY_NODE_EXCEPTIONS = {
    "ACTUAL_TEMPERATURE": [
        "IPAreaThermostat",
        "IPWeatherSensor",
        "IPWeatherSensorPlus",
        "IPWeatherSensorBasic",
        "IPThermostatWall",
        "IPThermostatWall2",
    ]
}
# Map of Homematic attribute -> [exposed attribute name, value->label map].
# An empty map means the raw value is used as-is.
HM_ATTRIBUTE_SUPPORT = {
    "LOWBAT": ["battery", {0: "High", 1: "Low"}],
    "LOW_BAT": ["battery", {0: "High", 1: "Low"}],
    "ERROR": ["error", {0: "No"}],
    "ERROR_SABOTAGE": ["sabotage", {0: "No", 1: "Yes"}],
    "SABOTAGE": ["sabotage", {0: "No", 1: "Yes"}],
    "RSSI_PEER": ["rssi_peer", {}],
    "RSSI_DEVICE": ["rssi_device", {}],
    "VALVE_STATE": ["valve", {}],
    "LEVEL": ["level", {}],
    "BATTERY_STATE": ["battery", {}],
    "CONTROL_MODE": [
        "mode",
        {0: "Auto", 1: "Manual", 2: "Away", 3: "Boost", 4: "Comfort", 5: "Lowering"},
    ],
    "POWER": ["power", {}],
    "CURRENT": ["current", {}],
    "VOLTAGE": ["voltage", {}],
    "OPERATING_VOLTAGE": ["voltage", {}],
    "WORKING": ["working", {0: "No", 1: "Yes"}],
    "STATE_UNCERTAIN": ["state_uncertain", {}],
}
# Homematic parameter events interpreted as key presses / impulses.
HM_PRESS_EVENTS = [
    "PRESS_SHORT",
    "PRESS_LONG",
    "PRESS_CONT",
    "PRESS_LONG_RELEASE",
    "PRESS",
]
HM_IMPULSE_EVENTS = ["SEQUENCE_OK"]
# Accepted values for the `resolvenames` configuration option.
CONF_RESOLVENAMES_OPTIONS = ["metadata", "json", "xml", False]
# Keys used for shared storage (e.g. hass.data) and YAML configuration.
DATA_HOMEMATIC = "homematic"
DATA_STORE = "homematic_store"
DATA_CONF = "homematic_conf"
CONF_INTERFACES = "interfaces"
CONF_LOCAL_IP = "local_ip"
CONF_LOCAL_PORT = "local_port"
CONF_PORT = "port"
CONF_PATH = "path"
CONF_CALLBACK_IP = "callback_ip"
CONF_CALLBACK_PORT = "callback_port"
CONF_RESOLVENAMES = "resolvenames"
CONF_JSONPORT = "jsonport"
|
unknown
|
codeparrot/codeparrot-clean
| ||
// WARNING: The script `configurePrerelease.ts` uses a regexp to parse out these values.
// If changing the text in this section, be sure to test `configurePrerelease` too.
export const versionMajorMinor = "6.0";
// The following is baselined as a literal template type without intervention
/** The version of the TypeScript compiler release */
export const version: string = `${versionMajorMinor}.0-dev`;
/**
 * Type of objects whose values are all of the same type.
 * The `in` and `for-in` operators can *not* be safely used,
 * since `Object.prototype` may be modified by outside code.
 */
export interface MapLike<T> {
    [index: string]: T;
}
/** A readonly array nominally branded as sorted via the phantom brand property. */
export interface SortedReadonlyArray<T> extends ReadonlyArray<T> {
    " __sortedArrayBrand": any;
}
/** A mutable array nominally branded as sorted via the phantom brand property. */
export interface SortedArray<T> extends Array<T> {
    " __sortedArrayBrand": any;
}
/**
 * Common read methods for ES6 Map/Set.
 *
 * @internal
 */
export interface ReadonlyCollection<K> {
    readonly size: number;
    has(key: K): boolean;
    keys(): IterableIterator<K>;
}
/** Returns true when `a` and `b` are considered equal. @internal */
export type EqualityComparer<T> = (a: T, b: T) => boolean;
/** Orders two values; result is a `Comparison`. @internal */
export type Comparer<T> = (a: T, b: T) => Comparison;
/** Three-way comparison result. @internal */
export const enum Comparison {
    LessThan = -1,
    EqualTo = 0,
    GreaterThan = 1,
}
|
typescript
|
github
|
https://github.com/microsoft/TypeScript
|
src/compiler/corePublic.ts
|
import datetime
import decimal
from time import time
from django.utils.hashcompat import md5_constructor
from django.utils.log import getLogger
# Logger shared by the cursor wrappers in this module.
logger = getLogger('django.db.backends')
class CursorDebugWrapper(object):
    """Wrap a DB-API cursor, recording each executed query (SQL text plus
    wall-clock duration) on ``self.db.queries`` and logging it at DEBUG
    level.  All other attribute access is proxied to the wrapped cursor.
    """
    def __init__(self, cursor, db):
        self.cursor = cursor
        self.db = db  # Instance of a BaseDatabaseWrapper subclass
    def execute(self, sql, params=()):
        start = time()
        try:
            return self.cursor.execute(sql, params)
        finally:
            # The timing/bookkeeping runs even if execute() raises.
            stop = time()
            duration = stop - start
            # Ask the backend for the query as actually sent (params substituted).
            sql = self.db.ops.last_executed_query(self.cursor, sql, params)
            self.db.queries.append({
                'sql': sql,
                'time': "%.3f" % duration,
            })
            logger.debug('(%.3f) %s; args=%s' % (duration, sql, params),
                extra={'duration': duration, 'sql': sql, 'params': params}
            )
    def executemany(self, sql, param_list):
        start = time()
        try:
            return self.cursor.executemany(sql, param_list)
        finally:
            stop = time()
            duration = stop - start
            self.db.queries.append({
                'sql': '%s times: %s' % (len(param_list), sql),
                'time': "%.3f" % duration,
            })
            logger.debug('(%.3f) %s; args=%s' % (duration, sql, param_list),
                extra={'duration': duration, 'sql': sql, 'params': param_list}
            )
    def __getattr__(self, attr):
        # NOTE(review): __getattr__ is only invoked after normal attribute
        # lookup fails, so the self.__dict__ branch looks unreachable in
        # practice — kept as-is for safety.
        if attr in self.__dict__:
            return self.__dict__[attr]
        else:
            return getattr(self.cursor, attr)
    def __iter__(self):
        return iter(self.cursor)
###############################################
# Converters from database (string) to Python #
###############################################
def typecast_date(s):
    """Convert a 'YYYY-MM-DD' database string into a datetime.date.

    Returns None for null/empty input.
    """
    if not s:
        return None
    year, month, day = (int(part) for part in s.split('-'))
    return datetime.date(year, month, day)
def typecast_time(s):  # does NOT store time zone information
    """Convert an 'HH:MM:SS[.ffffff]' database string into a datetime.time.

    Returns None for null/empty input.
    """
    if not s:
        return None
    hour, minutes, seconds = s.split(':')
    microseconds = '0'
    if '.' in seconds:  # seconds may carry a fractional part
        seconds, microseconds = seconds.split('.')
    return datetime.time(int(hour), int(minutes), int(seconds),
                         int(float('.' + microseconds) * 1000000))
def typecast_timestamp(s):  # does NOT store time zone information
    """Convert a database timestamp string into a datetime.datetime.

    Accepted forms include:
      "2005-07-29 15:48:00.590358-05"
      "2005-07-29 09:56:00-05"
    A value without a time part is delegated to typecast_date();
    null/empty input yields None.
    """
    if not s:
        return None
    if ' ' not in s:
        return typecast_date(s)
    d, t = s.split()
    # A trailing timezone offset is recognised but currently discarded;
    # it may be used in the future.
    if '-' in t:
        t, tz = t.split('-', 1)
        tz = '-' + tz
    elif '+' in t:
        t, tz = t.split('+', 1)
        tz = '+' + tz
    else:
        tz = ''
    year, month, day = (int(part) for part in d.split('-'))
    hours, minutes, seconds = t.split(':')
    microseconds = '0'
    if '.' in seconds:  # seconds may carry a fractional part
        seconds, microseconds = seconds.split('.')
    # Right-pad the fraction to six digits (microsecond precision).
    return datetime.datetime(year, month, day,
                             int(hours), int(minutes), int(seconds),
                             int((microseconds + '000000')[:6]))
def typecast_boolean(s):
    """Map a database boolean value to True/False; None passes through."""
    if s is None:
        return None
    if not s:
        return False
    first_char = str(s)[0].lower()
    return first_char == 't'
def typecast_decimal(s):
    """Convert a database decimal string to decimal.Decimal.

    None and the empty string both map to None.
    """
    if s is None:
        return None
    if s == '':
        return None
    return decimal.Decimal(s)
###############################################
# Converters from Python to database (string) #
###############################################
def rev_typecast_boolean(obj, d):
    """Render a Python truth value as the database string '1' or '0'."""
    return '1' if obj else '0'
def rev_typecast_decimal(d):
    """Render a decimal value as its database string form; None passes through."""
    return None if d is None else str(d)
def truncate_name(name, length=None, hash_len=4):
    """Shorten a string to a repeatable mangled version with the given length.

    If ``name`` already fits within ``length`` (or no length is given) it is
    returned unchanged.  Otherwise it is truncated and suffixed with the
    first ``hash_len`` hex digits of its MD5 digest, so distinct long names
    stay distinct after truncation.
    """
    if length is None or len(name) <= length:
        return name
    # Renamed local from `hash`, which shadowed the builtin of the same name.
    digest = md5_constructor(name).hexdigest()[:hash_len]
    return '%s%s' % (name[:length - hash_len], digest)
def format_number(value, max_digits, decimal_places):
    """
    Formats a number into a string with the requisite number of digits and
    decimal places.
    """
    if not isinstance(value, decimal.Decimal):
        # Plain floats/ints: printf-style rounding to the requested places.
        return u"%.*f" % (decimal_places, value)
    # Decimals: quantize under a context limited to max_digits of precision.
    context = decimal.getcontext().copy()
    context.prec = max_digits
    quantum = decimal.Decimal(".1") ** decimal_places
    return u'%s' % str(value.quantize(quantum, context=context))
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""MNE software for MEG and EEG data analysis."""
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.devN' where N is an integer.
#
from ._version import __version__
# have to import verbose first since it's needed by many things
from .utils import (set_log_level, set_log_file, verbose, set_config,
get_config, get_config_path, set_cache_dir,
set_memmap_min_size, grand_average, sys_info, open_docs)
from .io.pick import (pick_types, pick_channels,
pick_channels_regexp, pick_channels_forward,
pick_types_forward, pick_channels_cov,
pick_channels_evoked, pick_info,
channel_type, channel_indices_by_type)
from .io.base import concatenate_raws
from .io.meas_info import create_info, Info
from .io.proj import Projection
from .io.kit import read_epochs_kit
from .io.eeglab import read_epochs_eeglab
from .io.reference import (set_eeg_reference, set_bipolar_reference,
add_reference_channels)
from .io.what import what
from .bem import (make_sphere_model, make_bem_model, make_bem_solution,
read_bem_surfaces, write_bem_surfaces, write_head_bem,
read_bem_solution, write_bem_solution)
from .cov import (read_cov, write_cov, Covariance, compute_raw_covariance,
compute_covariance, whiten_evoked, make_ad_hoc_cov)
from .event import (read_events, write_events, find_events, merge_events,
pick_events, make_fixed_length_events, concatenate_events,
find_stim_steps, AcqParserFIF)
from .forward import (read_forward_solution, apply_forward, apply_forward_raw,
average_forward_solutions, Forward,
write_forward_solution, make_forward_solution,
convert_forward_solution, make_field_map,
make_forward_dipole, use_coil_def)
from .source_estimate import (read_source_estimate,
SourceEstimate, VectorSourceEstimate,
VolSourceEstimate, VolVectorSourceEstimate,
MixedSourceEstimate, MixedVectorSourceEstimate,
grade_to_tris,
spatial_src_adjacency,
spatial_tris_adjacency,
spatial_dist_adjacency,
spatial_inter_hemi_adjacency,
spatio_temporal_src_adjacency,
spatio_temporal_tris_adjacency,
spatio_temporal_dist_adjacency,
extract_label_time_course, stc_near_sensors)
from .surface import (read_surface, write_surface, decimate_surface, read_tri,
read_morph_map, get_head_surf, get_meg_helmet_surf,
dig_mri_distances)
from .morph import (SourceMorph, read_source_morph, grade_to_vertices,
compute_source_morph)
from .source_space import (read_source_spaces, vertex_to_mni,
head_to_mni, head_to_mri, read_talxfm,
write_source_spaces, setup_source_space,
setup_volume_source_space, SourceSpaces,
add_source_space_distances, morph_source_spaces,
get_volume_labels_from_aseg,
get_volume_labels_from_src, read_freesurfer_lut)
from .annotations import (Annotations, read_annotations, annotations_from_events,
events_from_annotations)
from .epochs import (BaseEpochs, Epochs, EpochsArray, read_epochs,
concatenate_epochs, make_fixed_length_epochs)
from .evoked import Evoked, EvokedArray, read_evokeds, write_evokeds, combine_evoked
from .label import (read_label, label_sign_flip,
write_label, stc_to_label, grow_labels, Label, split_label,
BiHemiLabel, read_labels_from_annot, write_labels_to_annot,
random_parcellation, morph_labels, labels_to_stc)
from .misc import parse_config, read_reject_parameters
from .coreg import (create_default_subject, scale_bem, scale_mri, scale_labels,
scale_source_space)
from .transforms import (read_trans, write_trans,
transform_surface_to, Transform)
from .proj import (read_proj, write_proj, compute_proj_epochs,
compute_proj_evoked, compute_proj_raw, sensitivity_map)
from .dipole import read_dipole, Dipole, DipoleFixed, fit_dipole
from .channels import (equalize_channels, rename_channels, find_layout,
read_vectorview_selection)
from .report import Report, open_report
from .io import read_epochs_fieldtrip, read_evoked_fieldtrip, read_evokeds_mff
from .rank import compute_rank
from . import beamformer
from . import channels
from . import chpi
from . import commands
from . import connectivity
from . import coreg
from . import cuda
from . import datasets
from . import dipole
from . import epochs
from . import event
from . import externals
from . import io
from . import filter
from . import gui
from . import inverse_sparse
from . import minimum_norm
from . import preprocessing
from . import simulation
from . import stats
from . import surface
from . import time_frequency
from . import viz
from . import decoding
# deprecations
from .utils import deprecated_alias
# Keep the old name `read_selection` importable as an alias for
# `read_vectorview_selection`.
deprecated_alias('read_selection', read_vectorview_selection)
# initialize logging
# NOTE(review): set_log_level(None, False) / set_log_file() appear to apply
# the default logging configuration at import time — confirm exact semantics
# in mne.utils.
set_log_level(None, False)
set_log_file()
|
unknown
|
codeparrot/codeparrot-clean
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.