"""
Demo platform that has two fake switches.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/demo/
"""
from homeassistant.components.switch import SwitchDevice
from homeassistant.const import DEVICE_DEFAULT_NAME
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices_callback, discovery_info=None):
"""Set up the demo switches."""
add_devices_callback([
DemoSwitch('Decorative Lights', True, None, True),
DemoSwitch('AC', False, 'mdi:air-conditioner', False)
])
class DemoSwitch(SwitchDevice):
"""Representation of a demo switch."""
def __init__(self, name, state, icon, assumed):
"""Initialize the Demo switch."""
self._name = name or DEVICE_DEFAULT_NAME
self._state = state
self._icon = icon
self._assumed = assumed
@property
def should_poll(self):
"""No polling needed for a demo switch."""
return False
@property
def name(self):
"""Return the name of the device if any."""
return self._name
@property
def icon(self):
"""Return the icon to use for device if any."""
return self._icon
@property
def assumed_state(self):
"""Return if the state is based on assumptions."""
return self._assumed
@property
def current_power_w(self):
"""Return the current power usage in W."""
if self._state:
return 100
@property
def today_energy_kwh(self):
"""Return the today total energy usage in kWh."""
return 15
@property
def is_on(self):
"""Return true if switch is on."""
return self._state
def turn_on(self, **kwargs):
"""Turn the switch on."""
self._state = True
self.schedule_update_ha_state()
def turn_off(self, **kwargs):
"""Turn the device off."""
self._state = False
self.schedule_update_ha_state()
|
{
"content_hash": "03d09fda4c451ef7accaec51a4dd3fb1",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 76,
"avg_line_length": 27.135135135135137,
"alnum_prop": 0.6145418326693227,
"repo_name": "JshWright/home-assistant",
"id": "83b8ae796bb0589b9bf56bea8371e67e7fe28bbb",
"size": "2008",
"binary": false,
"copies": "17",
"ref": "refs/heads/dev",
"path": "homeassistant/components/switch/demo.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1808411"
},
{
"name": "Python",
"bytes": "6070409"
},
{
"name": "Ruby",
"bytes": "517"
},
{
"name": "Shell",
"bytes": "15525"
}
],
"symlink_target": ""
}
|
'''Enhances builder connections, provides an object to access glade objects'''
from gi.repository import GObject, Gtk # pylint: disable=E0611
import inspect
import functools
import logging
logger = logging.getLogger('sms_quickly_lib')
from xml.etree.cElementTree import ElementTree
# this module is big, so it uses some conventional prefixes and postfixes
# *s list, except self.widgets is a dictionary
# *_dict dictionary
# *name string
# ele_* element in an ElementTree
# pylint: disable=R0904
# the many public methods are a feature of Gtk.Builder
class Builder(Gtk.Builder):
''' extra features
connects glade defined handlers to default_handler if necessary
auto connects widgets to handlers with matching names or aliases
auto connects several widgets to a handler via multiple aliases
allows handlers to look up a widget's name
logs every connection made, and any on_* not made
'''
def __init__(self):
Gtk.Builder.__init__(self)
self.widgets = {}
self.glade_handler_dict = {}
self.connections = []
self._reverse_widget_dict = {}
# pylint: disable=R0201
# this is a method so that a subclass of Builder can redefine it
def default_handler(self,
handler_name, filename, *args, **kwargs):
'''helps the apprentice guru
glade defined handlers that do not exist come here instead.
An apprentice guru might wonder which signal does what he wants;
now he can define any likely candidates in glade and notice which
ones get triggered when he plays with the project.
this method does not appear in Gtk.Builder'''
logger.debug('''tried to call non-existent function:%s()
expected in %s
args:%s
kwargs:%s''', handler_name, filename, args, kwargs)
# pylint: enable=R0201
def get_name(self, widget):
''' allows a handler to get the name (id) of a widget
this method does not appear in Gtk.Builder'''
return self._reverse_widget_dict.get(widget)
def add_from_file(self, filename):
'''parses xml file and stores wanted details'''
Gtk.Builder.add_from_file(self, filename)
# extract data for the extra interfaces
tree = ElementTree()
tree.parse(filename)
ele_widgets = tree.getiterator("object")
for ele_widget in ele_widgets:
name = ele_widget.attrib['id']
widget = self.get_object(name)
# populate indexes - a dictionary of widgets
self.widgets[name] = widget
# populate a reversed dictionary
self._reverse_widget_dict[widget] = name
# populate connections list
ele_signals = ele_widget.findall("signal")
connections = [
(name,
ele_signal.attrib['name'],
ele_signal.attrib['handler']) for ele_signal in ele_signals]
if connections:
self.connections.extend(connections)
ele_signals = tree.getiterator("signal")
for ele_signal in ele_signals:
self.glade_handler_dict.update(
{ele_signal.attrib["handler"]: None})
def connect_signals(self, callback_obj):
'''connect the handlers defined in glade
reports successful and failed connections
and logs calls to missing handlers'''
filename = inspect.getfile(callback_obj.__class__)
callback_handler_dict = dict_from_callback_obj(callback_obj)
connection_dict = {}
connection_dict.update(self.glade_handler_dict)
connection_dict.update(callback_handler_dict)
for item in connection_dict.items():
if item[1] is None:
# the handler is missing so reroute to default_handler
handler = functools.partial(
self.default_handler, item[0], filename)
connection_dict[item[0]] = handler
# replace the run time warning
logger.warn("expected handler '%s' in %s",
item[0], filename)
# connect glade defined handlers
Gtk.Builder.connect_signals(self, connection_dict)
# let's tell the user how we applied the glade design
for connection in self.connections:
widget_name, signal_name, handler_name = connection
logger.debug("connect builder by design '%s', '%s', '%s'",
widget_name, signal_name, handler_name)
def get_ui(self, callback_obj=None, by_name=True):
'''Creates the ui object with widgets as attributes
connects signals by 2 methods
this method does not appear in Gtk.Builder'''
result = UiFactory(self.widgets)
# Hook up any signals the user defined in glade
if callback_obj is not None:
# connect glade defined handlers
self.connect_signals(callback_obj)
if by_name:
auto_connect_by_name(callback_obj, self)
return result
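# A minimal usage sketch, not part of the original module: the glade file
# 'camera.ui', the CameraWindow callback class and the 'button_ok' widget id
# are hypothetical; the calls themselves follow the docstrings above.
#
#   builder = Builder()
#   builder.add_from_file('camera.ui')       # parse the GtkBuilder xml
#   ui = builder.get_ui(CameraWindow())      # connect handlers, build UiFactory
#   ui.button_ok.set_sensitive(False)        # widgets exposed as attributes
#   builder.get_name(ui.button_ok)           # -> 'button_ok'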
# pylint: disable=R0903
# this class deliberately does not provide any public interfaces
# apart from the glade widgets
class UiFactory():
''' provides an object with attributes as glade widgets'''
def __init__(self, widget_dict):
self._widget_dict = widget_dict
for (widget_name, widget) in widget_dict.items():
setattr(self, widget_name, widget)
# Mangle any non-usable names (like with spaces or dashes)
# into pythonic ones
cannot_message = """cannot bind ui.%s, name already exists
consider using a pythonic name instead of design name '%s'"""
consider_message = """consider using a pythonic name instead of design name '%s'"""
for (widget_name, widget) in widget_dict.items():
pyname = make_pyname(widget_name)
if pyname != widget_name:
if hasattr(self, pyname):
logger.debug(cannot_message, pyname, widget_name)
else:
logger.debug(consider_message, widget_name)
setattr(self, pyname, widget)
def iterator():
'''Support 'for o in self' '''
return iter(widget_dict.values())
setattr(self, '__iter__', iterator)
def __getitem__(self, name):
'access as dictionary where name might be non-pythonic'
return self._widget_dict[name]
# pylint: enable=R0903
def make_pyname(name):
''' mangles non-pythonic names into pythonic ones'''
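# A worked example (hypothetical glade id): make_pyname('label-1 left')
# returns 'label_1_left'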
pyname = ''
for character in name:
if (character.isalpha() or character == '_' or
(pyname and character.isdigit())):
pyname += character
else:
pyname += '_'
return pyname
# Until bug https://bugzilla.gnome.org/show_bug.cgi?id=652127 is fixed, we
# need to reimplement inspect.getmembers. GObject introspection doesn't
# play nice with it.
def getmembers(obj, check):
members = []
for k in dir(obj):
try:
attr = getattr(obj, k)
except:
continue
if check(attr):
members.append((k, attr))
members.sort()
return members
def dict_from_callback_obj(callback_obj):
'''a dictionary interface to callback_obj'''
methods = getmembers(callback_obj, inspect.ismethod)
aliased_methods = [x[1] for x in methods if hasattr(x[1], 'aliases')]
# a method may have several aliases
#~ @alias('on_btn_foo_clicked')
#~ @alias('on_tool_foo_activate')
#~ on_menu_foo_activate():
#~ pass
alias_groups = [(x.aliases, x) for x in aliased_methods]
aliases = []
for item in alias_groups:
for alias in item[0]:
aliases.append((alias, item[1]))
dict_methods = dict(methods)
dict_aliases = dict(aliases)
results = {}
results.update(dict_methods)
results.update(dict_aliases)
return results
def auto_connect_by_name(callback_obj, builder):
'''finds handlers like on_<widget_name>_<signal> and connects them
i.e. it finds the (widget, signal) pair in the builder and calls
widget.connect(signal, on_<widget_name>_<signal>)'''
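# For example (hypothetical names): a widget with glade id 'button_save'
# emitting 'clicked' connects to callback_obj.on_button_save_clicked(); when
# the widget is callback_obj itself (the top level window), a plain
# on_<signal>() name such as on_destroy() is also tried.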
callback_handler_dict = dict_from_callback_obj(callback_obj)
for item in builder.widgets.items():
(widget_name, widget) = item
signal_ids = []
try:
widget_type = type(widget)
while widget_type:
signal_ids.extend(GObject.signal_list_ids(widget_type))
widget_type = GObject.type_parent(widget_type)
except RuntimeError: # pylint wants a specific error
pass
signal_names = [GObject.signal_name(sid) for sid in signal_ids]
# Now, automatically find any handlers the user didn't specify in glade
for sig in signal_names:
# using convention suggested by glade
sig = sig.replace("-", "_")
handler_names = ["on_%s_%s" % (widget_name, sig)]
# Using the convention that the top level window is not
# specified in the handler name. That is, use
# on_destroy() instead of on_windowname_destroy()
if widget is callback_obj:
handler_names.append("on_%s" % sig)
do_connect(item, sig, handler_names,
callback_handler_dict, builder.connections)
log_unconnected_functions(callback_handler_dict, builder.connections)
def do_connect(item, signal_name, handler_names,
callback_handler_dict, connections):
'''connect this signal to an unused handler'''
widget_name, widget = item
for handler_name in handler_names:
target = handler_name in callback_handler_dict.keys()
connection = (widget_name, signal_name, handler_name)
duplicate = connection in connections
if target and not duplicate:
widget.connect(signal_name, callback_handler_dict[handler_name])
connections.append(connection)
logger.debug("connect builder by name '%s','%s', '%s'",
widget_name, signal_name, handler_name)
def log_unconnected_functions(callback_handler_dict, connections):
'''log functions like on_* that we could not connect'''
connected_functions = [x[2] for x in connections]
handler_names = callback_handler_dict.keys()
unconnected = [x for x in handler_names if x.startswith('on_')]
for handler_name in connected_functions:
try:
unconnected.remove(handler_name)
except ValueError:
pass
for handler_name in unconnected:
logger.debug("Not connected to builder '%s'", handler_name)
|
{
"content_hash": "61c7ec2c2af6712a3d7101780a748cc3",
"timestamp": "",
"source": "github",
"line_count": 307,
"max_line_length": 91,
"avg_line_length": 34.60912052117264,
"alnum_prop": 0.624,
"repo_name": "Deluxo/sms",
"id": "bd064801a7946f7cc3b01aa5cc2c5dd47cc3bcd4",
"size": "10802",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sms_quickly_lib/Builder.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "35346"
},
{
"name": "Shell",
"bytes": "369"
}
],
"symlink_target": ""
}
|
from django.apps import AppConfig
class MailgunConfig(AppConfig):
name = 'mailgun'
|
{
"content_hash": "6097e543bd60a7e73081561bbdf5d574",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 33,
"avg_line_length": 17.8,
"alnum_prop": 0.7528089887640449,
"repo_name": "KirovVerst/qproject",
"id": "0fa7417a8345dcc5184c666a32319fc72eb585d5",
"size": "89",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mailgun/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "4022"
},
{
"name": "HTML",
"bytes": "275"
},
{
"name": "Python",
"bytes": "17476"
}
],
"symlink_target": ""
}
|
from typing import Callable, Dict
import requests
from . import ClientTransport
class HttpPostClientTransport(ClientTransport):
"""HTTP POST based client transport.
Requires :py:mod:`requests`. Submits messages to a server using the body of
an ``HTTP`` ``POST`` request. Replies are taken from the response's body.
:param str endpoint: The URL to send ``POST`` data to.
:param callable post_method: allows replacing `requests.post` with another method,
e.g. the post method of a `requests.Session()` instance.
:param dict kwargs: Additional parameters for :py:func:`requests.post`.
"""
def __init__(
self, endpoint: str, post_method: Callable = None, **kwargs: Dict
):
self.endpoint = endpoint
self.request_kwargs = kwargs
if post_method is None:
self.post = requests.post
else:
self.post = post_method
def send_message(self, message: bytes, expect_reply: bool = True):
if not isinstance(message, bytes):
raise TypeError('message must be of type bytes')
r = self.post(self.endpoint, data=message, **self.request_kwargs)
if expect_reply:
# Note that this does not strictly conform to the (JSON RPC) standard, since
# even notifications may, under certain conditions, return an
# error message, which is completely ignored by this implementation.
return r.content
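# A minimal usage sketch, not part of the original module; the endpoint URL is
# hypothetical, and a requests.Session is shown only because the docstring
# suggests passing its post method:
#
#   import requests
#   session = requests.Session()
#   transport = HttpPostClientTransport(
#       'http://localhost:5000/jsonrpc', post_method=session.post, timeout=5
#   )
#   reply = transport.send_message(b'{"jsonrpc": "2.0", "method": "ping", "id": 1}')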
|
{
"content_hash": "2135c9e6ebc7f4cb8bb57a3f921d59b0",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 88,
"avg_line_length": 37.69230769230769,
"alnum_prop": 0.6557823129251701,
"repo_name": "mbr/tinyrpc",
"id": "80b769e2e528d08d2b6f283bed30b4e939bc466d",
"size": "1516",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tinyrpc/transports/http.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "166265"
}
],
"symlink_target": ""
}
|
import d1_common.types.dataoneTypes_v1_2
import d1_common.types.dataoneTypes_v2_0
import d1_test.d1_test_case
import d1_test.mock_api.util
class TestMockUtil(d1_test.d1_test_case.D1TestCase):
def test_1000(self):
"""parse_rest_url() 1."""
version_tag, endpoint_str, param_list, query_dict, client = d1_test.mock_api.util.parse_rest_url(
"/v1/log"
)
assert version_tag == "v1"
assert endpoint_str == "log"
assert param_list == []
assert query_dict == {}
assert (
client.pyxb_binding.Namespace == d1_common.types.dataoneTypes_v1_2.Namespace
)
def test_1010(self):
"""parse_rest_url() 2."""
# GET /object[?fromDate={fromDate}&toDate={toDate}&
# identifier={identifier}&formatId={formatId}&replicaStatus={replicaStatus}
# &start={start}&count={count}]
version_tag, endpoint_str, param_list, query_dict, client = d1_test.mock_api.util.parse_rest_url(
"http://dataone.server.edu/dataone/mn/v2/object/"
"ar%2f%2fg1/arg2%2f?fromDate=date1&toDate=date2&start=500&count=50"
)
assert version_tag == "v2"
assert endpoint_str == "object"
assert param_list == ["ar//g1", "arg2/"]
assert query_dict == {
"count": ["50"],
"toDate": ["date2"],
"fromDate": ["date1"],
"start": ["500"],
}
assert (
client.pyxb_binding.Namespace == d1_common.types.dataoneTypes_v2_0.Namespace
)
|
{
"content_hash": "c3f11369610b6917aec062c790708046",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 105,
"avg_line_length": 37,
"alnum_prop": 0.5778635778635779,
"repo_name": "DataONEorg/d1_python",
"id": "e697185f5311c5931ed1a3947a3ca40e3352f0a1",
"size": "2343",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_utilities/src/d1_test/mock_api/tests/test_util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4798"
},
{
"name": "HTML",
"bytes": "13358"
},
{
"name": "Inno Setup",
"bytes": "3430"
},
{
"name": "JavaScript",
"bytes": "2068"
},
{
"name": "Python",
"bytes": "3547939"
},
{
"name": "Shell",
"bytes": "5670"
},
{
"name": "XSLT",
"bytes": "89205"
}
],
"symlink_target": ""
}
|
import tests.perf.test_ozone_ar_speed_many as gen
gen.run_test(750)
|
{
"content_hash": "e9efd90b4e22019c3694dbd1cf7c7eec",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 49,
"avg_line_length": 17.5,
"alnum_prop": 0.7571428571428571,
"repo_name": "antoinecarme/pyaf",
"id": "a5c1e96cb9f77bdf0412794034d75bcd790024dc",
"size": "70",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/perf/test_ozone_ar_speed_order_750.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
}
|
import mock
from oslo.config import cfg
from neutron.common import constants as n_consts
from neutron.common import exceptions as n_exc
from neutron import context
from neutron.db import api as db
from neutron.plugins.vmware.api_client.exception import NsxApiException
from neutron.plugins.vmware.common import exceptions as p_exc
from neutron.plugins.vmware.dbexts import lsn_db
from neutron.plugins.vmware.dhcp_meta import constants
from neutron.plugins.vmware.dhcp_meta import lsnmanager as lsn_man
from neutron.plugins.vmware.dhcp_meta import migration as mig_man
from neutron.plugins.vmware.dhcp_meta import nsx
from neutron.plugins.vmware.dhcp_meta import rpc
from neutron.tests import base
class DhcpMetadataBuilderTestCase(base.BaseTestCase):
def setUp(self):
super(DhcpMetadataBuilderTestCase, self).setUp()
self.builder = mig_man.DhcpMetadataBuilder(mock.Mock(), mock.Mock())
self.network_id = 'foo_network_id'
self.subnet_id = 'foo_subnet_id'
self.router_id = 'foo_router_id'
def test_dhcp_agent_get_all(self):
expected = []
self.builder.plugin.list_dhcp_agents_hosting_network.return_value = (
{'agents': expected})
agents = self.builder.dhcp_agent_get_all(mock.ANY, self.network_id)
self.assertEqual(expected, agents)
def test_dhcp_port_get_all(self):
expected = []
self.builder.plugin.get_ports.return_value = expected
ports = self.builder.dhcp_port_get_all(mock.ANY, self.network_id)
self.assertEqual(expected, ports)
def test_router_id_get(self):
port = {
'device_id': self.router_id,
'network_id': self.network_id,
'fixed_ips': [{'subnet_id': self.subnet_id}]
}
subnet = {
'id': self.subnet_id,
'network_id': self.network_id
}
self.builder.plugin.get_ports.return_value = [port]
result = self.builder.router_id_get(context, subnet)
self.assertEqual(self.router_id, result)
def test_router_id_get_none_subnet(self):
self.assertIsNone(self.builder.router_id_get(mock.ANY, None))
def test_metadata_deallocate(self):
self.builder.metadata_deallocate(
mock.ANY, self.router_id, self.subnet_id)
self.assertTrue(self.builder.plugin.remove_router_interface.call_count)
def test_metadata_allocate(self):
self.builder.metadata_allocate(
mock.ANY, self.router_id, self.subnet_id)
self.assertTrue(self.builder.plugin.add_router_interface.call_count)
def test_dhcp_deallocate(self):
agents = [{'id': 'foo_agent_id'}]
ports = [{'id': 'foo_port_id'}]
self.builder.dhcp_deallocate(mock.ANY, self.network_id, agents, ports)
self.assertTrue(
self.builder.plugin.remove_network_from_dhcp_agent.call_count)
self.assertTrue(self.builder.plugin.delete_port.call_count)
def _test_dhcp_allocate(self, subnet, expected_notify_count):
with mock.patch.object(mig_man.nsx, 'handle_network_dhcp_access') as f:
self.builder.dhcp_allocate(mock.ANY, self.network_id, subnet)
self.assertTrue(f.call_count)
self.assertEqual(expected_notify_count,
self.builder.notifier.notify.call_count)
def test_dhcp_allocate(self):
subnet = {'network_id': self.network_id, 'id': self.subnet_id}
self._test_dhcp_allocate(subnet, 2)
def test_dhcp_allocate_none_subnet(self):
self._test_dhcp_allocate(None, 0)
class MigrationManagerTestCase(base.BaseTestCase):
def setUp(self):
super(MigrationManagerTestCase, self).setUp()
self.manager = mig_man.MigrationManager(mock.Mock(),
mock.Mock(),
mock.Mock())
self.network_id = 'foo_network_id'
self.router_id = 'foo_router_id'
self.subnet_id = 'foo_subnet_id'
self.mock_builder_p = mock.patch.object(self.manager, 'builder')
self.mock_builder = self.mock_builder_p.start()
self.addCleanup(self.mock_builder_p.stop)
def _test_validate(self, lsn_exists=False, ext_net=False, subnets=None):
network = {'router:external': ext_net}
self.manager.manager.lsn_exists.return_value = lsn_exists
self.manager.plugin.get_network.return_value = network
self.manager.plugin.get_subnets.return_value = subnets
result = self.manager.validate(mock.ANY, self.network_id)
if len(subnets):
self.assertEqual(subnets[0], result)
else:
self.assertIsNone(result)
def test_validate_no_subnets(self):
self._test_validate(subnets=[])
def test_validate_with_one_subnet(self):
self._test_validate(subnets=[{'cidr': '0.0.0.0/0'}])
def test_validate_raise_conflict_many_subnets(self):
self.assertRaises(p_exc.LsnMigrationConflict,
self._test_validate,
subnets=[{'id': 'sub1'}, {'id': 'sub2'}])
def test_validate_raise_conflict_lsn_exists(self):
self.assertRaises(p_exc.LsnMigrationConflict,
self._test_validate,
lsn_exists=True)
def test_validate_raise_badrequest_external_net(self):
self.assertRaises(n_exc.BadRequest,
self._test_validate,
ext_net=True)
def test_validate_raise_badrequest_metadata_net(self):
self.assertRaises(n_exc.BadRequest,
self._test_validate,
ext_net=False,
subnets=[{'cidr': rpc.METADATA_SUBNET_CIDR}])
def _test_migrate(self, router, subnet, expected_calls):
self.mock_builder.router_id_get.return_value = router
self.manager.migrate(mock.ANY, self.network_id, subnet)
# testing the exact order of calls is important
self.assertEqual(expected_calls, self.mock_builder.mock_calls)
def test_migrate(self):
subnet = {
'id': self.subnet_id,
'network_id': self.network_id
}
call_sequence = [
mock.call.router_id_get(mock.ANY, subnet),
mock.call.metadata_deallocate(
mock.ANY, self.router_id, self.subnet_id),
mock.call.dhcp_agent_get_all(mock.ANY, self.network_id),
mock.call.dhcp_port_get_all(mock.ANY, self.network_id),
mock.call.dhcp_deallocate(
mock.ANY, self.network_id, mock.ANY, mock.ANY),
mock.call.dhcp_allocate(mock.ANY, self.network_id, subnet),
mock.call.metadata_allocate(
mock.ANY, self.router_id, self.subnet_id)
]
self._test_migrate(self.router_id, subnet, call_sequence)
def test_migrate_no_router_uplink(self):
subnet = {
'id': self.subnet_id,
'network_id': self.network_id
}
call_sequence = [
mock.call.router_id_get(mock.ANY, subnet),
mock.call.dhcp_agent_get_all(mock.ANY, self.network_id),
mock.call.dhcp_port_get_all(mock.ANY, self.network_id),
mock.call.dhcp_deallocate(
mock.ANY, self.network_id, mock.ANY, mock.ANY),
mock.call.dhcp_allocate(mock.ANY, self.network_id, subnet),
]
self._test_migrate(None, subnet, call_sequence)
def test_migrate_no_subnet(self):
call_sequence = [
mock.call.router_id_get(mock.ANY, None),
mock.call.dhcp_allocate(mock.ANY, self.network_id, None),
]
self._test_migrate(None, None, call_sequence)
def _test_report(self, lsn_attrs, expected):
self.manager.manager.lsn_port_get.return_value = lsn_attrs
report = self.manager.report(mock.ANY, self.network_id, self.subnet_id)
self.assertEqual(expected, report)
def test_report_for_lsn(self):
self._test_report(('foo_lsn_id', 'foo_lsn_port_id'),
{'ports': ['foo_lsn_port_id'],
'services': ['foo_lsn_id'], 'type': 'lsn'})
def test_report_for_lsn_without_lsn_port(self):
self._test_report(('foo_lsn_id', None),
{'ports': [],
'services': ['foo_lsn_id'], 'type': 'lsn'})
def _test_report_for_lsn_without_subnet(self, validated_subnet):
with mock.patch.object(self.manager, 'validate',
return_value=validated_subnet):
self.manager.manager.lsn_port_get.return_value = (
('foo_lsn_id', 'foo_lsn_port_id'))
report = self.manager.report(context, self.network_id)
expected = {
'ports': ['foo_lsn_port_id'] if validated_subnet else [],
'services': ['foo_lsn_id'], 'type': 'lsn'
}
self.assertEqual(expected, report)
def test_report_for_lsn_without_subnet_subnet_found(self):
self._test_report_for_lsn_without_subnet({'id': self.subnet_id})
def test_report_for_lsn_without_subnet_subnet_not_found(self):
self.manager.manager.lsn_get.return_value = 'foo_lsn_id'
self._test_report_for_lsn_without_subnet(None)
def test_report_for_dhcp_agent(self):
self.manager.manager.lsn_port_get.return_value = (None, None)
self.mock_builder.dhcp_agent_get_all.return_value = (
[{'id': 'foo_agent_id'}])
self.mock_builder.dhcp_port_get_all.return_value = (
[{'id': 'foo_dhcp_port_id'}])
result = self.manager.report(mock.ANY, self.network_id, self.subnet_id)
expected = {
'ports': ['foo_dhcp_port_id'],
'services': ['foo_agent_id'],
'type': 'agent'
}
self.assertEqual(expected, result)
class LsnManagerTestCase(base.BaseTestCase):
def setUp(self):
super(LsnManagerTestCase, self).setUp()
self.net_id = 'foo_network_id'
self.sub_id = 'foo_subnet_id'
self.port_id = 'foo_port_id'
self.lsn_id = 'foo_lsn_id'
self.mac = 'aa:bb:cc:dd:ee:ff'
self.lsn_port_id = 'foo_lsn_port_id'
self.tenant_id = 'foo_tenant_id'
self.manager = lsn_man.LsnManager(mock.Mock())
self.mock_lsn_api_p = mock.patch.object(lsn_man, 'lsn_api')
self.mock_lsn_api = self.mock_lsn_api_p.start()
nsx.register_dhcp_opts(cfg)
nsx.register_metadata_opts(cfg)
self.addCleanup(self.mock_lsn_api_p.stop)
def test_lsn_get(self):
self.mock_lsn_api.lsn_for_network_get.return_value = self.lsn_id
expected = self.manager.lsn_get(mock.ANY, self.net_id)
self.mock_lsn_api.lsn_for_network_get.assert_called_once_with(
mock.ANY, self.net_id)
self.assertEqual(expected, self.lsn_id)
def _test_lsn_get_raise_not_found_with_exc(self, exc):
self.mock_lsn_api.lsn_for_network_get.side_effect = exc
self.assertRaises(p_exc.LsnNotFound,
self.manager.lsn_get,
mock.ANY, self.net_id)
self.mock_lsn_api.lsn_for_network_get.assert_called_once_with(
mock.ANY, self.net_id)
def test_lsn_get_raise_not_found_with_not_found(self):
self._test_lsn_get_raise_not_found_with_exc(n_exc.NotFound)
def test_lsn_get_raise_not_found_with_api_error(self):
self._test_lsn_get_raise_not_found_with_exc(NsxApiException)
def _test_lsn_get_silent_raise_with_exc(self, exc):
self.mock_lsn_api.lsn_for_network_get.side_effect = exc
expected = self.manager.lsn_get(
mock.ANY, self.net_id, raise_on_err=False)
self.mock_lsn_api.lsn_for_network_get.assert_called_once_with(
mock.ANY, self.net_id)
self.assertIsNone(expected)
def test_lsn_get_silent_raise_with_not_found(self):
self._test_lsn_get_silent_raise_with_exc(n_exc.NotFound)
def test_lsn_get_silent_raise_with_api_error(self):
self._test_lsn_get_silent_raise_with_exc(NsxApiException)
def test_lsn_create(self):
self.mock_lsn_api.lsn_for_network_create.return_value = self.lsn_id
self.manager.lsn_create(mock.ANY, self.net_id)
self.mock_lsn_api.lsn_for_network_create.assert_called_once_with(
mock.ANY, self.net_id)
def test_lsn_create_raise_api_error(self):
self.mock_lsn_api.lsn_for_network_create.side_effect = NsxApiException
self.assertRaises(p_exc.NsxPluginException,
self.manager.lsn_create,
mock.ANY, self.net_id)
self.mock_lsn_api.lsn_for_network_create.assert_called_once_with(
mock.ANY, self.net_id)
def test_lsn_delete(self):
self.manager.lsn_delete(mock.ANY, self.lsn_id)
self.mock_lsn_api.lsn_delete.assert_called_once_with(
mock.ANY, self.lsn_id)
def _test_lsn_delete_with_exc(self, exc):
self.mock_lsn_api.lsn_delete.side_effect = exc
self.manager.lsn_delete(mock.ANY, self.lsn_id)
self.mock_lsn_api.lsn_delete.assert_called_once_with(
mock.ANY, self.lsn_id)
def test_lsn_delete_with_not_found(self):
self._test_lsn_delete_with_exc(n_exc.NotFound)
def test_lsn_delete_api_exception(self):
self._test_lsn_delete_with_exc(NsxApiException)
def test_lsn_delete_by_network(self):
self.mock_lsn_api.lsn_for_network_get.return_value = self.lsn_id
with mock.patch.object(self.manager, 'lsn_delete') as f:
self.manager.lsn_delete_by_network(mock.ANY, self.net_id)
self.mock_lsn_api.lsn_for_network_get.assert_called_once_with(
mock.ANY, self.net_id)
f.assert_called_once_with(mock.ANY, self.lsn_id)
def _test_lsn_delete_by_network_with_exc(self, exc):
self.mock_lsn_api.lsn_for_network_get.side_effect = exc
with mock.patch.object(lsn_man.LOG, 'warn') as l:
self.manager.lsn_delete_by_network(mock.ANY, self.net_id)
self.assertEqual(1, l.call_count)
def test_lsn_delete_by_network_with_not_found(self):
self._test_lsn_delete_by_network_with_exc(n_exc.NotFound)
def test_lsn_delete_by_network_with_not_api_error(self):
self._test_lsn_delete_by_network_with_exc(NsxApiException)
def test_lsn_port_get(self):
self.mock_lsn_api.lsn_port_by_subnet_get.return_value = (
self.lsn_port_id)
with mock.patch.object(
self.manager, 'lsn_get', return_value=self.lsn_id):
expected = self.manager.lsn_port_get(
mock.ANY, self.net_id, self.sub_id)
self.assertEqual(expected, (self.lsn_id, self.lsn_port_id))
def test_lsn_port_get_lsn_not_found_on_raise(self):
with mock.patch.object(
self.manager, 'lsn_get',
side_effect=p_exc.LsnNotFound(entity='network',
entity_id=self.net_id)):
self.assertRaises(p_exc.LsnNotFound,
self.manager.lsn_port_get,
mock.ANY, self.net_id, self.sub_id)
def test_lsn_port_get_lsn_not_found_silent_raise(self):
with mock.patch.object(self.manager, 'lsn_get', return_value=None):
expected = self.manager.lsn_port_get(
mock.ANY, self.net_id, self.sub_id, raise_on_err=False)
self.assertEqual(expected, (None, None))
def test_lsn_port_get_port_not_found_on_raise(self):
self.mock_lsn_api.lsn_port_by_subnet_get.side_effect = n_exc.NotFound
with mock.patch.object(
self.manager, 'lsn_get', return_value=self.lsn_id):
self.assertRaises(p_exc.LsnPortNotFound,
self.manager.lsn_port_get,
mock.ANY, self.net_id, self.sub_id)
def test_lsn_port_get_port_not_found_silent_raise(self):
self.mock_lsn_api.lsn_port_by_subnet_get.side_effect = n_exc.NotFound
with mock.patch.object(
self.manager, 'lsn_get', return_value=self.lsn_id):
expected = self.manager.lsn_port_get(
mock.ANY, self.net_id, self.sub_id, raise_on_err=False)
self.assertEqual(expected, (self.lsn_id, None))
def test_lsn_port_create(self):
self.mock_lsn_api.lsn_port_create.return_value = self.lsn_port_id
expected = self.manager.lsn_port_create(mock.ANY, mock.ANY, mock.ANY)
self.assertEqual(expected, self.lsn_port_id)
def _test_lsn_port_create_with_exc(self, exc, expected):
self.mock_lsn_api.lsn_port_create.side_effect = exc
self.assertRaises(expected,
self.manager.lsn_port_create,
mock.ANY, mock.ANY, mock.ANY)
def test_lsn_port_create_with_not_found(self):
self._test_lsn_port_create_with_exc(n_exc.NotFound, p_exc.LsnNotFound)
def test_lsn_port_create_api_exception(self):
self._test_lsn_port_create_with_exc(NsxApiException,
p_exc.NsxPluginException)
def test_lsn_port_delete(self):
self.manager.lsn_port_delete(mock.ANY, mock.ANY, mock.ANY)
self.assertEqual(1, self.mock_lsn_api.lsn_port_delete.call_count)
def _test_lsn_port_delete_with_exc(self, exc):
self.mock_lsn_api.lsn_port_delete.side_effect = exc
with mock.patch.object(lsn_man.LOG, 'warn') as l:
self.manager.lsn_port_delete(mock.ANY, mock.ANY, mock.ANY)
self.assertEqual(1, self.mock_lsn_api.lsn_port_delete.call_count)
self.assertEqual(1, l.call_count)
def test_lsn_port_delete_with_not_found(self):
self._test_lsn_port_delete_with_exc(n_exc.NotFound)
def test_lsn_port_delete_api_exception(self):
self._test_lsn_port_delete_with_exc(NsxApiException)
def _test_lsn_port_dhcp_setup(self, ret_val, sub):
self.mock_lsn_api.lsn_port_create.return_value = self.lsn_port_id
with mock.patch.object(
self.manager, 'lsn_get', return_value=self.lsn_id):
with mock.patch.object(lsn_man.switch_api,
'get_port_by_neutron_tag'):
expected = self.manager.lsn_port_dhcp_setup(
mock.ANY, mock.ANY, mock.ANY, mock.ANY, subnet_config=sub)
self.assertEqual(
1, self.mock_lsn_api.lsn_port_create.call_count)
self.assertEqual(
1, self.mock_lsn_api.lsn_port_plug_network.call_count)
self.assertEqual(expected, ret_val)
def test_lsn_port_dhcp_setup(self):
self._test_lsn_port_dhcp_setup((self.lsn_id, self.lsn_port_id), None)
def test_lsn_port_dhcp_setup_with_config(self):
with mock.patch.object(self.manager, 'lsn_port_dhcp_configure') as f:
self._test_lsn_port_dhcp_setup(None, mock.ANY)
self.assertEqual(1, f.call_count)
def test_lsn_port_dhcp_setup_with_not_found(self):
with mock.patch.object(lsn_man.switch_api,
'get_port_by_neutron_tag') as f:
f.side_effect = n_exc.NotFound
self.assertRaises(p_exc.PortConfigurationError,
self.manager.lsn_port_dhcp_setup,
mock.ANY, mock.ANY, mock.ANY, mock.ANY)
def test_lsn_port_dhcp_setup_with_conflict(self):
self.mock_lsn_api.lsn_port_plug_network.side_effect = (
p_exc.LsnConfigurationConflict(lsn_id=self.lsn_id))
with mock.patch.object(lsn_man.switch_api, 'get_port_by_neutron_tag'):
with mock.patch.object(self.manager, 'lsn_port_delete') as g:
self.assertRaises(p_exc.PortConfigurationError,
self.manager.lsn_port_dhcp_setup,
mock.ANY, mock.ANY, mock.ANY, mock.ANY)
self.assertEqual(1, g.call_count)
def _test_lsn_port_dhcp_configure_with_subnet(
self, expected, dns=None, gw=None, routes=None):
subnet = {
'enable_dhcp': True,
'dns_nameservers': dns or [],
'gateway_ip': gw,
'host_routes': routes
}
self.manager.lsn_port_dhcp_configure(mock.ANY, self.lsn_id,
self.lsn_port_id, subnet)
self.mock_lsn_api.lsn_port_dhcp_configure.assert_called_once_with(
mock.ANY, self.lsn_id, self.lsn_port_id, subnet['enable_dhcp'],
expected)
def test_lsn_port_dhcp_configure(self):
expected = {
'routers': '127.0.0.1',
'default_lease_time': cfg.CONF.NSX_DHCP.default_lease_time,
'domain_name': cfg.CONF.NSX_DHCP.domain_name
}
self._test_lsn_port_dhcp_configure_with_subnet(
expected, dns=[], gw='127.0.0.1', routes=[])
def test_lsn_port_dhcp_configure_gatewayless(self):
expected = {
'default_lease_time': cfg.CONF.NSX_DHCP.default_lease_time,
'domain_name': cfg.CONF.NSX_DHCP.domain_name
}
self._test_lsn_port_dhcp_configure_with_subnet(expected, gw=None)
def test_lsn_port_dhcp_configure_with_extra_dns_servers(self):
expected = {
'default_lease_time': cfg.CONF.NSX_DHCP.default_lease_time,
'domain_name_servers': '8.8.8.8,9.9.9.9',
'domain_name': cfg.CONF.NSX_DHCP.domain_name
}
self._test_lsn_port_dhcp_configure_with_subnet(
expected, dns=['8.8.8.8', '9.9.9.9'])
def test_lsn_port_dhcp_configure_with_host_routes(self):
expected = {
'default_lease_time': cfg.CONF.NSX_DHCP.default_lease_time,
'domain_name': cfg.CONF.NSX_DHCP.domain_name,
'classless_static_routes': '8.8.8.8,9.9.9.9'
}
self._test_lsn_port_dhcp_configure_with_subnet(
expected, routes=['8.8.8.8', '9.9.9.9'])
def _test_lsn_metadata_configure(self, is_enabled):
with mock.patch.object(self.manager, 'lsn_port_dispose') as f:
self.manager.plugin.get_subnet.return_value = (
{'network_id': self.net_id})
self.manager.lsn_metadata_configure(mock.ANY,
self.sub_id, is_enabled)
expected = {
'metadata_server_port': 8775,
'metadata_server_ip': '127.0.0.1',
'metadata_proxy_shared_secret': ''
}
self.mock_lsn_api.lsn_metadata_configure.assert_called_once_with(
mock.ANY, mock.ANY, is_enabled, expected)
if is_enabled:
self.assertEqual(
1, self.mock_lsn_api.lsn_port_by_subnet_get.call_count)
else:
self.assertEqual(1, f.call_count)
def test_lsn_metadata_configure_enabled(self):
self._test_lsn_metadata_configure(True)
def test_lsn_metadata_configure_disabled(self):
self._test_lsn_metadata_configure(False)
def test_lsn_metadata_configure_not_found(self):
self.mock_lsn_api.lsn_metadata_configure.side_effect = (
p_exc.LsnNotFound(entity='lsn', entity_id=self.lsn_id))
self.manager.plugin.get_subnet.return_value = (
{'network_id': self.net_id})
self.assertRaises(p_exc.NsxPluginException,
self.manager.lsn_metadata_configure,
mock.ANY, self.sub_id, True)
def test_lsn_port_metadata_setup(self):
subnet = {
'cidr': '0.0.0.0/0',
'id': self.sub_id,
'network_id': self.net_id,
'tenant_id': self.tenant_id
}
with mock.patch.object(lsn_man.switch_api, 'create_lport') as f:
f.return_value = {'uuid': self.port_id}
self.manager.lsn_port_metadata_setup(mock.ANY, self.lsn_id, subnet)
self.assertEqual(1, self.mock_lsn_api.lsn_port_create.call_count)
self.mock_lsn_api.lsn_port_plug_network.assert_called_once_with(
mock.ANY, self.lsn_id, mock.ANY, self.port_id)
def test_lsn_port_metadata_setup_raise_not_found(self):
subnet = {
'cidr': '0.0.0.0/0',
'id': self.sub_id,
'network_id': self.net_id,
'tenant_id': self.tenant_id
}
with mock.patch.object(lsn_man.switch_api, 'create_lport') as f:
f.side_effect = n_exc.NotFound
self.assertRaises(p_exc.PortConfigurationError,
self.manager.lsn_port_metadata_setup,
mock.ANY, self.lsn_id, subnet)
def test_lsn_port_metadata_setup_raise_conflict(self):
subnet = {
'cidr': '0.0.0.0/0',
'id': self.sub_id,
'network_id': self.net_id,
'tenant_id': self.tenant_id
}
with mock.patch.object(lsn_man.switch_api, 'create_lport') as f:
with mock.patch.object(lsn_man.switch_api, 'delete_port') as g:
f.return_value = {'uuid': self.port_id}
self.mock_lsn_api.lsn_port_plug_network.side_effect = (
p_exc.LsnConfigurationConflict(lsn_id=self.lsn_id))
self.assertRaises(p_exc.PortConfigurationError,
self.manager.lsn_port_metadata_setup,
mock.ANY, self.lsn_id, subnet)
self.assertEqual(1,
self.mock_lsn_api.lsn_port_delete.call_count)
self.assertEqual(1, g.call_count)
def _test_lsn_port_dispose_with_values(self, lsn_id, lsn_port_id, count):
with mock.patch.object(self.manager,
'lsn_port_get_by_mac',
return_value=(lsn_id, lsn_port_id)):
self.manager.lsn_port_dispose(mock.ANY, self.net_id, self.mac)
self.assertEqual(count,
self.mock_lsn_api.lsn_port_delete.call_count)
def test_lsn_port_dispose(self):
self._test_lsn_port_dispose_with_values(
self.lsn_id, self.lsn_port_id, 1)
def test_lsn_port_dispose_meta_mac(self):
self.mac = constants.METADATA_MAC
with mock.patch.object(lsn_man.switch_api,
'get_port_by_neutron_tag') as f:
with mock.patch.object(lsn_man.switch_api, 'delete_port') as g:
f.return_value = {'uuid': self.port_id}
self._test_lsn_port_dispose_with_values(
self.lsn_id, self.lsn_port_id, 1)
f.assert_called_once_with(
mock.ANY, self.net_id, constants.METADATA_PORT_ID)
g.assert_called_once_with(mock.ANY, self.net_id, self.port_id)
def test_lsn_port_dispose_lsn_not_found(self):
self._test_lsn_port_dispose_with_values(None, None, 0)
def test_lsn_port_dispose_lsn_port_not_found(self):
self._test_lsn_port_dispose_with_values(self.lsn_id, None, 0)
def test_lsn_port_dispose_api_error(self):
self.mock_lsn_api.lsn_port_delete.side_effect = NsxApiException
with mock.patch.object(lsn_man.LOG, 'warn') as l:
self.manager.lsn_port_dispose(mock.ANY, self.net_id, self.mac)
self.assertEqual(1, l.call_count)
def test_lsn_port_host_conf(self):
with mock.patch.object(self.manager,
'lsn_port_get',
return_value=(self.lsn_id, self.lsn_port_id)):
f = mock.Mock()
self.manager._lsn_port_host_conf(mock.ANY, self.net_id,
self.sub_id, mock.ANY, f)
self.assertEqual(1, f.call_count)
def test_lsn_port_host_conf_lsn_port_not_found(self):
with mock.patch.object(
self.manager,
'lsn_port_get',
side_effect=p_exc.LsnPortNotFound(lsn_id=self.lsn_id,
entity='subnet',
entity_id=self.sub_id)):
self.assertRaises(p_exc.PortConfigurationError,
self.manager._lsn_port_host_conf, mock.ANY,
self.net_id, self.sub_id, mock.ANY, mock.Mock())
def _test_lsn_port_update(self, dhcp=None, meta=None):
self.manager.lsn_port_update(
mock.ANY, self.net_id, self.sub_id, dhcp, meta)
count = 1 if dhcp else 0
count = count + 1 if meta else count
self.assertEqual(count, (self.mock_lsn_api.
lsn_port_host_entries_update.call_count))
def test_lsn_port_update(self):
self._test_lsn_port_update()
def test_lsn_port_update_dhcp_meta(self):
self._test_lsn_port_update(mock.ANY, mock.ANY)
def test_lsn_port_update_dhcp_and_nometa(self):
self._test_lsn_port_update(mock.ANY, None)
def test_lsn_port_update_nodhcp_and_nmeta(self):
self._test_lsn_port_update(None, mock.ANY)
def test_lsn_port_update_raise_error(self):
self.mock_lsn_api.lsn_port_host_entries_update.side_effect = (
NsxApiException)
self.assertRaises(p_exc.PortConfigurationError,
self.manager.lsn_port_update,
mock.ANY, mock.ANY, mock.ANY, mock.ANY)
class PersistentLsnManagerTestCase(base.BaseTestCase):
def setUp(self):
super(PersistentLsnManagerTestCase, self).setUp()
self.net_id = 'foo_network_id'
self.sub_id = 'foo_subnet_id'
self.port_id = 'foo_port_id'
self.lsn_id = 'foo_lsn_id'
self.mac = 'aa:bb:cc:dd:ee:ff'
self.lsn_port_id = 'foo_lsn_port_id'
self.tenant_id = 'foo_tenant_id'
db.configure_db()
nsx.register_dhcp_opts(cfg)
nsx.register_metadata_opts(cfg)
lsn_man.register_lsn_opts(cfg)
self.manager = lsn_man.PersistentLsnManager(mock.Mock())
self.context = context.get_admin_context()
self.mock_lsn_api_p = mock.patch.object(lsn_man, 'lsn_api')
self.mock_lsn_api = self.mock_lsn_api_p.start()
self.addCleanup(self.mock_lsn_api_p.stop)
self.addCleanup(db.clear_db)
def test_lsn_get(self):
lsn_db.lsn_add(self.context, self.net_id, self.lsn_id)
result = self.manager.lsn_get(self.context, self.net_id)
self.assertEqual(self.lsn_id, result)
def test_lsn_get_raise_not_found(self):
self.assertRaises(p_exc.LsnNotFound,
self.manager.lsn_get, self.context, self.net_id)
def test_lsn_get_silent_not_found(self):
result = self.manager.lsn_get(
self.context, self.net_id, raise_on_err=False)
self.assertIsNone(result)
def test_lsn_get_sync_on_missing(self):
cfg.CONF.set_override('sync_on_missing_data', True, 'NSX_LSN')
self.manager = lsn_man.PersistentLsnManager(mock.Mock())
with mock.patch.object(self.manager, 'lsn_save') as f:
self.manager.lsn_get(self.context, self.net_id, raise_on_err=True)
self.assertTrue(self.mock_lsn_api.lsn_for_network_get.call_count)
self.assertTrue(f.call_count)
def test_lsn_save(self):
self.manager.lsn_save(self.context, self.net_id, self.lsn_id)
result = self.manager.lsn_get(self.context, self.net_id)
self.assertEqual(self.lsn_id, result)
def test_lsn_create(self):
self.mock_lsn_api.lsn_for_network_create.return_value = self.lsn_id
with mock.patch.object(self.manager, 'lsn_save') as f:
result = self.manager.lsn_create(self.context, self.net_id)
self.assertTrue(
self.mock_lsn_api.lsn_for_network_create.call_count)
self.assertTrue(f.call_count)
self.assertEqual(self.lsn_id, result)
def test_lsn_create_failure(self):
with mock.patch.object(
self.manager, 'lsn_save',
side_effect=p_exc.NsxPluginException(err_msg='')):
self.assertRaises(p_exc.NsxPluginException,
self.manager.lsn_create,
self.context, self.net_id)
self.assertTrue(self.mock_lsn_api.lsn_delete.call_count)
def test_lsn_delete(self):
self.mock_lsn_api.lsn_for_network_create.return_value = self.lsn_id
self.manager.lsn_create(self.context, self.net_id)
self.manager.lsn_delete(self.context, self.lsn_id)
self.assertIsNone(self.manager.lsn_get(
self.context, self.net_id, raise_on_err=False))
def test_lsn_delete_not_existent(self):
self.manager.lsn_delete(self.context, self.lsn_id)
self.assertTrue(self.mock_lsn_api.lsn_delete.call_count)
def test_lsn_port_get(self):
lsn_db.lsn_add(self.context, self.net_id, self.lsn_id)
lsn_db.lsn_port_add_for_lsn(self.context, self.lsn_port_id,
self.sub_id, self.mac, self.lsn_id)
res = self.manager.lsn_port_get(self.context, self.net_id, self.sub_id)
self.assertEqual((self.lsn_id, self.lsn_port_id), res)
def test_lsn_port_get_raise_not_found(self):
self.assertRaises(p_exc.LsnPortNotFound,
self.manager.lsn_port_get,
self.context, self.net_id, self.sub_id)
def test_lsn_port_get_silent_not_found(self):
result = self.manager.lsn_port_get(
self.context, self.net_id, self.sub_id, raise_on_err=False)
self.assertEqual((None, None), result)
def test_lsn_port_get_sync_on_missing(self):
return
cfg.CONF.set_override('sync_on_missing_data', True, 'NSX_LSN')
self.manager = lsn_man.PersistentLsnManager(mock.Mock())
self.mock_lsn_api.lsn_for_network_get.return_value = self.lsn_id
self.mock_lsn_api.lsn_port_by_subnet_get.return_value = (
self.lsn_id, self.lsn_port_id)
with mock.patch.object(self.manager, 'lsn_save') as f:
with mock.patch.object(self.manager, 'lsn_port_save') as g:
self.manager.lsn_port_get(
self.context, self.net_id, self.sub_id)
self.assertTrue(
self.mock_lsn_api.lsn_port_by_subnet_get.call_count)
self.assertTrue(
self.mock_lsn_api.lsn_port_info_get.call_count)
self.assertTrue(f.call_count)
self.assertTrue(g.call_count)
def test_lsn_port_get_by_mac(self):
lsn_db.lsn_add(self.context, self.net_id, self.lsn_id)
lsn_db.lsn_port_add_for_lsn(self.context, self.lsn_port_id,
self.sub_id, self.mac, self.lsn_id)
res = self.manager.lsn_port_get_by_mac(
self.context, self.net_id, self.mac)
self.assertEqual((self.lsn_id, self.lsn_port_id), res)
def test_lsn_port_get_by_mac_raise_not_found(self):
self.assertRaises(p_exc.LsnPortNotFound,
self.manager.lsn_port_get_by_mac,
self.context, self.net_id, self.sub_id)
def test_lsn_port_get_by_mac_silent_not_found(self):
result = self.manager.lsn_port_get_by_mac(
self.context, self.net_id, self.sub_id, raise_on_err=False)
self.assertEqual((None, None), result)
def test_lsn_port_create(self):
lsn_db.lsn_add(self.context, self.net_id, self.lsn_id)
self.mock_lsn_api.lsn_port_create.return_value = self.lsn_port_id
subnet = {'subnet_id': self.sub_id, 'mac_address': self.mac}
with mock.patch.object(self.manager, 'lsn_port_save') as f:
result = self.manager.lsn_port_create(
self.context, self.net_id, subnet)
self.assertTrue(
self.mock_lsn_api.lsn_port_create.call_count)
self.assertTrue(f.call_count)
self.assertEqual(self.lsn_port_id, result)
def test_lsn_port_create_failure(self):
subnet = {'subnet_id': self.sub_id, 'mac_address': self.mac}
with mock.patch.object(
self.manager, 'lsn_port_save',
side_effect=p_exc.NsxPluginException(err_msg='')):
self.assertRaises(p_exc.NsxPluginException,
self.manager.lsn_port_create,
self.context, self.net_id, subnet)
self.assertTrue(self.mock_lsn_api.lsn_port_delete.call_count)
def test_lsn_port_delete(self):
lsn_db.lsn_add(self.context, self.net_id, self.lsn_id)
lsn_db.lsn_port_add_for_lsn(self.context, self.lsn_port_id,
self.sub_id, self.mac, self.lsn_id)
self.manager.lsn_port_delete(
self.context, self.lsn_id, self.lsn_port_id)
self.assertEqual((None, None), self.manager.lsn_port_get(
self.context, self.lsn_id, self.sub_id, raise_on_err=False))
def test_lsn_port_delete_not_existent(self):
self.manager.lsn_port_delete(
self.context, self.lsn_id, self.lsn_port_id)
self.assertTrue(self.mock_lsn_api.lsn_port_delete.call_count)
def test_lsn_port_save(self):
self.manager.lsn_save(self.context, self.net_id, self.lsn_id)
self.manager.lsn_port_save(self.context, self.lsn_port_id,
self.sub_id, self.mac, self.lsn_id)
result = self.manager.lsn_port_get(
self.context, self.net_id, self.sub_id, raise_on_err=False)
self.assertEqual((self.lsn_id, self.lsn_port_id), result)
class DhcpAgentNotifyAPITestCase(base.BaseTestCase):
def setUp(self):
super(DhcpAgentNotifyAPITestCase, self).setUp()
self.notifier = nsx.DhcpAgentNotifyAPI(mock.Mock(), mock.Mock())
self.plugin = self.notifier.plugin
self.lsn_manager = self.notifier.lsn_manager
def _test_notify_port_update(
self, ports, expected_count, expected_args=None):
port = {
'id': 'foo_port_id',
'network_id': 'foo_network_id',
'fixed_ips': [{'subnet_id': 'foo_subnet_id'}]
}
self.notifier.plugin.get_ports.return_value = ports
self.notifier.notify(mock.ANY, {'port': port}, 'port.update.end')
self.lsn_manager.lsn_port_update.assert_has_calls(expected_args)
def test_notify_ports_update_no_ports(self):
self._test_notify_port_update(None, 0, [])
self._test_notify_port_update([], 0, [])
def test_notify_ports_update_one_port(self):
ports = [{
'fixed_ips': [{'subnet_id': 'foo_subnet_id',
'ip_address': '1.2.3.4'}],
'device_id': 'foo_device_id',
'device_owner': 'foo_device_owner',
'mac_address': 'fa:16:3e:da:1d:46'
}]
call_args = mock.call(
mock.ANY, 'foo_network_id', 'foo_subnet_id',
dhcp=[{'ip_address': '1.2.3.4',
'mac_address': 'fa:16:3e:da:1d:46'}],
meta=[{'instance_id': 'foo_device_id',
'ip_address': '1.2.3.4'}])
self._test_notify_port_update(ports, 1, call_args)
def test_notify_ports_update_ports_with_empty_device_id(self):
ports = [{
'fixed_ips': [{'subnet_id': 'foo_subnet_id',
'ip_address': '1.2.3.4'}],
'device_id': '',
'device_owner': 'foo_device_owner',
'mac_address': 'fa:16:3e:da:1d:46'
}]
call_args = mock.call(
mock.ANY, 'foo_network_id', 'foo_subnet_id',
dhcp=[{'ip_address': '1.2.3.4',
'mac_address': 'fa:16:3e:da:1d:46'}],
meta=[]
)
self._test_notify_port_update(ports, 1, call_args)
def test_notify_ports_update_ports_with_no_fixed_ips(self):
ports = [{
'fixed_ips': [],
'device_id': 'foo_device_id',
'device_owner': 'foo_device_owner',
'mac_address': 'fa:16:3e:da:1d:46'
}]
call_args = mock.call(
mock.ANY, 'foo_network_id', 'foo_subnet_id', dhcp=[], meta=[])
self._test_notify_port_update(ports, 1, call_args)
def test_notify_ports_update_ports_with_no_fixed_ips_and_no_device(self):
ports = [{
'fixed_ips': [],
'device_id': '',
'device_owner': 'foo_device_owner',
'mac_address': 'fa:16:3e:da:1d:46'
}]
call_args = mock.call(
mock.ANY, 'foo_network_id', 'foo_subnet_id', dhcp=[], meta=[])
self._test_notify_port_update(ports, 0, call_args)
def test_notify_ports_update_with_special_ports(self):
ports = [{'fixed_ips': [],
'device_id': '',
'device_owner': n_consts.DEVICE_OWNER_DHCP,
'mac_address': 'fa:16:3e:da:1d:46'},
{'fixed_ips': [{'subnet_id': 'foo_subnet_id',
'ip_address': '1.2.3.4'}],
'device_id': 'foo_device_id',
'device_owner': n_consts.DEVICE_OWNER_ROUTER_GW,
'mac_address': 'fa:16:3e:da:1d:46'}]
call_args = mock.call(
mock.ANY, 'foo_network_id', 'foo_subnet_id', dhcp=[], meta=[])
self._test_notify_port_update(ports, 0, call_args)
def test_notify_ports_update_many_ports(self):
ports = [{'fixed_ips': [],
'device_id': '',
'device_owner': 'foo_device_owner',
'mac_address': 'fa:16:3e:da:1d:46'},
{'fixed_ips': [{'subnet_id': 'foo_subnet_id',
'ip_address': '1.2.3.4'}],
'device_id': 'foo_device_id',
'device_owner': 'foo_device_owner',
'mac_address': 'fa:16:3e:da:1d:46'}]
call_args = mock.call(
mock.ANY, 'foo_network_id', 'foo_subnet_id',
dhcp=[{'ip_address': '1.2.3.4',
'mac_address': 'fa:16:3e:da:1d:46'}],
meta=[{'instance_id': 'foo_device_id',
'ip_address': '1.2.3.4'}])
self._test_notify_port_update(ports, 1, call_args)
def _test_notify_subnet_action(self, action):
with mock.patch.object(self.notifier, '_subnet_%s' % action) as f:
self.notifier._handle_subnet_dhcp_access[action] = f
subnet = {'subnet': mock.ANY}
self.notifier.notify(
mock.ANY, subnet, 'subnet.%s.end' % action)
f.assert_called_once_with(mock.ANY, subnet)
def test_notify_subnet_create(self):
self._test_notify_subnet_action('create')
def test_notify_subnet_update(self):
self._test_notify_subnet_action('update')
def test_notify_subnet_delete(self):
self._test_notify_subnet_action('delete')
def _test_subnet_create(self, enable_dhcp, exc=None,
exc_obj=None, call_notify=True):
subnet = {
'id': 'foo_subnet_id',
'enable_dhcp': enable_dhcp,
'network_id': 'foo_network_id',
'tenant_id': 'foo_tenant_id',
'cidr': '0.0.0.0/0'
}
if exc:
self.plugin.create_port.side_effect = exc_obj or exc
self.assertRaises(exc,
self.notifier.notify,
mock.ANY,
{'subnet': subnet},
'subnet.create.end')
self.plugin.delete_subnet.assert_called_with(
mock.ANY, subnet['id'])
else:
if call_notify:
self.notifier.notify(
mock.ANY, {'subnet': subnet}, 'subnet.create.end')
if enable_dhcp:
dhcp_port = {
'name': '',
'admin_state_up': True,
'network_id': 'foo_network_id',
'tenant_id': 'foo_tenant_id',
'device_owner': n_consts.DEVICE_OWNER_DHCP,
'mac_address': mock.ANY,
'fixed_ips': [{'subnet_id': 'foo_subnet_id'}],
'device_id': ''
}
self.plugin.create_port.assert_called_once_with(
mock.ANY, {'port': dhcp_port})
else:
self.assertEqual(0, self.plugin.create_port.call_count)
def test_subnet_create_enabled_dhcp(self):
self._test_subnet_create(True)
def test_subnet_create_disabled_dhcp(self):
self._test_subnet_create(False)
def test_subnet_create_raise_port_config_error(self):
with mock.patch.object(nsx.db_base_plugin_v2.NeutronDbPluginV2,
'delete_port') as d:
self._test_subnet_create(
True,
exc=n_exc.Conflict,
exc_obj=p_exc.PortConfigurationError(lsn_id='foo_lsn_id',
net_id='foo_net_id',
port_id='foo_port_id'))
d.assert_called_once_with(self.plugin, mock.ANY, 'foo_port_id')
def test_subnet_update(self):
subnet = {
'id': 'foo_subnet_id',
'network_id': 'foo_network_id',
}
self.lsn_manager.lsn_port_get.return_value = ('foo_lsn_id',
'foo_lsn_port_id')
self.notifier.notify(
mock.ANY, {'subnet': subnet}, 'subnet.update.end')
self.lsn_manager.lsn_port_dhcp_configure.assert_called_once_with(
mock.ANY, 'foo_lsn_id', 'foo_lsn_port_id', subnet)
def test_subnet_update_raise_lsn_not_found(self):
subnet = {
'id': 'foo_subnet_id',
'network_id': 'foo_network_id',
}
self.lsn_manager.lsn_port_get.side_effect = (
p_exc.LsnNotFound(entity='network',
entity_id=subnet['network_id']))
self.assertRaises(p_exc.LsnNotFound,
self.notifier.notify,
mock.ANY, {'subnet': subnet}, 'subnet.update.end')
def _test_subnet_update_lsn_port_not_found(self, dhcp_port):
subnet = {
'id': 'foo_subnet_id',
'enable_dhcp': True,
'network_id': 'foo_network_id',
'tenant_id': 'foo_tenant_id'
}
self.lsn_manager.lsn_port_get.side_effect = (
p_exc.LsnPortNotFound(lsn_id='foo_lsn_id',
entity='subnet',
entity_id=subnet['id']))
self.notifier.plugin.get_ports.return_value = dhcp_port
count = 0 if dhcp_port is None else 1
with mock.patch.object(nsx, 'handle_port_dhcp_access') as h:
self.notifier.notify(
mock.ANY, {'subnet': subnet}, 'subnet.update.end')
self.assertEqual(count, h.call_count)
if not dhcp_port:
self._test_subnet_create(enable_dhcp=True,
exc=None, call_notify=False)
def test_subnet_update_lsn_port_not_found_without_dhcp_port(self):
self._test_subnet_update_lsn_port_not_found(None)
def test_subnet_update_lsn_port_not_found_with_dhcp_port(self):
self._test_subnet_update_lsn_port_not_found([mock.ANY])
def _test_subnet_delete(self, ports=None):
subnet = {
'id': 'foo_subnet_id',
'network_id': 'foo_network_id',
'cidr': '0.0.0.0/0'
}
self.plugin.get_ports.return_value = ports
self.notifier.notify(mock.ANY, {'subnet': subnet}, 'subnet.delete.end')
filters = {
'network_id': [subnet['network_id']],
'device_owner': [n_consts.DEVICE_OWNER_DHCP]
}
self.plugin.get_ports.assert_called_once_with(
mock.ANY, filters=filters)
if ports:
self.plugin.delete_port.assert_called_once_with(
mock.ANY, ports[0]['id'])
else:
self.assertEqual(0, self.plugin.delete_port.call_count)
def test_subnet_delete_enabled_dhcp_no_ports(self):
self._test_subnet_delete()
def test_subnet_delete_enabled_dhcp_with_dhcp_port(self):
self._test_subnet_delete([{'id': 'foo_port_id'}])
class DhcpTestCase(base.BaseTestCase):
def setUp(self):
super(DhcpTestCase, self).setUp()
self.plugin = mock.Mock()
self.plugin.lsn_manager = mock.Mock()
def test_handle_create_network(self):
network = {'id': 'foo_network_id'}
nsx.handle_network_dhcp_access(
self.plugin, mock.ANY, network, 'create_network')
self.plugin.lsn_manager.lsn_create.assert_called_once_with(
mock.ANY, network['id'])
def test_handle_delete_network(self):
network_id = 'foo_network_id'
self.plugin.lsn_manager.lsn_delete_by_network.return_value = (
'foo_lsn_id')
nsx.handle_network_dhcp_access(
self.plugin, mock.ANY, network_id, 'delete_network')
self.plugin.lsn_manager.lsn_delete_by_network.assert_called_once_with(
mock.ANY, 'foo_network_id')
def _test_handle_create_dhcp_owner_port(self, exc=None):
subnet = {
'cidr': '0.0.0.0/0',
'id': 'foo_subnet_id'
}
port = {
'id': 'foo_port_id',
'device_owner': n_consts.DEVICE_OWNER_DHCP,
'mac_address': 'aa:bb:cc:dd:ee:ff',
'network_id': 'foo_network_id',
'fixed_ips': [{'subnet_id': subnet['id']}]
}
expected_data = {
'subnet_id': subnet['id'],
'ip_address': subnet['cidr'],
'mac_address': port['mac_address']
}
self.plugin.get_subnet.return_value = subnet
if exc is None:
nsx.handle_port_dhcp_access(
self.plugin, mock.ANY, port, 'create_port')
(self.plugin.lsn_manager.lsn_port_dhcp_setup.
assert_called_once_with(mock.ANY, port['network_id'],
port['id'], expected_data, subnet))
else:
self.plugin.lsn_manager.lsn_port_dhcp_setup.side_effect = exc
self.assertRaises(n_exc.NeutronException,
nsx.handle_port_dhcp_access,
self.plugin, mock.ANY, port, 'create_port')
def test_handle_create_dhcp_owner_port(self):
self._test_handle_create_dhcp_owner_port()
def test_handle_create_dhcp_owner_port_raise_port_config_error(self):
config_error = p_exc.PortConfigurationError(lsn_id='foo_lsn_id',
net_id='foo_net_id',
port_id='foo_port_id')
self._test_handle_create_dhcp_owner_port(exc=config_error)
def test_handle_delete_dhcp_owner_port(self):
port = {
'id': 'foo_port_id',
'device_owner': n_consts.DEVICE_OWNER_DHCP,
'network_id': 'foo_network_id',
'fixed_ips': [],
'mac_address': 'aa:bb:cc:dd:ee:ff'
}
nsx.handle_port_dhcp_access(self.plugin, mock.ANY, port, 'delete_port')
self.plugin.lsn_manager.lsn_port_dispose.assert_called_once_with(
mock.ANY, port['network_id'], port['mac_address'])
def _test_handle_user_port(self, action, handler):
port = {
'id': 'foo_port_id',
'device_owner': 'foo_device_owner',
'network_id': 'foo_network_id',
'mac_address': 'aa:bb:cc:dd:ee:ff',
'fixed_ips': [{'subnet_id': 'foo_subnet_id',
'ip_address': '1.2.3.4'}]
}
expected_data = {
'ip_address': '1.2.3.4',
'mac_address': 'aa:bb:cc:dd:ee:ff'
}
self.plugin.get_subnet.return_value = {'enable_dhcp': True}
nsx.handle_port_dhcp_access(self.plugin, mock.ANY, port, action)
handler.assert_called_once_with(
mock.ANY, port['network_id'], 'foo_subnet_id', expected_data)
def test_handle_create_user_port(self):
self._test_handle_user_port(
'create_port', self.plugin.lsn_manager.lsn_port_dhcp_host_add)
def test_handle_delete_user_port(self):
self._test_handle_user_port(
'delete_port', self.plugin.lsn_manager.lsn_port_dhcp_host_remove)
def _test_handle_user_port_disabled_dhcp(self, action, handler):
port = {
'id': 'foo_port_id',
'device_owner': 'foo_device_owner',
'network_id': 'foo_network_id',
'mac_address': 'aa:bb:cc:dd:ee:ff',
'fixed_ips': [{'subnet_id': 'foo_subnet_id',
'ip_address': '1.2.3.4'}]
}
self.plugin.get_subnet.return_value = {'enable_dhcp': False}
nsx.handle_port_dhcp_access(self.plugin, mock.ANY, port, action)
self.assertEqual(0, handler.call_count)
def test_handle_create_user_port_disabled_dhcp(self):
self._test_handle_user_port_disabled_dhcp(
'create_port', self.plugin.lsn_manager.lsn_port_dhcp_host_add)
def test_handle_delete_user_port_disabled_dhcp(self):
self._test_handle_user_port_disabled_dhcp(
'delete_port', self.plugin.lsn_manager.lsn_port_dhcp_host_remove)
def _test_handle_user_port_no_fixed_ips(self, action, handler):
port = {
'id': 'foo_port_id',
'device_owner': 'foo_device_owner',
'network_id': 'foo_network_id',
'fixed_ips': []
}
nsx.handle_port_dhcp_access(self.plugin, mock.ANY, port, action)
self.assertEqual(0, handler.call_count)
def test_handle_create_user_port_no_fixed_ips(self):
self._test_handle_user_port_no_fixed_ips(
'create_port', self.plugin.lsn_manager.lsn_port_dhcp_host_add)
def test_handle_delete_user_port_no_fixed_ips(self):
self._test_handle_user_port_no_fixed_ips(
'delete_port', self.plugin.lsn_manager.lsn_port_dhcp_host_remove)
class MetadataTestCase(base.BaseTestCase):
def setUp(self):
super(MetadataTestCase, self).setUp()
self.plugin = mock.Mock()
self.plugin.lsn_manager = mock.Mock()
def _test_handle_port_metadata_access_special_owners(
self, owner, dev_id='foo_device_id', ips=None):
port = {
'id': 'foo_port_id',
'device_owner': owner,
'device_id': dev_id,
'fixed_ips': ips or []
}
nsx.handle_port_metadata_access(self.plugin, mock.ANY, port, mock.ANY)
self.assertFalse(
self.plugin.lsn_manager.lsn_port_meta_host_add.call_count)
self.assertFalse(
self.plugin.lsn_manager.lsn_port_meta_host_remove.call_count)
def test_handle_port_metadata_access_external_network(self):
port = {
'id': 'foo_port_id',
'device_owner': 'foo_device_owner',
'device_id': 'foo_device_id',
'network_id': 'foo_network_id',
'fixed_ips': [{'subnet_id': 'foo_subnet'}]
}
self.plugin.get_network.return_value = {'router:external': True}
nsx.handle_port_metadata_access(self.plugin, mock.ANY, port, mock.ANY)
self.assertFalse(
self.plugin.lsn_manager.lsn_port_meta_host_add.call_count)
self.assertFalse(
self.plugin.lsn_manager.lsn_port_meta_host_remove.call_count)
def test_handle_port_metadata_access_dhcp_port(self):
        self._test_handle_port_metadata_access_special_owners(
            n_consts.DEVICE_OWNER_DHCP, ips=[{'subnet_id': 'foo_subnet'}])
def test_handle_port_metadata_access_router_port(self):
        self._test_handle_port_metadata_access_special_owners(
            n_consts.DEVICE_OWNER_ROUTER_INTF, ips=[{'subnet_id': 'foo_subnet'}])
def test_handle_port_metadata_access_no_device_id(self):
self._test_handle_port_metadata_access_special_owners(
n_consts.DEVICE_OWNER_DHCP, '')
def test_handle_port_metadata_access_no_fixed_ips(self):
self._test_handle_port_metadata_access_special_owners(
'foo', 'foo', None)
def _test_handle_port_metadata_access(self, is_delete, raise_exc=False):
port = {
'id': 'foo_port_id',
'device_owner': 'foo_device_id',
'network_id': 'foo_network_id',
'device_id': 'foo_device_id',
'tenant_id': 'foo_tenant_id',
'fixed_ips': [
{'subnet_id': 'foo_subnet_id', 'ip_address': '1.2.3.4'}
]
}
meta = {
'instance_id': port['device_id'],
'tenant_id': port['tenant_id'],
'ip_address': port['fixed_ips'][0]['ip_address']
}
self.plugin.get_network.return_value = {'router:external': False}
if is_delete:
mock_func = self.plugin.lsn_manager.lsn_port_meta_host_remove
else:
mock_func = self.plugin.lsn_manager.lsn_port_meta_host_add
if raise_exc:
mock_func.side_effect = p_exc.PortConfigurationError(
lsn_id='foo_lsn_id', net_id='foo_net_id', port_id=None)
with mock.patch.object(nsx.db_base_plugin_v2.NeutronDbPluginV2,
'delete_port') as d:
self.assertRaises(p_exc.PortConfigurationError,
nsx.handle_port_metadata_access,
self.plugin, mock.ANY, port,
is_delete=is_delete)
if not is_delete:
d.assert_called_once_with(mock.ANY, mock.ANY, port['id'])
else:
self.assertFalse(d.call_count)
else:
nsx.handle_port_metadata_access(
self.plugin, mock.ANY, port, is_delete=is_delete)
mock_func.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY, meta)
def test_handle_port_metadata_access_on_delete_true(self):
self._test_handle_port_metadata_access(True)
def test_handle_port_metadata_access_on_delete_false(self):
self._test_handle_port_metadata_access(False)
def test_handle_port_metadata_access_on_delete_true_raise(self):
self._test_handle_port_metadata_access(True, raise_exc=True)
def test_handle_port_metadata_access_on_delete_false_raise(self):
self._test_handle_port_metadata_access(False, raise_exc=True)
def _test_handle_router_metadata_access(
self, is_port_found, raise_exc=False):
subnet = {
'id': 'foo_subnet_id',
'network_id': 'foo_network_id'
}
interface = {
'subnet_id': subnet['id'],
'port_id': 'foo_port_id'
}
mock_func = self.plugin.lsn_manager.lsn_metadata_configure
if not is_port_found:
self.plugin.get_port.side_effect = n_exc.NotFound
if raise_exc:
with mock.patch.object(nsx.l3_db.L3_NAT_db_mixin,
'remove_router_interface') as d:
mock_func.side_effect = p_exc.NsxPluginException(err_msg='')
self.assertRaises(p_exc.NsxPluginException,
nsx.handle_router_metadata_access,
self.plugin, mock.ANY, 'foo_router_id',
interface)
d.assert_called_once_with(mock.ANY, mock.ANY, 'foo_router_id',
interface)
else:
nsx.handle_router_metadata_access(
self.plugin, mock.ANY, 'foo_router_id', interface)
mock_func.assert_called_once_with(
mock.ANY, subnet['id'], is_port_found)
def test_handle_router_metadata_access_add_interface(self):
self._test_handle_router_metadata_access(True)
def test_handle_router_metadata_access_delete_interface(self):
self._test_handle_router_metadata_access(False)
def test_handle_router_metadata_access_raise_error_on_add(self):
self._test_handle_router_metadata_access(True, raise_exc=True)
def test_handle_router_metadata_access_raise_error_on_delete(self):
        self._test_handle_router_metadata_access(False, raise_exc=True)
|
{
"content_hash": "b356410be7265fd10726bd9d2b4fab51",
"timestamp": "",
"source": "github",
"line_count": 1388,
"max_line_length": 79,
"avg_line_length": 43.75864553314121,
"alnum_prop": 0.5721224294910845,
"repo_name": "vijayendrabvs/hap",
"id": "5b737bde90352ce437b2b8ada599fcfe216cfd3c",
"size": "61315",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "neutron/tests/unit/vmware/test_dhcpmeta.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "37307"
},
{
"name": "JavaScript",
"bytes": "67930"
},
{
"name": "Python",
"bytes": "8801288"
},
{
"name": "Shell",
"bytes": "8920"
},
{
"name": "XSLT",
"bytes": "50907"
}
],
"symlink_target": ""
}
|
"""
This is an example of a quasi-harmonic calculation for a hexagonal system using a quadratic polynomial.
"""
if __name__ == "__main__":
from pyqha import RY_KBAR
from pyqha import gen_TT, read_Etot, read_dos_geo, compute_thermo_geo, read_thermo, rearrange_thermo, fitFvib, write_celldmsT, write_alphaT
from pyqha import simple_plot_xy, plot_Etot, plot_Etot_contour
# this part is for calculating the thermodynamic properties from the dos
fdos="dos_files/output_dos.dat.g" # base name for the dos files (numbers will be added as postfix)
fthermo = "thermo" # base name for the output files (numbers will be added as postfix)
ngeo = 25 # this is the number of volumes for which a dos has been calculated
#TT =gen_TT(1,1000) # generate the numpy array of temperatures for which the properties will be calculated
#T, Evib, Fvib, Svib, Cvib, ZPE, modes = compute_thermo_geo(fdos,fthermo,ngeo,TT)
#nT = len(T)
# Alternatively, read the thermodynamic data from files if you have already
# done the calculations
T1, Evib1, Fvib1, Svib1, Cvib1 = read_thermo( fthermo, ngeo )
nT, T, Evib, Fvib, Svib, Cvib = rearrange_thermo( T1, Evib1, Fvib1, Svib1, Cvib1, ngeo )
fEtot = "./Etot.dat"
thermodata = nT, T, Evib, Fvib, Svib, Cvib
TT, Fmin, celldmsminT, alphaT, a0, chi, aT, chi = fitFvib(fEtot,thermodata,minoptions={'gtol': 1e-7})
fig1 = simple_plot_xy(TT,Fmin,xlabel="T (K)",ylabel="Fmin (Ry/cell)")
fig2 = simple_plot_xy(TT,celldmsminT[:,0],xlabel="T (K)",ylabel="a_min (a.u.)")
fig3 = simple_plot_xy(TT,celldmsminT[:,2],xlabel="T (K)",ylabel="c_min (a.u.)")
fig4 = simple_plot_xy(TT,celldmsminT[:,2]/celldmsminT[:,0],xlabel="T (K)",ylabel="c/a ")
fig5 = simple_plot_xy(TT,alphaT[:,0],xlabel="T (K)",ylabel="alpha_xx (1/K)")
fig6 = simple_plot_xy(TT,alphaT[:,2],xlabel="T (K)",ylabel="alpha_zz (1/K)")
# write a(T) and c(T) on a file
write_celldmsT("celldmminT",T,celldmsminT,ibrav=4)
# write alpha_xx(T) and alpha_zz(T) on a file
write_alphaT("alphaT",T,alphaT,ibrav=4)
    # Plot several quantities at the temperature TT[998] as an example
    celldmsx, Ex = read_Etot(fEtot) # fitFvib does not return the Etot data, so read them from the original file
    iT = 998 # index into the temperatures array, not the temperature itself
print("T= ",TT[iT]," (K)")
# 3D plot only with fitted energy (Etot+Fvib)
fig7 = plot_Etot(celldmsx,Ex=None,n=(5,0,5),nmesh=(50,0,50),fittype="quadratic",ibrav=4,a=a0+aT[iT])
# 3D plot fitted energy and points
fig8 = plot_Etot(celldmsx,Ex+Fvib[iT],n=(5,0,5),nmesh=(50,0,50),fittype="quadratic",ibrav=4,a=a0+aT[iT])
# 3D plot with fitted energy Fvib only
fig9 = plot_Etot(celldmsx,Ex=None,n=(5,0,5),nmesh=(50,0,50),fittype="quadratic",ibrav=4,a=aT[iT])
# 2D contour plot with fitted energy (Etot+Fvib)
fig10 = plot_Etot_contour(celldmsx,nmesh=(50,0,50),fittype="quadratic",ibrav=4,a=a0+aT[iT])
# 2D contour plot with fitted energy Fvib only
fig11 = plot_Etot_contour(celldmsx,nmesh=(50,0,50),fittype="quadratic",ibrav=4,a=aT[iT])
# Save all plots
fig1.savefig("figure_1.png")
fig2.savefig("figure_2.png")
fig3.savefig("figure_3.png")
fig4.savefig("figure_4.png")
fig5.savefig("figure_5.png")
fig6.savefig("figure_6.png")
fig7.savefig("figure_7.png")
fig8.savefig("figure_8.png")
fig9.savefig("figure_9.png")
fig10.savefig("figure_10.png")
fig11.savefig("figure_11.png")
|
{
"content_hash": "1dfbad07cd9a1b17464ca7f2fc50fbc8",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 143,
"avg_line_length": 51.08571428571429,
"alnum_prop": 0.660234899328859,
"repo_name": "mauropalumbo75/pyqha",
"id": "d5057dc2485620fe547c28a4fbefd9aa8f303247",
"size": "3616",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/example6/example6.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2126"
},
{
"name": "Python",
"bytes": "162597"
}
],
"symlink_target": ""
}
|
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
keywords = {"refnames": git_refnames, "full": git_full}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440-branch-based"
cfg.tag_prefix = "v"
cfg.parentdir_prefix = "conda-testenv-"
cfg.versionfile_source = "conda_testenv/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
return None
return stdout
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes
both the project name and a version string.
"""
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%s', but '%s' doesn't start with "
"prefix '%s'" % (root, dirname, parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None}
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = [r.strip() for r in refnames.strip("()").split(",")]
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(set(refs) - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None, "branch": None
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags",
"branch": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %s" % root)
raise NotThisMethod("no .git directory")
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM). Note, for git v1.7
# and below, it is necessary to run "git update-index --refresh" first.
describe_out = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# abbrev-ref available with git >= 1.7
branch_name = run_command(GITS, ["rev-parse", "--abbrev-ref", "HEAD"],
cwd=root).strip()
if branch_name == 'HEAD':
branches = run_command(GITS, ["branch", "--contains"],
cwd=root).split('\n')
branches = [branch[2:] for branch in branches if branch[4:5] != '(']
if 'master' in branches:
branch_name = 'master'
elif not branches:
branch_name = None
else:
# Pick the first branch that is returned. Good or bad.
branch_name = branches[0]
branch_name = branch_name.replace(' ', '.').replace('(', '').replace(')', '')
pieces['branch'] = branch_name
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out or 1) # total number of commits
return pieces
# Default matches v1.2.x, maint/1.2.x, 1.2.x, 1.x etc.
default_maint_branch_regexp = r".*([0-9]+\.)+x$"
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
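# Illustrative example (hypothetical pieces): render_pep440({"closest-tag": "1.2.0",
# "distance": 3, "short": "abc1234", "dirty": True}) == "1.2.0+3.gabc1234.dirty"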
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
    Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def add_one_to_version(version_string, number_index_to_increment=-1):
"""
Add one to a version string at the given numeric indices.
>>> add_one_to_version('v1.2.3')
'v1.2.4'
"""
# Break up the tag by number groups (preserving multi-digit
# numbers as multidigit)
parts = re.split("([0-9]+)", version_string)
digit_parts = [(i, part) for i, part in enumerate(parts)
if part.isdigit()]
# Deal with negative indexing.
increment_at_index = ((number_index_to_increment + len(digit_parts))
% len(digit_parts))
for n_seen, (i, part) in enumerate(digit_parts):
if n_seen == increment_at_index:
parts[i] = str(int(part) + 1)
elif n_seen > increment_at_index:
parts[i] = '0'
return ''.join(parts)
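# A further illustrative case (hypothetical input): bumping the minor number
# zeroes the parts after it, e.g. add_one_to_version('v1.2.3', -2) == 'v1.3.0'.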
def render_pep440_branch_based(pieces):
# [TAG+1 of minor number][.devDISTANCE][+gHEX]. The git short is
# included for dirty.
# exceptions:
# 1: no tags. 0.0.0.devDISTANCE[+gHEX]
master = pieces.get('branch') == 'master'
maint = re.match(default_maint_branch_regexp,
pieces.get('branch') or '')
# If we are on a tag, just pep440-pre it.
if pieces["closest-tag"] and not (pieces["distance"] or
pieces["dirty"]):
rendered = pieces["closest-tag"]
else:
# Put a default closest-tag in.
if not pieces["closest-tag"]:
pieces["closest-tag"] = '0.0.0'
if pieces["distance"] or pieces["dirty"]:
if maint:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post%d" % pieces["distance"]
else:
rendered = add_one_to_version(pieces["closest-tag"])
if pieces["distance"]:
rendered += ".dev%d" % pieces["distance"]
            # Put the branch name in if it isn't master or a
# maintenance branch.
plus = '+'
if not (master or maint):
rendered += "%s%s" % (plus,
pieces.get('branch') or
'unknown_branch')
plus = '_'
if pieces["dirty"]:
rendered += "%sg%s" % (plus, pieces["short"])
else:
rendered = pieces["closest-tag"]
return rendered
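# Illustrative examples (hypothetical pieces): on master with closest-tag
# "1.2.0", distance 5 and a clean tree this renders "1.2.1.dev5"; on a
# non-master, non-maintenance branch the branch name is additionally appended
# as a local version label (e.g. "1.2.1.dev5+feature-x").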
STYLES = {'default': render_pep440,
'pep440': render_pep440,
'pep440-pre': render_pep440_pre,
'pep440-post': render_pep440_post,
'pep440-old': render_pep440_old,
'git-describe': render_git_describe,
'git-describe-long': render_git_describe_long,
'pep440-branch-based': render_pep440_branch_based,
}
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"]}
if not style:
style = 'default'
renderer = STYLES.get(style)
if not renderer:
raise ValueError("unknown style '%s'" % style)
rendered = renderer(pieces)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree"}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version"}
|
{
"content_hash": "bb5bace3a24267a3b29492690c5e0980",
"timestamp": "",
"source": "github",
"line_count": 577,
"max_line_length": 81,
"avg_line_length": 34.22876949740035,
"alnum_prop": 0.564,
"repo_name": "lbdreyer/nc-time-axis",
"id": "d2eef2a8b31e9b15f5505f84e4ab0e2b142e1230",
"size": "20229",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nc_time_axis/_version.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "117327"
}
],
"symlink_target": ""
}
|
import datetime
import hashlib
import json
from bson.objectid import ObjectId
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext
from mongoengine.base import ValidationError
from crits.core.class_mapper import class_from_id, class_from_value
from crits.core.crits_mongoengine import EmbeddedSource
from crits.core.crits_mongoengine import create_embedded_source, json_handler
from crits.core.handlers import build_jtable, jtable_ajax_list, jtable_ajax_delete
from crits.core.handlers import csv_export
from crits.core.user_tools import is_admin, user_sources
from crits.core.user_tools import is_user_subscribed
from crits.certificates.certificate import Certificate
from crits.notifications.handlers import remove_user_from_notification
from crits.services.analysis_result import AnalysisResult
from crits.services.handlers import run_triage, get_supported_services
def generate_cert_csv(request):
"""
Generate a CSV file of the Certificate information
:param request: The request for this CSV.
:type request: :class:`django.http.HttpRequest`
:returns: :class:`django.http.HttpResponse`
"""
response = csv_export(request,Certificate)
return response
def get_certificate_details(md5, analyst):
"""
Generate the data to render the Certificate details template.
:param md5: The MD5 of the Certificate to get details for.
:type md5: str
:param analyst: The user requesting this information.
:type analyst: str
:returns: template (str), arguments (dict)
"""
template = None
sources = user_sources(analyst)
cert = Certificate.objects(md5=md5, source__name__in=sources).first()
if not cert:
template = "error.html"
args = {'error': 'Certificate not yet available or you do not have access to view it.'}
else:
cert.sanitize("%s" % analyst)
# remove pending notifications for user
remove_user_from_notification("%s" % analyst, cert.id, 'Certificate')
# subscription
subscription = {
'type': 'Certificate',
'id': cert.id,
'subscribed': is_user_subscribed("%s" % analyst,
'Certificate', cert.id),
}
#objects
objects = cert.sort_objects()
#relationships
relationships = cert.sort_relationships("%s" % analyst, meta=True)
# relationship
relationship = {
'type': 'Certificate',
'value': cert.id
}
#comments
comments = {'comments': cert.get_comments(),
'url_key': md5}
#screenshots
screenshots = cert.get_screenshots(analyst)
# services
service_list = get_supported_services('Certificate')
# analysis results
service_results = cert.get_analysis_results()
args = {'service_list': service_list,
'objects': objects,
'relationships': relationships,
'comments': comments,
'relationship': relationship,
"subscription": subscription,
"screenshots": screenshots,
'service_results': service_results,
"cert": cert}
return template, args
def generate_cert_jtable(request, option):
"""
Generate the jtable data for rendering in the list template.
:param request: The request for this jtable.
:type request: :class:`django.http.HttpRequest`
:param option: Action to take.
:type option: str of either 'jtlist', 'jtdelete', or 'inline'.
:returns: :class:`django.http.HttpResponse`
"""
obj_type = Certificate
type_ = "certificate"
mapper = obj_type._meta['jtable_opts']
if option == "jtlist":
# Sets display url
details_url = mapper['details_url']
details_url_key = mapper['details_url_key']
fields = mapper['fields']
response = jtable_ajax_list(obj_type,
details_url,
details_url_key,
request,
includes=fields)
return HttpResponse(json.dumps(response,
default=json_handler),
content_type="application/json")
if option == "jtdelete":
response = {"Result": "ERROR"}
if jtable_ajax_delete(obj_type,request):
response = {"Result": "OK"}
return HttpResponse(json.dumps(response,
default=json_handler),
content_type="application/json")
jtopts = {
'title': "Certificates",
'default_sort': mapper['default_sort'],
'listurl': reverse('crits.%ss.views.%ss_listing' % (type_,
type_),
args=('jtlist',)),
'deleteurl': reverse('crits.%ss.views.%ss_listing' % (type_,
type_),
args=('jtdelete',)),
'searchurl': reverse(mapper['searchurl']),
'fields': mapper['jtopts_fields'],
'hidden_fields': mapper['hidden_fields'],
'linked_fields': mapper['linked_fields'],
'details_link': mapper['details_link'],
'no_sort': mapper['no_sort']
}
jtable = build_jtable(jtopts,request)
jtable['toolbar'] = [
{
'tooltip': "'All Certificates'",
'text': "'All'",
'click': "function () {$('#certificate_listing').jtable('load', {'refresh': 'yes'});}",
'cssClass': "'jtable-toolbar-center'",
},
{
'tooltip': "'New Certificates'",
'text': "'New'",
'click': "function () {$('#certificate_listing').jtable('load', {'refresh': 'yes', 'status': 'New'});}",
'cssClass': "'jtable-toolbar-center'",
},
{
'tooltip': "'In Progress Certificates'",
'text': "'In Progress'",
'click': "function () {$('#certificate_listing').jtable('load', {'refresh': 'yes', 'status': 'In Progress'});}",
'cssClass': "'jtable-toolbar-center'",
},
{
'tooltip': "'Analyzed Certificates'",
'text': "'Analyzed'",
'click': "function () {$('#certificate_listing').jtable('load', {'refresh': 'yes', 'status': 'Analyzed'});}",
'cssClass': "'jtable-toolbar-center'",
},
{
'tooltip': "'Deprecated Certificates'",
'text': "'Deprecated'",
'click': "function () {$('#certificate_listing').jtable('load', {'refresh': 'yes', 'status': 'Deprecated'});}",
'cssClass': "'jtable-toolbar-center'",
},
{
'tooltip': "'Add Certificate'",
'text': "'Add Certificate'",
'click': "function () {$('#new-certificate').click()}",
},
]
if option == "inline":
return render_to_response("jtable.html",
{'jtable': jtable,
'jtid': '%s_listing' % type_,
'button' : '%ss_tab' % type_},
RequestContext(request))
else:
return render_to_response("%s_listing.html" % type_,
{'jtable': jtable,
'jtid': '%s_listing' % type_},
RequestContext(request))
def handle_cert_file(filename, data, source_name, user=None,
description=None, related_id=None, related_md5=None,
related_type=None, method='', reference='',
relationship=None, bucket_list=None, ticket=None):
"""
Add a Certificate.
:param filename: The filename of the Certificate.
:type filename: str
:param data: The filedata of the Certificate.
:type data: str
:param source_name: The source which provided this Certificate.
:type source_name: str,
:class:`crits.core.crits_mongoengine.EmbeddedSource`,
list of :class:`crits.core.crits_mongoengine.EmbeddedSource`
:param user: The user adding the Certificate.
:type user: str
:param description: Description of the Certificate.
:type description: str
:param related_id: ObjectId of a top-level object related to this Certificate.
:type related_id: str
:param related_md5: MD5 of a top-level object related to this Certificate.
:type related_md5: str
:param related_type: The CRITs type of the related top-level object.
:type related_type: str
:param method: The method of acquiring this Certificate.
:type method: str
:param reference: A reference to the source of this Certificate.
:type reference: str
:param relationship: The relationship between the parent and the Certificate.
:type relationship: str
:param bucket_list: Bucket(s) to add to this Certificate
:type bucket_list: str(comma separated) or list.
:param ticket: Ticket(s) to add to this Certificate
:type ticket: str(comma separated) or list.
:returns: dict with keys:
'success' (boolean),
'message' (str),
'md5' (str) if successful.
"""
if not data:
status = {
'success': False,
'message': 'No data object passed in'
}
return status
if len(data) <= 0:
status = {
'success': False,
'message': 'Data length <= 0'
}
return status
if ((related_type and not (related_id or related_md5)) or
(not related_type and (related_id or related_md5))):
status = {
'success': False,
'message': 'Must specify both related_type and related_id or related_md5.'
}
return status
related_obj = None
if related_id or related_md5:
if related_id:
related_obj = class_from_id(related_type, related_id)
else:
related_obj = class_from_value(related_type, related_md5)
if not related_obj:
status = {
'success': False,
'message': 'Related object not found.'
}
return status
# generate md5 and timestamp
md5 = hashlib.md5(data).hexdigest()
timestamp = datetime.datetime.now()
# generate Certificate
cert = Certificate.objects(md5=md5).first()
if not cert:
cert = Certificate()
cert.filename = filename
cert.created = timestamp
cert.size = len(data)
cert.description = description
cert.md5 = md5
# generate source information and add to certificate
if isinstance(source_name, basestring) and len(source_name) > 0:
s = create_embedded_source(source_name,
method=method,
reference=reference,
analyst=user)
cert.add_source(s)
elif isinstance(source_name, EmbeddedSource):
cert.add_source(source_name, method=method, reference=reference)
elif isinstance(source_name, list) and len(source_name) > 0:
for s in source_name:
if isinstance(s, EmbeddedSource):
cert.add_source(s, method=method, reference=reference)
if bucket_list:
cert.add_bucket_list(bucket_list, user)
if ticket:
cert.add_ticket(ticket, user)
# add file to GridFS
if not isinstance(cert.filedata.grid_id, ObjectId):
cert.add_file_data(data)
# save cert
cert.save(username=user)
cert.reload()
# run certificate triage
if len(AnalysisResult.objects(object_id=str(cert.id))) < 1 and data:
run_triage(cert, user)
# update relationship if a related top-level object is supplied
if related_obj and cert:
if not relationship:
relationship = "Related_To"
cert.add_relationship(rel_item=related_obj,
rel_type=relationship,
analyst=user,
get_rels=False)
related_obj.save(username=user)
cert.save(username=user)
status = {
'success': True,
'message': 'Uploaded certificate',
'md5': md5,
'id': str(cert.id),
'object': cert
}
return status
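# Illustrative usage sketch only (all values below are hypothetical and not
# part of CRITs itself):
#
#     with open('cert.pem', 'rb') as fh:
#         result = handle_cert_file('cert.pem', fh.read(), 'ExampleSource',
#                                   user='analyst1', description='demo upload')
#     if result['success']:
#         print(result['md5'])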
def delete_cert(md5, username=None):
"""
Delete a Certificate.
:param md5: The MD5 of the Certificate to delete.
:type md5: str
:param username: The user deleting the certificate.
:type username: str
:returns: True, False
"""
if is_admin(username):
cert = Certificate.objects(md5=md5).first()
if cert:
cert.delete(username=username)
return True
else:
return False
else:
return False
|
{
"content_hash": "a12e86d47b63e2ff5dd2bfa96dd8debe",
"timestamp": "",
"source": "github",
"line_count": 366,
"max_line_length": 124,
"avg_line_length": 36.185792349726775,
"alnum_prop": 0.5622923588039868,
"repo_name": "seanthegeek/crits",
"id": "79cb39f73972141d545e4f51503e5a63d20a8d0b",
"size": "13244",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "crits/certificates/handlers.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "8694"
},
{
"name": "CSS",
"bytes": "360810"
},
{
"name": "HTML",
"bytes": "447412"
},
{
"name": "JavaScript",
"bytes": "2013634"
},
{
"name": "Perl",
"bytes": "916"
},
{
"name": "Prolog",
"bytes": "948"
},
{
"name": "Python",
"bytes": "1908131"
},
{
"name": "Shell",
"bytes": "10293"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function
import six
import random
from ibeis_cnn.__LASAGNE__ import layers
from ibeis_cnn.__LASAGNE__ import nonlinearities
# from ibeis_cnn.__LASAGNE__ import init
from ibeis_cnn import custom_layers
from ibeis_cnn.models import abstract_models
import utool as ut
print, rrr, profile = ut.inject2(__name__, '[ibeis_cnn.models.viewpoint]')
Conv2DLayer = custom_layers.Conv2DLayer
MaxPool2DLayer = custom_layers.MaxPool2DLayer
@six.add_metaclass(ut.ReloadingMetaclass)
class ViewpointModel(abstract_models.AbstractCategoricalModel):
def __init__(model, autoinit=False, batch_size=128, data_shape=(96, 96, 3), arch_tag='viewpoint', **kwargs):
super(ViewpointModel, model).__init__(batch_size=batch_size, data_shape=data_shape, arch_tag=arch_tag, **kwargs)
if autoinit:
model.initialize_architecture()
def augment(model, Xb, yb=None):
# Invert label function
def _invert_label(label):
label = label.replace('LEFT', '^L^')
label = label.replace('RIGHT', '^R^')
label = label.replace('^R^', 'LEFT')
label = label.replace('^L^', 'RIGHT')
return(label)
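        # _invert_label example: 'ZEBRA_PLAINS:FRONT_LEFT' -> 'ZEBRA_PLAINS:FRONT_RIGHT' (and vice versa)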
# Map
points, channels, height, width = Xb.shape
for index in range(points):
if random.uniform(0.0, 1.0) <= 0.5:
Xb[index] = Xb[index, :, ::-1]
if yb is not None:
yb[index] = _invert_label(yb[index])
return Xb, yb
def label_order_mapping(model, category_list):
r"""
Args:
category_list (list):
Returns:
?: category_mapping
CommandLine:
python -m ibeis_cnn.models.viewpoint --exec-label_order_mapping
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis_cnn.models.viewpoint import * # NOQA
>>> model = ViewpointModel()
>>> category_list = ['LEFT', 'FRONT_LEFT', 'FRONT', 'FRONT_RIGHT', 'RIGHT', 'BACK_RIGHT', 'BACK', 'BACK_LEFT']
>>> category_mapping = model.label_order_mapping(category_list)
>>> result = ('category_mapping = %s' % (str(category_mapping),))
>>> print(result)
"""
if len(category_list) == 8:
species_list = [
'ZEBRA_PLAINS',
]
else:
species_list = [
'ZEBRA_PLAINS',
'ZEBRA_GREVYS',
'ELEPHANT_SAVANNA',
'GIRAFFE_RETICULATED',
'GIRAFFE_MASAI',
]
viewpoint_mapping = {
'LEFT': 0,
'FRONT_LEFT': 1,
'FRONT': 2,
'FRONT_RIGHT': 3,
'RIGHT': 4,
'BACK_RIGHT': 5,
'BACK': 6,
'BACK_LEFT': 7,
}
viewpoint_mapping = {
}
viewpoints = len(viewpoint_mapping.keys())
category_mapping = {}
for index, species in enumerate(species_list):
for viewpoint, value in six.iteritems(viewpoint_mapping):
key = '%s:%s' % (species, viewpoint, )
base = viewpoints * index
category_mapping[key] = base + value
return category_mapping
def learning_rate_update(model, x):
return x / 2.0
def learning_rate_shock(model, x):
return x * 2.0
#def build_model(model, batch_size, input_width, input_height, input_channels, output_dims):
def initialize_architecture(model):
(_, input_channels, input_width, input_height) = model.input_shape
output_dims = model.output_dims
_CaffeNet = abstract_models.PretrainedNetwork('caffenet')
l_in = layers.InputLayer(
# variable batch size (None), channel, width, height
#shape=(None, input_channels, input_width, input_height)
shape=model.input_shape,
)
l_noise = layers.GaussianNoiseLayer(
l_in,
)
l_conv0 = Conv2DLayer(
l_noise,
num_filters=32,
filter_size=(11, 11),
# nonlinearity=nonlinearities.rectify,
nonlinearity=nonlinearities.LeakyRectify(leakiness=(1. / 10.)),
W=_CaffeNet.get_pretrained_layer(0),
)
l_conv0_dropout = layers.DropoutLayer(l_conv0, p=0.10)
l_conv1 = Conv2DLayer(
l_conv0_dropout,
num_filters=32,
filter_size=(5, 5),
# nonlinearity=nonlinearities.rectify,
nonlinearity=nonlinearities.LeakyRectify(leakiness=(1. / 10.)),
W=_CaffeNet.get_pretrained_layer(2),
)
l_pool1 = MaxPool2DLayer(
l_conv1,
pool_size=(2, 2),
stride=(2, 2),
)
l_conv2_dropout = layers.DropoutLayer(l_pool1, p=0.10)
l_conv2 = Conv2DLayer(
l_conv2_dropout,
num_filters=64,
filter_size=(3, 3),
# nonlinearity=nonlinearities.rectify,
nonlinearity=nonlinearities.LeakyRectify(leakiness=(1. / 10.)),
# W=init.Orthogonal(),
)
l_pool2 = MaxPool2DLayer(
l_conv2,
pool_size=(2, 2),
stride=(2, 2),
)
l_conv3_dropout = layers.DropoutLayer(l_pool2, p=0.30)
l_conv3 = Conv2DLayer(
l_conv3_dropout,
num_filters=128,
filter_size=(3, 3),
# nonlinearity=nonlinearities.rectify,
nonlinearity=nonlinearities.LeakyRectify(leakiness=(1. / 10.)),
# W=init.Orthogonal(),
)
l_pool3 = MaxPool2DLayer(
l_conv3,
pool_size=(2, 2),
stride=(2, 2),
)
l_conv4_dropout = layers.DropoutLayer(l_pool3, p=0.30)
l_conv4 = Conv2DLayer(
l_conv4_dropout,
num_filters=128,
filter_size=(3, 3),
# nonlinearity=nonlinearities.rectify,
nonlinearity=nonlinearities.LeakyRectify(leakiness=(1. / 10.)),
# W=init.Orthogonal(),
)
l_pool4 = MaxPool2DLayer(
l_conv4,
pool_size=(2, 2),
stride=(2, 2),
)
l_hidden1 = layers.DenseLayer(
l_pool4,
num_units=512,
# nonlinearity=nonlinearities.rectify,
nonlinearity=nonlinearities.LeakyRectify(leakiness=(1. / 10.)),
# W=init.Orthogonal(),
)
l_hidden1_maxout = layers.FeaturePoolLayer(
l_hidden1,
pool_size=2,
)
l_hidden1_dropout = layers.DropoutLayer(l_hidden1_maxout, p=0.5)
l_hidden2 = layers.DenseLayer(
l_hidden1_dropout,
num_units=512,
# nonlinearity=nonlinearities.rectify,
nonlinearity=nonlinearities.LeakyRectify(leakiness=(1. / 10.)),
# W=init.Orthogonal(),
)
l_hidden2_maxout = layers.FeaturePoolLayer(
l_hidden2,
pool_size=2,
)
l_hidden2_dropout = layers.DropoutLayer(l_hidden2_maxout, p=0.5)
l_out = layers.DenseLayer(
l_hidden2_dropout,
num_units=output_dims,
nonlinearity=nonlinearities.softmax,
# W=init.Orthogonal(),
)
model.output_layer = l_out
return l_out
if __name__ == '__main__':
"""
CommandLine:
python -m ibeis_cnn.models.dummy
python -m ibeis_cnn.models.dummy --allexamples
python -m ibeis_cnn.models.dummy --allexamples --noface --nosrc
"""
import multiprocessing
multiprocessing.freeze_support() # for win32
import utool as ut # NOQA
ut.doctest_funcs()
|
{
"content_hash": "f153e7bed470d2cb9e6dbb81cc62d003",
"timestamp": "",
"source": "github",
"line_count": 246,
"max_line_length": 122,
"avg_line_length": 31.727642276422763,
"alnum_prop": 0.5386290839205637,
"repo_name": "bluemellophone/ibeis_cnn",
"id": "d3cf327977743b5c20067b5316690c942346fd9e",
"size": "7829",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ibeis_cnn/models/viewpoint.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "667619"
},
{
"name": "Shell",
"bytes": "333"
}
],
"symlink_target": ""
}
|
import sys
sys.path.insert(0, "../../python/")
import mxnet as mx
import numpy as np
import numpy.random as rnd
import time
import argparse
# parser
parser = argparse.ArgumentParser(description='kvstore test')
args = parser.parse_args()
def check_diff_to_scalar(A, x, rank=None):
""" assert A == x"""
assert(np.sum(np.abs((A - x).asnumpy())) == 0), (rank, A.asnumpy(), x)
# setup
keys = ['3', '5', '7']
init_test_keys = [str(i) for i in range(200,300)]
init_test_keys_big = [str(i) for i in range(300,400)]
init_test_keys_device = [str(i) for i in range(400,500)]
init_test_keys_device_big = [str(i) for i in range(500,600)]
shape = (2, 3)
big_shape = (1200, 1200) # bigger than MXNET_KVSTORE_BIGARRAY_BOUND
kv = mx.kv.create('horovod')
my_rank = kv.rank
my_num_workers = kv.num_workers
def test_pushpull():
ctx = mx.gpu(kv.local_rank) if mx.device.num_gpus() > 0 else mx.cpu(kv.local_rank)
scale = kv.rank + 1
tensor = mx.nd.ones(shape, ctx) * scale
kv.pushpull('3', tensor)
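    # each worker pushes ones * (rank + 1); the allreduce sums 1 + 2 + ... + n = n * (n + 1) / 2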
expected = (kv.num_workers + 1) * kv.num_workers / 2
check_diff_to_scalar(tensor, expected)
print('worker ' + str(kv.local_rank) + ' passed test_pushpull')
def test_broadcast():
ctx = mx.gpu(kv.local_rank) if mx.device.num_gpus() > 0 else mx.cpu(kv.local_rank)
val = mx.nd.zeros(shape, ctx)
kv.broadcast('0', mx.nd.ones(shape), out=val)
expected = 1
check_diff_to_scalar(val, expected, kv.rank)
print('worker ' + str(kv.local_rank) + ' passed test_broadcast')
def test_type():
assert kv.type == 'horovod'
if __name__ == "__main__":
test_type()
test_broadcast()
test_pushpull()
|
{
"content_hash": "70428c1186396fcafa2c6e9dc5527b7a",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 86,
"avg_line_length": 27.081967213114755,
"alnum_prop": 0.6368038740920097,
"repo_name": "szha/mxnet",
"id": "3e2c2b98e270e7c3f5e14379af26221517e3119d",
"size": "2461",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tests/nightly/dist_device_sync_kvstore_horovod.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1731"
},
{
"name": "Batchfile",
"bytes": "13130"
},
{
"name": "C",
"bytes": "232928"
},
{
"name": "C++",
"bytes": "9664292"
},
{
"name": "CMake",
"bytes": "159414"
},
{
"name": "Clojure",
"bytes": "622640"
},
{
"name": "Cuda",
"bytes": "1291579"
},
{
"name": "Dockerfile",
"bytes": "100732"
},
{
"name": "Groovy",
"bytes": "167421"
},
{
"name": "HTML",
"bytes": "40268"
},
{
"name": "Java",
"bytes": "205196"
},
{
"name": "Julia",
"bytes": "445413"
},
{
"name": "Jupyter Notebook",
"bytes": "3660357"
},
{
"name": "MATLAB",
"bytes": "34903"
},
{
"name": "Makefile",
"bytes": "149067"
},
{
"name": "Perl",
"bytes": "1558293"
},
{
"name": "PowerShell",
"bytes": "9244"
},
{
"name": "Python",
"bytes": "9800628"
},
{
"name": "R",
"bytes": "357982"
},
{
"name": "Raku",
"bytes": "9012"
},
{
"name": "SWIG",
"bytes": "161870"
},
{
"name": "Scala",
"bytes": "1304635"
},
{
"name": "Shell",
"bytes": "459014"
},
{
"name": "Smalltalk",
"bytes": "3497"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
import ehs_esports.users.models
class Migration(migrations.Migration):
dependencies = [
('communication', '0001_initial'),
('users', '0022_user_notifications'),
]
operations = [
migrations.RemoveField(
model_name='user',
name='notifications',
),
migrations.AddField(
model_name='user',
name='notifications',
field=models.ManyToManyField(blank=True, to='communication.NotificationMethod'),
),
migrations.AlterField(
model_name='user',
name='profile_banner',
field=models.ImageField(blank=True, upload_to=ehs_esports.users.models.generate_banner_filename, verbose_name='Profile Banner'),
),
]
|
{
"content_hash": "2a0a272f5161c09e9fbb673949f644be",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 140,
"avg_line_length": 29.413793103448278,
"alnum_prop": 0.608440797186401,
"repo_name": "ReilySiegel/ehs_esports",
"id": "0e5947ee44fc2b37baad1d27d7d6aaf6bf42598c",
"size": "925",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ehs_esports/users/migrations/0023_auto_20161021_1626.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1932"
},
{
"name": "HTML",
"bytes": "32397"
},
{
"name": "JavaScript",
"bytes": "3106"
},
{
"name": "Python",
"bytes": "85591"
},
{
"name": "Shell",
"bytes": "4232"
}
],
"symlink_target": ""
}
|
import json
from flask import request, Response, Blueprint
from rpaas import (auth, get_manager, storage, manager, tasks, consul_manager)
from rpaas.misc import (validate_name, require_plan, ValidationError)
router = Blueprint('router', __name__, url_prefix='/router')
supported_extra_features = ['tls', 'status', 'info'] # other recognized feature names include "cname" and "healthcheck"
@router.url_value_preprocessor
def add_name_prefix(endpoint, values):
if 'name' in values:
values['name'] = 'router-{}'.format(values['name'])
@router.route("/backend/<name>", methods=["GET"])
@auth.required
def get_backend(name):
try:
addr = get_manager().status(name)
except storage.InstanceNotFoundError:
return "Backend not found", 404
if addr == manager.FAILURE:
return addr, 500
if addr == manager.PENDING:
addr = ""
return Response(response=json.dumps({"address": addr}), status=200,
mimetype="application/json")
@router.route("/backend/<name>", methods=["POST"])
@auth.required
def add_backend(name):
try:
validate_name(name)
except ValidationError as e:
return str(e), 400
data = request.get_json()
if not data:
return "could not decode body json", 400
team = data.get('team') or data.get('tsuru.io/app-teamowner')
plan = data.get('plan')
flavor = data.get('flavor')
if not team:
return "team name is required", 400
if require_plan() and not plan:
return "plan is required", 400
try:
if flavor:
get_manager().new_instance(name, team=team,
plan_name=plan, flavor_name=flavor)
else:
get_manager().new_instance(name, team=team, plan_name=plan)
except storage.PlanNotFoundError:
return "Plan not found", 404
except storage.FlavorNotFoundError:
return "Flavor not found", 404
except storage.DuplicateError:
return "{} backend already exists".format(name), 409
except manager.QuotaExceededError as e:
return str(e), 403
return "", 201
@router.route("/backend/<name>", methods=["PUT"])
@auth.required
def update_backend(name):
data = request.get_json()
if not data:
return "could not decode body json", 400
plan = data.get('plan')
flavor = data.get('flavor')
scale = data.get('scale')
if not plan and not flavor and not scale:
return "Invalid option. Valid update options are: scale, flavor and plan", 400
try:
if scale and int(scale) <= 0:
raise ValueError
get_manager().update_instance(name, plan, flavor)
if scale:
get_manager().scale_instance(name, scale)
except tasks.NotReadyError as e:
return "Backend not ready: {}".format(e), 412
except storage.InstanceNotFoundError:
return "Backend not found", 404
except storage.PlanNotFoundError:
return "Plan not found", 404
except storage.FlavorNotFoundError:
return "Flavor not found", 404
except ValueError:
return "Scale option should be integer and >0", 400
return "", 204
@router.route("/backend/<name>", methods=["DELETE"])
@auth.required
def delete_backend(name):
try:
get_manager().remove_instance(name)
except storage.InstanceNotFoundError:
return "Backend not found", 404
except consul_manager.InstanceAlreadySwappedError:
return "Instance with swap enabled", 412
return "", 200
@router.route("/backend/<name>/routes", methods=["GET"])
@auth.required
def list_routes(name):
try:
routes = get_manager().list_upstreams(name, name)
routes = ["http://{}".format(route) for route in routes]
except tasks.NotReadyError as e:
return "Backend not ready: {}".format(e), 412
except storage.InstanceNotFoundError:
return "Backend not found", 404
return Response(response=json.dumps({"addresses": list(routes)}), status=200,
mimetype="application/json")
@router.route("/backend/<name>/routes", methods=["POST"])
@auth.required
def add_routes(name):
data = request.get_json()
if not data:
return "could not decode body json", 400
addresses = data.get('addresses')
if not addresses:
return "", 200
m = get_manager()
try:
m.bind(name, name, router_mode=True)
m.add_upstream(name, name, addresses, True)
except tasks.NotReadyError as e:
return "Backend not ready: {}".format(e), 412
except storage.InstanceNotFoundError:
return "Backend not found", 404
return "", 200
# TODO: wait nginx reload and report status?
@router.route("/backend/<name>/status", methods=["GET"])
@auth.required
def status(name):
node_status = get_manager().node_status(name)
status = []
for node in node_status:
status.append("{} - {}: {}".format(node, node_status[node]['address'], node_status[node]['status']))
node_status = {}
node_status['status'] = "\n".join(status)
return Response(response=json.dumps(node_status), status=200,
mimetype="application/json")
@router.route("/info", methods=["GET"])
@auth.required
def info():
plans = get_manager().storage.list_plans()
flavors = get_manager().storage.list_flavors()
options_plans = ["{} - {}".format(p.name, p.description) for p in plans]
options_flavors = ["{} - {}".format(f.name, f.description) for f in flavors]
options = """
scale - number of instance vms
plan - set instance to plan
flavor - set instance to flavor
"""
if options_plans:
options = options + "\nAvailable plans: \n" + "\n".join(options_plans)
if options_plans:
options = options + "\n"
if options_flavors:
options = options + "\nAvailable flavors: \n" + "\n".join(options_flavors)
return Response(response=json.dumps({'Router options': options}), status=200,
mimetype="application/json")
@router.route("/backend/<name>/routes/remove", methods=["POST"])
@auth.required
def delete_routes(name):
data = request.get_json()
if not data:
return "could not decode body json", 400
addresses = data.get('addresses')
if not addresses:
return "", 200
m = get_manager()
try:
m.remove_upstream(name, name, addresses)
routes = m.list_upstreams(name, name)
if len(routes) < 1:
m.unbind(name)
except tasks.NotReadyError as e:
return "Backend not ready: {}".format(e), 412
except storage.InstanceNotFoundError:
return "Backend not found", 404
return "", 200
# TODO: wait nginx reload and report status?
@router.route("/backend/<name>/swap", methods=["POST"])
@auth.required
def swap(name):
data = request.get_json()
if not data:
return "Could not decode body json", 400
if data.get('cnameOnly'):
return "Swap cname only not supported", 400
target_instance = data.get('target')
if not target_instance:
return "Target instance cannot be empty", 400
m = get_manager()
try:
m.swap(name, "router-{}".format(target_instance))
except tasks.NotReadyError as e:
return "Backend not ready: {}".format(e), 412
except storage.InstanceNotFoundError:
return "Backend not found", 404
except consul_manager.InstanceAlreadySwappedError:
return "Instance already swapped", 412
return "", 200
@router.route("/backend/<name>/certificate/<cname>", methods=["GET"])
@auth.required
def get_certificate(name, cname):
m = get_manager()
try:
certificate, _ = m.get_certificate(name)
except storage.InstanceNotFoundError:
return "Backend not found", 404
except consul_manager.CertificateNotFoundError:
return "Certificate not found", 404
return Response(response=json.dumps({'certificate': certificate}),
status=200, mimetype="application/json")
@router.route("/backend/<name>/certificate/<cname>", methods=["PUT"])
@auth.required
def update_certificate(name, cname):
data = request.get_json()
if not data:
return "Could not decode body json", 400
certificate = data.get('certificate')
key = data.get('key')
if not key or not certificate:
return "Certificate or key is missing", 400
m = get_manager()
try:
m.update_certificate(name, certificate, key)
except storage.InstanceNotFoundError:
return "Backend not found", 404
return "", 200
@router.route("/backend/<name>/certificate/<cname>", methods=["DELETE"])
@auth.required
def delete_certificate(name, cname):
m = get_manager()
try:
m.delete_certificate(name)
except storage.InstanceNotFoundError:
return "Backend not found", 404
return "", 200
@router.route("/support/<feature>", methods=["GET"])
@auth.required
def supports(feature):
if feature in supported_extra_features:
return "", 200
return "", 404
|
{
"content_hash": "b8c24c69f1cd754d5fff40d923e3fe07",
"timestamp": "",
"source": "github",
"line_count": 274,
"max_line_length": 108,
"avg_line_length": 32.96350364963504,
"alnum_prop": 0.6391718334809566,
"repo_name": "tsuru/rpaas",
"id": "11ac8bebbad761f5dd4fa6cc3e688f2d2c8863cb",
"size": "9188",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rpaas/router_api.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1723"
},
{
"name": "Procfile",
"bytes": "18"
},
{
"name": "Python",
"bytes": "541935"
},
{
"name": "Shell",
"bytes": "2261"
}
],
"symlink_target": ""
}
|
DOCUMENTATION = '''
---
module: merge_configs
short_description: Merge ini-style configs
description:
- ConfigParser is used to merge several ini-style configs into one
options:
dest:
description:
- The destination file name
required: True
type: str
sources:
description:
- A list of files on the destination node to merge together
default: None
required: True
    type: list
author: Sam Yaple
'''
EXAMPLES = '''
Merge multiple configs:
- hosts: database
tasks:
- name: Merge configs
merge_configs:
sources:
- "/tmp/config_1.cnf"
- "/tmp/config_2.cnf"
- "/tmp/config_3.cnf"
          dest: "/etc/mysql/my.cnf"
'''
|
{
"content_hash": "b2c85971ca2bb3a874d6229cb718b946",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 71,
"avg_line_length": 20.571428571428573,
"alnum_prop": 0.6125,
"repo_name": "dardelean/kolla-ansible",
"id": "1a73e31018b01203eb7a6c9b0a8ebe28a976de40",
"size": "1317",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ansible/library/merge_configs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "125843"
},
{
"name": "Ruby",
"bytes": "12188"
},
{
"name": "Shell",
"bytes": "81844"
}
],
"symlink_target": ""
}
|
from sqlalchemy.testing.requirements import SuiteRequirements
from sqlalchemy.testing import exclusions
class Requirements(SuiteRequirements):
# This class configures the sqlalchemy test suite. Oddly, it must
# be importable in the main codebase and not alongside the tests.
#
# The full list of supported settings is at
# https://github.com/zzzeek/sqlalchemy/blob/master/lib/sqlalchemy/testing/requirements.py
# This one's undocumented but appears to control connection reuse
# in the tests.
independent_connections = exclusions.open()
# We don't support these features yet, but the tests have them on
# by default.
temporary_tables = exclusions.closed()
temp_table_reflection = exclusions.closed()
time = exclusions.skip_if(lambda config: not config.db.dialect._is_v2plus,
"v1.x does not support TIME.")
time_microseconds = exclusions.skip_if(lambda config: not config.db.dialect._is_v2plus,
"v1.x does not support TIME.")
server_side_cursors = exclusions.closed()
cross_schema_fk_reflection = exclusions.closed()
# We don't do implicit casts.
date_coerces_from_datetime = exclusions.closed()
# We do not support creation of views with `SELECT *` expressions,
# which these tests use.
view_reflection = exclusions.closed()
view_column_reflection = exclusions.closed()
# The autoincrement tests assume a predictable 1-based sequence.
autoincrement_insert = exclusions.closed()
# The following features are off by default. We turn on as many as
# we can without causing test failures.
non_updating_cascade = exclusions.open()
deferrable_fks = exclusions.closed()
boolean_col_expressions = exclusions.open()
nullsordering = exclusions.open()
standalone_binds = exclusions.open()
intersect = exclusions.open()
except_ = exclusions.open()
window_functions = exclusions.open()
empty_inserts = exclusions.open()
returning = exclusions.open()
multivalues_inserts = exclusions.open()
emulated_lastrowid = exclusions.open()
dbapi_lastrowid = exclusions.open()
views = exclusions.open()
schemas = exclusions.closed()
sequences = exclusions.closed()
sequences_optional = exclusions.closed()
temporary_views = exclusions.closed()
reflects_pk_names = exclusions.open()
unicode_ddl = exclusions.open()
datetime_literals = exclusions.closed()
datetime_historic = exclusions.open()
date_historic = exclusions.open()
precision_numerics_enotation_small = exclusions.open()
precision_numerics_enotation_large = exclusions.open()
precision_numerics_many_significant_digits = exclusions.open()
precision_numerics_retains_significant_digits = exclusions.closed()
savepoints = exclusions.closed()
two_phase_transactions = exclusions.closed()
update_from = exclusions.open()
mod_operator_as_percent_sign = exclusions.open()
foreign_key_constraint_reflection = exclusions.open()
# The psycopg driver doesn't support these.
percent_schema_names = exclusions.closed()
order_by_label_with_expression = exclusions.open()
order_by_col_from_union = exclusions.open()
implicitly_named_constraints = exclusions.open()
# Mostly work, except for https://github.com/cockroachdb/cockroach/issues/28548
index_reflection = exclusions.closed()
unique_constraint_reflection = exclusions.closed()
|
{
"content_hash": "837cae3631803547b99500f646116b10",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 93,
"avg_line_length": 43.24691358024691,
"alnum_prop": 0.7108192977447901,
"repo_name": "bdarnell/cockroach-python",
"id": "e009da5f9f1653871ff7a38fb31dc0abf150d82e",
"size": "3503",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cockroachdb/sqlalchemy/test_requirements.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
import os
import sys
import string
import BoostBuild
xml = "--xml" in sys.argv
toolset = BoostBuild.get_toolset()
# Clear environment for testing.
#
for s in ('BOOST_ROOT', 'BOOST_BUILD_PATH', 'JAM_TOOLSET', 'BCCROOT', 'MSVCDir',
'MSVC', 'MSVCNT', 'MINGW', 'watcom' ):
try:
del os.environ[s]
except:
pass
BoostBuild.set_defer_annotations(1)
def run_tests(critical_tests, other_tests):
"""Runs first critical tests and then other_tests.
Stops on first error, and write the name of failed test to
test_results.txt. Critical tests are run in the specified order, other
tests are run starting with the one that failed the last time.
"""
last_failed = last_failed_test()
other_tests = reorder_tests(other_tests, last_failed)
all_tests = critical_tests + other_tests
invocation_dir = os.getcwd()
pass_count = 0
failures_count = 0
for i in all_tests:
passed = 1
if not xml:
print ("%-25s : " %(i)),
try:
__import__(i)
except SystemExit:
passed = 0;
if failures_count == 0:
f = open(os.path.join(invocation_dir, 'test_results.txt'), 'w')
f.write(i)
f.close()
failures_count = failures_count + 1
# Restore the current directory, which might be changed by the test.
os.chdir(invocation_dir)
if not xml:
if passed:
print "PASSED"
else:
print "FAILED"
if i == "regression":
BoostBuild.flush_annotations()
BoostBuild.clear_annotations()
else:
rs = "succeed"
if not passed:
rs = "fail"
print """
<test-log library="build" test-name="%s" test-type="run" toolset="%s" test-program="%s" target-directory="%s">
<run result="%s">""" % (i, toolset, "tools/build/v2/test/" + i + ".py",
"boost/bin.v2/boost.build.tests/" + toolset + "/" + i, rs)
if not passed:
BoostBuild.flush_annotations(1)
print """
</run>
</test-log>
"""
if passed:
pass_count = pass_count + 1
sys.stdout.flush() # Makes testing under emacs more entertaining.
# Erase the file on success.
if failures_count == 0:
open('test_results.txt', 'w')
if not xml:
print """
=== Test summary ===
PASS: %d
FAIL: %d
""" % (pass_count, failures_count)
def last_failed_test():
"Returns the name of last failed test or None"
try:
f = open("test_results.txt")
s = string.strip(f.read())
return s
except:
return None
def reorder_tests(tests, first_test):
try:
n = tests.index(first_test)
return [first_test] + tests[:n] + tests[n+1:]
except ValueError:
return tests
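# Illustrative example (not part of the original script): reorder_tests()
# rotates the previously failed test to the front, e.g.
#   reorder_tests(["a", "b", "c"], "b")  ->  ["b", "a", "c"]
#   reorder_tests(["a", "b", "c"], "x")  ->  ["a", "b", "c"]  (unknown name)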
critical_tests = ["unit_tests", "module_actions", "startup_v1", "startup_v2"]
critical_tests += ["core_d12", "core_typecheck", "core_delete_module",
"core_varnames", "core_import_module"]
tests = [ "absolute_sources",
"alias",
"alternatives",
"bad_dirname",
"build_dir",
"build_file",
"build_no",
"c_file",
"chain",
"clean",
"composite",
"conditionals",
"conditionals2",
"conditionals3",
"conditionals_multiple",
"configuration",
"copy_time",
"custom_generator",
"default_build",
"default_features",
# This test is known to be broken itself.
# "default_toolset",
"dependency_property",
"dependency_test",
"disambiguation",
"dll_path",
"double_loading",
"duplicate",
"example_libraries",
"example_make",
"expansion",
"explicit",
"file_name_handling",
"free_features_request",
"generator_selection",
"generators_test",
"implicit_dependency",
"indirect_conditional",
"inherit_toolset",
"inherited_dependency",
"inline",
"lib_source_property",
"library_chain",
"library_property",
"load_order",
"loop",
"make_rule",
"ndebug",
"no_type",
"notfile",
"ordered_include",
"out_of_tree",
"path_features",
"prebuilt",
"print",
"project_dependencies",
"project_glob",
"project_root_constants",
"project_root_rule",
"project_test3",
"project_test4",
"property_expansion",
"rebuilds",
"regression",
"relative_sources",
"remove_requirement",
"resolution",
"searched_lib",
"skipping",
"sort_rule",
"source_locations",
"stage",
"standalone",
"suffix",
"tag",
"test_result_dumping",
"testing_support",
"timedata",
"unit_test",
"use_requirements",
"using",
"wrapper",
"wrong_project",
"exit_status",
]
if os.name == 'posix':
tests.append("symlink")
# On windows, library order is not important, so skip this test. Besides, it
# fails ;-). Further, the test relies on the fact that on Linux, one can
# build a shared library with unresolved symbols. This is not true on
# Windows (even with cygwin gcc).
if string.find(os.uname()[0], "CYGWIN") == -1:
tests.append("library_order")
if string.find(BoostBuild.get_toolset(), 'gcc') == 0:
tests.append("gcc_runtime")
if ( string.find(BoostBuild.get_toolset(), 'gcc') == 0 )or \
( string.find(BoostBuild.get_toolset(), 'msvc') == 0 ):
tests.append("pch")
if "--extras" in sys.argv:
tests.append("boostbook")
tests.append("qt4")
tests.append("example_qt4")
    # Requires ./whatever.py to work, so is not guaranteed to work everywhere.
tests.append("example_customization")
# Requires gettext tools.
tests.append("example_gettext")
elif not xml:
print 'Note: skipping extra tests'
run_tests(critical_tests, tests)
|
{
"content_hash": "f4bb4cac718683ddf935aafbbb47a8f3",
"timestamp": "",
"source": "github",
"line_count": 232,
"max_line_length": 110,
"avg_line_length": 27.70689655172414,
"alnum_prop": 0.5294026135656503,
"repo_name": "basho-labs/riak-cxx-client",
"id": "59476bd7404a0634930205171c9eb215dfea4c23",
"size": "6686",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "deps/boost-1.47.0/tools/build/v2/test/test_all.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "4509"
},
{
"name": "C++",
"bytes": "130887"
},
{
"name": "Protocol Buffer",
"bytes": "7146"
},
{
"name": "Shell",
"bytes": "326442"
}
],
"symlink_target": ""
}
|
import time, sys, signal, atexit
import pyupm_my9221 as upmGroveCircularLED
# Exit handlers
def SIGINTHandler(signum, frame):
raise SystemExit
def exitHandler():
circle.setLevel(0, True)
print "Exiting"
sys.exit(0)
# This function lets you run code on exit
atexit.register(exitHandler)
# This function stops python from printing a stacktrace when you hit control-C
signal.signal(signal.SIGINT, SIGINTHandler)
# Instantiate a Grove Circular LED on gpio pins 9 and 8
circle = upmGroveCircularLED.GroveCircularLED(9, 8)
level = 0
while(1):
circle.setSpinner(level)
level = (level + 1) % 24
time.sleep(.1)
|
{
"content_hash": "6fa858e88bbe3a890f3fa11abd5a7461",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 78,
"avg_line_length": 24.192307692307693,
"alnum_prop": 0.7472178060413355,
"repo_name": "kissbac/upm",
"id": "1c5a485d92f46b11658f197cea4dc3ac89c96139",
"size": "1790",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/python/grovecircularled.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1272641"
},
{
"name": "C++",
"bytes": "2473960"
},
{
"name": "CMake",
"bytes": "79260"
},
{
"name": "CSS",
"bytes": "18714"
},
{
"name": "HTML",
"bytes": "33016"
},
{
"name": "JavaScript",
"bytes": "47971"
},
{
"name": "Python",
"bytes": "32304"
}
],
"symlink_target": ""
}
|
class Solution:
# @param root, a tree node
# @return a boolean
def Height(self, root):
if root == None:
return 0
return max(self.Height(root.left), self.Height(root.right)) + 1
def isBalanced(self, root):
if root == None:
return True
if abs(self.Height(root.left) - self.Height(root.right)) <= 1:
return self.isBalanced(root.left) and self.isBalanced(root.right)
else:
return False
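# Usage sketch (hypothetical; LeetCode normally supplies the TreeNode class):
#
#   class TreeNode(object):
#       def __init__(self, x):
#           self.val = x
#           self.left = None
#           self.right = None
#
#   root = TreeNode(1)
#   root.left = TreeNode(2)
#   root.left.left = TreeNode(3)
#   print(Solution().isBalanced(root))  # False: left height 2, right height 0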
|
{
"content_hash": "610f48ca5770c1a75058999462d6c33a",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 77,
"avg_line_length": 32.46666666666667,
"alnum_prop": 0.5728952772073922,
"repo_name": "Chasego/codi",
"id": "2e7e70aba9d5e43002f8c725c40d796c206e25b2",
"size": "487",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "leetcode/110-Balanced-Binary-Tree/BalancedBinTree_001.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9684"
},
{
"name": "HTML",
"bytes": "48035"
},
{
"name": "Java",
"bytes": "102385"
},
{
"name": "JavaScript",
"bytes": "2001"
},
{
"name": "Python",
"bytes": "284844"
}
],
"symlink_target": ""
}
|
{
"name" : "pdf report support for your language",
"version" : "2.1.1",
"author" : "Shine IT",
"maintainer":"jeff@openerp.cn",
"website": "http://www.openerp.cn",
"description": u"""
pdf report support for your language
=====================================
Fonts defined in the default report may not support characters
in your language, which may cause garbled characters in the printed
pdf report.
This addon solves the above-mentioned issue elegantly by using the openerp
customfonts API to replace the original fonts with your selected fonts.
1) Put your font into your fonts directory (e.g. 'HOME/yourname/fonts')
2) Click the link on the line 'Configure fonts mapping for pdf report' (Settings/ Configuration/ General Settings/)
You can find your fonts mapping in Settings/Technical/Parameters/System Parameters/fonts_map
More detail (Chinese installation guide): http://cn.openerp.cn/openerp_v7_oecn_base_fonts/
by shineit<contact@openerp.cn>""",
"depends" : ["base",'base_setup'],
"category" : "Generic Modules/Base",
"demo_xml" : [],
"update_xml" : [
"oecn_font_installer.xml",
"res_config_view.xml",
],
"license": "GPL-3",
"active": False,
"installable": True
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
{
"content_hash": "ed1dfe2d5a3fdcf91564ce66307c332e",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 118,
"avg_line_length": 33.282051282051285,
"alnum_prop": 0.6741140215716487,
"repo_name": "chjw8016/GreenOdoo7-haibao",
"id": "f0e01ac7560ce4bca3d0849f15fed1bb5af61d76",
"size": "2211",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "openerp/addons/oecn_base_fonts/__openerp__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "90846"
},
{
"name": "CSS",
"bytes": "384369"
},
{
"name": "JavaScript",
"bytes": "1730589"
},
{
"name": "PHP",
"bytes": "14033"
},
{
"name": "Python",
"bytes": "9394626"
},
{
"name": "Shell",
"bytes": "5172"
},
{
"name": "XSLT",
"bytes": "156761"
}
],
"symlink_target": ""
}
|
"""
Compare spawn to spawn_n, among other things.
This script will generate a number of "properties" files for the Hudson plot plugin
Profiling and graphs
====================
You can profile this program and obtain a call graph with `gprof2dot` and `graphviz`:
```
python -m cProfile -o output.pstats path/to/this/script arg1 arg2
gprof2dot.py -f pstats output.pstats | dot -Tpng -o output.png
```
It generates a graph where a node represents a function and has the following layout:
```
+------------------------------+
| function name |
| total time % ( self time % ) |
| total calls |
+------------------------------+
```
where:
* total time % is the percentage of the running time spent in this function and all its children;
* self time % is the percentage of the running time spent in this function alone;
* total calls is the total number of times this function was called (including recursive calls).
An edge represents the calls between two functions and has the following layout:
```
total time %
calls
parent --------------------> children
```
where:
* total time % is the percentage of the running time transferred from the children to this parent (if available);
* calls is the number of calls the parent function called the children.
"""
import os
import evy
import benchmarks
DATA_DIR = 'plot_data'
if not os.path.exists(DATA_DIR):
os.makedirs(DATA_DIR)
def write_result (filename, best):
fd = open(os.path.join(DATA_DIR, filename), 'w')
fd.write('YVALUE=%s' % best)
fd.close()
def cleanup ():
evy.sleep(0.2)
iters = 10000
best = benchmarks.measure_best(5, iters,
'pass',
cleanup,
evy.sleep)
write_result('evy.sleep_main', best[evy.sleep])
gt = evy.spawn(benchmarks.measure_best, 5, iters,
'pass',
cleanup,
evy.sleep)
best = gt.wait()
write_result('evy.sleep_gt', best[evy.sleep])
def dummy (i = None):
return i
def run_spawn ():
evy.spawn(dummy, 1)
def run_spawn_n ():
evy.spawn_n(dummy, 1)
def run_spawn_n_kw ():
evy.spawn_n(dummy, i = 1)
best = benchmarks.measure_best(5, iters,
'pass',
cleanup,
run_spawn_n,
run_spawn,
run_spawn_n_kw)
write_result('evy.spawn', best[run_spawn])
write_result('evy.spawn_n', best[run_spawn_n])
write_result('evy.spawn_n_kw', best[run_spawn_n_kw])
pool = None
def setup ():
global pool
pool = evy.GreenPool(iters)
def run_pool_spawn ():
pool.spawn(dummy, 1)
def run_pool_spawn_n ():
pool.spawn_n(dummy, 1)
def cleanup_pool ():
pool.waitall()
best = benchmarks.measure_best(3, iters,
setup,
cleanup_pool,
run_pool_spawn,
run_pool_spawn_n,
)
write_result('evy.GreenPool.spawn', best[run_pool_spawn])
write_result('evy.GreenPool.spawn_n', best[run_pool_spawn_n])
|
{
"content_hash": "3e3e2a025f3a6fd85175d7348337e10a",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 114,
"avg_line_length": 24.425373134328357,
"alnum_prop": 0.5554537121906508,
"repo_name": "inercia/evy",
"id": "a1b097d8df9d484725d3f3192b6dce0bbfb68731",
"size": "3295",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "benchmarks/spawn_plot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "948569"
},
{
"name": "Shell",
"bytes": "122"
}
],
"symlink_target": ""
}
|
"""Handy utils for config"""
import os
def contents(*names):
"""Return string contents from first matching named environment variable
or file.
Each name in names is checked first against an environment variable then
a file. An Exception is raised if nothing matches.
"""
for name in names:
if name in os.environ:
return os.environ[name]
else:
name = os.path.expanduser(name)
if os.path.isfile(name):
with open(name) as src:
return src.read().strip()
raise Exception("Unresolved content: "+', '.join(names))
|
{
"content_hash": "f06b71caa491c319795faa814885065b",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 76,
"avg_line_length": 28.545454545454547,
"alnum_prop": 0.60828025477707,
"repo_name": "gregelin/demo-allauth-bootstrap",
"id": "18c6717ff675131fba0a46d6915d2c4243b1652d",
"size": "628",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "allauthdemo/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2581"
},
{
"name": "HTML",
"bytes": "24709"
},
{
"name": "Makefile",
"bytes": "375"
},
{
"name": "Python",
"bytes": "26380"
}
],
"symlink_target": ""
}
|
from django.test import SimpleTestCase
from corehq.apps.app_manager.exceptions import CaseXPathValidationError
from corehq.apps.app_manager.xpath import (
dot_interpolate,
UserCaseXPath,
interpolate_xpath,
)
class RegexTest(SimpleTestCase):
def test_regex(self):
replacement = "@case_id stuff"
cases = [
('./lmp < 570.5', '%s/lmp < 570.5'),
('stuff ./lmp < 570.', 'stuff %s/lmp < 570.'),
('.53 < hello.', '.53 < hello%s'),
]
for case in cases:
self.assertEqual(
dot_interpolate(case[0], replacement),
case[1] % replacement
)
def test_interpolate_xpath(self):
replacements = {
'case': "<casedb stuff>",
'user': UserCaseXPath().case(),
'session': "instance('commcaresession')/session",
}
cases = [
('./lmp < 570.5', '{case}/lmp < 570.5'),
('#case/lmp < 570.5', '{case}/lmp < 570.5'),
('stuff ./lmp < 570.', 'stuff {case}/lmp < 570.'),
('stuff #case/lmp < 570.', 'stuff {case}/lmp < 570.'),
('.53 < hello.', '.53 < hello{case}'),
('.53 < hello#case', '.53 < hello{case}'),
('#session/data/username', '{session}/data/username'),
('"jack" = #session/username', '"jack" = {session}/username'),
('./@case_id = #session/userid', '{case}/@case_id = {session}/userid'),
('#case/@case_id = #user/@case_id', '{case}/@case_id = {user}/@case_id'),
('#host/foo = 42', "instance('casedb')/casedb/case[@case_id={case}/index/host]/foo = 42"),
("'ham' = #parent/spam", "'ham' = instance('casedb')/casedb/case[@case_id={case}/index/parent]/spam"),
]
for case in cases:
self.assertEqual(
interpolate_xpath(case[0], replacements['case']),
case[1].format(**replacements)
)
def test_interpolate_xpath_error(self):
for case in ('./lmp < 570.5', '#case/lmp < 570.5'):
with self.assertRaises(CaseXPathValidationError):
interpolate_xpath(case, None),
|
{
"content_hash": "f4419ae47063f2823b419fa923fa418b",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 114,
"avg_line_length": 40.51851851851852,
"alnum_prop": 0.5137111517367459,
"repo_name": "qedsoftware/commcare-hq",
"id": "2c56579973ec94b0c41057b556d1568815da7085",
"size": "2212",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/apps/app_manager/tests/test_suite_regex.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "508392"
},
{
"name": "HTML",
"bytes": "2869325"
},
{
"name": "JavaScript",
"bytes": "2395360"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "125298"
},
{
"name": "Python",
"bytes": "14670713"
},
{
"name": "Shell",
"bytes": "37514"
}
],
"symlink_target": ""
}
|
from collections import namedtuple
from undine.client.connector.base_connector import BaseConnector
from undine.database.mariadb import MariaDbConnector as MariaDB
class MariaDbConnector(BaseConnector):
__QUERY = {
'mission_mid': '''
SELECT mid FROM mission WHERE name = %(name)s
''',
'mission_list': '''
SELECT mid, name, email,
ready, issued, done, canceled, failed,
issued_at AS issued
FROM mission_dashboard %s
ORDER BY complete, issued_at DESC
''',
'mission_info': '''
SELECT mid, name, email, description, issued
FROM mission %s
ORDER BY issued ASC
''',
'task_list': '''
SELECT tid, name, host, ip, state, issued, updated,
IF(reportable = TRUE, 'true', 'false') AS reportable
FROM task_list %s
ORDER BY issued ASC
''',
'task_info': '''
SELECT t.tid AS tid, t.name AS name,
t.host AS host, t.ip AS ip, s.name AS state,
t.mid AS mid,
t.cid AS cid, t.iid AS iid, t.wid AS wid,
t.issued, t.updated,
IF(t.reportable = TRUE, 'true', 'false') AS reportable,
IF(r.content IS NOT NULL, r.content, '-') AS result,
IF(r.reported IS NOT NULL, r.reported, '-') AS succeed,
IF(e.message IS NOT NULL, e.message, '-') AS error,
IF(e.informed IS NOT NULL, e.informed, '-') AS failed
FROM task AS t
JOIN state_type AS s ON t.state = s.state
LEFT JOIN result r ON t.tid = r.tid
LEFT JOIN error e ON t.tid = e.tid
WHERE t.tid = %(tid)s
''',
'config_info': '''
SELECT cid AS cid, name, config, issued
FROM config
WHERE cid = %(cid)s
''',
'input_info': '''
SELECT iid AS iid, name, items, issued
FROM input %s
ORDER BY issued
''',
'worker_info': '''
SELECT wid AS wid, name, command, arguments, worker_dir, issued
FROM worker %s
ORDER BY issued
''',
'host_list': '''
SELECT name, ip, issued, canceled, failed,
registered, logged_in, logged_out, state
FROM host_list
''',
'tid_list': '''
SELECT tid FROM task %s
''',
# Trashing task information.
'trash_result': '''
INSERT INTO trash (tid, generated, category, content, trashed)
SELECT tid, reported, 'result', content, CURRENT_TIMESTAMP FROM result
WHERE tid IN (%s)
''',
'trash_error': '''
INSERT INTO trash (tid, generated, category, content, trashed)
SELECT tid, informed, 'error', message, CURRENT_TIMESTAMP FROM error
WHERE tid IN (%s)
''',
'delete_result': '''
DELETE FROM result WHERE tid IN (%s)
''',
'delete_error': '''
DELETE FROM error WHERE tid IN (%s)
''',
'cancel_task': '''
UPDATE task
SET state = 'C', host = '', ip = '', updated = CURRENT_TIMESTAMP
WHERE tid IN (%s)
''',
# Remove task related information.
'delete_trash': '''
DELETE FROM trash WHERE tid IN (%s)
''',
'delete_task': '''
DELETE FROM task WHERE tid IN (%s)
''',
'delete_mission': '''
DELETE FROM mission WHERE mid = %(mid)s
''',
# Retry task
'retry_task': '''
UPDATE task
SET state = 'R', host = '', ip = '', updated = CURRENT_TIMESTAMP
WHERE tid IN (%s)
'''
}
_WhereItem = namedtuple('_WhereItem', ['clause', 'format'])
_WHERE_CLAUSE = {
'mid': _WhereItem('mid = %(mid)s', '{}'),
'tid': _WhereItem('tid = %(tid)s', '{}'),
'cid': _WhereItem('cid = %(cid)s', '{}'),
'iid': _WhereItem('iid = %(iid)s', '{}'),
'wid': _WhereItem('wid = %(wid)s', '{}'),
'name': _WhereItem('name LIKE %(name)s', '%{}%'),
'host': _WhereItem('host LIKE %(host)s', '%{}%'),
'email': _WhereItem('email LIKE %(email)s', '%{}%'),
'state': _WhereItem('state = %(state)s', '{}'),
'reportable': _WhereItem('reportable = %(reportable)s', None)
}
#
# Constructor & Destructor
#
def __init__(self, config):
super(MariaDbConnector, self).__init__(config)
self._db = MariaDB(self._db_config)
#
# Private methods
#
@staticmethod
def __value(form, value):
return value if not form else form.format(value)
@staticmethod
def __where(condition):
return 'WHERE ' + ' AND '.join(condition) if condition else ''
def __query(self, template, **kwargs):
where = list()
params = dict()
for k, v in kwargs.items():
if k in self._WHERE_CLAUSE:
where.append(self._WHERE_CLAUSE[k].clause)
params[k] = self.__value(self._WHERE_CLAUSE[k].format, v)
where = self.__where(where)
return dict(query=self.__QUERY[template] % where, **params)
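    # Illustrative example (not from the original source): __query('task_list',
    # name='train') expands the template's %s with "WHERE name LIKE %(name)s"
    # and returns {'query': <expanded SQL>, 'name': '%train%'}, ready to be
    # passed to the fetch helpers below.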
def __manipulate_task(self, tasks, operations):
where = ", ".join(("%s",) * len(tasks))
self._db.execute_multiple_dml([
self._db.sql(self.__QUERY[operation] % where, *tasks)
for operation in operations
])
#
# Inherited methods
#
def _get_mid(self, name):
item = self._db.fetch_a_tuple(self.__QUERY['mission_mid'], name=name)
return item[0] if item else None
def _get_tid_list(self, **kwargs):
query_set = self.__query('tid_list', **kwargs)
return [item[0] for item in self._db.fetch_all_tuples(**query_set)]
def _mission_info(self, **kwargs):
return self._db.fetch_all_tuples(**self.__query('mission_info',
**kwargs))
def _task_list(self, **kwargs):
return self._db.fetch_all_tuples(**self.__query('task_list', **kwargs))
def _task_info(self, tid):
return self._db.fetch_a_tuple(self.__QUERY['task_info'], tid=tid)
def _config_info(self, cid):
return self._db.fetch_a_tuple(self.__QUERY['config_info'], cid=cid)
def _input_info(self, iid):
return self._db.fetch_a_tuple(**self.__query('input_info', iid=iid))
def _worker_info(self, wid):
return self._db.fetch_a_tuple(**self.__query('worker_info', wid=wid))
def _cancel_tasks(self, *tasks):
        # TODO Check the item list size. This per-tid mechanism can fail
        # when the task list grows too large. If it proves inefficient,
        # add a new operation keyed on mid instead.
operations = ('cancel_task',
'trash_result', 'trash_error',
'delete_result', 'delete_error')
self.__manipulate_task(tasks, operations)
def _drop_tasks(self, *tasks):
        # TODO Check the item list size. This per-tid mechanism can fail
        # when the task list grows too large. If it proves inefficient,
        # add a new operation keyed on mid instead.
operations = ('delete_trash',
'delete_result', 'delete_error',
'delete_task')
self.__manipulate_task(tasks, operations)
def _drop_mission(self, mid):
self._db.execute_single_dml(self.__QUERY['delete_mission'], mid=mid)
def _rerun_tasks(self, *tasks):
        # TODO Check the item list size. This per-tid mechanism can fail
        # when the task list grows too large. If it proves inefficient,
        # add a new operation keyed on mid instead.
operations = ('trash_result', 'trash_error',
'delete_result', 'delete_error',
'retry_task')
self.__manipulate_task(tasks, operations)
def mission_list(self, list_all=False):
where = 'WHERE complete = FALSE' if not list_all else ''
return self._db.fetch_all_tuples(self.__QUERY['mission_list'] % where)
def input_list(self):
return self._db.fetch_all_tuples(**self.__query('input_info'))
def worker_list(self):
return self._db.fetch_all_tuples(**self.__query('worker_info'))
def host_list(self):
return self._db.fetch_all_tuples(self.__QUERY['host_list'])
|
{
"content_hash": "837257e685269c7e164b4d0ddd08ea55",
"timestamp": "",
"source": "github",
"line_count": 246,
"max_line_length": 80,
"avg_line_length": 34.6219512195122,
"alnum_prop": 0.5312903604555594,
"repo_name": "Sungup/Undine",
"id": "5bf82666f79fd24ce266b953da23527be05e1291",
"size": "8517",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "undine/client/connector/mariadb.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "147048"
}
],
"symlink_target": ""
}
|
from msrest.serialization import Model
class SearchSort(Model):
"""The sort parameters for search.
:param name: The name of the field the search query is sorted on.
:type name: str
:param order: The sort order of the search. Possible values include:
'asc', 'desc'
:type order: str or :class:`SearchSortEnum
<azure.mgmt.loganalytics.models.SearchSortEnum>`
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'order': {'key': 'order', 'type': 'str'},
}
def __init__(self, name=None, order=None):
self.name = name
self.order = order
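# Usage sketch (hypothetical field name): sort search results ascending by a
# "TimeGenerated" field.
#
#   sort = SearchSort(name='TimeGenerated', order='asc')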
|
{
"content_hash": "c31ffa57507df55b944b1dcc3547d655",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 72,
"avg_line_length": 28.40909090909091,
"alnum_prop": 0.608,
"repo_name": "lmazuel/azure-sdk-for-python",
"id": "15814e6e38a874e22146949dd84c0baef7196207",
"size": "1099",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "azure-mgmt-loganalytics/azure/mgmt/loganalytics/models/search_sort.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42572767"
}
],
"symlink_target": ""
}
|
import glob
import os
import shutil
import subprocess
import time
from threading import Thread
from config import config
from database import Database
from models import ComponentOptions, LayoutOptions
from models import ConvertRequest, ConversionStatus
FORMAT = {
'escher': 'Escher',
'sbml': 'SBML',
'sbgn': 'SBGN'
}
EXTENSION = {
'escher': '.escher.json',
'sbml': '.sbml.xml',
'sbgn': '.sbgn.xml'
}
class EscherConverter(object):
"""
Controls the execution of the JAR and actual conversion.
"""
jar_path = config['JAR_PATH']
file_store = config['FILE_STORE']
common_command = ['java', '-jar', jar_path, '--gui=false', '--log-level=FINEST']
    def __init__(self, options: ConvertRequest):
        if not isinstance(options, ConvertRequest):
            raise TypeError("Options must be of type 'ConvertRequest'")
        # Copy the shared command template so the per-instance appends below
        # do not mutate the class-level list.
        self.command = list(EscherConverter.common_command)
        self.db = Database(config['SQLITE_FILE'], config['DEBUG'])
        self.db.renew()
        self.options = self.db.retrieve(options.id)
if options.output_format is None:
raise ValueError("output_format is required")
else:
self.command.append("--format=" + FORMAT[str(options.output_format.value)])
input_file = "--input=" + EscherConverter.file_store + str(options.id) + "/input/"
self.command.append(input_file)
os.makedirs(EscherConverter.file_store + str(options.id) + "/output/")
self.command.append("--output=" + EscherConverter.file_store + str(options.id) + "/output/")
self.command.append("--log-file=" + EscherConverter.file_store + str(options.id) +
"/conversion.log")
self.add_compartment_options(options.component_options)
self.add_layout_option(options.layout_options)
def add_compartment_options(self, component_options: ComponentOptions):
"""
Add component options to the command.
:param component_options:
:return:
"""
pass
def add_layout_option(self, layout_options: LayoutOptions):
"""
Add layout options to the command.
:param layout_options:
:return:
"""
pass
def convert(self):
"""
Calls the internal _convert() in a separate thread.
:return:
"""
thread = Thread(target=self._convert, args=[self.options.id])
thread.start()
def _convert(self, id):
"""
Invokes the executable JAR and updates the database.
:param id:
:return:
"""
db = Database(config['SQLITE_FILE'], config['DEBUG'])
db.renew()
options = db.retrieve(id)
status_code = subprocess.call(self.command)
req_id = options.id
for file in glob.glob(config['FILE_STORE'] + str(req_id) + '/output/input/*'):
shutil.move(file, config['FILE_STORE'] + str(req_id) + '/output/')
os.rmdir(config['FILE_STORE'] + str(req_id) + '/output/input/')
if status_code == 0:
options.status = ConversionStatus.completed
else:
options.status = ConversionStatus.failed
options.completion_date = int(time.time())
db.update()
db.finalize()
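# Usage sketch (hypothetical): given a ConvertRequest row already stored in the
# database and its input file placed under FILE_STORE/<id>/input/, conversion
# runs asynchronously:
#
#   converter = EscherConverter(request)
#   converter.convert()  # spawns a thread that invokes the JAR and updates
#                        # the request status once it finishes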
|
{
"content_hash": "1b53aa15e96f375d6625a3b56a331e86",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 100,
"avg_line_length": 33.53061224489796,
"alnum_prop": 0.6062081558125381,
"repo_name": "SBRG/EscherConverter",
"id": "fcdc7c05c891eff08d34c813ce31bf3cdda4b168",
"size": "3286",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/escherconverter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1900"
},
{
"name": "CoffeeScript",
"bytes": "969"
},
{
"name": "HTML",
"bytes": "6614"
},
{
"name": "Java",
"bytes": "346328"
},
{
"name": "JavaScript",
"bytes": "13376"
},
{
"name": "Python",
"bytes": "27543"
},
{
"name": "Shell",
"bytes": "89"
},
{
"name": "XSLT",
"bytes": "121039"
}
],
"symlink_target": ""
}
|
__version__ = 'DEVELOPMENT'
|
{
"content_hash": "654819b021dd103c127fe049d7080581",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 27,
"avg_line_length": 28,
"alnum_prop": 0.6428571428571429,
"repo_name": "knipknap/telemetric",
"id": "e6b3c12a3309b1d4e82c99050d417f90f8c10da3",
"size": "77",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "telemetric/version.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1401"
},
{
"name": "Python",
"bytes": "32106"
},
{
"name": "Shell",
"bytes": "1420"
}
],
"symlink_target": ""
}
|
import pytest
import nnabla as nn
import nnabla.parametric_functions as PF
import numpy as np
from nnabla.testing import assert_allclose
def ref_all_gather(x_data, n_devices):
results = []
for i in range(n_devices):
results.append(x_data * i)
return results
@pytest.mark.parametrize("seed", [313])
def test_all_gather(seed, comm_nccl_opts):
if comm_nccl_opts is None:
pytest.skip(
"Communicator test is disabled. You can turn it on by an option `--test-communicator`.")
if len(comm_nccl_opts.devices) < 2:
pytest.skip(
"Communicator test is disabled. Use more than 1 gpus.")
comm = comm_nccl_opts.comm
device_id = int(comm_nccl_opts.device_id)
n_devices = len(comm_nccl_opts.devices)
# Variables
rng = np.random.RandomState(seed)
x_data = rng.rand(3, 4)
x = nn.Variable(x_data.shape)
x.d = x_data * device_id
y_list = []
for i in range(n_devices):
y = nn.Variable(x_data.shape)
y_list.append(y)
    # AllGather
comm.all_gather(x.data, [y.data for y in y_list])
# Ref
refs = ref_all_gather(x_data, n_devices)
# Check
for y, ref in zip(y_list, refs):
assert_allclose(y.d, ref, rtol=1e-3, atol=1e-6)
|
{
"content_hash": "338cdd7d14e4f782995cf0e4fedda19b",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 100,
"avg_line_length": 27.347826086956523,
"alnum_prop": 0.6287758346581876,
"repo_name": "sony/nnabla",
"id": "fef653117adac920e5e55bdeb2a5fe6300e71bb7",
"size": "1855",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/test/communicator/test_all_gather.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "25938"
},
{
"name": "C++",
"bytes": "2590231"
},
{
"name": "CMake",
"bytes": "35358"
},
{
"name": "Cython",
"bytes": "180959"
},
{
"name": "Dockerfile",
"bytes": "5431"
},
{
"name": "Jupyter Notebook",
"bytes": "540006"
},
{
"name": "Makefile",
"bytes": "24294"
},
{
"name": "Python",
"bytes": "5311538"
},
{
"name": "Shell",
"bytes": "4750"
}
],
"symlink_target": ""
}
|
from mio.utils import Null
def test_repr1(mio):
mio.eval("f = block(State clone())")
assert repr(mio.eval("f()")) == "NormalState()"
def test_repr2(mio):
mio.eval("f = block(State clone() setBreak())")
assert repr(mio.eval("f()")) == "BreakState()"
def test_repr3(mio):
mio.eval("f = block(State clone() setContinue())")
assert repr(mio.eval("f()")) == "ContinueState()"
def test_repr4(mio):
mio.eval("f = block(State clone() setReturn())")
assert repr(mio.eval("f()")) == "ReturnState()"
def test_repr5(mio):
mio.eval("f = block(State clone() setReturn(\"foo\"))")
assert repr(mio.eval("f()")) == "ReturnState(u\"foo\")"
def test_reset(mio):
mio.eval("f = block(State clone() setReturn(\"foo\"))")
state = mio.eval("f()")
assert state.isReturn
assert state.value == "foo"
assert repr(state) == "ReturnState(u\"foo\")"
state.reset()
assert not state.isReturn
assert state.value is Null
assert repr(state) == "NormalState()"
def test_state_stop(mio):
mio.eval("f = block(State clone() setReturn(\"foo\"))")
assert mio.eval("f() stop()")
def test_state_invalid(mio):
assert mio.eval("state setContinue() is state")
def test_state_invalid2(mio):
assert mio.eval("state setReturn() is state")
def test_state_invalid3(mio):
assert mio.eval("state setBreak() is state")
|
{
"content_hash": "c74625f502b0278fd06eab5ad71f1b9d",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 59,
"avg_line_length": 24.660714285714285,
"alnum_prop": 0.6198406951484432,
"repo_name": "prologic/mio",
"id": "bd4f2d625e988f34b172ef723e05e8e7023e7b06",
"size": "1381",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/core/test_state.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "15260"
},
{
"name": "Python",
"bytes": "191874"
},
{
"name": "Shell",
"bytes": "5303"
},
{
"name": "VimL",
"bytes": "1477"
}
],
"symlink_target": ""
}
|
import os
import re
import sys
from django.utils.crypto import get_random_string
def create_settings():
"""
Creates a new learningprogress_settings.py file in the base directory
    if it does not already exist.
"""
base_dir = os.path.dirname(__file__)
if not os.path.exists(os.path.join(base_dir, 'learningprogress_settings.py')):
default_settings_path = os.path.join(base_dir, 'learningprogress', 'default_settings.py')
with open(default_settings_path) as default_settings:
key = get_random_string(50, 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)')
settings = re.sub(
r"SECRET_KEY = ''",
"SECRET_KEY = '%s'" % key,
default_settings.read())
with open(os.path.join(base_dir, 'learningprogress_settings.py'), 'w') as new_settings:
new_settings.write(settings)
if __name__ == '__main__':
create_settings()
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'learningprogress_settings')
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
{
"content_hash": "dd3ff991deaf25cc4921ab868365e088",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 99,
"avg_line_length": 34.24242424242424,
"alnum_prop": 0.6371681415929203,
"repo_name": "normanjaeckel/LearningProgress",
"id": "7edeb1ec185e4b33d2dd47df2d1b568f0d7e5426",
"size": "1130",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "92326"
},
{
"name": "HTML",
"bytes": "232511"
},
{
"name": "JavaScript",
"bytes": "11550"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "Python",
"bytes": "74296"
}
],
"symlink_target": ""
}
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Author(models.Model):
first_name = models.CharField(_('first name'), max_length=30)
last_name = models.CharField(_('last name'), max_length=30)
biography = models.TextField(_('biography'), blank=True)
def __str__(self):
return '%s %s' % (self.first_name, self.last_name)
|
{
"content_hash": "4c09da44a1130cc793a7584862939dd6",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 65,
"avg_line_length": 32.13333333333333,
"alnum_prop": 0.7053941908713693,
"repo_name": "barszczmm/django-wpadmin",
"id": "1d4b3581539ac8680cff49bb6b940c584042650f",
"size": "482",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "test_project/apps/authors/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "51789"
},
{
"name": "HTML",
"bytes": "39814"
},
{
"name": "JavaScript",
"bytes": "5244"
},
{
"name": "Python",
"bytes": "45582"
},
{
"name": "Shell",
"bytes": "742"
}
],
"symlink_target": ""
}
|
import pycurl
import StringIO
import urllib
import logging
from lib.msg.RawSerialize import RawSerialize
logger = logging.getLogger(__name__)
def authenticate(user, password, ip, port, realm=""):
c = pycurl.Curl()
content = StringIO.StringIO()
if "" == realm:
url = 'http://%s:%s/openam/json/authenticate' % (ip, port)
else:
url = 'http://%s:%s/openam/json/%s/authenticate' % (ip, port, realm)
post_data_dict = {}
c.setopt(pycurl.URL, url)
c.setopt(pycurl.POST, 1)
c.setopt(pycurl.POSTFIELDS, urllib.urlencode(post_data_dict) )
c.setopt(pycurl.HTTPHEADER,['X-OpenAM-Username: %s'%user.encode('ascii','ignore'),'X-OpenAM-Password: %s'%password,'Content-Type: application/json'])
c.setopt(pycurl.WRITEFUNCTION, content.write)
c.setopt(pycurl.CONNECTTIMEOUT, 30)
c.setopt(pycurl.TIMEOUT, 30)
ret = ""
try:
ret = c.perform()
except Exception,e:
logger.info(e)
c.close()
return ""
c.close()
cont = content.getvalue()
return cont
def logout(tokenid, ip, port, realm=""):
c = pycurl.Curl()
content = StringIO.StringIO()
if "" == realm:
url = 'http://%s:%s/openam/json/sessions/?_action=logout' % (ip, port)
else:
url = 'http://%s:%s/openam/json/%s/sessions/?_action=logout' % (ip, port, realm)
post_data_dict = {}
c.setopt(pycurl.URL, url)
c.setopt(pycurl.POST, 1)
c.setopt(pycurl.POSTFIELDS, urllib.urlencode(post_data_dict) )
c.setopt(pycurl.HTTPHEADER,['iplanetDirectoryPro: %s'%tokenid,'Content-Type: application/json'])
c.setopt(pycurl.WRITEFUNCTION, content.write)
c.setopt(pycurl.CONNECTTIMEOUT, 30)
c.setopt(pycurl.TIMEOUT, 30)
ret = ""
try:
ret = c.perform()
except Exception,e:
logger.info(e)
c.close()
return ""
c.close()
cont = content.getvalue()
return cont
def get_user_list(tokenid, ip, port, realm=""):
c = pycurl.Curl()
content = StringIO.StringIO()
if "" == realm:
url = 'http://%s:%s/openam/json/users?_queryID=*' % (ip, port)
else:
url = 'http://%s:%s/openam/json/%s/users?_queryID=*' % (ip, port, realm)
c.setopt(pycurl.URL, url)
c.setopt(pycurl.POST, 0)
c.setopt(pycurl.HTTPHEADER,['iplanetDirectoryPro: %s'%tokenid,'Content-Type: application/json'])
c.setopt(pycurl.WRITEFUNCTION, content.write)
c.setopt(pycurl.CONNECTTIMEOUT, 30)
c.setopt(pycurl.TIMEOUT, 30)
ret = ""
try:
ret = c.perform()
except Exception,e:
logger.info(e)
c.close()
return ""
c.close()
cont = content.getvalue()
return cont
def change_password(tokenid, user, oldpasswd, newpasswd, ip, port, realm=""):
user = user.encode('ascii','ignore')
oldpasswd = oldpasswd.encode('ascii','ignore')
newpasswd = newpasswd.encode('ascii','ignore')
c = pycurl.Curl()
content = StringIO.StringIO()
if "" == realm:
url = 'http://%s:%s/openam/json/users/%s?_action=changePassword' % (ip, port, user)
else:
url = 'http://%s:%s/openam/json/%s/users/%s?_action=changePassword' % (ip, port, realm, user)
post_data_dict = {"currentpassword":oldpasswd,"userpassword":newpasswd}
raws = RawSerialize()
post_data_dict = raws.serialize(post_data_dict)
c.setopt(pycurl.URL, url)
c.setopt(pycurl.POST, 1)
c.setopt(pycurl.POSTFIELDS, post_data_dict )
c.setopt(pycurl.HTTPHEADER,['iplanetDirectoryPro: %s'%tokenid,'Content-Type: application/json'])
c.setopt(pycurl.WRITEFUNCTION, content.write)
c.setopt(pycurl.CONNECTTIMEOUT, 30)
c.setopt(pycurl.TIMEOUT, 30)
ret = ""
try:
ret = c.perform()
except Exception,e:
logger.info(e)
c.close()
return ""
c.close()
cont = content.getvalue()
return cont
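# Usage sketch (hypothetical host and credentials): authenticate against a
# local OpenAM instance and then log the session out again.
#
#   import json
#   resp = authenticate('demo', 'changeit', '127.0.0.1', '8080')
#   token = json.loads(resp)['tokenId']  # assumes OpenAM's usual response shape
#   logout(token, '127.0.0.1', '8080')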
|
{
"content_hash": "65a6a276606d4e93b39d508fe609d69e",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 153,
"avg_line_length": 31.4390243902439,
"alnum_prop": 0.6188259632790277,
"repo_name": "Svolcano/python_exercise",
"id": "ce2872076f863b2d2887f8af2e74b6977355e1df",
"size": "3867",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WinLsLoad/lib/openam/openam.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "732"
},
{
"name": "JavaScript",
"bytes": "157614"
},
{
"name": "Python",
"bytes": "3292561"
},
{
"name": "Shell",
"bytes": "1417"
}
],
"symlink_target": ""
}
|
"""The request for analysis by a client. It contains analysis instances.
"""
import logging
from AccessControl import ClassSecurityInfo
from DateTime import DateTime
from Products.ATExtensions.field import RecordsField
from plone.indexer import indexer
from Products.Archetypes import atapi
from Products.Archetypes.config import REFERENCE_CATALOG
from Products.Archetypes.public import *
from Products.Archetypes.references import HoldingReference
from Products.Archetypes.Widget import RichWidget
from Products.CMFCore import permissions
from Products.CMFCore.permissions import View
from Products.CMFCore.utils import getToolByName
from Products.CMFPlone.utils import safe_unicode
from Products.CMFPlone.utils import _createObjectByType
from bika.lims.browser.fields import ARAnalysesField
from bika.lims.config import PROJECTNAME
from bika.lims.permissions import *
from bika.lims.content.bikaschema import BikaSchema
from bika.lims.interfaces import IAnalysisRequest, ISamplePrepWorkflow
from bika.lims.browser.fields import HistoryAwareReferenceField
from bika.lims.browser.widgets import DateTimeWidget, DecimalWidget
from bika.lims.browser.widgets import ReferenceWidget
from bika.lims.browser.widgets import SelectionWidget
from bika.lims.workflow import skip, isBasicTransitionAllowed
from bika.lims.workflow import doActionFor
from decimal import Decimal
from zope.interface import implements
from bika.lims import bikaMessageFactory as _
from bika.lims.utils import t, getUsers, dicts_to_dict
from bika.lims.browser.fields import DateTimeField
from bika.lims.browser.widgets import SelectionWidget as BikaSelectionWidget
import sys
try:
from zope.component.hooks import getSite
except:
# Plone < 4.3
from zope.app.component.hooks import getSite
@indexer(IAnalysisRequest)
def Priority(instance):
priority = instance.getPriority()
if priority:
return priority.getSortKey()
@indexer(IAnalysisRequest)
def BatchUID(instance):
batch = instance.getBatch()
if batch:
return batch.UID()
@indexer(IAnalysisRequest)
def SamplingRoundUID(instance):
sr = instance.getSamplingRound()
if sr:
return sr.UID()
schema = BikaSchema.copy() + Schema((
StringField(
'RequestID',
searchable=True,
mode="rw",
read_permission=permissions.View,
write_permission=permissions.ModifyPortalContent,
widget=StringWidget(
label = _("Request ID"),
description=_("The ID assigned to the client's request by the lab"),
visible={'view': 'invisible',
'edit': 'invisible'},
),
),
ReferenceField(
'Contact',
required=1,
default_method='getContactUIDForUser',
vocabulary_display_path_bound=sys.maxsize,
allowed_types=('Contact',),
referenceClass=HoldingReference,
relationship='AnalysisRequestContact',
mode="rw",
read_permission=permissions.View,
write_permission=EditARContact,
widget=ReferenceWidget(
label = _("Contact"),
render_own_label=True,
size=20,
helper_js=("bika_widgets/referencewidget.js", "++resource++bika.lims.js/contact.js"),
visible={'edit': 'visible',
'view': 'visible',
'add': 'edit',
'header_table': 'prominent',
'sample_registered': {'view': 'visible', 'edit': 'visible', 'add': 'edit'},
'to_be_sampled': {'view': 'visible', 'edit': 'visible'},
'sampled': {'view': 'visible', 'edit': 'visible'},
'to_be_preserved': {'view': 'visible', 'edit': 'visible'},
'sample_due': {'view': 'visible', 'edit': 'visible'},
'sample_prep': {'view': 'visible', 'edit': 'invisible'},
'sample_received': {'view': 'visible', 'edit': 'visible'},
'attachment_due': {'view': 'visible', 'edit': 'visible'},
'to_be_verified': {'view': 'visible', 'edit': 'visible'},
'verified': {'view': 'visible', 'edit': 'invisible'},
'published': {'view': 'visible', 'edit': 'invisible'},
'invalid': {'view': 'visible', 'edit': 'invisible'},
},
base_query={'inactive_state': 'active'},
showOn=True,
popup_width='400px',
colModel=[{'columnName': 'UID', 'hidden': True},
{'columnName': 'Fullname', 'width': '50', 'label': _('Name')},
{'columnName': 'EmailAddress', 'width': '50', 'label': _('Email Address')},
],
),
),
ReferenceField(
'CCContact',
multiValued=1,
vocabulary_display_path_bound=sys.maxsize,
allowed_types=('Contact',),
referenceClass=HoldingReference,
relationship='AnalysisRequestCCContact',
mode="rw",
read_permission=permissions.View,
write_permission=EditARContact,
widget=ReferenceWidget(
label = _("CC Contacts"),
render_own_label=True,
size=20,
visible={'edit': 'visible',
'view': 'visible',
'add': 'edit',
'header_table': 'prominent',
'sample_registered': {'view': 'visible', 'edit': 'visible', 'add': 'edit'},
'to_be_sampled': {'view': 'visible', 'edit': 'visible'},
'sampled': {'view': 'visible', 'edit': 'visible'},
'to_be_preserved': {'view': 'visible', 'edit': 'visible'},
'sample_due': {'view': 'visible', 'edit': 'visible'},
'sample_prep': {'view': 'visible', 'edit': 'invisible'},
'sample_received': {'view': 'visible', 'edit': 'visible'},
'attachment_due': {'view': 'visible', 'edit': 'visible'},
'to_be_verified': {'view': 'visible', 'edit': 'visible'},
'verified': {'view': 'visible', 'edit': 'invisible'},
'published': {'view': 'visible', 'edit': 'invisible'},
'invalid': {'view': 'visible', 'edit': 'invisible'},
},
base_query={'inactive_state': 'active'},
showOn=True,
popup_width='400px',
colModel=[{'columnName': 'UID', 'hidden': True},
{'columnName': 'Fullname', 'width': '50', 'label': _('Name')},
{'columnName': 'EmailAddress', 'width': '50', 'label': _('Email Address')},
],
),
),
StringField(
'CCEmails',
mode="rw",
read_permission=permissions.View,
write_permission=EditARContact,
widget=StringWidget(
label = _("CC Emails"),
visible={'edit': 'visible',
'view': 'visible',
'add': 'edit',
'header_table': 'prominent',
'sample_registered': {'view': 'visible', 'edit': 'visible', 'add': 'edit'},
'to_be_sampled': {'view': 'visible', 'edit': 'visible'},
'sampled': {'view': 'visible', 'edit': 'visible'},
'to_be_preserved': {'view': 'visible', 'edit': 'visible'},
'sample_received': {'view': 'visible', 'edit': 'visible'},
'attachment_due': {'view': 'visible', 'edit': 'visible'},
'to_be_verified': {'view': 'visible', 'edit': 'visible'},
'verified': {'view': 'visible', 'edit': 'invisible'},
'published': {'view': 'visible', 'edit': 'invisible'},
'invalid': {'view': 'visible', 'edit': 'invisible'},
},
render_own_label=True,
size=20,
),
),
ReferenceField(
'Client',
required=1,
allowed_types=('Client',),
relationship='AnalysisRequestClient',
mode="rw",
read_permission=permissions.View,
write_permission=permissions.ModifyPortalContent,
widget=ReferenceWidget(
label = _("Client"),
description = _("You must assign this request to a client"),
size=20,
render_own_label=True,
visible={'edit': 'visible',
'view': 'visible',
'add': 'edit',
'header_table': 'visible',
'sample_registered': {'view': 'invisible', 'edit': 'visible', 'add': 'edit'},
'to_be_sampled': {'view': 'invisible', 'edit': 'invisible'},
'sampled': {'view': 'invisible', 'edit': 'invisible'},
'to_be_preserved': {'view': 'invisible', 'edit': 'invisible'},
'sample_received': {'view': 'invisible', 'edit': 'invisible'},
'attachment_due': {'view': 'invisible', 'edit': 'invisible'},
'to_be_verified': {'view': 'invisible', 'edit': 'invisible'},
'verified': {'view': 'invisible', 'edit': 'invisible'},
'published': {'view': 'invisible', 'edit': 'invisible'},
'invalid': {'view': 'invisible', 'edit': 'invisible'},
},
base_query={'inactive_state': 'active'},
showOn=True,
),
),
ReferenceField(
'Sample',
vocabulary_display_path_bound=sys.maxsize,
allowed_types=('Sample',),
referenceClass=HoldingReference,
relationship='AnalysisRequestSample',
mode="rw",
read_permission=permissions.View,
write_permission=permissions.ModifyPortalContent,
widget=ReferenceWidget(
label = _("Sample"),
description = _("Select a sample to create a secondary AR"),
size=20,
render_own_label=True,
visible={'edit': 'visible',
'view': 'visible',
'add': 'edit',
'header_table': 'visible',
'sample_registered': {'view': 'visible', 'edit': 'visible', 'add': 'edit'},
'to_be_sampled': {'view': 'visible', 'edit': 'invisible'},
'sampled': {'view': 'visible', 'edit': 'invisible'},
'to_be_preserved': {'view': 'visible', 'edit': 'invisible'},
'sample_due': {'view': 'visible', 'edit': 'invisible'},
'sample_prep': {'view': 'visible', 'edit': 'invisible'},
'sample_received': {'view': 'visible', 'edit': 'invisible'},
'attachment_due': {'view': 'visible', 'edit': 'invisible'},
'to_be_verified': {'view': 'visible', 'edit': 'invisible'},
'verified': {'view': 'visible', 'edit': 'invisible'},
'published': {'view': 'visible', 'edit': 'invisible'},
'invalid': {'view': 'visible', 'edit': 'invisible'},
},
catalog_name='bika_catalog',
base_query={'cancellation_state': 'active',
'review_state': ['sample_due', 'sample_received', ]},
showOn=True,
),
),
ReferenceField(
'Batch',
allowed_types=('Batch',),
relationship='AnalysisRequestBatch',
mode="rw",
read_permission=permissions.View,
write_permission=permissions.ModifyPortalContent,
widget=ReferenceWidget(
label = _("Batch"),
size=20,
render_own_label=True,
visible={'edit': 'visible',
'view': 'visible',
'add': 'edit',
'header_table': 'visible',
'sample_registered': {'view': 'visible', 'edit': 'visible', 'add': 'edit'},
'to_be_sampled': {'view': 'visible', 'edit': 'visible'},
'sampled': {'view': 'visible', 'edit': 'visible'},
'to_be_preserved': {'view': 'visible', 'edit': 'visible'},
'sample_due': {'view': 'visible', 'edit': 'visible'},
'sample_prep': {'view': 'visible', 'edit': 'invisible'},
'sample_received': {'view': 'visible', 'edit': 'visible'},
'attachment_due': {'view': 'visible', 'edit': 'visible'},
'to_be_verified': {'view': 'visible', 'edit': 'visible'},
'verified': {'view': 'visible', 'edit': 'visible'},
'published': {'view': 'visible', 'edit': 'invisible'},
'invalid': {'view': 'visible', 'edit': 'invisible'},
},
catalog_name='bika_catalog',
base_query={'review_state': 'open',
'cancellation_state': 'active'},
showOn=True,
),
),
ReferenceField(
'SamplingRound',
allowed_types=('SamplingRound',),
relationship='AnalysisRequestSamplingRound',
mode="rw",
read_permission=permissions.View,
write_permission=permissions.ModifyPortalContent,
widget=ReferenceWidget(
label = _("Sampling Round"),
size=20,
render_own_label=True,
visible={'edit': 'visible',
'view': 'visible',
'add': 'edit',
'header_table': 'visible',
'sample_registered': {'view': 'visible', 'edit': 'visible', 'add': 'edit'},
'to_be_sampled': {'view': 'visible', 'edit': 'visible'},
'sampled': {'view': 'visible', 'edit': 'visible'},
'to_be_preserved': {'view': 'visible', 'edit': 'visible'},
'sample_due': {'view': 'visible', 'edit': 'visible'},
'sample_prep': {'view': 'visible', 'edit': 'invisible'},
'sample_received': {'view': 'visible', 'edit': 'visible'},
'attachment_due': {'view': 'visible', 'edit': 'visible'},
'to_be_verified': {'view': 'visible', 'edit': 'visible'},
'verified': {'view': 'visible', 'edit': 'visible'},
'published': {'view': 'visible', 'edit': 'invisible'},
'invalid': {'view': 'visible', 'edit': 'invisible'},
},
catalog_name='portal_catalog',
base_query={},
showOn=True,
),
),
ReferenceField(
'SubGroup',
required=False,
allowed_types=('SubGroup',),
referenceClass = HoldingReference,
relationship = 'AnalysisRequestSubGroup',
widget=ReferenceWidget(
label = _("Sub-group"),
size=20,
render_own_label=True,
visible={'edit': 'visible',
'view': 'visible',
'add': 'edit',
'header_table': 'visible',
'sample_registered': {'view': 'visible', 'edit': 'visible', 'add': 'edit'},
'to_be_sampled': {'view': 'visible', 'edit': 'visible'},
'sampled': {'view': 'visible', 'edit': 'visible'},
'to_be_preserved': {'view': 'visible', 'edit': 'visible'},
'sample_due': {'view': 'visible', 'edit': 'visible'},
'sample_prep': {'view': 'visible', 'edit': 'invisible'},
'sample_received': {'view': 'visible', 'edit': 'visible'},
'attachment_due': {'view': 'visible', 'edit': 'visible'},
'to_be_verified': {'view': 'visible', 'edit': 'visible'},
'verified': {'view': 'visible', 'edit': 'visible'},
'published': {'view': 'visible', 'edit': 'invisible'},
'invalid': {'view': 'visible', 'edit': 'invisible'},
},
catalog_name='bika_setup_catalog',
colModel=[
{'columnName': 'Title', 'width': '30',
'label': _('Title'), 'align': 'left'},
{'columnName': 'Description', 'width': '70',
'label': _('Description'), 'align': 'left'},
{'columnName': 'SortKey', 'hidden': True},
{'columnName': 'UID', 'hidden': True},
],
base_query={'inactive_state': 'active'},
sidx='SortKey',
sord='asc',
showOn=True,
),
),
ReferenceField(
'Template',
allowed_types=('ARTemplate',),
referenceClass=HoldingReference,
relationship='AnalysisRequestARTemplate',
mode="rw",
read_permission=permissions.View,
write_permission=permissions.ModifyPortalContent,
widget=ReferenceWidget(
label = _("Template"),
size=20,
render_own_label=True,
visible={'edit': 'visible',
'view': 'visible',
'add': 'edit',
'secondary': 'disabled',
'header_table': 'visible',
'sample_registered': {'view': 'visible', 'edit': 'visible', 'add': 'edit'},
'to_be_sampled': {'view': 'visible', 'edit': 'invisible'},
'sampled': {'view': 'visible', 'edit': 'invisible'},
'to_be_preserved': {'view': 'visible', 'edit': 'invisible'},
'sample_due': {'view': 'visible', 'edit': 'invisible'},
'sample_prep': {'view': 'visible', 'edit': 'invisible'},
'sample_received': {'view': 'visible', 'edit': 'invisible'},
'attachment_due': {'view': 'visible', 'edit': 'invisible'},
'to_be_verified': {'view': 'visible', 'edit': 'invisible'},
'verified': {'view': 'visible', 'edit': 'invisible'},
'published': {'view': 'visible', 'edit': 'invisible'},
'invalid': {'view': 'visible', 'edit': 'invisible'},
},
catalog_name='bika_setup_catalog',
base_query={'inactive_state': 'active'},
showOn=True,
),
),
# TODO: The 'Profile' field will be deleted
ReferenceField(
'Profile',
allowed_types=('AnalysisProfile',),
referenceClass=HoldingReference,
relationship='AnalysisRequestAnalysisProfile',
mode="rw",
read_permission=permissions.View,
write_permission=permissions.ModifyPortalContent,
widget=ReferenceWidget(
label = _("Analysis Profile"),
size=20,
render_own_label=True,
visible=False,
catalog_name='bika_setup_catalog',
base_query={'inactive_state': 'active'},
showOn=False,
),
),
ReferenceField(
'Profiles',
multiValued=1,
allowed_types=('AnalysisProfile',),
referenceClass=HoldingReference,
vocabulary_display_path_bound=sys.maxsize,
relationship='AnalysisRequestAnalysisProfiles',
mode="rw",
read_permission=permissions.View,
write_permission=permissions.ModifyPortalContent,
widget=ReferenceWidget(
label = _("Analysis Profiles"),
size=20,
render_own_label=True,
visible={'edit': 'visible',
'view': 'visible',
'add': 'edit',
'header_table': 'visible',
'sample_registered': {'view': 'visible', 'edit': 'visible', 'add': 'edit'},
'to_be_sampled': {'view': 'visible', 'edit': 'invisible'},
'sampled': {'view': 'visible', 'edit': 'invisible'},
'to_be_preserved': {'view': 'visible', 'edit': 'invisible'},
'sample_due': {'view': 'visible', 'edit': 'invisible'},
'sample_prep': {'view': 'visible', 'edit': 'invisible'},
'sample_received': {'view': 'visible', 'edit': 'invisible'},
'attachment_due': {'view': 'visible', 'edit': 'invisible'},
'to_be_verified': {'view': 'visible', 'edit': 'invisible'},
'verified': {'view': 'visible', 'edit': 'invisible'},
'published': {'view': 'visible', 'edit': 'invisible'},
'invalid': {'view': 'visible', 'edit': 'invisible'},
},
catalog_name='bika_setup_catalog',
base_query={'inactive_state': 'active'},
showOn=True,
),
),
# Sample field
DateTimeField('DateSampled',
mode="rw",
read_permission=permissions.View,
write_permission=SampleSample,
widget = DateTimeWidget(
label = _("Date Sampled"),
size=20,
visible={'edit': 'visible',
'view': 'visible',
'secondary': 'disabled',
'header_table': 'prominent',
'sample_registered': {'view': 'invisible', 'edit': 'invisible'},
'to_be_sampled': {'view': 'invisible', 'edit': 'visible'},
'sampled': {'view': 'invisible', 'edit': 'invisible'},
'to_be_preserved': {'view': 'invisible', 'edit': 'invisible'},
'sample_due': {'view': 'invisible', 'edit': 'invisible'},
'sample_prep': {'view': 'visible', 'edit': 'invisible'},
'sample_received': {'view': 'invisible', 'edit': 'invisible'},
'attachment_due': {'view': 'invisible', 'edit': 'invisible'},
'to_be_verified': {'view': 'invisible', 'edit': 'invisible'},
'verified': {'view': 'invisible', 'edit': 'invisible'},
'published': {'view': 'invisible', 'edit': 'invisible'},
'invalid': {'view': 'invisible', 'edit': 'invisible'},
},
render_own_label=True,
),
),
# Sample field
StringField('Sampler',
mode="rw",
read_permission=permissions.View,
write_permission=SampleSample,
vocabulary='getSamplers',
widget=BikaSelectionWidget(
format='select',
label = _("Sampler"),
# see SamplingWorkflowWidgetVisibility
visible={'edit': 'visible',
'view': 'visible',
'header_table': 'prominent',
'sample_registered': {'view': 'invisible', 'edit': 'invisible'},
'to_be_sampled': {'view': 'invisible', 'edit': 'visible'},
'sampled': {'view': 'visible', 'edit': 'invisible'},
'to_be_preserved': {'view': 'visible', 'edit': 'invisible'},
'sample_due': {'view': 'visible', 'edit': 'invisible'},
'sample_prep': {'view': 'visible', 'edit': 'invisible'},
'sample_received': {'view': 'visible', 'edit': 'invisible'},
'attachment_due': {'view': 'visible', 'edit': 'invisible'},
'to_be_verified': {'view': 'visible', 'edit': 'invisible'},
'verified': {'view': 'visible', 'edit': 'invisible'},
'published': {'view': 'visible', 'edit': 'invisible'},
'invalid': {'view': 'visible', 'edit': 'invisible'},
},
render_own_label=True,
),
),
DateTimeField(
'SamplingDate',
required=1,
mode="rw",
read_permission=permissions.View,
write_permission=permissions.ModifyPortalContent,
widget = DateTimeWidget(
label = _("Sampling Date"),
size=20,
render_own_label=True,
# see SamplingWorkflowWidgetVisibility
visible={'edit': 'visible',
'view': 'visible',
'add': 'edit',
'header_table': 'visible',
'secondary': 'disabled',
'sample_registered': {'view': 'visible', 'edit': 'visible', 'add': 'edit'},
'to_be_sampled': {'view': 'visible', 'edit': 'invisible'},
'sampled': {'view': 'visible', 'edit': 'invisible'},
'to_be_preserved': {'view': 'visible', 'edit': 'invisible'},
'sample_due': {'view': 'visible', 'edit': 'invisible'},
'sample_prep': {'view': 'visible', 'edit': 'invisible'},
'sample_received': {'view': 'visible', 'edit': 'invisible'},
'attachment_due': {'view': 'visible', 'edit': 'invisible'},
'to_be_verified': {'view': 'visible', 'edit': 'invisible'},
'verified': {'view': 'visible', 'edit': 'invisible'},
'published': {'view': 'visible', 'edit': 'invisible'},
'invalid': {'view': 'visible', 'edit': 'invisible'},
},
),
),
ReferenceField(
'SampleType',
required=1,
allowed_types='SampleType',
relationship='AnalysisRequestSampleType',
mode="rw",
read_permission=permissions.View,
write_permission=permissions.ModifyPortalContent,
widget=ReferenceWidget(
label = _("Sample Type"),
description = _("Create a new sample of this type"),
size=20,
render_own_label=True,
visible={'edit': 'visible',
'view': 'visible',
'add': 'edit',
'secondary': 'disabled',
'header_table': 'visible',
'sample_registered': {'view': 'visible', 'edit': 'visible', 'add': 'edit'},
'to_be_sampled': {'view': 'visible', 'edit': 'invisible'},
'sampled': {'view': 'visible', 'edit': 'invisible'},
'to_be_preserved': {'view': 'visible', 'edit': 'invisible'},
'sample_due': {'view': 'visible', 'edit': 'invisible'},
'sample_prep': {'view': 'visible', 'edit': 'invisible'},
'sample_received': {'view': 'visible', 'edit': 'invisible'},
'attachment_due': {'view': 'visible', 'edit': 'invisible'},
'to_be_verified': {'view': 'visible', 'edit': 'invisible'},
'verified': {'view': 'visible', 'edit': 'invisible'},
'published': {'view': 'visible', 'edit': 'invisible'},
'invalid': {'view': 'visible', 'edit': 'invisible'},
},
catalog_name='bika_setup_catalog',
base_query={'inactive_state': 'active'},
showOn=True,
),
),
ReferenceField(
'Specification',
required=0,
allowed_types='AnalysisSpec',
relationship='AnalysisRequestAnalysisSpec',
mode="rw",
read_permission=permissions.View,
write_permission=permissions.ModifyPortalContent,
widget=ReferenceWidget(
label = _("Analysis Specification"),
description = _("Choose default AR specification values"),
size=20,
render_own_label=True,
visible={'edit': 'visible',
'view': 'visible',
'add': 'edit',
'header_table': 'visible',
'sample_registered': {'view': 'visible', 'edit': 'visible', 'add': 'edit'},
'to_be_sampled': {'view': 'visible', 'edit': 'visible'},
'sampled': {'view': 'visible', 'edit': 'visible'},
'to_be_preserved': {'view': 'visible', 'edit': 'visible'},
'sample_due': {'view': 'visible', 'edit': 'visible'},
'sample_prep': {'view': 'visible', 'edit': 'invisible'},
'sample_received': {'view': 'visible', 'edit': 'visible'},
'attachment_due': {'view': 'visible', 'edit': 'visible'},
'to_be_verified': {'view': 'visible', 'edit': 'invisible'},
'verified': {'view': 'visible', 'edit': 'invisible'},
'published': {'view': 'visible', 'edit': 'invisible'},
'invalid': {'view': 'visible', 'edit': 'invisible'},
},
catalog_name='bika_setup_catalog',
colModel=[
{'columnName': 'contextual_title',
'width': '30',
'label': _('Title'),
'align': 'left'},
{'columnName': 'SampleTypeTitle',
'width': '70',
'label': _('SampleType'),
'align': 'left'},
# UID is required in colModel
{'columnName': 'UID', 'hidden': True},
],
showOn=True,
),
),
# see setResultsRange below.
RecordsField('ResultsRange',
required=0,
type='analysisspec',
subfields=('keyword', 'min', 'max', 'error', 'hidemin', 'hidemax', 'rangecomment'),
widget=ComputedWidget(visible=False),
),
ReferenceField(
'PublicationSpecification',
required=0,
allowed_types='AnalysisSpec',
relationship='AnalysisRequestPublicationSpec',
mode="rw",
read_permission=permissions.View,
write_permission=permissions.View,
widget=ReferenceWidget(
label = _("Publication Specification"),
description = _("Set the specification to be used before publishing an AR."),
size=20,
render_own_label=True,
visible={'edit': 'visible',
'view': 'visible',
'header_table': 'visible',
'sample_registered': {'view': 'invisible', 'edit': 'invisible'},
'to_be_sampled': {'view': 'invisible', 'edit': 'invisible'},
'sampled': {'view': 'invisible', 'edit': 'invisible'},
'to_be_preserved': {'view': 'invisible', 'edit': 'invisible'},
'sample_due': {'view': 'invisible', 'edit': 'invisible'},
'sample_prep': {'view': 'visible', 'edit': 'invisible'},
'sample_received': {'view': 'invisible', 'edit': 'invisible'},
'attachment_due': {'view': 'invisible', 'edit': 'invisible'},
'to_be_verified': {'view': 'invisible', 'edit': 'invisible'},
'verified': {'view': 'visible', 'edit': 'visible'},
'published': {'view': 'visible', 'edit': 'visible'},
'invalid': {'view': 'visible', 'edit': 'invisible'},
},
catalog_name='bika_setup_catalog',
base_query={'inactive_state': 'active'},
showOn=True,
),
),
ReferenceField(
'SamplePoint',
allowed_types='SamplePoint',
relationship='AnalysisRequestSamplePoint',
mode="rw",
read_permission=permissions.View,
write_permission=permissions.ModifyPortalContent,
widget=ReferenceWidget(
label = _("Sample Point"),
description = _("Location where sample was taken"),
size=20,
render_own_label=True,
visible={'edit': 'visible',
'view': 'visible',
'add': 'edit',
'secondary': 'disabled',
'header_table': 'visible',
'sample_registered': {'view': 'visible', 'edit': 'visible', 'add': 'edit'},
'to_be_sampled': {'view': 'visible', 'edit': 'visible'}, # LIMS-1159
'sampled': {'view': 'visible', 'edit': 'visible'},
'to_be_preserved': {'view': 'visible', 'edit': 'visible'},
'sample_due': {'view': 'visible', 'edit': 'visible'},
'sample_prep': {'view': 'visible', 'edit': 'invisible'},
'sample_received': {'view': 'visible', 'edit': 'visible'},
'attachment_due': {'view': 'visible', 'edit': 'visible'},
'to_be_verified': {'view': 'visible', 'edit': 'visible'},
'verified': {'view': 'visible', 'edit': 'invisible'},
'published': {'view': 'visible', 'edit': 'invisible'},
'invalid': {'view': 'visible', 'edit': 'invisible'},
},
catalog_name='bika_setup_catalog',
base_query={'inactive_state': 'active'},
showOn=True,
),
),
ReferenceField(
'StorageLocation',
allowed_types='StorageLocation',
relationship='AnalysisRequestStorageLocation',
mode="rw",
read_permission=permissions.View,
write_permission=permissions.ModifyPortalContent,
widget=ReferenceWidget(
label = _("Storage Location"),
description = _("Location where sample is kept"),
size=25,
render_own_label=True,
visible={'edit': 'visible',
'view': 'visible',
'add': 'edit',
'secondary': 'disabled',
'header_table': 'visible',
'sample_registered': {'view': 'visible', 'edit': 'visible', 'add': 'edit'},
'to_be_sampled': {'view': 'visible', 'edit': 'visible'},
'sampled': {'view': 'visible', 'edit': 'visible'},
'to_be_preserved': {'view': 'visible', 'edit': 'visible'},
'sample_due': {'view': 'visible', 'edit': 'visible'},
'sample_prep': {'view': 'visible', 'edit': 'invisible'},
'sample_received': {'view': 'visible', 'edit': 'visible'},
'attachment_due': {'view': 'visible', 'edit': 'visible'},
'to_be_verified': {'view': 'visible', 'edit': 'visible'},
'verified': {'view': 'visible', 'edit': 'visible'},
'published': {'view': 'visible', 'edit': 'invisible'},
'invalid': {'view': 'visible', 'edit': 'invisible'},
},
catalog_name='bika_setup_catalog',
base_query={'inactive_state': 'active'},
showOn=True,
colModel=[{'columnName': 'UID', 'hidden': True},
{'columnName': 'Room', 'width': '15', 'label': _('Room')},
{'columnName': 'StorageType', 'width': '15', 'label': _('Type')},
{'columnName': 'Shelf', 'width': '13', 'label': _('Sh./Ca.')},
{'columnName': 'Box', 'width': '13', 'label': _('Box/Cane')},
{'columnName': 'Position', 'width': '13', 'label': _('Pos.')},
{'columnName': 'Title', 'width': '31', 'label': _('Address')},
],
),
),
StringField(
'ClientOrderNumber',
searchable=True,
mode="rw",
read_permission=permissions.View,
write_permission=permissions.ModifyPortalContent,
widget=StringWidget(
label = _("Client Order Number"),
size=20,
render_own_label=True,
visible={'edit': 'visible',
'view': 'visible',
'add': 'edit',
'header_table': 'visible',
'sample_registered': {'view': 'visible', 'edit': 'visible', 'add': 'edit'},
'to_be_sampled': {'view': 'visible', 'edit': 'visible'},
'sampled': {'view': 'visible', 'edit': 'visible'},
'to_be_preserved': {'view': 'visible', 'edit': 'visible'},
'sample_due': {'view': 'visible', 'edit': 'visible'},
'sample_prep': {'view': 'visible', 'edit': 'invisible'},
'sample_received': {'view': 'visible', 'edit': 'visible'},
'attachment_due': {'view': 'visible', 'edit': 'visible'},
'to_be_verified': {'view': 'visible', 'edit': 'visible'},
'verified': {'view': 'visible', 'edit': 'visible'},
'published': {'view': 'visible', 'edit': 'invisible'},
'invalid': {'view': 'visible', 'edit': 'invisible'},
},
),
),
# Sample field
StringField(
'ClientReference',
searchable=True,
mode="rw",
read_permission=permissions.View,
write_permission=permissions.ModifyPortalContent,
widget=StringWidget(
label = _("Client Reference"),
size=20,
render_own_label=True,
visible={'edit': 'visible',
'view': 'visible',
'add': 'edit',
'secondary': 'disabled',
'header_table': 'visible',
'sample_registered': {'view': 'visible', 'edit': 'visible', 'add': 'edit'},
'to_be_sampled': {'view': 'visible', 'edit': 'visible'},
'sampled': {'view': 'visible', 'edit': 'visible'},
'to_be_preserved': {'view': 'visible', 'edit': 'visible'},
'sample_due': {'view': 'visible', 'edit': 'visible'},
'sample_prep': {'view': 'visible', 'edit': 'invisible'},
'sample_received': {'view': 'visible', 'edit': 'visible'},
'attachment_due': {'view': 'visible', 'edit': 'visible'},
'to_be_verified': {'view': 'visible', 'edit': 'visible'},
'verified': {'view': 'visible', 'edit': 'visible'},
'published': {'view': 'visible', 'edit': 'invisible'},
'invalid': {'view': 'visible', 'edit': 'invisible'},
},
),
),
# Sample field
StringField(
'ClientSampleID',
searchable=True,
mode="rw",
read_permission=permissions.View,
write_permission=permissions.ModifyPortalContent,
widget=StringWidget(
label = _("Client Sample ID"),
size=20,
render_own_label=True,
visible={'edit': 'visible',
'view': 'visible',
'add': 'edit',
'secondary': 'disabled',
'header_table': 'visible',
'sample_registered': {'view': 'visible', 'edit': 'visible'},
'to_be_sampled': {'view': 'visible', 'edit': 'invisible'},
'sampled': {'view': 'visible', 'edit': 'invisible'},
'to_be_preserved': {'view': 'visible', 'edit': 'invisible'},
'sample_due': {'view': 'visible', 'edit': 'invisible'},
'sample_prep': {'view': 'visible', 'edit': 'invisible'},
'sample_received': {'view': 'visible', 'edit': 'invisible'},
'attachment_due': {'view': 'visible', 'edit': 'invisible'},
'to_be_verified': {'view': 'visible', 'edit': 'invisible'},
'verified': {'view': 'visible', 'edit': 'invisible'},
'published': {'view': 'visible', 'edit': 'invisible'},
'invalid': {'view': 'visible', 'edit': 'invisible'},
},
),
),
# Sample field
ReferenceField('SamplingDeviation',
allowed_types = ('SamplingDeviation',),
relationship = 'AnalysisRequestSamplingDeviation',
referenceClass = HoldingReference,
mode="rw",
read_permission=permissions.View,
write_permission=permissions.ModifyPortalContent,
widget=ReferenceWidget(
label = _("Sampling Deviation"),
size=20,
render_own_label=True,
visible={'edit': 'visible',
'view': 'visible',
'add': 'edit',
'secondary': 'disabled',
'header_table': 'visible',
'sample_registered': {'view': 'visible', 'edit': 'visible', 'add': 'edit'},
'to_be_sampled': {'view': 'visible', 'edit': 'visible'},
'sampled': {'view': 'visible', 'edit': 'visible'},
'to_be_preserved': {'view': 'visible', 'edit': 'visible'},
'sample_due': {'view': 'visible', 'edit': 'visible'},
'sample_prep': {'view': 'visible', 'edit': 'invisible'},
'sample_received': {'view': 'visible', 'edit': 'invisible'},
'attachment_due': {'view': 'visible', 'edit': 'invisible'},
'to_be_verified': {'view': 'visible', 'edit': 'invisible'},
'verified': {'view': 'visible', 'edit': 'invisible'},
'published': {'view': 'visible', 'edit': 'invisible'},
'invalid': {'view': 'visible', 'edit': 'invisible'},
},
catalog_name='bika_setup_catalog',
base_query={'inactive_state': 'active'},
showOn=True,
),
),
# Sample field
ReferenceField(
'SampleCondition',
allowed_types = ('SampleCondition',),
relationship = 'AnalysisRequestSampleCondition',
referenceClass = HoldingReference,
mode="rw",
read_permission=permissions.View,
write_permission=permissions.ModifyPortalContent,
widget=ReferenceWidget(
label = _("Sample condition"),
size=20,
render_own_label=True,
visible={'edit': 'visible',
'view': 'visible',
'add': 'edit',
'secondary': 'disabled',
'header_table': 'visible',
'sample_registered': {'view': 'visible', 'edit': 'visible', 'add': 'edit'},
'to_be_sampled': {'view': 'visible', 'edit': 'visible'},
'sampled': {'view': 'visible', 'edit': 'visible'},
'to_be_preserved': {'view': 'visible', 'edit': 'visible'},
'sample_due': {'view': 'visible', 'edit': 'visible'},
'sample_prep': {'view': 'visible', 'edit': 'invisible'},
'sample_received': {'view': 'visible', 'edit': 'invisible'},
'attachment_due': {'view': 'visible', 'edit': 'invisible'},
'to_be_verified': {'view': 'visible', 'edit': 'invisible'},
'verified': {'view': 'visible', 'edit': 'invisible'},
'published': {'view': 'visible', 'edit': 'invisible'},
'invalid': {'view': 'visible', 'edit': 'invisible'},
},
catalog_name='bika_setup_catalog',
base_query={'inactive_state': 'active'},
showOn=True,
),
),
StringField(
'EnvironmentalConditions',
mode="rw",
read_permission=permissions.View,
write_permission=permissions.ModifyPortalContent,
widget=StringWidget(
label=_("Environmental conditions"),
visible={'edit': 'visible',
'view': 'visible',
'add': 'edit',
'header_table': 'prominent',
'sample_registered': {'view': 'visible', 'edit': 'visible', 'add': 'edit'},
'to_be_sampled': {'view': 'visible', 'edit': 'visible'},
'sampled': {'view': 'visible', 'edit': 'visible'},
'to_be_preserved': {'view': 'visible', 'edit': 'visible'},
'sample_received': {'view': 'visible', 'edit': 'visible'},
'attachment_due': {'view': 'visible', 'edit': 'visible'},
'to_be_verified': {'view': 'visible', 'edit': 'visible'},
'verified': {'view': 'visible', 'edit': 'invisible'},
'published': {'view': 'visible', 'edit': 'invisible'},
'invalid': {'view': 'visible', 'edit': 'invisible'},
},
render_own_label=True,
size=20,
),
),
ReferenceField(
'DefaultContainerType',
allowed_types = ('ContainerType',),
relationship = 'AnalysisRequestContainerType',
referenceClass = HoldingReference,
mode="rw",
read_permission=permissions.View,
write_permission=permissions.ModifyPortalContent,
widget=ReferenceWidget(
label = _("Default Container"),
description = _("Default container for new sample partitions"),
size=20,
render_own_label=True,
visible={'edit': 'visible',
'view': 'visible',
'add': 'edit',
'secondary': 'disabled',
'header_table': 'visible',
'sample_registered': {'view': 'visible', 'edit': 'visible', 'add': 'edit'},
'to_be_sampled': {'view': 'visible', 'edit': 'invisible'},
'sampled': {'view': 'visible', 'edit': 'invisible'},
'to_be_preserved': {'view': 'visible', 'edit': 'invisible'},
'sample_due': {'view': 'visible', 'edit': 'invisible'},
'sample_prep': {'view': 'visible', 'edit': 'invisible'},
'sample_received': {'view': 'visible', 'edit': 'invisible'},
'attachment_due': {'view': 'visible', 'edit': 'invisible'},
'to_be_verified': {'view': 'visible', 'edit': 'invisible'},
'verified': {'view': 'visible', 'edit': 'invisible'},
'published': {'view': 'visible', 'edit': 'invisible'},
'invalid': {'view': 'visible', 'edit': 'invisible'},
},
catalog_name='bika_setup_catalog',
base_query={'inactive_state': 'active'},
showOn=True,
),
),
# Sample field
BooleanField(
'AdHoc',
default=False,
mode="rw",
read_permission=permissions.View,
write_permission=permissions.ModifyPortalContent,
widget=BooleanWidget(
label = _("Ad-Hoc"),
render_own_label=True,
visible={'edit': 'visible',
'view': 'visible',
'add': 'edit',
'secondary': 'disabled',
'header_table': 'visible',
'sample_registered': {'view': 'visible', 'edit': 'visible', 'add': 'edit'},
'to_be_sampled': {'view': 'visible', 'edit': 'invisible'},
'sampled': {'view': 'visible', 'edit': 'invisible'},
'to_be_preserved': {'view': 'visible', 'edit': 'invisible'},
'sample_due': {'view': 'visible', 'edit': 'invisible'},
'sample_prep': {'view': 'visible', 'edit': 'invisible'},
'sample_received': {'view': 'visible', 'edit': 'invisible'},
'attachment_due': {'view': 'visible', 'edit': 'invisible'},
'to_be_verified': {'view': 'visible', 'edit': 'invisible'},
'verified': {'view': 'visible', 'edit': 'invisible'},
'published': {'view': 'visible', 'edit': 'invisible'},
'invalid': {'view': 'visible', 'edit': 'invisible'},
},
),
),
# Sample field
BooleanField(
'Composite',
default=False,
mode="rw",
read_permission=permissions.View,
write_permission=permissions.ModifyPortalContent,
widget=BooleanWidget(
label = _("Composite"),
render_own_label=True,
visible={'edit': 'visible',
'view': 'visible',
'add': 'edit',
'secondary': 'disabled',
'header_table': 'visible',
'sample_registered': {'view': 'visible', 'edit': 'visible', 'add': 'edit'},
'to_be_sampled': {'view': 'visible', 'edit': 'visible'},
'sampled': {'view': 'visible', 'edit': 'visible'},
'to_be_preserved': {'view': 'visible', 'edit': 'visible'},
'sample_due': {'view': 'visible', 'edit': 'visible'},
'sample_prep': {'view': 'visible', 'edit': 'invisible'},
'sample_received': {'view': 'visible', 'edit': 'visible'},
'attachment_due': {'view': 'visible', 'edit': 'visible'},
'to_be_verified': {'view': 'visible', 'edit': 'visible'},
'verified': {'view': 'visible', 'edit': 'invisible'},
'published': {'view': 'visible', 'edit': 'invisible'},
'invalid': {'view': 'visible', 'edit': 'invisible'},
},
),
),
BooleanField(
'ReportDryMatter',
default=False,
mode="rw",
read_permission=permissions.View,
write_permission=permissions.ModifyPortalContent,
widget=BooleanWidget(
label = _("Report as Dry Matter"),
render_own_label=True,
description = _("These results can be reported as dry matter"),
visible={'edit': 'visible',
'view': 'visible',
'add': 'edit',
'header_table': 'visible',
'sample_registered': {'view': 'visible', 'edit': 'visible', 'add': 'edit'},
'to_be_sampled': {'view': 'visible', 'edit': 'visible'},
'sampled': {'view': 'visible', 'edit': 'visible'},
'to_be_preserved': {'view': 'visible', 'edit': 'visible'},
'sample_due': {'view': 'visible', 'edit': 'visible'},
'sample_prep': {'view': 'visible', 'edit': 'invisible'},
'sample_received': {'view': 'visible', 'edit': 'visible'},
'attachment_due': {'view': 'visible', 'edit': 'visible'},
'to_be_verified': {'view': 'visible', 'edit': 'visible'},
'verified': {'view': 'visible', 'edit': 'invisible'},
'published': {'view': 'visible', 'edit': 'invisible'},
'invalid': {'view': 'visible', 'edit': 'invisible'},
},
),
),
BooleanField(
'InvoiceExclude',
default=False,
mode="rw",
read_permission=permissions.View,
write_permission=permissions.ModifyPortalContent,
widget=BooleanWidget(
label = _("Invoice Exclude"),
description = _("Select if analyses to be excluded from invoice"),
render_own_label=True,
visible={'edit': 'visible',
'view': 'visible',
'add': 'edit',
'header_table': 'visible',
'sample_registered': {'view': 'visible', 'edit': 'visible', 'add': 'edit'},
'to_be_sampled': {'view': 'visible', 'edit': 'visible'},
'sampled': {'view': 'visible', 'edit': 'visible'},
'to_be_preserved': {'view': 'visible', 'edit': 'visible'},
'sample_due': {'view': 'visible', 'edit': 'visible'},
'sample_prep': {'view': 'visible', 'edit': 'invisible'},
'sample_received': {'view': 'visible', 'edit': 'invisible'},
'attachment_due': {'view': 'visible', 'edit': 'invisible'},
'to_be_verified': {'view': 'visible', 'edit': 'invisible'},
'verified': {'view': 'visible', 'edit': 'invisible'},
'published': {'view': 'visible', 'edit': 'invisible'},
'invalid': {'view': 'visible', 'edit': 'invisible'},
},
),
),
ARAnalysesField(
'Analyses',
required=1,
mode="rw",
read_permission=permissions.View,
write_permission=permissions.ModifyPortalContent,
widget=ComputedWidget(
visible={'edit': 'invisible',
'view': 'invisible',
'sample_registered': {'view': 'visible', 'edit': 'visible', 'add': 'invisible'},
}
),
),
ReferenceField(
'Attachment',
multiValued=1,
allowed_types=('Attachment',),
referenceClass=HoldingReference,
relationship='AnalysisRequestAttachment',
mode="rw",
read_permission=permissions.View,
write_permission=permissions.ModifyPortalContent,
widget=ComputedWidget(
visible={'edit': 'invisible',
'view': 'invisible',
},
)
),
ReferenceField(
'Invoice',
vocabulary_display_path_bound=sys.maxsize,
allowed_types=('Invoice',),
referenceClass=HoldingReference,
relationship='AnalysisRequestInvoice',
mode="rw",
read_permission=permissions.View,
write_permission=permissions.ModifyPortalContent,
widget=ComputedWidget(
visible={'edit': 'invisible',
'view': 'invisible',
},
)
),
DateTimeField(
'DateReceived',
mode="rw",
read_permission=permissions.View,
write_permission=permissions.ModifyPortalContent,
widget=DateTimeWidget(
label = _("Date Received"),
visible={'edit': 'visible',
'view': 'visible',
'header_table': 'visible',
'sample_registered': {'view': 'invisible', 'edit': 'invisible', 'add': 'invisible'},
'to_be_sampled': {'view': 'invisible', 'edit': 'invisible'},
'sampled': {'view': 'invisible', 'edit': 'invisible'},
'to_be_preserved': {'view': 'invisible', 'edit': 'invisible'},
'sample_due': {'view': 'invisible', 'edit': 'invisible'},
'sample_prep': {'view': 'visible', 'edit': 'invisible'},
'sample_received': {'view': 'visible', 'edit': 'invisible'},
'attachment_due': {'view': 'visible', 'edit': 'invisible'},
'to_be_verified': {'view': 'visible', 'edit': 'invisible'},
'verified': {'view': 'visible', 'edit': 'invisible'},
'published': {'view': 'visible', 'edit': 'invisible'},
'invalid': {'view': 'visible', 'edit': 'invisible'},
},
),
),
DateTimeField(
'DatePublished',
mode="rw",
read_permission=permissions.View,
write_permission=permissions.ModifyPortalContent,
widget=DateTimeWidget(
label = _("Date Published"),
visible={'edit': 'visible',
'view': 'visible',
'add': 'invisible',
'secondary': 'invisible',
'header_table': 'visible',
'sample_registered': {'view': 'invisible', 'edit': 'invisible', 'add': 'invisible'},
'to_be_sampled': {'view': 'invisible', 'edit': 'invisible'},
'sampled': {'view': 'invisible', 'edit': 'invisible'},
'to_be_preserved': {'view': 'invisible', 'edit': 'invisible'},
'sample_due': {'view': 'invisible', 'edit': 'invisible'},
'sample_prep': {'view': 'visible', 'edit': 'invisible'},
'sample_received': {'view': 'invisible', 'edit': 'invisible'},
'attachment_due': {'view': 'invisible', 'edit': 'invisible'},
'to_be_verified': {'view': 'invisible', 'edit': 'invisible'},
'verified': {'view': 'invisible', 'edit': 'invisible'},
'published': {'view': 'visible', 'edit': 'invisible'},
'invalid': {'view': 'visible', 'edit': 'invisible'},
},
),
),
TextField(
'Remarks',
searchable=True,
default_content_type='text/x-web-intelligent',
allowable_content_types = ('text/plain', ),
default_output_type="text/plain",
mode="rw",
read_permission=permissions.View,
write_permission=permissions.ModifyPortalContent,
widget=TextAreaWidget(
macro="bika_widgets/remarks",
label = _("Remarks"),
append_only=True,
visible={'edit': 'visible',
'view': 'visible',
'add': 'invisible',
'sample_registered': {'view': 'invisible', 'edit': 'invisible', 'add': 'invisible'},
},
),
),
FixedPointField(
'MemberDiscount',
default_method='getDefaultMemberDiscount',
mode="rw",
read_permission=permissions.View,
write_permission=permissions.ModifyPortalContent,
widget=DecimalWidget(
label = _("Member discount %"),
description = _("Enter percentage value eg. 33.0"),
render_own_label=True,
visible={'edit': 'visible',
'view': 'visible',
'add': 'invisible',
'sample_registered': {'view': 'invisible', 'edit': 'invisible', 'add': 'invisible'},
},
),
),
ComputedField(
'ClientUID',
searchable=True,
expression='here.aq_parent.UID()',
widget=ComputedWidget(
visible=False,
),
),
ComputedField(
'SampleTypeTitle',
searchable=True,
expression="here.getSampleType().Title() if here.getSampleType() else ''",
widget=ComputedWidget(
visible=False,
),
),
ComputedField(
'SamplePointTitle',
searchable=True,
expression="here.getSamplePoint().Title() if here.getSamplePoint() else ''",
widget=ComputedWidget(
visible=False,
),
),
ComputedField(
'SampleUID',
expression="here.getSample() and here.getSample().UID() or ''",
widget=ComputedWidget(
visible=False,
),
),
ComputedField(
'SampleID',
expression="here.getSample() and here.getSample().getId() or ''",
widget=ComputedWidget(
visible=False,
),
),
ComputedField(
'ContactUID',
expression="here.getContact() and here.getContact().UID() or ''",
widget=ComputedWidget(
visible=False,
),
),
ComputedField(
'ProfilesUID',
expression="here.getProfiles() and [profile.UID() for profile in here.getProfiles()] or []",
widget=ComputedWidget(
visible=False,
),
),
ComputedField(
'Invoiced',
expression='here.getInvoice() and True or False',
default=False,
widget=ComputedWidget(
visible=False,
),
),
ReferenceField(
'ChildAnalysisRequest',
allowed_types = ('AnalysisRequest',),
relationship = 'AnalysisRequestChildAnalysisRequest',
referenceClass = HoldingReference,
mode="rw",
read_permission=permissions.View,
write_permission=permissions.ModifyPortalContent,
widget=ReferenceWidget(
visible=False,
),
),
ReferenceField(
'ParentAnalysisRequest',
allowed_types = ('AnalysisRequest',),
relationship = 'AnalysisRequestParentAnalysisRequest',
referenceClass = HoldingReference,
mode="rw",
read_permission=permissions.View,
write_permission=permissions.ModifyPortalContent,
widget=ReferenceWidget(
visible=False,
),
),
StringField('PreparationWorkflow',
mode="rw",
read_permission=permissions.View,
write_permission=permissions.ModifyPortalContent,
vocabulary='getPreparationWorkflows',
acquire=True,
widget=SelectionWidget(
format="select",
label=_("Preparation Workflow"),
visible={'edit': 'visible',
'view': 'visible',
'add': 'edit',
'header_table': 'visible',
'sample_registered': {'view': 'visible', 'edit': 'visible', 'add': 'edit'},
'to_be_sampled': {'view': 'visible', 'edit': 'visible'},
'sampled': {'view': 'visible', 'edit': 'visible'},
'to_be_preserved': {'view': 'visible', 'edit': 'visible'},
'sample_due': {'view': 'visible', 'edit': 'visible'},
'sample_prep': {'view': 'visible', 'edit': 'invisible'},
'sample_received': {'view': 'visible', 'edit': 'invisible'},
'attachment_due': {'view': 'visible', 'edit': 'invisible'},
'to_be_verified': {'view': 'visible', 'edit': 'invisible'},
'verified': {'view': 'visible', 'edit': 'invisible'},
'published': {'view': 'visible', 'edit': 'invisible'},
'invalid': {'view': 'visible', 'edit': 'invisible'},
},
render_own_label=True,
),
),
HistoryAwareReferenceField(
'Priority',
allowed_types=('ARPriority',),
referenceClass=HoldingReference,
relationship='AnalysisRequestPriority',
mode="rw",
read_permission=permissions.View,
write_permission=permissions.ModifyPortalContent,
widget=ReferenceWidget(
label = _("Priority"),
size=10,
render_own_label=True,
visible={'edit': 'visible',
'view': 'visible',
'add': 'edit',
'header_table': 'visible',
'sample_registered': {'view': 'visible', 'edit': 'visible', 'add': 'edit'},
'to_be_sampled': {'view': 'visible', 'edit': 'visible'},
'sampled': {'view': 'visible', 'edit': 'visible'},
'to_be_preserved': {'view': 'visible', 'edit': 'visible'},
'sample_due': {'view': 'visible', 'edit': 'visible'},
'sample_prep': {'view': 'visible', 'edit': 'invisible'},
'sample_received': {'view': 'visible', 'edit': 'visible'},
'attachment_due': {'view': 'visible', 'edit': 'visible'},
'to_be_verified': {'view': 'visible', 'edit': 'visible'},
'verified': {'view': 'visible', 'edit': 'visible'},
'published': {'view': 'visible', 'edit': 'invisible'},
'invalid': {'view': 'visible', 'edit': 'invisible'},
},
catalog_name='bika_setup_catalog',
base_query={'inactive_state': 'active'},
colModel=[
{'columnName': 'Title', 'width': '30',
'label': _('Title'), 'align': 'left'},
{'columnName': 'Description', 'width': '70',
'label': _('Description'), 'align': 'left'},
{'columnName': 'sortKey', 'hidden': True},
{'columnName': 'UID', 'hidden': True},
],
sidx='sortKey',
sord='asc',
showOn=True,
),
),
# For comments or results interpretation
# Old field, to be removed now that ResultsInterpretationDepts
# has been incorporated (LIMS-1628)
TextField(
'ResultsInterpretation',
searchable=True,
mode="rw",
default_content_type = 'text/html', # input content type for the text field
default_output_type = 'text/x-html-safe', # getResultsInterpretation returns a string with HTML tags
# so the text formatting is preserved in the report.
read_permission=permissions.View,
write_permission=permissions.ModifyPortalContent,
widget=RichWidget(
description = _("Comments or results interpretation"),
label = _("Results Interpretation"),
size=10,
allow_file_upload=False,
default_mime_type='text/x-rst',
output_mime_type='text/x-html',
rows=3,
visible=False),
),
RecordsField('ResultsInterpretationDepts',
subfields = ('uid',
'richtext'),
subfield_labels = {'uid': _('Department'),
'richtext': _('Results Interpretation'),},
widget = RichWidget(visible=False),
),
# Custom settings for the assigned analysis services
# https://jira.bikalabs.com/browse/LIMS-1324
# Fields:
# - uid: Analysis Service UID
# - hidden: True/False. Hide/Display in results reports
RecordsField('AnalysisServicesSettings',
required=0,
subfields=('uid', 'hidden',),
widget=ComputedWidget(visible=False),
),
)
)
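# Note: each widget's 'visible' dictionary above maps render modes
# ('view', 'edit', 'add', 'header_table', 'secondary') and workflow states
# (e.g. 'sample_due', 'verified') to a visibility value, so every field can
# be shown, hidden or made read-only depending on the AR's review state.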
schema['title'].required = False
schema['id'].widget.visible = {
'edit': 'invisible',
'view': 'invisible',
}
schema['title'].widget.visible = {
'edit': 'invisible',
'view': 'invisible',
}
schema.moveField('Client', before='Contact')
schema.moveField('ResultsInterpretation', pos='bottom')
schema.moveField('ResultsInterpretationDepts', pos='bottom')
class AnalysisRequest(BaseFolder):
implements(IAnalysisRequest, ISamplePrepWorkflow)
security = ClassSecurityInfo()
displayContentsTab = False
schema = schema
_at_rename_after_creation = True
def _renameAfterCreation(self, check_auto_id=False):
from bika.lims.idserver import renameAfterCreation
renameAfterCreation(self)
def _getCatalogTool(self):
from bika.lims.catalog import getCatalog
return getCatalog(self)
def getRequestID(self):
""" Return the id as RequestID
"""
return safe_unicode(self.getId()).encode('utf-8')
def Title(self):
""" Return the Request ID as title """
return self.getRequestID()
def Description(self):
""" Return searchable data as F """
descr = " ".join((self.getRequestID(), self.aq_parent.Title()))
return safe_unicode(descr).encode('utf-8')
def getClient(self):
if self.aq_parent.portal_type == 'Client':
return self.aq_parent
if self.aq_parent.portal_type == 'Batch':
return self.aq_parent.getClient()
def getClientPath(self):
return "/".join(self.aq_parent.getPhysicalPath())
def getClientTitle(self):
return self.getClient().Title() if self.getClient() else ''
def getContactTitle(self):
return self.getContact().Title() if self.getContact() else ''
def getProfilesTitle(self):
return [profile.Title() for profile in self.getProfiles()]
def getTemplateTitle(self):
return self.getTemplate().Title() if self.getTemplate() else ''
def setPublicationSpecification(self, value):
"Never contains a value; this field is here for the UI."
return None
def getAnalysisCategory(self):
proxies = self.getAnalyses(full_objects=True)
value = []
for proxy in proxies:
val = proxy.getCategoryTitle()
if val not in value:
value.append(val)
return value
def getAnalysisService(self):
proxies = self.getAnalyses(full_objects=True)
value = []
for proxy in proxies:
val = proxy.getServiceTitle()
if val not in value:
value.append(val)
return value
def getAnalysts(self):
proxies = self.getAnalyses(full_objects=True)
value = []
for proxy in proxies:
val = proxy.getAnalyst()
if val not in value:
value.append(val)
return value
def getBatch(self):
# The parent type may be "Batch" during ar_add.
# This function fills the hidden field in ar_add.pt
if self.aq_parent.portal_type == 'Batch':
return self.aq_parent
else:
return self.Schema()['Batch'].get(self)
def getDefaultMemberDiscount(self):
""" compute default member discount if it applies """
if hasattr(self, 'getMemberDiscountApplies'):
if self.getMemberDiscountApplies():
plone = getSite()
settings = plone.bika_setup
return settings.getMemberDiscount()
else:
return "0.00"
def setDefaultPriority(self):
""" compute default priority """
bsc = getToolByName(self, 'bika_setup_catalog')
priorities = bsc(
portal_type='ARPriority',
)
for brain in priorities:
obj = brain.getObject()
if obj.getIsDefault():
self.setPriority(obj)
return
# Priority is not a required field; if no default is defined, leave it unset.
logging.info('Priority: no default priority found')
return
security.declareProtected(View, 'getResponsible')
def getAnalysesNum(self):
""" Return the amount of analyses verified/total in the current AR """
verified = 0
total = 0
for analysis in self.getAnalyses():
review_state = analysis.review_state
if review_state in ['verified' ,'published']:
verified += 1
if review_state not in 'retracted':
total += 1
return verified,total
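# Usage sketch (hypothetical values): verified, total = ar.getAnalysesNum() -> (3, 5)
# means 3 of 5 non-retracted analyses are verified or published.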
def getResponsible(self):
""" Return all manager info of responsible departments """
managers = {}
departments = []
for analysis in self.objectValues('Analysis'):
department = analysis.getService().getDepartment()
if department is None:
continue
department_id = department.getId()
if department_id in departments:
continue
departments.append(department_id)
manager = department.getManager()
if manager is None:
continue
manager_id = manager.getId()
if manager_id not in managers:
managers[manager_id] = {}
managers[manager_id]['salutation'] = safe_unicode(manager.getSalutation())
managers[manager_id]['name'] = safe_unicode(manager.getFullname())
managers[manager_id]['email'] = safe_unicode(manager.getEmailAddress())
managers[manager_id]['phone'] = safe_unicode(manager.getBusinessPhone())
managers[manager_id]['job_title'] = safe_unicode(manager.getJobTitle())
if manager.getSignature():
managers[manager_id]['signature'] = '%s/Signature' % manager.absolute_url()
else:
managers[manager_id]['signature'] = False
managers[manager_id]['departments'] = ''
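# Append this department's title to the manager's comma-separated department list.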
mngr_dept = managers[manager_id]['departments']
if mngr_dept:
mngr_dept += ', '
mngr_dept += safe_unicode(department.Title())
managers[manager_id]['departments'] = mngr_dept
mngr_keys = managers.keys()
mngr_info = {}
mngr_info['ids'] = mngr_keys
mngr_info['dict'] = managers
return mngr_info
security.declareProtected(View, 'getManagers')
def getManagers(self):
""" Return all managers of responsible departments """
manager_ids = []
manager_list = []
departments = []
for analysis in self.objectValues('Analysis'):
department = analysis.getService().getDepartment()
if department is None:
continue
department_id = department.getId()
if department_id in departments:
continue
departments.append(department_id)
manager = department.getManager()
if manager is None:
continue
manager_id = manager.getId()
if manager_id not in manager_ids:
manager_ids.append(manager_id)
manager_list.append(manager)
return manager_list
security.declareProtected(View, 'getLate')
def getLate(self):
""" return True if any analyses are late """
workflow = getToolByName(self, 'portal_workflow')
review_state = workflow.getInfoFor(self, 'review_state', '')
if review_state in ['to_be_sampled', 'to_be_preserved',
'sample_due', 'published']:
return False
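# An analysis counts as late when it is not published, has no dependent
# calculations, and either its result was captured after the due date or
# no result has been captured and the due date has already passed.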
for analysis in self.objectValues('Analysis'):
review_state = workflow.getInfoFor(analysis, 'review_state', '')
if review_state == 'published':
continue
calculation = analysis.getService().getCalculation()
if not calculation \
or (calculation and not calculation.getDependentServices()):
resultdate = analysis.getResultCaptureDate()
duedate = analysis.getDueDate()
if (resultdate and resultdate > duedate) \
or (not resultdate and DateTime() > duedate):
return True
return False
security.declareProtected(View, 'getBillableItems')
def getBillableItems(self):
"""
The main purpose of this function is to obtain the analysis services and profiles from the analysis request
whose prices are needed to quote the analysis request.
If an analysis belongs to a profile, this analysis will only be included in the analyses list if the profile
has disabled "Use Analysis Profile Price".
:return: a tuple of two lists. The first one only contains analysis services not belonging to a profile
with active "Use Analysis Profile Price".
The second list contains the profiles with activated "Use Analysis Profile Price".
"""
workflow = getToolByName(self, 'portal_workflow')
# REMEMBER: Analysis != Analysis services
analyses = []
analysis_profiles = []
to_be_billed = []
# Getting all analysis request analyses
for analysis in self.objectValues('Analysis'):
review_state = workflow.getInfoFor(analysis, 'review_state', '')
if review_state != 'not_requested':
analyses.append(analysis)
# Getting analysis request profiles
for profile in self.getProfiles():
# Getting the analysis profiles which has "Use Analysis Profile Price" enabled
if profile.getUseAnalysisProfilePrice():
analysis_profiles.append(profile)
else:
# we only need the analysis service keywords from these profiles
to_be_billed += [service.getKeyword() for service in profile.getService()]
# So far we have three lists:
# - analyses: all analyses (whether or not they are included in a profile)
# - analysis_profiles: the profiles with "Use Analysis Profile Price" enabled
# - to_be_billed: analysis service keywords from profiles with "Use Analysis Profile Price"
# disabled
# If a profile has its own price, we don't need its analyses' prices, so we remove every
# analysis belonging to that profile. But if another profile contains the same analysis service and has
# "Use Analysis Profile Price" disabled, the service must still be included as billable.
for profile in analysis_profiles:
for analysis_service in profile.getService():
for analysis in analyses:
if analysis_service.getKeyword() == analysis.getService().getKeyword() and \
analysis.getService().getKeyword() not in to_be_billed:
analyses.remove(analysis)
return analyses, analysis_profiles
def getServicesAndProfiles(self):
"""
This function gets all analysis services and all profiles and removes the services belonging to a profile.
:return: a tuple of three lists, where the first list contains the analyses and the second list the profiles.
The third contains the analyses objects used by the profiles.
"""
# Getting requested analyses
workflow = getToolByName(self, 'portal_workflow')
analyses = []
# profile_analyses contains the profiles' analysis objects (analysis != service),
# needed to obtain the correct price later
profile_analyses = []
for analysis in self.objectValues('Analysis'):
review_state = workflow.getInfoFor(analysis, 'review_state', '')
if review_state != 'not_requested':
analyses.append(analysis)
# Getting all profiles
analysis_profiles = self.getProfiles()
# Cleaning services included in profiles
for profile in analysis_profiles:
for analysis_service in profile.getService():
for analysis in analyses:
if analysis_service.getKeyword() == analysis.getService().getKeyword():
analyses.remove(analysis)
profile_analyses.append(analysis)
return analyses, analysis_profiles, profile_analyses
security.declareProtected(View, 'getSubtotal')
def getSubtotal(self):
""" Compute Subtotal (without member discount and without vat)
"""
analyses, a_profiles = self.getBillableItems()
return sum(
[Decimal(obj.getPrice()) for obj in analyses] +
[Decimal(obj.getAnalysisProfilePrice()) for obj in a_profiles]
)
security.declareProtected(View, 'getSubtotalVATAmount')
def getSubtotalVATAmount(self):
""" Compute VAT amount without member discount"""
analyses, a_profiles = self.getBillableItems()
if len(analyses) > 0 or len(a_profiles) > 0:
return sum(
[Decimal(o.getVATAmount()) for o in analyses] +
[Decimal(o.getVATAmount()) for o in a_profiles]
)
return 0
security.declareProtected(View, 'getSubtotalTotalPrice')
def getSubtotalTotalPrice(self):
""" Compute the price with VAT but no member discount"""
return self.getSubtotal() + self.getSubtotalVATAmount()
security.declareProtected(View, 'getDiscountAmount')
def getDiscountAmount(self):
"""
It computes and returns the analysis service's discount amount without VAT
"""
has_client_discount = self.aq_parent.getMemberDiscountApplies()
if has_client_discount:
discount = Decimal(self.getDefaultMemberDiscount())
return Decimal(self.getSubtotal() * discount / 100)
else:
return 0
def getVATAmount(self):
"""
It computes the VAT amount from (subtotal-discount.)*VAT/100, but each analysis has its
own VAT!
:return: the analysis request VAT amount with the discount
"""
has_client_discount = self.aq_parent.getMemberDiscountApplies()
VATAmount = self.getSubtotalVATAmount()
if has_client_discount:
discount = Decimal(self.getDefaultMemberDiscount())
return Decimal((1 - discount/100) * VATAmount)
else:
return VATAmount
security.declareProtected(View, 'getTotalPrice')
def getTotalPrice(self):
"""
It gets the discounted price from analyses and profiles to obtain the total value with the VAT
and the discount applied
:return: the analysis request's total price including the VATs and discounts
"""
return self.getSubtotal() - self.getDiscountAmount() + self.getVATAmount()
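# Alias: getTotal() behaves exactly like getTotalPrice().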
getTotal = getTotalPrice
security.declareProtected(ManageInvoices, 'issueInvoice')
def issueInvoice(self, REQUEST=None, RESPONSE=None):
""" issue invoice
"""
# check for an adhoc invoice batch for this month
now = DateTime()
batch_month = now.strftime('%b %Y')
batch_title = '%s - %s' % (batch_month, 'ad hoc')
invoice_batch = None
for b_proxy in self.portal_catalog(portal_type='InvoiceBatch',
Title=batch_title):
invoice_batch = b_proxy.getObject()
if not invoice_batch:
first_day = DateTime(now.year(), now.month(), 1)
start_of_month = first_day.earliestTime()
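# Find the last day of the current month by jumping past it and stepping
# back one day at a time until the month matches again.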
last_day = first_day + 31
while last_day.month() != now.month():
last_day = last_day - 1
end_of_month = last_day.latestTime()
invoices = self.invoices
batch_id = invoices.generateUniqueId('InvoiceBatch')
invoice_batch = _createObjectByType("InvoiceBatch", invoices, batch_id)
invoice_batch.edit(
title=batch_title,
BatchStartDate=start_of_month,
BatchEndDate=end_of_month,
)
invoice_batch.processForm()
client_uid = self.getClientUID()
# Get the created invoice
invoice = invoice_batch.createInvoice(client_uid, [self, ])
invoice.setAnalysisRequest(self)
# Set the created invoice in the schema
self.Schema()['Invoice'].set(self, invoice)
security.declarePublic('printInvoice')
def printInvoice(self, REQUEST=None, RESPONSE=None):
""" print invoice
"""
invoice = self.getInvoice()
invoice_url = invoice.absolute_url()
RESPONSE.redirect('%s/invoice_print' % invoice_url)
def addARAttachment(self, REQUEST=None, RESPONSE=None):
""" Add the file as an attachment
"""
workflow = getToolByName(self, 'portal_workflow')
this_file = self.REQUEST.form['AttachmentFile_file']
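# The attachment is linked either to a specific analysis (when 'Analysis'
# is present in the form) or to the Analysis Request itself.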
if 'Analysis' in self.REQUEST.form:
analysis_uid = self.REQUEST.form['Analysis']
else:
analysis_uid = None
attachmentid = self.generateUniqueId('Attachment')
attachment = _createObjectByType("Attachment", self.aq_parent,
attachmentid)
attachment.edit(
AttachmentFile=this_file,
AttachmentType=self.REQUEST.form.get('AttachmentType', ''),
AttachmentKeys=self.REQUEST.form['AttachmentKeys'])
attachment.processForm()
attachment.reindexObject()
if analysis_uid:
tool = getToolByName(self, REFERENCE_CATALOG)
analysis = tool.lookupObject(analysis_uid)
others = analysis.getAttachment()
attachments = []
for other in others:
attachments.append(other.UID())
attachments.append(attachment.UID())
analysis.setAttachment(attachments)
if workflow.getInfoFor(analysis, 'review_state') == 'attachment_due':
workflow.doActionFor(analysis, 'attach')
else:
others = self.getAttachment()
attachments = []
for other in others:
attachments.append(other.UID())
attachments.append(attachment.UID())
self.setAttachment(attachments)
if REQUEST['HTTP_REFERER'].endswith('manage_results'):
RESPONSE.redirect('%s/manage_results' % self.absolute_url())
else:
RESPONSE.redirect(self.absolute_url())
def delARAttachment(self, REQUEST=None, RESPONSE=None):
""" delete the attachment """
tool = getToolByName(self, REFERENCE_CATALOG)
if 'Attachment' in self.REQUEST.form:
attachment_uid = self.REQUEST.form['Attachment']
attachment = tool.lookupObject(attachment_uid)
parent_r = attachment.getRequest()
parent_a = attachment.getAnalysis()
parent = parent_a if parent_a else parent_r
others = parent.getAttachment()
attachments = []
for other in others:
if not other.UID() == attachment_uid:
attachments.append(other.UID())
parent.setAttachment(attachments)
client = attachment.aq_parent
ids = [attachment.getId(), ]
BaseFolder.manage_delObjects(client, ids, REQUEST)
RESPONSE.redirect(self.REQUEST.get_header('referer'))
security.declarePublic('getVerifier')
def getVerifier(self):
wtool = getToolByName(self, 'portal_workflow')
mtool = getToolByName(self, 'portal_membership')
verifier = None
try:
review_history = wtool.getInfoFor(self, 'review_history')
except:
return 'access denied'
if not review_history:
return 'no history'
for items in review_history:
action = items.get('action')
if action != 'verify':
continue
actor = items.get('actor')
member = mtool.getMemberById(actor)
verifier = member.getProperty('fullname')
if verifier is None or verifier == '':
verifier = actor
return verifier
security.declarePublic('getContactUIDForUser')
def getContactUIDForUser(self):
""" get the UID of the contact associated with the authenticated
user
"""
user = self.REQUEST.AUTHENTICATED_USER
user_id = user.getUserName()
pc = getToolByName(self, 'portal_catalog')
r = pc(portal_type='Contact',
getUsername=user_id)
if len(r) == 1:
return r[0].UID
security.declarePublic('current_date')
def current_date(self):
""" return current date """
return DateTime()
def getQCAnalyses(self, qctype=None, review_state=None):
""" return the QC analyses performed in the worksheet in which, at
least, one sample of this AR is present.
Depending on qctype value, returns the analyses of:
- 'b': all Blank Reference Samples used in related worksheet/s
- 'c': all Control Reference Samples used in related worksheet/s
- 'd': duplicates only for samples contained in this AR
If qctype is None, returns all types of QC analyses mentioned above
"""
qcanalyses = []
suids = []
ans = self.getAnalyses()
wf = getToolByName(self, 'portal_workflow')
for an in ans:
an = an.getObject()
if an.getServiceUID() not in suids:
suids.append(an.getServiceUID())
for an in ans:
an = an.getObject()
br = an.getBackReferences('WorksheetAnalysis')
if (len(br) > 0):
ws = br[0]
was = ws.getAnalyses()
for wa in was:
if wa.portal_type == 'DuplicateAnalysis' \
and wa.getRequestID() == self.id \
and wa not in qcanalyses \
and (qctype is None or wa.getReferenceType() == qctype) \
and (review_state is None or wf.getInfoFor(wa, 'review_state') in review_state):
qcanalyses.append(wa)
elif wa.portal_type == 'ReferenceAnalysis' \
and wa.getServiceUID() in suids \
and wa not in qcanalyses \
and (qctype is None or wa.getReferenceType() == qctype) \
and (review_state is None or wf.getInfoFor(wa, 'review_state') in review_state):
qcanalyses.append(wa)
return qcanalyses
def isInvalid(self):
""" return if the Analysis Request has been invalidated
"""
workflow = getToolByName(self, 'portal_workflow')
return workflow.getInfoFor(self, 'review_state') == 'invalid'
def getLastChild(self):
""" return the last child Request due to invalidation
"""
child = self.getChildAnalysisRequest()
while (child and child.getChildAnalysisRequest()):
child = child.getChildAnalysisRequest()
return child
def getRequestedAnalyses(self):
"""
Return all requested analyses, whether or not they belong to an analysis profile.
"""
result = []
cats = {}
workflow = getToolByName(self, 'portal_workflow')
for analysis in self.getAnalyses(full_objects=True):
review_state = workflow.getInfoFor(analysis, 'review_state')
if review_state == 'not_requested':
continue
service = analysis.getService()
category_name = service.getCategoryTitle()
if category_name not in cats:
cats[category_name] = {}
cats[category_name][analysis.Title()] = analysis
cat_keys = cats.keys()
cat_keys.sort(lambda x, y: cmp(x.lower(), y.lower()))
for cat_key in cat_keys:
analyses = cats[cat_key]
analysis_keys = analyses.keys()
analysis_keys.sort(lambda x, y: cmp(x.lower(), y.lower()))
for analysis_key in analysis_keys:
result.append(analyses[analysis_key])
return result
def getSamplingRoundUID(self):
"""
Obtains the sampling round UID
:return: a UID
"""
if self.getSamplingRound():
return self.getSamplingRound().UID()
else:
return ''
def setResultsRange(self, value=None):
"""Sets the spec values for this AR.
1 - Client specs where (spec.Title) matches (ar.SampleType.Title)
2 - Lab specs where (spec.Title) matches (ar.SampleType.Title)
3 - Take override values from instance.Specification
4 - Take override values from the form (passed here as parameter 'value').
The underlying field value is a list of dictionaries.
The value parameter may be a list of dictionaries, or a dictionary (of
dictionaries). In the last case, the keys are irrelevant, but in both
cases the specs must contain, at minimum, the "keyword", "min", "max",
and "error" fields.
Value will be stored in ResultsRange field as list of dictionaries
"""
rr = {}
sample = self.getSample()
if not sample:
# portal_factory
return []
stt = self.getSample().getSampleType().Title()
bsc = getToolByName(self, 'bika_setup_catalog')
# 1 or 2: rr = Client specs where (spec.Title) matches (ar.SampleType.Title)
for folder in self.aq_parent, self.bika_setup.bika_analysisspecs:
proxies = bsc(portal_type='AnalysisSpec',
getSampleTypeTitle=stt,
ClientUID=folder.UID())
if proxies:
rr = dicts_to_dict(proxies[0].getObject().getResultsRange(), 'keyword')
break
# 3: rr += override values from instance.Specification
ar_spec = self.getSpecification()
if ar_spec:
ar_spec_rr = ar_spec.getResultsRange()
rr.update(dicts_to_dict(ar_spec_rr, 'keyword'))
# 4: rr += override values from the form (value=dict key=service_uid)
if value:
if type(value) in (list, tuple):
value = dicts_to_dict(value, "keyword")
elif type(value) == dict:
value = dicts_to_dict(value.values(), "keyword")
rr.update(value)
return self.Schema()['ResultsRange'].set(self, rr.values())
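    # Illustrative sketch (keyword and bounds below are hypothetical): the
    # `value` parameter accepts either a list of spec dicts or a dict of such
    # dicts, each carrying at least 'keyword', 'min', 'max' and 'error', e.g.
    #
    #   ar.setResultsRange([
    #       {'keyword': 'Ca', 'min': '2', 'max': '9', 'error': '10'},
    #   ])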
    # Then a series of fields which are defined on the AR, but need to be set
    # and read from the sample
security.declarePublic('setSamplingDate')
def setSamplingDate(self, value):
sample = self.getSample()
if sample and value:
sample.setSamplingDate(value)
security.declarePublic('getSamplingDate')
def getSamplingDate(self):
sample = self.getSample()
if sample:
return sample.getSamplingDate()
security.declarePublic('setSampler')
def setSampler(self, value):
sample = self.getSample()
if sample and value:
sample.setSampler(value)
self.Schema()['Sampler'].set(self, value)
security.declarePublic('getSampler')
def getSampler(self):
sample = self.getSample()
if sample:
return sample.getSampler()
return self.Schema().getField('Sampler').get(self)
security.declarePublic('setDateSampled')
def setDateSampled(self, value):
sample = self.getSample()
if sample and value:
sample.setDateSampled(value)
self.Schema()['DateSampled'].set(self, value)
security.declarePublic('getDateSampled')
def getDateSampled(self):
sample = self.getSample()
if sample:
return sample.getDateSampled()
return self.Schema().getField('DateSampled').get(self)
security.declarePublic('setSamplePoint')
def setSamplePoint(self, value):
sample = self.getSample()
if sample and value:
sample.setSamplePoint(value)
self.Schema()['SamplePoint'].set(self, value)
    security.declarePublic('getSamplePoint')
def getSamplePoint(self):
sample = self.getSample()
if sample:
return sample.getSamplePoint()
return self.Schema().getField('SamplePoint').get(self)
security.declarePublic('setSampleType')
def setSampleType(self, value):
sample = self.getSample()
if sample and value:
sample.setSampleType(value)
self.Schema()['SampleType'].set(self, value)
security.declarePublic('getSampleType')
def getSampleType(self):
sample = self.getSample()
if sample:
return sample.getSampleType()
return self.Schema().getField('SampleType').get(self)
security.declarePublic('setClientReference')
def setClientReference(self, value):
sample = self.getSample()
if sample and value:
sample.setClientReference(value)
self.Schema()['ClientReference'].set(self, value)
security.declarePublic('getClientReference')
def getClientReference(self):
sample = self.getSample()
if sample:
return sample.getClientReference()
return self.Schema().getField('ClientReference').get(self)
security.declarePublic('setClientSampleID')
def setClientSampleID(self, value):
sample = self.getSample()
if sample and value:
sample.setClientSampleID(value)
self.Schema()['ClientSampleID'].set(self, value)
security.declarePublic('getClientSampleID')
def getClientSampleID(self):
sample = self.getSample()
if sample:
return sample.getClientSampleID()
return self.Schema().getField('ClientSampleID').get(self)
security.declarePublic('setSamplingDeviation')
def setSamplingDeviation(self, value):
sample = self.getSample()
if sample and value:
sample.setSamplingDeviation(value)
self.Schema()['SamplingDeviation'].set(self, value)
security.declarePublic('getSamplingDeviation')
def getSamplingDeviation(self):
sample = self.getSample()
if sample:
return sample.getSamplingDeviation()
return self.Schema().getField('SamplingDeviation').get(self)
security.declarePublic('setSampleCondition')
def setSampleCondition(self, value):
sample = self.getSample()
if sample and value:
sample.setSampleCondition(value)
self.Schema()['SampleCondition'].set(self, value)
security.declarePublic('getSampleCondition')
def getSampleCondition(self):
sample = self.getSample()
if sample:
return sample.getSampleCondition()
return self.Schema().getField('SampleCondition').get(self)
security.declarePublic('setEnvironmentalConditions')
def setEnvironmentalConditions(self, value):
sample = self.getSample()
if sample and value:
sample.setEnvironmentalConditions(value)
self.Schema()['EnvironmentalConditions'].set(self, value)
security.declarePublic('getEnvironmentalConditions')
def getEnvironmentalConditions(self):
sample = self.getSample()
if sample:
return sample.getEnvironmentalConditions()
return self.Schema().getField('EnvironmentalConditions').get(self)
security.declarePublic('setComposite')
def setComposite(self, value):
sample = self.getSample()
if sample and value:
sample.setComposite(value)
self.Schema()['Composite'].set(self, value)
security.declarePublic('getComposite')
def getComposite(self):
sample = self.getSample()
if sample:
return sample.getComposite()
return self.Schema().getField('Composite').get(self)
security.declarePublic('setStorageLocation')
def setStorageLocation(self, value):
sample = self.getSample()
if sample and value:
sample.setStorageLocation(value)
self.Schema()['StorageLocation'].set(self, value)
security.declarePublic('getStorageLocation')
def getStorageLocation(self):
sample = self.getSample()
if sample:
return sample.getStorageLocation()
return self.Schema().getField('StorageLocation').get(self)
security.declarePublic('setAdHoc')
def setAdHoc(self, value):
sample = self.getSample()
if sample and value:
sample.setAdHoc(value)
self.Schema()['AdHoc'].set(self, value)
security.declarePublic('getAdHoc')
def getAdHoc(self):
sample = self.getSample()
if sample:
return sample.getAdHoc()
return self.Schema().getField('AdHoc').get(self)
def getSamplers(self):
return getUsers(self, ['LabManager', 'Sampler'])
def getPreparationWorkflows(self):
"""Return a list of sample preparation workflows. These are identified
by scanning all workflow IDs for those beginning with "sampleprep".
"""
wf = self.portal_workflow
ids = wf.getWorkflowIds()
sampleprep_ids = [wid for wid in ids if wid.startswith('sampleprep')]
prep_workflows = [['', ''],]
for workflow_id in sampleprep_ids:
workflow = wf.getWorkflowById(workflow_id)
prep_workflows.append([workflow_id, workflow.title])
return DisplayList(prep_workflows)
def getDepartments(self):
""" Returns a set with the departments assigned to the Analyses
from this Analysis Request
"""
ans = [an.getObject() for an in self.getAnalyses()]
depts = [an.getService().getDepartment() for an in ans if an.getService().getDepartment()]
return set(depts)
def getResultsInterpretationByDepartment(self, department=None):
""" Returns the results interpretation for this Analysis Request
and department. If department not set, returns the results
interpretation tagged as 'General'.
Returns a dict with the following keys:
{'uid': <department_uid> or 'general',
'richtext': <text/plain>}
"""
uid = department.UID() if department else 'general'
rows = self.Schema()['ResultsInterpretationDepts'].get(self)
row = [row for row in rows if row.get('uid') == uid]
if len(row) > 0:
row = row[0]
        elif uid == 'general' \
and hasattr(self, 'getResultsInterpretation') \
and self.getResultsInterpretation():
row = {'uid': uid, 'richtext': self.getResultsInterpretation()}
else:
            row = {'uid': uid, 'richtext': ''}
return row
def getAnalysisServiceSettings(self, uid):
""" Returns a dictionary with the settings for the analysis
service that match with the uid provided.
If there are no settings for the analysis service and
analysis requests:
1. looks for settings in AR's ARTemplate. If found, returns
the settings for the AnalysisService set in the Template
2. If no settings found, looks in AR's ARProfile. If found,
returns the settings for the AnalysisService from the
AR Profile. Otherwise, returns a one entry dictionary
with only the key 'uid'
"""
sets = [s for s in self.getAnalysisServicesSettings() \
if s.get('uid','') == uid]
# Created by using an ARTemplate?
if not sets and self.getTemplate():
adv = self.getTemplate().getAnalysisServiceSettings(uid)
sets = [adv] if 'hidden' in adv else []
# Created by using an AR Profile?
if not sets and self.getProfiles():
adv = []
adv += [profile.getAnalysisServiceSettings(uid) for profile in self.getProfiles()]
sets = adv if 'hidden' in adv[0] else []
return sets[0] if sets else {'uid': uid}
def getPartitions(self):
"""
        This function returns the partitions from the analysis request's analyses.
:return: a list with the full partition objects
"""
analyses = self.getRequestedAnalyses()
partitions = []
for analysis in analyses:
if analysis.getSamplePartition() not in partitions:
partitions.append(analysis.getSamplePartition())
return partitions
def getContainers(self):
"""
        This function returns the containers from the analysis request's analyses.
        :return: a list with the full container objects
"""
partitions = self.getPartitions()
containers = []
for partition in partitions:
if partition.getContainer():
containers.append(partition.getContainer())
return containers
def isAnalysisServiceHidden(self, uid):
""" Checks if the analysis service that match with the uid
provided must be hidden in results.
If no hidden assignment has been set for the analysis in
this request, returns the visibility set to the analysis
itself.
Raise a TypeError if the uid is empty or None
Raise a ValueError if there is no hidden assignment in this
request or no analysis service found for this uid.
"""
if not uid:
raise TypeError('None type or empty uid')
sets = self.getAnalysisServiceSettings(uid)
if 'hidden' not in sets:
uc = getToolByName(self, 'uid_catalog')
serv = uc(UID=uid)
if serv and len(serv) == 1:
return serv[0].getObject().getRawHidden()
else:
raise ValueError('%s is not valid' % uid)
return sets.get('hidden', False)
def guard_unassign_transition(self):
"""Allow or disallow transition depending on our children's states
"""
if not isBasicTransitionAllowed(self):
return False
if self.getAnalyses(worksheetanalysis_review_state='unassigned'):
return True
if not self.getAnalyses(worksheetanalysis_review_state='assigned'):
return True
return False
def guard_assign_transition(self):
"""Allow or disallow transition depending on our children's states
"""
if not isBasicTransitionAllowed(self):
return False
if not self.getAnalyses(worksheetanalysis_review_state='assigned'):
return False
if self.getAnalyses(worksheetanalysis_review_state='unassigned'):
return False
return True
def guard_receive_transition(self):
"""Prevent the receive transition from being available:
- if object is cancelled
- if any related ARs have field analyses with no result.
"""
if not isBasicTransitionAllowed(self):
return False
# check if any related ARs have field analyses with no result.
for ar in self.getSample().getAnalysisRequests():
field_analyses = ar.getAnalyses(getPointOfCapture='field',
full_objects=True)
no_results = [a for a in field_analyses if a.getResult() == '']
if no_results:
return False
return True
def guard_sample_prep_transition(self):
sample = self.getSample()
return sample.guard_sample_prep_transition()
def guard_sample_prep_complete_transition(self):
sample = self.getSample()
return sample.guard_sample_prep_complete_transition()
def workflow_script_receive(self):
if skip(self, "receive"):
return
workflow = getToolByName(self, 'portal_workflow')
self.setDateReceived(DateTime())
self.reindexObject(idxs=["review_state", "getDateReceived", ])
# receive the AR's sample
sample = self.getSample()
if not skip(sample, 'receive', peek=True):
# unless this is a secondary AR
if workflow.getInfoFor(sample, 'review_state') == 'sample_due':
workflow.doActionFor(sample, 'receive')
# receive all analyses in this AR.
analyses = self.getAnalyses(review_state='sample_due')
for analysis in analyses:
if not skip(analysis, 'receive'):
workflow.doActionFor(analysis.getObject(), 'receive')
def workflow_script_preserve(self):
if skip(self, "preserve"):
return
workflow = getToolByName(self, 'portal_workflow')
# transition our sample
sample = self.getSample()
if not skip(sample, "preserve", peek=True):
workflow.doActionFor(sample, "preserve")
def workflow_script_submit(self):
if skip(self, "submit"):
return
self.reindexObject(idxs=["review_state", ])
def workflow_script_sampling_workflow(self):
if skip(self, "sampling_workflow"):
return
sample = self.getSample()
if sample.getSamplingDate() > DateTime():
sample.future_dated = True
def workflow_script_no_sampling_workflow(self):
if skip(self, "no_sampling_workflow"):
return
sample = self.getSample()
if sample.getSamplingDate() > DateTime():
sample.future_dated = True
def workflow_script_attach(self):
if skip(self, "attach"):
return
self.reindexObject(idxs=["review_state", ])
# Don't cascade. Shouldn't be attaching ARs for now (if ever).
return
def workflow_script_sample(self):
# no skip check here: the sampling workflow UI is odd
# if skip(self, "sample"):
# return
# transition our sample
workflow = getToolByName(self, 'portal_workflow')
sample = self.getSample()
if not skip(sample, "sample", peek=True):
workflow.doActionFor(sample, "sample")
# def workflow_script_to_be_preserved(self):
# if skip(self, "to_be_preserved"):
# return
# pass
# def workflow_script_sample_due(self):
# if skip(self, "sample_due"):
# return
# pass
# def workflow_script_retract(self):
# if skip(self, "retract"):
# return
# pass
def workflow_script_verify(self):
if skip(self, "verify"):
return
self.reindexObject(idxs=["review_state", ])
        if "verify all analyses" not in self.REQUEST['workflow_skiplist']:
# verify all analyses in this AR.
analyses = self.getAnalyses(review_state='to_be_verified')
for analysis in analyses:
doActionFor(analysis.getObject(), "verify")
def workflow_script_publish(self):
if skip(self, "publish"):
return
self.reindexObject(idxs=["review_state", "getDatePublished", ])
        if "publish all analyses" not in self.REQUEST['workflow_skiplist']:
# publish all analyses in this AR. (except not requested ones)
analyses = self.getAnalyses(review_state='verified')
for analysis in analyses:
doActionFor(analysis.getObject(), "publish")
def workflow_script_reinstate(self):
if skip(self, "reinstate"):
return
self.reindexObject(idxs=["cancellation_state", ])
# activate all analyses in this AR.
analyses = self.getAnalyses(cancellation_state='cancelled')
for analysis in analyses:
doActionFor(analysis.getObject(), 'reinstate')
def workflow_script_cancel(self):
if skip(self, "cancel"):
return
self.reindexObject(idxs=["cancellation_state", ])
# deactivate all analyses in this AR.
analyses = self.getAnalyses(cancellation_state='active')
for analysis in analyses:
doActionFor(analysis.getObject(), 'cancel')
atapi.registerType(AnalysisRequest, PROJECTNAME)
|
{
"content_hash": "945f913b75d1ffdcdb62d305a010013b",
"timestamp": "",
"source": "github",
"line_count": 2588,
"max_line_length": 117,
"avg_line_length": 43.710200927357036,
"alnum_prop": 0.5189441487951062,
"repo_name": "hocinebendou/bika.gsoc",
"id": "abf44f044cdf32cefa2c2f53722c74ccc2695927",
"size": "113122",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bika/lims/content/analysisrequest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "403"
},
{
"name": "COBOL",
"bytes": "5987"
},
{
"name": "CSS",
"bytes": "29758"
},
{
"name": "JavaScript",
"bytes": "411425"
},
{
"name": "Python",
"bytes": "4330980"
},
{
"name": "RobotFramework",
"bytes": "239735"
},
{
"name": "Shell",
"bytes": "11201"
}
],
"symlink_target": ""
}
|
import os
def getOutput(file):
lines = []
    while True:
        line = file.readline()
        if contains(line, "Total tests run:"):
            lines.append(line)
        if contains(line, "[echo] * running test suite"):
            lines.append(line)
        if not line:
            break
    return lines
def contains(string, toSearch):
return string.find(toSearch) >= 0
def isFailure(s):
    return not contains(s, "Failures: 0") or not contains(s, "Skips: 0")
def isDescr(s):
    return contains(s, "[echo] * running test suite")
def isException(s):
    return contains(s, "[testng] Caused by:")
def getFailures(lines):
    failures = []
    for s in lines:
        if (isFailure(s) and not isDescr(s)) or isException(s):
            failures.append(s)
    return failures
os.system("ant doTest > log.deleteMe")
file = open("log.deleteMe", "r")
lines = getOutput(file)
failures = getFailures(lines)
print "These tests failed:"
for failure in failures:
    index = lines.index(failure)
    descIndex = index - 1
    desc = lines[descIndex]
    failed = desc.split("test-build")[1]
    result = failed.replace(" ...", "")[1:]
print result
print "Run this test alone to get more information about it"
file.close()
#os.remove("xxx")
|
{
"content_hash": "166f263b8de441f78386f5aade4c3d0d",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 72,
"avg_line_length": 21.64406779661017,
"alnum_prop": 0.6115896632732968,
"repo_name": "fregaham/KiWi",
"id": "c493e65ccbae1d080968070f780de3885d19a091",
"size": "1586",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "doTest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Java",
"bytes": "8898756"
},
{
"name": "JavaScript",
"bytes": "3758087"
},
{
"name": "Python",
"bytes": "1586"
},
{
"name": "Shell",
"bytes": "1368"
}
],
"symlink_target": ""
}
|
import numpy as np
def linear(r):
return -r
def thin_plate_spline(r):
if r == 0:
return 0.0
else:
return r**2*np.log(r)
def cubic(r):
return r**3
def quintic(r):
return -r**5
def multiquadric(r):
return -np.sqrt(r**2 + 1)
def inverse_multiquadric(r):
return 1/np.sqrt(r**2 + 1)
def inverse_quadratic(r):
return 1/(r**2 + 1)
def gaussian(r):
return np.exp(-r**2)
NAME_TO_FUNC = {
"linear": linear,
"thin_plate_spline": thin_plate_spline,
"cubic": cubic,
"quintic": quintic,
"multiquadric": multiquadric,
"inverse_multiquadric": inverse_multiquadric,
"inverse_quadratic": inverse_quadratic,
"gaussian": gaussian
}
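# Illustrative sketch (not part of the original module): the dispatch table above
# maps a kernel name to its radial function, e.g.
#
#   func = NAME_TO_FUNC["thin_plate_spline"]
#   func(2.0)   # 2.0**2 * log(2.0) ~= 2.77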
def kernel_vector(x, y, kernel_func, out):
"""Evaluate RBFs, with centers at `y`, at the point `x`."""
for i in range(y.shape[0]):
out[i] = kernel_func(np.linalg.norm(x - y[i]))
def polynomial_vector(x, powers, out):
"""Evaluate monomials, with exponents from `powers`, at the point `x`."""
for i in range(powers.shape[0]):
out[i] = np.prod(x**powers[i])
def kernel_matrix(x, kernel_func, out):
"""Evaluate RBFs, with centers at `x`, at `x`."""
for i in range(x.shape[0]):
for j in range(i+1):
out[i, j] = kernel_func(np.linalg.norm(x[i] - x[j]))
out[j, i] = out[i, j]
def polynomial_matrix(x, powers, out):
"""Evaluate monomials, with exponents from `powers`, at `x`."""
for i in range(x.shape[0]):
for j in range(powers.shape[0]):
out[i, j] = np.prod(x[i]**powers[j])
# pythran export _kernel_matrix(float[:, :], str)
def _kernel_matrix(x, kernel):
"""Return RBFs, with centers at `x`, evaluated at `x`."""
out = np.empty((x.shape[0], x.shape[0]), dtype=float)
kernel_func = NAME_TO_FUNC[kernel]
kernel_matrix(x, kernel_func, out)
return out
# pythran export _polynomial_matrix(float[:, :], int[:, :])
def _polynomial_matrix(x, powers):
"""Return monomials, with exponents from `powers`, evaluated at `x`."""
out = np.empty((x.shape[0], powers.shape[0]), dtype=float)
polynomial_matrix(x, powers, out)
return out
# pythran export _build_system(float[:, :],
# float[:, :],
# float[:],
# str,
# float,
# int[:, :])
def _build_system(y, d, smoothing, kernel, epsilon, powers):
"""Build the system used to solve for the RBF interpolant coefficients.
Parameters
----------
y : (P, N) float ndarray
Data point coordinates.
d : (P, S) float ndarray
Data values at `y`.
smoothing : (P,) float ndarray
Smoothing parameter for each data point.
kernel : str
Name of the RBF.
epsilon : float
Shape parameter.
powers : (R, N) int ndarray
The exponents for each monomial in the polynomial.
Returns
-------
lhs : (P + R, P + R) float ndarray
Left-hand side matrix.
rhs : (P + R, S) float ndarray
Right-hand side matrix.
shift : (N,) float ndarray
Domain shift used to create the polynomial matrix.
scale : (N,) float ndarray
Domain scaling used to create the polynomial matrix.
"""
p = d.shape[0]
s = d.shape[1]
r = powers.shape[0]
kernel_func = NAME_TO_FUNC[kernel]
# Shift and scale the polynomial domain to be between -1 and 1
mins = np.min(y, axis=0)
maxs = np.max(y, axis=0)
shift = (maxs + mins)/2
scale = (maxs - mins)/2
# The scale may be zero if there is a single point or all the points have
# the same value for some dimension. Avoid division by zero by replacing
# zeros with ones.
scale[scale == 0.0] = 1.0
yeps = y*epsilon
yhat = (y - shift)/scale
# Transpose to make the array fortran contiguous. This is required for
# dgesv to not make a copy of lhs.
lhs = np.empty((p + r, p + r), dtype=float).T
kernel_matrix(yeps, kernel_func, lhs[:p, :p])
polynomial_matrix(yhat, powers, lhs[:p, p:])
lhs[p:, :p] = lhs[:p, p:].T
lhs[p:, p:] = 0.0
for i in range(p):
lhs[i, i] += smoothing[i]
# Transpose to make the array fortran contiguous.
rhs = np.empty((s, p + r), dtype=float).T
rhs[:p] = d
rhs[p:] = 0.0
return lhs, rhs, shift, scale
# pythran export _evaluate(float[:, :],
# float[:, :],
# str,
# float,
# int[:, :],
# float[:],
# float[:],
# float[:, :])
def _evaluate(x, y, kernel, epsilon, powers, shift, scale, coeffs):
"""Evaluate the RBF interpolant at `x`.
Parameters
----------
x : (Q, N) float ndarray
Interpolation point coordinates.
y : (P, N) float ndarray
Data point coordinates.
kernel : str
Name of the RBF.
epsilon : float
Shape parameter.
powers : (R, N) int ndarray
The exponents for each monomial in the polynomial.
shift : (N,) float ndarray
Shifts the polynomial domain for numerical stability.
scale : (N,) float ndarray
Scales the polynomial domain for numerical stability.
coeffs : (P + R, S) float ndarray
Coefficients for each RBF and monomial.
Returns
-------
(Q, S) float ndarray
"""
q = x.shape[0]
p = y.shape[0]
r = powers.shape[0]
s = coeffs.shape[1]
kernel_func = NAME_TO_FUNC[kernel]
yeps = y*epsilon
xeps = x*epsilon
xhat = (x - shift)/scale
out = np.zeros((q, s), dtype=float)
vec = np.empty((p + r,), dtype=float)
for i in range(q):
kernel_vector(xeps[i], yeps, kernel_func, vec[:p])
polynomial_vector(xhat[i], powers, vec[p:])
# Compute the dot product between coeffs and vec. Do not use np.dot
# because that introduces build complications with BLAS (see
# https://github.com/serge-sans-paille/pythran/issues/1346)
for j in range(s):
for k in range(p + r):
out[i, j] += coeffs[k, j]*vec[k]
return out
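# Illustrative end-to-end sketch (assumptions: 1-D data, linear kernel, degree-1
# polynomial; this only demonstrates the helpers above, not how
# scipy.interpolate.RBFInterpolator itself wires them together):
#
#   y = np.linspace(0.0, 1.0, 5)[:, None]             # (P, N) data points
#   d = np.sin(2*np.pi*y)                              # (P, S) data values
#   smoothing = np.zeros(y.shape[0])
#   powers = np.array([[0], [1]])                      # monomials 1 and x
#   lhs, rhs, shift, scale = _build_system(y, d, smoothing, "linear", 1.0, powers)
#   coeffs = np.linalg.solve(lhs, rhs)
#   _evaluate(np.array([[0.25]]), y, "linear", 1.0, powers, shift, scale, coeffs)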
|
{
"content_hash": "91d34dcc2ec2c68d3dfcb6301675a465",
"timestamp": "",
"source": "github",
"line_count": 218,
"max_line_length": 77,
"avg_line_length": 28.61467889908257,
"alnum_prop": 0.5591535748637384,
"repo_name": "serge-sans-paille/pythran",
"id": "2c23a431fcc4f869f3a0cce148a894286ff5baa9",
"size": "6238",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pythran/tests/scipy/_rbfinterp.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "2074873"
},
{
"name": "Cython",
"bytes": "1701"
},
{
"name": "Jupyter Notebook",
"bytes": "27461"
},
{
"name": "Makefile",
"bytes": "1162"
},
{
"name": "Python",
"bytes": "2025760"
}
],
"symlink_target": ""
}
|
"""
Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__author__ = ['davidharcombe@google.com (David Harcombe)']
from googleapiclient.discovery import Resource
from classes.sa360_report_validation.sa360_field_validator import SA360Validator
class BidStrategy(SA360Validator):
def __init__(self,
sa360_service: Resource = None,
agency: int = None,
advertiser: int = None) -> None:
super().__init__(sa360_service, agency, advertiser)
self.fields = [
"status",
"creationTimestamp",
"lastModifiedTimestamp",
"agency",
"agencyId",
"advertiser",
"advertiserId",
"bidStrategyId",
"bidStrategy",
"bidStrategyGoal",
"floodlightOptimizationEnabled",
"ersTarget",
"cpaTarget",
"roasTarget",
"lowPosition",
"highPosition",
"bidStrategyMinBid",
"bidStrategyMaxBid",
"monthlySpendTarget",
"floodlightActivityTargetIds",
"dfaActions",
"dfaRevenue",
"dfaTransactions",
"dfaWeightedActions",
"dfaActionsCrossEnv",
"dfaRevenueCrossEnv",
"dfaTransactionsCrossEnv",
"dfaWeightedActionsCrossEnv",
"avgCpc",
"avgCpm",
"avgPos",
"clicks",
"cost",
"ctr",
"impr",
"adWordsConversions",
"adWordsConversionValue",
"adWordsViewThroughConversions",
"visits",
"date",
"monthStart",
"monthEnd",
"quarterStart",
"quarterEnd",
"weekStart",
"weekEnd",
"yearStart",
"yearEnd",
]
|
{
"content_hash": "0998d030724fc4e01196474ce9d4973f",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 80,
"avg_line_length": 26.60759493670886,
"alnum_prop": 0.6379638439581351,
"repo_name": "google/report2bq",
"id": "f10532efa238483206508f4cdf866437694d3750",
"size": "2102",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "application/classes/sa360_report_validation/bid_strategy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "689"
},
{
"name": "HTML",
"bytes": "13362"
},
{
"name": "JavaScript",
"bytes": "375"
},
{
"name": "Python",
"bytes": "435292"
},
{
"name": "Shell",
"bytes": "35343"
}
],
"symlink_target": ""
}
|
"""
Created on Tue Aug 9 00:24:17 2016
@author: ajaver
"""
import os
from functools import partial
import tables
def _isValidFlag(field, flag_value):
return (field._v_attrs['has_finished'] >= flag_value)
def _isValidProvenance(field, point_name):
return (point_name in field)
def _checkFlagsFun(fname, field_name, test_value, test_func=_isValidFlag, extra_files=[]):
accepted_errors = (tables.exceptions.HDF5ExtError,
        tables.exceptions.NoSuchNodeError, KeyError, IOError)
try:
with tables.open_file(fname, mode='r') as fid:
field = fid.get_node(field_name)
has_finished = test_func(field, test_value)
#check that all the extra files do exist
has_finished = has_finished and all(os.path.exists(x) for x in extra_files)
return has_finished
except accepted_errors:
return False
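# Illustrative example (the file name is hypothetical; 'SKE_CREATE' is one of the
# checkpoint names used elsewhere in this module):
#
#   _checkFlagsFun('video_skeletons.hdf5', '/provenance_tracking', 'SKE_CREATE',
#                  test_func=_isValidProvenance)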
class CheckFinished(object):
def __init__(self, checkpoints_args):
self.checkpoints = checkpoints_args
self._deprec_check_funcs = {}
        # THIS CODE CHECKS DEPRECATED OPTIONS. MAYBE STILL USEFUL FOR VERY OLD FILES BUT
        # FOR THE MOMENT I LEAVE IT LIKE THIS.
        # #I plan to check successful processing using only provenance. I keep this for backwards compatibility.
# outf = lambda x : output_files[x][0]
# #has_finished flags were deprecated in favor of provenance tracking flags
# self._deprec_check_funcs = {
# 'COMPRESS': [partial(_checkFlagsFun, outf('COMPRESS'), '/mask', 1)],
# 'COMPRESS_ADD_DATA': [partial(_checkFlagsFun, outf('COMPRESS'), '/mask', 2)],
# 'TRAJ_CREATE': [partial(_checkFlagsFun, outf('TRAJ_CREATE'), '/plate_worms', 1)],
# 'TRAJ_JOIN': [partial(_checkFlagsFun, outf('TRAJ_JOIN'), '/plate_worms', 2)],
# 'SKE_CREATE': [partial(_checkFlagsFun, outf('SKE_CREATE'), '/skeleton', 1)],
# 'SKE_FILT': [partial(_checkFlagsFun, outf('SKE_FILT'), '/skeleton', 2)],
# #'SKE_ORIENT': [partial(_checkFlagsFun, outf('SKE_ORIENT'), '/skeleton', 3)],
# 'INT_PROFILE': [partial(_checkFlagsFun, outf('INT_PROFILE'), '/straighten_worm_intensity_median', 1)],
# #'INT_SKE_ORIENT': [partial(_checkFlagsFun, outf('INT_SKE_ORIENT'), '/skeleton', 4)],
# 'FEAT_CREATE': [partial(_checkFlagsFun, outf('FEAT_CREATE'), '/features_means', 1)],
# 'FEAT_MANUAL_CREATE': [partial(_checkFlagsFun, outf('FEAT_MANUAL_CREATE'), '/features_means', 1)],
# }
#deprecated due to the removal of trajectories.hdf5 from the pipeline
# traj_file = outf('TRAJ_CREATE').replace('_skeletons.hdf5', '_trajectories.hdf5')
# for point in ['TRAJ_CREATE', 'TRAJ_JOIN']:
# func = partial(_checkFlagsFun,
# traj_file,
# '/provenance_tracking',
# point,
# _isValidProvenance,
# extra_files)
# self._deprec_check_funcs[point].append(func)
def check_provenance(self, point):
output_files = self.checkpoints[point]['output_files']
extra_files = output_files[1:]
provenance_file = output_files[0]
return _checkFlagsFun(provenance_file, '/provenance_tracking', point, _isValidProvenance, extra_files)
def getUnfinishedPoints(self, checkpoints2process):
unfinished_points = checkpoints2process[:]
for point in checkpoints2process:
if self.get(point):
unfinished_points.pop(0)
else:
break
return unfinished_points
def get(self, point):
has_finished = self.check_provenance(point)
#we test flags for backwards compatibility
if not has_finished and point in self._deprec_check_funcs:
for func in self._deprec_check_funcs[point]:
has_finished = func()
if has_finished:
break
return has_finished
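# Illustrative usage sketch (file names are hypothetical): `checkpoints_args`
# maps a checkpoint name to its expected output files; the first file is the one
# whose '/provenance_tracking' node is inspected.
#
#   checker = CheckFinished({'COMPRESS': {'output_files': ['video_masked.hdf5']}})
#   checker.getUnfinishedPoints(['COMPRESS'])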
|
{
"content_hash": "78c4abe330f272e4b5fe8d76c5d85fcc",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 116,
"avg_line_length": 41.11,
"alnum_prop": 0.5923133057650207,
"repo_name": "ljschumacher/tierpsy-tracker",
"id": "e9cff4871923c644b7f7c9bfd75ad50215a7c166",
"size": "4135",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tierpsy/processing/CheckFinished.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "5062"
},
{
"name": "C",
"bytes": "11990"
},
{
"name": "Makefile",
"bytes": "761"
},
{
"name": "Python",
"bytes": "845034"
},
{
"name": "Shell",
"bytes": "10294"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('nomi', '0080_auto_20170622_2036'),
]
operations = [
migrations.AddField(
model_name='nomination',
name='group_status',
field=models.CharField(choices=[('normal', 'normal'), ('grouped', 'grouped')], default='normal', max_length=50),
),
]
|
{
"content_hash": "694bb81257efc648399518407b2dced5",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 124,
"avg_line_length": 25.38888888888889,
"alnum_prop": 0.5973741794310722,
"repo_name": "aniketp41/Gymkhana-Nominations",
"id": "8fe00b1aa71b263fb88a0e8119f670544ed56607",
"size": "530",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nomi/migrations/0081_nomination_group_status.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "13871"
},
{
"name": "HTML",
"bytes": "187973"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Python",
"bytes": "249674"
}
],
"symlink_target": ""
}
|
import telebot
import toml
import os
# get conf file:
telebot_config = os.getenv("TELEBOT_CONFIG")
if telebot_config:
config = toml.loads(telebot_config)
else:
telebot_conf_file = os.getenv("TELEBOT_CONFIG_FILE")
if not telebot_conf_file:
telebot_conf_file = "~/.telebot"
telebot_conf_file = os.path.expanduser(telebot_conf_file)
with open(telebot_conf_file) as conf:
config = toml.loads(conf.read())
# DEBUG mode enabler
if 'logging' in config:
log_level = config['logging']['debuglevel']
levels = ['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG', 'NOTSET']
if log_level in levels:
telebot.logger.setLevel(log_level)
else:
telebot.logger.setLevel('NOTSET')
# init telebot
tb = telebot.TeleBot(config['security']['token'])
class TelebotObject():
"""global object"""
def __init__(self, config):
super(TelebotObject, self).__init__()
if "IsAuthorised" in config['security']:
self.IsAuthorised = config['security']['IsAuthorised']
            if not isinstance(self.IsAuthorised, bool):
self.panic()
else:
self.IsAuthorised = False
if "secretQuestion" in config['security']:
self.secretQuestion = config['security']['secretQuestion']
else:
self.secretQuestion = 'pass'
if "insecFunc" in config['security']:
self.InsecureFuncs = config['security']["insecFunc"]
else:
self.InsecureFuncs = ['sudo']
if "whitelisted_users_id" in config['users']:
self.whitelisted_users_id = config['users']["whitelisted_users_id"]
# default working tree
if "workDir" in config['properties']:
self.workDir = config['properties']['workDir']
else:
self.workDir = os.path.expanduser("~/repos/")
def panic(self):
print("errors in configs")
# init global object
print(config)
tb_mem = TelebotObject(config)
#
|
{
"content_hash": "8e6f5be478042875a40a862de4bfb2eb",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 79,
"avg_line_length": 26.89189189189189,
"alnum_prop": 0.6105527638190955,
"repo_name": "BorysDrozhak/ops-bot",
"id": "0fe13ea280b8692bc5b492543c49e665f77fe231",
"size": "1990",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/init.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "763"
},
{
"name": "Python",
"bytes": "9481"
}
],
"symlink_target": ""
}
|
from element import Elem
from item import Item #@UnusedImport, suppress Eclipse warning, used dynamically in convert_to_item() eval
class OrganizemItemDataConversionException(Exception): pass
class BaseItemConverter(object):
@staticmethod
def convert_to_item(item):
pass
class YamlItemConverter(BaseItemConverter):
@staticmethod
    def convert_to_item(py_item):
        """Converts an Item serialized in Python object form (dicts and lists, as loaded from YAML) into an Item instance"""
# The list of names of elements an Item must have for this version
elem_names = Elem.get_optional_data_elems()
# List of names of elements in the py_item
py_elem_names = YamlItemConverter._get_py_item_elems(py_item)
# Item must have title element, so check for that first
title = YamlItemConverter._get_py_item_title(py_item, py_elem_names)
# Handling dynamic list of kwargs to __init__(), so build string
# dynamically and make __init__() call an eval()
init_call = []
init_call.append("Item('%s', {" % title)
        # eval(x) fails when x contains a multiline string literal, because
        # scanning the literal hits an EOL and raises an exception.
        # So, store the multiline strings in this local list and put
        # note_vals[idx] into the string to be evaled.
        # And, yes, this is a pretty sweet hack
note_vals = []
# Algo:
# - Iterate the list of expected elements, item_elems
# - Test for matching elem in py_item passed in (which was loaded from data)
# - If found, add to kwargs list with py_item value for Item.__init__()
# - If not found, add to kwargs list with None value for Item.__init__()
for elem_name in elem_names:
if elem_name in py_elem_names:
idx = py_elem_names.index(elem_name)
py_elems = py_item[Elem.ROOT]
py_elem_val = py_elems[idx][elem_name]
py_elem_val = Elem.elem_init(elem_name, py_elem_val).escaped_str()
if py_elem_val:
# Handle special case of multiline string value for Note elem
# See comment above where note_vals[] is declared
if Elem.get_elem_type(elem_name) == Elem.MULTILINE_TEXT_TYPE:
note_vals.append(py_elem_val)
val_idx = len(note_vals) - 1
init_call.append("'%s' : note_vals[%i], " % (elem_name, val_idx))
else:
init_call.append("'%s' : %s, " % (elem_name, py_elem_val))
else:
init_call.append("'%s' : None, " % elem_name)
else:
init_call.append("'%s' : None, " % elem_name)
init_call.append('})')
init_call = ''.join(init_call)
item = eval(init_call)
return item
@staticmethod
def _get_py_item_elems(py_item):
py_elems = py_item[Elem.ROOT]
num_elems = len(py_elems)
return [py_elems[j].keys()[0] for j in range(0, num_elems)]
@staticmethod
def _get_py_item_title(py_item, py_elem_names):
# Elements in the py_item
py_elems = py_item[Elem.ROOT]
if Elem.TITLE not in py_elem_names:
raise OrganizemItemDataConversionException("Attempted to load Item from data file without required 'title' element")
idx = py_elem_names.index(Elem.TITLE)
title = py_elems[idx][Elem.TITLE]
if not title:
raise OrganizemItemDataConversionException("Attempted to load Item from data file without value for required 'title' element")
return title
|
{
"content_hash": "f99c9bcce531678eb178a9e8dbed4517",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 138,
"avg_line_length": 45.25301204819277,
"alnum_prop": 0.582800851970181,
"repo_name": "marksweiss/organize-m",
"id": "f2cb7058f9e54ed26780497f6b6db7250d1c6c52",
"size": "3756",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/item_converter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "130219"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class AutocolorscaleValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(
self, plotly_name="autocolorscale", parent_name="sunburst.marker", **kwargs
):
super(AutocolorscaleValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
implied_edits=kwargs.pop("implied_edits", {}),
role=kwargs.pop("role", "style"),
**kwargs
)
|
{
"content_hash": "e17d6d00e9700ca5a2d384c0b12623a6",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 83,
"avg_line_length": 36.333333333333336,
"alnum_prop": 0.6073394495412844,
"repo_name": "plotly/python-api",
"id": "297a2cf0cf3ef2a5d66912e8ecfd035a077d1885",
"size": "545",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/sunburst/marker/_autocolorscale.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
}
|
"""
Utilities to compile denominator section and related helper functions.
"""
from typing import Any
from absl import app
from absl import flags
from common_util import *
FLAGS = flags.FLAGS
flags.DEFINE_string(
'denominator_config', None,
'Path of json file SHORT config for generating denominator section')
flags.DEFINE_string(
'denominator_long_config', None,
'Path of json file LONG config for generating denominator section')
flags.DEFINE_string('column_list_path', None,
'Path of json file containing list of all columns')
flags.DEFINE_boolean('get_ignore_columns', False,
                     'Compute and print the list of columns to be ignored (mean/median/average margin of error columns)')
def find_columns_with_token(column_list: list,
                            token: str,
                            delimiter: str = '!!') -> list:
    """Returns the columns that contain the given token
Args:
column_list: List of column names to be searched.
token: Token to be searched within the column name.
      delimiter: delimiter separating tokens within a single column name string.
Returns:
List of column names that contain the input token.
"""
ret_list = []
for cur_column in column_list:
if token_in_list_ignore_case(token, cur_column.split(delimiter)):
ret_list.append(cur_column)
return list(set((ret_list)))
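# Illustrative example (hypothetical column names; assumes token_in_list_ignore_case
# does a case-insensitive exact-token match):
#
#   find_columns_with_token(
#       ['Estimate!!Total!!Male', 'Estimate!!Total!!Female'], 'male')
#   # -> ['Estimate!!Total!!Male']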
def replace_token_in_column(cur_column: str,
old_token: str,
new_token: str,
delimiter: str = '!!') -> str:
return delimiter.join([
new_token if x == old_token else x for x in cur_column.split(delimiter)
])
def replace_first_token_in_column(cur_column: str,
old_token: str,
new_token: str,
delimiter: str = '!!') -> str:
new_list = []
temp_flag = True
for x in cur_column.split(delimiter):
if x == old_token and temp_flag:
new_list.append(new_token)
temp_flag = False
else:
new_list.append(x)
return delimiter.join(new_list)
# replace token
def replace_token_in_column_list(column_list: list,
old_token: str,
new_token: str,
delimiter: str = '!!') -> list:
ret_list = []
for cur_column in column_list:
ret_list.append(
replace_token_in_column(cur_column, old_token, new_token,
delimiter))
return ret_list
# combined replace list
def replace_token_list_in_column_list(column_list: list,
old_token: str,
new_token_list: list,
delimiter: str = '!!') -> dict:
"""Replaces a token with an element from list of new tokens individually.
Outputs a list of new column names with new token for each column name.
Args:
column_list: List of column names to replace tokens in.
old_token: Token to be replaced.
      new_token_list: List of new tokens to be placed. Each token yields an output column name.
      delimiter: delimiter separating tokens within a single column name string.
Returns:
Dict object with original column name as key and list of replaced column names as value.
"""
ret_dict = {}
for cur_column in column_list:
ret_dict[cur_column] = []
for new_token in new_token_list:
ret_dict[cur_column].append(
replace_token_in_column(cur_column, old_token, new_token,
delimiter))
return ret_dict
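# Illustrative example (hypothetical column names):
#
#   replace_token_list_in_column_list(['Estimate!!Total!!Male'], 'Male',
#                                     ['Female', 'Both sexes'])
#   # -> {'Estimate!!Total!!Male': ['Estimate!!Total!!Female',
#   #                               'Estimate!!Total!!Both sexes']}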
# find columns sub token
def find_columns_with_token_partial_match(column_list: list,
token_str: str,
delimiter: str = '!!') -> list:
ret_list = []
for cur_column in column_list:
for token in cur_column.split(delimiter):
if token_str.lower() in token.lower():
ret_list.append(cur_column)
return list(set((ret_list)))
def get_columns_by_token_count(column_list: list,
delimiter: str = '!!') -> dict:
ret_dict = {}
for cur_column in column_list:
token_list = cur_column.split(delimiter)
if len(token_list) not in ret_dict:
ret_dict[len(token_list)] = []
ret_dict[len(token_list)].append(cur_column)
return ret_dict
def get_columns_with_same_prefix(columns_by_length: dict,
max_extra_token: int = 1,
delimiter: str = '!!') -> dict:
ret_dict = {}
for column_length in columns_by_length:
for cur_column in columns_by_length[column_length]:
extra_length = 1
while extra_length <= max_extra_token:
if (column_length + extra_length) in columns_by_length:
                    for compare_column in columns_by_length[column_length +
extra_length]:
                        if compare_column.startswith(cur_column):
if cur_column not in ret_dict:
ret_dict[cur_column] = []
                            ret_dict[cur_column].append(compare_column)
extra_length += 1
return ret_dict
def guess_total_columns_from_zip_file(zip_path: str, ignore_token_list: list,
year_list: list) -> dict:
"""Find columns that might be totals based on their value(> 100).
Args:
zip_path: Path of the zip file with data.
ignore_token_list: Tokens that indicate column needs to be ignored.
year_list: List of years for which processing needs to be done.
Returns:
Dict with year as key value and list of column names that could be totals as value.
"""
zip_path = os.path.expanduser(zip_path)
ret_dict = {}
with zipfile.ZipFile(zip_path) as zf:
for filename in zf.namelist():
if '_data_' in filename:
# find year
year = filename[:filename.index('.')]
year = year[-4:]
if year in year_list:
with zf.open(filename, 'r') as data_f:
csv_reader = csv.reader(
io.TextIOWrapper(data_f, 'utf-8'))
ret_dict[year] = total_columns_from_csvreader(
csv_reader, ignore_token_list)
return ret_dict
def total_columns_from_csvreader(csv_reader: Any,
ignore_token_list: list) -> list:
total_columns = []
for row in csv_reader:
if csv_reader.line_num == 2:
column_name_list = row.copy()
elif csv_reader.line_num == 3:
for i, val in enumerate(row):
try:
ignore_cell = False
for tok in ignore_token_list:
if tok.lower() in column_name_list[i].lower():
ignore_cell = True
if 'Margin of Error' in column_name_list[i]:
ignore_cell = True
if not ignore_cell:
if float(val) > 100:
if column_name_list[i] not in total_columns:
total_columns.append(column_name_list[i])
except ValueError:
pass
return total_columns
def yearwise_columns_from_zip_file(zip_path: str,
spec_dict: dict,
year_list: list,
delimiter: str = '!!') -> dict:
zip_path = os.path.expanduser(zip_path)
ret_dict = {}
with zipfile.ZipFile(zip_path) as zf:
for filename in zf.namelist():
if '_data_' in filename:
# find year
year = filename[:filename.index('.')]
year = year[-4:]
if year in year_list:
with zf.open(filename, 'r') as data_f:
# TODO sort by variable ID to preserve sequence of appearance.
                        # Some older year files have last few columns in unsorted order,
# leading to wrong prefix association.
csv_reader = csv.reader(
io.TextIOWrapper(data_f, 'utf-8'))
temp_list = columns_from_CSVreader(csv_reader, False)
ret_dict[year] = []
for cur_column in temp_list:
if not column_to_be_ignored(cur_column, spec_dict,
delimiter):
ret_dict[year].append(cur_column)
return ret_dict
def column_find_prefixed(column_name: str, prefix_list: list) -> str:
    """Checks whether the column name begins with one of the strings from a given prefix list.
    NOTE: The longest prefix is used in case multiple matches occur.
Args:
column_name: Name of the column to be checked.
        prefix_list: List of possible prefixes.
Returns:
The longest matching prefix or None if no match is found.
"""
matched_prefix = None
if column_name not in prefix_list:
for cur_prefix in prefix_list:
if len(cur_prefix) < len(column_name) and cur_prefix in column_name:
if matched_prefix:
if len(cur_prefix) > len(matched_prefix):
matched_prefix = cur_prefix
else:
matched_prefix = cur_prefix
return matched_prefix
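# Illustrative example (hypothetical column names): the longest matching prefix wins.
#
#   column_find_prefixed('Estimate!!Total!!Male!!Under 5 years',
#                        ['Estimate!!Total', 'Estimate!!Total!!Male'])
#   # -> 'Estimate!!Total!!Male'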
def get_census_column_token_index(census_columns: list,
year_list: list,
yearwise_columns: dict,
delimiter: str = '!!') -> dict:
"""Finds the index of token representing the census UI column name.
This index is used for adding MOE substring when necessary.
Args:
census_columns: List of column names in census UI.
        year_list: List of years for which processing is required.
        yearwise_columns: Dict with list of column names by year.
        delimiter: delimiter separating tokens within a single column name string.
Returns:
Dict with year as key value and the index of census UI column name token as value.
"""
index_dict = {}
ret_dict = {}
for year in yearwise_columns:
if year in year_list:
index_dict[year] = {}
for census_col in census_columns:
if year in year_list:
index_dict[year][census_col] = {}
index_dict[year][census_col]['index'] = []
for year in yearwise_columns:
if year in year_list:
# compile list of column token index, traversing each row
for census_cell in yearwise_columns[year]:
token_list = census_cell.split(delimiter)
column_found = False
for census_col in census_columns:
if census_col in token_list: # or census_col+' MOE' in token_list:
column_found = True
# find the token index of column name for each year
col_i = token_list.index(census_col)
if col_i not in index_dict[year][census_col]['index']:
index_dict[year][census_col]['index'].append(col_i)
# MOE column names
if census_col + ' MOE' in token_list: # or census_col+' MOE' in token_list:
column_found = True
# find the token index of column name for each year
col_i = token_list.index(census_col + ' MOE')
if col_i not in index_dict[year][census_col]['index']:
index_dict[year][census_col]['index'].append(col_i)
if not column_found:
print('Warning: No column found for', census_cell)
# find the census column token index for the year
year_col_i = -1
for census_col in census_columns:
# keep the lowest of the found indices, if multiple found
index_dict[year][census_col]['index'] = sorted(
index_dict[year][census_col]['index'])
if year_col_i == -1:
year_col_i = index_dict[year][census_col]['index'][0]
# check if it is consistent across columns
if year_col_i != index_dict[year][census_col]['index'][0]:
print(
'Warning: found potential conflicts for column token index for year',
year)
ret_dict[year] = year_col_i
## For debug
# print(json.dumps(index_dict, indent=2))
return ret_dict
def get_census_rows_by_column(census_columns: list,
year_list: list,
yearwise_columns: dict,
index_dict: dict,
delimiter: str = '!!') -> dict:
"""Organises column names so that census UI rows are associated with their UI columns.
Args:
census_columns: List of column names in census UI.
        year_list: List of years for which processing is required.
        yearwise_columns: Dict with list of column names by year.
        index_dict: Dict with census UI column token index for each year.
        delimiter: delimiter separating tokens within a single column name string.
Returns:
      Dict keyed by year and census column name, with the list of matching row names as value.
"""
# store the rows according to their columns
ret_dict = {}
for year in yearwise_columns:
if year in year_list:
ret_dict[year] = {}
for census_col in census_columns:
ret_dict[year][census_col] = []
for census_cell in yearwise_columns[year]:
token_list = census_cell.split(delimiter)
for census_col in census_columns:
if token_list[index_dict[year]] == census_col or token_list[
index_dict[year]] == census_col + ' MOE':
if census_cell not in ret_dict[year][census_col]:
ret_dict[year][census_col].append(census_cell)
return ret_dict
def get_census_rows_by_column_by_type(rows_by_column: dict,
delimiter: str = '!!') -> dict:
# store the rows according to their columns and type
ret_dict = {}
for year in rows_by_column:
ret_dict[year] = {}
for census_col in rows_by_column[year]:
ret_dict[year][census_col] = {'moe_cols': [], 'estimate_cols': []}
for census_cell in rows_by_column[year][census_col]:
token_list = census_cell.split(delimiter)
if 'Margin of Error' in token_list:
ret_dict[year][census_col]['moe_cols'].append(census_cell)
else:
ret_dict[year][census_col]['estimate_cols'].append(
census_cell)
return ret_dict
def get_column_total_status(totals_by_column: dict,
rows_by_column_type: dict) -> dict:
ret_dict = {}
for year in totals_by_column:
for census_col in totals_by_column[year]:
if census_col not in ret_dict:
ret_dict[census_col] = {}
ret_dict[census_col][year] = {
'only_percentage': False,
'only_total': False,
}
# only percentages
if len(totals_by_column[year][census_col]) == 0:
ret_dict[census_col][year]['only_percentage'] = True
# only totals
if len(totals_by_column[year][census_col]) == len(
rows_by_column_type[year][census_col]['estimate_cols']):
ret_dict[census_col][year]['only_total'] = True
return ret_dict
def get_denominator_method_config(
totals_status: dict, totals_by_column: dict,
total_not_prefix: list = ()) -> dict:
ret_dict = {}
ret_dict['denominator_method'] = ''
total_columns = []
percent_columns = []
for census_col in totals_status:
col_is_total = 1
col_is_percent = 1
for year in totals_status[census_col]:
if col_is_total == 1:
col_is_total = totals_status[census_col][year]['only_total']
if col_is_total != totals_status[census_col][year]['only_total']:
col_is_total = 2
if col_is_percent == 1:
col_is_percent = totals_status[census_col][year][
'only_percentage']
if col_is_percent != totals_status[census_col][year][
'only_percentage']:
col_is_percent = 2
if col_is_percent == 2:
ret_dict['denominator_method'] = 'year_mismatch'
elif col_is_percent:
percent_columns.append(census_col)
if col_is_total == 2:
ret_dict['denominator_method'] = 'year_mismatch'
elif col_is_total:
total_columns.append(census_col)
if len(percent_columns) > 0 and len(total_columns) > 0:
ret_dict['denominator_method'] = 'token_replace'
ret_dict['token_map'] = {}
for tok in percent_columns:
ret_dict['token_map'][tok] = total_columns[0]
if len(total_columns) > 1:
print(
'Warning: The config might need fixing of token_map section because multiple total columns were found'
)
# prefix method
else:
ret_dict['denominator_method'] = 'prefix'
temp_dict = {'col': '', 'len': 0}
len_dict = {}
# check if length of estimates is the same for all columns
for year in totals_by_column:
for census_col in totals_by_column[year]:
# sanity checks
if census_col not in len_dict:
len_dict[census_col] = len(
totals_by_column[year][census_col])
elif len_dict[census_col] != len(
totals_by_column[year][census_col]):
print(
'Warning: number of totals for', census_col,
'changes across years, modify the long config if needed'
)
# find longest list of totals to use, ideally should be same for all columns
if len(totals_by_column[year][census_col]) > temp_dict['len']:
temp_dict['col'] = census_col
temp_dict['len'] = len(totals_by_column[year][census_col])
temp_dict['rows'] = totals_by_column[year][census_col]
ret_dict['reference_column'] = temp_dict['col']
ret_dict['totals'] = {}
for year in totals_by_column:
ret_dict['totals'][year] = totals_by_column[year][temp_dict['col']]
    # TODO discard columns present in total_not_prefix (should be a new section in the initial config)
    # useful when there are multiple totals followed by percentages using the 1st total
return ret_dict
def rename_col(row_name: str, new_col: str, col_i: int, delimiter: str = '!!'):
temp_list = row_name.split(delimiter)
temp_list[col_i] = new_col
temp_str = delimiter.join(temp_list)
return temp_str
def col_add_moe(row_name: str, col_i: int, delimiter: str = '!!'):
temp_list = row_name.split(delimiter)
temp_list[col_i] = temp_list[col_i] + ' MOE'
temp_str = delimiter.join(temp_list)
return temp_str
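# Illustrative examples (hypothetical column name, column token index 1):
#
#   rename_col('Estimate!!Total!!Male', 'Percent', 1)  # -> 'Estimate!!Percent!!Male'
#   col_add_moe('Estimate!!Total!!Male', 1)            # -> 'Estimate!!Total MOE!!Male'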
# create config
def create_long_config(basic_config_path: str, delimiter: str = '!!') -> None:
basic_config_path = os.path.expanduser(basic_config_path)
config_dict = json.load(open(basic_config_path))
spec_dict = get_spec_dict_from_path(config_dict['spec_path'])
us_data_zip = os.path.expanduser(config_dict['us_data_zip'])
year_list = config_dict['year_list']
yearwise_columns = yearwise_columns_from_zip_file(us_data_zip, spec_dict,
year_list, delimiter)
census_columns = config_dict['census_columns']
used_columns = config_dict['used_columns']
ignore_tokens = config_dict['ignore_tokens']
for year in yearwise_columns:
# remove median, mean
temp_list = []
for column_name in yearwise_columns[year]:
tok_found = False
for tok in ignore_tokens:
if tok in column_name:
tok_found = True
if not tok_found:
temp_list.append(column_name)
yearwise_columns[year] = temp_list
yearwise_column_ind = get_census_column_token_index(census_columns,
year_list,
yearwise_columns,
delimiter)
# yearwise col_token index store in config
config_dict['column_tok_index'] = yearwise_column_ind
# find set all rows of each column yearwise
yearwise_rows_by_column = get_census_rows_by_column(census_columns,
year_list,
yearwise_columns,
yearwise_column_ind,
delimiter)
yearwise_rows_by_column_type = get_census_rows_by_column_by_type(
yearwise_rows_by_column)
# find possible totals
yearwise_total_columns = guess_total_columns_from_zip_file(
us_data_zip, ignore_tokens, year_list)
for year in yearwise_total_columns:
# remove ignoreColumns
yearwise_total_columns[year] = remove_columns_to_be_ignored(
yearwise_total_columns[year], spec_dict)
# group by column name
yearwise_totals_by_column = get_census_rows_by_column(
used_columns, year_list, yearwise_total_columns, yearwise_column_ind,
delimiter)
yearwise_totals_status = get_column_total_status(
yearwise_totals_by_column, yearwise_rows_by_column_type)
temp_config = get_denominator_method_config(yearwise_totals_status,
yearwise_totals_by_column)
config_dict.update(temp_config)
new_config_path = basic_config_path.replace('.json', '_long.json')
json.dump(config_dict, open(new_config_path, 'w'), indent=2)
# store yearwise_rows_by_column_type
columns_path = new_config_path.replace('.json', '_columns.json')
json.dump(yearwise_rows_by_column_type, open(columns_path, 'w'), indent=2)
print(json.dumps(config_dict, indent=2))
# TODO accept the path of _columns.json
def create_denominators_section(long_config_path: str,
delimiter: str = '!!') -> None:
long_config_path = os.path.expanduser(long_config_path)
config_dict = json.load(open(long_config_path))
rows_by_column_type = json.load(
open(long_config_path.replace('.json', '_columns.json')))
denominators = {}
no_prefix = []
if config_dict['denominator_method'] == 'token_replace':
for new_col in config_dict['token_map']:
total_col = config_dict['token_map'][new_col]
for year in rows_by_column_type:
col_i = config_dict['column_tok_index'][year]
for new_total in rows_by_column_type[year][total_col][
'estimate_cols']:
if new_total not in denominators:
denominators[new_total] = []
# replace new_col in new_total
temp_str = rename_col(new_total, new_col, col_i, delimiter)
# check and add
if temp_str in rows_by_column_type[year][new_col][
'estimate_cols']:
if temp_str not in denominators[new_total]:
denominators[new_total].append(temp_str)
else:
print('Warning: column expected but not found\n',
temp_str)
# replace new_col and Margin of Error in new_total
temp_str2 = replace_token_in_column(temp_str, 'Estimate',
'Margin of Error',
delimiter)
# check and add
moe_found = False
if temp_str2 in rows_by_column_type[year][new_col][
'moe_cols']:
if temp_str2 not in denominators[new_total]:
denominators[new_total].append(temp_str2)
moe_found = True
# replace new_col+ MOE and Margin of Error in new_total
temp_str3 = rename_col(new_total, new_col + ' MOE', col_i,
delimiter)
temp_str3 = replace_token_in_column(temp_str3, 'Estimate',
'Margin of Error',
delimiter)
# check and add
if temp_str3 in rows_by_column_type[year][new_col][
'moe_cols']:
if temp_str3 not in denominators[new_total]:
denominators[new_total].append(temp_str3)
moe_found = True
if not moe_found:
print('Warning: column expected but not found\n',
temp_str2, '\nor\n', temp_str3)
if config_dict['denominator_method'] == 'prefix':
yearwise_totals_col = config_dict['totals']
used_col = config_dict['used_columns']
# create extended totals list
total_prefixes = []
for year in yearwise_totals_col:
col_i = config_dict['column_tok_index'][year]
for new_total in yearwise_totals_col[year]:
if new_total not in total_prefixes:
total_prefixes.append(new_total)
for new_col in used_col:
temp_str = rename_col(new_total, new_col, col_i, delimiter)
if temp_str not in total_prefixes:
total_prefixes.append(temp_str)
# read columns by year
for year in rows_by_column_type:
print('year', year)
col_i = config_dict['column_tok_index'][year]
for census_col in rows_by_column_type[year]:
for cur_i, new_row in enumerate(
rows_by_column_type[year][census_col]['estimate_cols']):
cur_prefix = column_find_prefixed(new_row, total_prefixes)
if cur_prefix:
if cur_prefix not in denominators:
denominators[cur_prefix] = []
if new_row not in denominators[cur_prefix]:
denominators[cur_prefix].append(new_row)
temp_str2 = replace_token_in_column(
new_row, 'Estimate', 'Margin of Error', delimiter)
temp_str3 = col_add_moe(temp_str2, col_i, delimiter)
moe_found = False
if temp_str2 in rows_by_column_type[year][census_col][
'moe_cols']:
if temp_str2 not in denominators[cur_prefix]:
denominators[cur_prefix].append(temp_str2)
moe_found = True
if temp_str3 in rows_by_column_type[year][census_col][
'moe_cols']:
if temp_str3 not in denominators[cur_prefix]:
denominators[cur_prefix].append(temp_str3)
moe_found = True
if not moe_found:
print('Warning: column expected but not found\n',
temp_str2, '\nor\n', temp_str3)
# warn if no prefix found
elif new_row not in total_prefixes:
# print('Warning:', new_row, 'has no prefix and is not a total')
print(new_row)
new_total_i = -1
for total_i in range(cur_i):
if rows_by_column_type[year][census_col][
'estimate_cols'][total_i] in total_prefixes:
new_total_i = total_i
new_total = rows_by_column_type[year][census_col][
'estimate_cols'][new_total_i]
if new_total not in denominators:
denominators[new_total] = []
if new_row not in denominators[new_total]:
denominators[new_total].append(new_row)
temp_str2 = replace_token_in_column(
new_row, 'Estimate', 'Margin of Error', delimiter)
temp_str3 = col_add_moe(temp_str2, col_i, delimiter)
moe_found = False
if temp_str2 in rows_by_column_type[year][census_col][
'moe_cols']:
if temp_str2 not in denominators[new_total]:
denominators[new_total].append(temp_str2)
moe_found = True
if temp_str3 in rows_by_column_type[year][census_col][
'moe_cols']:
if temp_str3 not in denominators[new_total]:
denominators[new_total].append(temp_str3)
moe_found = True
if not moe_found:
print('Warning: column expected but not found\n',
temp_str2, '\nor\n', temp_str3)
print(new_total, '\n')
no_prefix.append({new_row: new_total})
# print(json.dumps(denominators, indent=2))
output_path = os.path.dirname(long_config_path)
if no_prefix:
json.dump(no_prefix,
open(os.path.join(output_path, 'rows_without_prefix.json'),
'w'),
indent=2)
json.dump(denominators,
open(os.path.join(output_path, 'denominators.json'), 'w'),
indent=2)
if config_dict['update_spec']:
spec_dict = get_spec_dict_from_path(config_dict['spec_path'])
spec_dict['denominators'] = denominators
json.dump(spec_dict, open(config_dict['spec_path'], 'w'), indent=2)
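# Minimal invocation sketch (the path below is hypothetical): the function expects
# the *_long.json produced by create_long_config and reads the *_columns.json file
# written next to it.
def _example_create_denominators():
    create_denominators_section('~/acs_tables/s2702_config_long.json',
                                delimiter='!!')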
def get_columns_stat_moe(column_list: list, delimiter: str = '!!') -> list:
    """Return the Margin of Error columns attached to Mean/Median/Average
    statistics; these are the columns typically reported as ignore columns."""
ret_list = []
stat_tokens = ['Mean', 'Median', 'Average']
moe_columns = find_columns_with_token(column_list, 'Margin of Error')
for cur_token in stat_tokens:
ret_list.extend(
find_columns_with_token_partial_match(moe_columns, cur_token,
delimiter))
return list(set(ret_list))
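# Illustrative sketch (hypothetical column names): given the helper functions
# defined elsewhere in this module, only the Margin of Error column attached to a
# Mean/Median/Average statistic is expected to be returned.
def _example_get_columns_stat_moe():
    cols = [
        'Estimate!!Median age',
        'Margin of Error!!Median age',
        'Margin of Error!!Total population',
    ]
    return get_columns_stat_moe(cols)  # expected: ['Margin of Error!!Median age']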
def main(argv):
if FLAGS.denominator_config:
create_long_config(FLAGS.denominator_config)
if FLAGS.denominator_long_config:
create_denominators_section(FLAGS.denominator_long_config)
if FLAGS.get_ignore_columns:
if not FLAGS.column_list_path:
print('List of columns required to get ignore columns')
else:
columns_path = os.path.expanduser(FLAGS.column_list_path)
column_list = json.load(open(columns_path))
ignore_columns = get_columns_stat_moe(column_list, FLAGS.delimiter)
print(json.dumps(ignore_columns, indent=2))
if __name__ == '__main__':
app.run(main)
|
{
"content_hash": "e96b08aea10d8abb1a895d6378cb323e",
"timestamp": "",
"source": "github",
"line_count": 758,
"max_line_length": 118,
"avg_line_length": 43.199208443271765,
"alnum_prop": 0.5211482669109788,
"repo_name": "datacommonsorg/data",
"id": "1cfeb020f242e7a840049859cd13308a3cfcb1be",
"size": "33320",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/us_census/acs5yr/subject_tables/common/helper_functions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "78"
},
{
"name": "Go",
"bytes": "51446"
},
{
"name": "HTML",
"bytes": "32842253"
},
{
"name": "JavaScript",
"bytes": "458"
},
{
"name": "Jupyter Notebook",
"bytes": "5088443"
},
{
"name": "Python",
"bytes": "3723204"
},
{
"name": "R",
"bytes": "28607"
},
{
"name": "Shell",
"bytes": "25468"
},
{
"name": "TypeScript",
"bytes": "13472"
}
],
"symlink_target": ""
}
|
from string import Template
from datetime import date
bitcoinDir = "./"
inFile = bitcoinDir + "/share/qt/Info.plist"
outFile = "Credit-Qt.app/Contents/Info.plist"
version = "unknown"
fileForGrabbingVersion = bitcoinDir + "bitcoin-qt.pro"
for line in open(fileForGrabbingVersion):
    lineArr = line.replace(" ", "").split("=")
    if lineArr[0].startswith("VERSION"):
        version = lineArr[1].replace("\n", "")
fIn = open(inFile, "r")
fileContent = fIn.read()
s = Template(fileContent)
newFileContent = s.substitute(VERSION=version, YEAR=date.today().year)
fOut = open(outFile, "w")
fOut.write(newFileContent)
print("Info.plist fresh created")
|
{
"content_hash": "cac1246f1d5a29fc837f097bb7cf3577",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 69,
"avg_line_length": 27.166666666666668,
"alnum_prop": 0.7085889570552147,
"repo_name": "credit-project/Credit",
"id": "518d66802a8b272bdfd74e4220ac1a8c6ea07fa5",
"size": "893",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "share/qt/clean_mac_info_plist.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "103297"
},
{
"name": "C++",
"bytes": "2515466"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "IDL",
"bytes": "14688"
},
{
"name": "Objective-C",
"bytes": "5864"
},
{
"name": "Python",
"bytes": "37256"
},
{
"name": "Shell",
"bytes": "2527"
},
{
"name": "TypeScript",
"bytes": "5228186"
}
],
"symlink_target": ""
}
|
from tempest_lib.common.utils import data_utils
from tempest_lib import exceptions as lib_exc
from tempest.api.compute import base
from tempest import config
from tempest import test
CONF = config.CONF
class LiveBlockMigrationNegativeTestJSON(base.BaseV2ComputeAdminTest):
_host_key = 'OS-EXT-SRV-ATTR:host'
@classmethod
def skip_checks(cls):
super(LiveBlockMigrationNegativeTestJSON, cls).skip_checks()
if not CONF.compute_feature_enabled.live_migration:
raise cls.skipException("Live migration is not enabled")
@classmethod
def setup_clients(cls):
super(LiveBlockMigrationNegativeTestJSON, cls).setup_clients()
cls.admin_hosts_client = cls.os_adm.hosts_client
cls.admin_servers_client = cls.os_adm.servers_client
def _migrate_server_to(self, server_id, dest_host):
body = self.admin_servers_client.live_migrate_server(
server_id, dest_host,
CONF.compute_feature_enabled.
block_migration_for_live_migration)
return body
@test.attr(type=['negative', 'gate'])
@test.idempotent_id('7fb7856e-ae92-44c9-861a-af62d7830bcb')
def test_invalid_host_for_migration(self):
# Migrating to an invalid host should not change the status
target_host = data_utils.rand_name('host')
server = self.create_test_server(wait_until="ACTIVE")
server_id = server['id']
self.assertRaises(lib_exc.BadRequest, self._migrate_server_to,
server_id, target_host)
self.servers_client.wait_for_server_status(server_id, 'ACTIVE')
|
{
"content_hash": "4617acbabfb43824ea8ea3006797d5c3",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 71,
"avg_line_length": 37.627906976744185,
"alnum_prop": 0.6854140914709518,
"repo_name": "fengbeihong/tempest_automate_ironic",
"id": "b59e3343b0977ce0637a099b04aa64f6ae69e6de",
"size": "2254",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tempest/api/compute/test_live_block_migration_negative.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2855430"
},
{
"name": "Shell",
"bytes": "8560"
}
],
"symlink_target": ""
}
|
from bibliopixel.animation.strip import Strip
import math
# This one is best run in the region of 50 frames a second
class HalvesRainbow(Strip):
def __init__(self, layout, max_led=-1, centre_out=True, rainbow_inc=4,
**kwds):
super().__init__(layout, 0, -1, **kwds)
self._minLed = 0
self._maxLed = max_led
if self._maxLed < 0 or self._maxLed < self._minLed:
self._maxLed = self.layout.numLEDs - 1
self._positive = True
self._step = 0
self._centerOut = centre_out
self._rainbowInc = rainbow_inc
def pre_run(self):
self._current = 0
self._step = 0
def step(self, amt=1):
center = float(self._maxLed) / 2
center_floor = math.floor(center)
center_ceil = math.ceil(center)
if self._centerOut:
self.layout.fill(
self.palette(self._step), int(center_floor - self._current), int(center_floor - self._current))
self.layout.fill(
self.palette(self._step), int(center_ceil + self._current), int(center_ceil + self._current))
else:
self.layout.fill(
self.palette(self._step), int(self._current), int(self._current))
self.layout.fill(
self.palette(self._step), int(self._maxLed - self._current), int(self._maxLed - self._current))
self._step += amt + self._rainbowInc
if self._current == center_floor:
self._current = self._minLed
else:
self._current += amt
|
{
"content_hash": "f207aa17773ee669f3a629c7c0133d0c",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 111,
"avg_line_length": 34.21739130434783,
"alnum_prop": 0.5622617534942821,
"repo_name": "rec/BiblioPixelAnimations",
"id": "137c55c2baa9fdbb046f98e2d663b4ae32627921",
"size": "1574",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "BiblioPixelAnimations/strip/HalvesRainbow.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "176882"
}
],
"symlink_target": ""
}
|
"""
Bundle to check the loading order when instantiating with iPOPO
:author: Thomas Calmant
"""
# Pelix
from pelix.constants import BundleActivator
from pelix.framework import BundleContext, BundleEvent
# iPOPO
from pelix.ipopo.decorators import ComponentFactory, Validate, Invalidate, \
Instantiate
from pelix.ipopo.constants import IPopoEvent
# ------------------------------------------------------------------------------
__version__ = (1, 0, 0)
BASIC_INSTANCE = "boot-component"
MAP_SPEC_TEST = "map.spec.test"
FACTORY_MAP = "ipopo.tests.map"
# ------------------------------------------------------------------------------
STATES = []
@ComponentFactory("boot-factory")
@Instantiate(BASIC_INSTANCE)
class BasicComponent(object):
"""
Dummy instantiated component
"""
def __init__(self):
"""
Constructor
"""
STATES.append(IPopoEvent.INSTANTIATED)
@Validate
def validate(self, context):
"""
Validation
"""
STATES.append(IPopoEvent.VALIDATED)
@Invalidate
def invalidate(self, context):
"""
Invalidation
"""
STATES.append(IPopoEvent.INVALIDATED)
@BundleActivator
class ActivatorTest:
"""
Test activator
"""
def start(self, context):
"""
Bundle started
"""
STATES.append(BundleEvent.STARTED)
def stop(self, context):
"""
Bundle stopped
"""
STATES.append(BundleEvent.STOPPED)
|
{
"content_hash": "5579e2cb69efb5cd65d906374c8ef925",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 80,
"avg_line_length": 20.916666666666668,
"alnum_prop": 0.5630810092961488,
"repo_name": "ahmadshahwan/ipopo",
"id": "b8dfbdead6ad63e149bcc8c17c0fea23f0a350c6",
"size": "1560",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/ipopo/ipopo_boot_order_bundle.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1557057"
},
{
"name": "Shell",
"bytes": "2803"
}
],
"symlink_target": ""
}
|
import pytz
import datetime
from django.test.utils import override_settings
from django.test import TestCase
from django.core.urlresolvers import reverse
from schedule.models.calendars import Calendar
from schedule.models.events import Event
from schedule.models.rules import Rule
from schedule.views import check_next_url, coerce_date_dict
class TestViews(TestCase):
fixtures = ['schedule.json']
def setUp(self):
self.rule = Rule.objects.create(frequency="DAILY")
self.calendar = Calendar.objects.create(name="MyCal", slug='MyCalSlug')
data = {
'title': 'Recent Event',
'start': datetime.datetime(2008, 1, 5, 8, 0, tzinfo=pytz.utc),
'end': datetime.datetime(2008, 1, 5, 9, 0, tzinfo=pytz.utc),
'end_recurring_period': datetime.datetime(2008, 5, 5, 0, 0, tzinfo=pytz.utc),
'rule': self.rule,
'calendar': self.calendar
}
self.event = Event.objects.create(**data)
@override_settings(USE_TZ=False)
def test_timezone_off(self):
url = reverse('day_calendar', kwargs={'calendar_slug': self.calendar.slug})
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.client.login(username="admin", password="admin")
class TestViewUtils(TestCase):
def test_check_next_url(self):
url = "http://thauber.com"
self.assertTrue(check_next_url(url) is None)
url = "/hello/world/"
self.assertEqual(url, check_next_url(url))
def test_coerce_date_dict(self):
self.assertEqual(
coerce_date_dict({'year': '2008', 'month': '4', 'day': '2', 'hour': '4', 'minute': '4', 'second': '4'}),
{'year': 2008, 'month': 4, 'day': 2, 'hour': 4, 'minute': 4, 'second': 4}
)
def test_coerce_date_dict_partial(self):
self.assertEqual(
coerce_date_dict({'year': '2008', 'month': '4', 'day': '2'}),
{'year': 2008, 'month': 4, 'day': 2, 'hour': 0, 'minute': 0, 'second': 0}
)
def test_coerce_date_dict_empty(self):
self.assertEqual(
coerce_date_dict({}),
{}
)
def test_coerce_date_dict_missing_values(self):
self.assertEqual(
coerce_date_dict({'year': '2008', 'month': '4', 'hours': '3'}),
{'year': 2008, 'month': 4, 'day': 1, 'hour': 0, 'minute': 0, 'second': 0}
)
class TestUrls(TestCase):
fixtures = ['schedule.json']
highest_event_id = 7
def test_calendar_view(self):
self.response = self.client.get(
reverse("year_calendar", kwargs={"calendar_slug": 'example'}), {})
self.assertEqual(self.response.status_code, 200)
self.assertEqual(self.response.context[0]["calendar"].name,
"Example Calendar")
def test_calendar_month_view(self):
self.response = self.client.get(reverse("month_calendar",
kwargs={"calendar_slug": 'example'}),
{'year': 2000, 'month': 11})
self.assertEqual(self.response.status_code, 200)
self.assertEqual(self.response.context[0]["calendar"].name,
"Example Calendar")
month = self.response.context[0]["periods"]['month']
self.assertEqual((month.start, month.end),
(datetime.datetime(2000, 11, 1, 0, 0, tzinfo=pytz.utc),
datetime.datetime(2000, 12, 1, 0, 0, tzinfo=pytz.utc)))
def test_event_creation_anonymous_user(self):
self.response = self.client.get(reverse("calendar_create_event",
kwargs={"calendar_slug": 'example'}), {})
self.assertEqual(self.response.status_code, 302)
def test_event_creation_authenticated_user(self):
self.client.login(username="admin", password="admin")
self.response = self.client.get(reverse("calendar_create_event",
kwargs={"calendar_slug": 'example'}), {})
self.assertEqual(self.response.status_code, 200)
self.response = self.client.post(reverse("calendar_create_event",
kwargs={"calendar_slug": 'example'}),
{'description': 'description',
'title': 'title',
'end_recurring_period_1': '10:22:00', 'end_recurring_period_0': '2008-10-30',
'end_recurring_period_2': 'AM',
'end_1': '10:22:00', 'end_0': '2008-10-30', 'end_2': 'AM',
'start_0': '2008-10-30', 'start_1': '09:21:57', 'start_2': 'AM'})
self.assertEqual(self.response.status_code, 302)
highest_event_id = self.highest_event_id
highest_event_id += 1
self.response = self.client.get(reverse("event",
kwargs={"event_id": highest_event_id}), {})
self.assertEqual(self.response.status_code, 200)
self.client.logout()
def test_view_event(self):
self.response = self.client.get(reverse("event", kwargs={"event_id": 1}), {})
self.assertEqual(self.response.status_code, 200)
def test_delete_event_anonymous_user(self):
# Only logged-in users should be able to delete, so we're redirected
self.response = self.client.get(reverse("delete_event", kwargs={"event_id": 1}), {})
self.assertEqual(self.response.status_code, 302)
def test_delete_event_authenticated_user(self):
self.client.login(username="admin", password="admin")
# Load the deletion page
self.response = self.client.get(reverse("delete_event", kwargs={"event_id": 1}), {})
self.assertEqual(self.response.status_code, 200)
self.assertEqual(self.response.context['next'],
reverse('day_calendar', args=[Event.objects.get(id=1).calendar.slug]))
# Delete the event
self.response = self.client.post(reverse("delete_event", kwargs={"event_id": 1}), {})
self.assertEqual(self.response.status_code, 302)
# Since the event is now deleted, we get a 404
self.response = self.client.get(reverse("delete_event", kwargs={"event_id": 1}), {})
self.assertEqual(self.response.status_code, 404)
self.client.logout()
|
{
"content_hash": "85a85d6d453a8b5dcb47b0770947b2e5",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 117,
"avg_line_length": 43.736486486486484,
"alnum_prop": 0.5700602502703538,
"repo_name": "mbrondani/django-scheduler",
"id": "c4be1dde581485a61c4d6091afddfaee07218bbb",
"size": "6473",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "tests/test_views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "3394"
},
{
"name": "HTML",
"bytes": "31897"
},
{
"name": "Python",
"bytes": "185707"
}
],
"symlink_target": ""
}
|
def main():
startApplication("sasview")
mouseClick(waitForObject(":groupBox_6.cbCategory_QComboBox_2"), 114, 12, 0, Qt.LeftButton)
mouseClick(waitForObjectItem(":groupBox_6.cbCategory_QComboBox_2", "Lamellae"), 103, 10, 0, Qt.LeftButton)
clickButton(waitForObject(":FittingWidgetUI.cmdPlot_QPushButton_2"))
snooze(1)
clickButton(waitForObject(":FittingWidgetUI.cmdPlot_QPushButton_2"))
snooze(2)
openContextMenu(waitForObject(":qt_workspacechild_FigureCanvasQTAgg"), 221, 184, 0)
activateItem(waitForObjectItem(":MainWindow_QMenu", "Change Scale"))
sendEvent("QMoveEvent", waitForObject(":scalePropertiesUI_ScaleProperties"), 685, 470, 973, 489)
test.compare(waitForObjectExists(":qt_workspacechild.cbX_QComboBox").count, 6)
test.compare(str(waitForObjectExists(":qt_workspacechild.cbX_QComboBox").currentText), "log10(x)")
test.compare(waitForObjectExists(":qt_workspacechild.cbX_QComboBox").currentIndex, 4)
test.compare(waitForObjectExists(":qt_workspacechild.cbX_QComboBox").visible, True)
test.compare(str(waitForObjectExists(":qt_workspacechild.cbY_QComboBox").currentText), "log10(y)")
test.compare(waitForObjectExists(":qt_workspacechild.cbY_QComboBox").currentIndex, 7)
test.compare(waitForObjectExists(":qt_workspacechild.cbView_QComboBox").currentIndex, 0)
test.compare(str(waitForObjectExists(":qt_workspacechild.cbView_QComboBox").currentText), "--")
mouseClick(waitForObject(":qt_workspacechild.cbView_QComboBox"), 93, 7, 0, Qt.LeftButton)
mouseClick(waitForObjectItem(":qt_workspacechild.cbView_QComboBox", "Linear y vs x"), 93, 6, 0, Qt.LeftButton)
clickButton(waitForObject(":qt_workspacechild.OK_QPushButton"))
test.vp("VP1")
snooze(2)
#sendEvent("QMouseEvent", waitForObject(":qt_workspacechild_FigureCanvasQTAgg_2"), QEvent.MouseButtonPress, 351, 209, Qt.RightButton, 2, 0)
#sendEvent("QMouseEvent", waitForObject(":qt_workspacechild_FigureCanvasQTAgg"), QEvent.MouseButtonRelease, 351, 209, Qt.RightButton, 0, 0)
openContextMenu(waitForObject(":qt_workspacechild_FigureCanvasQTAgg"), 351, 209, 0)
activateItem(waitForObjectItem(":MainWindow_QMenu", "Change Scale"))
test.compare(str(waitForObjectExists(":qt_workspacechild.cbView_QComboBox").currentText), "Linear y vs x")
mouseClick(waitForObject(":qt_workspacechild.cbView_QComboBox"), 21, 10, 0, Qt.LeftButton)
mouseClick(waitForObjectItem(":qt_workspacechild.cbView_QComboBox", "Guinier lny vs x^(2)"), 27, 5, 0, Qt.LeftButton)
test.compare(str(waitForObjectExists(":qt_workspacechild.cbX_QComboBox").currentText), "x^(2)")
test.compare(str(waitForObjectExists(":qt_workspacechild.cbY_QComboBox").currentText), "ln(y)")
mouseClick(waitForObject(":qt_workspacechild.cbView_QComboBox"), 95, 9, 0, Qt.LeftButton)
mouseClick(waitForObjectItem(":qt_workspacechild.cbView_QComboBox", "XS Guinier ln(y*x) vs x^(2)"), 70, 4, 0, Qt.LeftButton)
test.compare(str(waitForObjectExists(":qt_workspacechild.cbX_QComboBox").currentText), "x^(2)")
test.compare(str(waitForObjectExists(":qt_workspacechild.cbY_QComboBox").currentText), "ln(y*x)")
mouseClick(waitForObject(":qt_workspacechild.cbView_QComboBox"), 83, 5, 0, Qt.LeftButton)
mouseClick(waitForObjectItem(":qt_workspacechild.cbView_QComboBox", "Porod y*x^(4) vs x^(4)"), 57, 1, 0, Qt.LeftButton)
test.compare(str(waitForObjectExists(":qt_workspacechild.cbX_QComboBox").currentText), "x^(4)")
test.compare(str(waitForObjectExists(":qt_workspacechild.cbY_QComboBox").currentText), "y*x^(4)")
mouseClick(waitForObject(":qt_workspacechild.cbView_QComboBox"), 94, 5, 0, Qt.LeftButton)
mouseClick(waitForObjectItem(":qt_workspacechild.cbView_QComboBox", "Kratky y*x^(2) vs x"), 58, 6, 0, Qt.LeftButton)
test.compare(str(waitForObjectExists(":qt_workspacechild.cbX_QComboBox").currentText), "x")
test.compare(str(waitForObjectExists(":qt_workspacechild.cbY_QComboBox").currentText), "y*x^(2)")
mouseClick(waitForObject(":qt_workspacechild.cbView_QComboBox"), 74, 3, 0, Qt.LeftButton)
mouseClick(waitForObject(":qt_workspacechild.cbView_QComboBox"), 74, 3, 0, Qt.LeftButton)
mouseClick(waitForObject(":qt_workspacechild.cbX_QComboBox"), 31, 8, 0, Qt.LeftButton)
mouseClick(waitForObjectItem(":qt_workspacechild.cbX_QComboBox", "log10(x^(4))"), 42, 5, 0, Qt.LeftButton)
mouseClick(waitForObject(":qt_workspacechild.cbY_QComboBox"), 45, 10, 0, Qt.LeftButton)
mouseClick(waitForObjectItem(":qt_workspacechild.cbY_QComboBox", "y"), 30, 8, 0, Qt.LeftButton)
test.compare(str(waitForObjectExists(":qt_workspacechild.cbView_QComboBox").currentText), "--")
mouseClick(waitForObject(":qt_workspacechild.cbX_QComboBox"), 67, 9, 0, Qt.LeftButton)
mouseClick(waitForObjectItem(":qt_workspacechild.cbX_QComboBox", "log10(x)"), 56, 5, 0, Qt.LeftButton)
mouseClick(waitForObject(":qt_workspacechild.cbY_QComboBox"), 42, 12, 0, Qt.LeftButton)
mouseClick(waitForObjectItem(":qt_workspacechild.cbY_QComboBox", "log10(y)"), 55, 8, 0, Qt.LeftButton)
clickButton(waitForObject(":qt_workspacechild.OK_QPushButton"))
test.vp("VP2")
|
{
"content_hash": "9d3206699bb80f8281ac0be6b530c5f9",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 143,
"avg_line_length": 85.95,
"alnum_prop": 0.7411285631180919,
"repo_name": "SasView/sasview",
"id": "5b4e56dfe9d0de71fbf9e5daec5499801a3bd3b0",
"size": "5182",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/sas/qtgui/UnitTesting/SquishTestSuites/suite_sasview_qt/tst_Plot1D_ChangeScale/test.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "AGS Script",
"bytes": "60240"
},
{
"name": "Batchfile",
"bytes": "1616"
},
{
"name": "C",
"bytes": "11379"
},
{
"name": "C++",
"bytes": "217553"
},
{
"name": "CSS",
"bytes": "340"
},
{
"name": "Gherkin",
"bytes": "565"
},
{
"name": "HTML",
"bytes": "9252"
},
{
"name": "Inno Setup",
"bytes": "6892"
},
{
"name": "JavaScript",
"bytes": "27700"
},
{
"name": "Jupyter Notebook",
"bytes": "28926"
},
{
"name": "Makefile",
"bytes": "28052"
},
{
"name": "Python",
"bytes": "2959880"
},
{
"name": "Shell",
"bytes": "2063"
}
],
"symlink_target": ""
}
|
from clld.scripts.util import gbs
if __name__ == '__main__': # pragma: no cover
gbs()
|
{
"content_hash": "ec6d5e584f47f426d65a07241be19140",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 46,
"avg_line_length": 18.6,
"alnum_prop": 0.5806451612903226,
"repo_name": "clld/nts",
"id": "67d05ba4d9f9da2ec6340a08d786a282ff833ce1",
"size": "93",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nts/scripts/gbs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1"
},
{
"name": "JavaScript",
"bytes": "1"
},
{
"name": "Mako",
"bytes": "15586"
},
{
"name": "Python",
"bytes": "45676"
}
],
"symlink_target": ""
}
|
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "polyconf.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
{
"content_hash": "ef3e69c423261d0df9449a52587d9732",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 72,
"avg_line_length": 25.444444444444443,
"alnum_prop": 0.7117903930131004,
"repo_name": "polyaxon/polyaxon",
"id": "a67562f1abd5d01fcdea721fa432c460fa0fe663",
"size": "834",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "platform/coreapi/polyaxon/manage.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1989"
},
{
"name": "Python",
"bytes": "5201898"
},
{
"name": "Shell",
"bytes": "1565"
}
],
"symlink_target": ""
}
|
import os
import sys
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(BASE_DIR)
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "test_project.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
{
"content_hash": "7327511d66e3d576ffacbb48f1205577",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 76,
"avg_line_length": 25.53846153846154,
"alnum_prop": 0.7108433734939759,
"repo_name": "mpachas/django-monthfield",
"id": "3687bd25aacf5db7567576cd6e740f450552a971",
"size": "354",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test_project/manage.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "17422"
}
],
"symlink_target": ""
}
|
from collections import namedtuple
from fileinput import input
Coord = namedtuple('Coord', 'y x')
Delta = namedtuple('Delta', 'y x')
Cell = namedtuple('Cell', 'y x value')
MOVE_LEFT = Delta(0, -1)
MOVE_RIGHT = Delta(0, 1)
MOVE_UP = Delta(-1, 0)
MOVE_DOWN = Delta(1, 0)
MOVE_DES = {
MOVE_DOWN: 'DOWN',
MOVE_LEFT: 'LEFT',
MOVE_UP: 'UP',
MOVE_RIGHT: 'RIGHT',
}
class Board(object):
_state = None
def __init__(self, grid_size, state):
self.grid_size = grid_size
self.state = state
@property
def state(self):
return self._state
@state.setter
def state(self, value):
self._state = [[char for char in line] for line in value.split('\n') if line]
def iter_state(self):
return (Cell(y, x, char) for y, row in enumerate(self.state) for x, char in enumerate(row))
def find(self, needle):
if isinstance(needle, Cell):
return needle
if isinstance(needle, Coord):
return Cell(needle.y, needle.x, self.state[needle.y][needle.x])
for cell in self.iter_state():
if cell.value == needle:
return cell
return None
def set_cell(self, needle, value):
needle_cell = self.find(needle)
self.state[needle_cell.y][needle_cell.x] = value
def is_valid_coord(self, coord):
if not isinstance(coord, Coord):
return False
for axis in (coord.x, coord.y):
if axis < 0 or axis >= self.grid_size:
return False
return True
def resolve_delta(self, delta, ref):
if not isinstance(delta, Delta):
return delta
ref_cell = self.find(ref)
coord = Coord(ref_cell.y + delta.y, ref_cell.x + delta.x)
if not self.is_valid_coord(coord):
raise InvalidMove()
return coord
def find_delta(self, char, target):
char_cell, target_cell = self.find(char), self.find(target)
if char_cell is None or target_cell is None:
raise NoValidMove()
delta_y = target_cell.y - char_cell.y
delta_x = target_cell.x - char_cell.x
return Delta(delta_y, delta_x)
def move(self, a, b):
cell_a = self.find(a)
cell_b = self.find(self.resolve_delta(b, a))
self.set_cell(cell_a, '-')
self.set_cell(cell_b, cell_a.value)
def pformat(self):
return '\n'.join(''.join(row) for row in self.state if row)
class Bot(object):
def __init__(self, board=None):
self.board = board
def suggest_move(self, char, target, op='+'):
char_cell, target_cell = self.board.find(char), self.board.find(target)
delta = self.board.find_delta(char_cell, target_cell)
if delta.y > 0:
return MOVE_DOWN
if delta.y < 0:
return MOVE_UP
if delta.x > 0:
return MOVE_RIGHT
if delta.x < 0:
return MOVE_LEFT
raise NoValidMove()
class InvalidMove(Exception):
pass
class NoValidMove(Exception):
pass
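# Minimal usage sketch on a hypothetical 3x3 grid: the bot repeatedly suggests a
# single-axis step that shrinks the delta between 'm' and 'p'; for this layout the
# walk is DOWN, DOWN, RIGHT, RIGHT.
def _example_bot_walk():
    board = Board(3, 'm--\n---\n--p\n')
    bot = Bot(board)
    moves = []
    while board.find('p'):
        move = bot.suggest_move('m', 'p')
        board.move('m', move)
        moves.append(MOVE_DES[move])
    return moves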
def parse_input(grid):
grid_size = int(grid.pop(0))
return grid_size, '\n'.join(grid)
def main():
grid_size, grid = parse_input([line.strip() for line in input()])
board = Board(grid_size, grid)
bot = Bot(board)
while True:
move = bot.suggest_move('m', 'p')
board.move('m', move)
print(MOVE_DES[move])
if not board.find('p'):
break
if __name__ == '__main__':
main()
|
{
"content_hash": "cd40a926bceca4cd6bee9f5c569edef1",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 99,
"avg_line_length": 23.972789115646258,
"alnum_prop": 0.5683881952326901,
"repo_name": "bionikspoon/HackerRank-Challenges",
"id": "101303b009167ba74d42f119799c80cabeeb1137",
"size": "3524",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "artificial_intelligence/bot_building/saveprincess/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "80596"
}
],
"symlink_target": ""
}
|
import six
class tsurf(object):
default_name = 'Undefined'
default_color = (0, 1, 1, 1.0)
def __init__(self, *args, **kwargs):
"""Accepts either a single filename or 4 arguments: x, y, z, triangles.
keyword arguments are: "color" and "name"
If a filename is given, the tsurf is read from the file.
Otherwise:
x, y, z are sequences of the x, y, and z coordinates of the vertices.
        triangles is a sequence of the indices of the coord arrays making up
each triangle in the mesh. E.g. [[0, 1, 2], [2, 1, 3], ...]"""
if len(args) == 1:
self._read_tsurf(args[0])
elif len(args) == 4:
self._init_from_xyz(*args)
else:
raise ValueError('Invalid input arguments')
color = kwargs.get('color', None)
name = kwargs.get('name', None)
if color is not None:
self.color = color
if name is not None:
self.name = name
self.header['name'] = self.name
self.header['color'] = self.color
def _read_tsurf(self, filename):
with open(filename, 'r') as infile:
firstline = next(infile).strip()
if not firstline.startswith('GOCAD TSurf'):
raise IOError('This is not a valid TSurf file!')
# Parse Header
self.header = {}
line = next(infile).strip()
if line.startswith('HEADER'):
line = next(infile).strip()
while '}' not in line:
key, value = line.split(':')
self.header[key.lstrip('*')] = value
line = next(infile).strip()
self.name = self.header.get('name', filename)
try:
self.color = [float(item) for item in self.header['color'].split()]
self.color = tuple(self.color)
except KeyError:
self.color = self.default_color
# Read vertices and triangles
if not next(infile).startswith('TFACE'):
raise IOError('Only "TFACE" format TSurf files are supported')
self.vertices, self.triangles = [], []
for line in infile:
line = line.strip().split()
if line[0] == 'VRTX':
self.vertices.append([float(item) for item in line[2:]])
elif line[0] == 'TRGL':
self.triangles.append([int(item)-1 for item in line[1:]])
self.x, self.y, self.z = zip(*self.vertices)
def _init_from_xyz(self, x, y, z, triangles):
self.vertices = list(zip(x, y, z))
self.x, self.y, self.z = x, y, z
self.triangles = triangles
self.color = self.default_color
self.name = self.default_name
self.header = {'moveAs':'2', 'drawAs':'2', 'line':'3',
'clip':'0', 'intersect':'0', 'intercolor':' 1 0 0 1'}
def write(self, outname):
with open(outname, 'w') as outfile:
# Write Header...
outfile.write('GOCAD TSurf 1\n')
outfile.write('HEADER {\n')
"""
for key in ['name', 'color', 'moveAs', 'drawAs', 'line', 'clip',
'intersect', 'intercolor']:
value = self.header[key]
"""
for key, value in six.iteritems(self.header):
if not isinstance(value, six.string_types):
try:
value = ' '.join(repr(item) for item in value)
except TypeError:
                        # repr() the scalar value itself; 'item' from the generator
                        # expression above is not in scope here
                        value = repr(value)
outfile.write('*{}:{}\n'.format(key, value))
outfile.write('}\n')
# Write data...
outfile.write('TFACE\n')
for i, (x, y, z) in enumerate(self.vertices, start=1):
template = '\t'.join(['VRTX {}'] + 3*['{: >9.3f}']) + '\n'
outfile.write(template.format(i, x, y, z))
for a, b, c in self.triangles:
outfile.write('TRGL {} {} {}\n'.format(a+1, b+1, c+1))
outfile.write('END\n')
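# Minimal usage sketch (file names are hypothetical): build a one-triangle surface
# from raw coordinates, write it out, and read it back.
def _example_tsurf_roundtrip():
    surf = tsurf([0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [0.0, 0.0, 0.0],
                 [[0, 1, 2]], name='demo_triangle')
    surf.write('demo_triangle.ts')
    return tsurf('demo_triangle.ts')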
|
{
"content_hash": "65977d01bde35e6a80e7ebe2d604cc36",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 83,
"avg_line_length": 40.627450980392155,
"alnum_prop": 0.49758687258687256,
"repo_name": "joferkington/python-geoprobe",
"id": "b27c6b9ffe4fe5154b6c01af3755ebb20e1b1d07",
"size": "4144",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "geoprobe/tsurf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "133201"
}
],
"symlink_target": ""
}
|
import unittest
import numpy as np
from chaco.array_plot_data import ArrayPlotData
from chaco.plot import Plot
from chaco.tools.range_selection import RangeSelection
from enable.testing import EnableTestAssistant
class RangeSelectionTestCase(EnableTestAssistant, unittest.TestCase):
def test_selecting_mouse_leave_clipping(self):
# Regression test for #216.
plot_data = ArrayPlotData()
arr = np.arange(4.0)
plot_data.set_data("x", arr)
plot_data.set_data("y", arr)
for origin in ('bottom left', 'top left', 'bottom right', 'top right'):
for orientation in ('h', 'v'):
for axis in ('index', 'value'):
plot = Plot(
                        plot_data, orientation=orientation, origin=origin
)
renderer = plot.plot(('x', 'y'))[0]
renderer.bounds = [10, 20]
tool = RangeSelection(
renderer, left_button_selects=True, axis=axis,
)
renderer.tools.append(tool)
low_x, low_y = plot.position
high_x = low_x + renderer.bounds[0] - 1
high_y = low_y + renderer.bounds[1] - 1
cx = 5
cy = 5
bounds = (
(low_x - 1, low_y),
(high_x + 1, low_y),
(low_x, low_y - 1),
(low_x, high_y + 1),
)
for x, y in bounds:
self.mouse_down(tool, x=cx, y=cy)
self.mouse_leave(tool, x=x, y=y)
selection = tool.selection
self.assertTrue(selection[0] <= selection[1])
self.mouse_up(tool, x=x, y=y)
if __name__ == '__main__':
import nose
nose.run()
|
{
"content_hash": "7348abf9d7d739d6bed3ceb437ebddc4",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 79,
"avg_line_length": 34.29824561403509,
"alnum_prop": 0.46342710997442454,
"repo_name": "tommy-u/chaco",
"id": "c94fb54affa733054fe2d6bae029cab9f7ce8492",
"size": "1955",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chaco/tools/tests/range_selection_test_case.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "57089"
},
{
"name": "C++",
"bytes": "9881"
},
{
"name": "Gnuplot",
"bytes": "611"
},
{
"name": "Python",
"bytes": "2475987"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import unittest,os,csv
from rdkit import Chem,RDConfig
class TestCase(unittest.TestCase):
def setUp(self):
self.basePath=os.path.join(RDConfig.RDDataDir,'Pains')
self.painsFile = os.path.join(self.basePath,'wehi_pains.csv')
with open(self.painsFile,'r') as inf:
self.painsDefs = [x for x in csv.reader(inf)]
self.matchers = [Chem.MolFromSmarts(x[0],mergeHs=True) for x in self.painsDefs]
def test1(self):
" molecules that we know should match "
with open(os.path.join(self.basePath,'test_data','test_set3.txt'),'r') as inf:
testData = [x.strip().split() for x in inf if x[0] != '#']
for line in testData:
self.assertEqual(len(line),5)
id_ = int(line[0])
m = Chem.MolFromSmiles(line[2])
self.assertTrue(m is not None)
self.assertTrue(m.HasSubstructMatch(self.matchers[id_]))
self.assertTrue(Chem.AddHs(m).HasSubstructMatch(self.matchers[id_]))
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "126f0bfecb940da1133ddfbbd66cf140",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 83,
"avg_line_length": 34.36666666666667,
"alnum_prop": 0.6556741028128031,
"repo_name": "soerendip42/rdkit",
"id": "cd71671d1c3dfb9f0a7d9ec7371d1544cc19954b",
"size": "1261",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Data/Pains/test_data/run_tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "385"
},
{
"name": "C",
"bytes": "203258"
},
{
"name": "C#",
"bytes": "6745"
},
{
"name": "C++",
"bytes": "7168898"
},
{
"name": "CMake",
"bytes": "585758"
},
{
"name": "CSS",
"bytes": "4742"
},
{
"name": "FORTRAN",
"bytes": "7661"
},
{
"name": "HTML",
"bytes": "65468"
},
{
"name": "Java",
"bytes": "248620"
},
{
"name": "JavaScript",
"bytes": "11595"
},
{
"name": "LLVM",
"bytes": "27271"
},
{
"name": "Lex",
"bytes": "4508"
},
{
"name": "Makefile",
"bytes": "15443"
},
{
"name": "Objective-C",
"bytes": "299"
},
{
"name": "Python",
"bytes": "3045831"
},
{
"name": "QMake",
"bytes": "389"
},
{
"name": "SMT",
"bytes": "3010"
},
{
"name": "Shell",
"bytes": "8899"
},
{
"name": "Smarty",
"bytes": "5864"
},
{
"name": "Yacc",
"bytes": "49170"
}
],
"symlink_target": ""
}
|
"""
Django settings for myapi project.
Generated by 'django-admin startproject' using Django 2.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
env = environ.Env()
dir_path = os.path.dirname(__file__)
env_path = os.path.join(dir_path, '.env')
env.read_env(env_path)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env('SECRET_KEY')
TMDB_APIKEY = env('TMDB_APIKEY')
CONSUMER_KEY = env('CONSUMER_KEY')
CONSUMER_SECRET = env('CONSUMER_SECRET')
ACCESS_TOKEN = env('ACCESS_TOKEN')
ACCESS_TOKEN_SECRET = env('ACCESS_TOKEN_SECRET')
# SECURITY WARNING: don't run with debug turned on in production!
# env.bool() parses strings like 'False'/'0'; bool() on any non-empty string is True
DEBUG = env.bool('DEBUG')
ALLOWED_HOSTS = [env('VIRTUAL_HOST')]
# Application definition
INSTALLED_APPS = [
'finote_api',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'fixture_magic',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'admin_ip_restrictor.middleware.AdminIPRestrictorMiddleware',
]
ROOT_URLCONF = 'myapi.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'myapi.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
# 'ENGINE': 'mysql.connector.django',
'ENGINE': 'django.db.backends.mysql',
'NAME': env('MYSQL_DATABASE'),
'USER': env('MYSQL_USER'),
'PASSWORD': env('MYSQL_PASSWORD'),
'HOST': 'finote-db',
'PORT': 3306,
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'ja'
TIME_ZONE = 'Asia/Tokyo'
USE_I18N = True
USE_L10N = True
USE_TZ = False
SECURE_SSL_REDIRECT = env.bool('SECURE_SSL_REDIRECT')
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
RESTRICT_ADMIN = env.bool('RESTRICT_ADMIN')
ALLOWED_ADMIN_IPS = [env('ALLOWEDADMINIP')]
RESTRICTED_APP_NAMES = ['admin']
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.AllowAny',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework_jwt.authentication.JSONWebTokenAuthentication',
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.BasicAuthentication',
),
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
'PAGE_SIZE': 1
}
AUTH_USER_MODEL = 'finote_api.AuthUser'
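# Illustrative .env sketch (values are placeholders) covering the env() lookups above:
#
#   SECRET_KEY=change-me
#   TMDB_APIKEY=xxxxxxxx
#   CONSUMER_KEY=xxxxxxxx
#   CONSUMER_SECRET=xxxxxxxx
#   ACCESS_TOKEN=xxxxxxxx
#   ACCESS_TOKEN_SECRET=xxxxxxxx
#   DEBUG=False
#   VIRTUAL_HOST=example.com
#   MYSQL_DATABASE=finote
#   MYSQL_USER=finote
#   MYSQL_PASSWORD=change-me
#   SECURE_SSL_REDIRECT=True
#   RESTRICT_ADMIN=True
#   ALLOWEDADMINIP=127.0.0.1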
|
{
"content_hash": "52171c5439b0f30f4204c9eededae52d",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 91,
"avg_line_length": 27.652173913043477,
"alnum_prop": 0.6850853548966757,
"repo_name": "kentaiwami/FiNote",
"id": "9ddcf396f72fbd4d9ebcce8995f0ac7b52d0a3b5",
"size": "4452",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/src/myapi/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "225"
},
{
"name": "Python",
"bytes": "72168"
},
{
"name": "Ruby",
"bytes": "776"
},
{
"name": "Shell",
"bytes": "6122"
},
{
"name": "Swift",
"bytes": "145555"
}
],
"symlink_target": ""
}
|
import sys
import zipfile
from io import BytesIO
from typing import Union
import warnings
import bs4
from bs4.builder import XMLParsedAsHTMLWarning
import pandas as pd
from .mappings import PSRTYPE_MAPPINGS, DOCSTATUS, BSNTYPE, Area
warnings.filterwarnings('ignore', category=XMLParsedAsHTMLWarning)
GENERATION_ELEMENT = "inBiddingZone_Domain.mRID"
CONSUMPTION_ELEMENT = "outBiddingZone_Domain.mRID"
def _extract_timeseries(xml_text):
"""
Parameters
----------
xml_text : str
Yields
-------
bs4.element.tag
"""
if not xml_text:
return
soup = bs4.BeautifulSoup(xml_text, 'html.parser')
for timeseries in soup.find_all('timeseries'):
yield timeseries
def parse_prices(xml_text):
"""
Parameters
----------
xml_text : str
Returns
-------
pd.Series
"""
series = []
for soup in _extract_timeseries(xml_text):
series.append(_parse_price_timeseries(soup))
series = pd.concat(series)
series = series.sort_index()
return series
def parse_netpositions(xml_text):
"""
Parameters
----------
xml_text : str
Returns
-------
pd.Series
"""
series = []
for soup in _extract_timeseries(xml_text):
series.append(_parse_netposition_timeseries(soup))
series = pd.concat(series)
series = series.sort_index()
return series
def parse_loads(xml_text, process_type='A01'):
"""
Parameters
----------
    xml_text : str
    process_type : str
        'A01' and 'A16' yield a single Forecasted/Actual Load column; any
        other process type yields Min/Max Forecasted Load columns.
Returns
-------
pd.DataFrame
"""
if process_type == 'A01' or process_type == 'A16':
series = []
for soup in _extract_timeseries(xml_text):
series.append(_parse_load_timeseries(soup))
series = pd.concat(series)
series = series.sort_index()
return pd.DataFrame({
'Forecasted Load' if process_type == 'A01' else 'Actual Load': series
})
else:
series_min = pd.Series(dtype='object')
series_max = pd.Series(dtype='object')
for soup in _extract_timeseries(xml_text):
t = _parse_load_timeseries(soup)
            if soup.find('businesstype').text == 'A60':
                # pd.concat replaces the deprecated Series.append
                series_min = pd.concat([series_min, t])
            elif soup.find('businesstype').text == 'A61':
                series_max = pd.concat([series_max, t])
else:
continue
return pd.DataFrame({
'Min Forecasted Load': series_min,
'Max Forecasted Load': series_max
})
def parse_generation(
xml_text: str,
per_plant: bool = False,
include_eic: bool = False,
nett: bool = False) -> Union[pd.DataFrame, pd.Series]:
"""
Parameters
----------
xml_text : str
per_plant : bool
Decide if you need the parser that can extract plant info as well.
nett : bool
If you want to condense generation and consumption of a plant into a
nett number
include_eic: bool
        If you want to include the eic code of a plant in the output
Returns
-------
pd.DataFrame | pd.Series
"""
all_series = dict()
for soup in _extract_timeseries(xml_text):
ts = _parse_generation_timeseries(soup, per_plant=per_plant, include_eic=include_eic)
# check if we already have a series of this name
series = all_series.get(ts.name)
if series is None:
# If not, we just save ts
all_series[ts.name] = ts
else:
# If yes, we extend it
series = pd.concat([series, ts])
series.sort_index(inplace=True)
all_series[series.name] = series
# drop duplicates in all series
for name in all_series:
ts = all_series[name]
all_series[name] = ts[~ts.index.duplicated(keep='first')]
df = pd.DataFrame.from_dict(all_series)
df.sort_index(inplace=True)
df = _calc_nett_and_drop_redundant_columns(df, nett=nett)
return df
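# Illustrative call (one plausible flag combination, not the only one); xml_text is
# assumed to be a raw ENTSO-E generation response obtained elsewhere. per_plant=True
# keys the columns by plant name in addition to the production type and metric.
def _example_parse_generation(xml_text: str):
    return parse_generation(xml_text, per_plant=True, include_eic=False)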
def _calc_nett_and_drop_redundant_columns(
df: pd.DataFrame, nett: bool) -> pd.DataFrame:
def _calc_nett(_df):
try:
if set(['Actual Aggregated']).issubset(_df):
if set(['Actual Consumption']).issubset(_df):
_new = _df['Actual Aggregated'].fillna(0) - _df[
'Actual Consumption'].fillna(0)
else:
_new = _df['Actual Aggregated'].fillna(0)
else:
_new = -_df['Actual Consumption'].fillna(0)
        except KeyError:
            print('Netting production and consumption not possible. Column not found')
            raise
        return _new
if hasattr(df.columns, 'levels'):
if len(df.columns.levels[-1]) == 1:
# Drop the extra header, if it is redundant
df = df.droplevel(axis=1, level=-1)
elif nett:
frames = []
for column in df.columns.levels[-2]:
new = _calc_nett(df[column])
new.name = column
frames.append(new)
df = pd.concat(frames, axis=1)
else:
if nett:
df = _calc_nett(df)
elif len(df.columns) == 1:
df = df.squeeze()
return df
def parse_installed_capacity_per_plant(xml_text):
"""
Parameters
----------
xml_text : str
Returns
-------
pd.DataFrame
"""
all_series = {}
for soup in _extract_timeseries(xml_text):
s = _parse_installed_capacity_per_plant(soup)
series = all_series.get(s.name)
if series is None:
all_series[s.name] = s
else:
series = pd.concat([series, s])
series.sort_index()
all_series[series.name] = series
for name in all_series:
ts = all_series[name]
all_series[name] = ts[~ts.index.duplicated(keep='first')]
df = pd.DataFrame.from_dict(all_series).T
df['Production Type'] = df['Production Type'].map(PSRTYPE_MAPPINGS)
df['Name'] = df['Name'].str.encode('latin-1').str.decode('utf-8')
# df['Status'] = df['Status'].map(BSNTYPE)
return df
def parse_water_hydro(xml_text, tz):
"""
Parameters
----------
xml_text : str
Returns
-------
pd.Series
"""
all_series = []
for soup in _extract_timeseries(xml_text):
all_series.append(_parse_water_hydro_timeseries(soup, tz=tz))
series = pd.concat(all_series)
return series
def parse_crossborder_flows(xml_text):
"""
Parameters
----------
xml_text : str
Returns
-------
pd.Series
"""
series = []
for soup in _extract_timeseries(xml_text):
series.append(_parse_crossborder_flows_timeseries(soup))
series = pd.concat(series)
series = series.sort_index()
return series
def parse_imbalance_prices(xml_text):
"""
Parameters
----------
xml_text : str
Returns
-------
pd.DataFrame
"""
timeseries_blocks = _extract_timeseries(xml_text)
frames = (_parse_imbalance_prices_timeseries(soup)
for soup in timeseries_blocks)
df = pd.concat(frames, axis=1)
df = df.stack().unstack() # ad-hoc fix to prevent column splitting by NaNs
df.sort_index(inplace=True)
return df
def parse_imbalance_volumes(xml_text):
"""
Parameters
----------
xml_text : str
Returns
-------
pd.DataFrame
"""
timeseries_blocks = _extract_timeseries(xml_text)
frames = (_parse_imbalance_volumes_timeseries(soup)
for soup in timeseries_blocks)
df = pd.concat(frames, axis=1)
df = df.stack().unstack() # ad-hoc fix to prevent column splitting by NaNs
df.sort_index(inplace=True)
return df
def parse_procured_balancing_capacity(xml_text, tz):
"""
Parameters
----------
xml_text : str
tz: str
Returns
-------
pd.DataFrame
"""
timeseries_blocks = _extract_timeseries(xml_text)
frames = (_parse_procured_balancing_capacity(soup, tz)
for soup in timeseries_blocks)
df = pd.concat(frames, axis=1)
df.sort_index(axis=0, inplace=True)
df.sort_index(axis=1, inplace=True)
return df
def _parse_procured_balancing_capacity(soup, tz):
"""
Parameters
----------
soup : bs4.element.tag
tz: str
Returns
-------
pd.DataFrame
"""
direction = {
'A01': 'Up',
'A02': 'Down'
}
flow_direction = direction[soup.find('flowdirection.direction').text]
period = soup.find('period')
start = pd.to_datetime(period.find('timeinterval').find('start').text)
end = pd.to_datetime(period.find('timeinterval').find('end').text)
resolution = _resolution_to_timedelta(period.find('resolution').text)
tx = pd.date_range(start=start, end=end, freq=resolution, inclusive='left')
points = period.find_all('point')
df = pd.DataFrame(index=tx, columns=['Price', 'Volume'])
for dt, point in zip(tx, points):
df.loc[dt, 'Price'] = float(point.find('procurement_price.amount').text)
df.loc[dt, 'Volume'] = float(point.find('quantity').text)
mr_id = int(soup.find('mrid').text)
df.columns = pd.MultiIndex.from_product(
[[flow_direction], [mr_id], df.columns],
names=('direction', 'mrid', 'unit')
)
return df
def parse_contracted_reserve(xml_text, tz, label):
"""
Parameters
----------
xml_text : str
tz: str
label: str
Returns
-------
pd.DataFrame
"""
timeseries_blocks = _extract_timeseries(xml_text)
frames = (_parse_contracted_reserve_series(soup, tz, label)
for soup in timeseries_blocks)
df = pd.concat(frames, axis=1)
# Ad-hoc fix to prevent that columns are split by NaNs:
    df = df.groupby(axis=1, level=[0, 1]).mean()
df.sort_index(inplace=True)
return df
def _parse_contracted_reserve_series(soup, tz, label):
"""
Parameters
----------
soup : bs4.element.tag
tz: str
label: str
Returns
-------
pd.Series
"""
positions = []
prices = []
for point in soup.find_all('point'):
positions.append(int(point.find('position').text))
prices.append(float(point.find(label).text))
df = pd.DataFrame(data={'position': positions,
label: prices})
df = df.set_index(['position'])
df.sort_index(inplace=True)
index = _parse_datetimeindex(soup, tz)
if len(index) > len(df.index):
print("Shortening index", file=sys.stderr)
df.index = index[:len(df.index)]
else:
df.index = index
df.index.name = None
df.columns.name = None
direction_dico = {'A01': 'Up',
'A02': 'Down',
'A03': 'Symmetric'}
# First column level: the type of reserve
reserve_type = BSNTYPE[soup.find("businesstype").text]
df.rename(columns={label: reserve_type}, inplace=True)
# Second column level: the flow direction
direction = direction_dico[soup.find("flowdirection.direction").text]
df.columns = pd.MultiIndex.from_product([df.columns, [direction]])
return df
def parse_imbalance_prices_zip(zip_contents: bytes) -> pd.DataFrame:
"""
Parameters
----------
zip_contents : bytes
Returns
-------
pd.DataFrame
"""
def gen_frames(archive):
with zipfile.ZipFile(BytesIO(archive), 'r') as arc:
for f in arc.infolist():
if f.filename.endswith('xml'):
frame = parse_imbalance_prices(xml_text=arc.read(f))
yield frame
frames = gen_frames(zip_contents)
df = pd.concat(frames)
df.sort_index(inplace=True)
return df
def _parse_imbalance_prices_timeseries(soup) -> pd.DataFrame:
"""
Parameters
----------
soup : bs4.element.tag
Returns
-------
pd.DataFrame
"""
positions = []
amounts = []
categories = []
for point in soup.find_all('point'):
positions.append(int(point.find('position').text))
amounts.append(float(point.find('imbalance_price.amount').text))
if point.find('imbalance_price.category'):
categories.append(point.find('imbalance_price.category').text)
else:
categories.append('None')
df = pd.DataFrame(data={'position': positions,
'amount': amounts, 'category': categories})
df = df.set_index(['position', 'category']).unstack()
df.sort_index(inplace=True)
df.index = _parse_datetimeindex(soup)
df = df.xs('amount', axis=1)
df.index.name = None
df.columns.name = None
df.rename(columns={'A04': 'Long', 'A05': 'Short',
'None': 'Price for Consumption'}, inplace=True)
return df
def parse_imbalance_volumes_zip(zip_contents: bytes) -> pd.DataFrame:
"""
Parameters
----------
zip_contents : bytes
Returns
-------
pd.DataFrame
"""
def gen_frames(archive):
with zipfile.ZipFile(BytesIO(archive), 'r') as arc:
for f in arc.infolist():
if f.filename.endswith('xml'):
frame = parse_imbalance_volumes(xml_text=arc.read(f))
yield frame
frames = gen_frames(zip_contents)
df = pd.concat(frames)
df.sort_index(inplace=True)
return df
def _parse_imbalance_volumes_timeseries(soup) -> pd.DataFrame:
"""
Parameters
----------
soup : bs4.element.tag
Returns
-------
pd.DataFrame
"""
flow_direction_factor = {
'A01': 1, # in
'A02': -1 # out
}[soup.find('flowdirection.direction').text]
df = pd.DataFrame(columns=['Imbalance Volume'])
for period in soup.find_all('period'):
start = pd.to_datetime(period.find('timeinterval').find('start').text)
end = pd.to_datetime(period.find('timeinterval').find('end').text)
resolution = _resolution_to_timedelta(period.find('resolution').text)
tx = pd.date_range(start=start, end=end, freq=resolution, inclusive='left')
points = period.find_all('point')
for dt, point in zip(tx, points):
df.loc[dt, 'Imbalance Volume'] = \
float(point.find('quantity').text) * flow_direction_factor
df.set_index(['Imbalance Volume'])
return df
def _parse_netposition_timeseries(soup):
"""
Parameters
----------
soup : bs4.element.tag
Returns
-------
pd.Series
"""
positions = []
quantities = []
if 'REGION' in soup.find('out_domain.mrid').text:
factor = -1 # flow is import so negative
else:
factor = 1
for point in soup.find_all('point'):
positions.append(int(point.find('position').text))
quantities.append(factor * float(point.find('quantity').text))
series = pd.Series(index=positions, data=quantities)
series = series.sort_index()
series.index = _parse_datetimeindex(soup)
return series
def _parse_price_timeseries(soup):
"""
Parameters
----------
soup : bs4.element.tag
Returns
-------
pd.Series
"""
positions = []
prices = []
for point in soup.find_all('point'):
positions.append(int(point.find('position').text))
prices.append(float(point.find('price.amount').text))
series = pd.Series(index=positions, data=prices)
series = series.sort_index()
series.index = _parse_datetimeindex(soup)
return series
def _parse_load_timeseries(soup):
"""
Parameters
----------
soup : bs4.element.tag
Returns
-------
pd.Series
"""
positions = []
prices = []
for point in soup.find_all('point'):
positions.append(int(point.find('position').text))
prices.append(float(point.find('quantity').text))
series = pd.Series(index=positions, data=prices)
series = series.sort_index()
series.index = _parse_datetimeindex(soup)
return series
def _parse_generation_timeseries(soup, per_plant: bool = False, include_eic: bool = False) -> pd.Series:
"""
Works for generation by type, generation forecast, and wind and solar
forecast
Parameters
----------
soup : bs4.element.tag
Returns
-------
pd.Series
"""
positions = []
quantities = []
for point in soup.find_all('point'):
positions.append(int(point.find('position').text))
quantity = point.find('quantity')
if quantity is None:
raise LookupError(
f'No quantity found in this point, it should have one: {point}')
quantities.append(float(quantity.text))
series = pd.Series(index=positions, data=quantities)
series = series.sort_index()
series.index = _parse_datetimeindex(soup)
# Check if there is a psrtype, if so, get it.
_psrtype = soup.find('psrtype')
if _psrtype is not None:
psrtype = _psrtype.text
else:
psrtype = None
# Check if the Direction is IN or OUT
# If IN, this means Actual Consumption is measured
# If OUT, this means Consumption is measured.
# OUT means Consumption of a generation plant, eg. charging a pumped hydro plant
if soup.find(CONSUMPTION_ELEMENT.lower()):
metric = 'Actual Consumption'
else:
metric = 'Actual Aggregated'
name = [metric]
# Set both psrtype and metric as names of the series
if psrtype:
psrtype_name = PSRTYPE_MAPPINGS[psrtype]
name.append(psrtype_name)
if per_plant:
plantname = soup.find('name').text
name.append(plantname)
if include_eic:
eic = soup.find("mrid", codingscheme="A01").text
name.insert(0, eic)
if len(name) == 1:
series.name = name[0]
else:
# We give the series multiple names in a tuple
# This will result in a multi-index upon concatenation
name.reverse()
series.name = tuple(name)
return series
def _parse_water_hydro_timeseries(soup, tz):
"""
Parses timeseries for water reservoirs and hydro storage plants
Parameters
----------
soup : bs4.element.tag
Returns
-------
pd.Series
"""
positions = []
quantities = []
for point in soup.find_all('point'):
positions.append(int(point.find('position').text))
quantity = point.find('quantity')
if quantity is None:
raise LookupError(
f'No quantity found in this point, it should have one: {point}')
quantities.append(float(quantity.text))
series = pd.Series(index=positions, data=quantities)
series = series.sort_index()
series.index = _parse_datetimeindex(soup, tz)
return series
def _parse_installed_capacity_per_plant(soup):
"""
Parameters
----------
soup : bs4.element.tag
Returns
-------
pd.Series
"""
extract_vals = {'Name': 'registeredresource.name',
'Production Type': 'psrtype',
'Bidding Zone': 'inbiddingzone_domain.mrid',
# 'Status': 'businesstype',
'Voltage Connection Level [kV]':
'voltage_powersystemresources.highvoltagelimit'}
series = pd.Series(extract_vals).apply(lambda v: soup.find(v).text)
# extract only first point
series['Installed Capacity [MW]'] = \
soup.find_all('point')[0].find('quantity').text
series.name = soup.find('registeredresource.mrid').text
return series
def _parse_datetimeindex(soup, tz=None):
"""
Create a datetimeindex from a parsed beautifulsoup,
given that it contains the elements 'start', 'end'
and 'resolution'
Parameters
----------
soup : bs4.element.tag
tz: str
Returns
-------
pd.DatetimeIndex
"""
start = pd.Timestamp(soup.find('start').text)
end = pd.Timestamp(soup.find_all('end')[-1].text)
if tz is not None:
start = start.tz_convert(tz)
end = end.tz_convert(tz)
delta = _resolution_to_timedelta(res_text=soup.find('resolution').text)
index = pd.date_range(start=start, end=end, freq=delta, inclusive='left')
if tz is not None:
dst_jump = len(set(index.map(lambda d: d.dst()))) > 1
if dst_jump and delta == "7D":
# For a weekly granularity, if we jump over the DST date in October,
            # date_range erroneously returns an additional index element
# because that week contains 169 hours instead of 168.
index = index[:-1]
index = index.tz_convert("UTC")
return index
def _parse_crossborder_flows_timeseries(soup):
"""
Parameters
----------
soup : bs4.element.tag
Returns
-------
pd.Series
"""
positions = []
flows = []
for point in soup.find_all('point'):
positions.append(int(point.find('position').text))
flows.append(float(point.find('quantity').text))
series = pd.Series(index=positions, data=flows)
series = series.sort_index()
series.index = _parse_datetimeindex(soup)
return series
def _resolution_to_timedelta(res_text: str) -> str:
"""
Convert an Entsoe resolution to something that pandas can understand
"""
resolutions = {
'PT60M': '60min',
'P1Y': '12M',
'PT15M': '15min',
'PT30M': '30min',
'P1D': '1D',
'P7D': '7D',
'P1M': '1M',
}
delta = resolutions.get(res_text)
if delta is None:
raise NotImplementedError("Sorry, I don't know what to do with the "
"resolution '{}', because there was no "
"documentation to be found of this format. "
"Everything is hard coded. Please open an "
"issue.".format(res_text))
return delta
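# --- Illustrative sketch (editor's addition, not part of entsoe-py) -----------
# How a resolution string and a document's time interval combine into the index
# that point positions are mapped onto; the timestamps below are made up.
def _demo_resolution_index():
    import pandas as pd
    freq = _resolution_to_timedelta('PT15M')        # -> '15min'
    start = pd.Timestamp('2023-01-01T00:00Z')
    end = pd.Timestamp('2023-01-01T01:00Z')
    # inclusive='left' mirrors _parse_datetimeindex: the end boundary is excluded
    return pd.date_range(start=start, end=end, freq=freq, inclusive='left')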
# Define the inverse bidding zone dictionary used to look up bidding zone labels
# from the domain code in the unavailability parsers:
_INV_BIDDING_ZONE_DICO = {area.code: area.name for area in Area}
HEADERS_UNAVAIL_GEN = ['created_doc_time',
'docstatus',
'mrid',
'revision',
'businesstype',
'biddingzone_domain',
'qty_uom',
'curvetype',
'production_resource_id',
'production_resource_name',
'production_resource_location',
'plant_type',
'nominal_power',
'start',
'end',
'resolution',
'pstn',
'avail_qty'
]
def _unavailability_gen_ts(soup: bs4.BeautifulSoup) -> list:
"""
Parser for generation unavailibility time-series
Parameters
----------
soup : bs4.element.tag
tz : str
Returns
-------
list
"""
# Avoid attribute errors when some of the fields are void:
get_attr = lambda attr: "" if soup.find(attr) is None else soup.find(
attr).text
    # When no nominal power is given, default to NaN:
get_float = lambda val: float('NaN') if val == "" else float(val)
f = [BSNTYPE[get_attr('businesstype')],
_INV_BIDDING_ZONE_DICO[get_attr('biddingzone_domain.mrid')],
get_attr('quantity_measure_unit.name'),
get_attr('curvetype'),
get_attr('production_registeredresource.mrid'),
get_attr('production_registeredresource.name'),
get_attr('production_registeredresource.location.name'),
PSRTYPE_MAPPINGS.get(get_attr(
'production_registeredresource.psrtype.psrtype'), ""),
get_float(get_attr(
'production_registeredresource.psrtype.powersystemresources.nominalp'))]
return [f + p for p in _available_period(soup)]
HEADERS_UNAVAIL_TRANSM = ['created_doc_time',
'docstatus',
'businesstype',
'in_domain',
'out_domain',
'qty_uom',
'curvetype',
'start',
'end',
'resolution',
'pstn',
'avail_qty'
]
def _unavailability_tm_ts(soup: bs4.BeautifulSoup) -> list:
"""
Parser for transmission unavailibility time-series
Parameters
----------
soup : bs4.element.tag
tz : str
Returns
-------
list
"""
# Avoid attribute errors when some of the fields are void:
get_attr = lambda attr: "" if soup.find(attr) is None else soup.find(
attr).text
f = [BSNTYPE[get_attr('businesstype')],
_INV_BIDDING_ZONE_DICO[get_attr('in_domain.mrid')],
_INV_BIDDING_ZONE_DICO[get_attr('out_domain.mrid')],
get_attr('quantity_measure_unit.name'),
get_attr('curvetype'),
]
return [f + p for p in _available_period(soup)]
_UNAVAIL_PARSE_CFG = {'A77': (HEADERS_UNAVAIL_GEN, _unavailability_gen_ts),
'A78': (HEADERS_UNAVAIL_TRANSM, _unavailability_tm_ts),
'A80': (HEADERS_UNAVAIL_GEN, _unavailability_gen_ts)}
def parse_unavailabilities(response: bytes, doctype: str) -> pd.DataFrame:
"""
Response for Unavailability of Generation Units is ZIP folder
with one document inside it for each outage.
This function parses all the files in the ZIP and returns a Pandas DataFrame.
"""
# First, find out which parser and headers to use, based on the doc type:
headers, ts_func = _UNAVAIL_PARSE_CFG[doctype]
dfs = list()
with zipfile.ZipFile(BytesIO(response), 'r') as arc:
for f in arc.infolist():
if f.filename.endswith('xml'):
frame = _outage_parser(arc.read(f), headers, ts_func)
dfs.append(frame)
if len(dfs) == 0:
df = pd.DataFrame(columns=headers)
else:
df = pd.concat(dfs, axis=0)
df.set_index('created_doc_time', inplace=True)
df.sort_index(inplace=True)
return df
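# --- Illustrative sketch (editor's addition, not part of entsoe-py) -----------
# With an empty ZIP archive parse_unavailabilities returns an empty frame that
# still carries the doctype's headers; the bytes below build such an archive
# in memory purely for illustration.
def _demo_empty_unavailability_zip():
    import zipfile
    from io import BytesIO
    buf = BytesIO()
    zipfile.ZipFile(buf, 'w').close()               # valid, but empty, archive
    return parse_unavailabilities(buf.getvalue(), doctype='A77')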
def _available_period(timeseries: bs4.BeautifulSoup):
    # Generator: yields [start, end, resolution, position, quantity] for each
    # available_period element in the time series.
for period in timeseries.find_all('available_period'):
start, end = pd.Timestamp(period.timeinterval.start.text), pd.Timestamp(
period.timeinterval.end.text)
res = period.resolution.text
pstn, qty = period.point.position.text, period.point.quantity.text
yield [start, end, res, pstn, qty]
def _outage_parser(xml_file: bytes, headers, ts_func) -> pd.DataFrame:
xml_text = xml_file.decode()
soup = bs4.BeautifulSoup(xml_text, 'html.parser')
mrid = soup.find("mrid").text
revision_number = int(soup.find("revisionnumber").text)
try:
creation_date = pd.Timestamp(soup.createddatetime.text)
except AttributeError:
creation_date = ""
try:
docstatus = DOCSTATUS[soup.docstatus.value.text]
except AttributeError:
docstatus = None
d = list()
series = _extract_timeseries(xml_text)
for ts in series:
row = [creation_date, docstatus, mrid, revision_number]
for t in ts_func(ts):
d.append(row + t)
df = pd.DataFrame.from_records(d, columns=headers)
return df
|
{
"content_hash": "a945750ed060d1bc4f444a41f41267b3",
"timestamp": "",
"source": "github",
"line_count": 981,
"max_line_length": 104,
"avg_line_length": 28.186544342507645,
"alnum_prop": 0.574373440381903,
"repo_name": "EnergieID/entsoe-py",
"id": "e765b6e0c285e3d56c424fc2f7ffb486fed0ba59",
"size": "27651",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "entsoe/parsers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "132589"
}
],
"symlink_target": ""
}
|
"""Serializers for the api views"""
from rest_framework import serializers
from photo_editor.models import Base, Folder, Image, ImageProcessorTool
class BaseSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Base
class ImageSerializer(BaseSerializer):
folder = serializers.PrimaryKeyRelatedField(read_only=True)
class Meta:
model = Image
fields = ('id', 'name', 'content_type', 'date_created',
'date_last_modified', 'image', 'folder',
'large_image_url', 'thumbnail_image_url')
class FolderSerializer(BaseSerializer):
owner = serializers.ReadOnlyField(source='owner.username')
images = ImageSerializer(many=True, read_only=True)
class Meta:
model = Folder
fields = ('id', 'owner', 'name', 'date_created',
'date_last_modified', 'images')
class ImageProcessorToolSerializer(BaseSerializer):
class Meta:
model = ImageProcessorTool
fields = ('name', 'thumbnail_image_url')
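# --- Illustrative sketch (editor's addition, not part of django-photo-editor) --
# Rough shape of a FolderSerializer payload given the fields declared above;
# every value below is made up.
def _example_folder_payload():
    return {
        'id': 1,
        'owner': 'alice',
        'name': 'holiday',
        'date_created': '2017-01-01T00:00:00Z',
        'date_last_modified': '2017-01-02T00:00:00Z',
        'images': [
            {'id': 7, 'name': 'beach.png', 'folder': 1},  # image fields trimmed
        ],
    }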
|
{
"content_hash": "575bf8cd3fb6d17151ca6262ebc3935b",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 71,
"avg_line_length": 28.86111111111111,
"alnum_prop": 0.6621751684311838,
"repo_name": "andela-cdike/django-photo-editor",
"id": "eb4f96111ab8210eb960510274a7ae8dc55fdc3d",
"size": "1039",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "photo_magick/photo_editor/serializers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8000"
},
{
"name": "HTML",
"bytes": "3882"
},
{
"name": "JavaScript",
"bytes": "104328"
},
{
"name": "Python",
"bytes": "60355"
}
],
"symlink_target": ""
}
|
import argparse
import os
import pytest
import taichi as ti
FRAMES = 200
@pytest.mark.skipif(os.environ.get('TI_LITE_TEST') or '0', reason='Lite test')
def test_cornell_box():
from taichi.examples.rendering.cornell_box import render, tonemap
for i in range(FRAMES):
render()
interval = 10
if i % interval == 0:
tonemap(i)
@pytest.mark.skipif(os.environ.get('TI_LITE_TEST') or '0', reason='Lite test')
def video_cornell_box(result_dir):
from taichi.examples.rendering.cornell_box import (render, tonemap,
tonemapped_buffer)
video_manager = ti.tools.VideoManager(output_dir=result_dir,
framerate=24,
automatic_build=False)
gui = ti.GUI("Taichi Cornell Box",
res=800,
background_color=0x112F41,
show_gui=False)
for i in range(FRAMES):
render()
interval = 10
if i % interval == 0:
tonemap(i)
gui.set_image(tonemapped_buffer)
video_manager.write_frame(gui.get_image())
gui.clear()
video_manager.make_video(mp4=True, gif=False)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Generate cornell_box video')
parser.add_argument('output_directory',
help='output directory of generated video')
video_cornell_box(parser.parse_args().output_directory)
|
{
"content_hash": "4eac5e28ab37641fe2402ca85f158b40",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 78,
"avg_line_length": 31.75,
"alnum_prop": 0.5767716535433071,
"repo_name": "yuanming-hu/taichi",
"id": "782b8783fee685b0d83d382b7dfa511c89fdc6dc",
"size": "1524",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/python/examples/rendering/test_cornell_box.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "66677"
},
{
"name": "C++",
"bytes": "3713898"
},
{
"name": "CMake",
"bytes": "69354"
},
{
"name": "Cuda",
"bytes": "20566"
},
{
"name": "GLSL",
"bytes": "10756"
},
{
"name": "Makefile",
"bytes": "994"
},
{
"name": "PowerShell",
"bytes": "9227"
},
{
"name": "Python",
"bytes": "2209929"
},
{
"name": "Shell",
"bytes": "12216"
}
],
"symlink_target": ""
}
|
from twisted.web.resource import Resource
from synapse.http.server import respond_with_json_bytes
from signedjson.sign import sign_json
from unpaddedbase64 import encode_base64
from canonicaljson import encode_canonical_json
from hashlib import sha256
from OpenSSL import crypto
import logging
logger = logging.getLogger(__name__)
class LocalKey(Resource):
"""HTTP resource containing encoding the TLS X.509 certificate and NACL
signature verification keys for this server::
GET /_matrix/key/v2/server/a.key.id HTTP/1.1
HTTP/1.1 200 OK
Content-Type: application/json
{
"valid_until_ts": # integer posix timestamp when this result expires.
"server_name": "this.server.example.com"
"verify_keys": {
"algorithm:version": {
"key": # base64 encoded NACL verification key.
}
},
"old_verify_keys": {
"algorithm:version": {
"expired_ts": # integer posix timestamp when the key expired.
"key": # base64 encoded NACL verification key.
}
}
"tls_certificate": # base64 ASN.1 DER encoded X.509 tls cert.
"signatures": {
"this.server.example.com": {
"algorithm:version": # NACL signature for this server
}
}
}
"""
isLeaf = True
def __init__(self, hs):
self.version_string = hs.version_string
self.config = hs.config
self.clock = hs.clock
self.update_response_body(self.clock.time_msec())
Resource.__init__(self)
def update_response_body(self, time_now_msec):
refresh_interval = self.config.key_refresh_interval
self.valid_until_ts = int(time_now_msec + refresh_interval)
self.response_body = encode_canonical_json(self.response_json_object())
def response_json_object(self):
verify_keys = {}
for key in self.config.signing_key:
verify_key_bytes = key.verify_key.encode()
key_id = "%s:%s" % (key.alg, key.version)
verify_keys[key_id] = {
u"key": encode_base64(verify_key_bytes)
}
old_verify_keys = {}
for key in self.config.old_signing_keys:
key_id = "%s:%s" % (key.alg, key.version)
verify_key_bytes = key.encode()
old_verify_keys[key_id] = {
u"key": encode_base64(verify_key_bytes),
u"expired_ts": key.expired,
}
x509_certificate_bytes = crypto.dump_certificate(
crypto.FILETYPE_ASN1,
self.config.tls_certificate
)
sha256_fingerprint = sha256(x509_certificate_bytes).digest()
json_object = {
u"valid_until_ts": self.valid_until_ts,
u"server_name": self.config.server_name,
u"verify_keys": verify_keys,
u"old_verify_keys": old_verify_keys,
u"tls_fingerprints": [{
u"sha256": encode_base64(sha256_fingerprint),
}]
}
for key in self.config.signing_key:
json_object = sign_json(
json_object,
self.config.server_name,
key,
)
return json_object
def render_GET(self, request):
time_now = self.clock.time_msec()
# Update the expiry time if less than half the interval remains.
if time_now + self.config.key_refresh_interval / 2 > self.valid_until_ts:
self.update_response_body(time_now)
return respond_with_json_bytes(
request, 200, self.response_body,
version_string=self.version_string
)
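# --- Illustrative sketch (editor's addition, not part of synapse) -------------
# Minimal example of how a verify-key response like the one documented in the
# LocalKey docstring is signed with signedjson; the key material is generated
# on the fly and purely illustrative.
def _demo_sign_key_response():
    from signedjson.key import generate_signing_key, get_verify_key
    signing_key = generate_signing_key('demo_version')
    verify_key_bytes = get_verify_key(signing_key).encode()
    json_object = {
        "server_name": "this.server.example.com",
        "verify_keys": {
            "ed25519:demo_version": {"key": encode_base64(verify_key_bytes)},
        },
    }
    # sign_json adds a "signatures" block keyed by server name and key id
    return sign_json(json_object, "this.server.example.com", signing_key)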
|
{
"content_hash": "25bf220416c35896b007378c655209dd",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 81,
"avg_line_length": 34.81651376146789,
"alnum_prop": 0.5649538866930172,
"repo_name": "iot-factory/synapse",
"id": "ef7699d590cfa1dc8e5608225ee9368eb82324a1",
"size": "4405",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "synapse/rest/key/v2/local_key_resource.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2000"
},
{
"name": "HTML",
"bytes": "2905"
},
{
"name": "JavaScript",
"bytes": "176441"
},
{
"name": "Perl",
"bytes": "31842"
},
{
"name": "Python",
"bytes": "1879672"
},
{
"name": "Shell",
"bytes": "4548"
}
],
"symlink_target": ""
}
|
try:
import idlelib.PyShell
except ImportError:
# IDLE is not installed, but maybe PyShell is on sys.path:
try:
import PyShell
except ImportError:
raise
else:
import os
idledir = os.path.dirname(os.path.abspath(PyShell.__file__))
if idledir != os.getcwd():
# We're not in the IDLE directory, help the subprocess find run.py
pypath = os.environ.get('PYTHONPATH', '')
if pypath:
os.environ['PYTHONPATH'] = pypath + ':' + idledir
else:
os.environ['PYTHONPATH'] = idledir
PyShell.main()
else:
idlelib.PyShell.main()
|
{
"content_hash": "23cd199329e75ed78708a2575285f142",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 78,
"avg_line_length": 31.61904761904762,
"alnum_prop": 0.5677710843373494,
"repo_name": "MalloyPower/parsing-python",
"id": "00a1fe3a00d121300c1e1150427ca629e2d54c5e",
"size": "683",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "front-end/testsuite-python-lib/Python-2.3/Lib/idlelib/idle.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1963"
},
{
"name": "Lex",
"bytes": "238458"
},
{
"name": "Makefile",
"bytes": "4513"
},
{
"name": "OCaml",
"bytes": "412695"
},
{
"name": "Python",
"bytes": "17319"
},
{
"name": "Rascal",
"bytes": "523063"
},
{
"name": "Yacc",
"bytes": "429659"
}
],
"symlink_target": ""
}
|
"""
@package ion.agents.instrument.port_agent_process
@file ion/agents.instrument/driver_launcher.py
@author Bill French
@brief Port agent process class that provides a factory for different launch mechanisms
USAGE:
config = {
    device_addr : 'localhost',
    device_port : 4001,
type : PortAgentType.ETHERNET,
process_type : PortAgentProcessType.PYTHON,
}
# These lines can also be run as one command, launch_process
process = PortAgentType.get_process(config)
process.launch()
# alternative launch
process = launch_process(config)
pid = process.get_pid()
cmd_port = process.get_command_port()
data_port = process.get_data_port()
if(process.poll()):
process.stop()
"""
__author__ = 'Bill French'
import os
import time
import signal
import gevent
import tempfile
import subprocess
from mi.core.log import log
from mi.core.common import BaseEnum
from mi.core.logger_process import EthernetDeviceLogger
from mi.core.exceptions import PortAgentLaunchException
from mi.core.exceptions import NotImplementedException
from mi.core.exceptions import PortAgentTimeout
from mi.core.exceptions import PortAgentMissingConfig
from gevent import Timeout
PYTHON_PATH = 'bin/python'
UNIX_PROCESS = 'port_agent'
DEFAULT_TIMEOUT = 60
PROCESS_BASE_DIR = '/tmp'
PID_FILE = "%s/port_agent_%d.pid"
LOCALHOST = 'localhost'
DEFAULT_HEARTBEAT = 0
class PortAgentProcessType(BaseEnum):
"""
Defines the process types for the port agent. i.e. C++ or Python
"""
PYTHON = 'PYTHON'
UNIX = 'UNIX'
class PortAgentType(BaseEnum):
"""
What type of port agent are we running? ethernet, serial, digi etc...
"""
ETHERNET = 'tcp'
BOTPT = "botpt"
RSN = 'rsn'
class ObservatoryType(BaseEnum):
"""
    What type of observatory configuration is the port agent serving? standard or multi
"""
STANDARD = 'standard'
MULTI = 'multi'
class PortAgentProcess(object):
"""
Base class for port agent process launcher
"""
_command_port = None
_data_port = None
_pid = None
def __init__(self, config, timeout=DEFAULT_TIMEOUT, test_mode=False):
self._config = config
self._timeout = timeout
self._test_mode = test_mode
@classmethod
def get_process(cls, config, timeout=DEFAULT_TIMEOUT, test_mode=False):
"""
factory class to return the correct PortAgentProcess type based on the config.
        config may contain process_type and type; process_type defaults to unix
        and type defaults to ethernet.
could use variable length parameter lists (**kwargs) here, but I am following the
same pattern the initial port agent used for passing in configurations.
@param config dictionary containing configuration information for the port agent.
@param timeout timeout for port agent launch. If exceeded an exception is raised
@param test_mode enable test mode for the port agent
"""
# Default to unix port agent
process_type = config.get("process_type", PortAgentProcessType.UNIX)
if process_type == PortAgentProcessType.PYTHON:
return PythonPortAgentProcess(config, timeout, test_mode)
if process_type == PortAgentProcessType.UNIX:
return UnixPortAgentProcess(config, timeout, test_mode)
else:
raise PortAgentLaunchException("unknown port agent process type: %s" % process_type)
@classmethod
def launch_process(cls, config, timeout=DEFAULT_TIMEOUT, test_mode=False):
"""
Just like the get_process factory method except we call launch with the new object.
@param config dictionary containing configuration information for the port agent.
@param timeout timeout for port agent launch. If exceeded an exception is raised
@param test_mode enable test mode for the port agent
"""
process = cls.get_process(config, timeout, test_mode)
process.launch()
return process
def launch(self):
"""
Launch the port agent process. Must be overloaded.
@raises NotImplementedException
"""
raise NotImplementedException('launch()')
def poll(self):
"""
Check to see if the port agent process is alive.
@return true if process is running, false otherwise
"""
if not self._pid:
return False
try:
os.kill(self._pid, 0)
except OSError, e:
log.warn("Could not send a signal to the driver, pid: %s" % self._pid)
return False
return True
def stop(self):
"""
Stop the driver process. We just send a signal to a process. We may be able to overload this to do something
more graceful.
"""
pid = self.get_pid()
if pid:
os.kill(pid, signal.SIGTERM)
def get_pid(self):
"""
Get the pid of the current running process and ensure that it is running.
@returns the pid of the driver process if it is running, otherwise None
"""
if self.poll():
return self._pid
else:
return None
def get_command_port(self):
"""
Get the command port for the port agent process
@returns port number
"""
return self._command_port
def get_data_port(self):
"""
Get the data port for the port agent process
@returns port number
"""
return self._data_port
class PythonPortAgentProcess(PortAgentProcess):
"""
Object to facilitate launching port agent processes using a python class and module path.
Port Agent config requirements:
    device_addr :: the network address of the instrument device
    device_port :: the network port of the instrument device
    Example:
    port_agent_config = {
        device_addr: 'sbe37-simulator.oceanobservatories.org',
        device_port: 4001,
        working_dir: '/tmp/',
        delimiter: ['<<', '>>'],
type: PortAgentType.ETHERNET
}
@param config configuration parameters for the driver process
@param test_mode should the driver be run in test mode
"""
_port_agent = None
def __init__(self, config, timeout=DEFAULT_TIMEOUT, test_mode=False):
"""
Initialize the Python port agent object using the passed in config. This
defaults to ethernet as the type because that is currently the only port
agent we have.
@raises PortAgentMissingConfig
"""
self._config = config
self._timeout = timeout
self._test_mode = test_mode
# Verify our configuration is correct
self._device_addr = config.get("device_addr")
self._device_port = config.get("device_port")
self._working_dir = config.get("working_dir", '/tmp/')
self._delimiter = config.get("delimiter", ['<<', '>>'])
self._type = config.get("type", PortAgentType.ETHERNET)
if not self._device_addr:
raise PortAgentMissingConfig("missing config: device_addr")
if not self._device_port:
raise PortAgentMissingConfig("missing config: device_port")
if not self._type == PortAgentType.ETHERNET:
raise PortAgentLaunchException("unknown port agent type: %s" % self._type)
def launch(self):
"""
@brief Launch the driver process and driver client. This is used in the
integration and qualification tests. The port agent abstracts the physical
interface with the instrument.
@retval return the pid to the logger process
"""
log.info("Startup Port Agent")
# Create port agent object.
this_pid = os.getpid() if self._test_mode else None
log.debug(" -- our pid: %s" % this_pid)
log.debug(" -- address: %s, port: %s" % (self._device_addr, self._device_port))
# Working dir and delim are hard coded here because this launch process
# will change with the new port agent.
self.port_agent = EthernetDeviceLogger.launch_process(
self._device_addr,
self._device_port,
self._working_dir,
self._delimiter,
this_pid)
log.debug(" Port agent object created")
start_time = time.time()
expire_time = start_time + int(self._timeout)
pid = self.port_agent.get_pid()
while not pid:
gevent.sleep(.1)
pid = self.port_agent.get_pid()
if time.time() > expire_time:
log.error("!!!! Failed to start Port Agent !!!!")
raise PortAgentTimeout('port agent could not be started')
self._pid = pid
port = self.port_agent.get_port()
start_time = time.time()
expire_time = start_time + int(self._timeout)
while not port:
gevent.sleep(.1)
port = self.port_agent.get_port()
if time.time() > expire_time:
log.error("!!!! Port Agent could not bind to port !!!!")
self.stop()
raise PortAgentTimeout('port agent could not bind to port')
self._data_port = port
log.info('Started port agent pid %s listening at port %s' % (pid, port))
return port
def stop(self):
if self.port_agent:
pid = self.port_agent.get_pid()
if pid:
log.info('Stopping pagent pid %i' % pid)
self.port_agent.stop()
else:
log.info('No port agent running.')
class UnixPortAgentProcess(PortAgentProcess):
"""
Object to facilitate launching port agent processes using a c++ compiled port agent
Port Agent config requirements:
    binary_path :: Path to the port agent executable
command_port :: port number of the observatory command port to the port agent
log_level :: how many -v options to add to the launch
    port_agent_addr :: If the port agent address isn't localhost the process won't be launched
Example:
port_agent_config = {
device_addr: sbe37-simulator.oceanobservatories.org,
device_port: 4001,
binary_path: /bin/port_agent,
port_agent_addr: localhost
command_port: 4000,
data_port: 4002,
log_level: 5,
type: PortAgentType.ETHERNET
}
@param config configuration parameters for the driver process
@param test_mode should the driver be run in test mode
"""
_port_agent = None
def __init__(self, config, timeout=DEFAULT_TIMEOUT, test_mode=False):
"""
        Initialize the Unix port agent object using the passed in config. This
        defaults to ethernet as the instrument type.
@raises PortAgentMissingConfig
"""
self._config = config
self._timeout = timeout
self._test_mode = test_mode
# Verify our configuration is correct
self._observatory_type = config.get("observatory_type", ObservatoryType.STANDARD)
self._device_addr = config.get("device_addr")
self._device_port = config.get("device_port")
self._device_tx_port = config.get("device_tx_port")
self._device_rx_port = config.get("device_rx_port")
self._binary_path = config.get("binary_path", "port_agent")
self._command_port = config.get("command_port")
self._pa_addr = config.get("port_agent_addr")
self._heartbeat_interval = config.get("heartbeat_interval")
self._sniffer_port = config.get('telnet_sniffer_port')
self._data_port = config.get("data_port")
self._log_level = config.get("log_level")
self._type = config.get("instrument_type", PortAgentType.ETHERNET)
if self._type == PortAgentType.ETHERNET:
self._device_addr = config.get("device_addr")
self._device_port = config.get("device_port")
elif self._type == PortAgentType.RSN:
self._device_addr = config.get("device_addr")
self._device_port = config.get("device_port")
self._device_cmd_port = config.get("instrument_command_port")
if not self._pa_addr:
self._pa_addr = LOCALHOST
if not self._heartbeat_interval:
self._heartbeat_interval = DEFAULT_HEARTBEAT
if PortAgentType.BOTPT == self._type:
if not self._device_tx_port:
raise PortAgentMissingConfig("missing config: device_tx_port (BOTPT)")
if not self._device_rx_port:
raise PortAgentMissingConfig("missing config: device_rx_port (BOTPT)")
elif PortAgentType.ETHERNET == self._type:
if not self._device_addr:
raise PortAgentMissingConfig("missing config: device_addr")
if not self._device_port:
raise PortAgentMissingConfig("missing config: device_port (ETHERNET)")
elif PortAgentType.RSN == self._type:
if not self._device_addr:
raise PortAgentMissingConfig("missing config: device_addr")
if not self._device_port:
raise PortAgentMissingConfig("missing config: device_port (RSN)")
if not self._device_cmd_port:
raise PortAgentMissingConfig("missing config: device_cmd_port (RSN)")
else:
raise PortAgentLaunchException("unknown port agent type: %s" % self._type)
if not self._command_port:
raise PortAgentMissingConfig("missing config: command_port")
if ObservatoryType.MULTI == self._observatory_type:
if not self._data_port:
raise PortAgentMissingConfig("missing config: data_port")
else:
if not self._data_port:
raise PortAgentMissingConfig("missing config: data_port")
if not self._binary_path:
raise PortAgentMissingConfig("missing config: binary_path")
self._tmp_config = self.get_config()
def get_config(self):
"""
@brief Write a configuration file for the port agent to read.
        @retval NamedTemporaryFile object for the config file.
"""
temp = tempfile.NamedTemporaryFile()
temp.write("\n")
temp.write("log_dir %s\n" % PROCESS_BASE_DIR)
temp.write("pid_dir %s\n" % PROCESS_BASE_DIR)
temp.write("data_dir %s\n" % PROCESS_BASE_DIR)
if PortAgentType.BOTPT == self._type:
temp.write("instrument_type botpt\n")
temp.write("instrument_data_tx_port %d\n" % self._device_tx_port)
temp.write("instrument_data_rx_port %d\n" % self._device_rx_port)
else:
temp.write("instrument_type tcp\n")
temp.write("instrument_data_port %d\n" % self._device_port)
temp.write("instrument_addr %s\n" % self._device_addr)
temp.write("data_port %d\n" % self._data_port)
temp.write("heartbeat_interval %d\n" % self._heartbeat_interval)
temp.flush()
return temp
def launch(self):
'''
        @brief Launch a port agent process if it is supposed to run on the local host.
        Otherwise do nothing.
@return the command port the port agent is listening on.
'''
if self._pa_addr == LOCALHOST:
self._launch()
else:
self._pid = None
log.info("Port Agent Address: %s" % self._pa_addr)
log.info("Not starting port agent")
return self._command_port
def _launch(self):
"""
@brief Launch the port agent process. If the address isn't localhost
then we don't start anything
@retval return the command port the process is listening on.
"""
log.info("Startup Unix Port Agent")
# Create port agent object.
this_pid = os.getpid() if self._test_mode else None
log.debug(" -- our pid: %s" % this_pid)
log.debug(" -- command port: %s" % self._command_port)
log.debug(" -- address: %s, port: %s" % (self._device_addr, self._device_port))
command_line = [self._binary_path, self._type, self._data_port, self._command_port, self._device_addr]
if self._type == PortAgentType.ETHERNET:
command_line.extend([self._device_port])
elif self._type == PortAgentType.RSN:
command_line.extend([self._device_port, self._device_cmd_port])
elif self._type == PortAgentType.BOTPT:
command_line.extend([self._device_rx_port, self._device_tx_port])
if self._sniffer_port:
command_line.append('--sniff=%d' % self._sniffer_port)
command_line = [str(arg) for arg in command_line]
self._pid = self.run_command(command_line)
return self._command_port
def run_command(self, command_line):
log.debug("run command: " + str(command_line))
process = subprocess.Popen(command_line, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
gevent.sleep(1)
process.poll()
# We have failed!
if process.returncode and process.pid:
output, error_message = process.communicate()
log.error("Failed to run command: STDERR: %s", error_message)
raise PortAgentLaunchException("failed to launch port agent")
log.debug("command successful. pid: %d", process.pid)
return process.pid
def _read_config(self):
self._tmp_config.seek(0)
return "".join(self._tmp_config.readlines())
def stop(self):
log.info('Stop port agent')
os.kill(self._pid, 2)
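# --- Illustrative sketch (editor's addition, not part of mi-instrument) -------
# Minimal config for the factory shown in the module USAGE docstring; the host
# and port values are made up and nothing is actually launched here.
def _demo_port_agent_config():
    config = {
        'device_addr': 'localhost',
        'device_port': 4001,
        'command_port': 4000,
        'data_port': 4002,
        'binary_path': 'port_agent',
        'port_agent_addr': 'localhost',
        'log_level': 5,
        'process_type': PortAgentProcessType.UNIX,
        'instrument_type': PortAgentType.ETHERNET,
    }
    # A caller would then do:
    #   process = PortAgentProcess.launch_process(config)
    #   process.get_pid(); process.get_data_port(); process.stop()
    return config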
|
{
"content_hash": "5347ef6f77d7a5e9908e2eea28aefc0f",
"timestamp": "",
"source": "github",
"line_count": 513,
"max_line_length": 118,
"avg_line_length": 34.53021442495127,
"alnum_prop": 0.6214858304166196,
"repo_name": "vipullakhani/mi-instrument",
"id": "62ee69900f415058f1aa1a796799ad5efcce1b05",
"size": "17737",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "mi/core/port_agent_process.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "4746"
},
{
"name": "Python",
"bytes": "9968191"
}
],
"symlink_target": ""
}
|
from dateparser import parse
from datetime import datetime
from pytz import utc
import urllib3
from CommonServerPython import * # noqa: E402 lgtm [py/polluting-import]
# Disable insecure warnings
urllib3.disable_warnings()
# CONSTANTS
DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
DEFAULT_INCIDENT_TO_FETCH = 50
SEVERITY_OPTIONS = {
'Low': 0,
'Medium': 1,
'High': 2,
'All': [0, 1, 2],
}
RESOLUTION_STATUS_OPTIONS = {
'Open': 0,
'Dismissed': 1,
'Resolved': 2,
'All': [0, 1, 2],
}
# Note that number 4 is missing
SOURCE_TYPE_OPTIONS = {
'Access_control': 0,
'Session_control': 1,
'App_connector': 2,
'App_connector_analysis': 3,
'Discovery': 5,
'MDATP': 6,
}
FILE_TYPE_OPTIONS = {
'Other': 0,
'Document': 1,
'Spreadsheet': 2,
'Presentation': 3,
'Text': 4,
'Image': 5,
'Folder': 6,
}
FILE_SHARING_OPTIONS = {
'Private': 0,
'Internal': 1,
'External': 2,
'Public': 3,
'Public_Internet': 4,
}
IP_CATEGORY_OPTIONS = {
'Corporate': 1,
'Administrative': 2,
'Risky': 3,
'VPN': 4,
'Cloud_provider': 5,
'Other': 6,
}
IS_EXTERNAL_OPTIONS = {
'External': True,
'Internal': False,
'No_value': None,
}
STATUS_OPTIONS = {
'N/A': 0,
'Staged': 1,
'Active': 2,
'Suspended': 3,
'Deleted': 4,
}
CLOSE_BENIGN_REASON_OPTIONS = {
'Actual severity is lower': 0,
'Other': 1,
'Confirmed with end user': 2,
'Triggered by test': 3,
}
CLOSE_FALSE_POSITIVE_REASON_OPTIONS = {
'Not of interest': 0,
'Too many similar alerts': 1,
'Alert is not accurate': 2,
'Other': 3,
}
INTEGRATION_NAME = 'MicrosoftCloudAppSecurity'
class LegacyClient(BaseClient):
def http_request(self, **args):
return self._http_request(**args)
class Client:
@logger
def __init__(self, app_id: str, verify: bool, proxy: bool, base_url: str, auth_mode: str, tenant_id: str = None,
enc_key: str = None, headers: Optional[dict] = {}):
if auth_mode == 'legacy':
self.ms_client = LegacyClient(
base_url=base_url,
verify=verify,
headers=headers,
proxy=proxy)
else:
self.client_credentials = True if auth_mode == 'client credentials' else False
if '@' in app_id:
app_id, refresh_token = app_id.split('@')
integration_context = get_integration_context()
integration_context.update(current_refresh_token=refresh_token)
set_integration_context(integration_context)
client_args = assign_params(
base_url=base_url,
verify=verify,
proxy=proxy,
ok_codes=(200, 201, 202, 204),
scope='05a65629-4c1b-48c1-a78b-804c4abdd4af/.default',
self_deployed=True, # We always set the self_deployed key as True because when not using a self
# deployed machine, the DEVICE_CODE flow should behave somewhat like a self deployed
# flow and most of the same arguments should be set, as we're !not! using OProxy.
auth_id=app_id,
grant_type=CLIENT_CREDENTIALS if auth_mode == 'client credentials' else DEVICE_CODE,
# used for device code flow
resource='https://api.security.microsoft.com' if auth_mode == 'device code flow' else None,
token_retrieval_url='https://login.windows.net/organizations/oauth2/v2.0/token'
if auth_mode == 'device code flow' else None,
# used for client credentials flow
tenant_id=tenant_id,
enc_key=enc_key
)
self.ms_client = MicrosoftClient(**client_args) # type: ignore
def list_alerts(self, url_suffix: str, request_data: dict):
data = self.ms_client.http_request(
method='GET',
url_suffix=url_suffix,
json_data=request_data,
)
return data
def dismiss_bulk_alerts(self, request_data: dict):
data = self.ms_client.http_request(
method='POST',
url_suffix='/alerts/close_false_positive/',
json_data=request_data,
)
return data
def resolve_bulk_alerts(self, request_data: dict):
data = self.ms_client.http_request(
method='POST',
url_suffix='/alerts/close_true_positive/',
json_data=request_data,
)
return data
def close_benign(self, request_data: dict):
return self.ms_client.http_request(
method='POST',
url_suffix='/alerts/close_benign/',
json_data=request_data,
)
def close_false_positive(self, request_data: dict):
return self.ms_client.http_request(
method='POST',
url_suffix='/alerts/close_false_positive/',
json_data=request_data,
)
def close_true_positive(self, request_data: dict):
return self.ms_client.http_request(
method='POST',
url_suffix='/alerts/close_true_positive/',
json_data=request_data,
)
def list_activities(self, url_suffix: str, request_data: dict, timeout: int):
data = self.ms_client.http_request(
method='GET',
url_suffix=url_suffix,
json_data=request_data,
timeout=timeout
)
return data
def list_users_accounts(self, url_suffix: str, request_data: dict):
data = self.ms_client.http_request(
method='GET',
url_suffix=url_suffix,
json_data=request_data,
)
return data
def list_files(self, url_suffix: str, request_data: dict):
data = self.ms_client.http_request(
method='GET',
url_suffix=url_suffix,
json_data=request_data,
)
return data
def list_incidents(self, filters: dict, limit: Union[int, str]):
return self.ms_client.http_request(
method='POST',
url_suffix='/alerts/',
json_data={
'filters': filters,
'limit': limit,
'sortDirection': 'asc',
},
)
@logger
def start_auth(client: Client) -> CommandResults:
result = client.ms_client.start_auth('!microsoft-cas-auth-complete') # type: ignore[attr-defined]
return CommandResults(readable_output=result)
@logger
def complete_auth(client: Client) -> CommandResults:
client.ms_client.get_access_token() # type: ignore[attr-defined]
return CommandResults(readable_output='✅ Authorization completed successfully.')
@logger
def reset_auth() -> CommandResults:
set_integration_context({})
return CommandResults(readable_output='Authorization was reset successfully. You can now run '
'**!microsoft-cas-auth-start** and **!microsoft-cas-auth-complete**.')
@logger
def test_connection(client: Client) -> CommandResults:
client.ms_client.get_access_token() # type: ignore[attr-defined]
# If fails, MicrosoftApiModule returns an error
return CommandResults(readable_output='✅ Success!')
def args_to_filter(arguments: dict):
"""
Common filters of **all** related entities (Activities, Alerts, Files and Data Entities).
For more info please check:
- Activities: https://docs.microsoft.com/en-us/cloud-app-security/api-activities#filters
- Alerts: https://docs.microsoft.com/en-us/cloud-app-security/api-alerts#filters
- Files: https://docs.microsoft.com/en-us/cloud-app-security/api-files#filters
- Entities: https://docs.microsoft.com/en-us/cloud-app-security/api-entities#filters
"""
request_data: Dict[str, Any] = {}
filters: Dict[str, Any] = {}
for key, value in arguments.items():
if key in ['skip', 'limit']:
request_data[key] = int(value)
if key == 'source':
filters[key] = {'eq': SOURCE_TYPE_OPTIONS[value]}
if key == 'ip_category':
filters['ip.category'] = {'eq': IP_CATEGORY_OPTIONS[value]}
if key == 'ip':
filters['ip.address'] = {'eq': value}
if key == 'taken_action':
filters['activity.takenAction'] = {'eq': value}
if key == 'severity' and value != 'All':
filters[key] = {'eq': SEVERITY_OPTIONS[value]}
if key == 'resolution_status' and value != 'All':
filters['resolutionStatus'] = {'eq': RESOLUTION_STATUS_OPTIONS[value]}
if key == 'file_type':
filters['fileType'] = {'eq': FILE_TYPE_OPTIONS[value]}
if key == 'sharing':
filters[key] = {'eq': FILE_SHARING_OPTIONS[value]}
if key == 'extension':
filters[key] = {'eq': value}
if key == 'quarantined':
filters[key] = {'eq': argToBoolean(value)}
if key == 'type':
filters[key] = {'eq': value}
if key == 'group_id':
filters['userGroups'] = {'eq': value}
if key == 'is_admin':
filters['isAdmin'] = {'eq': value}
if key == 'is_external':
filters['isExternal'] = {'eq': IS_EXTERNAL_OPTIONS[value]}
if key == 'status':
filters[key] = {'eq': STATUS_OPTIONS[value]}
request_data['filters'] = filters
return request_data
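# --- Illustrative sketch (editor's addition) -----------------------------------
# Example of the request document args_to_filter builds; the argument values
# are hypothetical.
def _demo_args_to_filter():
    return args_to_filter({'severity': 'High', 'ip_category': 'Corporate', 'limit': '10'})
    # -> {'limit': 10, 'filters': {'severity': {'eq': 2}, 'ip.category': {'eq': 1}}}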
def build_filter_and_url_to_search_with(url_suffix: str, custom_filter: Optional[Any], arguments: dict,
specific_id_to_search: Any = '', is_scan: bool = False):
"""
    This function builds the filters dict or the url to filter with.
Args:
url_suffix: The url suffix.
custom_filter: custom filters from the customer (other filters will not work).
arguments: args to filter with.
specific_id_to_search: filter by specific id (other filters will not work).
Returns:
The dict or the url to filter with.
"""
request_data = {}
if specific_id_to_search:
url_suffix += specific_id_to_search
elif custom_filter:
request_data = json.loads(custom_filter)
else:
request_data = args_to_filter(arguments)
request_data = {'filters': request_data} if 'filters' not in request_data.keys() else request_data
if is_scan:
request_data['isScan'] = True
return request_data, url_suffix
def args_to_filter_close_alerts(alert_ids: Optional[List] = None,
custom_filter: Optional[Union[str, dict]] = None,
comment: Optional[str] = None,
send_feedback: bool = False,
feedback_text: Optional[str] = None,
allow_contact: bool = False,
contact_email: Optional[str] = None,
reason: Optional[int] = None,
):
if custom_filter:
if isinstance(custom_filter, str):
request_data = json.loads(custom_filter)
else:
request_data = custom_filter
elif alert_ids:
request_data = {
'filters': {
'id': {
'eq': alert_ids,
},
},
'comment': comment,
'reason': reason,
'sendFeedback': send_feedback,
'feedbackText': feedback_text,
'allowContact': allow_contact,
'contactEmail': contact_email,
}
else:
raise DemistoException("Expecting at least one of the following arguments: alert_id, custom_filter.")
return request_data
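# --- Illustrative sketch (editor's addition) -----------------------------------
# Example close-alert request body built from two hypothetical alert ids; the
# remaining fields fall back to the defaults above.
def _demo_close_alert_request():
    return args_to_filter_close_alerts(alert_ids=['id1', 'id2'], comment='handled', reason=0)
    # -> {'filters': {'id': {'eq': ['id1', 'id2']}}, 'comment': 'handled',
    #     'reason': 0, 'sendFeedback': False, 'feedbackText': None,
    #     'allowContact': False, 'contactEmail': None}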
def args_to_filter_for_dismiss_and_resolve_alerts(alert_ids: Any, custom_filter: Any, comments: Any):
"""
Deprecated by args_to_filter_close_alerts.
"""
request_data: Dict[str, Any] = {}
filters = {}
if alert_ids:
ids = {'eq': alert_ids.split(',')}
filters['id'] = ids
if comments:
request_data['comment'] = comments
request_data['filters'] = filters
elif custom_filter:
request_data = json.loads(custom_filter)
else:
raise DemistoException("Error: You must enter at least one of these arguments: alert ID, custom filter.")
request_data = {'filters': request_data} if 'filters' not in request_data.keys() else request_data
return request_data
def test_module(client: Client, auth_mode: str, is_fetch: Optional[Any], custom_filter: Optional[str]):
try:
if auth_mode == "device code flow":
raise DemistoException(
"To test the device code flow Please run !microsoft-cas-auth-start and "
"!microsoft-cas-auth-complete and check the connection using !microsoft-cas-auth-test")
else:
client.list_alerts(url_suffix='/alerts/', request_data={})
if is_fetch:
client.list_incidents(filters={}, limit=1)
if custom_filter:
try:
json.loads(custom_filter)
except ValueError:
raise DemistoException('Custom Filter Error: Your custom filter format is incorrect, '
'please try again.')
except Exception as e:
if 'No connection' in str(e):
return 'Connection Error: The URL you entered is probably incorrect, please try again.'
if 'Invalid token' in str(e):
return 'Authorization Error: make sure API Key is correctly set.'
return str(e)
return 'ok'
def alerts_to_human_readable(alerts: List[dict]):
alerts_readable_outputs = []
for alert in alerts:
readable_output = assign_params(alert_id=alert.get('_id'), title=alert.get('title'),
description=alert.get('description'), is_open=alert.get('is_open'),
status_value=[key for key, value in STATUS_OPTIONS.items()
if alert.get('statusValue') == value],
severity_value=[key for key, value in SEVERITY_OPTIONS.items()
if alert.get('severityValue') == value],
alert_date=datetime.fromtimestamp(
alert.get('timestamp', 0) / 1000.0).isoformat())
alerts_readable_outputs.append(readable_output)
headers = ['alert_id', 'alert_date', 'title', 'description', 'status_value', 'severity_value', 'is_open']
human_readable = tableToMarkdown('Microsoft CAS Alerts', alerts_readable_outputs, headers, removeNull=True)
return human_readable
def create_ip_command_results(activities: List[dict]):
command_results: List[CommandResults] = []
for activity in activities:
ip_address = str(dict_safe_get(activity, ['device', 'clientIP']))
indicator = Common.IP(
ip=ip_address,
dbot_score=Common.DBotScore(
ip_address,
DBotScoreType.IP,
INTEGRATION_NAME,
Common.DBotScore.NONE,
),
geo_latitude=str(dict_safe_get(activity, ['location', 'latitude'])),
geo_longitude=str(dict_safe_get(activity, ['location', 'longitude'])),
)
human_readable = activity_to_human_readable(activity)
command_results.append(CommandResults(
readable_output=human_readable,
outputs_prefix='MicrosoftCloudAppSecurity.Activities',
outputs_key_field='_id',
outputs=activities,
indicator=indicator
))
return command_results
def arrange_alerts_descriptions(alerts: List[dict]):
for alert in alerts:
description = alert.get('description', '')
if isinstance(description, str) and '__siteIcon__' in description:
description = description.replace('__siteIcon__', 'siteIcon')
alert['description'] = description
return alerts
def set_alerts_is_open(alerts: List[dict]):
for alert in alerts:
if alert.get('resolveTime'):
alert['is_open'] = False
else:
alert['is_open'] = True
return alerts
def list_alerts_command(client: Client, args: dict):
url_suffix = '/alerts/'
alert_id = args.get('alert_id')
custom_filter = args.get('custom_filter')
arguments = assign_params(**args)
request_data, url_suffix = build_filter_and_url_to_search_with(url_suffix, custom_filter, arguments, alert_id)
alerts_response_data = client.list_alerts(url_suffix, request_data)
list_alert = alerts_response_data.get('data') if 'data' in alerts_response_data.keys() else [alerts_response_data]
if list_alert: # organize the output
alerts = arrange_alerts_by_incident_type(list_alert)
alerts = arrange_alerts_descriptions(alerts)
alerts = set_alerts_is_open(alerts)
human_readable = alerts_to_human_readable(alerts)
return CommandResults(
readable_output=human_readable,
outputs_prefix='MicrosoftCloudAppSecurity.Alerts',
outputs_key_field='_id',
outputs=alerts
)
else:
human_readable = f"No alerts found for the given filter: {custom_filter}."
return CommandResults(readable_output=human_readable)
def bulk_dismiss_alert_command(client: Client, args: dict): # pragma: no cover
"""
Deprecated: use close_false_positive_command instead.
"""
alert_ids = args.get('alert_ids')
custom_filter = args.get('custom_filter')
comment = args.get('comment')
request_data = args_to_filter_for_dismiss_and_resolve_alerts(alert_ids, custom_filter, comment)
dismissed_alerts_data = {}
try:
dismissed_alerts_data = client.dismiss_bulk_alerts(request_data)
except Exception as e:
if 'alertsNotFound' in str(e):
raise DemistoException('Error: This alert id is already dismissed or does not exist.')
number_of_dismissed_alerts = dismissed_alerts_data['closed_false_positive']
return CommandResults(
readable_output=f'{number_of_dismissed_alerts} alerts dismissed',
outputs_prefix='MicrosoftCloudAppSecurity.Alerts',
outputs_key_field='_id'
)
def bulk_resolve_alert_command(client: Client, args: dict): # pragma: no cover
"""
Deprecated: use close_true_positive_command instead.
"""
alert_ids = args.get('alert_ids')
custom_filter = args.get('custom_filter')
comment = args.get('comment')
request_data = args_to_filter_for_dismiss_and_resolve_alerts(alert_ids, custom_filter, comment)
resolve_alerts = client.resolve_bulk_alerts(request_data)
number_of_resolved_alerts = resolve_alerts['closed_true_positive']
return CommandResults(
readable_output=f'{number_of_resolved_alerts} alerts resolved',
outputs_prefix='MicrosoftCloudAppSecurity.Alerts',
outputs_key_field='alert_id',
outputs=resolve_alerts
)
def activity_to_human_readable(activity: dict):
readable_output = assign_params(activity_id=activity.get('_id'), severity=activity.get('severity'),
activity_date=datetime.fromtimestamp(activity.get('timestamp', 0) / 1000.0)
.isoformat(),
app_name=activity.get('appName'), description=activity.get('description'))
headers = ['activity_id', 'activity_date', 'app_name', 'description', 'severity']
human_readable = tableToMarkdown('Microsoft CAS Activity', readable_output, headers, removeNull=True)
return human_readable
def arrange_entities_data(activities: List[dict]):
for activity in activities:
entities_data = []
if 'entityData' in activity.keys():
entity_data = activity['entityData']
if entity_data:
for key, value in entity_data.items():
if value:
entities_data.append(value)
activity['entityData'] = entities_data
return activities
def list_activities_command(client: Client, args: dict):
url_suffix = '/activities/'
activity_id = args.get('activity_id')
custom_filter = args.get('custom_filter')
is_scan = argToBoolean(args.get('is_scan', 'false'))
arguments = assign_params(**args)
timeout = arg_to_number(arguments.get('timeout', 60)) or 60
request_data, url_suffix = build_filter_and_url_to_search_with(url_suffix, custom_filter, arguments, activity_id, is_scan)
has_next = True
list_activities = []
while has_next:
activities_response_data = client.list_activities(url_suffix, request_data, timeout)
list_activities.extend(
activities_response_data.get('data') if activities_response_data.get('data') else [activities_response_data]
)
has_next = activities_response_data.get('hasNext', False)
request_data['filters'] = activities_response_data.get('nextQueryFilters')
if is_scan is False:
# This is to prevent run-away iterations
break
activities = arrange_entities_data(list_activities)
return create_ip_command_results(activities)
def files_to_human_readable(files: List[dict]):
files_readable_outputs = []
for file in files:
readable_output = assign_params(owner_name=file.get('ownerName'), file_id=file.get('_id'),
file_type=file.get('fileType'), file_name=file.get('name'),
file_access_level=file.get('fileAccessLevel'), app_name=file.get('appName'),
file_status=file.get('fileStatus'))
files_readable_outputs.append(readable_output)
headers = ['owner_name', 'file_id', 'file_type', 'file_name', 'file_access_level', 'file_status',
'app_name']
human_readable = tableToMarkdown('Microsoft CAS Files', files_readable_outputs, headers, removeNull=True)
return human_readable
def arrange_files_type_access_level_and_status(files: List[dict]):
"""
This function refines the file to look better.
Args:
files: The file for refinement ("fileType": [4, TEXT]).
Returns:
The file when it is more refined and easier to read ("fileType": TEXT).
"""
for file in files:
if file.get('fileType'):
file['fileType'] = file['fileType'][1]
if file.get('fileAccessLevel'):
file['fileAccessLevel'] = file['fileAccessLevel'][1]
if file.get('fileStatus'):
file['fileStatus'] = file['fileStatus'][1]
return files
def list_files_command(client: Client, args: dict):
url_suffix = '/files/'
file_id = args.get('file_id')
custom_filter = args.get('custom_filter')
arguments = assign_params(**args)
request_data, url_suffix = build_filter_and_url_to_search_with(url_suffix, custom_filter, arguments, file_id)
files_response_data = client.list_files(url_suffix, request_data)
list_files = files_response_data.get('data') if files_response_data.get('data') else [files_response_data]
files = arrange_files_type_access_level_and_status(list_files)
human_readable = files_to_human_readable(files)
return CommandResults(
readable_output=human_readable,
outputs_prefix='MicrosoftCloudAppSecurity.Files',
outputs_key_field='_id',
outputs=files
)
def users_accounts_to_human_readable(users_accounts: List[dict]):
users_accounts_readable_outputs = []
for entity in users_accounts:
readable_output = assign_params(display_name=entity.get('displayName'), last_seen=entity.get('lastSeen'),
is_admin=entity.get('isAdmin'), is_external=entity.get('isExternal'),
email=entity.get('email'))
users_accounts_readable_outputs.append(readable_output)
headers = ['display_name', 'last_seen', 'is_admin', 'is_external', 'email', 'identifier']
human_readable = tableToMarkdown('Microsoft CAS Users And Accounts', users_accounts_readable_outputs, headers,
removeNull=True)
return human_readable
def list_users_accounts_command(client: Client, args: dict):
url_suffix = '/entities/'
custom_filter = args.get('custom_filter')
arguments = assign_params(**args)
request_data, url_suffix = build_filter_and_url_to_search_with(url_suffix, custom_filter, arguments)
users_accounts_response_data = client.list_users_accounts(url_suffix, request_data)
users_accounts = users_accounts_response_data.get('data') \
if users_accounts_response_data.get('data') else [users_accounts_response_data]
human_readable = users_accounts_to_human_readable(users_accounts)
return CommandResults(
readable_output=human_readable,
outputs_prefix='MicrosoftCloudAppSecurity.UsersAccounts',
outputs_key_field='_id',
outputs=users_accounts
)
def format_fetch_start_time_to_timestamp(fetch_start_time: Optional[str]):
first_fetch_dt = parse(fetch_start_time).replace(tzinfo=utc) # type:ignore
    # Convert the 10-digit (second) timestamp to the 13-digit (millisecond) form the API expects
return int(first_fetch_dt.timestamp()) * 1000
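# --- Illustrative sketch (editor's addition) -----------------------------------
# Example conversion: a textual fetch start time becomes the 13-digit
# (millisecond) epoch value the alerts API filters on; the date is made up.
def _demo_fetch_start_timestamp():
    return format_fetch_start_time_to_timestamp('2020-01-01T00:00:00Z')  # -> 1577836800000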
def arrange_alerts_by_incident_type(alerts: List[dict]):
for alert in alerts:
incident_types: Dict[str, Any] = {}
for entity in alert['entities']:
if not entity['type'] in incident_types.keys():
incident_types[entity['type']] = []
incident_types[entity['type']].append(entity)
alert.update(incident_types)
del alert['entities']
return alerts
def is_the_first_alert_is_already_fetched_in_previous_fetch(alerts: List[dict], last_run: dict):
last_incident_in_previous_fetch = last_run.get('last_fetch_id')
alert = alerts[0]
return alert.get('_id') == last_incident_in_previous_fetch
def alerts_to_incidents_and_fetch_start_from(alerts: List[dict], fetch_start_time: str, last_run: dict):
fetch_start_time = int(fetch_start_time)
incidents = []
current_last_incident_fetched = ''
if alerts and is_the_first_alert_is_already_fetched_in_previous_fetch(alerts, last_run):
alerts = alerts[1:]
for alert in alerts:
incident_created_time = (alert['timestamp'])
incident_created_datetime = datetime.fromtimestamp(incident_created_time / 1000.0).isoformat()
incident_occurred = incident_created_datetime.split('.')
occurred = incident_occurred[0]
demisto.debug("------ Alert occurred time is: " + occurred)
incident = {
'name': alert['title'],
'occurred': occurred + 'Z',
'rawJSON': json.dumps(alert)
}
incidents.append(incident)
alert['timestamp'] = occurred
if incident_created_time > fetch_start_time:
current_last_incident_fetched = str(alert.get('_id', ''))
if not current_last_incident_fetched:
current_last_incident_fetched = str(last_run.get('last_fetch_id', ''))
return incidents, current_last_incident_fetched, alerts
def fetch_incidents(client: Client, max_results: Optional[str], last_run: dict, first_fetch: Optional[str],
filters: dict, look_back: int):
date_format = '%Y-%m-%dT%H:%M:%S'
if not first_fetch:
first_fetch = '3 days'
max_results = int(max_results) if max_results else DEFAULT_INCIDENT_TO_FETCH
if not last_run.get("time") and last_run.get("last_fetch"):
demisto.debug(f"last fetch from old version is: {str(last_run.get('last_fetch'))}")
last_fetch_time = datetime.fromtimestamp(last_run.get("last_fetch", 0) / 1000.0).isoformat()
last_run.update({"time": last_fetch_time})
fetch_start_time, fetch_end_time = get_fetch_run_time_range(last_run=last_run, first_fetch=first_fetch,
look_back=look_back)
formatted_fetch_start_time = format_fetch_start_time_to_timestamp(fetch_start_time)
filters["date"] = {"gte": formatted_fetch_start_time}
demisto.debug(f'fetching alerts using filter {filters} with max results {max_results}')
alerts_response_data = client.list_incidents(filters, limit=max_results)
alerts = alerts_response_data.get('data')
alerts = arrange_alerts_by_incident_type(alerts)
alerts_to_incident = filter_incidents_by_duplicates_and_limit(
incidents_res=alerts, last_run=last_run, fetch_limit=max_results, id_field='_id'
)
incidents, last_fetch_id, alerts_to_incident = alerts_to_incidents_and_fetch_start_from(
alerts_to_incident, str(formatted_fetch_start_time), last_run)
last_run = update_last_run_object(
last_run=last_run,
incidents=alerts_to_incident,
fetch_limit=max_results,
start_fetch_time=fetch_start_time,
end_fetch_time=fetch_end_time,
look_back=look_back,
created_time_field='timestamp',
id_field='_id',
date_format=date_format,
increase_last_run_time=True
)
last_run.update({'last_fetch_id': last_fetch_id})
demisto.debug(f'setting last run to: {last_run}')
return last_run, incidents
def params_to_filter(severity: List[str], resolution_status: str):
filters: Dict[str, Any] = {}
if len(severity) == 1:
filters['severity'] = {'eq': SEVERITY_OPTIONS[severity[0]]}
else:
severities = []
for severity_option in severity:
severities.append(SEVERITY_OPTIONS[severity_option])
filters['severity'] = {'eq': severities}
if len(resolution_status) == 1:
filters['resolutionStatus'] = {'eq': RESOLUTION_STATUS_OPTIONS[resolution_status[0]]}
else:
resolution_statuses = []
for resolution in resolution_status:
resolution_statuses.append(RESOLUTION_STATUS_OPTIONS[resolution])
filters['resolutionStatus'] = {'eq': resolution_statuses}
return filters
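# --- Illustrative sketch (editor's addition) -----------------------------------
# Example fetch filter produced from the integration parameters; the selected
# values are hypothetical and map through the option tables defined above.
def _demo_params_to_filter():
    return params_to_filter(severity=['Low', 'High'], resolution_status=['Open', 'Resolved'])
    # -> {'severity': {'eq': [0, 2]}, 'resolutionStatus': {'eq': [0, 2]}}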
def close_benign_command(client: Client, args: dict) -> CommandResults:
"""
Closing alerts as benign.
API: https://docs.microsoft.com/en-gb/cloud-app-security/api-alerts-close-benign
"""
alert_ids = argToList(args.get('alert_ids'))
custom_filter = args.get('custom_filter')
comment = args.get('comment')
reason = CLOSE_BENIGN_REASON_OPTIONS.get(args.get('reason', ''))
send_feedback = argToBoolean(args.get('sendFeedback', 'false'))
feedback_text = args.get('feedbackText')
allow_contact = argToBoolean(args.get('allowContact', 'false'))
contact_email = args.get('contactEmail')
request_data = args_to_filter_close_alerts(
alert_ids=alert_ids,
custom_filter=custom_filter,
comment=comment,
send_feedback=send_feedback,
feedback_text=feedback_text,
allow_contact=allow_contact,
contact_email=contact_email,
reason=reason,
)
closed_benign_alerts = client.close_benign(request_data)
if 'alertsNotFound' in closed_benign_alerts:
not_found_alerts = '\n'.join(closed_benign_alerts['alertsNotFound'])
raise DemistoException(f'Failed to close the following alerts:\n{not_found_alerts}')
number_of_close_benign = closed_benign_alerts['closed_benign']
return CommandResults(
readable_output=f'{number_of_close_benign} alerts were closed as benign.',
raw_response=closed_benign_alerts,
)
def close_false_positive_command(client: Client, args: dict) -> CommandResults:
"""
Closing alert as false-positive.
API: https://docs.microsoft.com/en-gb/cloud-app-security/api-alerts-close-false-positive
"""
alert_ids = argToList(args.get('alert_ids'))
custom_filter = args.get('custom_filter')
comment = args.get('comment')
reason = CLOSE_FALSE_POSITIVE_REASON_OPTIONS.get(args.get('reason', ''))
send_feedback = argToBoolean(args.get('sendFeedback')) if args.get('sendFeedback') else False
feedback_text = args.get('feedbackText')
allow_contact = argToBoolean(args.get('allowContact')) if args.get('allowContact') else False
contact_email = args.get('contactEmail')
request_data = args_to_filter_close_alerts(
alert_ids=alert_ids,
custom_filter=custom_filter,
comment=comment,
send_feedback=send_feedback,
feedback_text=feedback_text,
allow_contact=allow_contact,
contact_email=contact_email,
reason=reason,
)
closed_false_positive_alerts = client.close_false_positive(request_data)
if 'alertsNotFound' in closed_false_positive_alerts:
not_found_alerts = '\n'.join(closed_false_positive_alerts['alertsNotFound'])
raise DemistoException(f'Failed to close the following alerts:\n{not_found_alerts}')
number_of_closed_false_positive_alerts = closed_false_positive_alerts['closed_false_positive']
return CommandResults(
readable_output=f'{number_of_closed_false_positive_alerts} alerts were closed as false-positive.',
raw_response=closed_false_positive_alerts,
)
def close_true_positive_command(client: Client, args: dict) -> CommandResults:
"""
Closing alerts as true-positive.
API: https://docs.microsoft.com/en-gb/cloud-app-security/api-alerts-close-true-positive
"""
alert_ids = argToList(args.get('alert_ids'))
custom_filter = args.get('custom_filter')
comment = args.get('comment')
send_feedback = argToBoolean(args.get('sendFeedback')) if args.get('sendFeedback') else False
feedback_text = args.get('feedbackText')
allow_contact = argToBoolean(args.get('allowContact')) if args.get('allowContact') else False
contact_email = args.get('contactEmail')
request_data = args_to_filter_close_alerts(
alert_ids=alert_ids,
custom_filter=custom_filter,
comment=comment,
send_feedback=send_feedback,
feedback_text=feedback_text,
allow_contact=allow_contact,
contact_email=contact_email,
)
closed_true_positive_alert = client.close_true_positive(request_data)
if 'alertsNotFound' in closed_true_positive_alert:
not_found_alerts = '\n'.join(closed_true_positive_alert['alertsNotFound'])
raise DemistoException(f'Failed to close the following alerts:\n{not_found_alerts}')
number_of_close_true_positive = closed_true_positive_alert['closed_true_positive']
return CommandResults(
readable_output=f'{number_of_close_true_positive} alerts were closed as true-positive.',
raw_response=closed_true_positive_alert,
)
def main(): # pragma: no cover
"""
PARSE AND VALIDATE INTEGRATION PARAMS
"""
params: dict = demisto.params()
app_id = params.get('app_id')
tenant_id = params.get('tenant_id')
auth_mode = params.get('auth_mode', 'legacy')
enc_key = params.get('client_id', {}).get('password')
verify = not params.get('insecure', False)
proxy = params.get('proxy', False)
token = params.get('token')
base_url = f'{params.get("url")}/api/v1'
first_fetch = params.get('first_fetch')
max_results = params.get('max_fetch')
severity = params.get('severity')
resolution_status = params.get('resolution_status')
look_back = arg_to_number(params.get('look_back')) or 0
command = demisto.command()
args = demisto.args()
try:
client = Client(
app_id=app_id,
verify=verify,
base_url=base_url,
proxy=proxy,
tenant_id=tenant_id,
enc_key=enc_key,
auth_mode=auth_mode,
headers={'Authorization': f'Token {token}'}
)
LOG(f'Command being called is {command}')
if command == 'test-module':
result = test_module(client, auth_mode, params.get('isFetch'), params.get('custom_filter'))
return_results(result)
elif command == 'fetch-incidents':
if params.get('custom_filter'):
filters = json.loads(str(params.get('custom_filter')))
else:
filters = params_to_filter(severity, resolution_status) # type: ignore
next_run, incidents = fetch_incidents(
client=client,
max_results=max_results,
last_run=demisto.getLastRun(),
first_fetch=first_fetch,
filters=filters,
look_back=look_back)
demisto.setLastRun(next_run)
demisto.incidents(incidents)
elif command == 'microsoft-cas-auth-start':
return_results(start_auth(client))
elif command == 'microsoft-cas-auth-complete':
return_results(complete_auth(client))
elif command == 'microsoft-cas-auth-reset':
return_results(reset_auth())
elif command == 'microsoft-cas-auth-test':
return_results(test_connection(client))
elif command == 'microsoft-cas-alerts-list':
return_results(list_alerts_command(client, args))
elif command == 'microsoft-cas-alert-dismiss-bulk':
# Deprecated.
return_results(bulk_dismiss_alert_command(client, args))
elif command == 'microsoft-cas-alert-resolve-bulk':
# Deprecated.
return_results(bulk_resolve_alert_command(client, args))
elif command == 'microsoft-cas-activities-list':
return_results(list_activities_command(client, args))
elif command == 'microsoft-cas-files-list':
return_results(list_files_command(client, args))
elif command == 'microsoft-cas-users-accounts-list':
return_results(list_users_accounts_command(client, args))
elif command == 'microsoft-cas-alert-close-benign':
return_results(close_benign_command(client, args))
elif command == 'microsoft-cas-alert-close-true-positive':
return_results(close_true_positive_command(client, args))
elif command == 'microsoft-cas-alert-close-false-positive':
return_results(close_false_positive_command(client, args))
else:
raise NotImplementedError(f'command {command} is not implemented.')
# Log exceptions
except Exception as exc:
return_error(f'Failed to execute {command} command. Error: {str(exc)}', error=exc)
from MicrosoftApiModule import * # noqa: E402
if __name__ in ('__main__', '__builtin__', 'builtins'): # pragma: no cover
main()
|
{
"content_hash": "f42d3b1dc0e692a3e300735f7bdb13e9",
"timestamp": "",
"source": "github",
"line_count": 1008,
"max_line_length": 126,
"avg_line_length": 38.67559523809524,
"alnum_prop": 0.6122354751827626,
"repo_name": "demisto/content",
"id": "308ff2e094e26e83cc7f74b97c7de73786787a66",
"size": "38989",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Packs/MicrosoftCloudAppSecurity/Integrations/MicrosoftCloudAppSecurity/MicrosoftCloudAppSecurity.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "2146"
},
{
"name": "HTML",
"bytes": "205901"
},
{
"name": "JavaScript",
"bytes": "1584075"
},
{
"name": "PowerShell",
"bytes": "442288"
},
{
"name": "Python",
"bytes": "47881712"
},
{
"name": "Rich Text Format",
"bytes": "480911"
},
{
"name": "Shell",
"bytes": "108066"
},
{
"name": "YARA",
"bytes": "1185"
}
],
"symlink_target": ""
}
|
import tests.periodicities.period_test as per
per.buildModel((24 , 'T' , 200));
|
{
"content_hash": "524f83efa0752b8231150524ef035d14",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 45,
"avg_line_length": 20.5,
"alnum_prop": 0.7073170731707317,
"repo_name": "antoinecarme/pyaf",
"id": "323687fd4a820a21aba35923187ca1cb7344bc04",
"size": "82",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/periodicities/Minute/Cycle_Minute_200_T_24.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
}
|
class MatcherType(object):
"""
    Interface for Matcher implementations. All matcher types must implement the get_ratio_match method, which
    compares two objects and returns a ratio value.
"""
def get_ratio_match(self, object_a, object_b):
pass
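# Illustrative sketch (hypothetical, not part of this module): a concrete matcher
# subclasses MatcherType and returns a similarity ratio between two objects, e.g.:
#   class ExactNameMatcher(MatcherType):
#       def get_ratio_match(self, object_a, object_b):
#           return 1.0 if object_a.name == object_b.name else 0.0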
|
{
"content_hash": "d0d6fa26409b8f6eaaab74992940784b",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 113,
"avg_line_length": 32.5,
"alnum_prop": 0.6884615384615385,
"repo_name": "Raul-diffindo/Django-Matcher",
"id": "1a31661d6e41d863898adefd18b3f0dc9e40c83b",
"size": "262",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "matcher/matcher_type.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "44458"
}
],
"symlink_target": ""
}
|
import amsoil.core.pluginmanager as pm
import amsoil.core.log
logger=amsoil.core.log.getLogger('osliceauthorityrm')
import uuid
import pyrfc3339
import datetime
import pytz
class OSliceAuthorityResourceManager(object):
"""
Manage Slice Authority objects and resources.
    Generates necessary fields when creating a new object.
"""
AUTHORITY_NAME = 'sa' #: The short-name for this authority
SUPPORTED_SERVICES = ['SLICE','SLICE_MEMBER', 'SLIVER_INFO', 'PROJECT', 'PROJECT_MEMBER'] #: The objects supported by this authority
SUPPORTED_CREDENTIAL_TYPES = [{"type" : "SFA", "version" : 1}] #: The credential type supported by this authority
def __init__(self):
"""
Get plugins for use in other class methods.
Set unique keys.
"""
super(OSliceAuthorityResourceManager, self).__init__()
self._resource_manager_tools = pm.getService('resourcemanagertools')
self._set_unique_keys()
#--- 'get_version' methods
def _set_unique_keys(self):
"""
Set the required unique keys in the database for a Slice Authority.
"""
self._resource_manager_tools.set_index(self.AUTHORITY_NAME, 'SLICE_UID')
self._resource_manager_tools.set_index(self.AUTHORITY_NAME, 'SLICE_URN')
self._resource_manager_tools.set_index(self.AUTHORITY_NAME, 'SLIVER_INFO_URN')
self._resource_manager_tools.set_index(self.AUTHORITY_NAME, 'PROJECT_UID')
self._resource_manager_tools.set_index(self.AUTHORITY_NAME, 'PROJECT_URN')
def urn(self):
"""
Get the URN for this Slice Authority.
Retrieve the hostname from the Flask AMsoil plugin and use this to build
the URN.
"""
config = pm.getService('config')
hostname = config.get('flask.hostname')
return 'urn:publicid:IDN+' + hostname + '+authority+sa'
def implementation(self):
"""
Get the implementation details for this Slice Authority.
Retrieve details from the AMsoil plugin and form them into a dictionary
suitable for the API call response.
"""
manifest = pm.getManifest('osliceauthorityrm')
if len(manifest) > 0:
return {'code_version' : str(manifest['version'])}
else:
return None
def services(self):
"""
Return the services implemented by this Slice Authority.
"""
return self.SUPPORTED_SERVICES
def api_versions(self):
"""
        Get the different endpoints (of type 'sa') registered with AMsoil.
Form these endpoints into a dictionary suitable for the API call response.
"""
config = pm.getService('config')
hostname = config.get('flask.hostname')
port = str(config.get('flask.app_port'))
endpoints = pm.getService('apitools').get_endpoints(type=self.AUTHORITY_NAME)
return self._resource_manager_tools.form_api_versions(hostname, port, endpoints)
def credential_types(self):
"""
Return the credential types implemented by this Slice Authority.
"""
return self.SUPPORTED_CREDENTIAL_TYPES
#--- object methods
def create_slice(self, client_cert, credentials, fields, options):
"""
Create a slice object.
Generate fields for a new object:
* SLICE_URN: retrieve the hostname from the Flask AMsoil plugin
and form into a valid URN
* SLICE_UID: generate a new UUID4 value
* SLICE_CREATION: get the time now and convert it into RFC3339 form
        * SLICE_EXPIRED: slice object has just been created, so it has not
yet expired
"""
config = pm.getService('config')
hostname = config.get('flask.hostname')
        fields['SLICE_URN'] = 'urn:publicid:IDN+' + hostname + '+slice+' + fields.get('SLICE_NAME')
fields['SLICE_UID'] = str(uuid.uuid4())
fields['SLICE_CREATION'] = pyrfc3339.generate(datetime.datetime.utcnow().replace(tzinfo=pytz.utc))
fields['SLICE_EXPIRED'] = False
return self._resource_manager_tools.object_create(self.AUTHORITY_NAME, fields, 'slice')
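    # Illustrative sketch (not executed): assuming flask.hostname is 'example.org'
    # and fields = {'SLICE_NAME': 'myslice'}, create_slice fills in
    #   SLICE_URN      -> a URN built from the hostname and SLICE_NAME
    #   SLICE_UID      -> a fresh UUID4 string
    #   SLICE_CREATION -> the current UTC time in RFC 3339 form
    #   SLICE_EXPIRED  -> False
    # before handing the object to the resource manager tools for storage.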
def update_slice(self, urn, client_cert, credentials, fields, options):
"""
Update a slice object.
"""
return self._resource_manager_tools.object_update(self.AUTHORITY_NAME, fields, 'slice', {'SLICE_URN':urn})
def lookup_slice(self, client_cert, credentials, match, filter_, options):
"""
Lookup a slice object.
"""
return self._resource_manager_tools.object_lookup(self.AUTHORITY_NAME, 'slice', match, filter_)
def create_sliver_info(self, client_cert, credentials, fields, options):
"""
Create a sliver information object.
"""
return self._resource_manager_tools.object_create(self.AUTHORITY_NAME, fields, 'sliver_info')
def update_sliver_info(self, urn, client_cert, credentials, fields, options):
"""
Update a sliver information object.
"""
return self._resource_manager_tools.object_update(self.AUTHORITY_NAME, fields, 'sliver_info', {'SLIVER_INFO_URN':urn})
def lookup_sliver_info(self, client_cert, credentials, match, filter_, options):
"""
Lookup a sliver information object.
"""
return self._resource_manager_tools.object_lookup(self.AUTHORITY_NAME, 'sliver_info', match, filter_)
def delete_sliver_info(self, urn, client_cert, credentials, options):
"""
Delete a sliver information object.
"""
return self._resource_manager_tools.object_delete(self.AUTHORITY_NAME, 'sliver_info', {'SLIVER_INFO_URN':urn})
def create_project(self, client_cert, credentials, fields, options):
"""
Create a project object.
Generate fields for a new object:
* PROJECT_URN: retrieve the hostname from the Flask AMsoil plugin
and form into a valid URN
* PROJECT_UID: generate a new UUID4 value
* PROJECT_CREATION: get the time now and convert it into RFC3339 form
        * PROJECT_EXPIRED: project object has just been created, so it
has not yet expired
"""
config = pm.getService('config')
hostname = config.get('flask.hostname')
        fields['PROJECT_URN'] = 'urn:publicid:IDN+' + hostname + '+project+' + fields.get('PROJECT_NAME')
fields['PROJECT_UID'] = str(uuid.uuid4())
fields['PROJECT_CREATION'] = pyrfc3339.generate(datetime.datetime.utcnow().replace(tzinfo=pytz.utc))
fields['PROJECT_EXPIRED'] = False
return self._resource_manager_tools.object_create(self.AUTHORITY_NAME, fields, 'project')
def update_project(self, urn, client_cert, credentials, fields, options):
"""
Update a project object.
"""
return self._resource_manager_tools.object_update(self.AUTHORITY_NAME, fields, 'project', {'PROJECT_URN':urn})
def delete_project(self, urn, client_cert, credentials, options):
"""
Delete a project object.
"""
return self._resource_manager_tools.object_delete(self.AUTHORITY_NAME, 'project', {'PROJECT_URN':urn})
def lookup_project(self, client_cert, credentials, match, filter_, options):
"""
Lookup a project object.
"""
return self._resource_manager_tools.object_lookup(self.AUTHORITY_NAME, 'project', match, filter_)
def modify_slice_membership(self, urn, certificate, credentials, options):
"""
Modify a slice membership object.
"""
return self._resource_manager_tools.member_modify(self.AUTHORITY_NAME, 'slice_member', urn, options, 'SLICE_MEMBER', 'SLICE_URN')
def modify_project_membership(self, urn, certificate, credentials, options):
"""
Modify a project membership object.
"""
return self._resource_manager_tools.member_modify(self.AUTHORITY_NAME, 'project_member', urn, options, 'PROJECT_MEMBER', 'PROJECT_URN')
def lookup_slice_membership(self, urn, certificate, credentials, options):
"""
Lookup a slice membership object.
"""
return self._resource_manager_tools.member_lookup(self.AUTHORITY_NAME, 'slice_member', 'SLICE_URN', urn, ['SLICE_URN'])
def lookup_project_membership(self, urn, certificate, credentials, options):
"""
Lookup a project membership object.
"""
return self._resource_manager_tools.member_lookup(self.AUTHORITY_NAME, 'project_member', 'PROJECT_URN', urn, ['PROJECT_URN'])
def lookup_slice_membership_for_member(self, member_urn, certificate, credentials, options):
"""
Lookup a slice membership object for a given member.
"""
return self._resource_manager_tools.member_lookup(self.AUTHORITY_NAME, 'slice_member', 'SLICE_MEMBER', member_urn, ['SLICE_MEMBER'])
def lookup_project_membership_for_member(self, member_urn, certificate, credentials, options):
"""
Lookup a project membership object for a given member.
"""
return self._resource_manager_tools.member_lookup(self.AUTHORITY_NAME, 'project_member', 'PROJECT_MEMBER', member_urn, ['PROJECT_MEMBER'])
|
{
"content_hash": "41958f7a2a531dcad33e170f3da40ca4",
"timestamp": "",
"source": "github",
"line_count": 231,
"max_line_length": 146,
"avg_line_length": 40.38528138528139,
"alnum_prop": 0.6427269803837496,
"repo_name": "motine/Ohouse",
"id": "8ad05674a9d6ea807d3ca1a0a1b28214dcc8fd3c",
"size": "9329",
"binary": false,
"copies": "1",
"ref": "refs/heads/development",
"path": "src/plugins/osliceauthorityrm/osliceauthorityresourcemanager.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "265931"
},
{
"name": "Shell",
"bytes": "520"
}
],
"symlink_target": ""
}
|
'''
Copyright (c) 2013, UC Regents
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of the FreeBSD Project.
'''
class ConnectionError(Exception):
pass
class GatewayError(Exception):
pass
|
{
"content_hash": "fbcb733aa540b49e434b3595fe5f9e01",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 80,
"avg_line_length": 47.26470588235294,
"alnum_prop": 0.8039825762289982,
"repo_name": "ooici/ooitk",
"id": "4fc8b4b065d3aca16b41b0583d6620c95808344a",
"size": "1629",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ooitk/exception.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "14327"
},
{
"name": "Shell",
"bytes": "82"
}
],
"symlink_target": ""
}
|
"""
The :mod:`sklearn.model_selection._validation` module includes classes and
functions to validate the model.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Raghav RV <rvraghav93@gmail.com>
# License: BSD 3 clause
import warnings
import numbers
import time
from traceback import format_exc
from contextlib import suppress
import numpy as np
import scipy.sparse as sp
from joblib import Parallel, logger
from ..base import is_classifier, clone
from ..utils import indexable, check_random_state, _safe_indexing
from ..utils.validation import _check_fit_params
from ..utils.validation import _num_samples
from ..utils.validation import _deprecate_positional_args
from ..utils.fixes import delayed
from ..utils.metaestimators import _safe_split
from ..metrics import check_scoring
from ..metrics._scorer import _check_multimetric_scoring, _MultimetricScorer
from ..exceptions import FitFailedWarning, NotFittedError
from ._split import check_cv
from ..preprocessing import LabelEncoder
__all__ = ['cross_validate', 'cross_val_score', 'cross_val_predict',
'permutation_test_score', 'learning_curve', 'validation_curve']
@_deprecate_positional_args
def cross_validate(estimator, X, y=None, *, groups=None, scoring=None, cv=None,
n_jobs=None, verbose=0, fit_params=None,
pre_dispatch='2*n_jobs', return_train_score=False,
return_estimator=False, error_score=np.nan):
"""Evaluate metric(s) by cross-validation and also record fit/score times.
Read more in the :ref:`User Guide <multimetric_cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape (n_samples, n_features)
The data to fit. Can be for example a list, or an array.
y : array-like of shape (n_samples,) or (n_samples, n_outputs), \
default=None
The target variable to try to predict in the case of
supervised learning.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set. Only used in conjunction with a "Group" :term:`cv`
instance (e.g., :class:`GroupKFold`).
scoring : str, callable, list, tuple, or dict, default=None
Strategy to evaluate the performance of the cross-validated model on
the test set.
If `scoring` represents a single score, one can use:
- a single string (see :ref:`scoring_parameter`);
- a callable (see :ref:`scoring`) that returns a single value.
If `scoring` represents multiple scores, one can use:
- a list or tuple of unique strings;
- a callable returning a dictionary where the keys are the metric
names and the values are the metric scores;
        - a dictionary with metric names as keys and callables as values.
See :ref:`multimetric_grid_search` for an example.
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross validation,
- int, to specify the number of folds in a `(Stratified)KFold`,
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For int/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
        other cases, :class:`KFold` is used. These splitters are instantiated
with `shuffle=False` so the splits will be the same across calls.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
.. versionchanged:: 0.22
``cv`` default value if None changed from 3-fold to 5-fold.
n_jobs : int, default=None
Number of jobs to run in parallel. Training the estimator and computing
the score are parallelized over the cross-validation splits.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
verbose : int, default=0
The verbosity level.
fit_params : dict, default=None
Parameters to pass to the fit method of the estimator.
pre_dispatch : int or str, default='2*n_jobs'
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A str, giving an expression as a function of n_jobs,
as in '2*n_jobs'
return_train_score : bool, default=False
Whether to include train scores.
Computing training scores is used to get insights on how different
parameter settings impact the overfitting/underfitting trade-off.
However computing the scores on the training set can be computationally
expensive and is not strictly required to select the parameters that
yield the best generalization performance.
.. versionadded:: 0.19
.. versionchanged:: 0.21
Default value was changed from ``True`` to ``False``
return_estimator : bool, default=False
Whether to return the estimators fitted on each split.
.. versionadded:: 0.20
error_score : 'raise' or numeric, default=np.nan
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised.
If a numeric value is given, FitFailedWarning is raised.
.. versionadded:: 0.20
Returns
-------
scores : dict of float arrays of shape (n_splits,)
Array of scores of the estimator for each run of the cross validation.
A dict of arrays containing the score/time arrays for each scorer is
returned. The possible keys for this ``dict`` are:
``test_score``
The score array for test scores on each cv split.
Suffix ``_score`` in ``test_score`` changes to a specific
metric like ``test_r2`` or ``test_auc`` if there are
multiple scoring metrics in the scoring parameter.
``train_score``
The score array for train scores on each cv split.
Suffix ``_score`` in ``train_score`` changes to a specific
metric like ``train_r2`` or ``train_auc`` if there are
multiple scoring metrics in the scoring parameter.
This is available only if ``return_train_score`` parameter
is ``True``.
``fit_time``
The time for fitting the estimator on the train
set for each cv split.
``score_time``
The time for scoring the estimator on the test set for each
cv split. (Note time for scoring on the train set is not
                included even if ``return_train_score`` is set to ``True``.)
``estimator``
The estimator objects for each cv split.
This is available only if ``return_estimator`` parameter
is set to ``True``.
Examples
--------
>>> from sklearn import datasets, linear_model
>>> from sklearn.model_selection import cross_validate
>>> from sklearn.metrics import make_scorer
>>> from sklearn.metrics import confusion_matrix
>>> from sklearn.svm import LinearSVC
>>> diabetes = datasets.load_diabetes()
>>> X = diabetes.data[:150]
>>> y = diabetes.target[:150]
>>> lasso = linear_model.Lasso()
Single metric evaluation using ``cross_validate``
>>> cv_results = cross_validate(lasso, X, y, cv=3)
>>> sorted(cv_results.keys())
['fit_time', 'score_time', 'test_score']
>>> cv_results['test_score']
array([0.33150734, 0.08022311, 0.03531764])
Multiple metric evaluation using ``cross_validate``
(please refer the ``scoring`` parameter doc for more information)
>>> scores = cross_validate(lasso, X, y, cv=3,
... scoring=('r2', 'neg_mean_squared_error'),
... return_train_score=True)
>>> print(scores['test_neg_mean_squared_error'])
[-3635.5... -3573.3... -6114.7...]
>>> print(scores['train_r2'])
[0.28010158 0.39088426 0.22784852]
See Also
---------
cross_val_score : Run cross-validation for single metric evaluation.
cross_val_predict : Get predictions from each split of cross-validation for
diagnostic purposes.
sklearn.metrics.make_scorer : Make a scorer from a performance metric or
loss function.
"""
X, y, groups = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
if callable(scoring):
scorers = scoring
elif scoring is None or isinstance(scoring, str):
scorers = check_scoring(estimator, scoring)
else:
scorers = _check_multimetric_scoring(estimator, scoring)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
results = parallel(
delayed(_fit_and_score)(
clone(estimator), X, y, scorers, train, test, verbose, None,
fit_params, return_train_score=return_train_score,
return_times=True, return_estimator=return_estimator,
error_score=error_score)
for train, test in cv.split(X, y, groups))
    # For callable scoring, the return type is only known after calling. If the
# return type is a dictionary, the error scores can now be inserted with
# the correct key.
if callable(scoring):
_insert_error_scores(results, error_score)
results = _aggregate_score_dicts(results)
ret = {}
ret['fit_time'] = results["fit_time"]
ret['score_time'] = results["score_time"]
if return_estimator:
ret['estimator'] = results["estimator"]
test_scores_dict = _normalize_score_results(results["test_scores"])
if return_train_score:
train_scores_dict = _normalize_score_results(results["train_scores"])
for name in test_scores_dict:
ret['test_%s' % name] = test_scores_dict[name]
if return_train_score:
key = 'train_%s' % name
ret[key] = train_scores_dict[name]
return ret
def _insert_error_scores(results, error_score):
"""Insert error in `results` by replacing them inplace with `error_score`.
This only applies to multimetric scores because `_fit_and_score` will
handle the single metric case.
"""
successful_score = None
failed_indices = []
for i, result in enumerate(results):
if result["fit_failed"]:
failed_indices.append(i)
elif successful_score is None:
successful_score = result["test_scores"]
if successful_score is None:
raise NotFittedError("All estimators failed to fit")
if isinstance(successful_score, dict):
formatted_error = {name: error_score for name in successful_score}
for i in failed_indices:
results[i]["test_scores"] = formatted_error.copy()
if "train_scores" in results[i]:
results[i]["train_scores"] = formatted_error.copy()
def _normalize_score_results(scores, scaler_score_key='score'):
"""Creates a scoring dictionary based on the type of `scores`"""
if isinstance(scores[0], dict):
# multimetric scoring
return _aggregate_score_dicts(scores)
    # scalar
return {scaler_score_key: scores}
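# Illustrative sketch (not executed): single-metric per-fold scores are wrapped
# under a 'score' key, while per-fold score dicts are aggregated per metric name:
#   _normalize_score_results([0.9, 0.8])
#       -> {'score': [0.9, 0.8]}
#   _normalize_score_results([{'r2': 0.9, 'mae': 1.2}, {'r2': 0.8, 'mae': 1.5}])
#       -> {'r2': <array-like of [0.9, 0.8]>, 'mae': <array-like of [1.2, 1.5]>}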
@_deprecate_positional_args
def cross_val_score(estimator, X, y=None, *, groups=None, scoring=None,
cv=None, n_jobs=None, verbose=0, fit_params=None,
pre_dispatch='2*n_jobs', error_score=np.nan):
"""Evaluate a score by cross-validation
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape (n_samples, n_features)
The data to fit. Can be for example a list, or an array.
y : array-like of shape (n_samples,) or (n_samples, n_outputs), \
default=None
The target variable to try to predict in the case of
supervised learning.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set. Only used in conjunction with a "Group" :term:`cv`
instance (e.g., :class:`GroupKFold`).
scoring : str or callable, default=None
A str (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)`` which should return only
a single value.
Similar to :func:`cross_validate`
but only a single metric is permitted.
If None, the estimator's default scorer (if available) is used.
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross validation,
- int, to specify the number of folds in a `(Stratified)KFold`,
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For int/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used. These splitters are instantiated
with `shuffle=False` so the splits will be the same across calls.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
.. versionchanged:: 0.22
``cv`` default value if None changed from 3-fold to 5-fold.
n_jobs : int, default=None
Number of jobs to run in parallel. Training the estimator and computing
the score are parallelized over the cross-validation splits.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
verbose : int, default=0
The verbosity level.
fit_params : dict, default=None
Parameters to pass to the fit method of the estimator.
pre_dispatch : int or str, default='2*n_jobs'
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A str, giving an expression as a function of n_jobs,
as in '2*n_jobs'
error_score : 'raise' or numeric, default=np.nan
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised.
If a numeric value is given, FitFailedWarning is raised.
.. versionadded:: 0.20
Returns
-------
scores : ndarray of float of shape=(len(list(cv)),)
Array of scores of the estimator for each run of the cross validation.
Examples
--------
>>> from sklearn import datasets, linear_model
>>> from sklearn.model_selection import cross_val_score
>>> diabetes = datasets.load_diabetes()
>>> X = diabetes.data[:150]
>>> y = diabetes.target[:150]
>>> lasso = linear_model.Lasso()
>>> print(cross_val_score(lasso, X, y, cv=3))
[0.33150734 0.08022311 0.03531764]
See Also
---------
cross_validate : To run cross-validation on multiple metrics and also to
return train scores, fit times and score times.
cross_val_predict : Get predictions from each split of cross-validation for
diagnostic purposes.
sklearn.metrics.make_scorer : Make a scorer from a performance metric or
loss function.
"""
# To ensure multimetric format is not supported
scorer = check_scoring(estimator, scoring=scoring)
cv_results = cross_validate(estimator=estimator, X=X, y=y, groups=groups,
scoring={'score': scorer}, cv=cv,
n_jobs=n_jobs, verbose=verbose,
fit_params=fit_params,
pre_dispatch=pre_dispatch,
error_score=error_score)
return cv_results['test_score']
def _fit_and_score(estimator, X, y, scorer, train, test, verbose,
parameters, fit_params, return_train_score=False,
return_parameters=False, return_n_test_samples=False,
return_times=False, return_estimator=False,
split_progress=None, candidate_progress=None,
error_score=np.nan):
"""Fit estimator and compute scores for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape (n_samples, n_features)
The data to fit.
y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None
The target variable to try to predict in the case of
supervised learning.
scorer : A single callable or dict mapping scorer name to the callable
If it is a single callable, the return value for ``train_scores`` and
``test_scores`` is a single float.
For a dict, it should be one mapping the scorer name to the scorer
callable object / function.
The callable object / fn should have signature
``scorer(estimator, X, y)``.
train : array-like of shape (n_train_samples,)
Indices of training samples.
test : array-like of shape (n_test_samples,)
Indices of test samples.
verbose : int
The verbosity level.
error_score : 'raise' or numeric, default=np.nan
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised.
If a numeric value is given, FitFailedWarning is raised.
parameters : dict or None
Parameters to be set on the estimator.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
return_train_score : bool, default=False
Compute and return score on training set.
return_parameters : bool, default=False
        Return parameters that have been used for the estimator.
split_progress : {list, tuple} of int, default=None
A list or tuple of format (<current_split_id>, <total_num_of_splits>).
candidate_progress : {list, tuple} of int, default=None
A list or tuple of format
(<current_candidate_id>, <total_number_of_candidates>).
return_n_test_samples : bool, default=False
Whether to return the ``n_test_samples``.
return_times : bool, default=False
Whether to return the fit/score times.
return_estimator : bool, default=False
Whether to return the fitted estimator.
Returns
-------
result : dict with the following attributes
train_scores : dict of scorer name -> float
Score on training set (for all the scorers),
returned only if `return_train_score` is `True`.
test_scores : dict of scorer name -> float
Score on testing set (for all the scorers).
n_test_samples : int
Number of test samples.
fit_time : float
Time spent for fitting in seconds.
score_time : float
Time spent for scoring in seconds.
parameters : dict or None
The parameters that have been evaluated.
estimator : estimator object
The fitted estimator.
fit_failed : bool
The estimator failed to fit.
"""
if not isinstance(error_score, numbers.Number) and error_score != 'raise':
raise ValueError(
"error_score must be the string 'raise' or a numeric value. "
"(Hint: if using 'raise', please make sure that it has been "
"spelled correctly.)"
)
progress_msg = ""
if verbose > 2:
if split_progress is not None:
progress_msg = f" {split_progress[0]+1}/{split_progress[1]}"
if candidate_progress and verbose > 9:
progress_msg += (f"; {candidate_progress[0]+1}/"
f"{candidate_progress[1]}")
if verbose > 1:
if parameters is None:
params_msg = ''
else:
sorted_keys = sorted(parameters) # Ensure deterministic o/p
params_msg = (', '.join(f'{k}={parameters[k]}'
for k in sorted_keys))
if verbose > 9:
start_msg = f"[CV{progress_msg}] START {params_msg}"
print(f"{start_msg}{(80 - len(start_msg)) * '.'}")
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = _check_fit_params(X, fit_params, train)
if parameters is not None:
# clone after setting parameters in case any parameters
# are estimators (like pipeline steps)
# because pipeline doesn't clone steps in fit
cloned_parameters = {}
for k, v in parameters.items():
cloned_parameters[k] = clone(v, safe=False)
estimator = estimator.set_params(**cloned_parameters)
start_time = time.time()
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
result = {}
try:
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
except Exception as e:
# Note fit time as time until error
fit_time = time.time() - start_time
score_time = 0.0
if error_score == 'raise':
raise
elif isinstance(error_score, numbers.Number):
if isinstance(scorer, dict):
test_scores = {name: error_score for name in scorer}
if return_train_score:
train_scores = test_scores.copy()
else:
test_scores = error_score
if return_train_score:
train_scores = error_score
warnings.warn("Estimator fit failed. The score on this train-test"
" partition for these parameters will be set to %f. "
"Details: \n%s" %
(error_score, format_exc()),
FitFailedWarning)
result["fit_failed"] = True
else:
result["fit_failed"] = False
fit_time = time.time() - start_time
test_scores = _score(estimator, X_test, y_test, scorer, error_score)
score_time = time.time() - start_time - fit_time
if return_train_score:
train_scores = _score(
estimator, X_train, y_train, scorer, error_score
)
if verbose > 1:
total_time = score_time + fit_time
end_msg = f"[CV{progress_msg}] END "
result_msg = params_msg + (";" if params_msg else "")
if verbose > 2:
if isinstance(test_scores, dict):
for scorer_name in sorted(test_scores):
result_msg += f" {scorer_name}: ("
if return_train_score:
scorer_scores = train_scores[scorer_name]
result_msg += f"train={scorer_scores:.3f}, "
result_msg += f"test={test_scores[scorer_name]:.3f})"
else:
result_msg += ", score="
if return_train_score:
result_msg += (f"(train={train_scores:.3f}, "
f"test={test_scores:.3f})")
else:
result_msg += f"{test_scores:.3f}"
result_msg += f" total time={logger.short_format_time(total_time)}"
# Right align the result_msg
end_msg += "." * (80 - len(end_msg) - len(result_msg))
end_msg += result_msg
print(end_msg)
result["test_scores"] = test_scores
if return_train_score:
result["train_scores"] = train_scores
if return_n_test_samples:
result["n_test_samples"] = _num_samples(X_test)
if return_times:
result["fit_time"] = fit_time
result["score_time"] = score_time
if return_parameters:
result["parameters"] = parameters
if return_estimator:
result["estimator"] = estimator
return result
def _score(estimator, X_test, y_test, scorer, error_score="raise"):
"""Compute the score(s) of an estimator on a given test set.
Will return a dict of floats if `scorer` is a dict, otherwise a single
float is returned.
"""
if isinstance(scorer, dict):
# will cache method calls if needed. scorer() returns a dict
scorer = _MultimetricScorer(**scorer)
try:
if y_test is None:
scores = scorer(estimator, X_test)
else:
scores = scorer(estimator, X_test, y_test)
except Exception:
if error_score == 'raise':
raise
else:
if isinstance(scorer, _MultimetricScorer):
scores = {name: error_score for name in scorer._scorers}
else:
scores = error_score
warnings.warn(
f"Scoring failed. The score on this train-test partition for "
f"these parameters will be set to {error_score}. Details: \n"
f"{format_exc()}",
UserWarning,
)
error_msg = (
"scoring must return a number, got %s (%s) instead. (scorer=%s)"
)
if isinstance(scores, dict):
for name, score in scores.items():
if hasattr(score, 'item'):
with suppress(ValueError):
# e.g. unwrap memmapped scalars
score = score.item()
if not isinstance(score, numbers.Number):
raise ValueError(error_msg % (score, type(score), name))
scores[name] = score
else: # scalar
if hasattr(scores, 'item'):
with suppress(ValueError):
# e.g. unwrap memmapped scalars
scores = scores.item()
if not isinstance(scores, numbers.Number):
raise ValueError(error_msg % (scores, type(scores), scorer))
return scores
@_deprecate_positional_args
def cross_val_predict(estimator, X, y=None, *, groups=None, cv=None,
n_jobs=None, verbose=0, fit_params=None,
pre_dispatch='2*n_jobs', method='predict'):
"""Generate cross-validated estimates for each input data point
The data is split according to the cv parameter. Each sample belongs
to exactly one test set, and its prediction is computed with an
estimator fitted on the corresponding training set.
Passing these predictions into an evaluation metric may not be a valid
way to measure generalization performance. Results can differ from
:func:`cross_validate` and :func:`cross_val_score` unless all tests sets
have equal size and the metric decomposes over samples.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like of shape (n_samples, n_features)
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like of shape (n_samples,) or (n_samples, n_outputs), \
default=None
The target variable to try to predict in the case of
supervised learning.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set. Only used in conjunction with a "Group" :term:`cv`
instance (e.g., :class:`GroupKFold`).
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross validation,
- int, to specify the number of folds in a `(Stratified)KFold`,
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For int/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used. These splitters are instantiated
with `shuffle=False` so the splits will be the same across calls.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
.. versionchanged:: 0.22
``cv`` default value if None changed from 3-fold to 5-fold.
n_jobs : int, default=None
Number of jobs to run in parallel. Training the estimator and
predicting are parallelized over the cross-validation splits.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
verbose : int, default=0
The verbosity level.
fit_params : dict, default=None
Parameters to pass to the fit method of the estimator.
pre_dispatch : int or str, default='2*n_jobs'
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A str, giving an expression as a function of n_jobs,
as in '2*n_jobs'
method : {'predict', 'predict_proba', 'predict_log_proba', \
'decision_function'}, default='predict'
The method to be invoked by `estimator`.
Returns
-------
predictions : ndarray
This is the result of calling `method`. Shape:
- When `method` is 'predict' and in special case where `method` is
'decision_function' and the target is binary: (n_samples,)
- When `method` is one of {'predict_proba', 'predict_log_proba',
'decision_function'} (unless special case above):
(n_samples, n_classes)
- If `estimator` is :term:`multioutput`, an extra dimension
'n_outputs' is added to the end of each shape above.
See Also
--------
cross_val_score : Calculate score for each CV split.
cross_validate : Calculate one or more scores and timings for each CV
split.
Notes
-----
In the case that one or more classes are absent in a training portion, a
default score needs to be assigned to all instances for that class if
``method`` produces columns per class, as in {'decision_function',
'predict_proba', 'predict_log_proba'}. For ``predict_proba`` this value is
0. In order to ensure finite output, we approximate negative infinity by
the minimum finite float value for the dtype in other cases.
Examples
--------
>>> from sklearn import datasets, linear_model
>>> from sklearn.model_selection import cross_val_predict
>>> diabetes = datasets.load_diabetes()
>>> X = diabetes.data[:150]
>>> y = diabetes.target[:150]
>>> lasso = linear_model.Lasso()
>>> y_pred = cross_val_predict(lasso, X, y, cv=3)
"""
X, y, groups = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
splits = list(cv.split(X, y, groups))
test_indices = np.concatenate([test for _, test in splits])
if not _check_is_permutation(test_indices, _num_samples(X)):
raise ValueError('cross_val_predict only works for partitions')
# If classification methods produce multiple columns of output,
# we need to manually encode classes to ensure consistent column ordering.
encode = method in ['decision_function', 'predict_proba',
'predict_log_proba'] and y is not None
if encode:
y = np.asarray(y)
if y.ndim == 1:
le = LabelEncoder()
y = le.fit_transform(y)
elif y.ndim == 2:
y_enc = np.zeros_like(y, dtype=int)
for i_label in range(y.shape[1]):
y_enc[:, i_label] = LabelEncoder().fit_transform(y[:, i_label])
y = y_enc
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
predictions = parallel(delayed(_fit_and_predict)(
clone(estimator), X, y, train, test, verbose, fit_params, method)
for train, test in splits)
inv_test_indices = np.empty(len(test_indices), dtype=int)
inv_test_indices[test_indices] = np.arange(len(test_indices))
if sp.issparse(predictions[0]):
predictions = sp.vstack(predictions, format=predictions[0].format)
elif encode and isinstance(predictions[0], list):
# `predictions` is a list of method outputs from each fold.
# If each of those is also a list, then treat this as a
# multioutput-multiclass task. We need to separately concatenate
# the method outputs for each label into an `n_labels` long list.
n_labels = y.shape[1]
concat_pred = []
for i_label in range(n_labels):
label_preds = np.concatenate([p[i_label] for p in predictions])
concat_pred.append(label_preds)
predictions = concat_pred
else:
predictions = np.concatenate(predictions)
if isinstance(predictions, list):
return [p[inv_test_indices] for p in predictions]
else:
return predictions[inv_test_indices]
def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params,
method):
"""Fit estimator and predict values for a given dataset split.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like of shape (n_samples, n_features)
The data to fit.
.. versionchanged:: 0.20
X is only required to be an object with finite length or shape now
y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None
The target variable to try to predict in the case of
supervised learning.
train : array-like of shape (n_train_samples,)
Indices of training samples.
test : array-like of shape (n_test_samples,)
Indices of test samples.
verbose : int
The verbosity level.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
method : str
Invokes the passed method name of the passed estimator.
Returns
-------
predictions : sequence
Result of calling 'estimator.method'
"""
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = _check_fit_params(X, fit_params, train)
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, _ = _safe_split(estimator, X, y, test, train)
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
func = getattr(estimator, method)
predictions = func(X_test)
encode = method in ['decision_function', 'predict_proba',
'predict_log_proba'] and y is not None
if encode:
if isinstance(predictions, list):
predictions = [_enforce_prediction_order(
estimator.classes_[i_label], predictions[i_label],
n_classes=len(set(y[:, i_label])), method=method)
for i_label in range(len(predictions))]
else:
# A 2D y array should be a binary label indicator matrix
n_classes = len(set(y)) if y.ndim == 1 else y.shape[1]
predictions = _enforce_prediction_order(
estimator.classes_, predictions, n_classes, method)
return predictions
def _enforce_prediction_order(classes, predictions, n_classes, method):
"""Ensure that prediction arrays have correct column order
When doing cross-validation, if one or more classes are
not present in the subset of data used for training,
then the output prediction array might not have the same
columns as other folds. Use the list of class names
(assumed to be ints) to enforce the correct column order.
Note that `classes` is the list of classes in this fold
(a subset of the classes in the full training set)
and `n_classes` is the number of classes in the full training set.
"""
if n_classes != len(classes):
recommendation = (
'To fix this, use a cross-validation '
'technique resulting in properly '
'stratified folds')
warnings.warn('Number of classes in training fold ({}) does '
'not match total number of classes ({}). '
'Results may not be appropriate for your use case. '
'{}'.format(len(classes), n_classes, recommendation),
RuntimeWarning)
if method == 'decision_function':
if (predictions.ndim == 2 and
predictions.shape[1] != len(classes)):
# This handles the case when the shape of predictions
# does not match the number of classes used to train
# it with. This case is found when sklearn.svm.SVC is
# set to `decision_function_shape='ovo'`.
raise ValueError('Output shape {} of {} does not match '
'number of classes ({}) in fold. '
'Irregular decision_function outputs '
'are not currently supported by '
'cross_val_predict'.format(
predictions.shape, method, len(classes)))
if len(classes) <= 2:
# In this special case, `predictions` contains a 1D array.
raise ValueError('Only {} class/es in training fold, but {} '
'in overall dataset. This '
'is not supported for decision_function '
'with imbalanced folds. {}'.format(
len(classes), n_classes, recommendation))
float_min = np.finfo(predictions.dtype).min
default_values = {'decision_function': float_min,
'predict_log_proba': float_min,
'predict_proba': 0}
predictions_for_all_classes = np.full((_num_samples(predictions),
n_classes),
default_values[method],
dtype=predictions.dtype)
predictions_for_all_classes[:, classes] = predictions
predictions = predictions_for_all_classes
return predictions
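# Illustrative sketch (not executed): if the full training set has 3 classes but a
# fold only saw classes [0, 2], a (n_samples, 2) predict_proba output is re-expanded
# to (n_samples, 3), with the missing class-1 column filled with 0 (decision_function
# and predict_log_proba use the dtype's minimum float instead):
#   _enforce_prediction_order(np.array([0, 2]), probas_2col, n_classes=3,
#                             method='predict_proba')
#       -> columns 0 and 2 copied from probas_2col, column 1 all zeros.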
def _check_is_permutation(indices, n_samples):
"""Check whether indices is a reordering of the array np.arange(n_samples)
Parameters
----------
indices : ndarray
int array to test
n_samples : int
number of expected elements
Returns
-------
is_partition : bool
        True iff sorted(indices) is np.arange(n_samples)
"""
if len(indices) != n_samples:
return False
hit = np.zeros(n_samples, dtype=bool)
hit[indices] = True
if not np.all(hit):
return False
return True
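# Illustrative sketch (not executed):
#   _check_is_permutation(np.array([2, 0, 1]), 3)  -> True
#   _check_is_permutation(np.array([0, 0, 2]), 3)  -> False  (index 1 missing)
#   _check_is_permutation(np.array([0, 1]), 3)     -> False  (wrong length)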
@_deprecate_positional_args
def permutation_test_score(estimator, X, y, *, groups=None, cv=None,
n_permutations=100, n_jobs=None, random_state=0,
verbose=0, scoring=None, fit_params=None):
"""Evaluate the significance of a cross-validated score with permutations
Permutes targets to generate 'randomized data' and compute the empirical
p-value against the null hypothesis that features and targets are
independent.
The p-value represents the fraction of randomized data sets where the
estimator performed as well or better than in the original data. A small
p-value suggests that there is a real dependency between features and
targets which has been used by the estimator to give good predictions.
    A large p-value may reflect a lack of real dependency between features
    and targets, or that the estimator was not able to use the dependency to
    give good predictions.
Read more in the :ref:`User Guide <permutation_test_score>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None
The target variable to try to predict in the case of
supervised learning.
groups : array-like of shape (n_samples,), default=None
Labels to constrain permutation within groups, i.e. ``y`` values
are permuted among samples with the same group identifier.
When not specified, ``y`` values are permuted among all samples.
When a grouped cross-validator is used, the group labels are
also passed on to the ``split`` method of the cross-validator. The
cross-validator uses them for grouping the samples while splitting
the dataset into train/test set.
scoring : str or callable, default=None
A single str (see :ref:`scoring_parameter`) or a callable
(see :ref:`scoring`) to evaluate the predictions on the test set.
If None the estimator's score method is used.
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross validation,
- int, to specify the number of folds in a `(Stratified)KFold`,
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For int/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used. These splitters are instantiated
with `shuffle=False` so the splits will be the same across calls.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
.. versionchanged:: 0.22
``cv`` default value if None changed from 3-fold to 5-fold.
n_permutations : int, default=100
Number of times to permute ``y``.
n_jobs : int, default=None
Number of jobs to run in parallel. Training the estimator and computing
the cross-validated score are parallelized over the permutations.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
random_state : int, RandomState instance or None, default=0
Pass an int for reproducible output for permutation of
``y`` values among samples. See :term:`Glossary <random_state>`.
verbose : int, default=0
The verbosity level.
fit_params : dict, default=None
Parameters to pass to the fit method of the estimator.
.. versionadded:: 0.24
Returns
-------
score : float
The true score without permuting targets.
permutation_scores : array of shape (n_permutations,)
The scores obtained for each permutations.
pvalue : float
The p-value, which approximates the probability that the score would
be obtained by chance. This is calculated as:
`(C + 1) / (n_permutations + 1)`
        where C is the number of permutations whose score >= the true score.
The best possible p-value is 1/(n_permutations + 1), the worst is 1.0.
Notes
-----
This function implements Test 1 in:
Ojala and Garriga. `Permutation Tests for Studying Classifier
Performance
<http://www.jmlr.org/papers/volume11/ojala10a/ojala10a.pdf>`_. The
Journal of Machine Learning Research (2010) vol. 11
"""
X, y, groups = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
random_state = check_random_state(random_state)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
score = _permutation_test_score(clone(estimator), X, y, groups, cv, scorer,
fit_params=fit_params)
permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_permutation_test_score)(
clone(estimator), X, _shuffle(y, groups, random_state),
groups, cv, scorer, fit_params=fit_params)
for _ in range(n_permutations))
permutation_scores = np.array(permutation_scores)
pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)
return score, permutation_scores, pvalue
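# --- Illustrative usage sketch (not part of the original module): a hedged,
# minimal example of calling permutation_test_score through the public
# sklearn.model_selection re-export. The dataset and estimator below are
# arbitrary demonstration choices.
#
# >>> from sklearn.datasets import make_classification
# >>> from sklearn.linear_model import LogisticRegression
# >>> from sklearn.model_selection import permutation_test_score
# >>> X_demo, y_demo = make_classification(n_samples=100, n_features=5,
# ...                                      random_state=0)
# >>> score, perm_scores, pvalue = permutation_test_score(
# ...     LogisticRegression(), X_demo, y_demo, n_permutations=30,
# ...     random_state=0)
# >>> perm_scores.shape
# (30,)
# A p-value close to 1 / (n_permutations + 1) suggests a real dependency
# between the features and the targets.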
def _permutation_test_score(estimator, X, y, groups, cv, scorer,
fit_params):
"""Auxiliary function for permutation_test_score"""
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
avg_score = []
for train, test in cv.split(X, y, groups):
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
fit_params = _check_fit_params(X, fit_params, train)
estimator.fit(X_train, y_train, **fit_params)
avg_score.append(scorer(estimator, X_test, y_test))
return np.mean(avg_score)
def _shuffle(y, groups, random_state):
"""Return a shuffled copy of y eventually shuffle among same groups."""
if groups is None:
indices = random_state.permutation(len(y))
else:
indices = np.arange(len(groups))
for group in np.unique(groups):
this_mask = (groups == group)
indices[this_mask] = random_state.permutation(indices[this_mask])
return _safe_indexing(y, indices)
@_deprecate_positional_args
def learning_curve(estimator, X, y, *, groups=None,
train_sizes=np.linspace(0.1, 1.0, 5), cv=None,
scoring=None, exploit_incremental_learning=False,
n_jobs=None, pre_dispatch="all", verbose=0, shuffle=False,
random_state=None, error_score=np.nan, return_times=False,
fit_params=None):
"""Learning curve.
Determines cross-validated training and test scores for different training
set sizes.
A cross-validation generator splits the whole dataset k times in training
and test data. Subsets of the training set with varying sizes will be used
to train the estimator and a score for each training subset size and the
test set will be computed. Afterwards, the scores will be averaged over
all k runs for each training subset size.
Read more in the :ref:`User Guide <learning_curve>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like of shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
Target relative to X for classification or regression;
None for unsupervised learning.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set. Only used in conjunction with a "Group" :term:`cv`
instance (e.g., :class:`GroupKFold`).
train_sizes : array-like of shape (n_ticks,), \
default=np.linspace(0.1, 1.0, 5)
Relative or absolute numbers of training examples that will be used to
generate the learning curve. If the dtype is float, it is regarded as a
fraction of the maximum size of the training set (that is determined
by the selected validation method), i.e. it has to be within (0, 1].
Otherwise it is interpreted as absolute sizes of the training sets.
        Note that for classification the number of samples usually has to
        be big enough to contain at least one sample from each class.
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross validation,
- int, to specify the number of folds in a `(Stratified)KFold`,
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For int/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used. These splitters are instantiated
with `shuffle=False` so the splits will be the same across calls.
        Refer to the :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
.. versionchanged:: 0.22
``cv`` default value if None changed from 3-fold to 5-fold.
scoring : str or callable, default=None
A str (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
exploit_incremental_learning : bool, default=False
If the estimator supports incremental learning, this will be
used to speed up fitting for different training set sizes.
n_jobs : int, default=None
Number of jobs to run in parallel. Training the estimator and computing
the score are parallelized over the different training and test sets.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
pre_dispatch : int or str, default='all'
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The str can
be an expression like '2*n_jobs'.
verbose : int, default=0
Controls the verbosity: the higher, the more messages.
shuffle : bool, default=False
        Whether to shuffle training data before taking prefixes of it
        based on ``train_sizes``.
random_state : int, RandomState instance or None, default=None
Used when ``shuffle`` is True. Pass an int for reproducible
output across multiple function calls.
See :term:`Glossary <random_state>`.
error_score : 'raise' or numeric, default=np.nan
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised.
If a numeric value is given, FitFailedWarning is raised.
.. versionadded:: 0.20
return_times : bool, default=False
Whether to return the fit and score times.
fit_params : dict, default=None
Parameters to pass to the fit method of the estimator.
.. versionadded:: 0.24
Returns
-------
train_sizes_abs : array of shape (n_unique_ticks,)
        Numbers of training examples that have been used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
train_scores : array of shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array of shape (n_ticks, n_cv_folds)
Scores on test set.
fit_times : array of shape (n_ticks, n_cv_folds)
Times spent for fitting in seconds. Only present if ``return_times``
is True.
score_times : array of shape (n_ticks, n_cv_folds)
Times spent for scoring in seconds. Only present if ``return_times``
is True.
Notes
-----
See :ref:`examples/model_selection/plot_learning_curve.py
<sphx_glr_auto_examples_model_selection_plot_learning_curve.py>`
"""
if exploit_incremental_learning and not hasattr(estimator, "partial_fit"):
raise ValueError("An estimator must support the partial_fit interface "
"to exploit incremental learning")
X, y, groups = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
# Store it as list as we will be iterating over the list multiple times
cv_iter = list(cv.split(X, y, groups))
scorer = check_scoring(estimator, scoring=scoring)
n_max_training_samples = len(cv_iter[0][0])
# Because the lengths of folds can be significantly different, it is
# not guaranteed that we use all of the available training data when we
# use the first 'n_max_training_samples' samples.
train_sizes_abs = _translate_train_sizes(train_sizes,
n_max_training_samples)
n_unique_ticks = train_sizes_abs.shape[0]
if verbose > 0:
print("[learning_curve] Training set sizes: " + str(train_sizes_abs))
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
if shuffle:
rng = check_random_state(random_state)
cv_iter = ((rng.permutation(train), test) for train, test in cv_iter)
if exploit_incremental_learning:
classes = np.unique(y) if is_classifier(estimator) else None
out = parallel(delayed(_incremental_fit_estimator)(
clone(estimator), X, y, classes, train, test, train_sizes_abs,
scorer, verbose, return_times, error_score=error_score,
fit_params=fit_params)
for train, test in cv_iter
)
out = np.asarray(out).transpose((2, 1, 0))
else:
train_test_proportions = []
for train, test in cv_iter:
for n_train_samples in train_sizes_abs:
train_test_proportions.append((train[:n_train_samples], test))
results = parallel(delayed(_fit_and_score)(
clone(estimator), X, y, scorer, train, test, verbose,
parameters=None, fit_params=fit_params, return_train_score=True,
error_score=error_score, return_times=return_times)
for train, test in train_test_proportions
)
results = _aggregate_score_dicts(results)
train_scores = results["train_scores"].reshape(-1, n_unique_ticks).T
test_scores = results["test_scores"].reshape(-1, n_unique_ticks).T
out = [train_scores, test_scores]
if return_times:
fit_times = results["fit_time"].reshape(-1, n_unique_ticks).T
score_times = results["score_time"].reshape(-1, n_unique_ticks).T
out.extend([fit_times, score_times])
ret = train_sizes_abs, out[0], out[1]
if return_times:
ret = ret + (out[2], out[3])
return ret
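# --- Illustrative usage sketch (not part of the original module): a hedged
# example of calling learning_curve through the public re-export. The iris
# dataset and decision tree are arbitrary choices; with 150 samples and cv=5
# the largest training subset holds 120 samples.
#
# >>> from sklearn.datasets import load_iris
# >>> from sklearn.tree import DecisionTreeClassifier
# >>> from sklearn.model_selection import learning_curve
# >>> X_demo, y_demo = load_iris(return_X_y=True)
# >>> sizes, train_scores, test_scores = learning_curve(
# ...     DecisionTreeClassifier(random_state=0), X_demo, y_demo,
# ...     train_sizes=[0.3, 0.6, 1.0], cv=5)
# >>> list(sizes)
# [36, 72, 120]
# >>> train_scores.shape, test_scores.shape
# ((3, 5), (3, 5))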
def _translate_train_sizes(train_sizes, n_max_training_samples):
"""Determine absolute sizes of training subsets and validate 'train_sizes'.
Examples:
_translate_train_sizes([0.5, 1.0], 10) -> [5, 10]
_translate_train_sizes([5, 10], 10) -> [5, 10]
Parameters
----------
train_sizes : array-like of shape (n_ticks,)
Numbers of training examples that will be used to generate the
learning curve. If the dtype is float, it is regarded as a
fraction of 'n_max_training_samples', i.e. it has to be within (0, 1].
n_max_training_samples : int
Maximum number of training samples (upper bound of 'train_sizes').
Returns
-------
train_sizes_abs : array of shape (n_unique_ticks,)
Numbers of training examples that will be used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
"""
train_sizes_abs = np.asarray(train_sizes)
n_ticks = train_sizes_abs.shape[0]
n_min_required_samples = np.min(train_sizes_abs)
n_max_required_samples = np.max(train_sizes_abs)
if np.issubdtype(train_sizes_abs.dtype, np.floating):
if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0:
raise ValueError("train_sizes has been interpreted as fractions "
"of the maximum number of training samples and "
"must be within (0, 1], but is within [%f, %f]."
% (n_min_required_samples,
n_max_required_samples))
train_sizes_abs = (train_sizes_abs * n_max_training_samples).astype(
dtype=int, copy=False)
train_sizes_abs = np.clip(train_sizes_abs, 1,
n_max_training_samples)
else:
if (n_min_required_samples <= 0 or
n_max_required_samples > n_max_training_samples):
raise ValueError("train_sizes has been interpreted as absolute "
"numbers of training samples and must be within "
"(0, %d], but is within [%d, %d]."
% (n_max_training_samples,
n_min_required_samples,
n_max_required_samples))
train_sizes_abs = np.unique(train_sizes_abs)
if n_ticks > train_sizes_abs.shape[0]:
warnings.warn("Removed duplicate entries from 'train_sizes'. Number "
"of ticks will be less than the size of "
"'train_sizes' %d instead of %d)."
% (train_sizes_abs.shape[0], n_ticks), RuntimeWarning)
return train_sizes_abs
def _incremental_fit_estimator(estimator, X, y, classes, train, test,
train_sizes, scorer, verbose,
return_times, error_score, fit_params):
"""Train estimator on training subsets incrementally and compute scores."""
train_scores, test_scores, fit_times, score_times = [], [], [], []
partitions = zip(train_sizes, np.split(train, train_sizes)[:-1])
if fit_params is None:
fit_params = {}
for n_train_samples, partial_train in partitions:
train_subset = train[:n_train_samples]
X_train, y_train = _safe_split(estimator, X, y, train_subset)
X_partial_train, y_partial_train = _safe_split(estimator, X, y,
partial_train)
X_test, y_test = _safe_split(estimator, X, y, test, train_subset)
start_fit = time.time()
if y_partial_train is None:
estimator.partial_fit(X_partial_train, classes=classes,
**fit_params)
else:
estimator.partial_fit(X_partial_train, y_partial_train,
classes=classes, **fit_params)
fit_time = time.time() - start_fit
fit_times.append(fit_time)
start_score = time.time()
test_scores.append(
_score(estimator, X_test, y_test, scorer, error_score)
)
train_scores.append(
_score(estimator, X_train, y_train, scorer, error_score)
)
score_time = time.time() - start_score
score_times.append(score_time)
ret = ((train_scores, test_scores, fit_times, score_times)
if return_times else (train_scores, test_scores))
return np.array(ret).T
@_deprecate_positional_args
def validation_curve(estimator, X, y, *, param_name, param_range, groups=None,
cv=None, scoring=None, n_jobs=None, pre_dispatch="all",
verbose=0, error_score=np.nan, fit_params=None):
"""Validation curve.
Determine training and test scores for varying parameter values.
Compute scores for an estimator with different values of a specified
parameter. This is similar to grid search with one parameter. However, this
will also compute training scores and is merely a utility for plotting the
results.
Read more in the :ref:`User Guide <validation_curve>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like of shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None
Target relative to X for classification or regression;
None for unsupervised learning.
param_name : str
Name of the parameter that will be varied.
param_range : array-like of shape (n_values,)
The values of the parameter that will be evaluated.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set. Only used in conjunction with a "Group" :term:`cv`
instance (e.g., :class:`GroupKFold`).
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross validation,
- int, to specify the number of folds in a `(Stratified)KFold`,
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For int/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used. These splitters are instantiated
with `shuffle=False` so the splits will be the same across calls.
        Refer to the :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
.. versionchanged:: 0.22
``cv`` default value if None changed from 3-fold to 5-fold.
scoring : str or callable, default=None
A str (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
n_jobs : int, default=None
Number of jobs to run in parallel. Training the estimator and computing
the score are parallelized over the combinations of each parameter
value and each cross-validation split.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
pre_dispatch : int or str, default='all'
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The str can
be an expression like '2*n_jobs'.
verbose : int, default=0
Controls the verbosity: the higher, the more messages.
fit_params : dict, default=None
Parameters to pass to the fit method of the estimator.
.. versionadded:: 0.24
error_score : 'raise' or numeric, default=np.nan
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised.
If a numeric value is given, FitFailedWarning is raised.
.. versionadded:: 0.20
Returns
-------
train_scores : array of shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array of shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See :ref:`sphx_glr_auto_examples_model_selection_plot_validation_curve.py`
"""
X, y, groups = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
results = parallel(delayed(_fit_and_score)(
clone(estimator), X, y, scorer, train, test, verbose,
parameters={param_name: v}, fit_params=fit_params,
return_train_score=True, error_score=error_score)
# NOTE do not change order of iteration to allow one time cv splitters
for train, test in cv.split(X, y, groups) for v in param_range)
n_params = len(param_range)
results = _aggregate_score_dicts(results)
train_scores = results["train_scores"].reshape(-1, n_params).T
test_scores = results["test_scores"].reshape(-1, n_params).T
return train_scores, test_scores
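# --- Illustrative usage sketch (not part of the original module): a hedged
# example of calling validation_curve through the public re-export, varying
# the regularization parameter C of an SVC. Dataset, estimator and parameter
# grid are arbitrary demonstration choices.
#
# >>> from sklearn.datasets import load_iris
# >>> from sklearn.svm import SVC
# >>> from sklearn.model_selection import validation_curve
# >>> X_demo, y_demo = load_iris(return_X_y=True)
# >>> train_scores, test_scores = validation_curve(
# ...     SVC(kernel="linear"), X_demo, y_demo,
# ...     param_name="C", param_range=[0.1, 1.0, 10.0], cv=3)
# >>> train_scores.shape, test_scores.shape
# ((3, 3), (3, 3))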
def _aggregate_score_dicts(scores):
"""Aggregate the list of dict to dict of np ndarray
The aggregated output of _aggregate_score_dicts will be a list of dict
of form [{'prec': 0.1, 'acc':1.0}, {'prec': 0.1, 'acc':1.0}, ...]
Convert it to a dict of array {'prec': np.array([0.1 ...]), ...}
Parameters
----------
scores : list of dict
List of dicts of the scores for all scorers. This is a flat list,
assumed originally to be of row major order.
Example
-------
>>> scores = [{'a': 1, 'b':10}, {'a': 2, 'b':2}, {'a': 3, 'b':3},
... {'a': 10, 'b': 10}] # doctest: +SKIP
>>> _aggregate_score_dicts(scores) # doctest: +SKIP
{'a': array([1, 2, 3, 10]),
'b': array([10, 2, 3, 10])}
"""
return {
key: np.asarray([score[key] for score in scores])
if isinstance(scores[0][key], numbers.Number)
else [score[key] for score in scores]
for key in scores[0]
}
|
{
"content_hash": "19b4b2aade7ae3c90cf7a24a4ccfb43f",
"timestamp": "",
"source": "github",
"line_count": 1693,
"max_line_length": 79,
"avg_line_length": 40.337271116361485,
"alnum_prop": 0.6204477896062439,
"repo_name": "ryfeus/lambda-packs",
"id": "e54dade45512431964c42764f0e4a84f9f0834ee",
"size": "68291",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Sklearn_arm/source/sklearn/model_selection/_validation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9768343"
},
{
"name": "C++",
"bytes": "76566960"
},
{
"name": "CMake",
"bytes": "191097"
},
{
"name": "CSS",
"bytes": "153538"
},
{
"name": "Cuda",
"bytes": "61768"
},
{
"name": "Cython",
"bytes": "3110222"
},
{
"name": "Fortran",
"bytes": "110284"
},
{
"name": "HTML",
"bytes": "248658"
},
{
"name": "JavaScript",
"bytes": "62920"
},
{
"name": "MATLAB",
"bytes": "17384"
},
{
"name": "Makefile",
"bytes": "152150"
},
{
"name": "Python",
"bytes": "549307737"
},
{
"name": "Roff",
"bytes": "26398"
},
{
"name": "SWIG",
"bytes": "142"
},
{
"name": "Shell",
"bytes": "7790"
},
{
"name": "Smarty",
"bytes": "4090"
},
{
"name": "TeX",
"bytes": "152062"
},
{
"name": "XSLT",
"bytes": "305540"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from django.contrib import auth
from django.core.urlresolvers import reverse
from django import http
from django import test
from keystoneclient.auth.identity import v2 as auth_v2
from keystoneclient.auth.identity import v3 as auth_v3
from keystoneclient.auth import token_endpoint
from keystoneclient import exceptions as keystone_exceptions
from keystoneclient import session
from keystoneclient.v2_0 import client as client_v2
from keystoneclient.v3 import client as client_v3
import mock
from mox3 import mox
from testscenarios import load_tests_apply_scenarios # noqa
from openstack_auth import policy
from openstack_auth.tests import data_v2
from openstack_auth.tests import data_v3
from openstack_auth import user
from openstack_auth import utils
DEFAULT_DOMAIN = settings.OPENSTACK_KEYSTONE_DEFAULT_DOMAIN
class OpenStackAuthTestsMixin(object):
'''Common functions for version specific tests.'''
scenarios = [
('pure', {'interface': None}),
('public', {'interface': 'publicURL'}),
('internal', {'interface': 'internalURL'}),
('admin', {'interface': 'adminURL'})
]
def _mock_unscoped_client(self, user):
plugin = self._create_password_auth()
plugin.get_access(mox.IsA(session.Session)). \
AndReturn(self.data.unscoped_access_info)
return self.ks_client_module.Client(session=mox.IsA(session.Session),
auth=plugin)
def _mock_unscoped_client_with_token(self, user, unscoped):
plugin = token_endpoint.Token(settings.OPENSTACK_KEYSTONE_URL,
unscoped.auth_token)
return self.ks_client_module.Client(session=mox.IsA(session.Session),
auth=plugin)
def _mock_client_token_auth_failure(self, unscoped, tenant_id):
plugin = self._create_token_auth(tenant_id, unscoped.auth_token)
plugin.get_access(mox.IsA(session.Session)). \
AndRaise(keystone_exceptions.AuthorizationFailure)
def _mock_client_password_auth_failure(self, username, password, exc):
plugin = self._create_password_auth(username=username,
password=password)
plugin.get_access(mox.IsA(session.Session)).AndRaise(exc)
def _mock_scoped_client_for_tenant(self, auth_ref, tenant_id, url=None,
client=True):
if url is None:
url = settings.OPENSTACK_KEYSTONE_URL
plugin = self._create_token_auth(
tenant_id,
token=self.data.unscoped_access_info.auth_token,
url=url)
plugin.get_access(mox.IsA(session.Session)).AndReturn(auth_ref)
if client:
return self.ks_client_module.Client(
session=mox.IsA(session.Session),
auth=plugin)
def get_form_data(self, user):
return {'region': settings.OPENSTACK_KEYSTONE_URL,
'domain': DEFAULT_DOMAIN,
'password': user.password,
'username': user.name}
class OpenStackAuthTestsV2(OpenStackAuthTestsMixin, test.TestCase):
def setUp(self):
super(OpenStackAuthTestsV2, self).setUp()
if self.interface:
override = self.settings(OPENSTACK_ENDPOINT_TYPE=self.interface)
override.enable()
self.addCleanup(override.disable)
self.mox = mox.Mox()
self.addCleanup(self.mox.VerifyAll)
self.addCleanup(self.mox.UnsetStubs)
self.data = data_v2.generate_test_data()
self.ks_client_module = client_v2
settings.OPENSTACK_API_VERSIONS['identity'] = 2.0
settings.OPENSTACK_KEYSTONE_URL = "http://localhost:5000/v2.0"
self.mox.StubOutClassWithMocks(token_endpoint, 'Token')
self.mox.StubOutClassWithMocks(auth_v2, 'Token')
self.mox.StubOutClassWithMocks(auth_v2, 'Password')
self.mox.StubOutClassWithMocks(client_v2, 'Client')
def _mock_unscoped_list_tenants(self, client, tenants):
client.tenants = self.mox.CreateMockAnything()
client.tenants.list().AndReturn(tenants)
def _mock_unscoped_client_list_tenants(self, user, tenants):
client = self._mock_unscoped_client(user)
self._mock_unscoped_list_tenants(client, tenants)
def _mock_client_delete_token(self, user, token, url=None):
if not url:
url = settings.OPENSTACK_KEYSTONE_URL
plugin = token_endpoint.Token(
endpoint=url,
token=self.data.unscoped_access_info.auth_token)
client = self.ks_client_module.Client(session=mox.IsA(session.Session),
auth=plugin)
client.tokens = self.mox.CreateMockAnything()
client.tokens.delete(token=token)
return client
def _create_password_auth(self, username=None, password=None, url=None):
if not username:
username = self.data.user.name
if not password:
password = self.data.user.password
if not url:
url = settings.OPENSTACK_KEYSTONE_URL
return auth_v2.Password(auth_url=url,
password=password,
username=username)
def _create_token_auth(self, project_id, token=None, url=None):
if not token:
token = self.data.unscoped_access_info.auth_token
if not url:
url = settings.OPENSTACK_KEYSTONE_URL
return auth_v2.Token(auth_url=url,
token=token,
tenant_id=project_id,
reauthenticate=False)
def _login(self):
tenants = [self.data.tenant_one, self.data.tenant_two]
user = self.data.user
unscoped = self.data.unscoped_access_info
form_data = self.get_form_data(user)
self._mock_unscoped_client_list_tenants(user, tenants)
self._mock_scoped_client_for_tenant(unscoped, self.data.tenant_one.id)
self.mox.ReplayAll()
url = reverse('login')
# GET the page to set the test cookie.
response = self.client.get(url, form_data)
self.assertEqual(response.status_code, 200)
# POST to the page to log in.
response = self.client.post(url, form_data)
self.assertRedirects(response, settings.LOGIN_REDIRECT_URL)
def test_login(self):
self._login()
def test_login_with_disabled_tenant(self):
        # Test to validate that authentication will not try to get a
        # scoped token for a disabled project.
tenants = [self.data.tenant_two, self.data.tenant_one]
user = self.data.user
unscoped = self.data.unscoped_access_info
form_data = self.get_form_data(user)
self._mock_unscoped_client_list_tenants(user, tenants)
self._mock_scoped_client_for_tenant(unscoped, self.data.tenant_one.id)
self.mox.ReplayAll()
url = reverse('login')
# GET the page to set the test cookie.
response = self.client.get(url, form_data)
self.assertEqual(response.status_code, 200)
# POST to the page to log in.
response = self.client.post(url, form_data)
self.assertRedirects(response, settings.LOGIN_REDIRECT_URL)
def test_login_w_bad_region_cookie(self):
self.client.cookies['services_region'] = "bad_region"
self._login()
self.assertNotEqual("bad_region",
self.client.session['services_region'])
self.assertEqual("RegionOne",
self.client.session['services_region'])
def test_no_enabled_tenants(self):
tenants = [self.data.tenant_two]
user = self.data.user
form_data = self.get_form_data(user)
self._mock_unscoped_client_list_tenants(user, tenants)
self.mox.ReplayAll()
url = reverse('login')
# GET the page to set the test cookie.
response = self.client.get(url, form_data)
self.assertEqual(response.status_code, 200)
# POST to the page to log in.
response = self.client.post(url, form_data)
self.assertTemplateUsed(response, 'auth/login.html')
self.assertContains(response,
'You are not authorized for any projects.')
def test_no_tenants(self):
user = self.data.user
form_data = self.get_form_data(user)
self._mock_unscoped_client_list_tenants(user, [])
self.mox.ReplayAll()
url = reverse('login')
# GET the page to set the test cookie.
response = self.client.get(url, form_data)
self.assertEqual(response.status_code, 200)
# POST to the page to log in.
response = self.client.post(url, form_data)
self.assertTemplateUsed(response, 'auth/login.html')
self.assertContains(response,
'You are not authorized for any projects.')
def test_invalid_credentials(self):
user = self.data.user
form_data = self.get_form_data(user)
form_data['password'] = "invalid"
exc = keystone_exceptions.Unauthorized(401)
self._mock_client_password_auth_failure(user.name, "invalid", exc)
self.mox.ReplayAll()
url = reverse('login')
# GET the page to set the test cookie.
response = self.client.get(url, form_data)
self.assertEqual(response.status_code, 200)
# POST to the page to log in.
response = self.client.post(url, form_data)
self.assertTemplateUsed(response, 'auth/login.html')
self.assertContains(response, "Invalid user name or password.")
def test_exception(self):
user = self.data.user
form_data = self.get_form_data(user)
exc = keystone_exceptions.ClientException(500)
self._mock_client_password_auth_failure(user.name, user.password, exc)
self.mox.ReplayAll()
url = reverse('login')
# GET the page to set the test cookie.
response = self.client.get(url, form_data)
self.assertEqual(response.status_code, 200)
# POST to the page to log in.
response = self.client.post(url, form_data)
self.assertTemplateUsed(response, 'auth/login.html')
self.assertContains(response,
("An error occurred authenticating. Please try "
"again later."))
def test_redirect_when_already_logged_in(self):
self._login()
response = self.client.get(reverse('login'))
self.assertEqual(response.status_code, 302)
self.assertNotIn(reverse('login'), response['location'])
def test_dont_redirect_when_already_logged_in_if_next_is_set(self):
self._login()
expected_url = "%s?%s=/%s/" % (reverse('login'),
auth.REDIRECT_FIELD_NAME,
'special')
response = self.client.get(expected_url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'auth/login.html')
def test_switch(self, next=None):
tenant = self.data.tenant_two
tenants = [self.data.tenant_one, self.data.tenant_two]
user = self.data.user
unscoped = self.data.unscoped_access_info
scoped = self.data.scoped_access_info
sc = self.data.service_catalog
et = getattr(settings, 'OPENSTACK_ENDPOINT_TYPE', 'publicURL')
endpoint = sc.url_for(endpoint_type=et)
form_data = self.get_form_data(user)
self._mock_unscoped_client_list_tenants(user, tenants)
self._mock_scoped_client_for_tenant(unscoped, self.data.tenant_one.id)
self._mock_client_delete_token(user, unscoped.auth_token, endpoint)
self._mock_scoped_client_for_tenant(scoped, tenant.id, url=endpoint,
client=False)
self.mox.ReplayAll()
url = reverse('login')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
response = self.client.post(url, form_data)
self.assertRedirects(response, settings.LOGIN_REDIRECT_URL)
url = reverse('switch_tenants', args=[tenant.id])
scoped['token']['tenant']['id'] = self.data.tenant_two.id
if next:
form_data.update({auth.REDIRECT_FIELD_NAME: next})
response = self.client.get(url, form_data)
if next:
expected_url = 'http://testserver%s' % next
self.assertEqual(response['location'], expected_url)
else:
self.assertRedirects(response, settings.LOGIN_REDIRECT_URL)
self.assertEqual(self.client.session['token'].tenant['id'],
scoped.tenant_id)
def test_switch_with_next(self):
self.test_switch(next='/next_url')
def test_switch_region(self, next=None):
tenants = [self.data.tenant_one, self.data.tenant_two]
user = self.data.user
scoped = self.data.scoped_access_info
sc = self.data.service_catalog
form_data = self.get_form_data(user)
self._mock_unscoped_client_list_tenants(user, tenants)
self._mock_scoped_client_for_tenant(scoped, self.data.tenant_one.id)
self.mox.ReplayAll()
url = reverse('login')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
response = self.client.post(url, form_data)
self.assertRedirects(response, settings.LOGIN_REDIRECT_URL)
old_region = sc.get_endpoints()['compute'][0]['region']
self.assertEqual(self.client.session['services_region'], old_region)
region = sc.get_endpoints()['compute'][1]['region']
url = reverse('switch_services_region', args=[region])
form_data['region_name'] = region
if next:
form_data.update({auth.REDIRECT_FIELD_NAME: next})
response = self.client.get(url, form_data)
if next:
expected_url = 'http://testserver%s' % next
self.assertEqual(response['location'], expected_url)
else:
self.assertRedirects(response, settings.LOGIN_REDIRECT_URL)
self.assertEqual(self.client.session['services_region'], region)
self.assertEqual(self.client.cookies['services_region'].value, region)
def test_switch_region_with_next(self, next=None):
self.test_switch_region(next='/next_url')
def test_tenant_sorting(self):
tenants = [self.data.tenant_two, self.data.tenant_one]
expected_tenants = [self.data.tenant_one, self.data.tenant_two]
user = self.data.user
unscoped = self.data.unscoped_access_info
client = self._mock_unscoped_client_with_token(user, unscoped)
self._mock_unscoped_list_tenants(client, tenants)
self.mox.ReplayAll()
tenant_list = utils.get_project_list(
user_id=user.id,
auth_url=settings.OPENSTACK_KEYSTONE_URL,
token=unscoped.auth_token)
self.assertEqual(tenant_list, expected_tenants)
def test_tenant_list_caching(self):
tenants = [self.data.tenant_two, self.data.tenant_one]
expected_tenants = [self.data.tenant_one, self.data.tenant_two]
user = self.data.user
unscoped = self.data.unscoped_access_info
client = self._mock_unscoped_client_with_token(user, unscoped)
self._mock_unscoped_list_tenants(client, tenants)
self.mox.ReplayAll()
tenant_list = utils.get_project_list(
user_id=user.id,
auth_url=settings.OPENSTACK_KEYSTONE_URL,
token=unscoped.auth_token)
self.assertEqual(tenant_list, expected_tenants)
        # Test to validate that requesting the project list again results
        # in using the cache and will not make a Keystone call.
self.assertEqual(utils._PROJECT_CACHE.get(unscoped.auth_token),
expected_tenants)
tenant_list = utils.get_project_list(
user_id=user.id,
auth_url=settings.OPENSTACK_KEYSTONE_URL,
token=unscoped.auth_token)
self.assertEqual(tenant_list, expected_tenants)
utils.remove_project_cache(unscoped.auth_token)
self.assertIsNone(utils._PROJECT_CACHE.get(unscoped.auth_token))
class OpenStackAuthTestsV3(OpenStackAuthTestsMixin, test.TestCase):
def _mock_unscoped_client_list_projects(self, user, projects):
client = self._mock_unscoped_client(user)
self._mock_unscoped_list_projects(client, user, projects)
def _mock_unscoped_list_projects(self, client, user, projects):
client.projects = self.mox.CreateMockAnything()
client.projects.list(user=user.id).AndReturn(projects)
def _create_password_auth(self, username=None, password=None, url=None):
if not username:
username = self.data.user.name
if not password:
password = self.data.user.password
if not url:
url = settings.OPENSTACK_KEYSTONE_URL
return auth_v3.Password(auth_url=url,
password=password,
username=username,
user_domain_name=DEFAULT_DOMAIN)
def _create_token_auth(self, project_id, token=None, url=None):
if not token:
token = self.data.unscoped_access_info.auth_token
if not url:
url = settings.OPENSTACK_KEYSTONE_URL
return auth_v3.Token(auth_url=url,
token=token,
project_id=project_id,
reauthenticate=False)
def setUp(self):
super(OpenStackAuthTestsV3, self).setUp()
if getattr(self, 'interface', None):
override = self.settings(OPENSTACK_ENDPOINT_TYPE=self.interface)
override.enable()
self.addCleanup(override.disable)
self.mox = mox.Mox()
self.addCleanup(self.mox.VerifyAll)
self.addCleanup(self.mox.UnsetStubs)
self.data = data_v3.generate_test_data()
self.ks_client_module = client_v3
settings.OPENSTACK_API_VERSIONS['identity'] = 3
settings.OPENSTACK_KEYSTONE_URL = "http://localhost:5000/v3"
self.mox.StubOutClassWithMocks(token_endpoint, 'Token')
self.mox.StubOutClassWithMocks(auth_v3, 'Token')
self.mox.StubOutClassWithMocks(auth_v3, 'Password')
self.mox.StubOutClassWithMocks(client_v3, 'Client')
def test_login(self):
projects = [self.data.project_one, self.data.project_two]
user = self.data.user
unscoped = self.data.unscoped_access_info
form_data = self.get_form_data(user)
self._mock_unscoped_client_list_projects(user, projects)
self._mock_scoped_client_for_tenant(unscoped, self.data.project_one.id)
self.mox.ReplayAll()
url = reverse('login')
# GET the page to set the test cookie.
response = self.client.get(url, form_data)
self.assertEqual(response.status_code, 200)
# POST to the page to log in.
response = self.client.post(url, form_data)
self.assertRedirects(response, settings.LOGIN_REDIRECT_URL)
def test_login_with_disabled_project(self):
        # Test to validate that authentication will not try to get a
        # scoped token for a disabled project.
projects = [self.data.project_two, self.data.project_one]
user = self.data.user
unscoped = self.data.unscoped_access_info
form_data = self.get_form_data(user)
self._mock_unscoped_client_list_projects(user, projects)
self._mock_scoped_client_for_tenant(unscoped, self.data.project_one.id)
self.mox.ReplayAll()
url = reverse('login')
# GET the page to set the test cookie.
response = self.client.get(url, form_data)
self.assertEqual(response.status_code, 200)
# POST to the page to log in.
response = self.client.post(url, form_data)
self.assertRedirects(response, settings.LOGIN_REDIRECT_URL)
def test_no_enabled_projects(self):
projects = [self.data.project_two]
user = self.data.user
form_data = self.get_form_data(user)
self._mock_unscoped_client_list_projects(user, projects)
self.mox.ReplayAll()
url = reverse('login')
# GET the page to set the test cookie.
response = self.client.get(url, form_data)
self.assertEqual(response.status_code, 200)
# POST to the page to log in.
response = self.client.post(url, form_data)
self.assertTemplateUsed(response, 'auth/login.html')
self.assertContains(response,
'You are not authorized for any projects.')
def test_no_projects(self):
user = self.data.user
form_data = self.get_form_data(user)
self._mock_unscoped_client_list_projects(user, [])
self.mox.ReplayAll()
url = reverse('login')
# GET the page to set the test cookie.
response = self.client.get(url, form_data)
self.assertEqual(response.status_code, 200)
# POST to the page to log in.
response = self.client.post(url, form_data)
self.assertTemplateUsed(response, 'auth/login.html')
self.assertContains(response,
'You are not authorized for any projects.')
def test_invalid_credentials(self):
user = self.data.user
form_data = self.get_form_data(user)
form_data['password'] = "invalid"
exc = keystone_exceptions.Unauthorized(401)
self._mock_client_password_auth_failure(user.name, "invalid", exc)
self.mox.ReplayAll()
url = reverse('login')
# GET the page to set the test cookie.
response = self.client.get(url, form_data)
self.assertEqual(response.status_code, 200)
# POST to the page to log in.
response = self.client.post(url, form_data)
self.assertTemplateUsed(response, 'auth/login.html')
self.assertContains(response, "Invalid user name or password.")
def test_exception(self):
user = self.data.user
form_data = self.get_form_data(user)
exc = keystone_exceptions.ClientException(500)
self._mock_client_password_auth_failure(user.name, user.password, exc)
self.mox.ReplayAll()
url = reverse('login')
# GET the page to set the test cookie.
response = self.client.get(url, form_data)
self.assertEqual(response.status_code, 200)
# POST to the page to log in.
response = self.client.post(url, form_data)
self.assertTemplateUsed(response, 'auth/login.html')
self.assertContains(response,
("An error occurred authenticating. Please try "
"again later."))
def test_switch(self, next=None):
project = self.data.project_two
projects = [self.data.project_one, self.data.project_two]
user = self.data.user
scoped = self.data.scoped_access_info
sc = self.data.service_catalog
et = getattr(settings, 'OPENSTACK_ENDPOINT_TYPE', 'publicURL')
form_data = self.get_form_data(user)
self._mock_unscoped_client_list_projects(user, projects)
self._mock_scoped_client_for_tenant(scoped, self.data.project_one.id)
self._mock_scoped_client_for_tenant(
scoped,
project.id,
url=sc.url_for(endpoint_type=et),
client=False)
self.mox.ReplayAll()
url = reverse('login')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
response = self.client.post(url, form_data)
self.assertRedirects(response, settings.LOGIN_REDIRECT_URL)
url = reverse('switch_tenants', args=[project.id])
scoped['project']['id'] = self.data.project_two.id
if next:
form_data.update({auth.REDIRECT_FIELD_NAME: next})
response = self.client.get(url, form_data)
if next:
expected_url = 'http://testserver%s' % next
self.assertEqual(response['location'], expected_url)
else:
self.assertRedirects(response, settings.LOGIN_REDIRECT_URL)
self.assertEqual(self.client.session['token'].project['id'],
scoped.project_id)
def test_switch_with_next(self):
self.test_switch(next='/next_url')
def test_switch_region(self, next=None):
projects = [self.data.project_one, self.data.project_two]
user = self.data.user
scoped = self.data.unscoped_access_info
sc = self.data.service_catalog
form_data = self.get_form_data(user)
self._mock_unscoped_client_list_projects(user, projects)
self._mock_scoped_client_for_tenant(scoped, self.data.project_one.id)
self.mox.ReplayAll()
url = reverse('login')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
response = self.client.post(url, form_data)
self.assertRedirects(response, settings.LOGIN_REDIRECT_URL)
old_region = sc.get_endpoints()['compute'][0]['region']
self.assertEqual(self.client.session['services_region'], old_region)
region = sc.get_endpoints()['compute'][1]['region']
url = reverse('switch_services_region', args=[region])
form_data['region_name'] = region
if next:
form_data.update({auth.REDIRECT_FIELD_NAME: next})
response = self.client.get(url, form_data)
if next:
expected_url = 'http://testserver%s' % next
self.assertEqual(response['location'], expected_url)
else:
self.assertRedirects(response, settings.LOGIN_REDIRECT_URL)
self.assertEqual(self.client.session['services_region'], region)
def test_switch_region_with_next(self, next=None):
self.test_switch_region(next='/next_url')
def test_tenant_sorting(self):
projects = [self.data.project_two, self.data.project_one]
expected_projects = [self.data.project_one, self.data.project_two]
user = self.data.user
unscoped = self.data.unscoped_access_info
client = self._mock_unscoped_client_with_token(user, unscoped)
self._mock_unscoped_list_projects(client, user, projects)
self.mox.ReplayAll()
project_list = utils.get_project_list(
user_id=user.id,
auth_url=settings.OPENSTACK_KEYSTONE_URL,
token=unscoped.auth_token)
self.assertEqual(project_list, expected_projects)
def test_tenant_list_caching(self):
projects = [self.data.project_two, self.data.project_one]
expected_projects = [self.data.project_one, self.data.project_two]
user = self.data.user
unscoped = self.data.unscoped_access_info
client = self._mock_unscoped_client_with_token(user, unscoped)
self._mock_unscoped_list_projects(client, user, projects)
self.mox.ReplayAll()
project_list = utils.get_project_list(
user_id=user.id,
auth_url=settings.OPENSTACK_KEYSTONE_URL,
token=unscoped.auth_token)
self.assertEqual(project_list, expected_projects)
        # Test to validate that requesting the project list again results
        # in using the cache and will not make a Keystone call.
self.assertEqual(utils._PROJECT_CACHE.get(unscoped.auth_token),
expected_projects)
project_list = utils.get_project_list(
user_id=user.id,
auth_url=settings.OPENSTACK_KEYSTONE_URL,
token=unscoped.auth_token)
self.assertEqual(project_list, expected_projects)
utils.remove_project_cache(unscoped.auth_token)
self.assertIsNone(utils._PROJECT_CACHE.get(unscoped.auth_token))
load_tests = load_tests_apply_scenarios
class PolicyLoaderTestCase(test.TestCase):
def test_policy_file_load(self):
policy.reset()
enforcer = policy._get_enforcer()
self.assertEqual(2, len(enforcer))
self.assertTrue('identity' in enforcer)
self.assertTrue('compute' in enforcer)
def test_policy_reset(self):
policy._get_enforcer()
self.assertEqual(2, len(policy._ENFORCER))
policy.reset()
self.assertIsNone(policy._ENFORCER)
class PolicyTestCase(test.TestCase):
_roles = []
def setUp(self):
mock_user = user.User(id=1, roles=self._roles)
patcher = mock.patch('openstack_auth.utils.get_user',
return_value=mock_user)
self.MockClass = patcher.start()
self.addCleanup(patcher.stop)
self.request = http.HttpRequest()
class PolicyTestCaseNonAdmin(PolicyTestCase):
_roles = [{'id': '1', 'name': 'member'}]
def test_check_admin_required_false(self):
policy.reset()
value = policy.check((("identity", "admin_required"),),
request=self.request)
self.assertFalse(value)
def test_check_identity_rule_not_found_false(self):
policy.reset()
value = policy.check((("identity", "i_dont_exist"),),
request=self.request)
# this should fail because the default check for
# identity is admin_required
self.assertFalse(value)
def test_check_nova_context_is_admin_false(self):
policy.reset()
value = policy.check((("compute", "context_is_admin"),),
request=self.request)
self.assertFalse(value)
def test_compound_check_false(self):
policy.reset()
value = policy.check((("identity", "admin_required"),
("identity", "identity:default"),),
request=self.request)
self.assertFalse(value)
def test_scope_not_found(self):
policy.reset()
value = policy.check((("dummy", "default"),),
request=self.request)
self.assertTrue(value)
class PolicyTestCaseAdmin(PolicyTestCase):
_roles = [{'id': '1', 'name': 'admin'}]
def test_check_admin_required_true(self):
policy.reset()
value = policy.check((("identity", "admin_required"),),
request=self.request)
self.assertTrue(value)
def test_check_identity_rule_not_found_true(self):
policy.reset()
value = policy.check((("identity", "i_dont_exist"),),
request=self.request)
# this should succeed because the default check for
# identity is admin_required
self.assertTrue(value)
def test_compound_check_true(self):
policy.reset()
value = policy.check((("identity", "admin_required"),
("identity", "identity:default"),),
request=self.request)
self.assertTrue(value)
def test_check_nova_context_is_admin_true(self):
policy.reset()
value = policy.check((("compute", "context_is_admin"),),
request=self.request)
self.assertTrue(value)
|
{
"content_hash": "ed9d766b00169cb7fe84308e01090aee",
"timestamp": "",
"source": "github",
"line_count": 876,
"max_line_length": 79,
"avg_line_length": 36.05365296803653,
"alnum_prop": 0.6165342114428648,
"repo_name": "promptworks/django_openstack_auth",
"id": "7baa896cb1ad2a4e9fdfbbced234aeb9d375f265",
"size": "32129",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "openstack_auth/tests/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "99"
},
{
"name": "Gettext Catalog",
"bytes": "91879"
},
{
"name": "HTML",
"bytes": "205"
},
{
"name": "Python",
"bytes": "167158"
}
],
"symlink_target": ""
}
|
"""A script to automatically convert an INI Pants config file to TOML. There will still likely be
some issues remaining which require manual fixes, but this script will automate most of the tedium.
Run `python3 migrate_to_toml_config.py --help`.
"""
import argparse
import logging
import re
from pathlib import Path
from typing import Dict, List
def main() -> None:
args = create_parser().parse_args()
updates: Dict[Path, List[str]] = {}
for config in args.files:
if config.suffix not in [".ini", ".cfg"]:
logging.warning(f"This script may only be run on INI files. Skipping {config}.")
continue
new_path = Path(config.parent, f"{config.stem}.toml")
if new_path.exists():
logging.warning(f"{new_path} already exists. Skipping conversion of {config}.")
continue
new_config_content = generate_new_config(config)
updates[new_path] = new_config_content
for new_path, new_content in updates.items():
joined_new_content = "\n".join(new_content) + "\n"
if args.preview:
print(f"Would create {new_path} with the following content:\n\n{joined_new_content}")
else:
logging.info(
f"Created {new_path}. There are likely some remaining issues that need manual "
"attention. Please copy the file into https://www.toml-lint.com or open with your editor "
"to fix any remaining issues."
)
new_path.write_text(joined_new_content)
def create_parser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(description="Convert INI config files to TOML config files.")
parser.add_argument("files", type=Path, nargs="*", help="Config files to convert.")
parser.add_argument(
"-p",
"--preview",
action="store_true",
help="Output to stdout rather than creating the new TOML config file(s).",
)
return parser
def update_primitive_value(original: str) -> str:
if original in ["true", "True"]:
return "true"
if original in ["false", "False"]:
return "false"
try:
return str(int(original))
except ValueError:
pass
try:
return str(float(original))
except ValueError:
pass
return f'"{original}"'
def generate_new_config(config: Path) -> List[str]:
original_text = config.read_text()
original_text_lines = original_text.splitlines()
updated_text_lines = original_text_lines.copy()
for i, line in enumerate(original_text_lines):
option_regex = r"(?P<option>[a-zA-Z0-9_]+)"
before_value_regex = rf"\s*{option_regex}\s*[:=]\s*"
valid_value_characters = r"a-zA-Z0-9_.@!:%\*\=\>\<\-\(\)\/"
value_regex = rf"(?P<value>[{valid_value_characters}]+)"
parsed_line = re.match(rf"{before_value_regex}{value_regex}\s*$", line)
if parsed_line:
option, value = parsed_line.groups()
updated_text_lines[i] = f"{option} = {update_primitive_value(value)}"
continue
# Check if it's a one-line list value
list_value_regex = rf"(?P<list>[\+\-]?\[[{valid_value_characters},\s\'\"]*\])"
parsed_list_line = re.match(rf"{before_value_regex}{list_value_regex}\s*$", line)
if parsed_list_line:
option, value = parsed_list_line.groups()
if value.startswith("+"):
updated_line = f"{option}.add = {value[1:]}"
elif value.startswith("-"):
updated_line = f"{option}.remove = {value[1:]}"
else:
updated_line = f"{option} = {value}"
updated_text_lines[i] = updated_line
continue
# Check if it's a one-line dict value
dict_value_regex = rf"(?P<dict>{{[{valid_value_characters},:\s\'\"]*}})"
parsed_dict_line = re.match(rf"{before_value_regex}{dict_value_regex}\s*$", line)
if parsed_dict_line:
option, value = parsed_dict_line.groups()
updated_text_lines[i] = f'{option} = """{value}"""'
continue
return updated_text_lines
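# --- Illustrative usage sketch (not part of the original script): a hedged
# example of what generate_new_config produces for a tiny INI file. The file
# name and option names below are made up for demonstration.
#
# >>> import tempfile
# >>> demo_ini = Path(tempfile.mkdtemp()) / "demo.ini"
# >>> _ = demo_ini.write_text("[GLOBAL]\npants_version: 1.25.0\nplugins: +['a']\n")
# >>> generate_new_config(demo_ini)
# ['[GLOBAL]', 'pants_version = "1.25.0"', "plugins.add = ['a']"]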
if __name__ == "__main__":
logging.basicConfig(format="[%(levelname)s]: %(message)s", level=logging.INFO)
try:
main()
except KeyboardInterrupt:
pass
|
{
"content_hash": "084022382475a8ee823ae7c328c2b617",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 106,
"avg_line_length": 36.75423728813559,
"alnum_prop": 0.5937283836753516,
"repo_name": "jsirois/pants",
"id": "0a28b32b065ddb42986385f498f72f6236f078b1",
"size": "4492",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "build-support/migration-support/migrate_to_toml_config.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "6008"
},
{
"name": "Mustache",
"bytes": "1798"
},
{
"name": "Python",
"bytes": "2837069"
},
{
"name": "Rust",
"bytes": "1241058"
},
{
"name": "Shell",
"bytes": "57720"
},
{
"name": "Starlark",
"bytes": "27937"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
import uuid
class Migration(migrations.Migration):
dependencies = [
('users', '0011_auto_20151023_1228'),
]
operations = [
migrations.AlterField(
model_name='user',
name='first_name',
field=models.CharField(max_length=50, blank=True, verbose_name='First name'),
),
migrations.AlterField(
model_name='user',
name='last_name',
field=models.CharField(max_length=50, blank=True, verbose_name='Last name'),
),
migrations.AlterField(
model_name='user',
name='sfa_token',
field=models.UUIDField(unique=True, verbose_name='SFA token', default=uuid.uuid4, editable=False),
),
]
|
{
"content_hash": "ec377a566954240cd4f1e06049315711",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 110,
"avg_line_length": 28.79310344827586,
"alnum_prop": 0.5844311377245509,
"repo_name": "qdqmedia/wiggum",
"id": "30b954730f7ac95593d9ca6ff9c634c5f5074e0f",
"size": "859",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wiggum/users/migrations/0012_auto_20151116_1422.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1901"
},
{
"name": "HTML",
"bytes": "20888"
},
{
"name": "JavaScript",
"bytes": "41128"
},
{
"name": "Makefile",
"bytes": "1968"
},
{
"name": "Python",
"bytes": "234610"
},
{
"name": "Shell",
"bytes": "1734"
}
],
"symlink_target": ""
}
|
from meter import *
from datapoints import *
from metergroup import *
from account import *
|
{
"content_hash": "86d032f8db18c8cb959249b01d28649c",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 24,
"avg_line_length": 23,
"alnum_prop": 0.782608695652174,
"repo_name": "impactlab/jps-handoff",
"id": "c911f6590435bd8c72e3b68009d7780ad79a73a2",
"size": "92",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "webapp/viewer/models/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "45251"
},
{
"name": "HTML",
"bytes": "14930"
},
{
"name": "JavaScript",
"bytes": "111861"
},
{
"name": "Python",
"bytes": "120770"
},
{
"name": "Shell",
"bytes": "3443"
}
],
"symlink_target": ""
}
|
from distutils.core import setup
setup(
name = 'django-test-addons',
packages = ['test_addons'],
version = '0.3.5',
description = 'Library to provide support for testing multiple database system like Mongo, Redis, Neo4j along with django.',
author = 'Hakampreet Singh Pandher',
author_email = 'hspandher@outlook.com',
url = 'https://github.com/hspandher/django-test-utils',
download_url = 'https://github.com/hspandher/django-test-utils/tarball/0.1',
keywords = ['testing', 'django', 'mongo', 'redis', 'neo4j', 'TDD', 'python', 'memcache', 'django rest framework'],
license = 'MIT',
install_requires = [
'django>1.6'
],
extras_require = {
'mongo_testing': ['mongoengine>=0.8.7'],
'redis_testing': ['django-redis>=3.8.2'],
'neo4j_testing': ['py2neo>=2.0.6'],
'rest_framework_testing': ['djangorestframework>=3.0.5'],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Testing',
'Topic :: Database',
],
)
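# --- Illustrative note (not part of the original setup.py): the extras_require
# sections above map to pip extras, so the optional dependencies could be
# installed with something like (assuming the PyPI name matches 'name'):
#
#     pip install django-test-addons[mongo_testing,redis_testing]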
|
{
"content_hash": "49dc606e330aef481e3026d3e2a8cf34",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 125,
"avg_line_length": 38.23529411764706,
"alnum_prop": 0.6207692307692307,
"repo_name": "arpitgoyaiitkgp/django-test-addons",
"id": "0874c0e1a5e8dfa67dd93b901b1d82b12cd69b9e",
"size": "1300",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14652"
}
],
"symlink_target": ""
}
|
dbuser = 'wpml'
# what is your database password?
dbpass = 'wpml'
# name of your project. You can specify it here or pass it as argument -n [name]
name = 'wordpress'
# what is your root dir for server (where you want to have wordpress installed)?
serverpath = '/var/www/html'
# what is base url to your server (without name)?
serverurl = 'http://localhost/'
# what is desired wordpress admin name?
wpuser = 'konrad'
# and this user password is:
wppass = '1'
# his email address is:
wpmail = 'konrad.k@icanlocalize.com'
# English will be the default language of your site;
# what are additional languages?
languages = ['pl']
|
{
"content_hash": "0175e595d072315cd2907ef2b2443211",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 80,
"avg_line_length": 29.333333333333332,
"alnum_prop": 0.7224025974025974,
"repo_name": "kkarpieszuk/setwp",
"id": "06d8a483da1b0d69393d372a406a69a0cccc84f0",
"size": "652",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15436"
}
],
"symlink_target": ""
}
|
import re
import os
import shutil
import Prompt
class PromptClass(Prompt.ErrPrompt):
def CreateFileFail(self):
self.PrintErr ("Error: Fail to create file!")
def FileExist(self, File = "File"):
"""
return type: instance of basestring
return value:
        c if the user prefers creating a new one
        e if the user prefers editing mode
        q if the user wants to quit the action
"""
self.PrintErr("Warning: '%s' already exists, please choose the action:" %(File) +
"\n(c)over/ (e)dit/ (q)uit: ")
tmp = raw_input()
return tmp[0].lower()
class BasecfgFile(object):
"""
Base. Only contain GetPath()
"""
@staticmethod
    def GetPath(name): # name is the name of a part in this project file structure, like "cfg"
        """
        Return string: realpath of the part in this project file structure
        Samples of name: "cfg", "$cfg"
        [\W] chars will not be considered.
"""
name = re.sub(r'[\W]', '', name)
pwd = os.path.realpath(os.getcwd())
os.chdir(os.path.pardir)
os.chdir(os.path.curdir + os.path.sep + name.lower())
path = os.path.realpath(os.getcwd())
        os.chdir(pwd)
        return path
class cfgFileIn(BasecfgFile):
"""
    Inherits from BasecfgFile
    Aim: for loading files only
    Hack: by wrapping file.read, supports json.load directly
"""
def InitFile(self, name, path, TransferredMeaning = False):
self.name = name
if TransferredMeaning:
self.path = self.GetPath(path)
else:
self.path = os.path.realpath(path)
try:
self.File = open(self.path + os.path.sep + self.name)
except:
self.File = None
return self.File
def read(self):
return self.File.read()
class cfgFile(BasecfgFile):
"""
    Inherits from BasecfgFile
    Aim: for dumping files.
    Hack: by wrapping file.write, supports json.dump directly
"""
def write(self, content):
"""
Simply call file.write(content)
"""
self.File.write(content)
def __del__(self):
"""
Save all changes
"""
try:
self.CloseFile()
except AttributeError:
pass
def InitFile(self, name, path, TransferredMeaning = False, Prompt = False, Force = False):
"""
Arguments:
            name is the name of the file
            path is the $path (in project), a relative path, or a realpath.
            TransferredMeaning makes the function treat 'path' as a special name for a part of the project, resolved by self.GetPath()
            Prompt is True if the user should be asked to confirm overwriting; returns None if the user declines
            Force creates the file without prompting. It may still fail for unexpected reasons, like a path error or file system error.
        Specifically, returns None if Prompt and Force are both False and the file already exists.
        Sets self.File as the open file handle.
        Returns the file content on success, or None on failure.
"""
prompt = PromptClass()
self.name = name
if TransferredMeaning:
self.path = self.GetPath(path)
else:
self.path = os.path.realpath(path)
self.swp = '.' + self.name + '.swp'
try:
shutil.copyfile(self.path + os.path.sep + self.name, self.path + os.path.sep + self.swp)
except IOError:
pass
        self.FileFunction = lambda x: open(self.path + os.path.sep + self.swp, x)
if Force:
self.File = self.FileFunction('wb')
return ''
if os.path.exists(self.path + os.path.sep + self.name):
if not Prompt:
return None
mode = prompt.FileExist()
if mode == 'c':
self.File = self.FileFunction('wb')
return ''
elif mode == 'e':
File = self.FileFunction('rb')
content = File.read()
File.close()
self.File = self.FileFunction('wb')
return content
elif mode == 'q':
prompt.CreateFileFail()
return None
else:
prompt.PrintErr("Error: Cannot identify the choice, action failed")
return None
else:
self.File = self.FileFunction('wb')
return ''
def SaveFile(self):
self.File.flush()
shutil.copyfile(self.path + os.path.sep + self.swp, self.path + os.path.sep + self.name)
def CloseFile(self):
"""
Close File and save it via rename swp file to target file
"""
self.File.close()
os.rename(self.path + os.path.sep + self.swp, self.path + os.path.sep + self.name)
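# Illustrative usage sketch only; nothing in this module calls it. It shows the
# json.dump/json.load hack described in the class docstrings above. The file name
# "sample.json" and the path "." are hypothetical.
def _ExampleJsonUsage():
    import json
    out = cfgFile()
    if out.InitFile("sample.json", ".", Force = True) is not None:
        json.dump({"key": "value"}, out)  # cfgFile.write() receives the serialized chunks
    del out                               # __del__ -> CloseFile() renames the .swp file into place
    src = cfgFileIn()
    if src.InitFile("sample.json", ".") is not None:
        return json.load(src)             # cfgFileIn.read() feeds the parser
    return None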
if __name__ == '__main__':
raise EnvironmentError("DO NOT DIRECTLY RUN THIS SCRIPT!")
|
{
"content_hash": "f3c1535fc2b3178893ab5164f92f54fc",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 127,
"avg_line_length": 33.08108108108108,
"alnum_prop": 0.5588235294117647,
"repo_name": "nday-dev/Spider-Framework",
"id": "d08f4fad6145256f8c69471ce86ed68a8fb4f371",
"size": "4975",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tmpl/cfgFile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "26989"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/draft_schematic/community_crafting/component/shared_lightweight_turret_electronics.iff"
result.attribute_template_id = -1
result.stfName("string_id_table","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
{
"content_hash": "7fd951a5013ce87db40f59142ad81a76",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 114,
"avg_line_length": 26.384615384615383,
"alnum_prop": 0.7201166180758017,
"repo_name": "anhstudios/swganh",
"id": "3158332578955e9a437964e07c3e3aa1bb04c157",
"size": "488",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/draft_schematic/community_crafting/component/shared_lightweight_turret_electronics.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
}
|
def arrayReplace(inputArray, elemToReplace, substitutionElem):
return [substitutionElem if e == elemToReplace else e for e in inputArray]
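# Quick self-check with hypothetical values: every 1 becomes 5, other elements are untouched.
if __name__ == '__main__':
    assert arrayReplace([1, 2, 1, 3], 1, 5) == [5, 2, 5, 3]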
|
{
"content_hash": "e7a29b24184a04a1d64adc1b2a3ab9ef",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 78,
"avg_line_length": 71,
"alnum_prop": 0.795774647887324,
"repo_name": "emirot/codefights",
"id": "176972e43daf5bb3e01dab3bdca2616c65989160",
"size": "142",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "the_core/arrayReplace.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "104702"
}
],
"symlink_target": ""
}
|
from django.conf.urls import url
from events.forms import EventCreateForm
from events.views import EventCreateView, EventUpdateView, EventImageCreateView, EventImageUpdateView, EventDeleteView, \
EventImageDeleteView, EventDetailView
urlpatterns = [
url(r'^create/', EventCreateView.as_view(), name = "Event_add"),
url(r'^edit/(?P<pk>[0-9]+)', EventUpdateView.as_view(), name = "Event_edit"),
url(r'^addimage/', EventImageCreateView.as_view(), name = "eventimage_add"),
url(r'^event/(?P<pk>[0-9]+)', EventDetailView.as_view(), name = "event_detail"),
url(r'^editimage/(?P<pk>[0-9]+)', EventImageUpdateView.as_view(), name = "eventimage_edit"),
    url(r'^deleteimage/(?P<pk>[0-9]+)/', EventImageDeleteView.as_view(), name = "eventimage_delete"),
    url(r'^delete/(?P<pk>[0-9]+)', EventDeleteView.as_view(), name = "event_delete"),
]
|
{
"content_hash": "8361a54f8a639c84879466e581bf4481",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 121,
"avg_line_length": 50.64705882352941,
"alnum_prop": 0.6840882694541232,
"repo_name": "amfoss/fosswebsite",
"id": "7247693846f985c35d160bc1ec78972ebc80a834",
"size": "861",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "events/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "78438"
},
{
"name": "HTML",
"bytes": "434752"
},
{
"name": "JavaScript",
"bytes": "2198"
},
{
"name": "Python",
"bytes": "248081"
}
],
"symlink_target": ""
}
|
"""
Project management classes and functions
"""
import os
from ConfigParser import SafeConfigParser, NoSectionError, NoOptionError
import time
import datetime
from shutil import copy
try:
from PIL import Image
except:
import Image
import cvutils
import numpy as np
import subprocess
import uuid
from app_config import get_project_path, get_project_config_path, config_section_exists, update_config_without_sections
from video import get_framerate
from statusHelper import StatusHelper
def create_project(identifier, video_part):
config_dict = {}
_update_config_dict_with_defaults(config_dict)
_translate_config_dict(config_dict)
_create_project_dir(identifier, config_dict, video_part)
StatusHelper.initalize_project(identifier) # This must be called after the config path has been created
def update_homography(identifier, homography_path, unitpixelratio):
pass
def update_project_config(identifier, config_dict):
_update_config_dict_with_defaults(config_dict)
_translate_config_dict(config_dict)
project_path = get_project_path(identifier)
tracking_path = os.path.join(project_path, "tracking.cfg")
update_config_without_sections(tracking_path, config_dict)
def default_config_dict():
return {
'max_features_per_frame': 1000,
'num_displacement_frames': 10,
'min_feature_displacement': 0.0001,
'max_iterations_to_persist': 200,
'min_feature_frames': 15,
'max_connection_distance': 1.0,
'max_segmentation_distance': 0.7,
}
def _translate_config_dict(config_dict):
translate_dict = \
{ 'max_features_per_frame': "max-nfeatures",
'num_displacement_frames': "ndisplacements",
'min_feature_displacement': "min-feature-displacement",
'max_iterations_to_persist': "max-number-iterations",
'min_feature_frames': "min-feature-time",
'max_connection_distance': "mm-connection-distance",
'max_segmentation_distance': "mm-segmentation-distance"}
    # iterate over a copied list of items so keys can be renamed and deleted safely
    for (key, val) in config_dict.items():
if key in translate_dict:
config_dict[translate_dict[key]] = val
del config_dict[key]
def _update_config_dict_with_defaults(config_dict):
default_dict = default_config_dict()
# Remove things that we don't want the user to be able to configure
for key in config_dict.keys():
if key not in default_dict.keys():
del config_dict[key]
# Add defaults for things that don't exist
for (key, value) in default_dict.iteritems():
if key not in config_dict.keys():
config_dict[key] = value
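def _example_config_flow():
    """Illustrative sketch only; nothing calls this. It shows how a user-supplied
    dict is first topped up with defaults and then translated to the key names
    used in tracking.cfg. The override value 500 is hypothetical."""
    cfg = {'max_features_per_frame': 500}
    _update_config_dict_with_defaults(cfg)  # fills in the remaining defaults
    _translate_config_dict(cfg)             # renames keys, e.g. to 'max-nfeatures'
    return cfg                              # ready for update_config_without_sections()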
def _create_project_dir(identifier, config_dict, video_part):
test_object_dir = os.path.join(".temp", "test", "test_object")
test_feature_dir = os.path.join(".temp", "test", "test_feature")
directory_names = ["homography", test_object_dir, test_feature_dir, "run", "results"]
project_path = get_project_path(identifier)
if not os.path.exists(project_path):
# Create proj dirs
for new_dir in directory_names:
os.makedirs(os.path.join(project_path, new_dir))
video_part.move(os.path.join(project_path, video_part.get_filename()))
_write_to_project_config(identifier, video_part.get_filename())
default_files_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "default")
tracking_path = os.path.join(project_path, "tracking.cfg")
classifier_path = os.path.join(project_path, "classifier.cfg")
copy(os.path.join(default_files_dir, "tracking.cfg"), tracking_path)
copy(os.path.join(default_files_dir, "classifier.cfg"), classifier_path)
# Add classifier path along with configuration
config_dict['classifier-filename'] = classifier_path
update_config_without_sections(tracking_path, config_dict)
update_dict = {
'pbv-svm-filename': os.path.join(default_files_dir, 'modelPBV.xml'),
'bv-svm-filename': os.path.join(default_files_dir, 'modelBV.xml')
}
update_config_without_sections(classifier_path, update_dict)
        update_config_without_sections(tracking_path, update_dict)
else:
print("Project exists. No new project created.")
def _write_to_project_config(identifier, video_filename):
ts = time.time()
vid_ts = datetime.datetime.now()
#This line needs to be updated to no longer need the ui class. Load video and pull time.
timestamp = datetime.datetime.fromtimestamp(ts).strftime('%d-%m-%Y %H:%M:%S %Z')
video_timestamp = vid_ts.strftime('%d-%m-%Y %H:%M:%S %Z')
config_parser = SafeConfigParser()
config_parser.add_section("info")
config_parser.set("info", "project_name", identifier)
config_parser.set("info", "creation_date", timestamp)
config_parser.add_section("video")
config_parser.set("video", "name", video_filename)
config_parser.set("video", "source", video_filename)
config_parser.set("video", "framerate", str(get_framerate(os.path.join(get_project_path(identifier), video_filename))))
config_parser.set("video", "start", video_timestamp)
with open(get_project_config_path(identifier), 'wb') as configfile:
config_parser.write(configfile)
|
{
"content_hash": "3002b66b1e882c921a07d88ef5f2e530",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 123,
"avg_line_length": 39.05185185185185,
"alnum_prop": 0.6820940819423369,
"repo_name": "santosfamilyfoundation/TrafficCloud",
"id": "ebba2e82c34985ca5fad6c4c9b03ab4f88ceceec",
"size": "5272",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/traffic_cloud_utils/pm.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "665"
},
{
"name": "HTML",
"bytes": "900"
},
{
"name": "JavaScript",
"bytes": "479"
},
{
"name": "Python",
"bytes": "42776"
}
],
"symlink_target": ""
}
|
"""docker-compose.yml generator.
It takes existing docker-compose.yml,
replaces "build:" instructions with the "image:" instructions,
preserves current binds - will read them from the running containers
Usage:
generate-compose.py [ --debug ] --file=file --jmaster-image=jm_image --jmaster-version=jm_version --jslave-image=js_image --jslave-version=js_version
generate-compose.py (-h | --help)
Options:
--file=file Path to the base docker-compose.yml file
--jmaster-image=jm_image Jenkins master image name
--jmaster-version=jm_version Jenkins master image version
  --jslave-image=js_image          Jenkins slave image name
--jslave-version=js_version Jenkins slave image version
-h --help Show this screen.
-d --debug Print debug info
"""
from docopt import docopt
from subprocess import check_output
import logging
import yaml
import re
def get_binds(service_name):
logging.info("Read {} container config...".format(service_name))
command = ["docker", "ps", "-a", "-q", "--no-trunc", "-f", "name=dockerizeit_{}_".format(service_name)]
logging.debug("Get {} container id: {}".format(service_name, " ".join(command)))
container_id = check_output(command).strip()
logging.debug("{} container id: {}".format(service_name, container_id))
    # a full (untruncated) container id is a 64-character hex string; anything else is unexpected
    if not re.match(r"^[0-9a-fA-F]{64}$", container_id):
raise ValueError("Unexpected result. Expected docker container id, got {}. Command: {}".format(container_id, " ".join(command)))
binds = check_output(["docker", "inspect", '--format="{{ .HostConfig.Binds }}"', container_id]).strip()
logging.debug("{} binds: {}".format(service_name, binds))
return binds
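# Illustrative helper, not wired into main(): shows how the raw string returned by
# get_binds() - e.g. the hypothetical sample '"[/host/jenkins:/var/jenkins_home:rw /host/data:/data]"' -
# is turned into the list form docker-compose expects. It mirrors the
# strip/replace/split chain used in main() below.
def _binds_to_volumes(binds):
    return binds.strip('"[|]').replace(":rw", "").split()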
def main():
arguments = docopt(__doc__)
if arguments['--debug']:
logging.basicConfig(level=logging.DEBUG)
logging.debug("Arguments: {}".format(arguments))
with open(arguments['--file'], 'r') as f:
doc = yaml.load(f)
logging.debug("Provided docker compose file: {}".format(doc))
# Update jmaster
    logging.info("Update definition for jmaster. Replace build with image...")
logging.debug("Before: {}".format(doc["services"]["jmaster"]))
del doc["services"]["jmaster"]["build"]
doc["services"]["jmaster"]["image"] = "{}:{}".format(arguments['--jmaster-image'], arguments['--jmaster-version'])
# We are resolving binds to make sure that we point out correct location of the home directory
# Use case: starting containers using docker-machine - on the start path resolved to the home directory on the host
# When munchausen restarts services he will be inside Linux virtual machine and home directory will be resolved differently
logging.info("Update definition for jmaster. Resolve binds...")
jmaster_binds = get_binds("jmaster")
doc["services"]["jmaster"]["volumes"] = jmaster_binds.strip('"[|]').replace(":rw","").split()
# We have to check that we don't have not mounted volumes like 1ab3ba428445786de381d741cd2d3c4dff2e956342f02712ef205fa63ba47779:/var/jenkins_home
# This volume comes from Jenkins Dockerfile - it is declared there but we do not mount it explicitly
# If bring it to the compose file like this 1ab3ba428445786de381d741cd2d3c4dff2e956342f02712ef205fa63ba47779:/var/jenkins_home then
# docker-compose won't be able to create new container to replace old one since we have direct reference to the volume mount point
pattern=re.compile("^[a-zA-Z0-9]+:/var/jenkins_home$")
for item in doc["services"]["jmaster"]["volumes"]:
if re.findall(pattern, item):
logging.debug("Found not mounted volume {}. Drop it from the volumes list...".format(re.findall(pattern, item)[0]))
doc["services"]["jmaster"]["volumes"].remove(re.findall(pattern, item)[0])
logging.debug("After: {}".format(doc["services"]["jmaster"]))
# Update jslave
    logging.info("Update definition for jslave. Replace build with image...")
logging.debug("Before: {}".format(doc["services"]["jslave"]))
del doc["services"]["jslave"]["build"]
doc["services"]["jslave"]["image"] = "{}:{}".format(arguments['--jslave-image'], arguments['--jslave-version'])
jslave_binds = get_binds("jslave")
    doc["services"]["jslave"]["volumes"] = jslave_binds.strip('"[|]').replace(":rw","").split()
# Registry might be removed and replaced by something else so we have to check that it is there
if "registry" in doc["services"]:
logging.debug("Before: {}".format(doc["services"]["registry"]))
registry_binds = get_binds("registry")
doc["services"]["registry"]["volumes"] = registry_binds.strip('"[|]').replace(":rw","").split()
logging.debug("After: {}".format(doc["services"]["registry"]))
with open('docker-compose.yml', 'w') as outfile:
outfile.write( yaml.dump(doc, default_flow_style=False) )
if __name__ == "__main__":
main()
|
{
"content_hash": "0770f03c7337fc095d670e4f241b9425",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 151,
"avg_line_length": 51.07446808510638,
"alnum_prop": 0.6877733805457197,
"repo_name": "Praqma/JenkinsAsCodeReference",
"id": "ced6389dad8fb20bcf845a41d760331e61c86c24",
"size": "4824",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dockerizeit/generate-compose.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "3173"
},
{
"name": "Groovy",
"bytes": "57079"
},
{
"name": "Python",
"bytes": "4824"
},
{
"name": "Shell",
"bytes": "1827"
}
],
"symlink_target": ""
}
|
import time
time.sleep(0.25)
contents = clipboard.get_selection()
if len(contents) > 20:
title = contents[0:17] + "..."
else:
title = contents
folder = engine.get_folder("My Phrases")
engine.create_phrase(folder, title, contents)
|
{
"content_hash": "5d4bdbf4a894e4ddbe7b559a06dfab6e",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 45,
"avg_line_length": 26.333333333333332,
"alnum_prop": 0.70042194092827,
"repo_name": "andresgomezvidal/autokey_scripts",
"id": "80ccb95bcad807e64aac9ec2f7c04df4e263b8fd",
"size": "237",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "data/Scripts/Sample_Scripts/Phrase from selection.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14958"
},
{
"name": "Shell",
"bytes": "39"
}
],
"symlink_target": ""
}
|
from test.support import (TESTFN, run_unittest, import_module, unlink,
requires, _2G, _4G, gc_collect, cpython_only)
import unittest
import os
import re
import itertools
import socket
import sys
import weakref
# Skip test if we can't import mmap.
mmap = import_module('mmap')
PAGESIZE = mmap.PAGESIZE
class MmapTests(unittest.TestCase):
def setUp(self):
if os.path.exists(TESTFN):
os.unlink(TESTFN)
def tearDown(self):
try:
os.unlink(TESTFN)
except OSError:
pass
def test_basic(self):
# Test mmap module on Unix systems and Windows
# Create a file to be mmap'ed.
f = open(TESTFN, 'bw+')
try:
# Write 2 pages worth of data to the file
f.write(b'\0'* PAGESIZE)
f.write(b'foo')
f.write(b'\0'* (PAGESIZE-3) )
f.flush()
m = mmap.mmap(f.fileno(), 2 * PAGESIZE)
finally:
f.close()
# Simple sanity checks
tp = str(type(m)) # SF bug 128713: segfaulted on Linux
self.assertEqual(m.find(b'foo'), PAGESIZE)
self.assertEqual(len(m), 2*PAGESIZE)
self.assertEqual(m[0], 0)
self.assertEqual(m[0:3], b'\0\0\0')
# Shouldn't crash on boundary (Issue #5292)
self.assertRaises(IndexError, m.__getitem__, len(m))
self.assertRaises(IndexError, m.__setitem__, len(m), b'\0')
# Modify the file's content
m[0] = b'3'[0]
m[PAGESIZE +3: PAGESIZE +3+3] = b'bar'
# Check that the modification worked
self.assertEqual(m[0], b'3'[0])
self.assertEqual(m[0:3], b'3\0\0')
self.assertEqual(m[PAGESIZE-1 : PAGESIZE + 7], b'\0foobar\0')
m.flush()
# Test doing a regular expression match in an mmap'ed file
match = re.search(b'[A-Za-z]+', m)
if match is None:
self.fail('regex match on mmap failed!')
else:
start, end = match.span(0)
length = end - start
self.assertEqual(start, PAGESIZE)
self.assertEqual(end, PAGESIZE + 6)
# test seeking around (try to overflow the seek implementation)
m.seek(0,0)
self.assertEqual(m.tell(), 0)
m.seek(42,1)
self.assertEqual(m.tell(), 42)
m.seek(0,2)
self.assertEqual(m.tell(), len(m))
# Try to seek to negative position...
self.assertRaises(ValueError, m.seek, -1)
# Try to seek beyond end of mmap...
self.assertRaises(ValueError, m.seek, 1, 2)
# Try to seek to negative position...
self.assertRaises(ValueError, m.seek, -len(m)-1, 2)
# Try resizing map
try:
m.resize(512)
except SystemError:
# resize() not supported
# No messages are printed, since the output of this test suite
# would then be different across platforms.
pass
else:
# resize() is supported
self.assertEqual(len(m), 512)
# Check that we can no longer seek beyond the new size.
self.assertRaises(ValueError, m.seek, 513, 0)
# Check that the underlying file is truncated too
# (bug #728515)
f = open(TESTFN, 'rb')
try:
f.seek(0, 2)
self.assertEqual(f.tell(), 512)
finally:
f.close()
self.assertEqual(m.size(), 512)
m.close()
def test_access_parameter(self):
# Test for "access" keyword parameter
mapsize = 10
with open(TESTFN, "wb") as fp:
fp.write(b"a"*mapsize)
with open(TESTFN, "rb") as f:
m = mmap.mmap(f.fileno(), mapsize, access=mmap.ACCESS_READ)
self.assertEqual(m[:], b'a'*mapsize, "Readonly memory map data incorrect.")
# Ensuring that readonly mmap can't be slice assigned
try:
m[:] = b'b'*mapsize
except TypeError:
pass
else:
self.fail("Able to write to readonly memory map")
# Ensuring that readonly mmap can't be item assigned
try:
m[0] = b'b'
except TypeError:
pass
else:
self.fail("Able to write to readonly memory map")
# Ensuring that readonly mmap can't be write() to
try:
m.seek(0,0)
m.write(b'abc')
except TypeError:
pass
else:
self.fail("Able to write to readonly memory map")
# Ensuring that readonly mmap can't be write_byte() to
try:
m.seek(0,0)
m.write_byte(b'd')
except TypeError:
pass
else:
self.fail("Able to write to readonly memory map")
# Ensuring that readonly mmap can't be resized
try:
m.resize(2*mapsize)
except SystemError: # resize is not universally supported
pass
except TypeError:
pass
else:
self.fail("Able to resize readonly memory map")
with open(TESTFN, "rb") as fp:
self.assertEqual(fp.read(), b'a'*mapsize,
"Readonly memory map data file was modified")
# Opening mmap with size too big
with open(TESTFN, "r+b") as f:
try:
m = mmap.mmap(f.fileno(), mapsize+1)
except ValueError:
# we do not expect a ValueError on Windows
# CAUTION: This also changes the size of the file on disk, and
# later tests assume that the length hasn't changed. We need to
# repair that.
if sys.platform.startswith('win'):
self.fail("Opening mmap with size+1 should work on Windows.")
else:
# we expect a ValueError on Unix, but not on Windows
if not sys.platform.startswith('win'):
self.fail("Opening mmap with size+1 should raise ValueError.")
m.close()
if sys.platform.startswith('win'):
# Repair damage from the resizing test.
with open(TESTFN, 'r+b') as f:
f.truncate(mapsize)
# Opening mmap with access=ACCESS_WRITE
with open(TESTFN, "r+b") as f:
m = mmap.mmap(f.fileno(), mapsize, access=mmap.ACCESS_WRITE)
# Modifying write-through memory map
m[:] = b'c'*mapsize
self.assertEqual(m[:], b'c'*mapsize,
"Write-through memory map memory not updated properly.")
m.flush()
m.close()
with open(TESTFN, 'rb') as f:
stuff = f.read()
self.assertEqual(stuff, b'c'*mapsize,
"Write-through memory map data file not updated properly.")
# Opening mmap with access=ACCESS_COPY
with open(TESTFN, "r+b") as f:
m = mmap.mmap(f.fileno(), mapsize, access=mmap.ACCESS_COPY)
# Modifying copy-on-write memory map
m[:] = b'd'*mapsize
self.assertEqual(m[:], b'd' * mapsize,
"Copy-on-write memory map data not written correctly.")
m.flush()
with open(TESTFN, "rb") as fp:
self.assertEqual(fp.read(), b'c'*mapsize,
"Copy-on-write test data file should not be modified.")
# Ensuring copy-on-write maps cannot be resized
self.assertRaises(TypeError, m.resize, 2*mapsize)
m.close()
# Ensuring invalid access parameter raises exception
with open(TESTFN, "r+b") as f:
self.assertRaises(ValueError, mmap.mmap, f.fileno(), mapsize, access=4)
if os.name == "posix":
# Try incompatible flags, prot and access parameters.
with open(TESTFN, "r+b") as f:
self.assertRaises(ValueError, mmap.mmap, f.fileno(), mapsize,
flags=mmap.MAP_PRIVATE,
prot=mmap.PROT_READ, access=mmap.ACCESS_WRITE)
# Try writing with PROT_EXEC and without PROT_WRITE
prot = mmap.PROT_READ | getattr(mmap, 'PROT_EXEC', 0)
with open(TESTFN, "r+b") as f:
m = mmap.mmap(f.fileno(), mapsize, prot=prot)
self.assertRaises(TypeError, m.write, b"abcdef")
self.assertRaises(TypeError, m.write_byte, 0)
m.close()
def test_bad_file_desc(self):
# Try opening a bad file descriptor...
self.assertRaises(OSError, mmap.mmap, -2, 4096)
def test_tougher_find(self):
# Do a tougher .find() test. SF bug 515943 pointed out that, in 2.2,
# searching for data with embedded \0 bytes didn't work.
with open(TESTFN, 'wb+') as f:
data = b'aabaac\x00deef\x00\x00aa\x00'
n = len(data)
f.write(data)
f.flush()
m = mmap.mmap(f.fileno(), n)
for start in range(n+1):
for finish in range(start, n+1):
slice = data[start : finish]
self.assertEqual(m.find(slice), data.find(slice))
self.assertEqual(m.find(slice + b'x'), -1)
m.close()
def test_find_end(self):
# test the new 'end' parameter works as expected
f = open(TESTFN, 'wb+')
data = b'one two ones'
n = len(data)
f.write(data)
f.flush()
m = mmap.mmap(f.fileno(), n)
f.close()
self.assertEqual(m.find(b'one'), 0)
self.assertEqual(m.find(b'ones'), 8)
self.assertEqual(m.find(b'one', 0, -1), 0)
self.assertEqual(m.find(b'one', 1), 8)
self.assertEqual(m.find(b'one', 1, -1), 8)
self.assertEqual(m.find(b'one', 1, -2), -1)
def test_rfind(self):
# test the new 'end' parameter works as expected
f = open(TESTFN, 'wb+')
data = b'one two ones'
n = len(data)
f.write(data)
f.flush()
m = mmap.mmap(f.fileno(), n)
f.close()
self.assertEqual(m.rfind(b'one'), 8)
self.assertEqual(m.rfind(b'one '), 0)
self.assertEqual(m.rfind(b'one', 0, -1), 8)
self.assertEqual(m.rfind(b'one', 0, -2), 0)
self.assertEqual(m.rfind(b'one', 1, -1), 8)
self.assertEqual(m.rfind(b'one', 1, -2), -1)
def test_double_close(self):
# make sure a double close doesn't crash on Solaris (Bug# 665913)
f = open(TESTFN, 'wb+')
f.write(2**16 * b'a') # Arbitrary character
f.close()
f = open(TESTFN, 'rb')
mf = mmap.mmap(f.fileno(), 2**16, access=mmap.ACCESS_READ)
mf.close()
mf.close()
f.close()
@unittest.skipUnless(hasattr(os, "stat"), "needs os.stat()")
def test_entire_file(self):
# test mapping of entire file by passing 0 for map length
f = open(TESTFN, "wb+")
f.write(2**16 * b'm') # Arbitrary character
f.close()
f = open(TESTFN, "rb+")
mf = mmap.mmap(f.fileno(), 0)
self.assertEqual(len(mf), 2**16, "Map size should equal file size.")
self.assertEqual(mf.read(2**16), 2**16 * b"m")
mf.close()
f.close()
@unittest.skipUnless(hasattr(os, "stat"), "needs os.stat()")
def test_length_0_offset(self):
# Issue #10916: test mapping of remainder of file by passing 0 for
# map length with an offset doesn't cause a segfault.
# NOTE: allocation granularity is currently 65536 under Win64,
# and therefore the minimum offset alignment.
with open(TESTFN, "wb") as f:
f.write((65536 * 2) * b'm') # Arbitrary character
with open(TESTFN, "rb") as f:
with mmap.mmap(f.fileno(), 0, offset=65536, access=mmap.ACCESS_READ) as mf:
self.assertRaises(IndexError, mf.__getitem__, 80000)
@unittest.skipUnless(hasattr(os, "stat"), "needs os.stat()")
def test_length_0_large_offset(self):
# Issue #10959: test mapping of a file by passing 0 for
# map length with a large offset doesn't cause a segfault.
with open(TESTFN, "wb") as f:
f.write(115699 * b'm') # Arbitrary character
with open(TESTFN, "w+b") as f:
self.assertRaises(ValueError, mmap.mmap, f.fileno(), 0,
offset=2147418112)
def test_move(self):
# make move works everywhere (64-bit format problem earlier)
f = open(TESTFN, 'wb+')
f.write(b"ABCDEabcde") # Arbitrary character
f.flush()
mf = mmap.mmap(f.fileno(), 10)
mf.move(5, 0, 5)
self.assertEqual(mf[:], b"ABCDEABCDE", "Map move should have duplicated front 5")
mf.close()
f.close()
# more excessive test
data = b"0123456789"
for dest in range(len(data)):
for src in range(len(data)):
for count in range(len(data) - max(dest, src)):
expected = data[:dest] + data[src:src+count] + data[dest+count:]
m = mmap.mmap(-1, len(data))
m[:] = data
m.move(dest, src, count)
self.assertEqual(m[:], expected)
m.close()
# segfault test (Issue 5387)
m = mmap.mmap(-1, 100)
offsets = [-100, -1, 0, 1, 100]
for source, dest, size in itertools.product(offsets, offsets, offsets):
try:
m.move(source, dest, size)
except ValueError:
pass
offsets = [(-1, -1, -1), (-1, -1, 0), (-1, 0, -1), (0, -1, -1),
(-1, 0, 0), (0, -1, 0), (0, 0, -1)]
for source, dest, size in offsets:
self.assertRaises(ValueError, m.move, source, dest, size)
m.close()
m = mmap.mmap(-1, 1) # single byte
self.assertRaises(ValueError, m.move, 0, 0, 2)
self.assertRaises(ValueError, m.move, 1, 0, 1)
self.assertRaises(ValueError, m.move, 0, 1, 1)
m.move(0, 0, 1)
m.move(0, 0, 0)
def test_anonymous(self):
# anonymous mmap.mmap(-1, PAGE)
m = mmap.mmap(-1, PAGESIZE)
for x in range(PAGESIZE):
self.assertEqual(m[x], 0,
"anonymously mmap'ed contents should be zero")
for x in range(PAGESIZE):
b = x & 0xff
m[x] = b
self.assertEqual(m[x], b)
def test_read_all(self):
m = mmap.mmap(-1, 16)
self.addCleanup(m.close)
# With no parameters, or None or a negative argument, reads all
m.write(bytes(range(16)))
m.seek(0)
self.assertEqual(m.read(), bytes(range(16)))
m.seek(8)
self.assertEqual(m.read(), bytes(range(8, 16)))
m.seek(16)
self.assertEqual(m.read(), b'')
m.seek(3)
self.assertEqual(m.read(None), bytes(range(3, 16)))
m.seek(4)
self.assertEqual(m.read(-1), bytes(range(4, 16)))
m.seek(5)
self.assertEqual(m.read(-2), bytes(range(5, 16)))
m.seek(9)
self.assertEqual(m.read(-42), bytes(range(9, 16)))
def test_read_invalid_arg(self):
m = mmap.mmap(-1, 16)
self.addCleanup(m.close)
self.assertRaises(TypeError, m.read, 'foo')
self.assertRaises(TypeError, m.read, 5.5)
self.assertRaises(TypeError, m.read, [1, 2, 3])
def test_extended_getslice(self):
# Test extended slicing by comparing with list slicing.
s = bytes(reversed(range(256)))
m = mmap.mmap(-1, len(s))
m[:] = s
self.assertEqual(m[:], s)
indices = (0, None, 1, 3, 19, 300, -1, -2, -31, -300)
for start in indices:
for stop in indices:
# Skip step 0 (invalid)
for step in indices[1:]:
self.assertEqual(m[start:stop:step],
s[start:stop:step])
def test_extended_set_del_slice(self):
# Test extended slicing by comparing with list slicing.
s = bytes(reversed(range(256)))
m = mmap.mmap(-1, len(s))
indices = (0, None, 1, 3, 19, 300, -1, -2, -31, -300)
for start in indices:
for stop in indices:
# Skip invalid step 0
for step in indices[1:]:
m[:] = s
self.assertEqual(m[:], s)
L = list(s)
# Make sure we have a slice of exactly the right length,
# but with different data.
data = L[start:stop:step]
data = bytes(reversed(data))
L[start:stop:step] = data
m[start:stop:step] = data
self.assertEqual(m[:], bytes(L))
def make_mmap_file (self, f, halfsize):
# Write 2 pages worth of data to the file
f.write (b'\0' * halfsize)
f.write (b'foo')
f.write (b'\0' * (halfsize - 3))
f.flush ()
return mmap.mmap (f.fileno(), 0)
def test_empty_file (self):
f = open (TESTFN, 'w+b')
f.close()
with open(TESTFN, "rb") as f :
self.assertRaisesRegex(ValueError,
"cannot mmap an empty file",
mmap.mmap, f.fileno(), 0,
access=mmap.ACCESS_READ)
def test_offset (self):
f = open (TESTFN, 'w+b')
try: # unlink TESTFN no matter what
halfsize = mmap.ALLOCATIONGRANULARITY
m = self.make_mmap_file (f, halfsize)
m.close ()
f.close ()
mapsize = halfsize * 2
# Try invalid offset
f = open(TESTFN, "r+b")
for offset in [-2, -1, None]:
try:
m = mmap.mmap(f.fileno(), mapsize, offset=offset)
self.assertEqual(0, 1)
except (ValueError, TypeError, OverflowError):
pass
else:
self.assertEqual(0, 0)
f.close()
# Try valid offset, hopefully 8192 works on all OSes
f = open(TESTFN, "r+b")
m = mmap.mmap(f.fileno(), mapsize - halfsize, offset=halfsize)
self.assertEqual(m[0:3], b'foo')
f.close()
# Try resizing map
try:
m.resize(512)
except SystemError:
pass
else:
# resize() is supported
self.assertEqual(len(m), 512)
# Check that we can no longer seek beyond the new size.
self.assertRaises(ValueError, m.seek, 513, 0)
# Check that the content is not changed
self.assertEqual(m[0:3], b'foo')
# Check that the underlying file is truncated too
f = open(TESTFN, 'rb')
f.seek(0, 2)
self.assertEqual(f.tell(), halfsize + 512)
f.close()
self.assertEqual(m.size(), halfsize + 512)
m.close()
finally:
f.close()
try:
os.unlink(TESTFN)
except OSError:
pass
def test_subclass(self):
class anon_mmap(mmap.mmap):
def __new__(klass, *args, **kwargs):
return mmap.mmap.__new__(klass, -1, *args, **kwargs)
anon_mmap(PAGESIZE)
@unittest.skipUnless(hasattr(mmap, 'PROT_READ'), "needs mmap.PROT_READ")
def test_prot_readonly(self):
mapsize = 10
with open(TESTFN, "wb") as fp:
fp.write(b"a"*mapsize)
f = open(TESTFN, "rb")
m = mmap.mmap(f.fileno(), mapsize, prot=mmap.PROT_READ)
self.assertRaises(TypeError, m.write, "foo")
f.close()
def test_error(self):
self.assertIs(mmap.error, OSError)
def test_io_methods(self):
data = b"0123456789"
with open(TESTFN, "wb") as fp:
fp.write(b"x"*len(data))
f = open(TESTFN, "r+b")
m = mmap.mmap(f.fileno(), len(data))
f.close()
# Test write_byte()
for i in range(len(data)):
self.assertEqual(m.tell(), i)
m.write_byte(data[i])
self.assertEqual(m.tell(), i+1)
self.assertRaises(ValueError, m.write_byte, b"x"[0])
self.assertEqual(m[:], data)
# Test read_byte()
m.seek(0)
for i in range(len(data)):
self.assertEqual(m.tell(), i)
self.assertEqual(m.read_byte(), data[i])
self.assertEqual(m.tell(), i+1)
self.assertRaises(ValueError, m.read_byte)
# Test read()
m.seek(3)
self.assertEqual(m.read(3), b"345")
self.assertEqual(m.tell(), 6)
# Test write()
m.seek(3)
m.write(b"bar")
self.assertEqual(m.tell(), 6)
self.assertEqual(m[:], b"012bar6789")
m.seek(8)
self.assertRaises(ValueError, m.write, b"bar")
def test_non_ascii_byte(self):
for b in (129, 200, 255): # > 128
m = mmap.mmap(-1, 1)
m.write_byte(b)
self.assertEqual(m[0], b)
m.seek(0)
self.assertEqual(m.read_byte(), b)
m.close()
@unittest.skipUnless(os.name == 'nt', 'requires Windows')
def test_tagname(self):
data1 = b"0123456789"
data2 = b"abcdefghij"
assert len(data1) == len(data2)
# Test same tag
m1 = mmap.mmap(-1, len(data1), tagname="foo")
m1[:] = data1
m2 = mmap.mmap(-1, len(data2), tagname="foo")
m2[:] = data2
self.assertEqual(m1[:], data2)
self.assertEqual(m2[:], data2)
m2.close()
m1.close()
# Test different tag
m1 = mmap.mmap(-1, len(data1), tagname="foo")
m1[:] = data1
m2 = mmap.mmap(-1, len(data2), tagname="boo")
m2[:] = data2
self.assertEqual(m1[:], data1)
self.assertEqual(m2[:], data2)
m2.close()
m1.close()
@cpython_only
@unittest.skipUnless(os.name == 'nt', 'requires Windows')
def test_sizeof(self):
m1 = mmap.mmap(-1, 100)
tagname = "foo"
m2 = mmap.mmap(-1, 100, tagname=tagname)
self.assertEqual(sys.getsizeof(m2),
sys.getsizeof(m1) + len(tagname) + 1)
@unittest.skipUnless(os.name == 'nt', 'requires Windows')
def test_crasher_on_windows(self):
# Should not crash (Issue 1733986)
m = mmap.mmap(-1, 1000, tagname="foo")
try:
mmap.mmap(-1, 5000, tagname="foo")[:] # same tagname, but larger size
except:
pass
m.close()
# Should not crash (Issue 5385)
with open(TESTFN, "wb") as fp:
fp.write(b"x"*10)
f = open(TESTFN, "r+b")
m = mmap.mmap(f.fileno(), 0)
f.close()
try:
m.resize(0) # will raise OSError
except:
pass
try:
m[:]
except:
pass
m.close()
@unittest.skipUnless(os.name == 'nt', 'requires Windows')
def test_invalid_descriptor(self):
# socket file descriptors are valid, but out of range
# for _get_osfhandle, causing a crash when validating the
# parameters to _get_osfhandle.
s = socket.socket()
try:
with self.assertRaises(OSError):
m = mmap.mmap(s.fileno(), 10)
finally:
s.close()
def test_context_manager(self):
with mmap.mmap(-1, 10) as m:
self.assertFalse(m.closed)
self.assertTrue(m.closed)
def test_context_manager_exception(self):
# Test that the OSError gets passed through
with self.assertRaises(Exception) as exc:
with mmap.mmap(-1, 10) as m:
raise OSError
self.assertIsInstance(exc.exception, OSError,
"wrong exception raised in context manager")
self.assertTrue(m.closed, "context manager failed")
def test_weakref(self):
# Check mmap objects are weakrefable
mm = mmap.mmap(-1, 16)
wr = weakref.ref(mm)
self.assertIs(wr(), mm)
del mm
gc_collect()
self.assertIs(wr(), None)
class LargeMmapTests(unittest.TestCase):
def setUp(self):
unlink(TESTFN)
def tearDown(self):
unlink(TESTFN)
def _make_test_file(self, num_zeroes, tail):
if sys.platform[:3] == 'win' or sys.platform == 'darwin':
requires('largefile',
'test requires %s bytes and a long time to run' % str(0x180000000))
f = open(TESTFN, 'w+b')
try:
f.seek(num_zeroes)
f.write(tail)
f.flush()
except (OSError, OverflowError):
try:
f.close()
except (OSError, OverflowError):
pass
raise unittest.SkipTest("filesystem does not have largefile support")
return f
def test_large_offset(self):
with self._make_test_file(0x14FFFFFFF, b" ") as f:
with mmap.mmap(f.fileno(), 0, offset=0x140000000, access=mmap.ACCESS_READ) as m:
self.assertEqual(m[0xFFFFFFF], 32)
def test_large_filesize(self):
with self._make_test_file(0x17FFFFFFF, b" ") as f:
if sys.maxsize < 0x180000000:
# On 32 bit platforms the file is larger than sys.maxsize so
# mapping the whole file should fail -- Issue #16743
with self.assertRaises(OverflowError):
mmap.mmap(f.fileno(), 0x180000000, access=mmap.ACCESS_READ)
with self.assertRaises(ValueError):
mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
with mmap.mmap(f.fileno(), 0x10000, access=mmap.ACCESS_READ) as m:
self.assertEqual(m.size(), 0x180000000)
# Issue 11277: mmap() with large (~4GB) sparse files crashes on OS X.
def _test_around_boundary(self, boundary):
tail = b' DEARdear '
start = boundary - len(tail) // 2
end = start + len(tail)
with self._make_test_file(start, tail) as f:
with mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) as m:
self.assertEqual(m[start:end], tail)
@unittest.skipUnless(sys.maxsize > _4G, "test cannot run on 32-bit systems")
def test_around_2GB(self):
self._test_around_boundary(_2G)
@unittest.skipUnless(sys.maxsize > _4G, "test cannot run on 32-bit systems")
def test_around_4GB(self):
self._test_around_boundary(_4G)
def test_main():
run_unittest(MmapTests, LargeMmapTests)
if __name__ == '__main__':
test_main()
|
{
"content_hash": "737507ec8ea6194b73114da2f36caa97",
"timestamp": "",
"source": "github",
"line_count": 777,
"max_line_length": 92,
"avg_line_length": 35.011583011583014,
"alnum_prop": 0.5217982649610351,
"repo_name": "chidea/GoPythonDLLWrapper",
"id": "ad93a59383421625ad76bf693174cba6708caaf7",
"size": "27204",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "bin/lib/test/test_mmap.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1345"
},
{
"name": "CSS",
"bytes": "96"
},
{
"name": "Go",
"bytes": "2169"
},
{
"name": "Groff",
"bytes": "21080"
},
{
"name": "HTML",
"bytes": "152703"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "PowerShell",
"bytes": "1372"
},
{
"name": "Python",
"bytes": "23244205"
},
{
"name": "R",
"bytes": "5378"
},
{
"name": "Shell",
"bytes": "3770"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
}
|
"""From the paper Convolutional Sequence to Sequence Learning
http://arxiv.org/abs/1705.03122
"""
import tensorflow as tf
import numpy as np
from typeguard import check_argument_types
from neuralmonkey.encoders.attentive import Attentive
from neuralmonkey.model.model_part import ModelPart, FeedDict
from neuralmonkey.logging import log
from neuralmonkey.dataset import Dataset
from neuralmonkey.decorators import tensor
from neuralmonkey.nn.projection import glu, linear
from neuralmonkey.nn.utils import dropout
from neuralmonkey.model.sequence import EmbeddedSequence
class SentenceEncoder(ModelPart, Attentive):
# pylint: disable=too-many-arguments
def __init__(self,
name: str,
input_sequence: EmbeddedSequence,
conv_features: int,
encoder_layers: int,
kernel_width: int = 5,
dropout_keep_prob: float = 1.0,
attention_type: type = None,
attention_state_size: int = None,
attention_fertility: int = 3,
save_checkpoint: str = None,
load_checkpoint: str = None) -> None:
assert check_argument_types()
ModelPart.__init__(self, name, save_checkpoint, load_checkpoint)
Attentive.__init__(self, attention_type,
attention_state_size=attention_state_size,
attention_fertility=attention_fertility)
self.input_sequence = input_sequence
self.encoder_layers = encoder_layers
self.conv_features = conv_features
self.kernel_width = kernel_width
self.dropout_keep_prob = dropout_keep_prob
if conv_features <= 0:
raise ValueError("Number of features must be a positive integer.")
if encoder_layers <= 0:
raise ValueError(
"Number of encoder layers must be a positive integer.")
log("Initializing convolutional seq2seq encoder, name {}"
.format(self.name))
# pylint: enable=too-many-arguments
@tensor
def states(self) -> tf.Tensor:
convolutions = linear(self.ordered_embedded_inputs,
self.conv_features,
scope="order_and_embed")
for layer in range(self.encoder_layers):
convolutions = self._residual_conv(
convolutions, "encoder_conv_{}".format(layer))
return convolutions + linear(self.ordered_embedded_inputs,
self.conv_features,
scope="input_to_final_state")
@tensor
def encoded(self) -> tf.Tensor:
        # This max-pooling over the states is not based on any paper, but was
        # tested empirically
return tf.reduce_max(self.states, axis=1)
@tensor
def _attention_tensor(self) -> tf.Tensor:
return dropout(self.states, self.dropout_keep_prob, self.train_mode)
@tensor
def _attention_mask(self) -> tf.Tensor:
        # TODO this goes against the OOP nature of the design
return self.input_sequence.mask
@tensor
def states_mask(self) -> tf.Tensor:
return self.input_sequence.mask
@tensor
def order_embeddings(self) -> tf.Tensor:
# initialization in the same way as in original CS2S implementation
with tf.variable_scope("input_projection"):
return tf.get_variable(
"order_embeddings", [self.input_sequence.max_length,
self.input_sequence.embedding_sizes[0]],
initializer=tf.random_normal_initializer(stddev=0.1))
@tensor
def ordered_embedded_inputs(self) -> tf.Tensor:
# shape (batch, time, embedding size)
ordering_additive = tf.expand_dims(self.order_embeddings, 0)
batch_max_len = tf.shape(self.input_sequence.data)[1]
clipped_ordering_embed = ordering_additive[:, :batch_max_len, :]
return self.input_sequence.data + clipped_ordering_embed
def _residual_conv(self, input_signals: tf.Tensor, name: str):
with tf.variable_scope(name):
# initialized as described in the paper
init_deviat = np.sqrt(4 / self.conv_features)
convolution_filters = tf.get_variable(
"convolution_filters",
[self.kernel_width, self.conv_features,
2 * self.conv_features],
initializer=tf.random_normal_initializer(stddev=init_deviat))
bias = tf.get_variable(
name="conv_bias",
shape=[2 * self.conv_features],
initializer=tf.zeros_initializer())
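            # GLU residual block: the 1-D convolution widens the signal to
            # 2*conv_features channels, glu() gates one half with the other,
            # and the residual add keeps the output width at conv_features.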
conv = (tf.nn.conv1d(input_signals, convolution_filters, 1, "SAME")
+ bias)
return glu(conv) + input_signals
# pylint: disable=no-self-use
@tensor
def train_mode(self):
# scalar tensor
return tf.placeholder(tf.bool, shape=[], name="mode_placeholder")
# pylint: enable=no-self-use
def feed_dict(self, dataset: Dataset, train: bool = False) -> FeedDict:
fd = self.input_sequence.feed_dict(dataset, train)
fd[self.train_mode] = train
return fd
|
{
"content_hash": "ff2bc382052441a389b3000e3e87ccb4",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 79,
"avg_line_length": 37.41428571428571,
"alnum_prop": 0.6065292096219931,
"repo_name": "bastings/neuralmonkey",
"id": "478a60e4284d922de36809be6cc77e69215eef77",
"size": "5238",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neuralmonkey/encoders/facebook_conv.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "5306"
},
{
"name": "HTML",
"bytes": "1159"
},
{
"name": "JavaScript",
"bytes": "2070"
},
{
"name": "Makefile",
"bytes": "2564"
},
{
"name": "Mathematica",
"bytes": "1874"
},
{
"name": "Perl",
"bytes": "45129"
},
{
"name": "Python",
"bytes": "487576"
},
{
"name": "Shell",
"bytes": "2577"
}
],
"symlink_target": ""
}
|
""" Crawling the geocode.
File: geocoder.py
Author: SpaceLis
Email: Wen.Li@tudelft.nl
GitHub: http://github.com/spacelis
Description:
"""
from gevent import monkey
monkey.patch_all()
import sys
import re
import requests
from pyproj import Proj
from crawler.actors import Controller
from crawler.readers import CSVReader
from crawler.writers import FileWriter
import logging
logging.basicConfig(level=logging.INFO)
logging.getLogger("requests").setLevel(logging.WARNING)
COORD = re.compile(r'<gml:pos dimension="2">([\d.]+ [\d.]+)</gml:pos>')
RESTURL = 'http://geodata.nationaalgeoregister.nl/geocoder/Geocoder'
PROJ = Proj(init='EPSG:28992')
def parsecoord(text):
""" Parse coordinates from the returns. """
x, y = [float(i) for i in COORD.search(text).group(1).split(' ')]
return ','.join([str(i) for i in PROJ(x, y, inverse=True)][::-1])
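# parsecoord() receives the GML response of the Dutch national geocoder: the x/y
# pair is in the RD New projection (EPSG:28992) and is converted back to WGS84;
# the [::-1] swaps pyproj's (lon, lat) ordering into a "lat,lon" string.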
class Worker(object):
""" A worker. """
    def __init__(self, _):
        """Initialize the worker; the single argument is unused."""
pass
    def work_on(self, x):
        """Geocode one record.
        :x: A dict holding one row from the csv
        :returns: A comma-separated line with the resolved coordinates, or '' on failure.
        """
if ('Straatnaam' not in x) and ('address' in x):
q = {'zoekterm': x['address']}
try:
ret = requests.get(RESTURL, params=q)
return ','.join([x['Meldings nummer'], parsecoord(ret.text)])
except Exception as e:
logging.exception(e)
print '%s,"%s"' % (x['Meldings nummer'], x['address'])
return ''
else:
s = x['Straatnaam']
n = ('' if x['Huisnummer Melding'] == '0'
else str(x['Huisnummer Melding']))
q = {'zoekterm': ' '.join([s, n, 'Rotterdam'])}
try:
ret = requests.get(RESTURL, params=q)
return ','.join([s, n, parsecoord(ret.text)])
except Exception as e:
logging.exception(e)
print ','.join([s, n])
return ''
def test():
""" test """
w = Worker('')
print w.work_on({'Straatnaam': 'FEIJENOORDHAVEN',
'Huisnummer Melding': '44'})
print w.work_on({'Straatnaam': 'FEIJENOORDHAVEN',
'Huisnummer Melding': '0'})
print w.work_on({'Meldings nummer': '1',
'address': 'Science Park Amsterdam'})
def console():
    """ Run the crawler from the command line.
    :returns: None
    """
if len(sys.argv) != 3:
print >> sys.stderr, 'Usage: tweet_crawler.py <input> <output>'
sys.exit()
controller = Controller.start(Worker, [1],
CSVReader(sys.argv[1]),
FileWriter(sys.argv[2]),
poolsize=15)
controller.actor_stopped.wait()
if __name__ == '__main__':
console()
# test()
|
{
"content_hash": "8b00b6fca2cc86959414a5f85b5f5981",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 77,
"avg_line_length": 26.880733944954127,
"alnum_prop": 0.5341296928327645,
"repo_name": "spacelis/crawler.kka",
"id": "f37a524ffee9ced2ae67f9b645a4cd3302d8e289",
"size": "2976",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/geocoder.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "46623"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import tensorflow as tf
import argparse
from antk.core import config
from antk.core import generic_model
from antk.core import loader
from antk.models import tree_mf_model
def return_parser():
parser = argparse.ArgumentParser(description="For testing")
parser.add_argument("datadir", metavar="DATA_DIRECTORY", type=str,
help="The directory where train, dev, and test data resides. ")
parser.add_argument("config", metavar="CONFIG", type=str,
help="The config file for building the ant architecture.")
parser.add_argument("-initrange", metavar="INITRANGE", type=float, default=0.00001,
help="A value determining the initial size of the weights.")
parser.add_argument("-kfactors", metavar="KFACTORS", type=int, default=50,
help="The rank of the low rank factorization.")
parser.add_argument("-lamb", metavar="LAMBDA", type=float, default=0.0001,
help="The coefficient for l2 regularization")
parser.add_argument("-mb", metavar="MINIBATCH", type=int, default=50,
help="The size of minibatches for stochastic gradient descent.")
parser.add_argument("-learnrate", metavar="LEARNRATE", type=float, default=0.00001,
help="The stepsize for gradient descent.")
parser.add_argument("-verbose", metavar="VERBOSE", type=bool, default=True,
help="Whether or not to print dev evaluations during training.")
parser.add_argument("-maxbadcount", metavar="MAXBADCOUNT", type=int, default=20,
help="The threshold for early stopping.")
parser.add_argument("-epochs", metavar="EPOCHS", type=int, default=100,
help="The maximum number of epochs to train for.")
parser.add_argument("-random_seed", metavar="RANDOM_SEED", type=int, default=500,
help="For reproducible results.")
parser.add_argument("-eval_rate", metavar="EVAL_RATE", type=int, default=10000,
help="How often (in terms of number of data points) to evaluate on dev.")
return parser
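# Example invocation (paths and values below are hypothetical):
#   python tree_mf.py /data/movielens config/tree_mf.config -kfactors 100 -mb 500 -epochs 50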
if __name__ == '__main__':
args = return_parser().parse_args()
data = loader.read_data_sets(args.datadir, folders=['train', 'test', 'dev', 'user', 'item'], mix=False)
data.train.labels['ratings'] = loader.center(data.train.labels['ratings'], axis=None)
data.dev.labels['ratings'] = loader.center(data.dev.labels['ratings'], axis=None)
data.user.features['age'] = loader.center(data.user.features['age'], axis=None)
data.item.features['year'] = loader.center(data.item.features['year'], axis=None)
data.user.features['age'] = loader.maxnormalize(data.user.features['age'])
data.item.features['year'] = loader.maxnormalize(data.item.features['year'])
x = tree_mf_model.tree(data, args.config,
initrange=args.initrange,
kfactors=args.kfactors,
                           lamb=args.lamb,
mb=args.mb,
learnrate=args.learnrate,
verbose=args.verbose,
maxbadcount=args.maxbadcount,
epochs=args.epochs,
random_seed=args.random_seed,
eval_rate=args.eval_rate)
#print stuff here to file.
|
{
"content_hash": "060c6bab24a6f0cd6fa16fd36568048f",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 107,
"avg_line_length": 57.35,
"alnum_prop": 0.6160999709386806,
"repo_name": "aarontuor/antk",
"id": "cb8753b4649b44be04f6f0480c06dea688858c3e",
"size": "3441",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/modelwrappers/tree_mf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "47995"
},
{
"name": "Python",
"bytes": "332504"
}
],
"symlink_target": ""
}
|
import antlr3
import testbase
import unittest
class t006lexer(testbase.ANTLRTest):
def setUp(self):
self.compileGrammar()
def lexerClass(self, base):
class TLexer(base):
def emitErrorMessage(self, msg):
# report errors to /dev/null
pass
def reportError(self, re):
# no error recovery yet, just crash!
raise re
return TLexer
def testValid(self):
stream = antlr3.StringStream('fofaaooa')
lexer = self.getLexer(stream)
token = lexer.nextToken()
self.assertEqual(token.type, self.lexerModule.FOO)
self.assertEqual(token.start, 0)
self.assertEqual(token.stop, 1)
self.assertEqual(token.text, 'fo')
token = lexer.nextToken()
self.assertEqual(token.type, self.lexerModule.FOO)
self.assertEqual(token.start, 2)
self.assertEqual(token.stop, 7)
self.assertEqual(token.text, 'faaooa')
token = lexer.nextToken()
self.assertEqual(token.type, self.lexerModule.EOF)
def testMalformedInput(self):
stream = antlr3.StringStream('fofoaooaoa2')
lexer = self.getLexer(stream)
lexer.nextToken()
lexer.nextToken()
try:
token = lexer.nextToken()
self.fail(token)
except antlr3.MismatchedTokenException as exc:
self.assertEqual(exc.expecting, 'f')
self.assertEqual(exc.unexpectedType, '2')
self.assertEqual(exc.charPositionInLine, 10)
self.assertEqual(exc.line, 1)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "0ef64d5bb0817eb8deed9ab99e62eb40",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 58,
"avg_line_length": 27.918032786885245,
"alnum_prop": 0.5854374633000587,
"repo_name": "openstack/congress",
"id": "daa5d29dd30abbcb8e4f0018158b20c189f7aed3",
"size": "1703",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t006lexer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "GAP",
"bytes": "7778"
},
{
"name": "Makefile",
"bytes": "228"
},
{
"name": "Mako",
"bytes": "1043"
},
{
"name": "Python",
"bytes": "2614028"
},
{
"name": "Shell",
"bytes": "45786"
}
],
"symlink_target": ""
}
|
import logging
import asyncio
from hailtop.google_storage import GCS
from .spec_writer import SpecWriter
log = logging.getLogger('logstore')
class LogStore:
def __init__(self, batch_logs_bucket_name, worker_logs_bucket_name, instance_id, blocking_pool, *, project=None, credentials=None):
self.batch_logs_bucket_name = batch_logs_bucket_name
self.worker_logs_bucket_name = worker_logs_bucket_name
self.instance_id = instance_id
self.worker_logs_root = f'gs://{worker_logs_bucket_name}/batch/logs/{instance_id}/worker'
self.batch_logs_root = f'gs://{batch_logs_bucket_name}/batch/logs/{instance_id}/batch'
self.gcs = GCS(blocking_pool, project=project, credentials=credentials)
def worker_log_path(self, machine_name, log_file):
# this has to match worker startup-script
return f'{self.worker_logs_root}/{machine_name}/{log_file}'
def batch_log_dir(self, batch_id):
return f'{self.batch_logs_root}/{batch_id}'
def log_path(self, format_version, batch_id, job_id, attempt_id, task):
if not format_version.has_attempt_in_log_path():
return f'{self.batch_log_dir(batch_id)}/{job_id}/{task}/log'
return f'{self.batch_log_dir(batch_id)}/{job_id}/{attempt_id}/{task}/log'
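    # log_path() example (all ids hypothetical): a format version that includes
    # the attempt id yields something like
    #   gs://<batch-logs-bucket>/batch/logs/<instance-id>/batch/7/12/abc123/main/log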
async def read_log_file(self, format_version, batch_id, job_id, attempt_id, task):
path = self.log_path(format_version, batch_id, job_id, attempt_id, task)
return await self.gcs.read_gs_file(path)
async def write_log_file(self, format_version, batch_id, job_id, attempt_id, task, data):
path = self.log_path(format_version, batch_id, job_id, attempt_id, task)
return await self.gcs.write_gs_file_from_string(path, data)
async def delete_batch_logs(self, batch_id):
await self.gcs.delete_gs_files(
self.batch_log_dir(batch_id))
def status_path(self, batch_id, job_id, attempt_id):
return f'{self.batch_log_dir(batch_id)}/{job_id}/{attempt_id}/status.json'
async def read_status_file(self, batch_id, job_id, attempt_id):
path = self.status_path(batch_id, job_id, attempt_id)
return await self.gcs.read_gs_file(path)
async def write_status_file(self, batch_id, job_id, attempt_id, status):
path = self.status_path(batch_id, job_id, attempt_id)
return await self.gcs.write_gs_file_from_string(path, status)
async def delete_status_file(self, batch_id, job_id, attempt_id):
path = self.status_path(batch_id, job_id, attempt_id)
return await self.gcs.delete_gs_file(path)
def specs_dir(self, batch_id, token):
return f'{self.batch_logs_root}/{batch_id}/bunch/{token}'
def specs_path(self, batch_id, token):
return f'{self.specs_dir(batch_id, token)}/specs'
def specs_index_path(self, batch_id, token):
return f'{self.specs_dir(batch_id, token)}/specs.idx'
async def read_spec_file(self, batch_id, token, start_job_id, job_id):
idx_path = self.specs_index_path(batch_id, token)
idx_start, idx_end = SpecWriter.get_index_file_offsets(job_id, start_job_id)
offsets = await self.gcs.read_binary_gs_file(idx_path, start=idx_start, end=idx_end)
spec_path = self.specs_path(batch_id, token)
spec_start, spec_end = SpecWriter.get_spec_file_offsets(offsets)
return await self.gcs.read_gs_file(spec_path, start=spec_start, end=spec_end)
async def write_spec_file(self, batch_id, token, data_bytes, offsets_bytes):
idx_path = self.specs_index_path(batch_id, token)
write1 = self.gcs.write_gs_file_from_string(idx_path, offsets_bytes,
content_type='application/octet-stream')
specs_path = self.specs_path(batch_id, token)
write2 = self.gcs.write_gs_file_from_string(specs_path, data_bytes)
await asyncio.gather(write1, write2)
async def delete_spec_file(self, batch_id, token):
await self.gcs.delete_gs_files(self.specs_dir(batch_id, token))
|
{
"content_hash": "d91d259cb8e81ef2d667b5b310c3042d",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 135,
"avg_line_length": 45.640449438202246,
"alnum_prop": 0.6602658788774003,
"repo_name": "cseed/hail",
"id": "9ed70cc9b6362b209d2ed37e22af14a5958ef299",
"size": "4062",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "batch/batch/log_store.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "7729"
},
{
"name": "C",
"bytes": "289"
},
{
"name": "C++",
"bytes": "170210"
},
{
"name": "CSS",
"bytes": "20423"
},
{
"name": "Dockerfile",
"bytes": "7426"
},
{
"name": "HTML",
"bytes": "43106"
},
{
"name": "Java",
"bytes": "22564"
},
{
"name": "JavaScript",
"bytes": "730"
},
{
"name": "Jupyter Notebook",
"bytes": "162397"
},
{
"name": "Makefile",
"bytes": "58348"
},
{
"name": "PLpgSQL",
"bytes": "23163"
},
{
"name": "Python",
"bytes": "3477764"
},
{
"name": "R",
"bytes": "3038"
},
{
"name": "Scala",
"bytes": "3496240"
},
{
"name": "Shell",
"bytes": "41254"
},
{
"name": "TSQL",
"bytes": "10385"
},
{
"name": "TeX",
"bytes": "7125"
},
{
"name": "XSLT",
"bytes": "9787"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('productores', '0008_auto_20170919_0822'),
]
operations = [
migrations.AlterModelOptions(
name='sistemacacao',
options={'verbose_name': 'Sistema de cacao', 'verbose_name_plural': 'Sistema de cacao'},
),
]
|
{
"content_hash": "9466f88a0be32175c80789b62ad48b3c",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 100,
"avg_line_length": 23.764705882352942,
"alnum_prop": 0.6212871287128713,
"repo_name": "CARocha/plasystem",
"id": "1dfc8bf10d742cec2267db569cb27eb55a00fb72",
"size": "477",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "productores/migrations/0009_auto_20170921_1336.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "331579"
},
{
"name": "HTML",
"bytes": "246730"
},
{
"name": "JavaScript",
"bytes": "617974"
},
{
"name": "Python",
"bytes": "314836"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class GridcolorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name="gridcolor", parent_name="layout.smith.realaxis", **kwargs
):
super(GridcolorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
**kwargs,
)
|
{
"content_hash": "8c26d83294633c426266abc3e8a9876b",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 84,
"avg_line_length": 33.07692307692308,
"alnum_prop": 0.6139534883720931,
"repo_name": "plotly/plotly.py",
"id": "b1ad35ff0ebcf85a03f3b28926619a40bc77dde2",
"size": "430",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/layout/smith/realaxis/_gridcolor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
}
|
from dbt.logger import GLOBAL_LOGGER as logger # noqa
import dbt.exceptions
import google.cloud.bigquery
class Column(object):
TYPE_LABELS = {
'STRING': 'TEXT',
'TIMESTAMP': 'TIMESTAMP',
'FLOAT': 'FLOAT',
'INTEGER': 'INT'
}
def __init__(self, column, dtype, char_size=None, numeric_size=None):
self.column = column
self.dtype = dtype
self.char_size = char_size
self.numeric_size = numeric_size
@classmethod
def translate_type(cls, dtype):
return cls.TYPE_LABELS.get(dtype.upper(), dtype)
@classmethod
def create(cls, name, label_or_dtype):
column_type = cls.translate_type(label_or_dtype)
return cls(name, column_type)
@property
def name(self):
return self.column
@property
def quoted(self):
return '"{}"'.format(self.column)
@property
def data_type(self):
if self.is_string():
return Column.string_type(self.string_size())
elif self.is_numeric():
return Column.numeric_type(self.dtype, self.numeric_size)
else:
return self.dtype
def is_string(self):
return self.dtype.lower() in ['text', 'character varying']
def is_numeric(self):
return self.dtype.lower() in ['numeric', 'number']
def string_size(self):
if not self.is_string():
raise RuntimeError("Called string_size() on non-string field!")
if self.dtype == 'text' or self.char_size is None:
# char_size should never be None. Handle it reasonably just in case
return 255
else:
return int(self.char_size)
def can_expand_to(self, other_column):
"""returns True if this column can be expanded to the size of the
other column"""
if not self.is_string() or not other_column.is_string():
return False
return other_column.string_size() > self.string_size()
def literal(self, value):
return "{}::{}".format(value, self.data_type)
@classmethod
def string_type(cls, size):
return "character varying({})".format(size)
@classmethod
def numeric_type(cls, dtype, size):
# This could be decimal(...), numeric(...), number(...)
# Just use whatever was fed in here -- don't try to get too clever
if size is None:
return dtype
else:
return "{}({})".format(dtype, size)
def __repr__(self):
return "<Column {} ({})>".format(self.name, self.data_type)
class BigQueryColumn(Column):
TYPE_LABELS = {
'STRING': 'STRING',
'TIMESTAMP': 'TIMESTAMP',
'FLOAT': 'FLOAT64',
'INTEGER': 'INT64',
'RECORD': 'RECORD',
}
def __init__(self, column, dtype, fields=None, mode='NULLABLE'):
super(BigQueryColumn, self).__init__(column, dtype)
if fields is None:
fields = []
self.fields = self.wrap_subfields(fields)
self.mode = mode
@classmethod
def wrap_subfields(cls, fields):
return [BigQueryColumn.create_from_field(field) for field in fields]
@classmethod
def create_from_field(cls, field):
return BigQueryColumn(field.name, cls.translate_type(field.field_type),
field.fields, field.mode)
@classmethod
def _flatten_recursive(cls, col, prefix=None):
if prefix is None:
prefix = []
if len(col.fields) == 0:
prefixed_name = ".".join(prefix + [col.column])
new_col = BigQueryColumn(prefixed_name, col.dtype, col.fields,
col.mode)
return [new_col]
new_fields = []
for field in col.fields:
new_prefix = prefix + [col.column]
new_fields.extend(cls._flatten_recursive(field, new_prefix))
return new_fields
def flatten(self):
return self._flatten_recursive(self)
@property
def quoted(self):
return '`{}`'.format(self.column)
def literal(self, value):
return "cast({} as {})".format(value, self.dtype)
def to_bq_schema_object(self):
kwargs = {}
if len(self.fields) > 0:
fields = [field.to_bq_schema_object() for field in self.fields]
kwargs = {"fields": fields}
return google.cloud.bigquery.SchemaField(self.name, self.dtype,
self.mode, **kwargs)
@property
def data_type(self):
if self.dtype.upper() == 'RECORD':
subcols = [
"{} {}".format(col.name, col.data_type) for col in self.fields
]
field_type = 'STRUCT<{}>'.format(", ".join(subcols))
else:
field_type = self.dtype
if self.mode.upper() == 'REPEATED':
return 'ARRAY<{}>'.format(field_type)
else:
return field_type
def is_string(self):
return self.dtype.lower() == 'string'
def is_numeric(self):
return False
def can_expand_to(self, other_column):
"""returns True if both columns are strings"""
return self.is_string() and other_column.is_string()
def __repr__(self):
return "<BigQueryColumn {} ({}, {})>".format(self.name, self.data_type,
self.mode)
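# Illustrative sketch (not part of dbt): how the Column helpers above compose.
# Names, types and sizes below are invented for the example; real BigQuery columns
# are normally built from the client's SchemaField objects via create_from_field().
if __name__ == '__main__':
    # Base Column: string sizing, expansion checks and literals.
    email = Column('email', 'character varying', char_size=64)
    display = Column('display_name', 'character varying', char_size=128)
    print(email.can_expand_to(display))   # True: both strings and 128 > 64
    print(display.literal("'x'"))         # 'x'::character varying(128)

    # BigQueryColumn: RECORD columns render as STRUCT<...>, REPEATED as ARRAY<...>.
    address = BigQueryColumn('address', 'RECORD', fields=[
        google.cloud.bigquery.SchemaField('city', 'STRING'),
        google.cloud.bigquery.SchemaField('zips', 'STRING', mode='REPEATED'),
    ])
    print(address.data_type)                      # STRUCT<city STRING, zips ARRAY<STRING>>
    print([c.name for c in address.flatten()])    # ['address.city', 'address.zips']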
|
{
"content_hash": "bd3e87d3c895794b26764f23d023e26f",
"timestamp": "",
"source": "github",
"line_count": 184,
"max_line_length": 79,
"avg_line_length": 29.456521739130434,
"alnum_prop": 0.5592250922509225,
"repo_name": "nave91/dbt",
"id": "3a34903a867c14679b68b51841f6cb18eb72da32",
"size": "5420",
"binary": false,
"copies": "1",
"ref": "refs/heads/development",
"path": "dbt/schema.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "814"
},
{
"name": "Python",
"bytes": "447357"
},
{
"name": "Shell",
"bytes": "617"
}
],
"symlink_target": ""
}
|
"""Axis related components."""
import copy
import logging
import os
from math import floor
import h5py
import numpy as np
from crispy import resourceAbsolutePath
from crispy.config import Config
from crispy.items import BaseItem, ComboItem, DoubleItem, IntItem, Vector3DItem
from crispy.quanty import XDB
logger = logging.getLogger(__name__)
settings = Config().read()
class Broadening(DoubleItem):
@property
def value(self):
return self._value
@value.setter
def value(self, value):
if value is None:
return
if value < 0.0:
raise ValueError("The broadening cannot be negative.")
self._value = value
self.dataChanged.emit(1)
class Lorentzian(Broadening):
MINIMUM = 0.1
def __init__(self, parent=None, name="Lorentzian", value=None):
super().__init__(parent=parent, name=name)
self._value = value
# TODO: Implement these for variable broadening.
self.energies = BaseItem(self, "Energies")
self.fwhms = BaseItem(self, "FWHM")
@property
def value(self):
return self._value
@value.setter
def value(self, value):
if value is None:
return
if value < self.MINIMUM:
raise ValueError(
f"The Lorentzian broadening cannot be smaller than {self.MINIMUM}."
)
self._value = value
self.dataChanged.emit(1)
@property
def replacements(self):
replacements = {}
if self.ancestor.experiment.isTwoDimensional:
# Energy dependent Lorentzian broadening of 2D spectra is not supported
# in Quanty, so we use the value set for the Lorentzian broadening
# as Gamma.
replacements["Gamma"] = self.value
else:
axis = self.parent()
start = axis.start.value
stop = axis.stop.value
points = [(start, self.value), (stop, self.value)]
replacement = "{"
for i, (energy, fwhm) in enumerate(points):
replacement += f"{{{energy}, {fwhm}}}"
if i != len(points) - 1:
replacement += ", "
else:
replacement += "}"
replacements["Lorentzian"] = replacement
replacements["Gamma"] = self.MINIMUM
return replacements
def copyFrom(self, item):
super().copyFrom(item)
self.energies.copyFrom(item.energies)
self.fwhms.copyFrom(item.fwhms)
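# Minimal sketch of the Quanty table assembled in Lorentzian.replacements above,
# detached from the item tree so it can run stand-alone; the energies and FWHM
# below are assumed example values, not defaults of the application.
def _lorentzian_table(start, stop, fwhm):
    points = [(start, fwhm), (stop, fwhm)]
    return "{" + ", ".join(f"{{{energy}, {width}}}" for energy, width in points) + "}"

# _lorentzian_table(-10.0, 20.0, 0.5) == "{{-10.0, 0.5}, {20.0, 0.5}}", while a
# two-dimensional experiment bypasses the table and passes the value as "Gamma".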
class Gaussian(Broadening):
def __init__(self, parent=None, name="Gaussian", value=None):
super().__init__(parent=parent, name=name)
self._value = value
@property
def replacements(self):
replacements = {}
# Use zero by default, but write the actual value as a comment.
replacements["Gaussian"] = f"0.0 -- {self.value}"
return replacements
class LightVector(Vector3DItem):
@property
def normalized(self):
return self.value / np.linalg.norm(self.value)
@property
def replacements(self):
# Normalize the vector.
v = self.normalized
return f"{{{v[0]:.8g}, {v[1]:.8g}, {v[2]:.8g}}}"
class WaveVector(LightVector):
def __init__(self, parent=None, name="Wave Vector", value=None):
super().__init__(parent=parent, name=name, value=value)
@property
def value(self):
return self._value
@value.setter
def value(self, value):
if np.all(value == 0):
raise ValueError("The wave vector cannot be null.")
photon = self.parent()
k, e1 = value, photon.e1.value
# If the wave and polarization vectors are not perpendicular, select a new
# perpendicular vector for the polarization.
if np.dot(k, e1) != 0:
if k[2] != 0 or (-k[0] - k[1]) != 0:
e1 = np.array([k[2], k[2], -k[0] - k[1]])
else:
e1 = np.array([-k[2] - k[1], k[0], k[0]])
self._value = value
photon.e1.value = e1
photon.e2.value = np.cross(e1, k)
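# Stand-alone sketch of the perpendicular-vector rule used in the setter above
# (example values are assumptions; the real setter also updates the Photon's
# e1 and e2 items rather than returning the vectors).
def _perpendicular_polarization(k, e1):
    k, e1 = np.asarray(k), np.asarray(e1)
    if np.dot(k, e1) != 0:
        if k[2] != 0 or (-k[0] - k[1]) != 0:
            e1 = np.array([k[2], k[2], -k[0] - k[1]])
        else:
            e1 = np.array([-k[2] - k[1], k[0], k[0]])
    return e1, np.cross(e1, k)

# For k = (1, 1, 1) and a previous e1 = (0, 1, 0), the rule picks e1 = (1, 1, -2)
# and e2 = e1 x k = (3, -3, 0); both are perpendicular to k.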
class FirstPolarization(LightVector):
def __init__(self, parent=None, name="First Polarization", value=None):
super().__init__(parent=parent, name=name, value=value)
@property
def value(self):
return self._value
@value.setter
def value(self, value):
if np.all(value == 0):
raise ValueError("The polarization vector cannot be null.")
photon = self.parent()
if np.dot(photon.k.value, value) != 0:
raise ValueError(
"The wave and polarization vectors need to be perpendicular."
)
self._value = value
photon.e2.value = np.cross(value, photon.k.value)
class SecondPolarization(LightVector):
def __init__(self, parent=None, name="Second Polarization", value=None):
super().__init__(parent=parent, name=name, value=value)
class Photon(BaseItem):
def __init__(self, parent=None, name="Photon"):
super().__init__(parent=parent, name=name)
self.k = WaveVector(parent=self, value=np.array([0, 0, 1]))
self.e1 = FirstPolarization(parent=self, value=np.array([0, 1, 0]))
self.e2 = SecondPolarization(parent=self, value=np.array([1, 0, 0]))
@property
def replacements(self):
return {
"WaveVector": self.k.replacements,
"FirstPolarization": self.e1.replacements,
"SecondPolarization": self.e2.replacements,
}
def copyFrom(self, item):
super().copyFrom(item)
self.k.copyFrom(item.k)
self.e1.copyFrom(item.e1)
self.e2.copyFrom(item.e2)
class IncidentPhoton(Photon):
def __init__(self, parent=None, name="Incident Photon"):
super().__init__(parent=parent, name=name)
class ScatteredPhoton(Photon):
def __init__(self, parent=None, name="Scattered Photon"):
super().__init__(parent=parent, name=name)
class Start(DoubleItem):
def __init__(self, parent=None, name="Start", value=None):
super().__init__(parent=parent, name=name)
self._value = value
@property
def value(self):
return self._value
@value.setter
def value(self, value):
stop = self.parent().stop
if stop.value is not None and value > stop.value:
raise ValueError(
"The lower energy limit cannot be larger than the upper limit."
)
self._value = value
class Stop(DoubleItem):
def __init__(self, parent=None, name="Stop", value=None):
super().__init__(parent=parent, name=name)
self._value = value
@property
def value(self):
return self._value
@value.setter
def value(self, value):
start = self.parent().start
if start.value is not None and value < start.value:
raise ValueError(
"The upper energy limit cannot be larger than the lower limit."
)
self._value = value
class NPoints(IntItem):
def __init__(self, parent=None, name="Number of Points", value=None):
super().__init__(parent=parent, name=name)
self._value = value
@property
def minimum(self):
axis = self.parent()
start, stop = axis.start, axis.stop
lorentzian = axis.lorentzian
return int(floor(stop.value - start.value) / lorentzian.value)
@property
def value(self):
return self._value
@value.setter
def value(self, value):
if value < self.minimum:
raise ValueError(
f"The number of points must be greater than {self.minimum}."
)
self._value = value
self.dataChanged.emit(1)
def reset(self):
self.value = self.minimum
class Shift(DoubleItem):
def __init__(self, parent=None, value=None):
super().__init__(parent=parent, name="User Defined Shift", value=value)
class Axis(BaseItem):
# pylint: disable=too-many-instance-attributes
def __init__(self, parent=None, name="Axis"):
super().__init__(parent=parent, name=name)
self.idx = 0
if name == "Y-axis":
self.idx = 1
# A user defined shift applied to the axis.
self.shift = Shift(parent=self, value=0.0)
start, stop = self.limits
self.start = Start(parent=self, value=start)
self.stop = Stop(parent=self, value=stop)
self.npoints = NPoints(parent=self, value=2000)
self.gaussian = Gaussian(parent=self, value=0.1)
self.lorentzian = Lorentzian(parent=self, value=self.coreholeWidth)
self.photon = None
@property
def interval(self):
# Checked and it is consistent with Quanty.
return np.abs(self.stop.value - self.start.value) / self.npoints.value
@property
def configuration(self):
# Get the configuration for the axis.
return self.ancestor.configurations[self.idx + 1]
@property
def zeroShift(self):
# Calculate the shift required to approximately align the edge to zero. The
# shift is calculated using the spin-orbit coupling parameter of the
# core subshell for the electronic configuration of the element in the
# 2+ oxidation state.
calculation = self.ancestor
element = calculation.element
chargeDelta = element.chargeDifference(charge="2+")
# Add the charge delta to the configuration string.
subshells = self.configuration.subshells
occupancies = list(self.configuration.occupancies)
occupancies[-1] = occupancies[-1] + chargeDelta
occupancies = tuple(occupancies)
baseConfigurationValue = ",".join(
f"{subshell}{occupancy}"
for subshell, occupancy in zip(subshells, occupancies)
)
if self.configuration.hasCore:
coreSubshell = self.configuration.subshells[0]
name = f"ζ({coreSubshell})"
path = resourceAbsolutePath(
os.path.join("quanty", "parameters", f"{element.symbol}.h5")
)
with h5py.File(path, "r") as h5:
try:
value = h5[baseConfigurationValue]["Atomic"][name][()]
except KeyError:
value = 0.0
factor = 0.5 if "p" in coreSubshell else 1.0
value *= factor
else:
# The value is zero for configurations without core-hole.
# These can be the final configurations RIXS or XES calculations.
value = 0.0
return value
@property
def limits(self):
calculation = self.ancestor
STEP = 10
if calculation.experiment.isEmission:
limits = [-3 * STEP, STEP]
else:
label = calculation.edge.labels[self.idx]
if label in ("K", "L1", "M1", "N1"):
limits = [-STEP, STEP]
else:
subshell = calculation.element.valenceSubshell
if subshell in ("3d",):
limits = [-STEP, 3 * STEP]
else:
limits = [-STEP, 2 * STEP]
# Shift the limits to focus on the lower-energy edge or line.
limits = [limit - self.zeroShift for limit in limits]
shift = 0.0
shiftSpectra = settings.value("Quanty/ShiftSpectra", type=bool)
if shiftSpectra:
shift = self.zeroShift + self.experimentalShift
# Update and round the limits.
return [round(limit + shift, 0) for limit in limits]
@property
def experimentalShift(self):
"""Experimental edges/lines energies."""
calculation = self.ancestor
label = calculation.edge.labels[self.idx]
if calculation.experiment.isEmission:
energy = XDB.xray_lines(calculation.element.symbol)[label].energy
else:
try:
energy = XDB.xray_edges(calculation.element.symbol)[label].energy
except (TypeError, AttributeError, KeyError):
energy = 0.0
logger.debug(
"%s %s %s %s",
calculation.element,
calculation.experiment,
calculation.edge,
label,
)
return energy
@property
def coreholeWidth(self):
calculation = self.ancestor
label = calculation.edge.labels[self.idx]
coreholeWidth = XDB.corehole_width(calculation.element.symbol, label)
try:
coreholeWidth = float(coreholeWidth)
except TypeError:
coreholeWidth = 0.1
coreholeWidth = max(coreholeWidth, 0.1)
return round(coreholeWidth, 2)
@property
def replacements(self):
replacements = {}
replacements["Emin"] = self.start.value
replacements["Emax"] = self.stop.value
replacements["NPoints"] = self.npoints.value
replacements["ZeroShift"] = self.zeroShift
replacements["ExperimentalShift"] = self.experimentalShift
# The Gaussian broadening is done in the interface, but we still
# want the user to easily change this value if the script is run from
# outside.
replacements.update(self.lorentzian.replacements)
replacements.update(self.gaussian.replacements)
replacements.update(self.photon.replacements)
prefix = self.name[0]
replacements = {prefix + name: value for name, value in replacements.items()}
return replacements
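# e.g. for the X-axis the keys above are prefixed with "X", producing replacement
# names such as "XEmin", "XEmax", "XNPoints", "XGamma" and "XWaveVector".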
def copyFrom(self, item):
super().copyFrom(item)
self.idx = copy.deepcopy(item.idx)
self.start.copyFrom(item.start)
self.stop.copyFrom(item.stop)
self.npoints.copyFrom(item.npoints)
self.gaussian.copyFrom(item.gaussian)
self.lorentzian.copyFrom(item.lorentzian)
self.shift.copyFrom(item.shift)
self.photon.copyFrom(item.photon)
class XAxis(Axis):
def __init__(self, parent=None, name="X-axis"):
super().__init__(parent=parent, name=name)
self.photon = IncidentPhoton(parent=self)
@property
def label(self):
calculation = self.ancestor
value = calculation.experiment.value
if value == "XAS":
return "Absorption Energy"
if value == "XES":
return "Emission Energy"
if value == "XPS":
return "Binding Energy"
return "Incident Energy"
class YAxis(Axis):
def __init__(self, parent=None, name="Y-axis"):
super().__init__(parent=parent, name=name)
self.photon = ScatteredPhoton(parent=self)
@property
def label(self):
return "Energy Transfer"
class Axes(BaseItem):
def __init__(self, parent=None, name="Axes"):
super().__init__(parent=parent, name=name)
self.scale = DoubleItem(parent=self, name="Scale Factor", value=1.0)
self.normalization = ComboItem(parent=self, name="Normalization", value="None")
self.normalization.items = ["None", "Maximum", "Area"]
self.xaxis = XAxis(parent=self)
calculation = self.ancestor
self.labels = [f"{self.xaxis.label} (eV)", "Intensity (a.u.)"]
if calculation.experiment.isTwoDimensional:
self.xaxis.npoints.reset()
self.xaxis.lorentzian.dataChanged.connect(self.xaxis.npoints.reset)
self.yaxis = YAxis(parent=self)
self.labels = [f"{l} (eV)" for l in (self.xaxis.label, self.yaxis.label)]
def copyFrom(self, item):
super().copyFrom(item)
self.scale.copyFrom(item.scale)
self.normalization.copyFrom(item.normalization)
self.xaxis.copyFrom(item.xaxis)
if getattr(self, "yaxis", None) is not None:
self.yaxis.copyFrom(item.yaxis)
|
{
"content_hash": "6776d7710518ebba607baba343f1351a",
"timestamp": "",
"source": "github",
"line_count": 505,
"max_line_length": 87,
"avg_line_length": 31.524752475247524,
"alnum_prop": 0.5912688442211055,
"repo_name": "mretegan/crispy",
"id": "7d2c8c942e774834e96d5c4dea67dfb4bd295e49",
"size": "16465",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "crispy/quanty/axes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "204589"
},
{
"name": "Shell",
"bytes": "4595"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function
import warnings
from distutils.version import LooseVersion
import pandas as pd
import numpy as np
from ..core import DataFrame, Series
from ...base import tokenize
def getnanos(rule):
try:
return getattr(rule, 'nanos', None)
except ValueError:
return None
if LooseVersion(pd.__version__) >= '0.18.0':
def _resample_apply(s, rule, how, resample_kwargs):
return getattr(s.resample(rule, **resample_kwargs), how)()
def _resample(obj, rule, how, **kwargs):
resampler = Resampler(obj, rule, **kwargs)
if how is not None:
warnings.warn(FutureWarning(("how in .resample() is deprecated; "
"the new syntax is .resample(...)"
".{0}()").format(how)))
return getattr(resampler, how)()
return resampler
else:
def _resample_apply(s, rule, how, resample_kwargs):
return s.resample(rule, how=how, **resample_kwargs)
def _resample(obj, rule, how, **kwargs):
how = how or 'mean'
return getattr(Resampler(obj, rule, **kwargs), how)()
def _resample_series(series, start, end, reindex_closed, rule,
resample_kwargs, how, fill_value):
out = _resample_apply(series, rule, how, resample_kwargs)
return out.reindex(pd.date_range(start, end, freq=rule,
closed=reindex_closed),
fill_value=fill_value)
def _resample_bin_and_out_divs(divisions, rule, closed='left', label='left'):
rule = pd.datetools.to_offset(rule)
g = pd.TimeGrouper(rule, how='count', closed=closed, label=label)
# Determine bins to apply `how` to. Disregard labeling scheme.
divs = pd.Series(range(len(divisions)), index=divisions)
temp = divs.resample(rule, how='count', closed=closed, label='left')
tempdivs = temp.loc[temp > 0].index
# Cleanup closed == 'right' and label == 'right'
res = pd.offsets.Nano() if hasattr(rule, 'delta') else pd.offsets.Day()
if g.closed == 'right':
newdivs = tempdivs + res
else:
newdivs = tempdivs
if g.label == 'right':
outdivs = tempdivs + rule
else:
outdivs = tempdivs
newdivs = newdivs.tolist()
outdivs = outdivs.tolist()
# Adjust ends
if newdivs[0] < divisions[0]:
newdivs[0] = divisions[0]
if newdivs[-1] < divisions[-1]:
if len(newdivs) < len(divs):
setter = lambda a, val: a.append(val)
else:
setter = lambda a, val: a.__setitem__(-1, val)
setter(newdivs, divisions[-1])
if outdivs[-1] > divisions[-1]:
setter(outdivs, outdivs[-1])
elif outdivs[-1] < divisions[-1]:
setter(outdivs, temp.index[-1])
return tuple(map(pd.Timestamp, newdivs)), tuple(map(pd.Timestamp, outdivs))
class Resampler(object):
def __init__(self, obj, rule, **kwargs):
self.obj = obj
rule = pd.datetools.to_offset(rule)
day_nanos = pd.datetools.Day().nanos
if getnanos(rule) and day_nanos % rule.nanos:
raise NotImplementedError('Resampling frequency %s that does'
' not evenly divide a day is not '
'implemented' % rule)
self._rule = rule
self._kwargs = kwargs
def _agg(self, how, columns=None, fill_value=np.nan):
rule = self._rule
kwargs = self._kwargs
name = 'resample-' + tokenize(self.obj, rule, kwargs, how)
# Create a grouper to determine closed and label conventions
newdivs, outdivs = _resample_bin_and_out_divs(self.obj.divisions, rule,
**kwargs)
# Repartition divs into bins. These won't match labels after mapping
partitioned = self.obj.repartition(newdivs, force=True)
keys = partitioned._keys()
dsk = partitioned.dask
args = zip(keys, outdivs, outdivs[1:], ['left']*(len(keys)-1) + [None])
for i, (k, s, e, c) in enumerate(args):
dsk[(name, i)] = (_resample_series, k, s, e, c,
rule, kwargs, how, fill_value)
if columns:
return DataFrame(dsk, name, columns, outdivs)
return Series(dsk, name, self.obj.name, outdivs)
def count(self):
return self._agg('count', fill_value=0)
def first(self):
return self._agg('first')
def last(self):
return self._agg('last')
def mean(self):
return self._agg('mean')
def min(self):
return self._agg('min')
def median(self):
return self._agg('median')
def max(self):
return self._agg('max')
def ohlc(self):
return self._agg('ohlc', columns=['open', 'high', 'low', 'close'])
def prod(self):
return self._agg('prod')
def sem(self):
return self._agg('sem')
def std(self):
return self._agg('std')
def sum(self):
return self._agg('sum')
def var(self):
return self._agg('var')
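# Usage sketch (not part of the module): Resampler is normally reached through a
# dask Series' .resample() accessor rather than constructed directly. The data and
# frequencies below are assumptions for illustration.
if __name__ == '__main__':
    import dask.dataframe as dd
    index = pd.date_range('2016-01-01', periods=96, freq='15T')
    ddf = dd.from_pandas(pd.Series(range(96), index=index), npartitions=4)
    # Partitions are first re-binned on rule boundaries (_resample_bin_and_out_divs)
    # and `mean` is then applied per output division.
    print(ddf.resample('1H').mean().compute().head())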
|
{
"content_hash": "fc5aab2d5808b84b960c8f408e10203b",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 79,
"avg_line_length": 31.906832298136646,
"alnum_prop": 0.569398481604049,
"repo_name": "mikegraham/dask",
"id": "356348e0053eb0ea247c7dec942ec33c715aaeb6",
"size": "5137",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dask/dataframe/tseries/resample.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1187699"
}
],
"symlink_target": ""
}
|
from webmarks.storage import viewsets
from django.conf.urls import include
from django.conf.urls import url
from rest_framework import routers
apiRouter = routers.DefaultRouter()
# apiRouter.register(r'upload', viewsets.FileUploaderViewSet)
# apiRouter.register(r'crawler', viewsets.CrawlerViewSet, base_name='crawler')
# apiRouter.register(r'archives', viewsets.ArchiveViewSet, base_name='archive')
urlpatterns = [
# API V1
# url(r'v1/', include(apiRouter.urls, namespace='external_apis')),
]
|
{
"content_hash": "a88a68d7d011fa920707d7a0acaf6410",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 79,
"avg_line_length": 36,
"alnum_prop": 0.7718253968253969,
"repo_name": "EricMuller/mynotes-backend",
"id": "24c107c519387b78fac96338611795bc84db40e5",
"size": "504",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "apps/webmarks/storage/urls.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "11880"
},
{
"name": "Batchfile",
"bytes": "3516"
},
{
"name": "C",
"bytes": "37168"
},
{
"name": "CSS",
"bytes": "6613"
},
{
"name": "DIGITAL Command Language",
"bytes": "1032"
},
{
"name": "GAP",
"bytes": "36244"
},
{
"name": "HTML",
"bytes": "233863"
},
{
"name": "Makefile",
"bytes": "6766"
},
{
"name": "Nginx",
"bytes": "998"
},
{
"name": "Objective-C",
"bytes": "2584"
},
{
"name": "Python",
"bytes": "22991176"
},
{
"name": "Roff",
"bytes": "160293"
},
{
"name": "Shell",
"bytes": "13496"
},
{
"name": "Smarty",
"bytes": "1366"
}
],
"symlink_target": ""
}
|
from supermonkey import Monkey
class JanitorMonkey(Monkey):
def __init__(self, config_file, scheduler):
self.config_name = 'janitor'
super(JanitorMonkey, self).__init__(config_file, scheduler)
|
{
"content_hash": "33a9a08d232f9e454c4a084326533bcd",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 67,
"avg_line_length": 30.714285714285715,
"alnum_prop": 0.6837209302325581,
"repo_name": "bogdanap/pySimians",
"id": "f241f9ddd886cde50a7f38461f076cfdaf5111b9",
"size": "215",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "monkeys/janitor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9466"
},
{
"name": "Shell",
"bytes": "3303"
}
],
"symlink_target": ""
}
|
import os
from setuptools import setup
README = open(os.path.join(os.path.dirname(__file__), 'README.md')).read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='django-easy-split',
version='0.2.0',
packages=['easy_split'],
install_requires=[],
include_package_data=True,
license='MIT License',
description='Super easy split testing for your Django projects.',
long_description=README,
url='https://github.com/Miserlou/django-easy-split',
author='Rich Jones',
author_email='rich@openwatch.net',
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
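# Usage sketch (not part of the package): build a source distribution with
#   python setup.py sdist
# which produces dist/django-easy-split-0.2.0.tar.gz, or install the checkout in
# editable mode for development with `pip install -e .` from the project root.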
|
{
"content_hash": "28c6291a13f6706b1d2a0957598217a7",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 78,
"avg_line_length": 33.96969696969697,
"alnum_prop": 0.6297948260481713,
"repo_name": "Miserlou/django-easy-split",
"id": "6994c365852d8634320d468ace1ee67a5239a706",
"size": "1121",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "2013"
},
{
"name": "Python",
"bytes": "150341"
}
],
"symlink_target": ""
}
|
import numpy as np
from .base import LinearClassifierMixin, SparseCoefMixin
from ..feature_selection.selector_mixin import SelectorMixin
from ..svm.base import BaseLibLinear
class LogisticRegression(BaseLibLinear, LinearClassifierMixin, SelectorMixin,
SparseCoefMixin):
"""Logistic Regression (aka logit, MaxEnt) classifier.
In the multiclass case, the training algorithm uses a one-vs.-all (OvA)
scheme, rather than the "true" multinomial LR.
This class implements L1 and L2 regularized logistic regression using the
`liblinear` library. It can handle both dense and sparse input. Use
C-ordered arrays or CSR matrices containing 64-bit floats for optimal
performance; any other input format will be converted (and copied).
Parameters
----------
penalty : string, 'l1' or 'l2'
Used to specify the norm used in the penalization.
dual : boolean
Dual or primal formulation. Dual formulation is only
implemented for l2 penalty. Prefer dual=False when
n_samples > n_features.
C : float, optional (default=1.0)
Inverse of regularization strength; must be a positive float.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
added to the decision function.
intercept_scaling : float, default: 1
When self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight.
Note: the synthetic feature weight is subject to l1/l2 regularization
like all other features.
To lessen the effect of regularization on the synthetic feature weight
(and therefore on the intercept), intercept_scaling has to be increased.
class_weight : {dict, 'auto'}, optional
Set the parameter C of class i to class_weight[i]*C.
If not given, all classes are supposed to have
weight one. The 'auto' mode uses the values of y to
automatically adjust weights inversely proportional to
class frequencies.
tol: float, optional
Tolerance for stopping criteria.
Attributes
----------
`coef_` : array, shape = [n_classes-1, n_features]
Coefficient of the features in the decision function.
`coef_` is readonly property derived from `raw_coef_` that \
follows the internal memory layout of liblinear.
`intercept_` : array, shape = [n_classes-1]
Intercept (a.k.a. bias) added to the decision function.
It is available only when parameter intercept is set to True.
random_state: int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
See also
--------
LinearSVC
Notes
-----
The underlying C implementation uses a random number generator to
select features when fitting the model. It is thus not uncommon,
to have slightly different results for the same input data. If
that happens, try with a smaller tol parameter.
References:
LIBLINEAR -- A Library for Large Linear Classification
http://www.csie.ntu.edu.tw/~cjlin/liblinear/
Hsiang-Fu Yu, Fang-Lan Huang, Chih-Jen Lin (2011). Dual coordinate descent
methods for logistic regression and maximum entropy models.
Machine Learning 85(1-2):41-75.
http://www.csie.ntu.edu.tw/~cjlin/papers/maxent_dual.pdf
"""
def __init__(self, penalty='l2', dual=False, tol=1e-4, C=1.0,
fit_intercept=True, intercept_scaling=1, class_weight=None,
random_state=None):
super(LogisticRegression, self).__init__(
penalty=penalty, dual=dual, loss='lr', tol=tol, C=C,
fit_intercept=fit_intercept, intercept_scaling=intercept_scaling,
class_weight=class_weight, random_state=random_state)
def predict_proba(self, X):
"""Probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in ``self.classes_``.
"""
return self._predict_proba_lr(X)
def predict_log_proba(self, X):
"""Log of probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in ``self.classes_``.
"""
return np.log(self.predict_proba(X))
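# Usage sketch (illustrative, not part of scikit-learn): fit an L2-regularized model
# on a tiny invented dataset and inspect class probabilities.
if __name__ == '__main__':
    X = np.array([[0.0], [1.0], [2.0], [3.0]])
    y = np.array([0, 0, 1, 1])
    clf = LogisticRegression(penalty='l2', C=1.0).fit(X, y)
    # Rows of predict_proba sum to one; column order follows clf.classes_.
    print(clf.predict_proba(X).round(2))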
|
{
"content_hash": "9d5cb59de9eecd3fbdd297a9ce3c5d91",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 78,
"avg_line_length": 37.652482269503544,
"alnum_prop": 0.6553023168204934,
"repo_name": "kmike/scikit-learn",
"id": "74abcf7c8a57ac38a909a235525958a25ab162d8",
"size": "5309",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sklearn/linear_model/logistic.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "11070763"
},
{
"name": "C++",
"bytes": "257092"
},
{
"name": "JavaScript",
"bytes": "4775"
},
{
"name": "Python",
"bytes": "3808272"
},
{
"name": "Shell",
"bytes": "3770"
}
],
"symlink_target": ""
}
|
"""This is a simple HTTP server for manually testing exponential
back-off functionality in Chrome.
"""
import BaseHTTPServer
import sys
import urlparse
class RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
keep_running = True
def do_GET(self):
if self.path == '/quitquitquit':
self.send_response(200)
self.send_header('Content-Type', 'text/plain')
self.end_headers()
self.wfile.write('QUITTING')
RequestHandler.keep_running = False
return
params = urlparse.parse_qs(urlparse.urlparse(self.path).query)
if not params or not 'code' in params or params['code'][0] == '200':
self.send_response(200)
self.send_header('Content-Type', 'text/plain')
self.end_headers()
self.wfile.write('OK')
else:
self.send_error(int(params['code'][0]))
def main():
if len(sys.argv) != 2:
print "Usage: %s PORT" % sys.argv[0]
sys.exit(1)
port = int(sys.argv[1])
print "To stop the server, go to http://localhost:%d/quitquitquit" % port
httpd = BaseHTTPServer.HTTPServer(('', port), RequestHandler)
while RequestHandler.keep_running:
httpd.handle_request()
if __name__ == '__main__':
main()
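# Client-side sketch (not part of the server): exercise the handler above from
# another process. The port is an assumption; start the server first with
# `python backoff_server.py 8080`.
import urllib2

def probe(port, code=None):
    url = 'http://localhost:%d/' % port
    if code is not None:
        url += '?code=%d' % code
    try:
        return urllib2.urlopen(url).getcode()   # 200 for OK responses
    except urllib2.HTTPError as e:
        return e.code                           # the requested error status

# probe(8080) -> 200, probe(8080, 503) -> 503; requesting /quitquitquit stops the server.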
|
{
"content_hash": "6404ea279f1bff80197d323c061b9e2a",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 75,
"avg_line_length": 26.425531914893618,
"alnum_prop": 0.6352657004830918,
"repo_name": "meego-tablet-ux/meego-app-browser",
"id": "4188cd103897b60c0e6fbb049af674aa46d15491",
"size": "1433",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "net/tools/testserver/backoff_server.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "5599"
},
{
"name": "AppleScript",
"bytes": "6772"
},
{
"name": "Assembly",
"bytes": "1871"
},
{
"name": "C",
"bytes": "1646303"
},
{
"name": "C++",
"bytes": "72324607"
},
{
"name": "CSS",
"bytes": "221604"
},
{
"name": "Diff",
"bytes": "11193"
},
{
"name": "Go",
"bytes": "3744"
},
{
"name": "HTML",
"bytes": "21930015"
},
{
"name": "Java",
"bytes": "11354"
},
{
"name": "JavaScript",
"bytes": "5339242"
},
{
"name": "Makefile",
"bytes": "2412"
},
{
"name": "Objective-C",
"bytes": "691329"
},
{
"name": "Objective-C++",
"bytes": "3786548"
},
{
"name": "PHP",
"bytes": "97796"
},
{
"name": "PLpgSQL",
"bytes": "70415"
},
{
"name": "Perl",
"bytes": "63704"
},
{
"name": "Protocol Buffer",
"bytes": "96399"
},
{
"name": "Python",
"bytes": "2296716"
},
{
"name": "QML",
"bytes": "452612"
},
{
"name": "QMake",
"bytes": "435"
},
{
"name": "Shell",
"bytes": "200146"
}
],
"symlink_target": ""
}
|
"""
Auto-generated class for Cat
"""
from six import string_types
from . import client_support
class Cat(object):
"""
auto-generated. don't touch.
"""
@staticmethod
def create(**kwargs):
"""
:type kind: string_types
:rtype: Cat
"""
return Cat(**kwargs)
def __init__(self, json=None, **kwargs):
if json is None and not kwargs:
raise ValueError('No data or kwargs present')
class_name = 'Cat'
data = json or kwargs
# set attributes
data_types = [string_types]
self.kind = client_support.set_property('kind', data, data_types, False, [], False, True, class_name)
def __str__(self):
return self.as_json(indent=4)
def as_json(self, indent=0):
return client_support.to_json(self, indent=indent)
def as_dict(self):
return client_support.to_dict(self)
|
{
"content_hash": "230ab8f68eae63516aa2d7ca7c00cb1e",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 109,
"avg_line_length": 22.317073170731707,
"alnum_prop": 0.5792349726775956,
"repo_name": "Jumpscale/go-raml",
"id": "1340006bd93fd79f795064bb6affdea407a24abf",
"size": "996",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "codegen/python/fixtures/class/Cat.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "235840"
},
{
"name": "Cap'n Proto",
"bytes": "3900"
},
{
"name": "Go",
"bytes": "409613"
},
{
"name": "HTML",
"bytes": "3119"
},
{
"name": "JavaScript",
"bytes": "4324203"
},
{
"name": "Lua",
"bytes": "3255"
},
{
"name": "Makefile",
"bytes": "527"
},
{
"name": "Nim",
"bytes": "24445"
},
{
"name": "Python",
"bytes": "146272"
},
{
"name": "RAML",
"bytes": "188859"
},
{
"name": "Shell",
"bytes": "728"
}
],
"symlink_target": ""
}
|
import os
import sys
import subprocess
import tempfile
import time
import Qt
from Qt import QtCore, QtWidgets
import imath
import IECore
import Gaffer
import GafferScene
import GafferUI
import GafferSceneUI
# Delay for x seconds
def __delay( delay ) :
endtime = time.time() + delay
while time.time() < endtime :
GafferUI.EventLoop.waitForIdle( 1 )
# Create a random directory in `/tmp` for the dispatcher's `jobsDirectory`, so we don't clutter the user's `~gaffer` directory
temporaryDirectory = tempfile.mkdtemp( prefix = "gafferDocs" )
def __getTempFilePath( fileName, directory = temporaryDirectory ) :
filePath = "/".join( ( directory, fileName ) )
return filePath
def __dispatchScript( script, tasks, settings ) :
command = "gaffer dispatch -script {} -tasks {} -dispatcher Local -settings {} -dispatcher.jobsDirectory '\"{}/dispatcher/local\"'".format(
script,
" ".join( tasks ),
" ".join( settings ),
temporaryDirectory
)
subprocess.check_call( command, shell = True )
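# For example, __dispatchScript( "scripts/demo.gfr", [ "ImageWriter" ],
# [ "-ImageWriter.fileName '\"/tmp/out.png\"'" ] ) runs a command of the form
#   gaffer dispatch -script scripts/demo.gfr -tasks ImageWriter -dispatcher Local
#     -settings -ImageWriter.fileName '"/tmp/out.png"'
#     -dispatcher.jobsDirectory '"<temporaryDirectory>/dispatcher/local"'
# where the script and image paths are placeholders and <temporaryDirectory> is the
# random temp directory created above.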
# Create a plug context menu from a Node Editor
def __spawnPlugContextMenu( nodeEditor, plugWidget ) :
plugWidget._PlugValueWidget__contextMenu()
plugWidget._PlugValueWidget__popupMenu.setVisible( False )
contextMenuWidget = plugWidget._PlugValueWidget__popupMenu._qtWidget()
contextMenuWidget.popup(
QtCore.QPoint(
mainWindow._qtWidget().geometry().x(),
mainWindow._qtWidget().geometry().y()
)
)
return contextMenuWidget
# Find a target action in a plug context menu and highlight it
def __selectPlugContextMenuAction( contextMenuWidget, targetActionName ) :
actions = contextMenuWidget.actions()
targetAction = None
for action in actions :
text = action.text()
if text == targetActionName :
targetAction = action
contextMenuWidget.setActiveAction( targetAction )
__delay(0.1)
actionWidget = targetAction.parent()
return actionWidget
# Screengrab a plug context menu and submenu
def __grabPlugContextSubmenu( plugWidget, contextMenuWidget, submenuWidget, menuPath, submenuPath ) :
screen = QtWidgets.QApplication.primaryScreen()
windowHandle = plugWidget._qtWidget().windowHandle()
if windowHandle :
screen = windowHandle.screen()
qtVersion = [ int( x ) for x in Qt.__qt_version__.split( "." ) ]
pixmapMain = screen.grabWindow( mainWindow._qtWidget().winId() )
## Screengrab the context menu. The frame dimensions are too big by
# one pixel on each axis.
menuScreenPos = QtCore.QPoint( 0, 0 )
if sys.platform == "darwin" :
menuScreenPos = QtCore.QPoint(
mainWindow._qtWidget().geometry().x(),
mainWindow._qtWidget().geometry().y()
)
menuSize = QtCore.QSize(
contextMenuWidget.frameGeometry().width() - 1,
contextMenuWidget.frameGeometry().height() - 1
)
menuRect = QtCore.QRect( menuScreenPos, menuSize )
pixmap = pixmapMain.copy( menuRect )
pixmap.save( menuPath )
## Screengrab the sub-menu
submenuScreenPos = submenuWidget.pos()
if sys.platform != "darwin" :
submenuScreenPos = submenuScreenPos - contextMenuWidget.pos()
submenuSize = QtCore.QSize(
submenuWidget.frameGeometry().width() - 1,
submenuWidget.frameGeometry().height() - 1
)
submenuRect = QtCore.QRect( submenuScreenPos, submenuSize )
pixmap = pixmapMain.copy( submenuRect )
pixmap.save( submenuPath )
# Default layout's editors
mainWindow = GafferUI.ScriptWindow.acquire( script )
viewer = mainWindow.getLayout().editors( GafferUI.Viewer )[0]
graphEditor = mainWindow.getLayout().editors( GafferUI.GraphEditor )[0]
nodeEditor = mainWindow.getLayout().editors( GafferUI.NodeEditor )[0]
sceneInspector = mainWindow.getLayout().editors( GafferSceneUI.SceneInspector )[0]
hierarchyView = mainWindow.getLayout().editors( GafferSceneUI.HierarchyView )[0]
pythonEditor = mainWindow.getLayout().editors( GafferUI.PythonEditor )[0]
# Interface: A Spreadsheet node in the Graph Editor
imageName = "interfaceSpreadsheetNode"
imagePath = os.path.abspath( "images/{imageName}.png".format( imageName = imageName ) )
script["Spreadsheet"] = Gaffer.Spreadsheet()
with GafferUI.Window() as window :
graphEditorWindow = GafferUI.GraphEditor( script )
graphEditorWindow.parent().reveal()
graphEditorWindow.parent()._qtWidget().resize( 400, 100 )
__delay( 0.1 )
graphEditorWindow.frame( Gaffer.StandardSet( [ script["Spreadsheet"] ] ) )
__delay( 0.1 )
GafferUI.WidgetAlgo.grab( widget = graphEditorWindow, imagePath = imagePath )
graphEditorWindow.parent().close()
del graphEditorWindow
# Interface: Spreadsheet node with full name in Graph Editor
imageName = "interfaceSpreadsheetNodeFullName"
imagePath = os.path.abspath( "images/{imageName}.png".format( imageName = imageName ) )
Gaffer.Metadata.registerValue( script["Spreadsheet"], 'nodeGadget:type', 'GafferUI::StandardNodeGadget' )
with GafferUI.Window() as window :
graphEditorWindow = GafferUI.GraphEditor( script )
graphEditorWindow.parent().reveal()
graphEditorWindow.parent()._qtWidget().resize( 300, 100 )
__delay( 0.1 )
graphEditorWindow.frame( Gaffer.StandardSet( [ script["Spreadsheet"] ] ) )
__delay( 0.1 )
GafferUI.WidgetAlgo.grab( widget = graphEditorWindow, imagePath = imagePath )
graphEditorWindow.parent().close()
del graphEditorWindow
# Interface: The Spreadsheet node's interface in a Node Editor
imageName = "interfaceSpreadsheetNodeInterface"
imagePathInterface = os.path.abspath( "images/{imageName}.png".format( imageName = imageName ) )
script["fileName"].setValue( os.path.abspath( "scripts/{scriptName}.gfr".format( scriptName = imageName ) ) )
script.load()
__delay( 0.1 )
nodeEditorWindow = GafferUI.NodeEditor.acquire( script["Spreadsheet"], floating = True )
nodeEditorWindow._qtWidget().setFocus()
GafferUI.WidgetAlgo.grab( widget = nodeEditorWindow, imagePath = imagePathInterface )
nodeEditorWindow.parent().close()
del nodeEditorWindow
# Interface: Render options network before and after Spreadsheet node
imageName = "interfaceSpreadsheetNodeRenderNetwork"
tempImagePath1 = __getTempFilePath( "{tempName}.png".format( tempName = imageName + "Before" ) )
tempImagePath2 = __getTempFilePath( "{tempName}.png".format( tempName = imageName + "After" ) )
imagePath = os.path.abspath( "images/{imageName}.png".format( imageName = imageName ) )
script["fileName"].setValue( os.path.abspath( "scripts/{scriptName}.gfr".format( scriptName = imageName ) ) )
script.load()
__delay( 0.1 )
with GafferUI.Window() as window :
graphEditorWindow = GafferUI.GraphEditor( script )
#graphEditorWindow.parent()._qtWidget().setWindowFlags( QtCore.Qt.WindowFlags( QtCore.Qt.WindowStaysOnTopHint ) )
graphEditorWindow.parent().reveal()
graphEditorWindow.parent()._qtWidget().resize( 800, 400 )
__delay( 0.1 )
graphEditorWindow.frame( Gaffer.StandardSet( [ script["Backdrop_OptionsBefore"] ] ) )
__delay( 0.1 )
GafferUI.WidgetAlgo.grab( widget = graphEditorWindow, imagePath = tempImagePath1 )
graphEditorWindow.frame( Gaffer.StandardSet( [ script["Backdrop_OptionsAfter"] ] ) )
__delay( 0.1 )
GafferUI.WidgetAlgo.grab( widget = graphEditorWindow, imagePath = tempImagePath2 )
__dispatchScript(
script = "scripts/{scriptName}_edit.gfr".format( scriptName = imageName ),
tasks = [ "ImageWriter" ],
settings = [
"-ImageReader_Before.fileName '\"{tempPath}\"'".format( tempPath = tempImagePath1 ),
"-ImageReader_After.fileName '\"{tempPath}\"'".format( tempPath = tempImagePath2 ),
"-ImageWriter.fileName '\"{imagePath}\"'".format( imagePath = imagePath )
]
)
# Interface: Breakdown of the Spreadsheet node's interface in a Node Editor
imageName = "interfaceSpreadsheetNodeBreakdown"
tempImagePath = imagePathInterface
imagePath = os.path.abspath( "images/{imageName}.png".format( imageName = imageName ) )
__dispatchScript(
script = "scripts/{scriptName}_edit.gfr".format( scriptName = imageName ),
tasks = [ "ImageWriter" ],
settings = [
"-ImageReader.fileName '\"{imagePath}\"'".format( imagePath = tempImagePath ),
"-ImageWriter.fileName '\"{imagePath}\"'".format( imagePath = imagePath )
]
)
# Interface: Spreadsheet node's auxiliary connections
imageName = "interfaceSpreadsheetNodeAuxiliaryConnections"
imagePath = os.path.abspath( "images/{imageName}.png".format( imageName = imageName ) )
script["fileName"].setValue( os.path.abspath( "scripts/{scriptName}.gfr".format( scriptName = imageName ) ) )
script.load()
__delay( 0.1 )
with GafferUI.Window() as window :
graphEditorWindow = GafferUI.GraphEditor( script )
graphEditorWindow.parent().reveal()
graphEditorWindow.parent()._qtWidget().resize( 300, 200 )
__delay( 0.1 )
graphEditorWindow.frame( Gaffer.StandardSet( [ script["Dot"] ] ) )
script.removeChild( script["Dot"] )
__delay( 0.1 )
GafferUI.WidgetAlgo.grab( widget = graphEditorWindow, imagePath = imagePath )
graphEditorWindow.parent().close()
del graphEditorWindow
# Task: Add a basic plug
imageName = "taskSpreadsheetNodeAddPlugBasic"
tempImagePathEditor = __getTempFilePath( "{tempName}.png".format( tempName = imageName + "Editor" ) )
tempImagePathMenu = __getTempFilePath( "{tempName}.png".format( tempName = imageName + "Menu" ) )
tempImagePathSubmenu = __getTempFilePath( "{tempName}.png".format( tempName = imageName + "Submenu" ) )
imagePath = os.path.abspath( "images/{imageName}.png".format( imageName = imageName ) )
script["Sphere"] = GafferScene.Sphere()
script["Spreadsheet"] = Gaffer.Spreadsheet()
# Screengrab the Node Editor
nodeEditorWindow = GafferUI.NodeEditor.acquire( script["Sphere"], floating = True )
nodeEditorWindow._qtWidget().setFocus()
plugWidget = GafferUI.PlugValueWidget.acquire( script["Sphere"]["radius"] )
__delay( 0.1 )
GafferUI.WidgetAlgo.grab( widget = nodeEditorWindow, imagePath = tempImagePathEditor )
# Spawn the context menu
contextMenuWidget = __spawnPlugContextMenu(
nodeEditor = nodeEditorWindow,
plugWidget = plugWidget
)
# Find the target action in the menu and highlight it
actionWidget = __selectPlugContextMenuAction(
contextMenuWidget = contextMenuWidget,
targetActionName = "Add to Spreadsheet"
)
# Screengrab the menu and submenu, get submenu position
submenuOrigin = __grabPlugContextSubmenu(
plugWidget = plugWidget,
contextMenuWidget = contextMenuWidget,
submenuWidget = actionWidget,
menuPath = tempImagePathMenu,
submenuPath = tempImagePathSubmenu
)
contextMenuWidget.close()
nodeEditorWindow.parent().close()
del nodeEditorWindow
__dispatchScript(
script = "scripts/{scriptName}_edit.gfr".format( scriptName = imageName ),
tasks = [ "ImageWriter" ],
settings = [
"-ImageReader_Editor.fileName '\"{imagePath}\"'".format( imagePath = tempImagePathEditor ),
"-ImageReader_Menu.fileName '\"{imagePath}\"'".format( imagePath = tempImagePathMenu ),
"-ImageReader_Submenu.fileName '\"{imagePath}\"'".format( imagePath = tempImagePathSubmenu ),
"-ImageWriter.fileName '\"{imagePath}\"'".format( imagePath = imagePath )
]
)
# Task: Add a vector plug (whole)
imageName = "taskSpreadsheetNodeAddPlugVectorWhole"
tempImagePathEditor = __getTempFilePath( "{tempName}.png".format( tempName = imageName + "Editor" ) )
tempImagePathMenu = __getTempFilePath( "{tempName}.png".format( tempName = imageName + "Menu" ) )
tempImagePathSubmenu = __getTempFilePath( "{tempName}.png".format( tempName = imageName + "Submenu" ) )
imagePath = os.path.abspath( "images/{imageName}.png".format( imageName = imageName ) )
# Screengrab the Node Editor
nodeEditorWindow = GafferUI.NodeEditor.acquire( script["Sphere"], floating = True )
nodeEditorWindow._qtWidget().setFocus()
plugWidget = GafferUI.PlugValueWidget.acquire( script["Sphere"]["transform"]["rotate"] )
__delay( 0.1 )
GafferUI.WidgetAlgo.grab( widget = nodeEditorWindow, imagePath = tempImagePathEditor )
# Spawn the context menu
contextMenuWidget = __spawnPlugContextMenu(
nodeEditor = nodeEditorWindow,
plugWidget = plugWidget
)
# Find the target action in the menu and highlight it
actionWidget = __selectPlugContextMenuAction(
contextMenuWidget = contextMenuWidget,
targetActionName = "Add to Spreadsheet"
)
# Screengrab the menu and submenu, get submenu position
submenuOrigin = __grabPlugContextSubmenu(
plugWidget = plugWidget,
contextMenuWidget = contextMenuWidget,
submenuWidget = actionWidget,
menuPath = tempImagePathMenu,
submenuPath = tempImagePathSubmenu
)
contextMenuWidget.close()
nodeEditorWindow.parent().close()
del nodeEditorWindow
__dispatchScript(
script = "scripts/{scriptName}_edit.gfr".format( scriptName = imageName ),
tasks = [ "ImageWriter" ],
settings = [
"-ImageReader_Editor.fileName '\"{imagePath}\"'".format( imagePath = tempImagePathEditor ),
"-ImageReader_Menu.fileName '\"{imagePath}\"'".format( imagePath = tempImagePathMenu ),
"-ImageReader_Submenu.fileName '\"{imagePath}\"'".format( imagePath = tempImagePathSubmenu ),
"-ImageWriter.fileName '\"{imagePath}\"'".format( imagePath = imagePath )
]
)
# Task: Add a vector plug (single element)
imageName = "taskSpreadsheetNodeAddPlugVectorSingle"
tempImagePathEditor = __getTempFilePath( "{tempName}.png".format( tempName = imageName + "Editor" ) )
tempImagePathMenu = __getTempFilePath( "{tempName}.png".format( tempName = imageName + "Menu" ) )
tempImagePathSubmenu = __getTempFilePath( "{tempName}.png".format( tempName = imageName + "Submenu" ) )
imagePath = os.path.abspath( "images/{imageName}.png".format( imageName = imageName ) )
# Screengrab the Node Editor
nodeEditorWindow = GafferUI.NodeEditor.acquire( script["Sphere"], floating = True )
nodeEditorWindow._qtWidget().setFocus()
plugWidget = GafferUI.PlugValueWidget.acquire( script["Sphere"]["transform"]["rotate"] )
__delay( 0.1 )
GafferUI.WidgetAlgo.grab( widget = nodeEditorWindow, imagePath = tempImagePathEditor )
# Spawn the context menu
contextMenuWidget = __spawnPlugContextMenu(
nodeEditor = nodeEditorWindow,
plugWidget = plugWidget
)
# Find the target action in the menu and highlight it
actionWidget = __selectPlugContextMenuAction(
contextMenuWidget = contextMenuWidget,
targetActionName = "Add to Spreadsheet"
)
# Screengrab the menu and submenu, get submenu position
submenuOrigin = __grabPlugContextSubmenu(
plugWidget = plugWidget,
contextMenuWidget = contextMenuWidget,
submenuWidget = actionWidget,
menuPath = tempImagePathMenu,
submenuPath = tempImagePathSubmenu
)
contextMenuWidget.close()
nodeEditorWindow.parent().close()
del nodeEditorWindow
__dispatchScript(
script = "scripts/{scriptName}_edit.gfr".format( scriptName = imageName ),
tasks = [ "ImageWriter" ],
settings = [
"-ImageReader_Editor.fileName '\"{imagePath}\"'".format( imagePath = tempImagePathEditor ),
"-ImageReader_Menu.fileName '\"{imagePath}\"'".format( imagePath = tempImagePathMenu ),
"-ImageReader_Submenu.fileName '\"{imagePath}\"'".format( imagePath = tempImagePathSubmenu ),
"-ImageWriter.fileName '\"{imagePath}\"'".format( imagePath = imagePath )
]
)
# Task: Add a compound plug
imageName = "taskSpreadsheetNodeAddPlugCompound"
tempImagePathEditor = __getTempFilePath( "{tempName}.png".format( tempName = imageName + "Editor" ) )
tempImagePathMenu = __getTempFilePath( "{tempName}.png".format( tempName = imageName + "Menu" ) )
tempImagePathSubmenu = __getTempFilePath( "{tempName}.png".format( tempName = imageName + "Submenu" ) )
imagePath = os.path.abspath( "images/{imageName}.png".format( imageName = imageName ) )
# Screengrab the Node Editor
nodeEditorWindow = GafferUI.NodeEditor.acquire( script["Sphere"], floating = True )
nodeEditorWindow._qtWidget().setFocus()
plugWidget = GafferUI.PlugValueWidget.acquire( script["Sphere"]["transform"]["rotate"] )
__delay( 0.1 )
GafferUI.WidgetAlgo.grab( widget = nodeEditorWindow, imagePath = tempImagePathEditor )
# Spawn the context menu
contextMenuWidget = __spawnPlugContextMenu(
nodeEditor = nodeEditorWindow,
plugWidget = plugWidget
)
# Find the target action in the menu and highlight it
actionWidget = __selectPlugContextMenuAction(
contextMenuWidget = contextMenuWidget,
targetActionName = "Add to Spreadsheet (Transform)"
)
# Screengrab the menu and submenu, get submenu position
submenuOrigin = __grabPlugContextSubmenu(
plugWidget = plugWidget,
contextMenuWidget = contextMenuWidget,
submenuWidget = actionWidget,
menuPath = tempImagePathMenu,
submenuPath = tempImagePathSubmenu
)
contextMenuWidget.close()
nodeEditorWindow.parent().close()
del nodeEditorWindow
script.removeChild( script["Sphere"] )
__dispatchScript(
script = "scripts/{scriptName}_edit.gfr".format( scriptName = imageName ),
tasks = [ "ImageWriter" ],
settings = [
"-ImageReader_Editor.fileName '\"{imagePath}\"'".format( imagePath = tempImagePathEditor ),
"-ImageReader_Menu.fileName '\"{imagePath}\"'".format( imagePath = tempImagePathMenu ),
"-ImageReader_Submenu.fileName '\"{imagePath}\"'".format( imagePath = tempImagePathSubmenu ),
"-ImageWriter.fileName '\"{imagePath}\"'".format( imagePath = imagePath )
]
)
# Interface: Compound plug with enabled switch
imageName = "interfaceSpreadsheetNodeCompoundEnabledSwitch"
tempImagePathSpreadsheet = __getTempFilePath( "{tempName}.png".format( tempName = imageName + "Spreadsheet" ) )
tempImagePathOptions = __getTempFilePath( "{tempName}.png".format( tempName = imageName + "Options" ) )
imagePath = os.path.abspath( "images/{imageName}.png".format( imageName = imageName ) )
script["fileName"].setValue( os.path.abspath( "scripts/{scriptName}.gfr".format( scriptName = imageName ) ) )
script.load()
# Screengrab the Node Editor (Spreadsheet node)
nodeEditorWindow = GafferUI.NodeEditor.acquire( script["Spreadsheet"], floating = True )
nodeEditorWindow._qtWidget().setFocus()
__delay( 0.1 )
GafferUI.WidgetAlgo.grab( widget = nodeEditorWindow, imagePath = tempImagePathSpreadsheet )
nodeEditorWindow.parent().close()
del nodeEditorWindow
# Screengrab the Node Editor (StandardOptions node)
nodeEditorWindow = GafferUI.NodeEditor.acquire( script["StandardOptions"], floating = True )
nodeEditorWindow._qtWidget().setFocus()
GafferUI.PlugValueWidget.acquire( script["StandardOptions"]["options"]["renderCamera"] )
__delay( 0.1 )
GafferUI.WidgetAlgo.grab( widget = nodeEditorWindow, imagePath = tempImagePathOptions )
nodeEditorWindow.parent().close()
del nodeEditorWindow
__dispatchScript(
script = "scripts/{scriptName}_edit.gfr".format( scriptName = imageName ),
tasks = [ "ImageWriter" ],
settings = [
"-ImageReader_Spreadsheet.fileName '\"{imagePath}\"'".format( imagePath = tempImagePathSpreadsheet ),
"-ImageReader_Options.fileName '\"{imagePath}\"'".format( imagePath = tempImagePathOptions ),
"-ImageWriter.fileName '\"{imagePath}\"'".format( imagePath = imagePath )
]
)
# Interface: Disabled cell
imageName = "interfaceSpreadsheetNodeDisabledCell"
tempImagePathEditor = tempImagePathSpreadsheet
imagePath = os.path.abspath( "images/{imageName}.png".format( imageName = imageName ) )
__dispatchScript(
script = "scripts/{scriptName}_edit.gfr".format( scriptName = imageName ),
tasks = [ "ImageWriter" ],
settings = [
"-ImageReader_Editor.fileName '\"{imagePath}\"'".format( imagePath = tempImagePathSpreadsheet ),
"-ImageWriter.fileName '\"{imagePath}\"'".format( imagePath = imagePath )
]
)
# Task: Add a tweak plug
imageName = "taskSpreadsheetNodeAddPlugTweak"
tempImagePathEditor = __getTempFilePath( "{tempName}.png".format( tempName = imageName + "Editor" ) )
tempImagePathMenu = __getTempFilePath( "{tempName}.png".format( tempName = imageName + "Menu" ) )
tempImagePathSubmenu = __getTempFilePath( "{tempName}.png".format( tempName = imageName + "Submenu" ) )
imagePath = os.path.abspath( "images/{imageName}.png".format( imageName = imageName ) )
script["CameraTweaks"] = GafferScene.CameraTweaks()
script["CameraTweaks"]["tweaks"].addChild( Gaffer.TweakPlug( Gaffer.V2iPlug( "value", defaultValue = imath.V2i( 1920, 1050 ), flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic, ), "resolution", flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic, ) )
script["CameraTweaks"]["tweaks"]["resolution"]["name"].setValue( 'resolution' )
# Screengrab the Node Editor
nodeEditorWindow = GafferUI.NodeEditor.acquire( script["CameraTweaks"], floating = True )
nodeEditorWindow._qtWidget().setFocus()
__delay( 0.1 )
GafferUI.WidgetAlgo.grab( widget = nodeEditorWindow, imagePath = tempImagePathEditor )
# Spawn the context menu
plugWidget = GafferUI.PlugValueWidget.acquire( script["CameraTweaks"]["tweaks"]["resolution"]["value"] )
contextMenuWidget = __spawnPlugContextMenu(
nodeEditor = nodeEditorWindow,
plugWidget = plugWidget
)
# Find the target action in the menu and highlight it
actionWidget = __selectPlugContextMenuAction(
contextMenuWidget = contextMenuWidget,
targetActionName = "Add to Spreadsheet (Tweak)"
)
# Screengrab the menu and submenu, get submenu position
submenuOrigin = __grabPlugContextSubmenu(
plugWidget = plugWidget,
contextMenuWidget = contextMenuWidget,
submenuWidget = actionWidget,
menuPath = tempImagePathMenu,
submenuPath = tempImagePathSubmenu
)
contextMenuWidget.close()
nodeEditorWindow.parent().close()
del nodeEditorWindow
script.removeChild( script["CameraTweaks"] )
__dispatchScript(
script = "scripts/{scriptName}_edit.gfr".format( scriptName = imageName ),
tasks = [ "ImageWriter" ],
settings = [
"-ImageReader_Editor.fileName '\"{imagePath}\"'".format( imagePath = tempImagePathEditor ),
"-ImageReader_Menu.fileName '\"{imagePath}\"'".format( imagePath = tempImagePathMenu ),
"-ImageReader_Submenu.fileName '\"{imagePath}\"'".format( imagePath = tempImagePathSubmenu ),
"-ImageWriter.fileName '\"{imagePath}\"'".format( imagePath = imagePath )
]
)
# Interface: Column sections in the Node Editor
imageName = "interfaceSpreadsheetNodeColumnSections"
tempImagePathColumns = __getTempFilePath( "{imageName}.png".format( imageName = imageName ) )
imagePath = os.path.abspath( "images/{imageName}.png".format( imageName = imageName ) )
script["fileName"].setValue( os.path.abspath( "scripts/{scriptName}.gfr".format( scriptName = imageName ) ) )
script.load()
__delay( 0.1 )
nodeEditorWindow = GafferUI.NodeEditor.acquire( script["Spreadsheet"], floating = True )
nodeEditorWindow.parent()._qtWidget().resize( 600, 350 )
nodeEditorWindow._qtWidget().setFocus()
__delay( 0.1 )
GafferUI.WidgetAlgo.grab( widget = nodeEditorWindow, imagePath = tempImagePathColumns )
nodeEditorWindow.parent().close()
del nodeEditorWindow
__dispatchScript(
script = "scripts/{scriptName}_edit.gfr".format( scriptName = imageName ),
tasks = [ "ImageWriter" ],
settings = [
"-ImageReader_Editor.fileName '\"{imagePath}\"'".format( imagePath = tempImagePathColumns ),
"-ImageWriter.fileName '\"{imagePath}\"'".format( imagePath = imagePath )
]
)
# Task: Reordering a section
imageName = "taskSpreadsheetNodeReorderSection"
tempImagePath = tempImagePathColumns
imagePath = os.path.abspath( "images/{imageName}.png".format( imageName = imageName ) )
__dispatchScript(
script = "scripts/{scriptName}_edit.gfr".format( scriptName = imageName ),
tasks = [ "ImageWriter" ],
settings = [
"-ImageReader_Editor.fileName '\"{imagePath}\"'".format( imagePath = tempImagePath ),
"-ImageWriter.fileName '\"{imagePath}\"'".format( imagePath = imagePath )
]
)
# Task: Reordering a column
imageName = "taskSpreadsheetNodeReorderColumn"
tempImagePath = tempImagePathColumns
imagePath = os.path.abspath( "images/{imageName}.png".format( imageName = imageName ) )
__dispatchScript(
script = "scripts/{scriptName}_edit.gfr".format( scriptName = imageName ),
tasks = [ "ImageWriter" ],
settings = [
"-ImageReader_Editor.fileName '\"{imagePath}\"'".format( imagePath = tempImagePath ),
"-ImageWriter.fileName '\"{imagePath}\"'".format( imagePath = imagePath )
]
)
# Task: Automatically resizing a column
imageName = "taskSpreadsheetNodeResizeColumnAutomatic"
tempImagePath = tempImagePathColumns
imagePath = os.path.abspath( "images/{imageName}.png".format( imageName = imageName ) )
__dispatchScript(
script = "scripts/{scriptName}_edit.gfr".format( scriptName = imageName ),
tasks = [ "ImageWriter" ],
settings = [
"-ImageReader_Editor.fileName '\"{imagePath}\"'".format( imagePath = tempImagePath ),
"-ImageWriter.fileName '\"{imagePath}\"'".format( imagePath = imagePath )
]
)
# Task: Manually resizing a column
imageName = "taskSpreadsheetNodeResizeColumnManual"
tempImagePath = tempImagePathColumns
imagePath = os.path.abspath( "images/{imageName}.png".format( imageName = imageName ) )
__dispatchScript(
script = "scripts/{scriptName}_edit.gfr".format( scriptName = imageName ),
tasks = [ "ImageWriter" ],
settings = [
"-ImageReader_Editor.fileName '\"{imagePath}\"'".format( imagePath = tempImagePath ),
"-ImageWriter.fileName '\"{imagePath}\"'".format( imagePath = imagePath )
]
)
# Interface: Pattern widths
imageName = "interfaceSpreadsheetNodePatternWidths"
tempImagePathHalf = __getTempFilePath( "{tempName}.png".format( tempName = imageName + "Half" ) )
tempImagePathSingle = __getTempFilePath( "{tempName}.png".format( tempName = imageName + "Single" ) )
tempImagePathDouble = __getTempFilePath( "{tempName}.png".format( tempName = imageName + "Double" ) )
imagePath = os.path.abspath( "images/{imageName}.png".format( imageName = imageName ) )
nodeEditorWindow = GafferUI.NodeEditor.acquire( script["Spreadsheet"], floating = True )
nodeEditorWindow._qtWidget().setFocus()
__delay( 0.1 )
GafferUI.WidgetAlgo.grab( widget = nodeEditorWindow, imagePath = tempImagePathSingle )
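# Vary the row-name column width via the "spreadsheet:rowNameWidth" metadata to
# capture the half- and double-width variants.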
Gaffer.Metadata.registerValue( script["Spreadsheet"]["rows"][0], 'spreadsheet:rowNameWidth', 75.0 )
GafferUI.WidgetAlgo.grab( widget = nodeEditorWindow, imagePath = tempImagePathHalf )
Gaffer.Metadata.registerValue( script["Spreadsheet"]["rows"][0], 'spreadsheet:rowNameWidth', 300.0 )
GafferUI.WidgetAlgo.grab( widget = nodeEditorWindow, imagePath = tempImagePathDouble )
nodeEditorWindow.parent().close()
del nodeEditorWindow
__dispatchScript(
script = "scripts/{scriptName}_edit.gfr".format( scriptName = imageName ),
tasks = [ "ImageWriter" ],
settings = [
"-ImageReader_Half.fileName '\"{imagePath}\"'".format( imagePath = tempImagePathHalf ),
"-ImageReader_Single.fileName '\"{imagePath}\"'".format( imagePath = tempImagePathSingle ),
"-ImageReader_Double.fileName '\"{imagePath}\"'".format( imagePath = tempImagePathDouble ),
"-ImageWriter.fileName '\"{imagePath}\"'".format( imagePath = imagePath )
]
)
# Example: Per-location Transform Spreadsheet
exampleName = "PerLocationTransformSpreadsheet"
script["fileName"].setValue( os.path.expandvars( "$GAFFER_ROOT/resources/examples/sceneProcessing/perLocationTransformSpreadsheet.gfr" ) )
script.load()
viewerWindow = GafferUI.Viewer.acquire( script["Transform"], floating = True )
viewerWindow._qtWidget().setFocus()
viewerWindow.parent().reveal()
viewerWindow.parent()._qtWidget().resize( 696, 300 )
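# Fully expand the scene in the Viewer and frame the root bound before grabbing.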
viewerWindow.view()["minimumExpansionDepth"].setValue( 999 )
__delay( 0.1 )
viewerWindow.view().viewportGadget().frame( script["Transform"]["out"].bound( "/" ) )
GafferUI.WidgetAlgo.grab( widget = viewerWindow, imagePath = "images/example{exampleName}.png".format( exampleName = exampleName ) )
viewerWindow.parent().close()
del viewerWindow
# Example: Per-location Light Tweak Spreadsheet
exampleName = "PerLocationLightTweakSpreadsheet"
script["fileName"].setValue( os.path.expandvars( "$GAFFER_ROOT/resources/examples/rendering/perLocationLightTweakSpreadsheet.gfr" ) )
script.load()
viewerWindow = GafferUI.Viewer.acquire( script["ShaderTweaks"], floating = True )
viewerWindow._qtWidget().setFocus()
viewerWindow.parent().reveal()
viewerWindow.parent()._qtWidget().resize( 696, 300 )
viewerWindow.view()["minimumExpansionDepth"].setValue( 999 )
__delay( 0.1 )
viewerWindow.view().viewportGadget().frame( script["ShaderTweaks"]["out"].bound( "/" ) )
GafferUI.WidgetAlgo.grab( widget = viewerWindow, imagePath = "images/example{exampleName}.png".format( exampleName = exampleName ) )
viewerWindow.parent().close()
del viewerWindow
# Example: Multi-shot Render Spreadsheet
exampleName = "MultiShotRenderSpreadsheet"
script["fileName"].setValue( os.path.expandvars( "$GAFFER_ROOT/resources/examples/rendering/multiShotRenderSpreadsheet.gfr" ) )
script.load()
nodeEditorWindow = GafferUI.NodeEditor.acquire( script["Spreadsheet_RenderOptions"], floating = True )
nodeEditorWindow._qtWidget().setFocus()
nodeEditorWindow._qtWidget().parent().resize( 696, 325 )
__delay( 0.1 )
GafferUI.WidgetAlgo.grab( widget = nodeEditorWindow, imagePath = "images/example{exampleName}.png".format( exampleName = exampleName ) )
nodeEditorWindow.parent().close()
del nodeEditorWindow
|
{
"content_hash": "5dea10eaa8d613f16365b2ef5e4676ca",
"timestamp": "",
"source": "github",
"line_count": 635,
"max_line_length": 272,
"avg_line_length": 44.870866141732286,
"alnum_prop": 0.752219843470326,
"repo_name": "andrewkaufman/gaffer",
"id": "4c57e1d540d38c3a65552e31b1e5b71a40c5050a",
"size": "29870",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "doc/source/WorkingWithTheNodeGraph/SpreadsheetNode/screengrab.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5790"
},
{
"name": "C",
"bytes": "61993"
},
{
"name": "C++",
"bytes": "9572701"
},
{
"name": "CMake",
"bytes": "85201"
},
{
"name": "GLSL",
"bytes": "6208"
},
{
"name": "Python",
"bytes": "10279312"
},
{
"name": "Ruby",
"bytes": "419"
},
{
"name": "Shell",
"bytes": "14580"
}
],
"symlink_target": ""
}
|
from django.contrib.admin.models import LogEntry, LogEntryManager

# Action flags recorded on admin log entries.
UNKNOWN = 0
ACCEPT_PREREG = 10
REJECT_PREREG = 11
COMMENT_PREREG = 12
CONFIRM_SPAM = 20
CONFIRM_HAM = 21
NODE_REMOVED = 30
NODE_RESTORED = 31
CONTRIBUTOR_REMOVED = 32
NODE_MADE_PRIVATE = 33
USER_REMOVED = 40
USER_RESTORED = 41
USER_2_FACTOR = 42
USER_EMAILED = 43


def update_admin_log(user_id, object_id, object_repr, message, action_flag=UNKNOWN):
    """Record an admin action as an AdminLogEntry via Django's LogEntry machinery."""
AdminLogEntry.objects.log_action(
user_id=user_id,
content_type_id=None,
object_id=object_id,
object_repr=object_repr,
change_message=message,
action_flag=action_flag
)
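
# Example usage (hypothetical values):
#
#   update_admin_log(
#       user_id=request.user.id,
#       object_id=node.pk,
#       object_repr='Node',
#       message='Node removed.',
#       action_flag=NODE_REMOVED,
#   )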


class AdminLogEntryManager(LogEntryManager):
pass


class AdminLogEntry(LogEntry):
    """LogEntry subclass used to record actions taken in the admin app."""
primary_identifier_name = 'id'
def message(self):
return self.change_message
message.allow_tags = True
objects = AdminLogEntryManager()
|
{
"content_hash": "1288fcfe488d4df410938c78c0a6f60c",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 84,
"avg_line_length": 20.837209302325583,
"alnum_prop": 0.6986607142857143,
"repo_name": "acshi/osf.io",
"id": "9f68dc295b929559fd81b0eb26d367182ea1ff42",
"size": "896",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "osf/models/admin_log_entry.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "176516"
},
{
"name": "HTML",
"bytes": "181969"
},
{
"name": "JavaScript",
"bytes": "2017102"
},
{
"name": "Mako",
"bytes": "756427"
},
{
"name": "Perl",
"bytes": "13885"
},
{
"name": "Python",
"bytes": "8555915"
},
{
"name": "Shell",
"bytes": "436"
}
],
"symlink_target": ""
}
|