from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch
from ansible.modules.network.onyx import onyx_vxlan
from units.modules.utils import set_module_args
from .onyx_module import TestOnyxModule, load_fixture
class TestOnyxVxlanModule(TestOnyxModule):
module = onyx_vxlan
arp_suppression = True
def setUp(self):
super(TestOnyxVxlanModule, self).setUp()
self.mock_get_vxlan_config = patch.object(
onyx_vxlan.OnyxVxlanModule, "_show_vxlan_config")
self.get_vxlan_config = self.mock_get_vxlan_config.start()
self.mock_load_config = patch(
'ansible.module_utils.network.onyx.onyx.load_config')
self.load_config = self.mock_load_config.start()
self.mock_get_nve_detail = patch.object(
onyx_vxlan.OnyxVxlanModule, "_show_nve_detail")
self.get_nve_detail = self.mock_get_nve_detail.start()
def tearDown(self):
super(TestOnyxVxlanModule, self).tearDown()
self.mock_get_vxlan_config.stop()
self.mock_load_config.stop()
self.mock_get_nve_detail.stop()
def load_fixtures(self, commands=None, transport='cli'):
interfaces_nve_config_file = 'onyx_show_interfaces_nve.cfg'
interfaces_nve_detail_config_file = 'onyx_show_interfaces_nve_detail.cfg'
        interfaces_nve_detail_data = load_fixture(interfaces_nve_detail_config_file)
        interfaces_nv_data = load_fixture(interfaces_nve_config_file)
        self.get_nve_detail.return_value = interfaces_nve_detail_data
if self.arp_suppression is False:
interfaces_nve_detail_data[0]["10"][0]["Neigh Suppression"] = "Disable"
interfaces_nve_detail_data[0]["6"][0]["Neigh Suppression"] = "Disable"
self.get_nve_detail.return_value = interfaces_nve_detail_data
self.get_vxlan_config.return_value = interfaces_nv_data
self.load_config.return_value = None
def test_configure_vxlan_no_change(self):
set_module_args(dict(nve_id=1, loopback_id=1, bgp=True, mlag_tunnel_ip='192.10.10.1',
vni_vlan_list=[dict(vlan_id=10, vni_id=10010), dict(vlan_id=6, vni_id=10060)],
arp_suppression=True))
self.execute_module(changed=False)
def test_configure_vxlan_with_change(self):
set_module_args(dict(nve_id=2, loopback_id=1, bgp=True, mlag_tunnel_ip='192.10.10.1',
vni_vlan_list=[dict(vlan_id=10, vni_id=10010), dict(vlan_id=6, vni_id=10060)],
arp_suppression=True))
commands = [
"no interface nve 1", "interface nve 2", "exit",
"interface nve 2 vxlan source interface loopback 1 ",
"interface nve 2 nve controller bgp", "interface nve 2 vxlan mlag-tunnel-ip 192.10.10.1",
"interface nve 2 nve neigh-suppression", "interface nve 2 nve vni 10010 vlan 10",
"interface vlan 10", "exit", "interface nve 2 nve vni 10060 vlan 6", "interface vlan 6", "exit"
]
self.execute_module(changed=True, commands=commands)
def test_loopback_id_with_change(self):
set_module_args(dict(nve_id=1, loopback_id=2, bgp=True, mlag_tunnel_ip='192.10.10.1',
vni_vlan_list=[dict(vlan_id=10, vni_id=10010), dict(vlan_id=6, vni_id=10060)],
arp_suppression=True))
commands = ["interface nve 1 vxlan source interface loopback 2 "]
self.execute_module(changed=True, commands=commands)
def test_mlag_tunnel_ip_with_change(self):
set_module_args(dict(nve_id=1, loopback_id=1, bgp=True, mlag_tunnel_ip='192.10.10.10',
vni_vlan_list=[dict(vlan_id=10, vni_id=10010), dict(vlan_id=6, vni_id=10060)],
arp_suppression=True))
commands = ["interface nve 1 vxlan mlag-tunnel-ip 192.10.10.10"]
self.execute_module(changed=True, commands=commands)
def test_vni_vlan_list_with_change(self):
set_module_args(dict(nve_id=1, loopback_id=1, bgp=True, mlag_tunnel_ip='192.10.10.1',
vni_vlan_list=[dict(vlan_id=11, vni_id=10011), dict(vlan_id=7, vni_id=10061)],
arp_suppression=False))
commands = ["interface nve 1 nve vni 10011 vlan 11", "interface nve 1 nve vni 10061 vlan 7"]
self.execute_module(changed=True, commands=commands)
def test_arp_suppression_with_change(self):
self.arp_suppression = False
set_module_args(dict(nve_id=1, loopback_id=1, bgp=True, mlag_tunnel_ip='192.10.10.1',
vni_vlan_list=[dict(vlan_id=10, vni_id=10010), dict(vlan_id=6, vni_id=10060)],
arp_suppression=True))
commands = ["interface vlan 10", "exit", "interface vlan 6", "exit"]
self.execute_module(changed=True, commands=commands)
|
{
"content_hash": "7f0259c00c4864af4036390c8c094ec5",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 107,
"avg_line_length": 52.291666666666664,
"alnum_prop": 0.6256972111553785,
"repo_name": "thaim/ansible",
"id": "5800d0fda7198f6adc4a42f9e7918239b1a29b76",
"size": "5176",
"binary": false,
"copies": "35",
"ref": "refs/heads/fix-broken-link",
"path": "test/units/modules/network/onyx/test_onyx_vxlan.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7"
},
{
"name": "Shell",
"bytes": "246"
}
],
"symlink_target": ""
}
|
"""
Gole topology parser.
Author: Henrik Thostrup Jensen <htj@nordu.net>
Jeroen van der Ham <vdham@uva.nl>
Copyright: NORDUnet (2011-2012)
"""
import re
import StringIO
from xml.etree import ElementTree as ET
from twisted.python import log
from opennsa import nsa, error
from opennsa.topology import topology
LOG_SYSTEM = 'opennsa.gole'
# Constants for parsing GOLE topology format
RDF_SCHEMA_NS = 'http://www.w3.org/2000/01/rdf-schema#'
RDF_SYNTAX_NS = 'http://www.w3.org/1999/02/22-rdf-syntax-ns#'
OWL_NS = 'http://www.w3.org/2002/07/owl#'
DTOX_NS = 'http://www.glif.is/working-groups/tech/dtox#'
RDF_DESCRIPTION = '{%s}Description'% RDF_SYNTAX_NS
RDF_ABOUT = '{%s}about' % RDF_SYNTAX_NS
RDF_TYPE = '{%s}type' % RDF_SYNTAX_NS
RDF_RESOURCE = '{%s}resource' % RDF_SYNTAX_NS
RDF_COMMENT = '{%s}comment' % RDF_SCHEMA_NS
RDF_LABEL = '{%s}label' % RDF_SCHEMA_NS
NAMED_INDIVIDUAL = '{%s}NamedIndividual' % OWL_NS
GLIF_HAS_STP = '{%s}hasSTP' % DTOX_NS
GLIF_CONNECTED_TO = '{%s}connectedTo' % DTOX_NS
GLIF_MAPS_TO = '{%s}mapsTo' % DTOX_NS
GLIF_MAX_CAPACITY = '{%s}maxCapacity' % DTOX_NS
GLIF_AVAILABLE_CAPACITY = '{%s}availableCapacity' % DTOX_NS
GLIF_MANAGING = '{%s}managing' % DTOX_NS
GLIF_MANAGED_BY = '{%s}managedBy' % DTOX_NS
GLIF_LOCATED_AT = '{%s}locatedAt' % DTOX_NS
GLIF_LATITUDE = '{%s}lat' % DTOX_NS
GLIF_LONGITUDE = '{%s}long' % DTOX_NS
GLIF_ADMIN_CONTACT = '{%s}adminContact' % DTOX_NS
GLIF_PROVIDER_ENDPOINT = '{%s}csProviderEndpoint' % DTOX_NS
GLIF_NETWORK = DTOX_NS + 'NSNetwork'
URN_NSNETWORK_PREFIX = 'urn:ogf:network:nsnetwork:'
URN_NSA_PREFIX = 'urn:ogf:network:nsa:'
URN_STP_PREFIX = 'urn:ogf:network:stp:'
URN_NRM_PORT = 'urn:ogf:network:nrmport:'
NRM_PORT_TYPE = 'http://nordu.net/ns/2012/opennsa#InternalPort'
STP_PREFIX = 'stp:'
LINK_PREFIX = 'link:'
def _stripPrefix(text, prefix):
assert text.startswith(prefix), 'Text did not start with specified prefix (text: %s, prefix: %s)' % (text, prefix)
ul = len(prefix)
return text[ul:]
def _createNRMPort(backend, local_port):
return URN_NRM_PORT + (backend or '') + ':' + local_port
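# Illustrative examples (the backend/port names here are hypothetical):
#   _createNRMPort('force10', 'ge-1/0/9') -> 'urn:ogf:network:nrmport:force10:ge-1/0/9'
#   _createNRMPort(None, 'ge-1/0/9')      -> 'urn:ogf:network:nrmport::ge-1/0/9'
# The double colon in the no-backend case is the "initial colon" that
# buildTopology strips further down.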
def _parseOWLTopology(topology_source):
if isinstance(topology_source, file) or isinstance(topology_source, StringIO.StringIO):
doc = ET.parse(topology_source)
elif isinstance(topology_source, str):
doc = ET.fromstring(topology_source)
else:
raise error.TopologyError('Invalid topology source')
triples = set()
root = doc.getroot()
for e in root.getchildren():
if e.tag in (NAMED_INDIVIDUAL, RDF_DESCRIPTION):
resource = e.attrib[RDF_ABOUT]
for el in e.getchildren():
if el.tag == RDF_TYPE: triples.add( (resource, RDF_TYPE, el.attrib.values()[0]) )
elif el.tag == RDF_LABEL: triples.add( (resource, RDF_LABEL, el.text) )
elif el.tag == GLIF_CONNECTED_TO: triples.add( (resource, GLIF_CONNECTED_TO, el.attrib.values()[0]) )
elif el.tag == GLIF_HAS_STP: triples.add( (resource, GLIF_HAS_STP, el.attrib.values()[0]) )
elif el.tag == GLIF_MAPS_TO: triples.add( (resource, GLIF_MAPS_TO, el.text or el.attrib.values()[0]) )
elif el.tag == GLIF_PROVIDER_ENDPOINT: triples.add( (resource, GLIF_PROVIDER_ENDPOINT, el.text) )
elif el.tag == GLIF_MANAGED_BY: triples.add( (resource, GLIF_MANAGED_BY, el.attrib.values()[0]) )
# We don't care about these
elif el.tag in (RDF_COMMENT, GLIF_MANAGING, GLIF_ADMIN_CONTACT, GLIF_LOCATED_AT, GLIF_LATITUDE, GLIF_LONGITUDE):
pass
else:
                    print 'Unknown tag type in topology: %s' % el.tag
return triples
def _parseNRMMapping(nrm_mapping_source):
# regular expression for matching nrm mapping lines
    # basically we allow two types of lines (with and without a backend identifier), i.e.:
# stp:stp_name "nrm_port"
# stp:stp_name backend "nrm_port"
STP_MAP_RX = re.compile('''(.+?)\s+(\w+?)?\s*"(.+)"''')
# link:link_name backend1 "nrm_port" - backend2 "nrm_port"
LINK_RX = re.compile('''(.+?)\s+(\w+?)\s+"(.+)"\s+-\s+(\w+?)\s+"(.+)"''')
def parseSTP(entry):
        m = STP_MAP_RX.match(entry)
if not m:
log.msg('Error parsing stp map %s in NRM description.' % entry, system=LOG_SYSTEM)
return
stp, backend, local_port = m.groups()
if stp.startswith(STP_PREFIX):
stp = 'urn:ogf:network:' + stp
nrm_port = _createNRMPort(backend, local_port)
triples = [ (stp, GLIF_MAPS_TO, nrm_port ),
(nrm_port, RDF_TYPE, NRM_PORT_TYPE) ]
return triples
def parseLink(entry):
m = LINK_RX.match(entry)
if not m:
log.msg('Error parsing link entry %s in NRM description.' % entry, system=LOG_SYSTEM)
return
_, backend_1, nrm_port_1, backend_2, nrm_port_2 = m.groups()
nrm_port_1 = _createNRMPort(backend_1, nrm_port_1)
nrm_port_2 = _createNRMPort(backend_2, nrm_port_2)
triples = [ (nrm_port_1, GLIF_CONNECTED_TO, nrm_port_2),
(nrm_port_2, GLIF_CONNECTED_TO, nrm_port_1),
(nrm_port_1, RDF_TYPE, NRM_PORT_TYPE),
(nrm_port_2, RDF_TYPE, NRM_PORT_TYPE) ]
return triples
if isinstance(nrm_mapping_source, file) or isinstance(nrm_mapping_source, StringIO.StringIO):
source = nrm_mapping_source
elif isinstance(nrm_mapping_source, str):
from StringIO import StringIO
source = StringIO(nrm_mapping_source)
else:
raise error.TopologyError('Invalid NRM Mapping Source')
triples = set()
for line in source:
line = line.strip()
if not line or line.startswith('#'):
continue
if line.startswith(URN_STP_PREFIX) or line.startswith(STP_PREFIX):
stp_triples = parseSTP(line)
if stp_triples:
triples.update(stp_triples)
elif line.startswith(LINK_PREFIX):
link_triples = parseLink(line)
if link_triples:
triples.update(link_triples)
else:
# we don't want to have invalid topology descriptions so just raise error
raise error.TopologyError('Invalid entry in NRM file: %s' % line)
return triples
def buildTopology(triples):
getSubject = lambda pred, obj : [ t[0] for t in triples if t[1] == pred and t[2] == obj ]
getObjects = lambda subj, pred : [ t[2] for t in triples if t[0] == subj and t[1] == pred ]
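    # getSubject/getObjects are tiny triple-store queries over the (s, p, o)
    # tuples collected above: fix two positions of a triple and collect
    # every match for the third.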
topo = topology.Topology()
networks = getSubject(RDF_TYPE, GLIF_NETWORK)
for network in networks:
nsas = getObjects(network, GLIF_MANAGED_BY)
endpoints = getObjects(nsas[0], GLIF_PROVIDER_ENDPOINT)
t_network_name = _stripPrefix(network, URN_NSNETWORK_PREFIX)
t_nsa_name = _stripPrefix(nsas[0], URN_NSA_PREFIX)
t_nsa_endpoint = endpoints[0]
t_network_nsa = nsa.NetworkServiceAgent(t_nsa_name, t_nsa_endpoint)
t_network = nsa.Network(t_network_name, t_network_nsa)
stps = getObjects(network, GLIF_HAS_STP)
for stp in stps:
t_stp_name = _stripPrefix(stp, URN_STP_PREFIX).split(':')[-1]
maps_to = getObjects(stp, GLIF_MAPS_TO)
t_maps_to = _stripPrefix(maps_to[0], URN_NRM_PORT) if maps_to else None
            # this is for the default/single backend to work; remove the initial colon (backend separator)
if t_maps_to is not None and t_maps_to.startswith(':'):
t_maps_to = t_maps_to[1:]
dest_stps = getObjects(stp, GLIF_CONNECTED_TO)
if dest_stps:
dest_network, dest_port = _stripPrefix(dest_stps[0], URN_STP_PREFIX).split(':',1)
t_dest_stp = nsa.STP(dest_network, dest_port)
else:
t_dest_stp = None
ep = nsa.NetworkEndpoint(t_network_name, t_stp_name, t_maps_to, t_dest_stp, None, None)
t_network.addEndpoint(ep)
topo.addNetwork(t_network)
return topo
def buildInternalTopology(triples):
getSubject = lambda pred, obj : [ t[0] for t in triples if t[1] == pred and t[2] == obj ]
getObjects = lambda subj, pred : [ t[2] for t in triples if t[0] == subj and t[1] == pred ]
node_ports = {}
urn_internal_ports = getSubject(RDF_TYPE, NRM_PORT_TYPE)
for uip in sorted(urn_internal_ports):
sp = _stripPrefix(uip, URN_NRM_PORT)
node, port = sp.split(':',1)
node_ports.setdefault(node, []).append(port)
internal_topology = topology.Topology()
for node, ports in node_ports.items():
nw = nsa.Network(node, None)
for p in ports:
dest_ports = getObjects(_createNRMPort(node, p), GLIF_CONNECTED_TO)
if dest_ports:
dest_port = dest_ports[0] if dest_ports else None
dsp = _stripPrefix(dest_port, URN_NRM_PORT)
d_node, d_port = dsp.split(':',1)
dest_stp = nsa.STP(d_node, d_port)
else:
dest_stp = None
ep = nsa.NetworkEndpoint(node, p, dest_stp=dest_stp)
nw.addEndpoint(ep)
internal_topology.addNetwork(nw)
return internal_topology
def parseTopology(topology_sources, nrm_mapping_source=None):
triples = set()
for ts in topology_sources:
topo_triples = _parseOWLTopology(ts)
triples = triples.union(topo_triples)
if nrm_mapping_source:
topo_triples = _parseNRMMapping(nrm_mapping_source)
triples = triples.union(topo_triples)
topo = buildTopology(triples)
int_topo = buildInternalTopology(triples)
return topo, int_topo
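# A minimal usage sketch (the file names are hypothetical): feed one or
# more OWL topology sources plus an optional NRM mapping, and get back
# both the inter-network topology and the backend-internal one:
#
#   topo, int_topo = parseTopology([open('gole-topo.owl')], open('ports.nrm'))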
|
{
"content_hash": "d96bb10a6f5c9ed1c390f8220acdb05c",
"timestamp": "",
"source": "github",
"line_count": 288,
"max_line_length": 135,
"avg_line_length": 35.545138888888886,
"alnum_prop": 0.583667089967764,
"repo_name": "jeroenh/OpenNSA",
"id": "021367f1844bc9f05a1c17ee69c4b08e43cddafd",
"size": "10237",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "opennsa/topology/gole.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Perl",
"bytes": "637"
},
{
"name": "Python",
"bytes": "263030"
},
{
"name": "Shell",
"bytes": "1581"
}
],
"symlink_target": ""
}
|
from twisted.internet.defer import Deferred
from axiom.test.historic.stubloader import StubbedTest
from axiom.test.historic.stub_processor1to2 import DummyProcessor
class ProcessorUpgradeTest(StubbedTest):
def setUp(self):
# Ick, we need to catch the run event of DummyProcessor, and I can't
# think of another way to do it.
self.dummyRun = DummyProcessor.run
self.calledBack = Deferred()
def dummyRun(calledOn):
self.calledBack.callback(calledOn)
DummyProcessor.run = dummyRun
return StubbedTest.setUp(self)
def tearDown(self):
# Okay this is a pretty irrelevant method on a pretty irrelevant class,
# but we'll fix it anyway.
DummyProcessor.run = self.dummyRun
return StubbedTest.tearDown(self)
def test_pollingRemoval(self):
"""
Test that processors lose their idleInterval but none of the rest of
their stuff, and that they get scheduled by the upgrader so they can
figure out what state they should be in.
"""
proc = self.store.findUnique(DummyProcessor)
self.assertEqual(proc.busyInterval, 100)
self.assertNotEqual(proc.scheduled, None)
def assertion(result):
self.assertEqual(result, proc)
return self.calledBack.addCallback(assertion)
|
{
"content_hash": "282073cca1fbc802663884d496dcde86",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 79,
"avg_line_length": 34.76923076923077,
"alnum_prop": 0.6806784660766961,
"repo_name": "twisted/axiom",
"id": "d9affc35f44c505d834b26cbc4062a1bc1b99675",
"size": "1357",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "axiom/test/historic/test_processor1to2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "807997"
}
],
"symlink_target": ""
}
|
from django.db import models
class Settings(models.Model):
address = models.TextField(verbose_name='Server address')
port = models.IntegerField(verbose_name='Server port')
|
{
"content_hash": "a4c0249b69cb969c41cbb8dea75a4127",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 61,
"avg_line_length": 30.333333333333332,
"alnum_prop": 0.7527472527472527,
"repo_name": "chyla/slas",
"id": "28d3f5c7d504b914e9736febcab62d889578bab8",
"size": "207",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "web/slas-web/general/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "2019"
},
{
"name": "Awk",
"bytes": "968"
},
{
"name": "C",
"bytes": "3991659"
},
{
"name": "C++",
"bytes": "930942"
},
{
"name": "CSS",
"bytes": "6991"
},
{
"name": "Groff",
"bytes": "344055"
},
{
"name": "HTML",
"bytes": "1257796"
},
{
"name": "JavaScript",
"bytes": "17331"
},
{
"name": "M4",
"bytes": "444529"
},
{
"name": "Makefile",
"bytes": "146580"
},
{
"name": "Perl",
"bytes": "146942"
},
{
"name": "Python",
"bytes": "55962"
},
{
"name": "Shell",
"bytes": "144697"
},
{
"name": "TeX",
"bytes": "325892"
},
{
"name": "Yacc",
"bytes": "181259"
}
],
"symlink_target": ""
}
|
from jupyter_client.client import *
|
{
"content_hash": "a001d62d00fbeb0f1ef4e77e5d8c5e3d",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 35,
"avg_line_length": 36,
"alnum_prop": 0.8055555555555556,
"repo_name": "bdh1011/wau",
"id": "a98690b74cc408238aa6ceb4e7e09aba7c999fa3",
"size": "36",
"binary": false,
"copies": "23",
"ref": "refs/heads/master",
"path": "venv/lib/python2.7/site-packages/IPython/kernel/client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1176"
},
{
"name": "C",
"bytes": "5022853"
},
{
"name": "C++",
"bytes": "43676"
},
{
"name": "CSS",
"bytes": "10359"
},
{
"name": "D",
"bytes": "1841"
},
{
"name": "FORTRAN",
"bytes": "3707"
},
{
"name": "GAP",
"bytes": "14120"
},
{
"name": "Groff",
"bytes": "7236"
},
{
"name": "HTML",
"bytes": "1709320"
},
{
"name": "JavaScript",
"bytes": "1200059"
},
{
"name": "Jupyter Notebook",
"bytes": "310219"
},
{
"name": "Lua",
"bytes": "11887"
},
{
"name": "Makefile",
"bytes": "112163"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Objective-C",
"bytes": "1291"
},
{
"name": "Perl",
"bytes": "171375"
},
{
"name": "Python",
"bytes": "49407229"
},
{
"name": "Ruby",
"bytes": "58403"
},
{
"name": "Shell",
"bytes": "47672"
},
{
"name": "Smarty",
"bytes": "22599"
},
{
"name": "Tcl",
"bytes": "426334"
},
{
"name": "XSLT",
"bytes": "153073"
}
],
"symlink_target": ""
}
|
"""Representation for the MongoDB internal MaxKey type.
"""
from typing import Any
class MaxKey(object):
"""MongoDB internal MaxKey type."""
__slots__ = ()
_type_marker = 127
def __getstate__(self) -> Any:
return {}
def __setstate__(self, state: Any) -> None:
pass
def __eq__(self, other: Any) -> bool:
return isinstance(other, MaxKey)
def __hash__(self) -> int:
return hash(self._type_marker)
def __ne__(self, other: Any) -> bool:
return not self == other
def __le__(self, other: Any) -> bool:
return isinstance(other, MaxKey)
def __lt__(self, dummy: Any) -> bool:
return False
def __ge__(self, dummy: Any) -> bool:
return True
def __gt__(self, other: Any) -> bool:
return not isinstance(other, MaxKey)
def __repr__(self):
return "MaxKey()"
|
{
"content_hash": "ef8e564d8efef1b6eb2124e85d8b3bcb",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 55,
"avg_line_length": 21.658536585365855,
"alnum_prop": 0.5506756756756757,
"repo_name": "mongodb/mongo-python-driver",
"id": "b4f38d072eb509459464c016de74c9ac2ce0e89d",
"size": "1470",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bson/max_key.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "183641"
},
{
"name": "Python",
"bytes": "2983153"
},
{
"name": "Shell",
"bytes": "30026"
}
],
"symlink_target": ""
}
|
import sys, cbor, json
from subprocess import Popen, PIPE, STDOUT
## emulate 'jq' argument pattern
## no options for now
fltr = sys.argv[1]
try:
    # read CBOR from the named file, falling back to stdin when no file
    # argument is given (passing a file object to open() would fail)
    source = open(sys.argv[2], 'rb')
except IndexError:
    source = sys.stdin
data = json.dumps(cbor.load(source))
# pipe the JSON through jq and relay jq's output
p = Popen(['jq', fltr], stdout=PIPE, stdin=PIPE, stderr=STDOUT)
jq_stdout = p.communicate(input=data)[0]
sys.stdout.write(jq_stdout.decode())
|
{
"content_hash": "0024996bf1d1c4348ef99a92113019ca",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 79,
"avg_line_length": 26.333333333333332,
"alnum_prop": 0.6629746835443038,
"repo_name": "usc-isi-i2/dig-tools",
"id": "a31c5f6d04501435598ce77bab44a59df509399f",
"size": "651",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/dig/cq.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "76"
},
{
"name": "Python",
"bytes": "14536"
},
{
"name": "Shell",
"bytes": "46"
}
],
"symlink_target": ""
}
|
from JumpScale.clients.racktivity.energyswitch.common import convert
from JumpScale.clients.racktivity.energyswitch.common.GUIDTable import Value
from JumpScale.clients.racktivity.energyswitch.modelfactory.models.common.Master_0_1_2_16 import Model as Master
class Model(Master):
def __init__(self, parent):
super(Model, self).__init__(parent)
self._guidTable.update({
# ModuleManagement
40026: Value(u"type='TYPE_COMMAND'\nsize=1\nlength=1\nunit=''\nscale=0"),
})
# Attribute 'SNMPTrapRecvIP' GUID 10020 Data type TYPE_IP
# SNMP trap server IP-address
def getSNMPTrapRecvIP(self, portnumber): # pylint: disable=W0222
guid = 10020
moduleID = 'M1'
length = 1
valDef = self._guidTable[guid]
data = self._parent.client.getAttribute(
moduleID, guid, portnumber, length)
return self._parent.getObjectFromData(data, valDef, count=length)
def setSNMPTrapRecvIP(self, value, portnumber): # pylint: disable=W0222
guid = 10020
moduleID = 'M1'
valDef = self._guidTable[guid]
data = self._parent.client.setAttribute(
moduleID, guid, convert.value2bin(value, valDef), portnumber)
return self._parent.getObjectFromData(data, valDef, setter=True)
# Attribute 'SNMPTrapRecvPort' GUID 10021 Data type TYPE_UNSIGNED_NUMBER
def getSNMPTrapRecvPort(self, portnumber=1):
guid = 10021
moduleID = 'M1'
length = 1
valDef = self._guidTable[guid]
data = self._parent.client.getAttribute(
moduleID, guid, portnumber, length)
return self._parent.getObjectFromData(data, valDef, count=length)
def setSNMPTrapRecvPort(self, value, portnumber=1):
guid = 10021
moduleID = 'M1'
valDef = self._guidTable[guid]
data = self._parent.client.setAttribute(
moduleID, guid, convert.value2bin(value, valDef), portnumber)
return self._parent.getObjectFromData(data, valDef, setter=True)
def setSNMPCommunityRead(self, value):
guid = 10022
moduleID = 'M1'
portnumber = 0
valDef = self._guidTable[guid]
data = self._parent.client.setAttribute(
moduleID, guid, convert.value2bin(value, valDef), portnumber)
return self._parent.getObjectFromData(data, valDef, setter=True)
|
{
"content_hash": "271df091632ba7de0e99c0383b5ec43c",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 112,
"avg_line_length": 40.779661016949156,
"alnum_prop": 0.6579384871155445,
"repo_name": "Jumpscale/jumpscale_core8",
"id": "8626d9cbe85850e094c91cc4d6fde86f2f46540a",
"size": "2406",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/JumpScale/clients/racktivity/energyswitch/modelfactory/models/common/Master_0_1_2_21.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1113"
},
{
"name": "Cap'n Proto",
"bytes": "9033"
},
{
"name": "Lua",
"bytes": "12538"
},
{
"name": "Python",
"bytes": "4343122"
},
{
"name": "Shell",
"bytes": "7091"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import # to enable import io from stdlib
from collections import defaultdict, deque
import errno
from functools import wraps, partial
from heapq import heappush, heappop
import io
import logging
import six
from six.moves import range
import socket
import struct
import sys
from threading import Thread, Event, RLock
import time
try:
import ssl
except ImportError:
ssl = None # NOQA
if 'gevent.monkey' in sys.modules:
from gevent.queue import Queue, Empty
else:
from six.moves.queue import Queue, Empty # noqa
from cassandra import ConsistencyLevel, AuthenticationFailed, OperationTimedOut
from cassandra.marshal import int32_pack
from cassandra.protocol import (ReadyMessage, AuthenticateMessage, OptionsMessage,
StartupMessage, ErrorMessage, CredentialsMessage,
QueryMessage, ResultMessage, ProtocolHandler,
InvalidRequestException, SupportedMessage,
AuthResponseMessage, AuthChallengeMessage,
AuthSuccessMessage, ProtocolException,
MAX_SUPPORTED_VERSION, RegisterMessage)
from cassandra.util import OrderedDict
log = logging.getLogger(__name__)
# We use an ordered dictionary and specifically add lz4 before
# snappy so that lz4 will be preferred. Changing the order of this
# will change the compression preferences for the driver.
locally_supported_compressions = OrderedDict()
try:
import lz4
except ImportError:
pass
else:
# Cassandra writes the uncompressed message length in big endian order,
# but the lz4 lib requires little endian order, so we wrap these
# functions to handle that
def lz4_compress(byts):
# write length in big-endian instead of little-endian
return int32_pack(len(byts)) + lz4.compress(byts)[4:]
def lz4_decompress(byts):
# flip from big-endian to little-endian
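        # byts[3::-1] reverses the 4-byte length prefix; the rest of the
        # frame (byts[4:]) is passed through unchanged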
return lz4.decompress(byts[3::-1] + byts[4:])
locally_supported_compressions['lz4'] = (lz4_compress, lz4_decompress)
try:
import snappy
except ImportError:
pass
else:
# work around apparently buggy snappy decompress
def decompress(byts):
if byts == '\x00':
return ''
return snappy.decompress(byts)
locally_supported_compressions['snappy'] = (snappy.compress, decompress)
PROTOCOL_VERSION_MASK = 0x7f
HEADER_DIRECTION_FROM_CLIENT = 0x00
HEADER_DIRECTION_TO_CLIENT = 0x80
HEADER_DIRECTION_MASK = 0x80
frame_header_v1_v2 = struct.Struct('>BbBi')
frame_header_v3 = struct.Struct('>BhBi')
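# Both structs cover everything after the leading version byte: flags
# (unsigned byte), stream id (signed byte in v1/v2, signed short in v3),
# opcode (unsigned byte) and body length (signed 32-bit int), all
# big-endian; this is why _read_frame_header unpacks from offset 1.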
class _Frame(object):
def __init__(self, version, flags, stream, opcode, body_offset, end_pos):
self.version = version
self.flags = flags
self.stream = stream
self.opcode = opcode
self.body_offset = body_offset
self.end_pos = end_pos
def __eq__(self, other): # facilitates testing
if isinstance(other, _Frame):
return (self.version == other.version and
self.flags == other.flags and
self.stream == other.stream and
self.opcode == other.opcode and
self.body_offset == other.body_offset and
self.end_pos == other.end_pos)
return NotImplemented
def __str__(self):
return "ver({0}); flags({1:04b}); stream({2}); op({3}); offset({4}); len({5})".format(self.version, self.flags, self.stream, self.opcode, self.body_offset, self.end_pos - self.body_offset)
NONBLOCKING = (errno.EAGAIN, errno.EWOULDBLOCK)
class ConnectionException(Exception):
"""
An unrecoverable error was hit when attempting to use a connection,
or the connection was already closed or defunct.
"""
def __init__(self, message, host=None):
Exception.__init__(self, message)
self.host = host
class ConnectionShutdown(ConnectionException):
"""
Raised when a connection has been marked as defunct or has been closed.
"""
pass
class ProtocolVersionUnsupported(ConnectionException):
"""
Server rejected startup message due to unsupported protocol version
"""
    def __init__(self, host, startup_version):
        super(ProtocolVersionUnsupported, self).__init__(
            "Unsupported protocol version on %s: %d" % (host, startup_version), host)
        self.startup_version = startup_version
class ConnectionBusy(Exception):
"""
An attempt was made to send a message through a :class:`.Connection` that
was already at the max number of in-flight operations.
"""
pass
class ProtocolError(Exception):
"""
Communication did not match the protocol that this driver expects.
"""
pass
def defunct_on_error(f):
@wraps(f)
def wrapper(self, *args, **kwargs):
try:
return f(self, *args, **kwargs)
except Exception as exc:
self.defunct(exc)
return wrapper
DEFAULT_CQL_VERSION = '3.0.0'
if six.PY3:
def int_from_buf_item(i):
return i
else:
int_from_buf_item = ord
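# On Python 3, indexing a bytes object already yields an int; on Python 2
# it yields a one-character str, hence the ord() shim above.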
class Connection(object):
CALLBACK_ERR_THREAD_THRESHOLD = 100
in_buffer_size = 4096
out_buffer_size = 4096
cql_version = None
protocol_version = MAX_SUPPORTED_VERSION
keyspace = None
compression = True
compressor = None
decompressor = None
ssl_options = None
last_error = None
# The current number of operations that are in flight. More precisely,
# the number of request IDs that are currently in use.
in_flight = 0
# A set of available request IDs. When using the v3 protocol or higher,
# this will not initially include all request IDs in order to save memory,
# but the set will grow if it is exhausted.
request_ids = None
# Tracks the highest used request ID in order to help with growing the
# request_ids set
highest_request_id = 0
is_defunct = False
is_closed = False
lock = None
user_type_map = None
msg_received = False
is_unsupported_proto_version = False
is_control_connection = False
signaled_error = False # used for flagging at the pool level
_iobuf = None
_current_frame = None
_socket = None
_socket_impl = socket
_ssl_impl = ssl
def __init__(self, host='127.0.0.1', port=9042, authenticator=None,
ssl_options=None, sockopts=None, compression=True,
cql_version=None, protocol_version=MAX_SUPPORTED_VERSION, is_control_connection=False,
user_type_map=None, connect_timeout=None):
self.host = host
self.port = port
self.authenticator = authenticator
self.ssl_options = ssl_options
self.sockopts = sockopts
self.compression = compression
self.cql_version = cql_version
self.protocol_version = protocol_version
self.is_control_connection = is_control_connection
self.user_type_map = user_type_map
self.connect_timeout = connect_timeout
self._push_watchers = defaultdict(set)
self._requests = {}
self._iobuf = io.BytesIO()
if protocol_version >= 3:
self.max_request_id = (2 ** 15) - 1
# Don't fill the deque with 2**15 items right away. Start with 300 and add
# more if needed.
self.request_ids = deque(range(300))
self.highest_request_id = 299
else:
self.max_request_id = (2 ** 7) - 1
self.request_ids = deque(range(self.max_request_id + 1))
self.highest_request_id = self.max_request_id
self.lock = RLock()
self.connected_event = Event()
@classmethod
def initialize_reactor(cls):
"""
Called once by Cluster.connect(). This should be used by implementations
to set up any resources that will be shared across connections.
"""
pass
@classmethod
def handle_fork(cls):
"""
Called after a forking. This should cleanup any remaining reactor state
from the parent process.
"""
pass
@classmethod
def create_timer(cls, timeout, callback):
raise NotImplementedError()
@classmethod
def factory(cls, host, timeout, *args, **kwargs):
"""
A factory function which returns connections which have
succeeded in connecting and are ready for service (or
raises an exception otherwise).
"""
start = time.time()
kwargs['connect_timeout'] = timeout
conn = cls(host, *args, **kwargs)
elapsed = time.time() - start
conn.connected_event.wait(timeout - elapsed)
if conn.last_error:
if conn.is_unsupported_proto_version:
raise ProtocolVersionUnsupported(host, conn.protocol_version)
raise conn.last_error
elif not conn.connected_event.is_set():
conn.close()
raise OperationTimedOut("Timed out creating connection (%s seconds)" % timeout)
else:
return conn
def _connect_socket(self):
sockerr = None
addresses = socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM)
for (af, socktype, proto, canonname, sockaddr) in addresses:
try:
self._socket = self._socket_impl.socket(af, socktype, proto)
if self.ssl_options:
if not self._ssl_impl:
raise Exception("This version of Python was not compiled with SSL support")
self._socket = self._ssl_impl.wrap_socket(self._socket, **self.ssl_options)
self._socket.settimeout(self.connect_timeout)
self._socket.connect(sockaddr)
sockerr = None
break
except socket.error as err:
if self._socket:
self._socket.close()
self._socket = None
sockerr = err
if sockerr:
raise socket.error(sockerr.errno, "Tried connecting to %s. Last error: %s" % ([a[4] for a in addresses], sockerr.strerror or sockerr))
if self.sockopts:
for args in self.sockopts:
self._socket.setsockopt(*args)
def close(self):
raise NotImplementedError()
def defunct(self, exc):
with self.lock:
if self.is_defunct or self.is_closed:
return
self.is_defunct = True
log.debug("Defuncting connection (%s) to %s:",
id(self), self.host, exc_info=exc)
self.last_error = exc
self.close()
self.error_all_requests(exc)
self.connected_event.set()
return exc
def error_all_requests(self, exc):
with self.lock:
requests = self._requests
self._requests = {}
if not requests:
return
new_exc = ConnectionShutdown(str(exc))
def try_callback(cb):
try:
cb(new_exc)
except Exception:
log.warning("Ignoring unhandled exception while erroring requests for a "
"failed connection (%s) to host %s:",
id(self), self.host, exc_info=True)
# run first callback from this thread to ensure pool state before leaving
cb, _ = requests.popitem()[1]
try_callback(cb)
if not requests:
return
# additional requests are optionally errored from a separate thread
# The default callback and retry logic is fairly expensive -- we don't
# want to tie up the event thread when there are many requests
def err_all_callbacks():
for cb, _ in requests.values():
try_callback(cb)
if len(requests) < Connection.CALLBACK_ERR_THREAD_THRESHOLD:
err_all_callbacks()
else:
# daemon thread here because we want to stay decoupled from the cluster TPE
# TODO: would it make sense to just have a driver-global TPE?
t = Thread(target=err_all_callbacks)
t.daemon = True
t.start()
def get_request_id(self):
"""
This must be called while self.lock is held.
"""
try:
return self.request_ids.popleft()
except IndexError:
self.highest_request_id += 1
# in_flight checks should guarantee this
assert self.highest_request_id <= self.max_request_id
return self.highest_request_id
def handle_pushed(self, response):
log.debug("Message pushed from server: %r", response)
for cb in self._push_watchers.get(response.event_type, []):
try:
cb(response.event_args)
except Exception:
log.exception("Pushed event handler errored, ignoring:")
def send_msg(self, msg, request_id, cb, encoder=ProtocolHandler.encode_message, decoder=ProtocolHandler.decode_message):
if self.is_defunct:
raise ConnectionShutdown("Connection to %s is defunct" % self.host)
elif self.is_closed:
raise ConnectionShutdown("Connection to %s is closed" % self.host)
# queue the decoder function with the request
# this allows us to inject custom functions per request to encode, decode messages
self._requests[request_id] = (cb, decoder)
self.push(encoder(msg, request_id, self.protocol_version, compressor=self.compressor))
return request_id
def wait_for_response(self, msg, timeout=None):
return self.wait_for_responses(msg, timeout=timeout)[0]
def wait_for_responses(self, *msgs, **kwargs):
"""
Returns a list of (success, response) tuples. If success
is False, response will be an Exception. Otherwise, response
will be the normal query response.
If fail_on_error was left as True and one of the requests
failed, the corresponding Exception will be raised.
"""
if self.is_closed or self.is_defunct:
raise ConnectionShutdown("Connection %s is already closed" % (self, ))
timeout = kwargs.get('timeout')
fail_on_error = kwargs.get('fail_on_error', True)
waiter = ResponseWaiter(self, len(msgs), fail_on_error)
# busy wait for sufficient space on the connection
messages_sent = 0
while True:
needed = len(msgs) - messages_sent
with self.lock:
available = min(needed, self.max_request_id - self.in_flight)
request_ids = [self.get_request_id() for _ in range(available)]
self.in_flight += available
for i, request_id in enumerate(request_ids):
self.send_msg(msgs[messages_sent + i],
request_id,
partial(waiter.got_response, index=messages_sent + i))
messages_sent += available
if messages_sent == len(msgs):
break
else:
if timeout is not None:
timeout -= 0.01
if timeout <= 0.0:
raise OperationTimedOut()
time.sleep(0.01)
try:
return waiter.deliver(timeout)
except OperationTimedOut:
raise
except Exception as exc:
self.defunct(exc)
raise
def register_watcher(self, event_type, callback, register_timeout=None):
"""
Register a callback for a given event type.
"""
self._push_watchers[event_type].add(callback)
self.wait_for_response(
RegisterMessage(event_list=[event_type]),
timeout=register_timeout)
def register_watchers(self, type_callback_dict, register_timeout=None):
"""
Register multiple callback/event type pairs, expressed as a dict.
"""
for event_type, callback in type_callback_dict.items():
self._push_watchers[event_type].add(callback)
self.wait_for_response(
RegisterMessage(event_list=type_callback_dict.keys()),
timeout=register_timeout)
def control_conn_disposed(self):
self.is_control_connection = False
self._push_watchers = {}
@defunct_on_error
def _read_frame_header(self):
buf = self._iobuf.getvalue()
pos = len(buf)
if pos:
version = int_from_buf_item(buf[0]) & PROTOCOL_VERSION_MASK
if version > MAX_SUPPORTED_VERSION:
raise ProtocolError("This version of the driver does not support protocol version %d" % version)
frame_header = frame_header_v3 if version >= 3 else frame_header_v1_v2
# this frame header struct is everything after the version byte
header_size = frame_header.size + 1
if pos >= header_size:
flags, stream, op, body_len = frame_header.unpack_from(buf, 1)
if body_len < 0:
raise ProtocolError("Received negative body length: %r" % body_len)
self._current_frame = _Frame(version, flags, stream, op, header_size, body_len + header_size)
return pos
def _reset_frame(self):
self._iobuf = io.BytesIO(self._iobuf.read())
self._iobuf.seek(0, 2) # io.SEEK_END == 2 (constant not present in 2.6)
self._current_frame = None
def process_io_buffer(self):
while True:
if not self._current_frame:
pos = self._read_frame_header()
else:
pos = self._iobuf.tell()
if not self._current_frame or pos < self._current_frame.end_pos:
# we don't have a complete header yet or we
# already saw a header, but we don't have a
# complete message yet
return
else:
frame = self._current_frame
self._iobuf.seek(frame.body_offset)
msg = self._iobuf.read(frame.end_pos - frame.body_offset)
self.process_msg(frame, msg)
self._reset_frame()
@defunct_on_error
def process_msg(self, header, body):
stream_id = header.stream
if stream_id < 0:
callback = None
decoder = ProtocolHandler.decode_message
else:
callback, decoder = self._requests.pop(stream_id, None)
with self.lock:
self.request_ids.append(stream_id)
self.msg_received = True
try:
response = decoder(header.version, self.user_type_map, stream_id,
header.flags, header.opcode, body, self.decompressor)
except Exception as exc:
log.exception("Error decoding response from Cassandra. "
"%s; buffer: %r", header, self._iobuf.getvalue())
if callback is not None:
callback(exc)
self.defunct(exc)
return
try:
if stream_id >= 0:
if isinstance(response, ProtocolException):
if 'unsupported protocol version' in response.message:
self.is_unsupported_proto_version = True
log.error("Closing connection %s due to protocol error: %s", self, response.summary_msg())
self.defunct(response)
if callback is not None:
callback(response)
else:
self.handle_pushed(response)
except Exception:
log.exception("Callback handler errored, ignoring:")
@defunct_on_error
def _send_options_message(self):
if self.cql_version is None and (not self.compression or not locally_supported_compressions):
log.debug("Not sending options message for new connection(%s) to %s "
"because compression is disabled and a cql version was not "
"specified", id(self), self.host)
self._compressor = None
self.cql_version = DEFAULT_CQL_VERSION
self._send_startup_message()
else:
log.debug("Sending initial options message for new connection (%s) to %s", id(self), self.host)
self.send_msg(OptionsMessage(), self.get_request_id(), self._handle_options_response)
@defunct_on_error
def _handle_options_response(self, options_response):
if self.is_defunct:
return
if not isinstance(options_response, SupportedMessage):
if isinstance(options_response, ConnectionException):
raise options_response
else:
log.error("Did not get expected SupportedMessage response; "
"instead, got: %s", options_response)
raise ConnectionException("Did not get expected SupportedMessage "
"response; instead, got: %s"
% (options_response,))
log.debug("Received options response on new connection (%s) from %s",
id(self), self.host)
supported_cql_versions = options_response.cql_versions
remote_supported_compressions = options_response.options['COMPRESSION']
if self.cql_version:
if self.cql_version not in supported_cql_versions:
raise ProtocolError(
"cql_version %r is not supported by remote (w/ native "
"protocol). Supported versions: %r"
% (self.cql_version, supported_cql_versions))
else:
self.cql_version = supported_cql_versions[0]
self._compressor = None
compression_type = None
if self.compression:
overlap = (set(locally_supported_compressions.keys()) &
set(remote_supported_compressions))
if len(overlap) == 0:
log.debug("No available compression types supported on both ends."
" locally supported: %r. remotely supported: %r",
locally_supported_compressions.keys(),
remote_supported_compressions)
else:
compression_type = None
if isinstance(self.compression, six.string_types):
# the user picked a specific compression type ('snappy' or 'lz4')
if self.compression not in remote_supported_compressions:
raise ProtocolError(
"The requested compression type (%s) is not supported by the Cassandra server at %s"
% (self.compression, self.host))
compression_type = self.compression
else:
# our locally supported compressions are ordered to prefer
# lz4, if available
for k in locally_supported_compressions.keys():
if k in overlap:
compression_type = k
break
# set the decompressor here, but set the compressor only after
# a successful Ready message
self._compressor, self.decompressor = \
locally_supported_compressions[compression_type]
self._send_startup_message(compression_type)
@defunct_on_error
def _send_startup_message(self, compression=None):
log.debug("Sending StartupMessage on %s", self)
opts = {}
if compression:
opts['COMPRESSION'] = compression
sm = StartupMessage(cqlversion=self.cql_version, options=opts)
self.send_msg(sm, self.get_request_id(), cb=self._handle_startup_response)
log.debug("Sent StartupMessage on %s", self)
@defunct_on_error
def _handle_startup_response(self, startup_response, did_authenticate=False):
if self.is_defunct:
return
if isinstance(startup_response, ReadyMessage):
log.debug("Got ReadyMessage on new connection (%s) from %s", id(self), self.host)
if self._compressor:
self.compressor = self._compressor
self.connected_event.set()
elif isinstance(startup_response, AuthenticateMessage):
log.debug("Got AuthenticateMessage on new connection (%s) from %s: %s",
id(self), self.host, startup_response.authenticator)
if self.authenticator is None:
raise AuthenticationFailed('Remote end requires authentication.')
self.authenticator_class = startup_response.authenticator
if isinstance(self.authenticator, dict):
log.debug("Sending credentials-based auth response on %s", self)
cm = CredentialsMessage(creds=self.authenticator)
callback = partial(self._handle_startup_response, did_authenticate=True)
self.send_msg(cm, self.get_request_id(), cb=callback)
else:
log.debug("Sending SASL-based auth response on %s", self)
initial_response = self.authenticator.initial_response()
initial_response = "" if initial_response is None else initial_response
self.send_msg(AuthResponseMessage(initial_response), self.get_request_id(), self._handle_auth_response)
elif isinstance(startup_response, ErrorMessage):
log.debug("Received ErrorMessage on new connection (%s) from %s: %s",
id(self), self.host, startup_response.summary_msg())
if did_authenticate:
raise AuthenticationFailed(
"Failed to authenticate to %s: %s" %
(self.host, startup_response.summary_msg()))
else:
raise ConnectionException(
"Failed to initialize new connection to %s: %s"
% (self.host, startup_response.summary_msg()))
elif isinstance(startup_response, ConnectionShutdown):
log.debug("Connection to %s was closed during the startup handshake", (self.host))
raise startup_response
else:
msg = "Unexpected response during Connection setup: %r"
log.error(msg, startup_response)
raise ProtocolError(msg % (startup_response,))
@defunct_on_error
def _handle_auth_response(self, auth_response):
if self.is_defunct:
return
if isinstance(auth_response, AuthSuccessMessage):
log.debug("Connection %s successfully authenticated", self)
self.authenticator.on_authentication_success(auth_response.token)
if self._compressor:
self.compressor = self._compressor
self.connected_event.set()
elif isinstance(auth_response, AuthChallengeMessage):
response = self.authenticator.evaluate_challenge(auth_response.challenge)
msg = AuthResponseMessage("" if response is None else response)
log.debug("Responding to auth challenge on %s", self)
self.send_msg(msg, self.get_request_id(), self._handle_auth_response)
elif isinstance(auth_response, ErrorMessage):
log.debug("Received ErrorMessage on new connection (%s) from %s: %s",
id(self), self.host, auth_response.summary_msg())
raise AuthenticationFailed(
"Failed to authenticate to %s: %s" %
(self.host, auth_response.summary_msg()))
elif isinstance(auth_response, ConnectionShutdown):
log.debug("Connection to %s was closed during the authentication process", self.host)
raise auth_response
else:
msg = "Unexpected response during Connection authentication to %s: %r"
log.error(msg, self.host, auth_response)
raise ProtocolError(msg % (self.host, auth_response))
def set_keyspace_blocking(self, keyspace):
if not keyspace or keyspace == self.keyspace:
return
query = QueryMessage(query='USE "%s"' % (keyspace,),
consistency_level=ConsistencyLevel.ONE)
try:
result = self.wait_for_response(query)
except InvalidRequestException as ire:
# the keyspace probably doesn't exist
raise ire.to_exception()
except Exception as exc:
conn_exc = ConnectionException(
"Problem while setting keyspace: %r" % (exc,), self.host)
self.defunct(conn_exc)
raise conn_exc
if isinstance(result, ResultMessage):
self.keyspace = keyspace
else:
conn_exc = ConnectionException(
"Problem while setting keyspace: %r" % (result,), self.host)
self.defunct(conn_exc)
raise conn_exc
def set_keyspace_async(self, keyspace, callback):
"""
Use this in order to avoid deadlocking the event loop thread.
When the operation completes, `callback` will be called with
two arguments: this connection and an Exception if an error
occurred, otherwise :const:`None`.
"""
if not keyspace or keyspace == self.keyspace:
callback(self, None)
return
query = QueryMessage(query='USE "%s"' % (keyspace,),
consistency_level=ConsistencyLevel.ONE)
def process_result(result):
if isinstance(result, ResultMessage):
self.keyspace = keyspace
callback(self, None)
elif isinstance(result, InvalidRequestException):
callback(self, result.to_exception())
else:
callback(self, self.defunct(ConnectionException(
"Problem while setting keyspace: %r" % (result,), self.host)))
request_id = None
# we use a busy wait on the lock here because:
# - we'll only spin if the connection is at max capacity, which is very
# unlikely for a set_keyspace call
# - it allows us to avoid signaling a condition every time a request completes
while True:
with self.lock:
if self.in_flight < self.max_request_id:
request_id = self.get_request_id()
self.in_flight += 1
break
time.sleep(0.001)
self.send_msg(query, request_id, process_result)
@property
def is_idle(self):
return not self.msg_received
def reset_idle(self):
self.msg_received = False
def __str__(self):
status = ""
if self.is_defunct:
status = " (defunct)"
elif self.is_closed:
status = " (closed)"
return "<%s(%r) %s:%d%s>" % (self.__class__.__name__, id(self), self.host, self.port, status)
__repr__ = __str__
class ResponseWaiter(object):
def __init__(self, connection, num_responses, fail_on_error):
self.connection = connection
self.pending = num_responses
self.fail_on_error = fail_on_error
self.error = None
self.responses = [None] * num_responses
self.event = Event()
def got_response(self, response, index):
with self.connection.lock:
self.connection.in_flight -= 1
if isinstance(response, Exception):
if hasattr(response, 'to_exception'):
response = response.to_exception()
if self.fail_on_error:
self.error = response
self.event.set()
else:
self.responses[index] = (False, response)
else:
if not self.fail_on_error:
self.responses[index] = (True, response)
else:
self.responses[index] = response
self.pending -= 1
if not self.pending:
self.event.set()
def deliver(self, timeout=None):
"""
If fail_on_error was set to False, a list of (success, response)
tuples will be returned. If success is False, response will be
an Exception. Otherwise, response will be the normal query response.
If fail_on_error was left as True and one of the requests
failed, the corresponding Exception will be raised. Otherwise,
the normal response will be returned.
"""
self.event.wait(timeout)
if self.error:
raise self.error
elif not self.event.is_set():
raise OperationTimedOut()
else:
return self.responses
class HeartbeatFuture(object):
def __init__(self, connection, owner):
self._exception = None
self._event = Event()
self.connection = connection
self.owner = owner
log.debug("Sending options message heartbeat on idle connection (%s) %s",
id(connection), connection.host)
with connection.lock:
if connection.in_flight < connection.max_request_id:
connection.in_flight += 1
connection.send_msg(OptionsMessage(), connection.get_request_id(), self._options_callback)
else:
self._exception = Exception("Failed to send heartbeat because connection 'in_flight' exceeds threshold")
self._event.set()
def wait(self, timeout):
self._event.wait(timeout)
if self._event.is_set():
if self._exception:
raise self._exception
else:
raise OperationTimedOut()
def _options_callback(self, response):
if not isinstance(response, SupportedMessage):
if isinstance(response, ConnectionException):
self._exception = response
else:
self._exception = ConnectionException("Received unexpected response to OptionsMessage: %s"
% (response,))
log.debug("Received options response on connection (%s) from %s",
id(self.connection), self.connection.host)
self._event.set()
class ConnectionHeartbeat(Thread):
def __init__(self, interval_sec, get_connection_holders):
Thread.__init__(self, name="Connection heartbeat")
self._interval = interval_sec
self._get_connection_holders = get_connection_holders
self._shutdown_event = Event()
self.daemon = True
self.start()
class ShutdownException(Exception):
pass
def run(self):
self._shutdown_event.wait(self._interval)
while not self._shutdown_event.is_set():
start_time = time.time()
futures = []
failed_connections = []
try:
for connections, owner in [(o.get_connections(), o) for o in self._get_connection_holders()]:
for connection in connections:
self._raise_if_stopped()
if not (connection.is_defunct or connection.is_closed):
if connection.is_idle:
try:
futures.append(HeartbeatFuture(connection, owner))
except Exception:
log.warning("Failed sending heartbeat message on connection (%s) to %s",
id(connection), connection.host, exc_info=True)
failed_connections.append((connection, owner))
else:
connection.reset_idle()
else:
                            # make sure the owner sees this defunct/closed connection
owner.return_connection(connection)
self._raise_if_stopped()
for f in futures:
self._raise_if_stopped()
connection = f.connection
try:
f.wait(self._interval)
# TODO: move this, along with connection locks in pool, down into Connection
with connection.lock:
connection.in_flight -= 1
connection.reset_idle()
except Exception:
log.warning("Heartbeat failed for connection (%s) to %s",
id(connection), connection.host, exc_info=True)
failed_connections.append((f.connection, f.owner))
for connection, owner in failed_connections:
self._raise_if_stopped()
connection.defunct(Exception('Connection heartbeat failure'))
owner.return_connection(connection)
except self.ShutdownException:
pass
except Exception:
log.error("Failed connection heartbeat", exc_info=True)
elapsed = time.time() - start_time
self._shutdown_event.wait(max(self._interval - elapsed, 0.01))
def stop(self):
self._shutdown_event.set()
self.join()
def _raise_if_stopped(self):
if self._shutdown_event.is_set():
raise self.ShutdownException()
class Timer(object):
canceled = False
def __init__(self, timeout, callback):
self.end = time.time() + timeout
self.callback = callback
if timeout < 0:
self.callback()
def cancel(self):
self.canceled = True
def finish(self, time_now):
if self.canceled:
return True
if time_now >= self.end:
self.callback()
return True
return False
class TimerManager(object):
def __init__(self):
self._queue = []
self._new_timers = []
def add_timer(self, timer):
"""
called from client thread with a Timer object
"""
self._new_timers.append((timer.end, timer))
def service_timeouts(self):
"""
run callbacks on all expired timers
Called from the event thread
:return: next end time, or None
"""
queue = self._queue
if self._new_timers:
new_timers = self._new_timers
while new_timers:
heappush(queue, new_timers.pop())
if queue:
now = time.time()
while queue:
try:
timer = queue[0][1]
if timer.finish(now):
heappop(queue)
else:
return timer.end
except Exception:
log.exception("Exception while servicing timeout callback: ")
@property
def next_timeout(self):
try:
return self._queue[0][0]
except IndexError:
pass
|
{
"content_hash": "614002caca83102b9b2922ad6d300185",
"timestamp": "",
"source": "github",
"line_count": 1045,
"max_line_length": 196,
"avg_line_length": 37.65071770334928,
"alnum_prop": 0.5769220993773033,
"repo_name": "tempbottle/python-driver",
"id": "32ffa85710167cd92d199e470672b8a725d7a24b",
"size": "39925",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "cassandra/connection.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "28918"
},
{
"name": "Python",
"bytes": "1711786"
}
],
"symlink_target": ""
}
|
import sys
from twisted.internet import reactor
from twisted.python import log
from twisted.web.server import Site
from twisted.web.static import File
from autobahn.twisted.websocket import WebSocketServerProtocol, \
WebSocketServerFactory, \
listenWS
from autobahn.websocket.compress import PerMessageDeflateOffer, \
PerMessageDeflateOfferAccept
class EchoServerProtocol(WebSocketServerProtocol):
def onConnect(self, request):
print("WebSocket connection request by {}".format(request.peer))
def onOpen(self):
print("WebSocket extensions in use: {}".format(self.websocket_extensions_in_use))
def onMessage(self, payload, isBinary):
self.sendMessage(payload, isBinary)
if __name__ == '__main__':
log.startLogging(sys.stdout)
factory = WebSocketServerFactory(u"ws://127.0.0.1:9000")
factory.protocol = EchoServerProtocol
# Enable WebSocket extension "permessage-deflate".
# Function to accept offers from the client ..
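    # Returning a PerMessageDeflateOfferAccept accepts the offer as-is;
    # returning None (when no deflate offer is present) declines
    # compression while the WebSocket connection itself still proceeds.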
def accept(offers):
for offer in offers:
if isinstance(offer, PerMessageDeflateOffer):
return PerMessageDeflateOfferAccept(offer)
factory.setProtocolOptions(perMessageCompressionAccept=accept)
# run server
listenWS(factory)
webdir = File(".")
web = Site(webdir)
reactor.listenTCP(8080, web)
reactor.run()
|
{
"content_hash": "5d406062092e7ee33305dd585ba76781",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 89,
"avg_line_length": 26.48076923076923,
"alnum_prop": 0.7167755991285403,
"repo_name": "Jenselme/AutobahnPython",
"id": "ffd61150d98690901dc27a7fc6c122dc18c8e43e",
"size": "2654",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "examples/twisted/websocket/echo_compressed/server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "3849"
},
{
"name": "Python",
"bytes": "1065688"
}
],
"symlink_target": ""
}
|
from haystack.utils.geo import Point, Distance
def point_from_lat_long(value):
if isinstance(value, Point):
return value
if isinstance(value, basestring):
lat, lng = value.split(',')
elif isinstance(value, (list, tuple)):
lat, lng = value
else:
raise ValueError("I don't know what to do with this.")
return Point(float(lng), float(lat))
def point_from_long_lat(value):
if isinstance(value, Point):
return value
if isinstance(value, basestring):
lng, lat = value.split(',')
elif isinstance(value, (list, tuple)):
lng, lat = value
else:
raise ValueError("I don't know what to do with this.")
return Point(float(lng), float(lat))
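# Note: haystack's Point follows the GEOS (x, y) convention, i.e.
# (longitude, latitude); both helpers therefore pass lng first even
# though their accepted input orders differ.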
|
{
"content_hash": "b1d707f6edafc71c253de740298fc299",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 62,
"avg_line_length": 29.4,
"alnum_prop": 0.6244897959183674,
"repo_name": "apnarm/django-apn-search",
"id": "df00ce81be255c9b60cfa8ee0dd72bfae80312f6",
"size": "735",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "apn_search/utils/geo.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "82253"
}
],
"symlink_target": ""
}
|
from lib.common import helpers
class Module:
def __init__(self, mainMenu, params=[]):
# metadata info about the module, not modified during runtime
self.info = {
# name for the module that will appear in module menus
'Name': 'SSHCommand',
# list of one or more authors for the module
'Author': ['@424f424f'],
# more verbose multi-line description of the module
'Description': 'This module will send a command via ssh.',
# True if the module needs to run in the background
'Background' : False,
# File extension to save the file as
'OutputExtension' : "",
# if the module needs administrative privileges
'NeedsAdmin' : False,
# True if the method doesn't touch disk/is reasonably opsec safe
'OpsecSafe' : True,
# the module language
'Language' : 'python',
# the minimum language version needed
'MinLanguageVersion' : '2.6',
# list of any references/other comments
'Comments': [
'http://stackoverflow.com/questions/17118239/how-to-give-subprocess-a-password-and-get-stdout-at-the-same-time'
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
# The 'Agent' option is the only one that MUST be in a module
'Description' : 'Agent to use ssh from.',
'Required' : True,
'Value' : ''
},
'Login' : {
                'Description'   :   'SSH login target, e.g. user@127.0.0.1.',
'Required' : True,
'Value' : ''
},
'Password' : {
'Description' : 'Password',
'Required' : True,
'Value' : ''
},
'Command' : {
'Description' : 'Command',
'Required' : True,
'Value' : 'id'
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
# During instantiation, any settable option parameters
# are passed as an object set to the module and the
# options dictionary is automatically set. This is mostly
# in case options are passed on the command line
if params:
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self):
login = self.options['Login']['Value']
password = self.options['Password']['Value']
command = self.options['Command']['Value']
# generate the launcher code
script = """
import os
import pty
def wall(host, pw):
import os,pty
pid, fd = pty.fork()
if pid == 0: # Child
os.execvp('ssh', ['ssh', '-o StrictHostKeyChecking=no', host, '%s'])
os._exit(1) # fail to execv
# read '..... password:', write password
os.read(fd, 1024)
os.write(fd, '\\n' + pw + '\\n')
result = []
while True:
try:
data = os.read(fd, 1024)
if data == "Password:":
os.write(fd, pw + '\\n')
except OSError:
break
if not data:
break
result.append(data)
pid, status = os.waitpid(pid, 0)
return status, ''.join(result)
status, output = wall('%s','%s')
print status
print output
""" % (command, login, password)
return script
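# Hedged usage sketch: params are [Name, Value] pairs, per the loop in
# __init__ above (mainMenu is only stored, so None works for a dry run):
#
#   m = Module(mainMenu=None, params=[['Login', 'user@10.0.0.5'],
#                                     ['Password', 'secret']])
#   payload = m.generate()   # Python 2 source string to run on the agent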
|
{
"content_hash": "e0b9ead781a1bc2c0e0f20ce54a8d74c",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 127,
"avg_line_length": 31.515873015873016,
"alnum_prop": 0.5021405187610174,
"repo_name": "api0cradle/Empire",
"id": "39ec4d2bb0652e5370d63eb6f225d3d3ea224619",
"size": "3971",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/modules/python/lateral_movement/multi/ssh_command.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "PHP",
"bytes": "2563"
},
{
"name": "PowerShell",
"bytes": "10014305"
},
{
"name": "Python",
"bytes": "875497"
},
{
"name": "Shell",
"bytes": "2054"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Ip',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ip', models.CharField(max_length=15, unique=True)),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Page',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255, unique=True)),
('namespace', models.PositiveIntegerField()),
('backlink', models.TextField(null=True)),
('category', models.TextField(null=True)),
('acl', models.TextField(null=True)),
('is_deleted', models.BooleanField(default=False)),
('is_created', models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name='Revision',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField()),
('createDate', models.DateTimeField(auto_now_add=True)),
('comment', models.CharField(max_length=255, null=True)),
('rev', models.PositiveIntegerField()),
('increase', models.IntegerField()),
('ip', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='mywiki.Ip')),
('page', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mywiki.Page')),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
),
]
|
{
"content_hash": "f45d662429e03aa9b9eb07d4eb0e246a",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 130,
"avg_line_length": 44.05769230769231,
"alnum_prop": 0.5766041030117852,
"repo_name": "Oriwiki/DuckPy",
"id": "bd4e524003590815b6fa40200f9836f18346e00c",
"size": "2364",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mywiki/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "766"
},
{
"name": "HTML",
"bytes": "55356"
},
{
"name": "JavaScript",
"bytes": "319"
},
{
"name": "Python",
"bytes": "117879"
}
],
"symlink_target": ""
}
|
import pathlib
import typing
from clize import Clize, run, converters # type: ignore
def main(
# The ideal case: Clize and mypy understand the same annotation
same_annotation_for_both_str: str,
same_annotation_for_both_converter: pathlib.Path,
*,
# Unfortunately, Clize doesn't understand typing.Optional yet, and just uses int.
# You'll have to separate the typing and Clize annotation using typing.Annotated
optional_value: typing.Annotated[typing.Optional[int], Clize[int]] = None,
# Perhaps confusingly, typing.Optional does not refer to whether a parameter is required,
# only whether None is an acceptable value.
optional_parameter: typing.Annotated[int, Clize[int]] = 1,
# If you're using other clize annotations, like parameter aliases,
# you'll have to use typing.Annotated
aliased: typing.Annotated[int, Clize["n"]],
# Value converters do not yet create typing annotations,
# so you have to define the type separately using typing.Annotated.
# Additionally, the type created by converters.file() is not public, so you have to rely on Any for now.
file_opener: typing.Annotated[typing.Any, Clize[converters.file()]]
):
"""
Example CLI that uses typing and Clize together
In Clize 5.0 this remains fairly rudimentary,
so you may have to repeat yourself
when Clize and your type checker (e.g. mypy) do not understand the same annotation.
"""
print(
same_annotation_for_both_str.join(["abc"]),
same_annotation_for_both_converter.exists(),
optional_value + 1 if optional_value is not None else 0,
optional_parameter + 1,
aliased + 1,
file_opener,
)
if __name__ == "__main__":
run(main)
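# Hedged invocation sketch (flag spellings assumed from clize's usual
# kebab-case mapping of the keyword-only parameters above):
#
#   python typed_cli.py , ./some/path --optional-parameter=2 -n 7 \
#       --file-opener=notes.txt
#
# The two leading positionals fill same_annotation_for_both_str and
# same_annotation_for_both_converter; -n is the alias declared for
# `aliased`, and --file-opener feeds the converters.file() converter.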
|
{
"content_hash": "48d118f610ce4bb5bfd1d9dc5ce7e914",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 108,
"avg_line_length": 39.77272727272727,
"alnum_prop": 0.6948571428571428,
"repo_name": "epsy/clize",
"id": "8af7d75ba440d8f5b81e37a59696a7fed0a2f5fa",
"size": "1750",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/typed_cli.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "319968"
}
],
"symlink_target": ""
}
|
from django.db import models
from daiquiri.core.managers import AccessLevelManager
from daiquiri.jobs.managers import JobManager
class QueryJobManager(JobManager):
def get_size(self, user):
# get the size of all the tables of this user
return self.filter_by_owner(user).exclude(phase=self.model.PHASE_ARCHIVED).aggregate(models.Sum('size'))['size__sum'] or 0
class ExampleManager(AccessLevelManager):
pass
|
{
"content_hash": "85ad845e2f69a1fcb789d7bc77a72829",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 130,
"avg_line_length": 27.3125,
"alnum_prop": 0.7505720823798627,
"repo_name": "aipescience/django-daiquiri",
"id": "f786e0411807df4a2117109ad65e09f10eacef6b",
"size": "437",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "daiquiri/query/managers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "28598"
},
{
"name": "HTML",
"bytes": "236579"
},
{
"name": "JavaScript",
"bytes": "97087"
},
{
"name": "Python",
"bytes": "602159"
}
],
"symlink_target": ""
}
|
from heat.common.i18n import _
from heat.engine import clients
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine import support
class MonascaAlarmDefinition(resource.Resource):
"""Heat Template Resource for Monasca Alarm definition.
    This plug-in requires python-monascaclient>=1.0.22. To enable it,
    install the client library and restart the heat-engine.
"""
support_status = support.SupportStatus(
version='5.0.0',
status=support.UNSUPPORTED)
default_client_name = 'monasca'
entity = 'alarm_definitions'
SEVERITY_LEVELS = (
LOW, MEDIUM, HIGH, CRITICAL
) = (
'low', 'medium', 'high', 'critical'
)
PROPERTIES = (
NAME, DESCRIPTION, EXPRESSION, MATCH_BY, SEVERITY,
OK_ACTIONS, ALARM_ACTIONS, UNDETERMINED_ACTIONS,
ACTIONS_ENABLED
) = (
'name', 'description', 'expression', 'match_by', 'severity',
'ok_actions', 'alarm_actions', 'undetermined_actions',
'actions_enabled'
)
properties_schema = {
NAME: properties.Schema(
properties.Schema.STRING,
_('Name of the alarm. By default, physical resource name is '
'used.'),
update_allowed=True
),
DESCRIPTION: properties.Schema(
properties.Schema.STRING,
_('Description of the alarm.'),
update_allowed=True
),
EXPRESSION: properties.Schema(
properties.Schema.STRING,
_('Expression of the alarm to evaluate.'),
update_allowed=False,
required=True
),
MATCH_BY: properties.Schema(
properties.Schema.LIST,
_('The metric dimensions to match to the alarm dimensions. '
'One or more dimension key names separated by a comma.')
),
SEVERITY: properties.Schema(
properties.Schema.STRING,
_('Severity of the alarm.'),
update_allowed=True,
constraints=[constraints.AllowedValues(
SEVERITY_LEVELS
)],
default=LOW
),
OK_ACTIONS: properties.Schema(
properties.Schema.LIST,
_('The notification methods to use when an alarm state is OK.'),
update_allowed=True,
schema=properties.Schema(
properties.Schema.STRING,
_('Monasca notification'),
constraints=[constraints.CustomConstraint(
'monasca.notification')
]
)
),
ALARM_ACTIONS: properties.Schema(
properties.Schema.LIST,
_('The notification methods to use when an alarm state is ALARM.'),
update_allowed=True,
schema=properties.Schema(
properties.Schema.STRING,
_('Monasca notification'),
constraints=[constraints.CustomConstraint(
'monasca.notification')
]
)
),
UNDETERMINED_ACTIONS: properties.Schema(
properties.Schema.LIST,
_('The notification methods to use when an alarm state is '
'UNDETERMINED.'),
update_allowed=True,
schema=properties.Schema(
properties.Schema.STRING,
_('Monasca notification'),
constraints=[constraints.CustomConstraint(
'monasca.notification')
]
)
),
ACTIONS_ENABLED: properties.Schema(
properties.Schema.BOOLEAN,
_('Whether to enable the actions or not.'),
update_allowed=True,
default=True,
),
}
def handle_create(self):
args = dict(
name=(self.properties[self.NAME] or
self.physical_resource_name()),
description=self.properties[self.DESCRIPTION],
expression=self.properties[self.EXPRESSION],
match_by=self.properties[self.MATCH_BY],
severity=self.properties[self.SEVERITY],
ok_actions=self.properties[self.OK_ACTIONS],
alarm_actions=self.properties[self.ALARM_ACTIONS],
undetermined_actions=self.properties[
self.UNDETERMINED_ACTIONS]
)
alarm = self.client().alarm_definitions.create(**args)
self.resource_id_set(alarm['id'])
# Monasca enables action by default
actions_enabled = self.properties[self.ACTIONS_ENABLED]
if not actions_enabled:
self.client().alarm_definitions.patch(
alarm_id=self.resource_id,
actions_enabled=actions_enabled
)
def handle_update(self, prop_diff, json_snippet=None, tmpl_diff=None):
args = dict(alarm_id=self.resource_id)
if prop_diff.get(self.NAME):
args['name'] = prop_diff.get(self.NAME)
if prop_diff.get(self.DESCRIPTION):
args['description'] = prop_diff.get(self.DESCRIPTION)
if prop_diff.get(self.SEVERITY):
args['severity'] = prop_diff.get(self.SEVERITY)
if prop_diff.get(self.OK_ACTIONS):
args['ok_actions'] = prop_diff.get(self.OK_ACTIONS)
if prop_diff.get(self.ALARM_ACTIONS):
args['alarm_actions'] = prop_diff.get(self.ALARM_ACTIONS)
if prop_diff.get(self.UNDETERMINED_ACTIONS):
args['undetermined_actions'] = prop_diff.get(
self.UNDETERMINED_ACTIONS
)
        # membership test, not truthiness: actions_enabled may be False
        if self.ACTIONS_ENABLED in prop_diff:
            args['actions_enabled'] = prop_diff.get(self.ACTIONS_ENABLED)
self.client().alarm_definitions.patch(**args)
def handle_delete(self):
if self.resource_id is not None:
try:
self.client().alarm_definitions.delete(
alarm_id=self.resource_id)
except Exception as ex:
self.client_plugin().ignore_not_found(ex)
# FIXME(kanagaraj-manickam) Remove this method once monasca defect 1484900
# is fixed.
def _show_resource(self):
return self.client().alarm_definitions.get(self.resource_id)
def resource_mapping():
return {
'OS::Monasca::AlarmDefinition': MonascaAlarmDefinition
}
def available_resource_mapping():
if not clients.has_client(MonascaAlarmDefinition.default_client_name):
return {}
return resource_mapping()
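# Hedged template sketch (resource type from resource_mapping() above,
# property names from PROPERTIES; the expression value is hypothetical):
#
#   cpu_alarm:
#     type: OS::Monasca::AlarmDefinition
#     properties:
#       name: cpu_high
#       expression: avg(cpu.user_perc) > 90
#       severity: high
#       actions_enabled: true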
|
{
"content_hash": "350b60ca971d6f2822f4cf00d1322642",
"timestamp": "",
"source": "github",
"line_count": 194,
"max_line_length": 79,
"avg_line_length": 33.896907216494846,
"alnum_prop": 0.5799878345498783,
"repo_name": "pratikmallya/heat",
"id": "063a6e1cd5efd015390f42c893ddb0bcefa275bd",
"size": "7151",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "heat/engine/resources/openstack/monasca/alarm_definition.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6929579"
},
{
"name": "Shell",
"bytes": "33092"
}
],
"symlink_target": ""
}
|
import os
import random
import struct
import unittest2
from mock import MagicMock, patch
from kafka import KafkaClient
from kafka.common import (
ProduceRequest, BrokerMetadata, PartitionMetadata,
TopicAndPartition, KafkaUnavailableError,
LeaderUnavailableError, PartitionUnavailableError
)
from kafka.protocol import (
create_message, KafkaProtocol
)
class TestKafkaClient(unittest2.TestCase):
def test_init_with_list(self):
with patch.object(KafkaClient, 'load_metadata_for_topics'):
client = KafkaClient(hosts=['kafka01:9092', 'kafka02:9092', 'kafka03:9092'])
self.assertItemsEqual(
[('kafka01', 9092), ('kafka02', 9092), ('kafka03', 9092)],
client.hosts)
def test_init_with_csv(self):
with patch.object(KafkaClient, 'load_metadata_for_topics'):
client = KafkaClient(hosts='kafka01:9092,kafka02:9092,kafka03:9092')
self.assertItemsEqual(
[('kafka01', 9092), ('kafka02', 9092), ('kafka03', 9092)],
client.hosts)
def test_init_with_unicode_csv(self):
with patch.object(KafkaClient, 'load_metadata_for_topics'):
client = KafkaClient(hosts=u'kafka01:9092,kafka02:9092,kafka03:9092')
self.assertItemsEqual(
[('kafka01', 9092), ('kafka02', 9092), ('kafka03', 9092)],
client.hosts)
def test_send_broker_unaware_request_fail(self):
'Tests that call fails when all hosts are unavailable'
mocked_conns = {
('kafka01', 9092): MagicMock(),
('kafka02', 9092): MagicMock()
}
# inject KafkaConnection side effects
mocked_conns[('kafka01', 9092)].send.side_effect = RuntimeError("kafka01 went away (unittest)")
mocked_conns[('kafka02', 9092)].send.side_effect = RuntimeError("Kafka02 went away (unittest)")
def mock_get_conn(host, port):
return mocked_conns[(host, port)]
# patch to avoid making requests before we want it
with patch.object(KafkaClient, 'load_metadata_for_topics'):
with patch.object(KafkaClient, '_get_conn', side_effect=mock_get_conn):
client = KafkaClient(hosts=['kafka01:9092', 'kafka02:9092'])
with self.assertRaises(KafkaUnavailableError):
client._send_broker_unaware_request(1, 'fake request')
for key, conn in mocked_conns.iteritems():
conn.send.assert_called_with(1, 'fake request')
def test_send_broker_unaware_request(self):
'Tests that call works when at least one of the host is available'
mocked_conns = {
('kafka01', 9092): MagicMock(),
('kafka02', 9092): MagicMock(),
('kafka03', 9092): MagicMock()
}
# inject KafkaConnection side effects
mocked_conns[('kafka01', 9092)].send.side_effect = RuntimeError("kafka01 went away (unittest)")
mocked_conns[('kafka02', 9092)].recv.return_value = 'valid response'
mocked_conns[('kafka03', 9092)].send.side_effect = RuntimeError("kafka03 went away (unittest)")
def mock_get_conn(host, port):
return mocked_conns[(host, port)]
# patch to avoid making requests before we want it
with patch.object(KafkaClient, 'load_metadata_for_topics'):
with patch.object(KafkaClient, '_get_conn', side_effect=mock_get_conn):
client = KafkaClient(hosts='kafka01:9092,kafka02:9092')
resp = client._send_broker_unaware_request(1, 'fake request')
self.assertEqual('valid response', resp)
mocked_conns[('kafka02', 9092)].recv.assert_called_with(1)
@patch('kafka.client.KafkaConnection')
@patch('kafka.client.KafkaProtocol')
def test_load_metadata(self, protocol, conn):
"Load metadata for all topics"
conn.recv.return_value = 'response' # anything but None
brokers = {}
brokers[0] = BrokerMetadata(1, 'broker_1', 4567)
brokers[1] = BrokerMetadata(2, 'broker_2', 5678)
topics = {}
topics['topic_1'] = {
0: PartitionMetadata('topic_1', 0, 1, [1, 2], [1, 2])
}
topics['topic_noleader'] = {
0: PartitionMetadata('topic_noleader', 0, -1, [], []),
1: PartitionMetadata('topic_noleader', 1, -1, [], [])
}
topics['topic_no_partitions'] = {}
topics['topic_3'] = {
0: PartitionMetadata('topic_3', 0, 0, [0, 1], [0, 1]),
1: PartitionMetadata('topic_3', 1, 1, [1, 0], [1, 0]),
2: PartitionMetadata('topic_3', 2, 0, [0, 1], [0, 1])
}
protocol.decode_metadata_response.return_value = (brokers, topics)
# client loads metadata at init
client = KafkaClient(hosts=['broker_1:4567'])
self.assertDictEqual({
TopicAndPartition('topic_1', 0): brokers[1],
TopicAndPartition('topic_noleader', 0): None,
TopicAndPartition('topic_noleader', 1): None,
TopicAndPartition('topic_3', 0): brokers[0],
TopicAndPartition('topic_3', 1): brokers[1],
TopicAndPartition('topic_3', 2): brokers[0]},
client.topics_to_brokers)
@patch('kafka.client.KafkaConnection')
@patch('kafka.client.KafkaProtocol')
def test_get_leader_for_partitions_reloads_metadata(self, protocol, conn):
"Get leader for partitions reload metadata if it is not available"
conn.recv.return_value = 'response' # anything but None
brokers = {}
brokers[0] = BrokerMetadata(0, 'broker_1', 4567)
brokers[1] = BrokerMetadata(1, 'broker_2', 5678)
topics = {'topic_no_partitions': {}}
protocol.decode_metadata_response.return_value = (brokers, topics)
client = KafkaClient(hosts=['broker_1:4567'])
# topic metadata is loaded but empty
self.assertDictEqual({}, client.topics_to_brokers)
topics['topic_no_partitions'] = {
0: PartitionMetadata('topic_no_partitions', 0, 0, [0, 1], [0, 1])
}
protocol.decode_metadata_response.return_value = (brokers, topics)
# calling _get_leader_for_partition (from any broker aware request)
# will try loading metadata again for the same topic
leader = client._get_leader_for_partition('topic_no_partitions', 0)
self.assertEqual(brokers[0], leader)
self.assertDictEqual({
TopicAndPartition('topic_no_partitions', 0): brokers[0]},
client.topics_to_brokers)
@patch('kafka.client.KafkaConnection')
@patch('kafka.client.KafkaProtocol')
def test_get_leader_for_unassigned_partitions(self, protocol, conn):
"Get leader raises if no partitions is defined for a topic"
conn.recv.return_value = 'response' # anything but None
brokers = {}
brokers[0] = BrokerMetadata(0, 'broker_1', 4567)
brokers[1] = BrokerMetadata(1, 'broker_2', 5678)
topics = {'topic_no_partitions': {}}
protocol.decode_metadata_response.return_value = (brokers, topics)
client = KafkaClient(hosts=['broker_1:4567'])
self.assertDictEqual({}, client.topics_to_brokers)
with self.assertRaises(PartitionUnavailableError):
client._get_leader_for_partition('topic_no_partitions', 0)
@patch('kafka.client.KafkaConnection')
@patch('kafka.client.KafkaProtocol')
def test_get_leader_returns_none_when_noleader(self, protocol, conn):
"Getting leader for partitions returns None when the partiion has no leader"
conn.recv.return_value = 'response' # anything but None
brokers = {}
brokers[0] = BrokerMetadata(0, 'broker_1', 4567)
brokers[1] = BrokerMetadata(1, 'broker_2', 5678)
topics = {}
topics['topic_noleader'] = {
0: PartitionMetadata('topic_noleader', 0, -1, [], []),
1: PartitionMetadata('topic_noleader', 1, -1, [], [])
}
protocol.decode_metadata_response.return_value = (brokers, topics)
client = KafkaClient(hosts=['broker_1:4567'])
self.assertDictEqual(
{
TopicAndPartition('topic_noleader', 0): None,
TopicAndPartition('topic_noleader', 1): None
},
client.topics_to_brokers)
self.assertIsNone(client._get_leader_for_partition('topic_noleader', 0))
self.assertIsNone(client._get_leader_for_partition('topic_noleader', 1))
topics['topic_noleader'] = {
0: PartitionMetadata('topic_noleader', 0, 0, [0, 1], [0, 1]),
1: PartitionMetadata('topic_noleader', 1, 1, [1, 0], [1, 0])
}
protocol.decode_metadata_response.return_value = (brokers, topics)
self.assertEqual(brokers[0], client._get_leader_for_partition('topic_noleader', 0))
self.assertEqual(brokers[1], client._get_leader_for_partition('topic_noleader', 1))
@patch('kafka.client.KafkaConnection')
@patch('kafka.client.KafkaProtocol')
def test_send_produce_request_raises_when_noleader(self, protocol, conn):
"Send producer request raises LeaderUnavailableError if leader is not available"
conn.recv.return_value = 'response' # anything but None
brokers = {}
brokers[0] = BrokerMetadata(0, 'broker_1', 4567)
brokers[1] = BrokerMetadata(1, 'broker_2', 5678)
topics = {}
topics['topic_noleader'] = {
0: PartitionMetadata('topic_noleader', 0, -1, [], []),
1: PartitionMetadata('topic_noleader', 1, -1, [], [])
}
protocol.decode_metadata_response.return_value = (brokers, topics)
client = KafkaClient(hosts=['broker_1:4567'])
requests = [ProduceRequest(
"topic_noleader", 0,
[create_message("a"), create_message("b")])]
with self.assertRaises(LeaderUnavailableError):
client.send_produce_request(requests)
|
{
"content_hash": "4251bc2c73aa4c7a01c6d68f1a08375e",
"timestamp": "",
"source": "github",
"line_count": 249,
"max_line_length": 103,
"avg_line_length": 40.22489959839358,
"alnum_prop": 0.6129193290734825,
"repo_name": "jerluc/kafka-python",
"id": "fe9beff26bce558a70a7cd89df31a5bba33ca855",
"size": "10016",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "test/test_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "198324"
},
{
"name": "Shell",
"bytes": "408"
}
],
"symlink_target": ""
}
|
class AppSettings(object):
class AuthenticationMethod:
USERNAME = 'username'
EMAIL = 'email'
USERNAME_EMAIL = 'username_email'
class EmailVerificationMethod:
# After signing up, keep the user account inactive until the email
# address is verified
MANDATORY = 'mandatory'
# Allow login with unverified e-mail (e-mail verification is
# still sent)
OPTIONAL = 'optional'
# Don't send e-mail verification mails during signup
NONE = 'none'
def __init__(self, prefix):
self.prefix = prefix
# If login is by email, email must be required
assert (not self.AUTHENTICATION_METHOD ==
self.AuthenticationMethod.EMAIL) or self.EMAIL_REQUIRED
# If login includes email, login must be unique
assert (self.AUTHENTICATION_METHOD ==
self.AuthenticationMethod.USERNAME) or self.UNIQUE_EMAIL
assert (self.EMAIL_VERIFICATION !=
self.EmailVerificationMethod.MANDATORY) \
or self.EMAIL_REQUIRED
if not self.USER_MODEL_USERNAME_FIELD:
assert not self.USERNAME_REQUIRED
assert self.AUTHENTICATION_METHOD \
not in (self.AuthenticationMethod.USERNAME,
self.AuthenticationMethod.USERNAME_EMAIL)
def _setting(self, name, dflt):
from django.conf import settings
getter = getattr(settings,
'ALLAUTH_SETTING_GETTER',
lambda name, dflt: getattr(settings, name, dflt))
return getter(self.prefix + name, dflt)
@property
def DEFAULT_HTTP_PROTOCOL(self):
return self._setting("DEFAULT_HTTP_PROTOCOL", "http").lower()
@property
def EMAIL_CONFIRMATION_EXPIRE_DAYS(self):
"""
Determines the expiration date of e-mail confirmation mails (#
of days)
"""
from django.conf import settings
return self._setting("EMAIL_CONFIRMATION_EXPIRE_DAYS",
getattr(settings, "EMAIL_CONFIRMATION_DAYS", 3))
@property
def EMAIL_CONFIRMATION_AUTHENTICATED_REDIRECT_URL(self):
"""
The URL to redirect to after a successful e-mail confirmation, in
case of an authenticated user
"""
return self._setting("EMAIL_CONFIRMATION_AUTHENTICATED_REDIRECT_URL",
None)
@property
def EMAIL_CONFIRMATION_ANONYMOUS_REDIRECT_URL(self):
"""
The URL to redirect to after a successful e-mail confirmation, in
case no user is logged in
"""
from django.conf import settings
return self._setting("EMAIL_CONFIRMATION_ANONYMOUS_REDIRECT_URL",
settings.LOGIN_URL)
@property
def EMAIL_CONFIRMATION_COOLDOWN(self):
"""
The cooldown in seconds during which, after an email confirmation has
been sent, a second confirmation email will not be sent.
"""
return self._setting("EMAIL_CONFIRMATION_COOLDOWN", 3 * 60)
@property
def EMAIL_REQUIRED(self):
"""
The user is required to hand over an e-mail address when signing up
"""
return self._setting("EMAIL_REQUIRED", False)
@property
def EMAIL_VERIFICATION(self):
"""
See e-mail verification method
"""
ret = self._setting("EMAIL_VERIFICATION",
self.EmailVerificationMethod.OPTIONAL)
# Deal with legacy (boolean based) setting
if ret is True:
ret = self.EmailVerificationMethod.MANDATORY
elif ret is False:
ret = self.EmailVerificationMethod.OPTIONAL
return ret
@property
def AUTHENTICATION_METHOD(self):
ret = self._setting("AUTHENTICATION_METHOD",
self.AuthenticationMethod.USERNAME)
return ret
@property
def EMAIL_MAX_LENGTH(self):
"""
Adjust max_length of e-mail addresses
"""
return self._setting("EMAIL_MAX_LENGTH", 254)
@property
def UNIQUE_EMAIL(self):
"""
Enforce uniqueness of e-mail addresses
"""
return self._setting("UNIQUE_EMAIL", True)
@property
def SIGNUP_EMAIL_ENTER_TWICE(self):
"""
        Ask the user to enter their e-mail address twice at signup
"""
return self._setting("SIGNUP_EMAIL_ENTER_TWICE", False)
@property
def SIGNUP_PASSWORD_ENTER_TWICE(self):
"""
        Ask the user to enter their password twice at signup (falls back
        to the legacy SIGNUP_PASSWORD_VERIFICATION setting)
"""
legacy = self._setting('SIGNUP_PASSWORD_VERIFICATION', True)
return self._setting('SIGNUP_PASSWORD_ENTER_TWICE', legacy)
@property
def PASSWORD_MIN_LENGTH(self):
"""
Minimum password Length
"""
from django.conf import settings
ret = None
if not settings.AUTH_PASSWORD_VALIDATORS:
ret = self._setting("PASSWORD_MIN_LENGTH", 6)
return ret
@property
def EMAIL_SUBJECT_PREFIX(self):
"""
Subject-line prefix to use for email messages sent
"""
return self._setting("EMAIL_SUBJECT_PREFIX", None)
@property
def SIGNUP_FORM_CLASS(self):
"""
        Custom signup form class (dotted import path), if any
"""
return self._setting("SIGNUP_FORM_CLASS", None)
@property
def USERNAME_REQUIRED(self):
"""
The user is required to enter a username when signing up
"""
return self._setting("USERNAME_REQUIRED", True)
@property
def USERNAME_MIN_LENGTH(self):
"""
Minimum username Length
"""
return self._setting("USERNAME_MIN_LENGTH", 1)
@property
def USERNAME_BLACKLIST(self):
"""
List of usernames that are not allowed
"""
return self._setting("USERNAME_BLACKLIST", [])
@property
def PASSWORD_INPUT_RENDER_VALUE(self):
"""
render_value parameter as passed to PasswordInput fields
"""
return self._setting("PASSWORD_INPUT_RENDER_VALUE", False)
@property
def ADAPTER(self):
return self._setting('ADAPTER',
'allauth.account.adapter.DefaultAccountAdapter')
@property
def CONFIRM_EMAIL_ON_GET(self):
return self._setting('CONFIRM_EMAIL_ON_GET', False)
@property
def AUTHENTICATED_LOGIN_REDIRECTS(self):
return self._setting('AUTHENTICATED_LOGIN_REDIRECTS', True)
@property
def LOGIN_ON_EMAIL_CONFIRMATION(self):
"""
Automatically log the user in once they confirmed their email address
"""
return self._setting('LOGIN_ON_EMAIL_CONFIRMATION', False)
@property
def LOGIN_ON_PASSWORD_RESET(self):
"""
Automatically log the user in immediately after resetting
their password.
"""
return self._setting('LOGIN_ON_PASSWORD_RESET', False)
@property
def LOGOUT_REDIRECT_URL(self):
return self._setting('LOGOUT_REDIRECT_URL', '/')
@property
def LOGOUT_ON_GET(self):
return self._setting('LOGOUT_ON_GET', False)
@property
def LOGOUT_ON_PASSWORD_CHANGE(self):
return self._setting('LOGOUT_ON_PASSWORD_CHANGE', False)
@property
def USER_MODEL_USERNAME_FIELD(self):
return self._setting('USER_MODEL_USERNAME_FIELD', 'username')
@property
def USER_MODEL_EMAIL_FIELD(self):
return self._setting('USER_MODEL_EMAIL_FIELD', 'email')
@property
def SESSION_COOKIE_AGE(self):
"""
Deprecated -- use Django's settings.SESSION_COOKIE_AGE instead
"""
from django.conf import settings
return self._setting('SESSION_COOKIE_AGE', settings.SESSION_COOKIE_AGE)
@property
def SESSION_REMEMBER(self):
"""
        Controls the lifetime of the session. Set to `None` to ask the user
("Remember me?"), `False` to not remember, and `True` to always
remember.
"""
return self._setting('SESSION_REMEMBER', None)
@property
def TEMPLATE_EXTENSION(self):
"""
A string defining the template extension to use, defaults to `html`.
"""
return self._setting('TEMPLATE_EXTENSION', 'html')
@property
def FORMS(self):
return self._setting('FORMS', {})
@property
def LOGIN_ATTEMPTS_LIMIT(self):
"""
Number of failed login attempts. When this number is
exceeded, the user is prohibited from logging in for the
specified `LOGIN_ATTEMPTS_TIMEOUT`
"""
return self._setting('LOGIN_ATTEMPTS_LIMIT', 5)
@property
def LOGIN_ATTEMPTS_TIMEOUT(self):
"""
Time period from last unsuccessful login attempt, during
which the user is prohibited from trying to log in. Defaults to
5 minutes.
"""
return self._setting('LOGIN_ATTEMPTS_TIMEOUT', 60 * 5)
@property
def EMAIL_CONFIRMATION_HMAC(self):
return self._setting('EMAIL_CONFIRMATION_HMAC', True)
@property
def SALT(self):
return self._setting('SALT', 'account')
@property
def PRESERVE_USERNAME_CASING(self):
return self._setting('PRESERVE_USERNAME_CASING', True)
@property
def USERNAME_VALIDATORS(self):
from django.core.exceptions import ImproperlyConfigured
from allauth.utils import import_attribute
from allauth.utils import get_user_model
path = self._setting('USERNAME_VALIDATORS', None)
if path:
ret = import_attribute(path)
if not isinstance(ret, list):
raise ImproperlyConfigured(
'ACCOUNT_USERNAME_VALIDATORS is expected to be a list')
else:
if self.USER_MODEL_USERNAME_FIELD is not None:
ret = get_user_model()._meta.get_field(
self.USER_MODEL_USERNAME_FIELD).validators
else:
ret = []
return ret
# Ugly? Guido recommends this himself ...
# http://mail.python.org/pipermail/python-ideas/2012-May/014969.html
import sys # noqa
app_settings = AppSettings('ACCOUNT_')
app_settings.__name__ = __name__
sys.modules[__name__] = app_settings
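# Hedged usage sketch: with the 'ACCOUNT_' prefix used above, a Django
# settings module would set, e.g.:
#
#   ACCOUNT_AUTHENTICATION_METHOD = "email"
#   ACCOUNT_EMAIL_REQUIRED = True   # required by the assertion in __init__
#
# after which `from allauth.account import app_settings` exposes
# app_settings.AUTHENTICATION_METHOD == "email" via the module swap above.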
|
{
"content_hash": "0aa5e6e9512c9127739eab5f98e185a3",
"timestamp": "",
"source": "github",
"line_count": 326,
"max_line_length": 79,
"avg_line_length": 31.49079754601227,
"alnum_prop": 0.6043249561659848,
"repo_name": "pztrick/django-allauth",
"id": "a69fd5b77d062a1894b9617d09eca14f46f29f80",
"size": "10266",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "allauth/account/app_settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Emacs Lisp",
"bytes": "104"
},
{
"name": "HTML",
"bytes": "42111"
},
{
"name": "JavaScript",
"bytes": "3260"
},
{
"name": "Makefile",
"bytes": "396"
},
{
"name": "Python",
"bytes": "671309"
}
],
"symlink_target": ""
}
|
from __future__ import division
class ThumbnailTool(object):
def avatar_thumbnail(self):
pass
    # Scale the image down proportionally by the given factor
@classmethod
def constrain_thumbnail(cls, img, times=2):
width, height = img.size
        # shrink both dimensions by the given factor
img.thumbnail((width//times, height//times))
return img
    # Scale the long edge to the given length, preserving aspect ratio
@classmethod
def constrain_len_thumbnail(cls, img, length):
width, height = img.size
        if width > height:
            scale = width / height
            width = length
            height = int(width / scale)
        else:
            scale = height / width
            height = length
            width = int(height / scale)
        # cast to int: true division above yields floats, PIL expects ints
        img.thumbnail((width, height))
return img
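# Hedged usage sketch (assumes Pillow/PIL is installed):
#
#   from PIL import Image
#   img = Image.open('photo.jpg')                    # e.g. 2000x1000
#   ThumbnailTool.constrain_thumbnail(img, times=2)  # -> at most 1000x500
#   ThumbnailTool.constrain_len_thumbnail(img, 500)  # long edge -> 500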
|
{
"content_hash": "486c1290ffcc44e8d8f04d564a73d7dd",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 52,
"avg_line_length": 23.838709677419356,
"alnum_prop": 0.5615696887686062,
"repo_name": "zer0Black/zer0Blog",
"id": "42fc1990ee1a414a684e3f799f9c1b224cb4bd97",
"size": "855",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blog/thumbnail.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "509310"
},
{
"name": "HTML",
"bytes": "95409"
},
{
"name": "JavaScript",
"bytes": "1482817"
},
{
"name": "Python",
"bytes": "51222"
}
],
"symlink_target": ""
}
|
"""
Files views.
"""
from flask import request
from website.util import rubeus
from website.project.decorators import must_be_contributor_or_public, must_not_be_retracted_registration
from website.project.views.node import _view_project
@must_not_be_retracted_registration
@must_be_contributor_or_public
def collect_file_trees(auth, node, **kwargs):
"""Collect file trees for all add-ons implementing HGrid views, then
format data as appropriate.
"""
serialized = _view_project(node, auth, primary=True)
# Add addon static assets
serialized.update(rubeus.collect_addon_assets(node))
return serialized
@must_be_contributor_or_public
def grid_data(auth, node, **kwargs):
"""View that returns the formatted data for rubeus.js/hgrid
"""
data = request.args.to_dict()
return {'data': rubeus.to_hgrid(node, auth, **data)}
|
{
"content_hash": "8a07e6a6d46c86c7bc3b48af164b9f93",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 104,
"avg_line_length": 32,
"alnum_prop": 0.7291666666666666,
"repo_name": "crcresearch/osf.io",
"id": "2ba34a35307d09576e5fe29db81e1dafae499496",
"size": "864",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "website/project/views/file.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "110148"
},
{
"name": "HTML",
"bytes": "225000"
},
{
"name": "JavaScript",
"bytes": "1807027"
},
{
"name": "Mako",
"bytes": "642435"
},
{
"name": "Python",
"bytes": "7499660"
},
{
"name": "VCL",
"bytes": "13885"
}
],
"symlink_target": ""
}
|
from django.utils import timezone
# Django stuff
from Instanssi.kompomaatti.misc.events import get_upcoming
from Instanssi.screenshow.models import IRCMessage
from Instanssi.kompomaatti.models import Event
def django_get_event(event_id):
try:
return Event.objects.get(pk=event_id)
except Event.DoesNotExist:
return None
def django_get_upcoming(event):
return get_upcoming(event)[:5]
def django_log_cleanup():
    # keep roughly the newest `limit` messages and delete everything older
    limit = 50
n = 0
last_id = 0
for msg in IRCMessage.objects.all().order_by('-id'):
last_id = msg.id
if n >= limit:
break
n += 1
IRCMessage.objects.filter(id__lt=last_id).delete()
def django_log_add(user, msg, event_id):
try:
message = IRCMessage()
message.event_id = event_id
message.date = timezone.now()
message.message = msg
message.nick = user
message.save()
except UnicodeDecodeError:
return False
return True
|
{
"content_hash": "0da62b04ce9432dad9529a8d80810ca5",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 58,
"avg_line_length": 23,
"alnum_prop": 0.6390293225480284,
"repo_name": "Instanssi/KompomaattiBot",
"id": "f39d0c75092ac355c35c58e8305a9bbd78ea3f4e",
"size": "1026",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/django_integration.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2864"
}
],
"symlink_target": ""
}
|
from __future__ import (
absolute_import, division, print_function, with_statement
)
from contextlib import contextmanager
from gevent import GreenletExit
@contextmanager
def ignored(*exceptions):
try:
yield
except GreenletExit as e:
raise e
except exceptions:
pass
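# Hedged usage sketch: swallow only the listed exceptions while always
# letting GreenletExit propagate, so gevent can still kill the greenlet:
#
#   with ignored(OSError, ValueError):
#       risky_io()   # hypothetical call; OSError/ValueError are suppressed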
|
{
"content_hash": "500a2c0b3b2c5010faf7bf20192b3279",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 61,
"avg_line_length": 20.533333333333335,
"alnum_prop": 0.698051948051948,
"repo_name": "fcwu/docker-ubuntu-vnc-desktop",
"id": "e1198615918814eba657641e88693adee36cbd3b",
"size": "308",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "rootfs/usr/local/lib/web/backend/vnc/util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "27187"
},
{
"name": "JavaScript",
"bytes": "25197"
},
{
"name": "Makefile",
"bytes": "1685"
},
{
"name": "Python",
"bytes": "18027"
},
{
"name": "Shell",
"bytes": "2973"
},
{
"name": "Vue",
"bytes": "6771"
}
],
"symlink_target": ""
}
|
"""Helper module for uploading solve status posts to SolveTracker repository."""
import json
import datetime
from util.githandler import GitHandler
from util.ctf_template_resolver import resolve_ctf_template, resolve_stats_template
from util.loghandler import log
from bottypes.invalid_command import InvalidCommand
ST_GIT_CONFIG = {}
ST_GIT_SUPPORT = False
def post_ctf_data(ctf, title):
"""Create a post and a statistic file and upload it to the configured SolveTracker repository."""
if not ST_GIT_SUPPORT:
raise Exception("Sorry, but the SolveTracker support isn't configured...")
try:
now = datetime.datetime.now()
post_data = resolve_ctf_template(ctf, title, "./templates/post_ctf_template",
"./templates/post_challenge_template")
post_filename = "_posts/{}-{}-{}-{}.md".format(now.year, now.month, now.day, ctf.name)
stat_data = resolve_stats_template(ctf)
stat_filename = "_stats/{}.json".format(ctf.name)
git = GitHandler(ST_GIT_CONFIG.get("git_repopath"))
git.add_file(post_data, post_filename)
git.add_file(stat_data, stat_filename)
git.commit("Solve post from {}".format(ctf.name))
git.push(ST_GIT_CONFIG.get("git_repouser"), ST_GIT_CONFIG.get("git_repopass"),
ST_GIT_CONFIG.get("git_remoteuri"), ST_GIT_CONFIG.get("git_branch"))
return ST_GIT_CONFIG.get("git_baseurl")
except InvalidCommand as invalid_cmd:
# Just pass invalid commands on
raise invalid_cmd
except Exception:
log.exception("SolvePostHelper")
raise InvalidCommand(
"Something with your configuration files doesn't seem to be correct. Please check your logfiles...")
def init_solvetracker_config():
"""Initialize the SolveTracker configuration or disable SolveTracker support if config file doesn't exist."""
try:
with open("./config/config_solvetracker.json") as f:
return json.load(f), True
except (IOError, FileNotFoundError) as e:
log.info("Solvetracker configuration couldn't be loaded: %s. Deactivating SolveTracker...", e)
return None, False
ST_GIT_CONFIG, ST_GIT_SUPPORT = init_solvetracker_config()
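# Hedged sketch of ./config/config_solvetracker.json (keys taken from the
# ST_GIT_CONFIG lookups above; all values hypothetical):
#
#   {
#     "git_repopath": "/data/solvetracker",
#     "git_repouser": "bot",
#     "git_repopass": "********",
#     "git_remoteuri": "github.com/org/solvetracker.git",
#     "git_branch": "master",
#     "git_baseurl": "https://org.github.io/solvetracker/"
#   }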
|
{
"content_hash": "49fcb1ad313f4c118daf03ebb922020b",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 113,
"avg_line_length": 38.42372881355932,
"alnum_prop": 0.6704896338773709,
"repo_name": "OpenToAllCTF/OTA-Challenge-Bot",
"id": "bd022283ef748de5921f4479428dd2970a04acf9",
"size": "2267",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "util/solveposthelper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "214"
},
{
"name": "Makefile",
"bytes": "533"
},
{
"name": "Python",
"bytes": "125037"
}
],
"symlink_target": ""
}
|
import re
def commands():
"""Returns the list of commands that this plugin handles.
"""
return [
{
'type':'text',
'triggers':[re.compile('.*')],
'field':'text'
}
]
def process(status):
return "%s: %s" %(status['user']['screen_name'], status['text'])
|
{
"content_hash": "9105d28793a2fb34c6abbf09057047a7",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 68,
"avg_line_length": 22.5,
"alnum_prop": 0.4388888888888889,
"repo_name": "sengupta/spritzbot",
"id": "3645056244801ad29680c2574a0e89811460e0fa",
"size": "360",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spritzbot/plugins/mentions.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [],
"symlink_target": ""
}
|
from oslo_log import log as logging
from trove.common import cfg
from trove.common import exception
from trove.common.i18n import _
from trove.common import instance as rd_instance
from trove.common import utils as utils
from trove.guestagent.common import operating_system
from trove.guestagent.datastore.experimental.db2 import system
from trove.guestagent.datastore import service
from trove.guestagent.db import models
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
IGNORE_USERS_LIST = CONF.db2.ignore_users
class DB2App(object):
"""
Handles installation and configuration of DB2
on a Trove instance.
"""
def __init__(self, status, state_change_wait_time=None):
LOG.debug("Initialize DB2App.")
self.state_change_wait_time = (
state_change_wait_time if state_change_wait_time else
CONF.state_change_wait_time
)
LOG.debug("state_change_wait_time = %s." % self.state_change_wait_time)
self.status = status
def complete_install_or_restart(self):
self.status.end_install_or_restart()
def change_ownership(self, mount_point):
"""
When DB2 server instance is installed, it does not have the
DB2 local database directory created (/home/db2inst1/db2inst1).
This gets created when we mount the cinder volume. So we need
to change ownership of this directory to the DB2 instance user
- db2inst1.
"""
LOG.debug("Changing ownership of the DB2 data directory.")
try:
operating_system.chown(mount_point,
system.DB2_INSTANCE_OWNER,
system.DB2_INSTANCE_OWNER,
recursive=False, as_root=True)
except exception.ProcessExecutionError:
raise RuntimeError(_(
"Command to change ownership of DB2 data directory failed."))
def _enable_db_on_boot(self):
LOG.debug("Enable DB on boot.")
try:
run_command(system.ENABLE_AUTOSTART)
except exception.ProcessExecutionError:
raise RuntimeError(_(
"Command to enable DB2 server on boot failed."))
def _disable_db_on_boot(self):
LOG.debug("Disable DB2 on boot.")
try:
run_command(system.DISABLE_AUTOSTART)
except exception.ProcessExecutionError:
raise RuntimeError(_(
"Command to disable DB2 server on boot failed."))
def start_db_with_conf_changes(self, config_contents):
'''
        The configuration change API is not implemented for DB2 in the
        Kilo release, so this method currently just starts the DB2 server
        without applying any configuration changes. It still needs to be
        implemented to enable volume resize on the guest agent side.
'''
LOG.info(_("Starting DB2 with configuration changes."))
self.start_db(True)
def start_db(self, update_db=False):
LOG.debug("Start the DB2 server instance.")
self._enable_db_on_boot()
try:
run_command(system.START_DB2)
except exception.ProcessExecutionError:
pass
if not self.status.wait_for_real_status_to_change_to(
rd_instance.ServiceStatuses.RUNNING,
self.state_change_wait_time, update_db):
LOG.error(_("Start of DB2 server instance failed."))
self.status.end_install_or_restart()
raise RuntimeError(_("Could not start DB2."))
def stop_db(self, update_db=False, do_not_start_on_reboot=False):
LOG.debug("Stop the DB2 server instance.")
if do_not_start_on_reboot:
self._disable_db_on_boot()
try:
run_command(system.STOP_DB2)
except exception.ProcessExecutionError:
pass
if not (self.status.wait_for_real_status_to_change_to(
rd_instance.ServiceStatuses.SHUTDOWN,
self.state_change_wait_time, update_db)):
LOG.error(_("Could not stop DB2."))
self.status.end_install_or_restart()
raise RuntimeError(_("Could not stop DB2."))
def restart(self):
LOG.debug("Restarting DB2 server instance.")
try:
self.status.begin_restart()
self.stop_db()
self.start_db()
finally:
self.status.end_install_or_restart()
class DB2AppStatus(service.BaseDbStatus):
"""
Handles all of the status updating for the DB2 guest agent.
"""
def _get_actual_db_status(self):
LOG.debug("Getting the status of the DB2 server instance.")
try:
out, err = utils.execute_with_timeout(
system.DB2_STATUS, shell=True)
if "0" not in out:
return rd_instance.ServiceStatuses.RUNNING
else:
return rd_instance.ServiceStatuses.SHUTDOWN
except exception.ProcessExecutionError:
LOG.exception(_("Error getting the DB2 server status."))
return rd_instance.ServiceStatuses.CRASHED
def run_command(command, superuser=system.DB2_INSTANCE_OWNER,
timeout=system.TIMEOUT):
return utils.execute_with_timeout("sudo", "su", "-", superuser, "-c",
command, timeout=timeout)
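# Hedged usage sketch: run_command() wraps a db2 command in
# "sudo su - <owner> -c <command>", so e.g.
#
#   run_command(system.CREATE_DB_COMMAND % {'dbname': 'mydb'})
#
# executes the create-database command as the DB2 instance owner.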
class DB2Admin(object):
"""
Handles administrative tasks on the DB2 instance.
"""
def create_database(self, databases):
"""Create the given database(s)."""
dbName = None
db_create_failed = []
LOG.debug("Creating DB2 databases.")
for item in databases:
mydb = models.ValidatedMySQLDatabase()
mydb.deserialize(item)
dbName = mydb.name
LOG.debug("Creating DB2 database: %s." % dbName)
try:
run_command(system.CREATE_DB_COMMAND % {'dbname': dbName})
except exception.ProcessExecutionError:
LOG.exception(_(
"There was an error creating database: %s.") % dbName)
db_create_failed.append(dbName)
pass
if len(db_create_failed) > 0:
LOG.exception(_("Creating the following databases failed: %s.") %
db_create_failed)
def delete_database(self, database):
"""Delete the specified database."""
dbName = None
try:
mydb = models.ValidatedMySQLDatabase()
mydb.deserialize(database)
dbName = mydb.name
LOG.debug("Deleting DB2 database: %s." % dbName)
run_command(system.DELETE_DB_COMMAND % {'dbname': dbName})
except exception.ProcessExecutionError:
LOG.exception(_(
"There was an error while deleting database:%s.") % dbName)
raise exception.GuestError(_("Unable to delete database: %s.") %
dbName)
def list_databases(self, limit=None, marker=None, include_marker=False):
LOG.debug("Listing all the DB2 databases.")
databases = []
next_marker = None
try:
out, err = run_command(system.LIST_DB_COMMAND)
dblist = out.split()
result = iter(dblist)
count = 0
if marker is not None:
try:
item = result.next()
while item != marker:
item = result.next()
if item == marker:
marker = None
except StopIteration:
pass
try:
item = result.next()
while item:
count = count + 1
if (limit and count <= limit) or limit is None:
db2_db = models.MySQLDatabase()
db2_db.name = item
LOG.debug("database = %s ." % item)
db2_db.character_set = None
db2_db.collate = None
next_marker = db2_db.name
databases.append(db2_db.serialize())
item = result.next()
else:
next_marker = None
break
except StopIteration:
next_marker = None
LOG.debug("databases = %s." % str(databases))
except exception.ProcessExecutionError as pe:
LOG.exception(_("An error occured listing databases: %s.") %
pe.message)
pass
return databases, next_marker
def create_user(self, users):
LOG.debug("Creating user(s) for accessing DB2 database(s).")
try:
for item in users:
user = models.MySQLUser()
user.deserialize(item)
try:
LOG.debug("Creating OS user: %s." % user.name)
utils.execute_with_timeout(
system.CREATE_USER_COMMAND % {
                            'login': user.name,
'passwd': user.password}, shell=True)
except exception.ProcessExecutionError as pe:
LOG.exception(_("Error creating user: %s.") % user.name)
continue
for database in user.databases:
mydb = models.ValidatedMySQLDatabase()
mydb.deserialize(database)
try:
LOG.debug("Granting user: %s access to database: %s."
% (user.name, mydb.name))
run_command(system.GRANT_USER_ACCESS % {
'dbname': mydb.name, 'login': user.name})
except exception.ProcessExecutionError as pe:
LOG.debug(
"Error granting user: %s access to database: %s."
% (user.name, mydb.name))
LOG.debug(pe)
pass
except exception.ProcessExecutionError as pe:
LOG.exception(_("An error occured creating users: %s.") %
pe.message)
pass
def delete_user(self, user):
LOG.debug("Delete a given user.")
db2_user = models.MySQLUser()
db2_user.deserialize(user)
userName = db2_user.name
user_dbs = db2_user.databases
LOG.debug("For user %s, databases to be deleted = %r." % (
userName, user_dbs))
if len(user_dbs) == 0:
databases = self.list_access(db2_user.name, None)
else:
databases = user_dbs
LOG.debug("databases for user = %r." % databases)
for database in databases:
mydb = models.ValidatedMySQLDatabase()
mydb.deserialize(database)
try:
run_command(system.REVOKE_USER_ACCESS % {
'dbname': mydb.name,
'login': userName})
LOG.debug("Revoked access for user:%s on database:%s." % (
userName, mydb.name))
except exception.ProcessExecutionError as pe:
LOG.debug("Error occured while revoking access to %s." %
mydb.name)
pass
try:
utils.execute_with_timeout(system.DELETE_USER_COMMAND % {
'login': db2_user.name.lower()}, shell=True)
except exception.ProcessExecutionError as pe:
LOG.exception(_(
"There was an error while deleting user: %s.") % pe)
raise exception.GuestError(_("Unable to delete user: %s.") %
userName)
def list_users(self, limit=None, marker=None, include_marker=False):
LOG.debug(
"List all users for all the databases in a DB2 server instance.")
users = []
user_map = {}
next_marker = None
count = 0
databases, marker = self.list_databases()
for database in databases:
db2_db = models.MySQLDatabase()
db2_db.deserialize(database)
out = None
try:
out, err = run_command(
system.LIST_DB_USERS % {'dbname': db2_db.name})
except exception.ProcessExecutionError:
LOG.debug(
"There was an error while listing users for database: %s."
% db2_db.name)
continue
userlist = []
for item in out.split('\n'):
LOG.debug("item = %r" % item)
user = item.split() if item != "" else None
LOG.debug("user = %r" % (user))
if user is not None and user[0] not in IGNORE_USERS_LIST \
and user[1] == 'Y':
userlist.append(user[0])
result = iter(userlist)
if marker is not None:
try:
item = result.next()
while item != marker:
item = result.next()
if item == marker:
marker = None
except StopIteration:
pass
try:
item = result.next()
db2db = models.MySQLDatabase()
db2db.name = db2_db.name
while item:
'''
Check if the user has already been discovered. If so,
add this database to the database list for this user.
'''
if item in user_map:
db2user = user_map.get(item)
db2user.databases.append(db2db.serialize())
item = result.next()
continue
'''
If this user was not previously discovered, then add
this to the user's list.
'''
count = count + 1
if (limit and count <= limit) or limit is None:
db2_user = models.MySQLUser()
db2_user.name = item
db2_user.databases.append(db2db.serialize())
users.append(db2_user.serialize())
user_map.update({item: db2_user})
item = result.next()
else:
next_marker = None
break
except StopIteration:
next_marker = None
if count == limit:
break
return users, next_marker
def get_user(self, username, hostname):
LOG.debug("Get details of a given database user.")
user = self._get_user(username, hostname)
if not user:
return None
return user.serialize()
def _get_user(self, username, hostname):
LOG.debug("Get details of a given database user %s." % username)
user = models.MySQLUser()
user.name = username
databases, marker = self.list_databases()
out = None
for database in databases:
db2_db = models.MySQLDatabase()
db2_db.deserialize(database)
try:
out, err = run_command(
system.LIST_DB_USERS % {'dbname': db2_db.name})
except exception.ProcessExecutionError:
LOG.debug(
"Error while trying to get the users for database: %s." %
db2_db.name)
continue
for item in out.split('\n'):
user_access = item.split() if item != "" else None
if (user_access is not None and
user_access[0].lower() == username.lower() and
user_access[1] == 'Y'):
user.databases = db2_db.name
break
return user
def list_access(self, username, hostname):
"""
Show all the databases to which the user has more than
USAGE granted.
"""
LOG.debug("Listing databases that user: %s has access to." % username)
user = self._get_user(username, hostname)
return user.databases
|
{
"content_hash": "99fe8dcca211ea6651d640e31517f294",
"timestamp": "",
"source": "github",
"line_count": 425,
"max_line_length": 79,
"avg_line_length": 39.06588235294117,
"alnum_prop": 0.5191230500511955,
"repo_name": "cp16net/trove",
"id": "eccc8175f84b5cce9c7f08e37fb15ec03b1e3ac8",
"size": "17228",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trove/guestagent/datastore/experimental/db2/service.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "88"
},
{
"name": "CSS",
"bytes": "21914"
},
{
"name": "JavaScript",
"bytes": "60526"
},
{
"name": "Python",
"bytes": "2872713"
},
{
"name": "Shell",
"bytes": "15002"
},
{
"name": "XSLT",
"bytes": "50542"
}
],
"symlink_target": ""
}
|
"""
Created on Thu Jul 27 10:23:21 2017
@author: casari
"""
import sys, os, time, glob, serial
class SerialPort:
def __init__(self):
self.serial = serial.Serial()
self.serial.baudrate = 9600
self.portnames = []
self.timeout = 0.1
## Find all ports
self.find_available_ports()
def setPort(self,port):
self.serial.port = port
def setBaud(self,baud):
self.serial.baudrate = baud
def setParity(self,parity):
self.serial.parity = parity
def setBytesize(self,bytesize):
self.serial.bytesize = bytesize
def setStopbits(self,stopbits):
self.serial.stopbits = stopbits
def setTimeout(self,timeout):
self.serial.timeout = timeout
def start(self):
self.serial.open()
        if self.serial.is_open:
print("Connected!")
else:
print("Unable to Connect")
def stop(self):
self.serial.close()
def read(self):
self.buffer = self.serial.read_all()
def write(self,value):
self.serial.write(value)
def find_available_ports(self):
if sys.platform.startswith('win'):
ports = ['COM%s' % (i+1) for i in range(256)]
elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
ports = glob.glob('/dev/tty[A-Za-z]*')
elif sys.platform.startswith('darwin'):
ports = glob.glob('/dev/tty.*')
else:
raise EnvironmentError('Unsupported Platform')
del(self.portnames)
self.portnames = []
for port in ports:
try:
s = serial.Serial(port)
s.close()
self.portnames.append(port)
except (OSError,serial.SerialException):
pass
if __name__ == "__main__":
Port = SerialPort()
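# Hedged usage sketch:
#
#   Port = SerialPort()              # scans for openable ports
#   print(Port.portnames)            # e.g. ['COM3'] on Windows
#   Port.setPort(Port.portnames[0])
#   Port.start()                     # prints "Connected!" on success
#   Port.write(b'hello')
#   Port.read()
#   print(Port.buffer)
#   Port.stop()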
|
{
"content_hash": "9464bac10103559486f6f2af2e2e5e51",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 83,
"avg_line_length": 24.129411764705882,
"alnum_prop": 0.5163334958556801,
"repo_name": "MCasari-PMEL/EDD-ICMGUI",
"id": "6cd9ec19f52de226d86ebff8ea875ec05d2843bf",
"size": "2075",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "icm/port.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "80531"
}
],
"symlink_target": ""
}
|
import os
from numpy import array,zeros,ndarray,around,sqrt,arange
from numpy.linalg import norm
from developer import DevBase
from generic import obj
from structure import Structure
from plotting import *
class DensityAnalyzer(DevBase):
def __init__(self,densfile=None,denserrfile=None,transpose=False):
density = None
density_error = None
corner = None
cell = None
grid = None
structure = None
if densfile!=None:
self.read(densfile,denserrfile,transpose)
#end if
#end def __init__
def read(self,densfile,denserrfile=None,transpose=False):
if not os.path.exists(densfile):
self.error('density file {0} does not exist'.format(densfile))
#end if
ext = densfile.rsplit('.',1)[1]
if denserrfile!=None:
if not os.path.exists(denserrfile):
self.error('density error file {0} does not exist'.format(denserrfile))
elif densfile.rsplit('.',1)[1]!=ext:
self.error('file extensions must match\n density file extension: {0}\n density error file extension: {1}'.format(ext,densfile.rsplit('.',1)[1]))
#end if
if ext=='xsf':
self.read_xsf(densfile,transpose)
if denserrfile!=None:
self.read_xsf(denserrfile,transpose,error=True)
#end if
else:
self.error('file format {0} is not yet supported'.format(ext))
#end if
if tuple(self.grid)!=self.density.shape:
            self.error('density shape does not match grid\n  density.shape = {0}\n  grid = {1}'.format(self.density.shape,self.grid))
#end if
#end def read
def read_xsf(self,densfile,transpose=False,error=False):
primvec = None
natoms = None
elem = None
pos = None
grid = None
corner = None
cell = None
dens = None
lines = open(densfile,'r').read().splitlines()
i=0
while(i<len(lines)):
line = lines[i].strip()
if line=='PRIMVEC':
primvec = array((lines[i+1]+' '+
lines[i+2]+' '+
lines[i+3]).split(),dtype=float)
primvec.shape = 3,3
i+=3
elif line=='PRIMCOORD':
natoms = int(lines[i+1].split()[0])
elem = []
pos = []
for iat in range(natoms):
tokens = lines[i+2+iat].split()
elem.append(tokens[0])
pos.extend(tokens[1:])
#end for
pos = array(pos,dtype=float)
pos.shape = natoms,3
i+=natoms+1
elif line.startswith('DATAGRID_3D'):
grid = array(lines[i+1].split(),dtype=int)
corner = array(lines[i+2].split(),dtype=float)
cell = array((lines[i+3]+' '+
lines[i+4]+' '+
lines[i+5]).split(),dtype=float)
cell.shape = 3,3
i+=6
dstr = ''
line = lines[i].strip()
while line!='END_DATAGRID_3D':
dstr += line+' '
i+=1
line = lines[i].strip()
#end while
dens = array(dstr.split(),dtype=float)
if transpose:
dtrans = zeros(dens.shape)
gdims = grid.copy()
gdims[0] = grid[1]*grid[2]
gdims[1] = grid[2]
gdims[2] = 1
nd=0
                    # loop indices renamed so they do not clobber the outer
                    # line index i; nd advances once per copied element
                    for kk in xrange(grid[2]):
                        for jj in xrange(grid[1]):
                            for ii in xrange(grid[0]):
                                p = ii*gdims[0]+jj*gdims[1]+kk*gdims[2]
                                dtrans[nd] = dens[p]
                                nd+=1
#end for
#end for
#end for
dens = dtrans
#end if
dens.shape = tuple(grid)
#end if
i+=1
#end while
v=obj(primvec=primvec,natoms=natoms,elem=elem,pos=pos,
grid=grid,corner=corner,cell=cell,dens=dens)
for name,val in v.iteritems():
if val is None:
self.error(name+' not found in xsf file '+densfile)
#end if
#end for
if not error:
self.set(
corner = corner,
cell = cell,
grid = grid,
density = dens,
structure = Structure(
axes = primvec,
elem = elem,
pos = pos,
units = 'A'
)
)
else:
self.density_error = dens
#end if
#end def read_xsf
def sum(self):
return self.density.sum()
#end def sum
def normalize(self,norm):
f = norm/self.sum()
self.density*=f
        if self.density_error is not None:
self.density_error*=f
#end if
#end def normalize
def assert_shape(self,other):
if self.density.shape!=other.density.shape:
self.error('two density grids are not the same shape')
#end if
#end def assert_shape
def __add__(self,other):
self.assert_shape(other)
s = self.copy()
s.density += other.density
        if s.density_error is not None and other.density_error is not None:
s.density_error = sqrt(s.density_error**2+other.density_error**2)
#end if
return s
#end def __add__
def __sub__(self,other):
self.assert_shape(other)
s = self.copy()
s.density -= other.density
        if s.density_error is not None and other.density_error is not None:
s.density_error = sqrt(s.density_error**2+other.density_error**2)
#end if
return s
#end def __sub__
def integrate(self,xr=(0,1),yr=(0,1),zr=(0,1)):
nx,ny,nz = self.grid
xr = array(around(array(xr,dtype=float)*nx),dtype=int)
yr = array(around(array(yr,dtype=float)*ny),dtype=int)
zr = array(around(array(zr,dtype=float)*nz),dtype=int)
ds = self.density[xr[0]:xr[1],yr[0]:yr[1],zr[0]:zr[1]].sum()
if self.density_error is None:
return ds
else:
dse = sqrt((self.density_error[xr[0]:xr[1],yr[0]:yr[1],zr[0]:zr[1]]**2).sum())
return ds,dse
#end if
#end def integrate
def project_line(self,xr=None,yr=None,zr=None):
        if (xr is None) + (yr is None) + (zr is None) != 1:
self.error('to project onto a line, two ranges must be given to sum over')
#end if
if xr is None:
d = 0
xr = 0,1
elif yr is None:
d = 1
yr = 0,1
elif zr is None:
d = 2
zr = 0,1
#end if
r = norm(self.cell[d])/self.grid[d]*arange(self.grid[d])
ds = [0,1,2]
ds.pop(d)
ds.reverse()
ds = tuple(ds)
nx,ny,nz = self.grid
xr = array(around(array(xr,dtype=float)*nx),dtype=int)
yr = array(around(array(yr,dtype=float)*ny),dtype=int)
zr = array(around(array(zr,dtype=float)*nz),dtype=int)
lp = self.density[xr[0]:xr[1],yr[0]:yr[1],zr[0]:zr[1]]
for d in ds:
lp = lp.sum(axis=d)
#end for
if self.density_error is None:
return r,lp
else:
lpe = self.density_error[xr[0]:xr[1],yr[0]:yr[1],zr[0]:zr[1]]**2
for d in ds:
lpe = lpe.sum(axis=d)
#end for
lpe = sqrt(lpe)
return r,lp,lpe
#end if
#end def project_line
def plot_line(self,xr=None,yr=None,zr=None,fmt='b.-',label='line dens',disp=False,labels=False):
labels = labels or disp
if disp:
figure()
#end if
if self.density_error is None:
r,lp = self.project_line(xr,yr,zr)
plot(r,lp,fmt,label=label)
else:
r,lp,lpe = self.project_line(xr,yr,zr)
errorbar(r,lp,lpe,fmt=fmt,label=label)
#end if
if labels:
xlabel('r (Angstrom)')
ylabel('density')
title('Line projection of density')
#end if
if disp:
show()
#end if
#end def plot_line
#end class DensityAnalyzer
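
# Minimal usage sketch (the file name and normalization value below are
# illustrative, not part of this module):
#
#   da = DensityAnalyzer('density.xsf')
#   da.normalize(8.0)                  # rescale so da.sum() == 8.0
#   # with grid=(100,100,100), xr=(0,0.5) sums grid points 0..49 along x
#   total = da.integrate(xr=(0,0.5))
#   da.plot_line(yr=(0,1), zr=(0,1), disp=True)  # project density onto x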
|
{
"content_hash": "48ebe235d30b0139d330cd92f4029046",
"timestamp": "",
"source": "github",
"line_count": 265,
"max_line_length": 162,
"avg_line_length": 33.06792452830189,
"alnum_prop": 0.4728974095629351,
"repo_name": "habanero-rice/hclib",
"id": "538f17aa6bef958c4aa3d7e5d56b2259a53f1d0d",
"size": "8764",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/performance-regression/full-apps/qmcpack/nexus/library/density_analyzer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "64597"
},
{
"name": "C",
"bytes": "10643011"
},
{
"name": "C++",
"bytes": "15721482"
},
{
"name": "CMake",
"bytes": "257955"
},
{
"name": "CSS",
"bytes": "20536"
},
{
"name": "Cuda",
"bytes": "630404"
},
{
"name": "Fortran",
"bytes": "260512"
},
{
"name": "HTML",
"bytes": "470710"
},
{
"name": "M4",
"bytes": "4028"
},
{
"name": "MATLAB",
"bytes": "6509"
},
{
"name": "Makefile",
"bytes": "260753"
},
{
"name": "Objective-C",
"bytes": "1671681"
},
{
"name": "Perl",
"bytes": "183420"
},
{
"name": "PostScript",
"bytes": "4546458"
},
{
"name": "Python",
"bytes": "1734658"
},
{
"name": "Raku",
"bytes": "183"
},
{
"name": "Roff",
"bytes": "5051370"
},
{
"name": "Shell",
"bytes": "113750"
},
{
"name": "TeX",
"bytes": "205379"
},
{
"name": "xBase",
"bytes": "5062"
}
],
"symlink_target": ""
}
|
from ftplib import FTP
import StringIO
ftp = FTP('hanna.ccmc.gsfc.nasa.gov') # connect to host, default port
ftp.login() # user anonymous, passwd anonymous@
ftp.cwd('pub/FlareScoreboard/in/UFCORIN_1') # change into the UFCORIN_1 submission directory
str_file = StringIO.StringIO("hello")
#ftp.storlines('STOR test.txt', str_file)
ftp.delete('test.txt')
ftp.quit()
# from ftplib import FTP_TLS
# ftps = FTP_TLS('hanna.ccmc.gsfc.nasa.gov')  # FTP_TLS takes a bare hostname, not a URL
#
# /pub/FlareScoreboard/in/UFCORIN_1/
#
# ftps.login() # login anonymously before securing control channel
# ftps.prot_p() # switch to secure data connection
# ftps.retrlines('LIST') # list directory content securely
# ftps.quit()
|
{
"content_hash": "327e37fe307f431765f82e09823ac697",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 90,
"avg_line_length": 35.95,
"alnum_prop": 0.674547983310153,
"repo_name": "nushio3/UFCORIN",
"id": "c9f0947e29d43c676550b238b83fc6a8b572166a",
"size": "742",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "script/test-ccmc-ftp.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "3856"
},
{
"name": "Gnuplot",
"bytes": "4440"
},
{
"name": "Haskell",
"bytes": "133616"
},
{
"name": "Makefile",
"bytes": "417"
},
{
"name": "Perl",
"bytes": "66"
},
{
"name": "Python",
"bytes": "263435"
},
{
"name": "Ruby",
"bytes": "351"
},
{
"name": "Shell",
"bytes": "21988"
},
{
"name": "TeX",
"bytes": "5512"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from django import shortcuts
import django.views.decorators.vary
import horizon
from horizon import base
from horizon import exceptions
from horizon import notifications
MESSAGES_PATH = getattr(settings, 'MESSAGES_PATH', None)
def get_user_home(user):
dashboard = None
if user.is_superuser:
try:
dashboard = horizon.get_dashboard('admin')
except base.NotRegistered:
pass
if dashboard is None:
dashboard = horizon.get_default_dashboard()
return dashboard.get_absolute_url()
@django.views.decorators.vary.vary_on_cookie
def splash(request):
if not request.user.is_authenticated():
raise exceptions.NotAuthenticated()
response = shortcuts.redirect(horizon.get_user_home(request.user))
if 'logout_reason' in request.COOKIES:
response.delete_cookie('logout_reason')
# Display Message of the Day message from the message files
# located in MESSAGES_PATH
if MESSAGES_PATH:
notifications.process_message_notification(request, MESSAGES_PATH)
return response
|
{
"content_hash": "013cb1bf908d8ea864342bbb0d8a71d5",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 74,
"avg_line_length": 27.7,
"alnum_prop": 0.7184115523465704,
"repo_name": "davidcusatis/horizon",
"id": "365e47a4eb5e965965d7a96806d4682e68d2189f",
"size": "1713",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "openstack_dashboard/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "91465"
},
{
"name": "HTML",
"bytes": "468841"
},
{
"name": "JavaScript",
"bytes": "1420582"
},
{
"name": "Makefile",
"bytes": "588"
},
{
"name": "Python",
"bytes": "4813319"
},
{
"name": "Shell",
"bytes": "19004"
}
],
"symlink_target": ""
}
|
import bluebottle.utils.fields
import bluebottle.utils.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('members', '0045_auto_20210708_1020'),
]
operations = [
migrations.AlterField(
model_name='member',
name='campaign_notifications',
field=models.BooleanField(default=True, help_text='Updates from initiatives and activities that this person follows', verbose_name='Updates'),
),
migrations.AlterField(
model_name='member',
name='location',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='geo.Location', verbose_name='Office'),
),
migrations.AlterField(
model_name='member',
name='picture',
field=bluebottle.utils.fields.ImageField(blank=True, upload_to='profiles', validators=[bluebottle.utils.validators.FileMimetypeValidator(['image/png', 'image/jpeg', 'image/gif', 'image/svg+xml'], None, 'invalid_mimetype'), bluebottle.utils.validators.validate_file_infection], verbose_name='picture'),
),
migrations.AlterField(
model_name='member',
name='segments',
field=models.ManyToManyField(blank=True, related_name='users', to='segments.Segment', verbose_name='Segment'),
),
migrations.AlterField(
model_name='member',
name='subscribed',
field=models.BooleanField(default=False, help_text="Monthly overview of activities that match this person's profile", verbose_name='Matching'),
),
migrations.AlterField(
model_name='member',
name='updated',
field=models.DateTimeField(auto_now=True, verbose_name='updated'),
),
migrations.AlterField(
model_name='member',
name='user_type',
field=models.CharField(choices=[('person', 'Person'), ('company', 'Company'), ('foundation', 'Foundation'), ('school', 'School'), ('group', 'Club / association')], default='person', max_length=25, verbose_name='Member Type'),
),
migrations.AlterField(
model_name='member',
name='verified',
field=models.BooleanField(blank=True, default=False, help_text='Was verified for voting by recaptcha.'),
),
migrations.AlterField(
model_name='memberplatformsettings',
name='background',
field=models.ImageField(blank=True, null=True, upload_to='site_content/', validators=[bluebottle.utils.validators.FileMimetypeValidator(['image/png', 'image/jpeg', 'image/gif', 'image/svg+xml'], None, 'invalid_mimetype'), bluebottle.utils.validators.validate_file_infection]),
),
migrations.AlterField(
model_name='memberplatformsettings',
name='consent_link',
field=models.CharField(default='/pages/terms-and-conditions', help_text='Link more information about the platforms policy', max_length=255),
),
]
|
{
"content_hash": "d34caa8140f2e3a368322fd7030c4eb1",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 313,
"avg_line_length": 49.1875,
"alnum_prop": 0.6331003811944091,
"repo_name": "onepercentclub/bluebottle",
"id": "4b6ddd276d1338301d7b85bf778eb621f22a6542",
"size": "3198",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bluebottle/members/migrations/0046_auto_20211012_1242.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "41694"
},
{
"name": "HTML",
"bytes": "246695"
},
{
"name": "Handlebars",
"bytes": "63"
},
{
"name": "JavaScript",
"bytes": "139123"
},
{
"name": "PHP",
"bytes": "35"
},
{
"name": "PLpgSQL",
"bytes": "1369882"
},
{
"name": "PostScript",
"bytes": "2927"
},
{
"name": "Python",
"bytes": "4983116"
},
{
"name": "Rich Text Format",
"bytes": "39109"
},
{
"name": "SCSS",
"bytes": "99555"
},
{
"name": "Shell",
"bytes": "3068"
},
{
"name": "Smarty",
"bytes": "3814"
}
],
"symlink_target": ""
}
|
from PyInstaller.hooks.hookutils import eval_statement
hiddenimports = ["PyQt5.QtCore",
"PyQt5.QtWidgets",
"PyQt5.QtGui",
"PyQt5.QtSvg"]
if eval_statement("from PyQt5 import Qwt5; print hasattr(Qwt5, 'toNumpy')"):
hiddenimports.append("numpy")
if eval_statement("from PyQt5 import Qwt5; print hasattr(Qwt5, 'toNumeric')"):
hiddenimports.append("Numeric")
if eval_statement("from PyQt5 import Qwt5; print hasattr(Qwt5, 'toNumarray')"):
hiddenimports.append("numarray")
|
{
"content_hash": "1a99703f34f94289397ab4714bc1423a",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 79,
"avg_line_length": 41,
"alnum_prop": 0.6735459662288931,
"repo_name": "timeyyy/PyUpdater",
"id": "22747a703da37a6a528094fe22a40872948fe2f0",
"size": "938",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "pyupdater/vendor/PyInstaller/hooks/hook-PyQt5.Qwt5.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1841"
},
{
"name": "Makefile",
"bytes": "631"
},
{
"name": "PowerShell",
"bytes": "5986"
},
{
"name": "Python",
"bytes": "554777"
}
],
"symlink_target": ""
}
|
from anvil import Anvil
from anvil.entities import KilnRepo
def main():
anvil = Anvil("spectrum")
anvil.create_session_by_prompting()
res = anvil.get_json("/Repo/68219")
repo = KilnRepo.from_json(anvil, res)
subrepos = repo.where_used()
if __name__ == '__main__':
main()
|
{
"content_hash": "8aee301b27d007a831c6c278fa5134ea",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 41,
"avg_line_length": 24.75,
"alnum_prop": 0.6464646464646465,
"repo_name": "luigiberrettini/Anvil",
"id": "5af9fc408d2d867e3e6628f4cd5dc2a1a76eebb4",
"size": "298",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "anvil/examples/repo_where_used.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "27921"
}
],
"symlink_target": ""
}
|
import pandas
import os
import utilities
def summarize_flight_history_day(base_day_path, test_day_path = None):
path = os.path.join(base_day_path, "FlightHistory", "flighthistory.csv")
flighthistory = pandas.read_csv(path)
print "In file %s:" % path
print "Number of flight_history_ids: %d, Number unique: %d" % (len(flighthistory.flight_history_id), len(set(flighthistory.flight_history_id)))
date_fields = [
'published_departure',
'published_arrival',
'scheduled_gate_departure',
'actual_gate_departure',
'scheduled_gate_arrival',
'actual_gate_arrival',
'scheduled_runway_departure',
'actual_runway_departure',
'scheduled_runway_arrival',
'actual_runway_arrival',
]
map(lambda field: utilities.summarize_date_field(flighthistory, field), date_fields)
path = os.path.join(base_day_path, "FlightHistory", "flighthistoryevents.csv")
flighthistoryevents = pandas.read_csv(path)
print "Now looking at FlightHistoryEvents"
print path
print "%d rows" % len(flighthistoryevents.date_time_recorded)
utilities.summarize_date_field(flighthistoryevents, "date_time_recorded")
if test_day_path:
print "\n"
summarize_flight_history_day(test_day_path)
else:
print "\n\n"
def summarize_asdi_day(base_day_path, test_day_path = None):
asdi_path = os.path.join(base_day_path, "ASDI")
flightplan = pandas.read_csv(os.path.join(asdi_path, "asdiflightplan.csv"))
print "%d flightplans, %d unique flightplanids, %d distinct flights" % (len(flightplan.asdiflightplanid), len(set(flightplan.asdiflightplanid)), len(set(flightplan.flighthistoryid)))
utilities.summarize_date_field(flightplan, "updatetimeutc")
if test_day_path:
print "\n"
summarize_asdi_day(test_day_path)
else:
print "\n\n"
if __name__=="__main__":
summarize_flight_history_day('''C:\Users\david\Dropbox\GEFlight\Release 1\InitialTrainingSet_rev1\\2012_11_19''',
'''C:\Users\david\Dropbox\GEFlight\Release 1\SampleTestSet\\2012_11_19''')
summarize_asdi_day('''C:\Users\david\Dropbox\GEFlight\Release 1\InitialTrainingSet_rev1\\2012_11_19''',
'''C:\Users\david\Dropbox\GEFlight\Release 1\SampleTestSet\\2012_11_19''')
|
{
"content_hash": "5dea13e682172ad28187ddf5f11e6db5",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 183,
"avg_line_length": 37.875,
"alnum_prop": 0.7392739273927392,
"repo_name": "benhamner/GEFlightQuest",
"id": "351875757925cb204be23148b9f90041aeb8215b",
"size": "2121",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "PythonModule/geflight/summarize/summarize_flight.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "102510"
},
{
"name": "Shell",
"bytes": "47"
}
],
"symlink_target": ""
}
|
import os
import sys
import imp
from .collection import Collection
from .exceptions import CollectionNotFound
from .util import debug
DEFAULT_COLLECTION_NAME = 'tasks'
class Loader(object):
"""
Abstract class defining how to load a session's base `.Collection`.
"""
def find(self, name):
"""
Implementation-specific finder method seeking collection ``name``.
Must return a 4-tuple valid for use by `imp.load_module`, which is
typically a name string followed by the contents of the 3-tuple
returned by `imp.find_module` (``file``, ``pathname``,
``description``.)
For a sample implementation, see `.FilesystemLoader`.
"""
raise NotImplementedError
def load(self, name=DEFAULT_COLLECTION_NAME):
"""
Load and return collection identified by ``name``.
This method requires a working implementation of `.find` in order to
function.
In addition to importing the named module, it will add the module's
parent directory to the front of `sys.path` to provide normal Python
import behavior (i.e. so the loaded module may load local-to-it modules
or packages.)
"""
# Find the named tasks module, depending on implementation.
# Will raise an exception if not found.
fd, path, desc = self.find(name)
try:
# Ensure containing directory is on sys.path in case the module
# being imported is trying to load local-to-it names.
parent = os.path.dirname(path)
sys.path.insert(0, parent)
# Actual import
module = imp.load_module(name, fd, path, desc)
# Make a collection from it, and done
return Collection.from_module(module, loaded_from=parent)
finally:
# Ensure we clean up the opened file object returned by find(), if
# there was one (eg found packages, vs modules, don't open any
# file.)
if fd:
fd.close()
class FilesystemLoader(Loader):
"""
Loads Python files from the filesystem (e.g. ``tasks.py``.)
Searches recursively towards filesystem root from a given start point.
"""
def __init__(self, start=None):
self._start = start
@property
def start(self):
# Lazily determine default CWD
return self._start or os.getcwd()
def find(self, name):
# Accumulate all parent directories
start = self.start
debug("FilesystemLoader find starting at {0!r}".format(start))
parents = [os.path.abspath(start)]
parents.append(os.path.dirname(parents[-1]))
while parents[-1] != parents[-2]:
parents.append(os.path.dirname(parents[-1]))
# Make sure we haven't got duplicates on the end
if parents[-1] == parents[-2]:
parents = parents[:-1]
# Use find_module with our list of parents. ImportError from
# find_module means "couldn't find" not "found and couldn't import" so
# we turn it into a more obvious exception class.
try:
tup = imp.find_module(name, parents)
debug("Found module: {0!r}".format(tup[1]))
return tup
except ImportError:
raise CollectionNotFound(name=name, start=start)
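
# Minimal usage sketch (assumes a tasks.py exists at or above the starting
# directory; the path below is illustrative):
#
#   loader = FilesystemLoader(start='/path/to/project')
#   collection = loader.load()  # imports tasks.py and returns a Collection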
|
{
"content_hash": "c1ca72ffd0bd727006cb0bce9f91bf23",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 79,
"avg_line_length": 35.463157894736845,
"alnum_prop": 0.6147224695755417,
"repo_name": "singingwolfboy/invoke",
"id": "7e37f4e050c39be238d0fbce92452f4226acb9a1",
"size": "3369",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "invoke/loader.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "PowerShell",
"bytes": "876"
},
{
"name": "Python",
"bytes": "286205"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import datetime
import re
import sys
from unittest import skipIf
import warnings
from xml.dom.minidom import parseString
try:
import pytz
except ImportError:
pytz = None
from django.core import serializers
from django.core.urlresolvers import reverse
from django.db.models import Min, Max
from django.http import HttpRequest
from django.template import Context, RequestContext, Template, TemplateSyntaxError
from django.test import TestCase, override_settings, skipIfDBFeature, skipUnlessDBFeature
from django.test.utils import requires_tz_support
from django.utils import six
from django.utils import timezone
from .forms import EventForm, EventSplitForm, EventLocalizedForm, EventModelForm, EventLocalizedModelForm
from .models import Event, MaybeEvent, Session, SessionEvent, Timestamp, AllDayEvent
# These tests use the EAT (Eastern Africa Time) and ICT (Indochina Time)
# which don't have Daylight Saving Time, so we can represent them easily
# with FixedOffset, and use them directly as tzinfo in the constructors.
# settings.TIME_ZONE is forced to EAT. Most tests use a variant of
# datetime.datetime(2011, 9, 1, 13, 20, 30), which translates to
# 10:20:30 in UTC and 17:20:30 in ICT.
UTC = timezone.utc
EAT = timezone.get_fixed_timezone(180) # Africa/Nairobi
ICT = timezone.get_fixed_timezone(420) # Asia/Bangkok
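# Note: get_fixed_timezone() takes an offset in minutes (180 -> UTC+3, 420 -> UTC+7).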
@override_settings(TIME_ZONE='Africa/Nairobi', USE_TZ=False)
class LegacyDatabaseTests(TestCase):
def test_naive_datetime(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertEqual(event.dt, dt)
@skipUnlessDBFeature('supports_microsecond_precision')
def test_naive_datetime_with_microsecond(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertEqual(event.dt, dt)
@skipIfDBFeature('supports_microsecond_precision')
def test_naive_datetime_with_microsecond_unsupported(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
Event.objects.create(dt=dt)
event = Event.objects.get()
# microseconds are lost during a round-trip in the database
self.assertEqual(event.dt, dt.replace(microsecond=0))
@skipUnlessDBFeature('supports_timezones')
def test_aware_datetime_in_local_timezone(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# interpret the naive datetime in local time to get the correct value
self.assertEqual(event.dt.replace(tzinfo=EAT), dt)
@skipUnlessDBFeature('supports_timezones')
@skipUnlessDBFeature('supports_microsecond_precision')
def test_aware_datetime_in_local_timezone_with_microsecond(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060, tzinfo=EAT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# interpret the naive datetime in local time to get the correct value
self.assertEqual(event.dt.replace(tzinfo=EAT), dt)
# This combination actually never happens.
@skipUnlessDBFeature('supports_timezones')
@skipIfDBFeature('supports_microsecond_precision')
def test_aware_datetime_in_local_timezone_with_microsecond_unsupported(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060, tzinfo=EAT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# interpret the naive datetime in local time to get the correct value
# microseconds are lost during a round-trip in the database
self.assertEqual(event.dt.replace(tzinfo=EAT), dt.replace(microsecond=0))
@skipUnlessDBFeature('supports_timezones')
@skipIfDBFeature('needs_datetime_string_cast')
def test_aware_datetime_in_utc(self):
dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# interpret the naive datetime in local time to get the correct value
self.assertEqual(event.dt.replace(tzinfo=EAT), dt)
# This combination is no longer possible since timezone support
# was removed from the SQLite backend -- it didn't work.
@skipUnlessDBFeature('supports_timezones')
@skipUnlessDBFeature('needs_datetime_string_cast')
def test_aware_datetime_in_utc_unsupported(self):
dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# django.db.backends.utils.typecast_dt will just drop the
# timezone, so a round-trip in the database alters the data (!)
# interpret the naive datetime in local time and you get a wrong value
self.assertNotEqual(event.dt.replace(tzinfo=EAT), dt)
# interpret the naive datetime in original time to get the correct value
self.assertEqual(event.dt.replace(tzinfo=UTC), dt)
@skipUnlessDBFeature('supports_timezones')
@skipIfDBFeature('needs_datetime_string_cast')
def test_aware_datetime_in_other_timezone(self):
dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# interpret the naive datetime in local time to get the correct value
self.assertEqual(event.dt.replace(tzinfo=EAT), dt)
# This combination is no longer possible since timezone support
# was removed from the SQLite backend -- it didn't work.
@skipUnlessDBFeature('supports_timezones')
@skipUnlessDBFeature('needs_datetime_string_cast')
def test_aware_datetime_in_other_timezone_unsupported(self):
dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# django.db.backends.utils.typecast_dt will just drop the
# timezone, so a round-trip in the database alters the data (!)
# interpret the naive datetime in local time and you get a wrong value
self.assertNotEqual(event.dt.replace(tzinfo=EAT), dt)
# interpret the naive datetime in original time to get the correct value
self.assertEqual(event.dt.replace(tzinfo=ICT), dt)
@skipIfDBFeature('supports_timezones')
    def test_aware_datetime_unsupported(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
with self.assertRaises(ValueError):
Event.objects.create(dt=dt)
def test_auto_now_and_auto_now_add(self):
now = datetime.datetime.now()
past = now - datetime.timedelta(seconds=2)
future = now + datetime.timedelta(seconds=2)
Timestamp.objects.create()
ts = Timestamp.objects.get()
        self.assertLess(past, ts.created)
        self.assertLess(past, ts.updated)
        self.assertGreater(future, ts.created)
        self.assertGreater(future, ts.updated)
def test_query_filter(self):
dt1 = datetime.datetime(2011, 9, 1, 12, 20, 30)
dt2 = datetime.datetime(2011, 9, 1, 14, 20, 30)
Event.objects.create(dt=dt1)
Event.objects.create(dt=dt2)
self.assertEqual(Event.objects.filter(dt__gte=dt1).count(), 2)
self.assertEqual(Event.objects.filter(dt__gt=dt1).count(), 1)
self.assertEqual(Event.objects.filter(dt__gte=dt2).count(), 1)
self.assertEqual(Event.objects.filter(dt__gt=dt2).count(), 0)
def test_query_datetime_lookups(self):
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0))
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0))
self.assertEqual(Event.objects.filter(dt__year=2011).count(), 2)
self.assertEqual(Event.objects.filter(dt__month=1).count(), 2)
self.assertEqual(Event.objects.filter(dt__day=1).count(), 2)
self.assertEqual(Event.objects.filter(dt__week_day=7).count(), 2)
self.assertEqual(Event.objects.filter(dt__hour=1).count(), 1)
self.assertEqual(Event.objects.filter(dt__minute=30).count(), 2)
self.assertEqual(Event.objects.filter(dt__second=0).count(), 2)
def test_query_aggregation(self):
# Only min and max make sense for datetimes.
Event.objects.create(dt=datetime.datetime(2011, 9, 1, 23, 20, 20))
Event.objects.create(dt=datetime.datetime(2011, 9, 1, 13, 20, 30))
Event.objects.create(dt=datetime.datetime(2011, 9, 1, 3, 20, 40))
result = Event.objects.all().aggregate(Min('dt'), Max('dt'))
self.assertEqual(result, {
'dt__min': datetime.datetime(2011, 9, 1, 3, 20, 40),
'dt__max': datetime.datetime(2011, 9, 1, 23, 20, 20),
})
def test_query_annotation(self):
# Only min and max make sense for datetimes.
morning = Session.objects.create(name='morning')
afternoon = Session.objects.create(name='afternoon')
SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 23, 20, 20), session=afternoon)
SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 13, 20, 30), session=afternoon)
SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 3, 20, 40), session=morning)
morning_min_dt = datetime.datetime(2011, 9, 1, 3, 20, 40)
afternoon_min_dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
self.assertQuerysetEqual(
Session.objects.annotate(dt=Min('events__dt')).order_by('dt'),
[morning_min_dt, afternoon_min_dt],
transform=lambda d: d.dt)
self.assertQuerysetEqual(
Session.objects.annotate(dt=Min('events__dt')).filter(dt__lt=afternoon_min_dt),
[morning_min_dt],
transform=lambda d: d.dt)
self.assertQuerysetEqual(
Session.objects.annotate(dt=Min('events__dt')).filter(dt__gte=afternoon_min_dt),
[afternoon_min_dt],
transform=lambda d: d.dt)
def test_query_datetimes(self):
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0))
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0))
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'year'),
[datetime.datetime(2011, 1, 1, 0, 0, 0)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'month'),
[datetime.datetime(2011, 1, 1, 0, 0, 0)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'day'),
[datetime.datetime(2011, 1, 1, 0, 0, 0)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'hour'),
[datetime.datetime(2011, 1, 1, 1, 0, 0),
datetime.datetime(2011, 1, 1, 4, 0, 0)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'minute'),
[datetime.datetime(2011, 1, 1, 1, 30, 0),
datetime.datetime(2011, 1, 1, 4, 30, 0)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'second'),
[datetime.datetime(2011, 1, 1, 1, 30, 0),
datetime.datetime(2011, 1, 1, 4, 30, 0)],
transform=lambda d: d)
def test_raw_sql(self):
# Regression test for #17755
dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
event = Event.objects.create(dt=dt)
self.assertQuerysetEqual(
Event.objects.raw('SELECT * FROM timezones_event WHERE dt = %s', [dt]),
[event],
transform=lambda d: d)
def test_filter_date_field_with_aware_datetime(self):
# Regression test for #17742
day = datetime.date(2011, 9, 1)
AllDayEvent.objects.create(day=day)
# This is 2011-09-02T01:30:00+03:00 in EAT
dt = datetime.datetime(2011, 9, 1, 22, 30, 0, tzinfo=UTC)
self.assertTrue(AllDayEvent.objects.filter(day__gte=dt).exists())
@override_settings(TIME_ZONE='Africa/Nairobi', USE_TZ=True)
class NewDatabaseTests(TestCase):
@requires_tz_support
def test_naive_datetime(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
with warnings.catch_warnings(record=True) as recorded:
warnings.simplefilter('always')
Event.objects.create(dt=dt)
self.assertEqual(len(recorded), 1)
msg = str(recorded[0].message)
self.assertTrue(msg.startswith("DateTimeField Event.dt received "
"a naive datetime"))
event = Event.objects.get()
# naive datetimes are interpreted in local time
self.assertEqual(event.dt, dt.replace(tzinfo=EAT))
@requires_tz_support
def test_datetime_from_date(self):
dt = datetime.date(2011, 9, 1)
with warnings.catch_warnings(record=True) as recorded:
warnings.simplefilter('always')
Event.objects.create(dt=dt)
self.assertEqual(len(recorded), 1)
msg = str(recorded[0].message)
self.assertTrue(msg.startswith("DateTimeField Event.dt received "
"a naive datetime"))
event = Event.objects.get()
self.assertEqual(event.dt, datetime.datetime(2011, 9, 1, tzinfo=EAT))
@requires_tz_support
@skipUnlessDBFeature('supports_microsecond_precision')
def test_naive_datetime_with_microsecond(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
with warnings.catch_warnings(record=True) as recorded:
warnings.simplefilter('always')
Event.objects.create(dt=dt)
self.assertEqual(len(recorded), 1)
msg = str(recorded[0].message)
self.assertTrue(msg.startswith("DateTimeField Event.dt received "
"a naive datetime"))
event = Event.objects.get()
# naive datetimes are interpreted in local time
self.assertEqual(event.dt, dt.replace(tzinfo=EAT))
@requires_tz_support
@skipIfDBFeature('supports_microsecond_precision')
def test_naive_datetime_with_microsecond_unsupported(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
with warnings.catch_warnings(record=True) as recorded:
warnings.simplefilter('always')
Event.objects.create(dt=dt)
self.assertEqual(len(recorded), 1)
msg = str(recorded[0].message)
self.assertTrue(msg.startswith("DateTimeField Event.dt received "
"a naive datetime"))
event = Event.objects.get()
# microseconds are lost during a round-trip in the database
# naive datetimes are interpreted in local time
self.assertEqual(event.dt, dt.replace(microsecond=0, tzinfo=EAT))
def test_aware_datetime_in_local_timezone(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertEqual(event.dt, dt)
@skipUnlessDBFeature('supports_microsecond_precision')
def test_aware_datetime_in_local_timezone_with_microsecond(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060, tzinfo=EAT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertEqual(event.dt, dt)
@skipIfDBFeature('supports_microsecond_precision')
def test_aware_datetime_in_local_timezone_with_microsecond_unsupported(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060, tzinfo=EAT)
Event.objects.create(dt=dt)
event = Event.objects.get()
# microseconds are lost during a round-trip in the database
self.assertEqual(event.dt, dt.replace(microsecond=0))
def test_aware_datetime_in_utc(self):
dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertEqual(event.dt, dt)
def test_aware_datetime_in_other_timezone(self):
dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertEqual(event.dt, dt)
def test_auto_now_and_auto_now_add(self):
now = timezone.now()
past = now - datetime.timedelta(seconds=2)
future = now + datetime.timedelta(seconds=2)
Timestamp.objects.create()
ts = Timestamp.objects.get()
        self.assertLess(past, ts.created)
        self.assertLess(past, ts.updated)
        self.assertGreater(future, ts.created)
        self.assertGreater(future, ts.updated)
def test_query_filter(self):
dt1 = datetime.datetime(2011, 9, 1, 12, 20, 30, tzinfo=EAT)
dt2 = datetime.datetime(2011, 9, 1, 14, 20, 30, tzinfo=EAT)
Event.objects.create(dt=dt1)
Event.objects.create(dt=dt2)
self.assertEqual(Event.objects.filter(dt__gte=dt1).count(), 2)
self.assertEqual(Event.objects.filter(dt__gt=dt1).count(), 1)
self.assertEqual(Event.objects.filter(dt__gte=dt2).count(), 1)
self.assertEqual(Event.objects.filter(dt__gt=dt2).count(), 0)
@skipIf(pytz is None, "this test requires pytz")
def test_query_filter_with_pytz_timezones(self):
tz = pytz.timezone('Europe/Paris')
dt = datetime.datetime(2011, 9, 1, 12, 20, 30, tzinfo=tz)
Event.objects.create(dt=dt)
next = dt + datetime.timedelta(seconds=3)
prev = dt - datetime.timedelta(seconds=3)
self.assertEqual(Event.objects.filter(dt__exact=dt).count(), 1)
self.assertEqual(Event.objects.filter(dt__exact=next).count(), 0)
self.assertEqual(Event.objects.filter(dt__in=(prev, next)).count(), 0)
self.assertEqual(Event.objects.filter(dt__in=(prev, dt, next)).count(), 1)
self.assertEqual(Event.objects.filter(dt__range=(prev, next)).count(), 1)
@requires_tz_support
def test_query_filter_with_naive_datetime(self):
dt = datetime.datetime(2011, 9, 1, 12, 20, 30, tzinfo=EAT)
Event.objects.create(dt=dt)
dt = dt.replace(tzinfo=None)
with warnings.catch_warnings(record=True) as recorded:
warnings.simplefilter('always')
# naive datetimes are interpreted in local time
self.assertEqual(Event.objects.filter(dt__exact=dt).count(), 1)
self.assertEqual(Event.objects.filter(dt__lte=dt).count(), 1)
self.assertEqual(Event.objects.filter(dt__gt=dt).count(), 0)
self.assertEqual(len(recorded), 3)
for warning in recorded:
msg = str(warning.message)
self.assertTrue(msg.startswith("DateTimeField Event.dt "
"received a naive datetime"))
@skipUnlessDBFeature('has_zoneinfo_database')
def test_query_datetime_lookups(self):
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT))
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT))
self.assertEqual(Event.objects.filter(dt__year=2011).count(), 2)
self.assertEqual(Event.objects.filter(dt__month=1).count(), 2)
self.assertEqual(Event.objects.filter(dt__day=1).count(), 2)
self.assertEqual(Event.objects.filter(dt__week_day=7).count(), 2)
self.assertEqual(Event.objects.filter(dt__hour=1).count(), 1)
self.assertEqual(Event.objects.filter(dt__minute=30).count(), 2)
self.assertEqual(Event.objects.filter(dt__second=0).count(), 2)
@skipUnlessDBFeature('has_zoneinfo_database')
def test_query_datetime_lookups_in_other_timezone(self):
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT))
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT))
with timezone.override(UTC):
# These two dates fall in the same day in EAT, but in different days,
# years and months in UTC.
self.assertEqual(Event.objects.filter(dt__year=2011).count(), 1)
self.assertEqual(Event.objects.filter(dt__month=1).count(), 1)
self.assertEqual(Event.objects.filter(dt__day=1).count(), 1)
self.assertEqual(Event.objects.filter(dt__week_day=7).count(), 1)
self.assertEqual(Event.objects.filter(dt__hour=22).count(), 1)
self.assertEqual(Event.objects.filter(dt__minute=30).count(), 2)
self.assertEqual(Event.objects.filter(dt__second=0).count(), 2)
def test_query_aggregation(self):
# Only min and max make sense for datetimes.
Event.objects.create(dt=datetime.datetime(2011, 9, 1, 23, 20, 20, tzinfo=EAT))
Event.objects.create(dt=datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT))
Event.objects.create(dt=datetime.datetime(2011, 9, 1, 3, 20, 40, tzinfo=EAT))
result = Event.objects.all().aggregate(Min('dt'), Max('dt'))
self.assertEqual(result, {
'dt__min': datetime.datetime(2011, 9, 1, 3, 20, 40, tzinfo=EAT),
'dt__max': datetime.datetime(2011, 9, 1, 23, 20, 20, tzinfo=EAT),
})
def test_query_annotation(self):
# Only min and max make sense for datetimes.
morning = Session.objects.create(name='morning')
afternoon = Session.objects.create(name='afternoon')
SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 23, 20, 20, tzinfo=EAT), session=afternoon)
SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT), session=afternoon)
SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 3, 20, 40, tzinfo=EAT), session=morning)
morning_min_dt = datetime.datetime(2011, 9, 1, 3, 20, 40, tzinfo=EAT)
afternoon_min_dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
self.assertQuerysetEqual(
Session.objects.annotate(dt=Min('events__dt')).order_by('dt'),
[morning_min_dt, afternoon_min_dt],
transform=lambda d: d.dt)
self.assertQuerysetEqual(
Session.objects.annotate(dt=Min('events__dt')).filter(dt__lt=afternoon_min_dt),
[morning_min_dt],
transform=lambda d: d.dt)
self.assertQuerysetEqual(
Session.objects.annotate(dt=Min('events__dt')).filter(dt__gte=afternoon_min_dt),
[afternoon_min_dt],
transform=lambda d: d.dt)
@skipUnlessDBFeature('has_zoneinfo_database')
def test_query_datetimes(self):
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT))
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT))
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'year'),
[datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=EAT)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'month'),
[datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=EAT)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'day'),
[datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=EAT)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'hour'),
[datetime.datetime(2011, 1, 1, 1, 0, 0, tzinfo=EAT),
datetime.datetime(2011, 1, 1, 4, 0, 0, tzinfo=EAT)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'minute'),
[datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT),
datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'second'),
[datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT),
datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT)],
transform=lambda d: d)
@skipUnlessDBFeature('has_zoneinfo_database')
def test_query_datetimes_in_other_timezone(self):
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT))
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT))
with timezone.override(UTC):
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'year'),
[datetime.datetime(2010, 1, 1, 0, 0, 0, tzinfo=UTC),
datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=UTC)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'month'),
[datetime.datetime(2010, 12, 1, 0, 0, 0, tzinfo=UTC),
datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=UTC)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'day'),
[datetime.datetime(2010, 12, 31, 0, 0, 0, tzinfo=UTC),
datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=UTC)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'hour'),
[datetime.datetime(2010, 12, 31, 22, 0, 0, tzinfo=UTC),
datetime.datetime(2011, 1, 1, 1, 0, 0, tzinfo=UTC)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'minute'),
[datetime.datetime(2010, 12, 31, 22, 30, 0, tzinfo=UTC),
datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=UTC)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'second'),
[datetime.datetime(2010, 12, 31, 22, 30, 0, tzinfo=UTC),
datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=UTC)],
transform=lambda d: d)
def test_raw_sql(self):
# Regression test for #17755
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
event = Event.objects.create(dt=dt)
self.assertQuerysetEqual(
Event.objects.raw('SELECT * FROM timezones_event WHERE dt = %s', [dt]),
[event],
transform=lambda d: d)
@requires_tz_support
def test_filter_date_field_with_aware_datetime(self):
# Regression test for #17742
day = datetime.date(2011, 9, 1)
AllDayEvent.objects.create(day=day)
# This is 2011-09-02T01:30:00+03:00 in EAT
dt = datetime.datetime(2011, 9, 1, 22, 30, 0, tzinfo=UTC)
self.assertFalse(AllDayEvent.objects.filter(day__gte=dt).exists())
def test_null_datetime(self):
# Regression test for #17294
e = MaybeEvent.objects.create()
self.assertEqual(e.dt, None)
@override_settings(TIME_ZONE='Africa/Nairobi')
class SerializationTests(TestCase):
# Backend-specific notes:
# - JSON supports only milliseconds, microseconds will be truncated.
# - PyYAML dumps the UTC offset correctly for timezone-aware datetimes,
# but when it loads this representation, it substracts the offset and
# returns a naive datetime object in UTC (http://pyyaml.org/ticket/202).
# Tests are adapted to take these quirks into account.
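    # For example, 13:20:30.405060 comes back from JSON as 13:20:30.405000
    # (microsecond=405000), which the microsecond tests below account for.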
def assert_python_contains_datetime(self, objects, dt):
self.assertEqual(objects[0]['fields']['dt'], dt)
def assert_json_contains_datetime(self, json, dt):
self.assertIn('"fields": {"dt": "%s"}' % dt, json)
def assert_xml_contains_datetime(self, xml, dt):
field = parseString(xml).getElementsByTagName('field')[0]
self.assertXMLEqual(field.childNodes[0].wholeText, dt)
def assert_yaml_contains_datetime(self, yaml, dt):
# Depending on the yaml dumper, '!timestamp' might be absent
six.assertRegex(self, yaml,
r"- fields: {dt: !(!timestamp)? '%s'}" % re.escape(dt))
def test_naive_datetime(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
data = serializers.serialize('python', [Event(dt=dt)])
self.assert_python_contains_datetime(data, dt)
obj = next(serializers.deserialize('python', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('json', [Event(dt=dt)])
self.assert_json_contains_datetime(data, "2011-09-01T13:20:30")
obj = next(serializers.deserialize('json', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('xml', [Event(dt=dt)])
self.assert_xml_contains_datetime(data, "2011-09-01T13:20:30")
obj = next(serializers.deserialize('xml', data)).object
self.assertEqual(obj.dt, dt)
if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer):
data = serializers.serialize('yaml', [Event(dt=dt)])
self.assert_yaml_contains_datetime(data, "2011-09-01 13:20:30")
obj = next(serializers.deserialize('yaml', data)).object
self.assertEqual(obj.dt, dt)
def test_naive_datetime_with_microsecond(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
data = serializers.serialize('python', [Event(dt=dt)])
self.assert_python_contains_datetime(data, dt)
obj = next(serializers.deserialize('python', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('json', [Event(dt=dt)])
self.assert_json_contains_datetime(data, "2011-09-01T13:20:30.405")
obj = next(serializers.deserialize('json', data)).object
self.assertEqual(obj.dt, dt.replace(microsecond=405000))
data = serializers.serialize('xml', [Event(dt=dt)])
self.assert_xml_contains_datetime(data, "2011-09-01T13:20:30.405060")
obj = next(serializers.deserialize('xml', data)).object
self.assertEqual(obj.dt, dt)
if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer):
data = serializers.serialize('yaml', [Event(dt=dt)])
self.assert_yaml_contains_datetime(data, "2011-09-01 13:20:30.405060")
obj = next(serializers.deserialize('yaml', data)).object
self.assertEqual(obj.dt, dt)
def test_aware_datetime_with_microsecond(self):
dt = datetime.datetime(2011, 9, 1, 17, 20, 30, 405060, tzinfo=ICT)
data = serializers.serialize('python', [Event(dt=dt)])
self.assert_python_contains_datetime(data, dt)
obj = next(serializers.deserialize('python', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('json', [Event(dt=dt)])
self.assert_json_contains_datetime(data, "2011-09-01T17:20:30.405+07:00")
obj = next(serializers.deserialize('json', data)).object
self.assertEqual(obj.dt, dt.replace(microsecond=405000))
data = serializers.serialize('xml', [Event(dt=dt)])
self.assert_xml_contains_datetime(data, "2011-09-01T17:20:30.405060+07:00")
obj = next(serializers.deserialize('xml', data)).object
self.assertEqual(obj.dt, dt)
if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer):
data = serializers.serialize('yaml', [Event(dt=dt)])
self.assert_yaml_contains_datetime(data, "2011-09-01 17:20:30.405060+07:00")
obj = next(serializers.deserialize('yaml', data)).object
self.assertEqual(obj.dt.replace(tzinfo=UTC), dt)
def test_aware_datetime_in_utc(self):
dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
data = serializers.serialize('python', [Event(dt=dt)])
self.assert_python_contains_datetime(data, dt)
obj = next(serializers.deserialize('python', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('json', [Event(dt=dt)])
self.assert_json_contains_datetime(data, "2011-09-01T10:20:30Z")
obj = next(serializers.deserialize('json', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('xml', [Event(dt=dt)])
self.assert_xml_contains_datetime(data, "2011-09-01T10:20:30+00:00")
obj = next(serializers.deserialize('xml', data)).object
self.assertEqual(obj.dt, dt)
if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer):
data = serializers.serialize('yaml', [Event(dt=dt)])
self.assert_yaml_contains_datetime(data, "2011-09-01 10:20:30+00:00")
obj = next(serializers.deserialize('yaml', data)).object
self.assertEqual(obj.dt.replace(tzinfo=UTC), dt)
def test_aware_datetime_in_local_timezone(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
data = serializers.serialize('python', [Event(dt=dt)])
self.assert_python_contains_datetime(data, dt)
obj = next(serializers.deserialize('python', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('json', [Event(dt=dt)])
self.assert_json_contains_datetime(data, "2011-09-01T13:20:30+03:00")
obj = next(serializers.deserialize('json', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('xml', [Event(dt=dt)])
self.assert_xml_contains_datetime(data, "2011-09-01T13:20:30+03:00")
obj = next(serializers.deserialize('xml', data)).object
self.assertEqual(obj.dt, dt)
if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer):
data = serializers.serialize('yaml', [Event(dt=dt)])
self.assert_yaml_contains_datetime(data, "2011-09-01 13:20:30+03:00")
obj = next(serializers.deserialize('yaml', data)).object
self.assertEqual(obj.dt.replace(tzinfo=UTC), dt)
def test_aware_datetime_in_other_timezone(self):
dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT)
data = serializers.serialize('python', [Event(dt=dt)])
self.assert_python_contains_datetime(data, dt)
obj = next(serializers.deserialize('python', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('json', [Event(dt=dt)])
self.assert_json_contains_datetime(data, "2011-09-01T17:20:30+07:00")
obj = next(serializers.deserialize('json', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('xml', [Event(dt=dt)])
self.assert_xml_contains_datetime(data, "2011-09-01T17:20:30+07:00")
obj = next(serializers.deserialize('xml', data)).object
self.assertEqual(obj.dt, dt)
if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer):
data = serializers.serialize('yaml', [Event(dt=dt)])
self.assert_yaml_contains_datetime(data, "2011-09-01 17:20:30+07:00")
obj = next(serializers.deserialize('yaml', data)).object
self.assertEqual(obj.dt.replace(tzinfo=UTC), dt)
@override_settings(DATETIME_FORMAT='c', TIME_ZONE='Africa/Nairobi', USE_L10N=False, USE_TZ=True)
class TemplateTests(TestCase):
@requires_tz_support
def test_localtime_templatetag_and_filters(self):
"""
Test the {% localtime %} templatetag and related filters.
"""
datetimes = {
'utc': datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC),
'eat': datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT),
'ict': datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT),
'naive': datetime.datetime(2011, 9, 1, 13, 20, 30),
}
templates = {
'notag': Template("{% load tz %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:ICT }}"),
'noarg': Template("{% load tz %}{% localtime %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:ICT }}{% endlocaltime %}"),
'on': Template("{% load tz %}{% localtime on %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:ICT }}{% endlocaltime %}"),
'off': Template("{% load tz %}{% localtime off %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:ICT }}{% endlocaltime %}"),
}
# Transform a list of keys in 'datetimes' to the expected template
# output. This makes the definition of 'results' more readable.
def t(*result):
return '|'.join(datetimes[key].isoformat() for key in result)
# Results for USE_TZ = True
results = {
'utc': {
'notag': t('eat', 'eat', 'utc', 'ict'),
'noarg': t('eat', 'eat', 'utc', 'ict'),
'on': t('eat', 'eat', 'utc', 'ict'),
'off': t('utc', 'eat', 'utc', 'ict'),
},
'eat': {
'notag': t('eat', 'eat', 'utc', 'ict'),
'noarg': t('eat', 'eat', 'utc', 'ict'),
'on': t('eat', 'eat', 'utc', 'ict'),
'off': t('eat', 'eat', 'utc', 'ict'),
},
'ict': {
'notag': t('eat', 'eat', 'utc', 'ict'),
'noarg': t('eat', 'eat', 'utc', 'ict'),
'on': t('eat', 'eat', 'utc', 'ict'),
'off': t('ict', 'eat', 'utc', 'ict'),
},
'naive': {
'notag': t('naive', 'eat', 'utc', 'ict'),
'noarg': t('naive', 'eat', 'utc', 'ict'),
'on': t('naive', 'eat', 'utc', 'ict'),
'off': t('naive', 'eat', 'utc', 'ict'),
}
}
for k1, dt in six.iteritems(datetimes):
for k2, tpl in six.iteritems(templates):
ctx = Context({'dt': dt, 'ICT': ICT})
actual = tpl.render(ctx)
expected = results[k1][k2]
self.assertEqual(actual, expected, '%s / %s: %r != %r' % (k1, k2, actual, expected))
# Changes for USE_TZ = False
results['utc']['notag'] = t('utc', 'eat', 'utc', 'ict')
results['ict']['notag'] = t('ict', 'eat', 'utc', 'ict')
with self.settings(USE_TZ=False):
for k1, dt in six.iteritems(datetimes):
for k2, tpl in six.iteritems(templates):
ctx = Context({'dt': dt, 'ICT': ICT})
actual = tpl.render(ctx)
expected = results[k1][k2]
self.assertEqual(actual, expected, '%s / %s: %r != %r' % (k1, k2, actual, expected))
@skipIf(pytz is None, "this test requires pytz")
def test_localtime_filters_with_pytz(self):
"""
Test the |localtime, |utc, and |timezone filters with pytz.
"""
# Use a pytz timezone as local time
tpl = Template("{% load tz %}{{ dt|localtime }}|{{ dt|utc }}")
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 12, 20, 30)})
with self.settings(TIME_ZONE='Europe/Paris'):
self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00|2011-09-01T10:20:30+00:00")
# Use a pytz timezone as argument
tpl = Template("{% load tz %}{{ dt|timezone:tz }}")
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30),
'tz': pytz.timezone('Europe/Paris')})
self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00")
# Use a pytz timezone name as argument
tpl = Template("{% load tz %}{{ dt|timezone:'Europe/Paris' }}")
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30),
'tz': pytz.timezone('Europe/Paris')})
self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00")
def test_localtime_templatetag_invalid_argument(self):
with self.assertRaises(TemplateSyntaxError):
Template("{% load tz %}{% localtime foo %}{% endlocaltime %}").render()
def test_localtime_filters_do_not_raise_exceptions(self):
"""
Test the |localtime, |utc, and |timezone filters on bad inputs.
"""
tpl = Template("{% load tz %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:tz }}")
with self.settings(USE_TZ=True):
# bad datetime value
ctx = Context({'dt': None, 'tz': ICT})
self.assertEqual(tpl.render(ctx), "None|||")
ctx = Context({'dt': 'not a date', 'tz': ICT})
self.assertEqual(tpl.render(ctx), "not a date|||")
# bad timezone value
tpl = Template("{% load tz %}{{ dt|timezone:tz }}")
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30), 'tz': None})
self.assertEqual(tpl.render(ctx), "")
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30), 'tz': 'not a tz'})
self.assertEqual(tpl.render(ctx), "")
@requires_tz_support
def test_timezone_templatetag(self):
"""
Test the {% timezone %} templatetag.
"""
tpl = Template(
"{% load tz %}"
"{{ dt }}|"
"{% timezone tz1 %}"
"{{ dt }}|"
"{% timezone tz2 %}"
"{{ dt }}"
"{% endtimezone %}"
"{% endtimezone %}"
)
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC),
'tz1': ICT, 'tz2': None})
self.assertEqual(tpl.render(ctx), "2011-09-01T13:20:30+03:00|2011-09-01T17:20:30+07:00|2011-09-01T13:20:30+03:00")
@skipIf(pytz is None, "this test requires pytz")
def test_timezone_templatetag_with_pytz(self):
"""
Test the {% timezone %} templatetag with pytz.
"""
tpl = Template("{% load tz %}{% timezone tz %}{{ dt }}{% endtimezone %}")
# Use a pytz timezone as argument
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT),
'tz': pytz.timezone('Europe/Paris')})
self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00")
# Use a pytz timezone name as argument
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT),
'tz': 'Europe/Paris'})
self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00")
def test_timezone_templatetag_invalid_argument(self):
with self.assertRaises(TemplateSyntaxError):
Template("{% load tz %}{% timezone %}{% endtimezone %}").render()
with self.assertRaises(ValueError if pytz is None else pytz.UnknownTimeZoneError):
Template("{% load tz %}{% timezone tz %}{% endtimezone %}").render(Context({'tz': 'foobar'}))
@skipIf(sys.platform.startswith('win'), "Windows uses non-standard time zone names")
def test_get_current_timezone_templatetag(self):
"""
Test the {% get_current_timezone %} templatetag.
"""
tpl = Template("{% load tz %}{% get_current_timezone as time_zone %}{{ time_zone }}")
self.assertEqual(tpl.render(Context()), "Africa/Nairobi" if pytz else "EAT")
with timezone.override(UTC):
self.assertEqual(tpl.render(Context()), "UTC")
tpl = Template("{% load tz %}{% timezone tz %}{% get_current_timezone as time_zone %}{% endtimezone %}{{ time_zone }}")
self.assertEqual(tpl.render(Context({'tz': ICT})), "+0700")
with timezone.override(UTC):
self.assertEqual(tpl.render(Context({'tz': ICT})), "+0700")
@skipIf(pytz is None, "this test requires pytz")
def test_get_current_timezone_templatetag_with_pytz(self):
"""
Test the {% get_current_timezone %} templatetag with pytz.
"""
tpl = Template("{% load tz %}{% get_current_timezone as time_zone %}{{ time_zone }}")
with timezone.override(pytz.timezone('Europe/Paris')):
self.assertEqual(tpl.render(Context()), "Europe/Paris")
tpl = Template("{% load tz %}{% timezone 'Europe/Paris' %}{% get_current_timezone as time_zone %}{% endtimezone %}{{ time_zone }}")
self.assertEqual(tpl.render(Context()), "Europe/Paris")
def test_get_current_timezone_templatetag_invalid_argument(self):
with self.assertRaises(TemplateSyntaxError):
Template("{% load tz %}{% get_current_timezone %}").render()
@skipIf(sys.platform.startswith('win'), "Windows uses non-standard time zone names")
def test_tz_template_context_processor(self):
"""
Test the django.core.context_processors.tz template context processor.
"""
tpl = Template("{{ TIME_ZONE }}")
self.assertEqual(tpl.render(Context()), "")
self.assertEqual(tpl.render(RequestContext(HttpRequest())), "Africa/Nairobi" if pytz else "EAT")
@requires_tz_support
def test_date_and_time_template_filters(self):
tpl = Template("{{ dt|date:'Y-m-d' }} at {{ dt|time:'H:i:s' }}")
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 20, 20, 20, tzinfo=UTC)})
self.assertEqual(tpl.render(ctx), "2011-09-01 at 23:20:20")
with timezone.override(ICT):
self.assertEqual(tpl.render(ctx), "2011-09-02 at 03:20:20")
def test_date_and_time_template_filters_honor_localtime(self):
tpl = Template("{% load tz %}{% localtime off %}{{ dt|date:'Y-m-d' }} at {{ dt|time:'H:i:s' }}{% endlocaltime %}")
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 20, 20, 20, tzinfo=UTC)})
self.assertEqual(tpl.render(ctx), "2011-09-01 at 20:20:20")
with timezone.override(ICT):
self.assertEqual(tpl.render(ctx), "2011-09-01 at 20:20:20")
def test_localtime_with_time_zone_setting_set_to_none(self):
# Regression for #17274
tpl = Template("{% load tz %}{{ dt }}")
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 12, 20, 30, tzinfo=EAT)})
with self.settings(TIME_ZONE=None):
# the actual value depends on the system time zone of the host
self.assertTrue(tpl.render(ctx).startswith("2011"))
@requires_tz_support
def test_now_template_tag_uses_current_time_zone(self):
# Regression for #17343
tpl = Template("{% now \"O\" %}")
self.assertEqual(tpl.render(Context({})), "+0300")
with timezone.override(ICT):
self.assertEqual(tpl.render(Context({})), "+0700")
@override_settings(DATETIME_FORMAT='c', TIME_ZONE='Africa/Nairobi', USE_L10N=False, USE_TZ=False)
class LegacyFormsTests(TestCase):
def test_form(self):
form = EventForm({'dt': '2011-09-01 13:20:30'})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 13, 20, 30))
@skipIf(pytz is None, "this test requires pytz")
def test_form_with_non_existent_time(self):
form = EventForm({'dt': '2011-03-27 02:30:00'})
with timezone.override(pytz.timezone('Europe/Paris')):
# this is obviously a bug
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 3, 27, 2, 30, 0))
@skipIf(pytz is None, "this test requires pytz")
def test_form_with_ambiguous_time(self):
form = EventForm({'dt': '2011-10-30 02:30:00'})
with timezone.override(pytz.timezone('Europe/Paris')):
# this is obviously a bug
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 10, 30, 2, 30, 0))
def test_split_form(self):
form = EventSplitForm({'dt_0': '2011-09-01', 'dt_1': '13:20:30'})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 13, 20, 30))
def test_model_form(self):
EventModelForm({'dt': '2011-09-01 13:20:30'}).save()
e = Event.objects.get()
self.assertEqual(e.dt, datetime.datetime(2011, 9, 1, 13, 20, 30))
@override_settings(DATETIME_FORMAT='c', TIME_ZONE='Africa/Nairobi', USE_L10N=False, USE_TZ=True)
class NewFormsTests(TestCase):
@requires_tz_support
def test_form(self):
form = EventForm({'dt': '2011-09-01 13:20:30'})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
def test_form_with_other_timezone(self):
form = EventForm({'dt': '2011-09-01 17:20:30'})
with timezone.override(ICT):
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
def test_form_with_explicit_timezone(self):
form = EventForm({'dt': '2011-09-01 17:20:30+07:00'})
        # Datetime input formats don't allow providing a time zone.
self.assertFalse(form.is_valid())
@skipIf(pytz is None, "this test requires pytz")
def test_form_with_non_existent_time(self):
with timezone.override(pytz.timezone('Europe/Paris')):
form = EventForm({'dt': '2011-03-27 02:30:00'})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['dt'],
["2011-03-27 02:30:00 couldn't be interpreted in time zone "
"Europe/Paris; it may be ambiguous or it may not exist."])
@skipIf(pytz is None, "this test requires pytz")
def test_form_with_ambiguous_time(self):
with timezone.override(pytz.timezone('Europe/Paris')):
form = EventForm({'dt': '2011-10-30 02:30:00'})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['dt'],
["2011-10-30 02:30:00 couldn't be interpreted in time zone "
"Europe/Paris; it may be ambiguous or it may not exist."])
@requires_tz_support
def test_split_form(self):
form = EventSplitForm({'dt_0': '2011-09-01', 'dt_1': '13:20:30'})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
@requires_tz_support
def test_localized_form(self):
form = EventLocalizedForm(initial={'dt': datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)})
with timezone.override(ICT):
self.assertIn("2011-09-01 17:20:30", str(form))
@requires_tz_support
def test_model_form(self):
EventModelForm({'dt': '2011-09-01 13:20:30'}).save()
e = Event.objects.get()
self.assertEqual(e.dt, datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
@requires_tz_support
def test_localized_model_form(self):
form = EventLocalizedModelForm(instance=Event(dt=datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)))
with timezone.override(ICT):
self.assertIn("2011-09-01 17:20:30", str(form))
@override_settings(DATETIME_FORMAT='c', TIME_ZONE='Africa/Nairobi', USE_L10N=False, USE_TZ=True,
PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF='timezones.urls')
class AdminTests(TestCase):
fixtures = ['tz_users.xml']
def setUp(self):
self.client.login(username='super', password='secret')
@requires_tz_support
def test_changelist(self):
e = Event.objects.create(dt=datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
response = self.client.get(reverse('admin:timezones_event_changelist'))
self.assertContains(response, e.dt.astimezone(EAT).isoformat())
def test_changelist_in_other_timezone(self):
e = Event.objects.create(dt=datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
with timezone.override(ICT):
response = self.client.get(reverse('admin:timezones_event_changelist'))
self.assertContains(response, e.dt.astimezone(ICT).isoformat())
@requires_tz_support
def test_change_editable(self):
e = Event.objects.create(dt=datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
response = self.client.get(reverse('admin:timezones_event_change', args=(e.pk,)))
self.assertContains(response, e.dt.astimezone(EAT).date().isoformat())
self.assertContains(response, e.dt.astimezone(EAT).time().isoformat())
def test_change_editable_in_other_timezone(self):
e = Event.objects.create(dt=datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
with timezone.override(ICT):
response = self.client.get(reverse('admin:timezones_event_change', args=(e.pk,)))
self.assertContains(response, e.dt.astimezone(ICT).date().isoformat())
self.assertContains(response, e.dt.astimezone(ICT).time().isoformat())
@requires_tz_support
def test_change_readonly(self):
Timestamp.objects.create()
# re-fetch the object for backends that lose microseconds (MySQL)
t = Timestamp.objects.get()
response = self.client.get(reverse('admin:timezones_timestamp_change', args=(t.pk,)))
self.assertContains(response, t.created.astimezone(EAT).isoformat())
def test_change_readonly_in_other_timezone(self):
Timestamp.objects.create()
# re-fetch the object for backends that lose microseconds (MySQL)
t = Timestamp.objects.get()
with timezone.override(ICT):
response = self.client.get(reverse('admin:timezones_timestamp_change', args=(t.pk,)))
self.assertContains(response, t.created.astimezone(ICT).isoformat())
@override_settings(TIME_ZONE='Africa/Nairobi')
class UtilitiesTests(TestCase):
def test_make_aware(self):
self.assertEqual(
timezone.make_aware(datetime.datetime(2011, 9, 1, 13, 20, 30), EAT),
datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
)
self.assertEqual(
timezone.make_aware(datetime.datetime(2011, 9, 1, 10, 20, 30), UTC),
datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
)
def test_make_naive(self):
self.assertEqual(
timezone.make_naive(datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT), EAT),
datetime.datetime(2011, 9, 1, 13, 20, 30)
)
self.assertEqual(
timezone.make_naive(datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT), UTC),
datetime.datetime(2011, 9, 1, 10, 20, 30)
)
self.assertEqual(
timezone.make_naive(datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC), UTC),
datetime.datetime(2011, 9, 1, 10, 20, 30)
)
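# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original suite): the
# make_aware/make_naive round-trip exercised by UtilitiesTests above, with a
# fixed +03:00 zone standing in for the EAT fixture; assumes Python 3's
# datetime.timezone and a configured Django settings module.
#
# import datetime
# from django.utils import timezone
#
# eat = datetime.timezone(datetime.timedelta(hours=3))
# naive = datetime.datetime(2011, 9, 1, 13, 20, 30)
# aware = timezone.make_aware(naive, eat)          # attaches tzinfo, keeps wall time
# assert timezone.make_naive(aware, eat) == naive  # drops tzinfo, round-trips
# ---------------------------------------------------------------------------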
|
{
"content_hash": "47b2f5a2a971475682bd3d733af14f4a",
"timestamp": "",
"source": "github",
"line_count": 1150,
"max_line_length": 144,
"avg_line_length": 47.89739130434783,
"alnum_prop": 0.6138121346356341,
"repo_name": "frederick-masterton/django",
"id": "d16c7f7fd27cbf3053185a5ffd4bb1ec437374f7",
"size": "55082",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tests/timezones/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import ctypes
import os, sys
# The plugin .so file has to be loaded at global scope and before `import torch` to avoid cuda version mismatch.
NMS_OPT_PLUGIN_LIBRARY="build/plugins/NMSOptPlugin/libnmsoptplugin.so"
if not os.path.isfile(NMS_OPT_PLUGIN_LIBRARY):
raise IOError("{}\n{}\n".format(
"Failed to load library ({}).".format(NMS_OPT_PLUGIN_LIBRARY),
"Please build the NMS Opt plugin."
))
ctypes.CDLL(NMS_OPT_PLUGIN_LIBRARY)
import argparse
import json
import time
sys.path.insert(0, os.getcwd())
from code.common.runner import EngineRunner, get_input_format
from code.common import logging
import code.common.arguments as common_args
import numpy as np
import torch
import tensorrt as trt
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
def run_SSDResNet34_accuracy(engine_file, batch_size, num_images, verbose=False, output_file="build/out/SSDResNet34/dump.json"):
threshold = 0.20
runner = EngineRunner(engine_file, verbose=verbose)
input_dtype, input_format = get_input_format(runner.engine)
if input_dtype == trt.DataType.FLOAT:
format_string = "fp32"
elif input_dtype == trt.DataType.INT8:
if input_format == trt.TensorFormat.LINEAR:
format_string = "int8_linear"
elif input_format == trt.TensorFormat.CHW4:
format_string = "int8_chw4"
image_dir = os.path.join(os.getenv("PREPROCESSED_DATA_DIR", "build/preprocessed_data"),
"coco/val2017/SSDResNet34", format_string)
val_annotate = os.path.join(os.getenv("PREPROCESSED_DATA_DIR", "build/preprocessed_data"),
"coco/annotations/instances_val2017.json")
coco = COCO(annotation_file=val_annotate)
image_ids = coco.getImgIds()
cat_ids = coco.getCatIds()
# Class 0 is background
cat_ids.insert(0, 0)
num_images = min(num_images, len(image_ids))
logging.info("Running validation on {:} images. Please wait...".format(num_images))
coco_detections = []
batch_idx = 0
for image_idx in range(0, num_images, batch_size):
end_idx = min(image_idx + batch_size, num_images)
img = []
img_sizes = []
for idx in range(image_idx, end_idx):
image_id = image_ids[idx]
img.append(np.load(os.path.join(image_dir, coco.imgs[image_id]["file_name"] + ".npy")))
img_sizes.append([coco.imgs[image_id]["height"], coco.imgs[image_id]["width"]])
img = np.stack(img)
start_time = time.time()
[trt_detections] = runner([img], batch_size=batch_size)
if verbose:
logging.info("Batch {:d} >> Inference time: {:f}".format(batch_idx, time.time() - start_time))
for idx in range(0, end_idx - image_idx):
keep_count = trt_detections[idx * (200 * 7 + 1) + 200 * 7].view('int32')
trt_detections_batch = trt_detections[idx * (200 * 7 + 1):idx * (200 * 7 + 1) + keep_count * 7].reshape(keep_count, 7)
image_height = img_sizes[idx][0]
image_width = img_sizes[idx][1]
for prediction_idx in range(0, keep_count):
loc = trt_detections_batch[prediction_idx, [2, 1, 4, 3]]
label = trt_detections_batch[prediction_idx, 6]
score = float(trt_detections_batch[prediction_idx, 5])
bbox_coco_fmt = [
loc[0] * image_width,
loc[1] * image_height,
(loc[2] - loc[0]) * image_width,
(loc[3] - loc[1]) * image_height,
]
coco_detection = {
"image_id": image_ids[image_idx + idx],
"category_id": cat_ids[int(label)],
"bbox": bbox_coco_fmt,
"score": score,
}
coco_detections.append(coco_detection)
batch_idx += 1
output_dir = os.path.dirname(output_file)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
with open(output_file, "w") as f:
json.dump(coco_detections, f)
cocoDt = coco.loadRes(output_file)
eval = COCOeval(coco, cocoDt, 'bbox')
eval.params.imgIds = image_ids[:num_images]
eval.evaluate()
eval.accumulate()
eval.summarize()
map_score = eval.stats[0]
logging.info("Get mAP score = {:f} Target = {:f}".format(map_score, threshold))
return (map_score >= threshold * 0.99)
def main():
args = common_args.parse_args(common_args.ACCURACY_ARGS)
logging.info("Running accuracy test...")
run_SSDResNet34_accuracy(args["engine_file"], args["batch_size"], args["num_images"],
verbose=args["verbose"])
if __name__ == "__main__":
main()
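# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original script): the
# normalized [x1, y1, x2, y2] -> COCO [x, y, width, height] conversion done
# inside run_SSDResNet34_accuracy, on made-up numbers.
#
# loc = [0.25, 0.40, 0.75, 0.90]          # normalized x1, y1, x2, y2
# image_width, image_height = 640, 480
# bbox_coco_fmt = [loc[0] * image_width,              # x = 160.0
#                  loc[1] * image_height,             # y = 192.0
#                  (loc[2] - loc[0]) * image_width,   # w = 320.0
#                  (loc[3] - loc[1]) * image_height]  # h = 240.0
# ---------------------------------------------------------------------------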
|
{
"content_hash": "d3ee8b91c46704060bf1f21c9c07f366",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 130,
"avg_line_length": 37.22047244094488,
"alnum_prop": 0.6069388618574149,
"repo_name": "mlperf/inference_results_v0.5",
"id": "961e3c2d6d47fdd603d40595c7943534ea769747",
"size": "5361",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "closed/NVIDIA/code/ssd-large/tensorrt/infer.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3233"
},
{
"name": "C",
"bytes": "3952061"
},
{
"name": "C++",
"bytes": "4248758"
},
{
"name": "CMake",
"bytes": "74513"
},
{
"name": "CSS",
"bytes": "28485"
},
{
"name": "Cuda",
"bytes": "234319"
},
{
"name": "Dockerfile",
"bytes": "18506"
},
{
"name": "HTML",
"bytes": "2890"
},
{
"name": "Makefile",
"bytes": "76919"
},
{
"name": "Python",
"bytes": "1573121"
},
{
"name": "Shell",
"bytes": "151430"
}
],
"symlink_target": ""
}
|
import os
import os.path
import tempfile
import subprocess
def generate_credentials(ip, cakeystore=None, cacert=None):
tmpdir = tempfile.mkdtemp()
if not cakeystore:
cakeystore = generate_cakeypair(tmpdir, 'ca')
if not cacert:
cacert = generate_cert(tmpdir, "ca", cakeystore)
# create keystore with new private key
name = "ip" + ip
jkeystore = generate_ipkeypair(tmpdir, name, ip)
# create signed cert
csr = generate_sign_request(tmpdir, name, jkeystore, ['-ext', 'san=ip:' + ip])
cert = sign_request(tmpdir, "ca", cakeystore, csr, ['-ext', 'san=ip:' + ip])
# import cert chain into keystore
import_cert(tmpdir, "ca", cacert, jkeystore)
import_cert(tmpdir, name, cert, jkeystore)
return SecurityCredentials(jkeystore, cert, cakeystore, cacert)
def generate_cakeypair(dir, name):
return generate_keypair(dir, name, name, ['-ext', 'bc:c'])
def generate_ipkeypair(dir, name, ip):
return generate_keypair(dir, name, ip, ['-ext', 'san=ip:' + ip])
def generate_dnskeypair(dir, name, hostname):
return generate_keypair(dir, name, hostname, ['-ext', 'san=dns:' + hostname])
def generate_keypair(dir, name, cn, opts):
kspath = os.path.join(dir, name + '.keystore')
return _exec_keytool(dir, kspath, ['-alias', name, '-genkeypair', '-keyalg', 'RSA', '-dname',
"cn={}, ou=cassandra, o=apache.org, c=US".format(cn), '-keypass', 'cassandra'] + opts)
def generate_cert(dir, name, keystore, opts=[]):
fn = os.path.join(dir, name + '.pem')
_exec_keytool(dir, keystore, ['-alias', name, '-exportcert', '-rfc', '-file', fn] + opts)
return fn
def generate_sign_request(dir, name, keystore, opts=[]):
fn = os.path.join(dir, name + '.csr')
_exec_keytool(dir, keystore, ['-alias', name, '-keypass', 'cassandra', '-certreq', '-file', fn] + opts)
return fn
def sign_request(dir, name, keystore, csr, opts=[]):
fnout = os.path.splitext(csr)[0] + '.pem'
_exec_keytool(dir, keystore, ['-alias', name, '-keypass', 'cassandra', '-gencert',
'-rfc', '-infile', csr, '-outfile', fnout] + opts)
return fnout
def import_cert(dir, name, cert, keystore, opts=[]):
_exec_keytool(dir, keystore, ['-alias', name, '-keypass', 'cassandra', '-importcert', '-noprompt', '-file', cert] + opts)
return cert
def _exec_keytool(dir, keystore, opts):
args = ['keytool', '-keystore', keystore, '-storepass', 'cassandra'] + opts
subprocess.check_call(args)
return keystore
class SecurityCredentials():
def __init__(self, keystore, cert, cakeystore, cacert):
self.keystore = keystore
self.cert = cert
self.cakeystore = cakeystore
self.cacert = cacert
self.basedir = os.path.dirname(self.keystore)
def __str__(self):
return "keystore: {}, cert: {}, cakeystore: {}, cacert: {}".format(
self.keystore, self.cert, self.cakeystore, self.cacert)
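# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original module): typical
# use, assuming a JDK `keytool` binary on PATH; the IP below is made up.
#
# creds = generate_credentials('127.0.0.1')
# print(creds)
# # -> keystore: /tmp/.../ip127.0.0.1.keystore, cert: ..., cakeystore: ..., cacert: ...
# # The keystore now holds the node key plus the imported CA and node certs.
# ---------------------------------------------------------------------------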
|
{
"content_hash": "6ba22a351aacd685ce7d43437b16fb98",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 125,
"avg_line_length": 33.4,
"alnum_prop": 0.6194278110445776,
"repo_name": "spodkowinski/cassandra-dtest",
"id": "ed90a835582a29e50a825ed28c41feaa642f6d66",
"size": "3006",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tools/sslkeygen.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2481951"
},
{
"name": "Shell",
"bytes": "2035"
}
],
"symlink_target": ""
}
|
import time
from myModbus_udp import *
class ModbusUDPClient():
def __init__( self, address ):
        self.instrument = Instrument_UDP()  # wrapper around the UDP Modbus transport
self.instrument.debug = None
self.instrument.set_ip(address)
def read_bit(self, modbus_address, registeraddress ):
self.instrument.address = modbus_address
return self.instrument.read_bit( registeraddress )
def write_bit(self, address, registeraddress, value ):
self.instrument.address = address
self.instrument.write_bit( registeraddress, value)
def write_registers(self,modbus_address,starting_register,data_list ):
self.instrument.address = modbus_address
return self.instrument.write_registers( starting_register,data_list )
def read_registers( self, modbus_address, starting_register, number ):
self.instrument.address = modbus_address
return self.instrument.read_registers( starting_register,number )
def read_float(self, modbus_address , registeraddress ):
self.instrument.address = modbus_address
return self.instrument.read_float( registeraddress )
    def write_float( self, modbus_address, registeraddress, value ):
        self.instrument.address = modbus_address
        return self.instrument.write_float( registeraddress, value )
def read_long(self, modbus_address, registeraddress ):
self.instrument.address = modbus_address
return self.instrument.read_long( registeraddress )
    def write_long( self, modbus_address, registeraddress, value ):
        self.instrument.address = modbus_address
        return self.instrument.write_long( registeraddress, value )
# string length is 32
def read_string(self, modbus_address, registeraddress ):
self.instrument.address = modbus_address
return self.instrument.read_string( registeraddress )
# string length is 32
def write_string(self, modbus_address,registeraddress, textstring):
self.instrument.address = modbus_address
return self.instrument.write_string(registeraddress, textstring )
def redis_write( self, modbus_address, json_data ):
self.instrument.address = modbus_address
return self.instrument.redis_communicate( modbus_address,255, json_data )
def redis_read( self, modbus_address, json_data):
self.instrument.address = modbus_address
return self.instrument.redis_communicate( modbus_address,254, json_data)
def ping_device( self, modbus_address, address_list ):
self.instrument.address = modbus_address
json_data = {"action":"ping" ,"parameters":{ "sub_action":"ping_device" , "sub_parameter":address_list } }
return self.instrument.redis_communicate( 253, json_data)
def ping_all_devices( self, modbus_address = None ):
self.instrument.address = modbus_address
json_data = {"action":"ping" ,"parameters":{ "sub_action":"ping_all_devices" , "sub_parameter": None } }
return self.instrument.redis_communicate( 253, json_data)
def clear_all_counters( self , modbus_address= None):
self.instrument.address = modbus_address
json_data = {"action":"counter" ,"parameters":{ "sub_action":"clear_all_counters" , "sub_parameter":None } }
return self.instrument.redis_communicate( 253, json_data)
def get_all_counters( self, modbus_address=None ):
self.instrument.address = modbus_address
json_data = {"action":"counter" ,"parameters":{ "sub_action": "get_all_counters" , "sub_parameter":None } }
return self.instrument.redis_communicate( 253, json_data)
def clear_counter_list( self, modbus_address, address_list ):
self.instrument.address = modbus_address
json_data = {"action":"counter" ,"parameters":{ "sub_action":"clear_counter_list" , "sub_parameter":address_list } }
return self.instrument.redis_communicate( 253, json_data)
class Modbus_RTU( ModbusUDPClient ):
def __init__(self, address):
ModbusUDPClient.__init__(self,address)
def special_command(self, modbus_address, registeraddress, values):
self.instrument.address = modbus_address
return self.instrument.special_command(registeraddress, values)
def read_eeprom_registers_byte( self, modbus_address, starting_register,number):
self.instrument.address = modbus_address
return self.instrument.read_eeprom_registers( starting_register, number )
def write_eeprom_registers_byte( self, modbus_address, starting_address, data_list ):
self.instrument.address = modbus_address
        return self.instrument.write_eeprom_registers( starting_address, data_list )
def read_fifo( self, modbus_address, queue, max_number ):
self.instrument.address = modbus_address
return self.instrument.read_fifo( queue, max_number )
def read_eeprom_registers( self,modbus_address, queue, max_number ):
temp = self.read_eeprom_registers_byte( modbus_address,queue,max_number)
return_value = []
for i in temp:
return_value.append( (i[0]<<24)|(i[1]<<16)|(i[2]<<8)|i[3] )
return return_value
def write_eeprom_registers( self, modbus_address, starting_address, data_list ):
temp1 = []
for i in data_list:
temp2 = []
temp2.append( (i>>24)&0xff )
temp2.append( (i>>16)&0xff )
temp2.append( (i>>8)&0xff)
temp2.append( (i)&0xff)
temp1.append( temp2)
        return self.write_eeprom_registers_byte( modbus_address, starting_address, temp1 )
if __name__ == "__main__":
import time
from myModbus_udp import *
modbus_client = ModbusUDPClient( "192.168.1.81" )
modbus_rtu = Modbus_RTU( "192.168.1.81" )
    print(modbus_rtu.read_fifo(31, 0, 16))
    print(modbus_rtu.read_registers(31, 0, 20))
    print(modbus_rtu.read_registers(100, 0, 10))
    print(modbus_rtu.read_registers(125, 0, 10))
    print(modbus_rtu.read_registers(170, 0, 10))
    print(modbus_rtu.write_registers(31, 0, [1, 2, 3]))
    print(modbus_rtu.read_registers(31, 0, 20))
    print(modbus_rtu.read_eeprom_registers(31, 0, 10))
    print(modbus_rtu.read_eeprom_registers_byte(31, 0, 10))
    print(modbus_rtu.redis_write(255, {"test1": 123, "test2": 124}))
    print(modbus_rtu.redis_read(255, ["test1", "test2"]))
    print(modbus_rtu.ping_all_devices(255))
    print(modbus_rtu.get_all_counters(255))
    print(modbus_rtu.clear_counter_list(255, [31]))
    print(modbus_rtu.get_all_counters(255))
    print(modbus_rtu.clear_all_counters(255))
    print(modbus_rtu.get_all_counters(255))
    for i in range(0, 255):
        print(i)
        print(modbus_rtu.ping_device(255, [125]))
        print(modbus_rtu.read_registers(31, 0, 20))
        print(modbus_rtu.read_registers(100, 0, 10))
        print(modbus_rtu.read_registers(125, 0, 10))
        print(modbus_rtu.read_registers(170, 0, 10))
        print(modbus_rtu.get_all_counters(255))
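# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original module): the
# big-endian 32-bit word <-> 4-byte packing used by the eeprom helpers above,
# on a made-up value.
#
# value = 0x12345678
# packed = [(value >> 24) & 0xff, (value >> 16) & 0xff,
#           (value >> 8) & 0xff, value & 0xff]         # [0x12, 0x34, 0x56, 0x78]
# unpacked = (packed[0] << 24) | (packed[1] << 16) | (packed[2] << 8) | packed[3]
# assert unpacked == value
# ---------------------------------------------------------------------------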
|
{
"content_hash": "555042adba766c5139f9a38de029f282",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 130,
"avg_line_length": 42.74251497005988,
"alnum_prop": 0.6590081255253573,
"repo_name": "glenn-edgar/local_scda",
"id": "826fa41af733d8f8bfc332438ebbf21209851d17",
"size": "7186",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "io_control/modbus_UDP.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "1392"
},
{
"name": "Batchfile",
"bytes": "2510"
},
{
"name": "CSS",
"bytes": "3520163"
},
{
"name": "HTML",
"bytes": "8690082"
},
{
"name": "JavaScript",
"bytes": "8432443"
},
{
"name": "Lua",
"bytes": "45999"
},
{
"name": "Makefile",
"bytes": "5136"
},
{
"name": "PHP",
"bytes": "502388"
},
{
"name": "Python",
"bytes": "2700200"
},
{
"name": "Shell",
"bytes": "600"
},
{
"name": "Smalltalk",
"bytes": "189"
},
{
"name": "TeX",
"bytes": "3153"
}
],
"symlink_target": ""
}
|
from django.db import models
class pharmacy(models.Model):
pharmacy_id = models.CharField(max_length=20,primary_key=True)
pharmacy_name = models.CharField(max_length=200)
password = models.CharField(max_length=20)
owner_fname = models.CharField(max_length=50)
owner_lname = models.CharField(max_length=50)
vat_no = models.ImageField()
drug_license = models.ImageField()
address_street = models.TextField()
address_city = models.CharField(max_length=20)
address_state = models.CharField(max_length=20)
address_pincode = models.IntegerField()
class contact_pharmacy(models.Model):
pharmacy = models.ForeignKey(pharmacy,on_delete=models.CASCADE)
contact_no = models.IntegerField()
class notifications(models.Model):
pharmacy_id = models.ForeignKey(pharmacy,on_delete=models.CASCADE)
time_of_arrival = models.DateTimeField(auto_now_add=True,blank=True)
content = models.TextField()
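# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original models file):
# creating related rows through the ForeignKeys above; every field value here
# is made up and migrations are assumed to be applied.
#
# p = pharmacy.objects.create(pharmacy_id='PH001', pharmacy_name='City Meds',
#                             password='...', owner_fname='A', owner_lname='B',
#                             address_street='1 Main St', address_city='Pune',
#                             address_state='MH', address_pincode=411001)
# contact_pharmacy.objects.create(pharmacy=p, contact_no=9876543210)
# notifications.objects.create(pharmacy_id=p, content='Stock arrived')
# ---------------------------------------------------------------------------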
|
{
"content_hash": "654849ff0bde4144dfd7c12398ca0e01",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 72,
"avg_line_length": 39.375,
"alnum_prop": 0.7386243386243386,
"repo_name": "mpiplani/Online-Pharmacy",
"id": "cb9f68ea7754fcf7360d0d2dde71991127352f9f",
"size": "945",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "online_pharmacy/online_pharmacy/pharmacy/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "3237"
},
{
"name": "Python",
"bytes": "51456"
}
],
"symlink_target": ""
}
|
"""
TNC: A Python interface to the TNC non-linear optimizer
TNC is a non-linear optimizer. To use it, you must provide a function to
minimize. The function must take one argument: the list of coordinates at which
to evaluate the function. It must return either a tuple, whose first element is
the value of the function and whose second element is the gradient of the
function (as a list of values), or None, to abort the minimization.
"""
from scipy.optimize import moduleTNC
from .optimize import (MemoizeJac, OptimizeResult, _check_unknown_options,
_prepare_scalar_function)
from ._constraints import old_bound_to_new
from numpy import inf, array, zeros, asfarray
__all__ = ['fmin_tnc']
MSG_NONE = 0 # No messages
MSG_ITER = 1 # One line per iteration
MSG_INFO = 2 # Informational messages
MSG_VERS = 4 # Version info
MSG_EXIT = 8 # Exit reasons
MSG_ALL = MSG_ITER + MSG_INFO + MSG_VERS + MSG_EXIT
MSGS = {
MSG_NONE: "No messages",
MSG_ITER: "One line per iteration",
MSG_INFO: "Informational messages",
MSG_VERS: "Version info",
MSG_EXIT: "Exit reasons",
MSG_ALL: "All messages"
}
INFEASIBLE = -1 # Infeasible (lower bound > upper bound)
LOCALMINIMUM = 0 # Local minimum reached (|pg| ~= 0)
FCONVERGED = 1 # Converged (|f_n-f_(n-1)| ~= 0)
XCONVERGED = 2 # Converged (|x_n-x_(n-1)| ~= 0)
MAXFUN = 3 # Max. number of function evaluations reached
LSFAIL = 4 # Linear search failed
CONSTANT = 5 # All lower bounds are equal to the upper bounds
NOPROGRESS = 6 # Unable to progress
USERABORT = 7 # User requested end of minimization
RCSTRINGS = {
INFEASIBLE: "Infeasible (lower bound > upper bound)",
LOCALMINIMUM: "Local minimum reached (|pg| ~= 0)",
FCONVERGED: "Converged (|f_n-f_(n-1)| ~= 0)",
XCONVERGED: "Converged (|x_n-x_(n-1)| ~= 0)",
MAXFUN: "Max. number of function evaluations reached",
LSFAIL: "Linear search failed",
CONSTANT: "All lower bounds are equal to the upper bounds",
NOPROGRESS: "Unable to progress",
USERABORT: "User requested end of minimization"
}
# Changes to interface made by Travis Oliphant, Apr. 2004 for inclusion in
# SciPy
def fmin_tnc(func, x0, fprime=None, args=(), approx_grad=0,
bounds=None, epsilon=1e-8, scale=None, offset=None,
messages=MSG_ALL, maxCGit=-1, maxfun=None, eta=-1,
stepmx=0, accuracy=0, fmin=0, ftol=-1, xtol=-1, pgtol=-1,
rescale=-1, disp=None, callback=None):
"""
Minimize a function with variables subject to bounds, using
gradient information in a truncated Newton algorithm. This
method wraps a C implementation of the algorithm.
Parameters
----------
func : callable ``func(x, *args)``
Function to minimize. Must do one of:
1. Return f and g, where f is the value of the function and g its
gradient (a list of floats).
2. Return the function value but supply gradient function
separately as `fprime`.
3. Return the function value and set ``approx_grad=True``.
If the function returns None, the minimization
is aborted.
x0 : array_like
Initial estimate of minimum.
fprime : callable ``fprime(x, *args)``, optional
Gradient of `func`. If None, then either `func` must return the
function value and the gradient (``f,g = func(x, *args)``)
or `approx_grad` must be True.
args : tuple, optional
Arguments to pass to function.
approx_grad : bool, optional
If true, approximate the gradient numerically.
bounds : list, optional
(min, max) pairs for each element in x0, defining the
bounds on that parameter. Use None or +/-inf for one of
min or max when there is no bound in that direction.
epsilon : float, optional
Used if approx_grad is True. The stepsize in a finite
difference approximation for fprime.
scale : array_like, optional
Scaling factors to apply to each variable. If None, the
factors are up-low for interval bounded variables and
1+|x| for the others. Defaults to None.
offset : array_like, optional
Value to subtract from each variable. If None, the
offsets are (up+low)/2 for interval bounded variables
and x for the others.
    messages : int, optional
        Bit mask used to select messages displayed during
        minimization; values are defined in the MSGS dict. Defaults to
        MSG_ALL.
disp : int, optional
Integer interface to messages. 0 = no message, 5 = all messages
    maxCGit : int, optional
        Maximum number of hessian*vector evaluations per main
        iteration. If maxCGit == 0, the direction chosen is
        -gradient. If maxCGit < 0, maxCGit is set to
        max(1,min(50,n/2)). Defaults to -1.
    maxfun : int, optional
        Maximum number of function evaluations. If None, maxfun is
        set to max(100, 10*len(x0)). Defaults to None.
eta : float, optional
Severity of the line search. If < 0 or > 1, set to 0.25.
Defaults to -1.
stepmx : float, optional
Maximum step for the line search. May be increased during
call. If too small, it will be set to 10.0. Defaults to 0.
accuracy : float, optional
Relative precision for finite difference calculations. If
<= machine_precision, set to sqrt(machine_precision).
Defaults to 0.
fmin : float, optional
Minimum function value estimate. Defaults to 0.
    ftol : float, optional
        Precision goal for the value of f in the stopping criterion.
        If ftol < 0.0, ftol is set to 0.0. Defaults to -1.
xtol : float, optional
Precision goal for the value of x in the stopping
criterion (after applying x scaling factors). If xtol <
0.0, xtol is set to sqrt(machine_precision). Defaults to
-1.
pgtol : float, optional
Precision goal for the value of the projected gradient in
the stopping criterion (after applying x scaling factors).
If pgtol < 0.0, pgtol is set to 1e-2 * sqrt(accuracy).
Setting it to 0.0 is not recommended. Defaults to -1.
rescale : float, optional
Scaling factor (in log10) used to trigger f value
rescaling. If 0, rescale at each iteration. If a large
value, never rescale. If < 0, rescale is set to 1.3.
callback : callable, optional
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
Returns
-------
x : ndarray
The solution.
nfeval : int
The number of function evaluations.
rc : int
Return code, see below
See also
--------
minimize: Interface to minimization algorithms for multivariate
functions. See the 'TNC' `method` in particular.
Notes
-----
The underlying algorithm is truncated Newton, also called
Newton Conjugate-Gradient. This method differs from
scipy.optimize.fmin_ncg in that
1. it wraps a C implementation of the algorithm
2. it allows each variable to be given an upper and lower bound.
The algorithm incorporates the bound constraints by determining
the descent direction as in an unconstrained truncated Newton,
but never taking a step-size large enough to leave the space
of feasible x's. The algorithm keeps track of a set of
currently active constraints, and ignores them when computing
the minimum allowable step size. (The x's associated with the
active constraint are kept fixed.) If the maximum allowable
step size is zero then a new constraint is added. At the end
of each iteration one of the constraints may be deemed no
longer active and removed. A constraint is considered
    no longer active if it is currently active
but the gradient for that variable points inward from the
constraint. The specific constraint removed is the one
associated with the variable of largest index whose
constraint is no longer active.
Return codes are defined as follows::
-1 : Infeasible (lower bound > upper bound)
0 : Local minimum reached (|pg| ~= 0)
1 : Converged (|f_n-f_(n-1)| ~= 0)
2 : Converged (|x_n-x_(n-1)| ~= 0)
3 : Max. number of function evaluations reached
4 : Linear search failed
5 : All lower bounds are equal to the upper bounds
6 : Unable to progress
7 : User requested end of minimization
References
----------
Wright S., Nocedal J. (2006), 'Numerical Optimization'
Nash S.G. (1984), "Newton-Type Minimization Via the Lanczos Method",
SIAM Journal of Numerical Analysis 21, pp. 770-778
"""
# handle fprime/approx_grad
if approx_grad:
fun = func
jac = None
elif fprime is None:
fun = MemoizeJac(func)
jac = fun.derivative
else:
fun = func
jac = fprime
if disp is not None: # disp takes precedence over messages
mesg_num = disp
else:
mesg_num = {0:MSG_NONE, 1:MSG_ITER, 2:MSG_INFO, 3:MSG_VERS,
4:MSG_EXIT, 5:MSG_ALL}.get(messages, MSG_ALL)
# build options
opts = {'eps': epsilon,
'scale': scale,
'offset': offset,
'mesg_num': mesg_num,
'maxCGit': maxCGit,
'maxiter': maxfun,
'eta': eta,
'stepmx': stepmx,
'accuracy': accuracy,
'minfev': fmin,
'ftol': ftol,
'xtol': xtol,
'gtol': pgtol,
'rescale': rescale,
'disp': False}
res = _minimize_tnc(fun, x0, args, jac, bounds, callback=callback, **opts)
return res['x'], res['nfev'], res['status']
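# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original module): the same
# solver reached through the public `scipy.optimize.minimize` front end,
# which dispatches method='TNC' to `_minimize_tnc` below. The objective and
# bounds here are made up.
#
# from scipy.optimize import minimize
#
# def f(x):
#     return x[0] ** 2 + abs(x[1]) ** 3
#
# res = minimize(f, [-7, 3], method='TNC', bounds=[(-10, 10), (1, 10)])
# print(res.x, res.status, res.message)
# ---------------------------------------------------------------------------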
def _minimize_tnc(fun, x0, args=(), jac=None, bounds=None,
eps=1e-8, scale=None, offset=None, mesg_num=None,
maxCGit=-1, maxiter=None, eta=-1, stepmx=0, accuracy=0,
minfev=0, ftol=-1, xtol=-1, gtol=-1, rescale=-1, disp=False,
callback=None, finite_diff_rel_step=None, **unknown_options):
"""
Minimize a scalar function of one or more variables using a truncated
Newton (TNC) algorithm.
Options
-------
eps : float or ndarray
If `jac is None` the absolute step size used for numerical
approximation of the jacobian via forward differences.
    scale : list of floats
        Scaling factors to apply to each variable. If None, the
        factors are up-low for interval bounded variables and
        1+|x| for the others. Defaults to None.
offset : float
Value to subtract from each variable. If None, the
offsets are (up+low)/2 for interval bounded variables
and x for the others.
disp : bool
Set to True to print convergence messages.
    maxCGit : int
        Maximum number of hessian*vector evaluations per main
        iteration. If maxCGit == 0, the direction chosen is
        -gradient. If maxCGit < 0, maxCGit is set to
        max(1,min(50,n/2)). Defaults to -1.
    maxiter : int
        Maximum number of function evaluations. If None, `maxiter` is
        set to max(100, 10*len(x0)). Defaults to None.
eta : float
Severity of the line search. If < 0 or > 1, set to 0.25.
Defaults to -1.
stepmx : float
Maximum step for the line search. May be increased during
call. If too small, it will be set to 10.0. Defaults to 0.
accuracy : float
Relative precision for finite difference calculations. If
<= machine_precision, set to sqrt(machine_precision).
Defaults to 0.
minfev : float
Minimum function value estimate. Defaults to 0.
    ftol : float
        Precision goal for the value of f in the stopping criterion.
        If ftol < 0.0, ftol is set to 0.0. Defaults to -1.
xtol : float
Precision goal for the value of x in the stopping
criterion (after applying x scaling factors). If xtol <
0.0, xtol is set to sqrt(machine_precision). Defaults to
-1.
gtol : float
Precision goal for the value of the projected gradient in
the stopping criterion (after applying x scaling factors).
If gtol < 0.0, gtol is set to 1e-2 * sqrt(accuracy).
Setting it to 0.0 is not recommended. Defaults to -1.
rescale : float
Scaling factor (in log10) used to trigger f value
rescaling. If 0, rescale at each iteration. If a large
value, never rescale. If < 0, rescale is set to 1.3.
finite_diff_rel_step : None or array_like, optional
If `jac in ['2-point', '3-point', 'cs']` the relative step size to
use for numerical approximation of the jacobian. The absolute step
size is computed as ``h = rel_step * sign(x0) * max(1, abs(x0))``,
possibly adjusted to fit into the bounds. For ``method='3-point'``
the sign of `h` is ignored. If None (default) then step is selected
automatically.
"""
_check_unknown_options(unknown_options)
maxfun = maxiter
fmin = minfev
pgtol = gtol
x0 = asfarray(x0).flatten()
n = len(x0)
if bounds is None:
bounds = [(None,None)] * n
if len(bounds) != n:
raise ValueError('length of x0 != length of bounds')
new_bounds = old_bound_to_new(bounds)
if mesg_num is not None:
messages = {0:MSG_NONE, 1:MSG_ITER, 2:MSG_INFO, 3:MSG_VERS,
4:MSG_EXIT, 5:MSG_ALL}.get(mesg_num, MSG_ALL)
elif disp:
messages = MSG_ALL
else:
messages = MSG_NONE
sf = _prepare_scalar_function(fun, x0, jac=jac, args=args, epsilon=eps,
finite_diff_rel_step=finite_diff_rel_step,
bounds=new_bounds)
func_and_grad = sf.fun_and_grad
"""
low, up : the bounds (lists of floats)
if low is None, the lower bounds are removed.
if up is None, the upper bounds are removed.
low and up defaults to None
"""
low = zeros(n)
up = zeros(n)
for i in range(n):
if bounds[i] is None:
l, u = -inf, inf
else:
l,u = bounds[i]
if l is None:
low[i] = -inf
else:
low[i] = l
if u is None:
up[i] = inf
else:
up[i] = u
if scale is None:
scale = array([])
if offset is None:
offset = array([])
if maxfun is None:
maxfun = max(100, 10*len(x0))
rc, nf, nit, x = moduleTNC.minimize(func_and_grad, x0, low, up, scale,
offset, messages, maxCGit, maxfun,
eta, stepmx, accuracy, fmin, ftol,
xtol, pgtol, rescale, callback)
funv, jacv = func_and_grad(x)
return OptimizeResult(x=x, fun=funv, jac=jacv, nfev=sf.nfev,
nit=nit, status=rc, message=RCSTRINGS[rc],
success=(-1 < rc < 3))
if __name__ == '__main__':
# Examples for TNC
def example():
print("Example")
# A function to minimize
def function(x):
f = pow(x[0],2.0)+pow(abs(x[1]),3.0)
g = [0,0]
g[0] = 2.0*x[0]
g[1] = 3.0*pow(abs(x[1]),2.0)
if x[1] < 0:
g[1] = -g[1]
return f, g
# Optimizer call
x, nf, rc = fmin_tnc(function, [-7, 3], bounds=([-10, 1], [10, 10]))
print("After", nf, "function evaluations, TNC returned:", RCSTRINGS[rc])
print("x =", x)
print("exact value = [0, 1]")
print()
example()
|
{
"content_hash": "0a33a9463321aae8bdc893104fb5afad",
"timestamp": "",
"source": "github",
"line_count": 420,
"max_line_length": 84,
"avg_line_length": 37.81428571428572,
"alnum_prop": 0.6076060949502582,
"repo_name": "aeklant/scipy",
"id": "44bc996213b8b1bd94b9371a10155fdf53a26dd2",
"size": "17086",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scipy/optimize/tnc.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4392569"
},
{
"name": "C++",
"bytes": "648401"
},
{
"name": "Dockerfile",
"bytes": "1291"
},
{
"name": "Fortran",
"bytes": "5368728"
},
{
"name": "MATLAB",
"bytes": "4346"
},
{
"name": "Makefile",
"bytes": "778"
},
{
"name": "Python",
"bytes": "12865927"
},
{
"name": "Shell",
"bytes": "538"
},
{
"name": "TeX",
"bytes": "52106"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import frappe
from frappe.utils import formatdate, fmt_money, flt, cstr, cint, format_datetime
from frappe.model.meta import get_field_currency, get_field_precision
import re
def format_value(value, df, doc=None, currency=None):
# Convert dict to object if necessary
if (isinstance(df, dict)):
df = frappe._dict(df)
if value is None:
value = ""
if not df:
return value
elif df.get("fieldtype")=="Date":
return formatdate(value)
elif df.get("fieldtype")=="Datetime":
return format_datetime(value)
elif df.get("fieldtype") == "Currency" or (df.get("fieldtype")=="Float" and (df.options or "").strip()):
return fmt_money(value, precision=get_field_precision(df, doc),
currency=currency if currency else (get_field_currency(df, doc) if doc else None))
elif df.get("fieldtype") == "Float":
precision = get_field_precision(df, doc)
# show 1.000000 as 1
        # options should not be specified
if not df.options and value is not None:
temp = cstr(value).split(".")
if len(temp)==1 or cint(temp[1])==0:
precision = 0
return fmt_money(value, precision=precision)
elif df.get("fieldtype") == "Percent":
return "{}%".format(flt(value, 2))
elif df.get("fieldtype") in ("Text", "Small Text"):
        if not re.search(r"(<br|<div|<p)", value):
            return value.replace("\n", "<br>")
return value
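# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original module): how the
# simple branches above behave; the Currency/Float-with-options path also
# needs a configured frappe site, which these examples avoid.
#
# format_value(0.5, {"fieldtype": "Percent"})   # -> "0.5%"
# format_value("a\nb", {"fieldtype": "Text"})   # -> "a<br>b"
# format_value(None, {"fieldtype": "Data"})     # -> ""
# ---------------------------------------------------------------------------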
|
{
"content_hash": "b4c85bb1a50ac1b501b9efb51d1d5fee",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 105,
"avg_line_length": 29.127659574468087,
"alnum_prop": 0.6800584368151936,
"repo_name": "mbauskar/omnitech-frappe",
"id": "1d2b1783cbe796b727a4f13146916748012636e6",
"size": "1470",
"binary": false,
"copies": "23",
"ref": "refs/heads/master",
"path": "frappe/utils/formatters.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "246115"
},
{
"name": "HTML",
"bytes": "142481"
},
{
"name": "JavaScript",
"bytes": "1061035"
},
{
"name": "Python",
"bytes": "1188634"
},
{
"name": "Shell",
"bytes": "517"
}
],
"symlink_target": ""
}
|
from .core import Migration
from .core.exceptions import MigrationException
from .helpers import Utils
class MSSQL(object):
def __init__(self, config=None, mssql_driver=None):
self.__mssql_script_encoding = config.get("database_script_encoding", "utf8")
self.__mssql_encoding = config.get("database_encoding", "utf8")
self.__mssql_host = config.get("database_host")
self.__mssql_port = config.get("database_port", 1433)
self.__mssql_user = config.get("database_user")
self.__mssql_passwd = config.get("database_password")
self.__mssql_db = config.get("database_name")
self.__version_table = config.get("database_version_table")
self.__mssql_driver = mssql_driver
if not mssql_driver:
import _mssql
self.__mssql_driver = _mssql
if config.get("drop_db_first"):
self._drop_database()
self._create_database_if_not_exists()
self._create_version_table_if_not_exists()
def __mssql_connect(self, connect_using_database_name=True):
try:
conn = self.__mssql_driver.connect(server=self.__mssql_host, port=self.__mssql_port, user=self.__mssql_user, password=self.__mssql_passwd, charset=self.__mssql_encoding)
if connect_using_database_name:
conn.select_db(self.__mssql_db)
return conn
except Exception as e:
raise Exception("could not connect to database: %s" % e)
def __execute(self, sql, execution_log=None):
db = self.__mssql_connect()
curr_statement = None
try:
            statements = MSSQL._parse_sql_statements(sql)
            if len(sql.strip(' \t\n\r')) != 0 and len(statements) == 0:
                raise Exception("invalid sql syntax '%s'" % Utils.encode(sql, "utf-8"))
            for statement in statements:
curr_statement = statement
db.execute_non_query(statement)
affected_rows = db.rows_affected
if execution_log:
execution_log("%s\n-- %d row(s) affected\n" % (statement, affected_rows and int(affected_rows) or 0))
except Exception as e:
db.cancel()
raise MigrationException("error executing migration: %s" % e, curr_statement)
finally:
db.close()
@classmethod
def _parse_sql_statements(cls, migration_sql):
all_statements = []
last_statement = ''
for statement in migration_sql.split(';'):
if len(last_statement) > 0:
curr_statement = '%s;%s' % (last_statement, statement)
else:
curr_statement = statement
count = Utils.count_occurrences(curr_statement)
single_quotes = count.get("'", 0)
double_quotes = count.get('"', 0)
left_parenthesis = count.get('(', 0)
right_parenthesis = count.get(')', 0)
if single_quotes % 2 == 0 and double_quotes % 2 == 0 and left_parenthesis == right_parenthesis:
all_statements.append(curr_statement)
last_statement = ''
else:
last_statement = curr_statement
return [s.strip() for s in all_statements if ((s.strip() != "") and (last_statement == ""))]
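    # -----------------------------------------------------------------------
    # Editor's illustrative sketch (not part of the original module): the
    # quote and parenthesis balancing above keeps a ';' inside a string
    # literal from splitting a statement.
    #
    # MSSQL._parse_sql_statements("insert into t values ('a;b'); select 1;")
    # # -> ["insert into t values ('a;b')", "select 1"]
    # -----------------------------------------------------------------------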
def _drop_database(self):
db = self.__mssql_connect(False)
try:
db.execute_non_query("if exists ( select 1 from sysdatabases where name = '%s' ) drop database %s;" % (self.__mssql_db, self.__mssql_db))
except Exception as e:
raise Exception("can't drop database '%s'; \n%s" % (self.__mssql_db, str(e)))
finally:
db.close()
def _create_database_if_not_exists(self):
db = self.__mssql_connect(False)
db.execute_non_query("if not exists ( select 1 from sysdatabases where name = '%s' ) create database %s;" % (self.__mssql_db, self.__mssql_db))
db.close()
def _create_version_table_if_not_exists(self):
# create version table
sql = "if not exists ( select 1 from sysobjects where name = '%s' and type = 'u' ) create table %s ( id INT IDENTITY(1,1) NOT NULL PRIMARY KEY, version varchar(20) NOT NULL default '0', label varchar(255), name varchar(255), sql_up NTEXT, sql_down NTEXT);" % (self.__version_table, self.__version_table)
self.__execute(sql)
# check if there is a register there
db = self.__mssql_connect()
count = db.execute_scalar("select count(*) from %s;" % self.__version_table)
db.close()
# if there is not a version register, insert one
if count == 0:
sql = "insert into %s (version) values ('0');" % self.__version_table
self.__execute(sql)
def __change_db_version(self, version, migration_file_name, sql_up, sql_down, up=True, execution_log=None, label_version=None):
params = []
params.append(version)
if up:
# moving up and storing history
sql = "insert into %s (version, label, name, sql_up, sql_down) values (%%s, %%s, %%s, %%s, %%s);" % (self.__version_table)
params.append(label_version)
params.append(migration_file_name)
params.append(sql_up and Utils.encode(sql_up, self.__mssql_script_encoding) or "")
params.append(sql_down and Utils.encode(sql_down, self.__mssql_script_encoding) or "")
else:
# moving down and deleting from history
sql = "delete from %s where version = %%s;" % (self.__version_table)
db = self.__mssql_connect()
try:
db.execute_non_query(Utils.encode(sql, self.__mssql_script_encoding), tuple(params))
if execution_log:
execution_log("migration %s registered\n" % (migration_file_name))
except Exception as e:
db.cancel()
raise MigrationException("error logging migration: %s" % e, migration_file_name)
finally:
db.close()
def change(self, sql, new_db_version, migration_file_name, sql_up, sql_down, up=True, execution_log=None, label_version=None):
self.__execute(sql, execution_log)
self.__change_db_version(new_db_version, migration_file_name, sql_up, sql_down, up, execution_log, label_version)
def get_current_schema_version(self):
db = self.__mssql_connect()
version = db.execute_scalar("select top 1 version from %s order by id desc" % self.__version_table) or 0
db.close()
return version
def get_all_schema_versions(self):
versions = []
db = self.__mssql_connect()
db.execute_query("select version from %s order by id;" % self.__version_table)
all_versions = db
for version in all_versions:
versions.append(version['version'])
db.close()
versions.sort()
return versions
def get_version_id_from_version_number(self, version):
db = self.__mssql_connect()
result = db.execute_row("select id from %s where version = '%s' order by id desc;" % (self.__version_table, version))
_id = result and int(result['id']) or None
db.close()
return _id
def get_version_number_from_label(self, label):
db = self.__mssql_connect()
result = db.execute_row("select version from %s where label = '%s' order by id desc" % (self.__version_table, label))
version = result and result['version'] or None
db.close()
return version
def get_all_schema_migrations(self):
migrations = []
db = self.__mssql_connect()
db.execute_query("select id, version, label, name, cast(sql_up as text) as sql_up, cast(sql_down as text) as sql_down from %s order by id;" % self.__version_table)
all_migrations = db
for migration_db in all_migrations:
migration = Migration(id = int(migration_db['id']),
version = migration_db['version'] and str(migration_db['version']) or None,
label = migration_db['label'] and str(migration_db['label']) or None,
file_name = migration_db['name'] and str(migration_db['name']) or None,
sql_up = Migration.ensure_sql_unicode(migration_db['sql_up'], self.__mssql_script_encoding),
sql_down = Migration.ensure_sql_unicode(migration_db['sql_down'], self.__mssql_script_encoding))
migrations.append(migration)
db.close()
return migrations
|
{
"content_hash": "8ff83883a7502a1b03b5713449de37c4",
"timestamp": "",
"source": "github",
"line_count": 186,
"max_line_length": 311,
"avg_line_length": 46.39784946236559,
"alnum_prop": 0.5841251448435689,
"repo_name": "guilhermechapiewski/simple-db-migrate",
"id": "6a6bd59e37fa8fe6e7ada0e75ee130ded4e8d5e8",
"size": "8630",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "simple_db_migrate/mssql.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1267"
},
{
"name": "Python",
"bytes": "350098"
}
],
"symlink_target": ""
}
|
import unittest
from katas.kyu_8.barking_mad import Dog
class DogTestCase(unittest.TestCase):
def test_equals(self):
self.assertEqual(Dog("Beagle").bark(), 'Woof')
def test_equals_2(self):
self.assertEqual(Dog("Great Dane").bark(), 'Woof')
def test_equals_3(self):
self.assertEqual(Dog('Schnauzer').bark(), 'Woof')
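# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original tests): one minimal
# Dog implementation that would satisfy the assertions above; the kata's
# actual solution may differ, and the `breed` attribute is an assumption.
#
# class Dog(object):
#     def __init__(self, breed):
#         self.breed = breed
#     def bark(self):
#         return 'Woof'
# ---------------------------------------------------------------------------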
|
{
"content_hash": "b10f5df9cf3af5b908081853c9e3aebb",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 58,
"avg_line_length": 25.428571428571427,
"alnum_prop": 0.6544943820224719,
"repo_name": "the-zebulan/CodeWars",
"id": "893dd7be51d2a5d8c0a1af2f45f990ffddd5bbf4",
"size": "356",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/kyu_8_tests/test_barking_mad.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1203000"
}
],
"symlink_target": ""
}
|
import cgi
import flask
import random
import urllib
# [START taskq-imp]
from google.appengine.ext import ndb
from google.appengine.api import taskqueue
# [END taskq-imp]
class Note(ndb.Model):
"""Models an individual Note entry with content."""
content = ndb.StringProperty()
def parent_key(page_name):
return ndb.Key("Parent", page_name)
app = flask.Flask(__name__)
@app.route('/')
def main_page():
page_name = flask.request.args.get('page_name', 'default')
response = """
<html><body>
    <h2>Permanent note page: %s</h2>""" % cgi.escape(page_name)
parent = parent_key(page_name)
notes = Note.query(ancestor=parent).fetch(20)
for note in notes:
response += '<h3>%s</h3>' % cgi.escape(note.key.id())
response += '<blockquote>%s</blockquote>' % cgi.escape(note.content)
response += """
<hr>
<form action="/add?%s" method="post">
Submit Note: <input value="Title" name="note_title"><br>
<textarea value="Note" name="note_text" rows="4" cols="60"></textarea>
<input type="submit" value="Etch in stone"></form>""" % urllib.urlencode({'page_name': page_name})
response += """
<hr>
<form>Switch page: <input value="%s" name="page_name">
<input type="submit" value="Switch"></form>
</body>
</html>""" % cgi.escape(page_name, quote=True)
return response
# [START standard]
@ndb.transactional
def insert_if_absent(note_key, note):
fetch = note_key.get()
if fetch is None:
note.put()
return True
return False
# [END standard]
# [START two-tries]
@ndb.transactional(retries=1)
def insert_if_absent_2_retries(note_key, note):
# do insert
# [END two-tries]
fetch = note_key.get()
if fetch is None:
note.put()
return True
return False
# [START cross-group]
@ndb.transactional(xg=True)
def insert_if_absent_xg(note_key, note):
# do insert
# [END cross-group]
fetch = note_key.get()
if fetch is None:
note.put()
return True
return False
# [START sometimes]
def insert_if_absent_sometimes(note_key, note):
# do insert
# [END sometimes]
fetch = note_key.get()
if fetch is None:
note.put()
return True
return False
# [START indep]
@ndb.transactional(propagation=ndb.TransactionOptions.INDEPENDENT)
def insert_if_absent_indep(note_key, note):
# do insert
# [END indep]
fetch = note_key.get()
if fetch is None:
note.put()
return True
return False
# [START taskq]
@ndb.transactional
def insert_if_absent_taskq(note_key, note):
taskqueue.add(url=flask.url_for('taskq_worker'), transactional=True)
# do insert
# [END taskq]
fetch = note_key.get()
if fetch is None:
note.put()
return True
return False
@app.route('/worker')
def taskq_worker():
pass
def pick_random_insert(note_key, note):
choice = random.randint(0, 5)
if choice == 0:
# [START calling2]
inserted = insert_if_absent(note_key, note)
# [END calling2]
elif choice == 1:
inserted = insert_if_absent_2_retries(note_key, note)
elif choice == 2:
inserted = insert_if_absent_xg(note_key, note)
elif choice == 3:
# [START sometimes-call]
inserted = ndb.transaction(lambda: insert_if_absent_sometimes(note_key, note))
# [END sometimes-call]
elif choice == 4:
inserted = insert_if_absent_indep(note_key, note)
elif choice == 5:
inserted = insert_if_absent_taskq(note_key, note)
return inserted
@app.route('/add', methods=['POST'])
def add_note():
page_name = flask.request.args.get('page_name', 'default')
note_title = flask.request.form['note_title']
note_text = flask.request.form['note_text']
parent = parent_key(page_name)
choice = random.randint(0, 1)
if choice == 0:
# Use transactional function
# [START calling]
note_key = ndb.Key(Note, note_title, parent=parent)
note = Note(key=note_key, content=note_text)
# [END calling]
if pick_random_insert(note_key, note) is False:
return 'Already there<br><a href="%s">Return</a>' % flask.url_for('main_page', page_name=page_name)
return flask.redirect(flask.url_for('main_page', page_name=page_name))
elif choice == 1:
# Use get_or_insert, which is transactional
note = Note.get_or_insert(note_title, parent=parent, content=note_text)
if note.content != note_text:
return 'Already there<br><a href="%s">Return</a>' % flask.url_for('main_page', page_name=page_name)
return flask.redirect(flask.url_for('main_page', page_name=page_name))
if __name__ == '__main__':
app.run()
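# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original sample): the two
# equivalent ways the handlers above enter a transaction; `some_key` is a
# hypothetical Note key.
#
# @ndb.transactional                        # decorator form
# def rename(note_key, new_content):
#     note = note_key.get()
#     note.content = new_content
#     note.put()
#
# # function form, same semantics chosen at the call site:
# ndb.transaction(lambda: Note(key=some_key, content='fresh').put())
# ---------------------------------------------------------------------------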
|
{
"content_hash": "1a3f7160f2ec1f04b41de74130c6ec9f",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 111,
"avg_line_length": 27.873563218390803,
"alnum_prop": 0.6090721649484536,
"repo_name": "googlearchive/datastore-samples",
"id": "6f06bc067ff4b5a7ae0d4942e6bd42f20b61f8de",
"size": "5447",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/ndb/transactions/main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "8519"
}
],
"symlink_target": ""
}
|
import ast
import sys
import os
####################################################################################################################################################################################################################################
####################################################################################################### PRE-DEFINED CONSTANTS ######################################################################################################
####################################################################################################################################################################################################################################
# Possible characters to send to the maze application
# Any other will be ignored
# Do not edit this code
UP = 'U'
DOWN = 'D'
LEFT = 'L'
RIGHT = 'R'
####################################################################################################################################################################################################################################
# Name of your team
# It will be displayed in the maze
# You have to edit this code
TEAM_NAME = "Improved closest v2"
####################################################################################################################################################################################################################################
########################################################################################################## YOUR VARIABLES ##########################################################################################################
####################################################################################################################################################################################################################################
# Stores all the moves in a list to restitute them one by one
allMoves = [RIGHT, RIGHT, RIGHT, RIGHT, UP, UP, LEFT, UP, RIGHT, RIGHT, UP, RIGHT, UP, UP, UP, RIGHT, DOWN, RIGHT, UP, UP]
####################################################################################################################################################################################################################################
####################################################################################################### PRE-DEFINED FUNCTIONS ######################################################################################################
####################################################################################################################################################################################################################################
# Writes a message to the shell
# Use for debugging your program
# Channels stdout and stdin are captured to enable communication with the maze
# Do not edit this code
def debug (text) :
# Writes to the stderr channel
sys.stderr.write(str(text) + "\n")
sys.stderr.flush()
####################################################################################################################################################################################################################################
# Reads one line of information sent by the maze application
# This function is blocking, and will wait for a line to terminate
# The received information is automatically converted to the correct type
# Do not edit this code
def readFromPipe () :
# Reads from the stdin channel and returns the structure associated to the string
try :
text = sys.stdin.readline()
return ast.literal_eval(text.strip())
except :
os._exit(-1)
####################################################################################################################################################################################################################################
# Sends the text to the maze application
# Do not edit this code
def writeToPipe (text) :
# Writes to the stdout channel
sys.stdout.write(text)
sys.stdout.flush()
####################################################################################################################################################################################################################################
# Reads the initial maze information
# The function processes the text and returns the associated variables
# The dimensions of the maze are positive integers
# Maze map is a dictionary associating to a location its adjacent locations and the associated weights
# The preparation time gives the time during which 'initializationCode' can make computations before the game starts
# The turn time gives the time during which 'determineNextMove' can make computations before returning a decision
# Player locations are tuples (line, column)
# Coins are given as a list of locations where they appear
# A boolean indicates if the game is over
# Do not edit this code
def processInitialInformation () :
# We read from the pipe
data = readFromPipe()
return (data['mazeWidth'], data['mazeHeight'], data['mazeMap'], data['preparationTime'], data['turnTime'], data['playerLocation'], data['opponentLocation'], data['coins'], data['gameIsOver'])
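# Editor's note: an illustrative example (not from the original file) of the
# structure returned above, assuming a 2x2 maze with unit move weights.
# Locations are (line, column) tuples and mazeMap maps each location to its
# adjacent locations and the associated weights:
#
# mazeMap = {(0, 0): {(0, 1): 1, (1, 0): 1},
#            (0, 1): {(0, 0): 1, (1, 1): 1},
#            (1, 0): {(0, 0): 1, (1, 1): 1},
#            (1, 1): {(0, 1): 1, (1, 0): 1}}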
####################################################################################################################################################################################################################################
# Reads the information after each player moved
# The maze map and allowed times are no longer provided since they do not change
# Do not edit this code
def processNextInformation () :
# We read from the pipe
data = readFromPipe()
return (data['playerLocation'], data['opponentLocation'], data['coins'], data['gameIsOver'])
####################################################################################################################################################################################################################################
########################################################################################################## YOUR FUNCTIONS ##########################################################################################################
####################################################################################################################################################################################################################################
# This is where you should write your code to do things during the initialization delay
# This function should not return anything, but should be used for a short preprocessing
# This function takes as parameters the dimensions and map of the maze, the time allowed for computing, the players' locations in the maze, and the remaining coins' locations
# Make sure to have a safety margin for the time to include processing times (communication etc.)
def initializationCode (mazeWidth, mazeHeight, mazeMap, timeAllowed, playerLocation, opponentLocation, coins) :
# Nothing to do
pass
####################################################################################################################################################################################################################################
# This is where you should write your code to determine the next direction
# This function should return one of the directions defined in the CONSTANTS section
# This function takes as parameters the dimensions and map of the maze, the time allowed for computing, the players' locations in the maze, and the remaining coins' locations
# Make sure to have a safety margin for the time to include processing times (communication etc.)
def determineNextMove (mazeWidth, mazeHeight, mazeMap, timeAllowed, playerLocation, opponentLocation, coins) :
# We return the next move as described by the list
global allMoves
nextMove = allMoves[0]
allMoves = allMoves[1:]
return nextMove
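# Editor's note: a hedged sketch (not part of the original strategy, which
# replays the precomputed allMoves list) of how a move toward the closest coin
# could be computed online with a BFS over mazeMap. All names here are
# hypothetical, and UP is assumed to decrease the line index.
def bfsMoveTowardClosestCoin (mazeMap, playerLocation, coins) :
    frontier = [playerLocation]
    pred = {playerLocation: None}
    target = None
    # Breadth-first search until the first (hence closest) coin is reached
    while frontier and target is None :
        nextFrontier = []
        for u in frontier :
            for v in mazeMap.get(u, {}) :
                if v not in pred :
                    pred[v] = u
                    if v in coins :
                        target = v
                        break
                    nextFrontier.append(v)
            if target is not None :
                break
        frontier = nextFrontier
    if target is None :
        return UP
    # Walk predecessors back to the first step out of playerLocation
    step = target
    while pred[step] != playerLocation :
        step = pred[step]
    delta = (step[0] - playerLocation[0], step[1] - playerLocation[1])
    return {(-1, 0): UP, (1, 0): DOWN, (0, -1): LEFT, (0, 1): RIGHT}[delta]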
####################################################################################################################################################################################################################################
############################################################################################################# MAIN LOOP ############################################################################################################
####################################################################################################################################################################################################################################
# This is the entry point when executing this file
# We first send the name of the team to the maze
# The first message we receive from the maze includes its dimensions and map, the time allowed for the various steps, and the players' and coins' locations
# Then, at every loop iteration, we get the maze status and determine a move
# Do not edit this code
if __name__ == "__main__" :
# We send the team name
writeToPipe(TEAM_NAME + "\n")
# We process the initial information and have a delay to compute things using it
(mazeWidth, mazeHeight, mazeMap, preparationTime, turnTime, playerLocation, opponentLocation, coins, gameIsOver) = processInitialInformation()
initializationCode(mazeWidth, mazeHeight, mazeMap, preparationTime, playerLocation, opponentLocation, coins)
# We decide how to move and wait for the next step
while not gameIsOver :
(playerLocation, opponentLocation, coins, gameIsOver) = processNextInformation()
if gameIsOver :
break
nextMove = determineNextMove(mazeWidth, mazeHeight, mazeMap, turnTime, playerLocation, opponentLocation, coins)
writeToPipe(nextMove)
####################################################################################################################################################################################################################################
####################################################################################################################################################################################################################################
|
{
"content_hash": "9658804427e5cdef4ba4d1a2ad356964",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 228,
"avg_line_length": 63.66463414634146,
"alnum_prop": 0.3746767551000862,
"repo_name": "dimtion/jml",
"id": "f9686094d12715255d74d2e32960099aa168037e",
"size": "11276",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "outputFiles/statistics/archives/ourIA/improved_closest_v2.py/1.0/9/player1.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1654391"
},
{
"name": "TeX",
"bytes": "1439179"
}
],
"symlink_target": ""
}
|
app_name = "frappe"
# app_title = "Frappe Framework"
app_title = "LetzERP"
# app_publisher = "Web Notes Technologies Pvt. Ltd."
app_publisher = "LetzERP Pvt. Ltd."
app_description = "Full Stack Web Application Framework in Python"
app_icon = "assets/frappe/images/LetzERP.svg"
app_version = "5.0.0-alpha"
app_color = "#3498db"
app_email = "support@frappe.io"
before_install = "frappe.utils.install.before_install"
after_install = "frappe.utils.install.after_install"
# website
app_include_js = "assets/js/frappe.min.js"
app_include_css = [
"assets/frappe/css/splash.css",
"assets/css/frappe.css"
]
web_include_js = [
"assets/js/frappe-web.min.js",
"website_script.js"
]
web_include_css = [
"assets/css/frappe-web.css",
"style_settings.css"
]
write_file_keys = ["file_url", "file_name"]
notification_config = "frappe.core.notifications.get_notification_config"
before_tests = "frappe.utils.install.before_tests"
website_generators = ["Web Page", "Blog Post", "Blog Category", "Web Form"]
# login
on_session_creation = "frappe.desk.doctype.feed.feed.login_feed"
# permissions
permission_query_conditions = {
"Event": "frappe.desk.doctype.event.event.get_permission_query_conditions",
"ToDo": "frappe.desk.doctype.todo.todo.get_permission_query_conditions",
"User": "frappe.core.doctype.user.user.get_permission_query_conditions",
"Feed": "frappe.desk.doctype.feed.feed.get_permission_query_conditions",
"Note": "frappe.desk.doctype.note.note.get_permission_query_conditions"
}
has_permission = {
"Event": "frappe.desk.doctype.event.event.has_permission",
"ToDo": "frappe.desk.doctype.todo.todo.has_permission",
"User": "frappe.core.doctype.user.user.has_permission",
"Feed": "frappe.desk.doctype.feed.feed.has_permission",
"Note": "frappe.desk.doctype.note.note.has_permission"
}
doc_events = {
"*": {
"after_insert": "frappe.email.doctype.email_alert.email_alert.trigger_email_alerts",
"validate": "frappe.email.doctype.email_alert.email_alert.trigger_email_alerts",
"on_update": [
"frappe.desk.notifications.clear_doctype_notifications",
"frappe.email.doctype.email_alert.email_alert.trigger_email_alerts",
"frappe.desk.doctype.feed.feed.update_feed"
],
"after_rename": "frappe.desk.notifications.clear_doctype_notifications",
"on_submit": [
"frappe.email.doctype.email_alert.email_alert.trigger_email_alerts",
"frappe.desk.doctype.feed.feed.update_feed"
],
"on_cancel": [
"frappe.desk.notifications.clear_doctype_notifications",
"frappe.email.doctype.email_alert.email_alert.trigger_email_alerts"
],
"on_trash": "frappe.desk.notifications.clear_doctype_notifications"
}
}
scheduler_events = {
"all": [
"frappe.email.bulk.flush",
"frappe.email.doctype.email_account.email_account.pull"
],
"daily": [
"frappe.email.bulk.clear_outbox",
"frappe.desk.notifications.clear_notifications",
"frappe.desk.doctype.event.event.send_event_digest",
"frappe.sessions.clear_expired_sessions",
"frappe.email.doctype.email_alert.email_alert.trigger_daily_alerts",
]
}
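# Editor's note: an illustrative sketch (not part of this file) of the handler
# shape the dotted paths above point to. Frappe resolves each string to a
# callable; doc_events handlers receive the document and the event name, while
# scheduler_events entries are called with no arguments. These stubs are
# hypothetical:
#
# def clear_doctype_notifications(doc, method=None):
#     """Runs for every matching event per the doc_events mapping above."""
#     ...
#
# def flush():
#     """Runs on every scheduler tick (the "all" event)."""
#     ...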
|
{
"content_hash": "69ec0d0645d500fb19b8aad95ddeb42e",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 86,
"avg_line_length": 32.4468085106383,
"alnum_prop": 0.7288524590163934,
"repo_name": "gangadharkadam/letzfrappe",
"id": "d1d63e7645d769f7b3df1477968992fdb37b5374",
"size": "3050",
"binary": false,
"copies": "1",
"ref": "refs/heads/v5.0",
"path": "frappe/hooks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "152245"
},
{
"name": "HTML",
"bytes": "114035"
},
{
"name": "JavaScript",
"bytes": "1523383"
},
{
"name": "Python",
"bytes": "970350"
},
{
"name": "Shell",
"bytes": "517"
}
],
"symlink_target": ""
}
|
import codecs
import functools
import json
import os
import sys
import time
import socket
import redis
import logging
import signal
import aioredis
import asyncio
def noop(i):
return i
def env(identifier, default, type=noop):
return type(os.getenv("SINGLE_BEAT_%s" % identifier, default))
class Config(object):
REDIS_SERVER = env('REDIS_SERVER', 'redis://localhost:6379')
REDIS_PASSWORD = env('REDIS_PASSWORD', None)
REDIS_SENTINEL = env('REDIS_SENTINEL', None)
REDIS_SENTINEL_MASTER = env('REDIS_SENTINEL_MASTER', 'mymaster')
REDIS_SENTINEL_DB = env('REDIS_SENTINEL_DB', 0)
REDIS_SENTINEL_PASSWORD = env('REDIS_SENTINEL_PASSWORD', None)
IDENTIFIER = env('IDENTIFIER', None)
LOCK_TIME = env('LOCK_TIME', 5, int)
INITIAL_LOCK_TIME = env('INITIAL_LOCK_TIME', LOCK_TIME * 2, int)
HEARTBEAT_INTERVAL = env('HEARTBEAT_INTERVAL', 1, int)
HOST_IDENTIFIER = env('HOST_IDENTIFIER', socket.gethostname())
LOG_LEVEL = env('LOG_LEVEL', 'warn')
# wait_mode can be 'supervised' or 'heartbeat'
WAIT_MODE = env("WAIT_MODE", "heartbeat")
WAIT_BEFORE_DIE = env("WAIT_BEFORE_DIE", 60, int)
_host_identifier = None
def check(self, cond, message):
if not cond:
raise Exception(message)
def checks(self):
self.check(
self.LOCK_TIME < self.INITIAL_LOCK_TIME,
"initial lock time must be greater than lock time",
)
self.check(
self.HEARTBEAT_INTERVAL < (self.LOCK_TIME / 2.0),
"SINGLE_BEAT_HEARTBEAT_INTERVAL must be smaller than SINGLE_BEAT_LOCK_TIME / 2",
)
self.check(self.WAIT_MODE in ("supervised", "heartbeat"), "undefined wait mode")
if self.REDIS_SENTINEL:
master = self._sentinel.discover_master(self.REDIS_SENTINEL_MASTER)
else:
self._redis.ping()
def get_redis(self):
if self.REDIS_SENTINEL:
return self._sentinel.master_for(self.REDIS_SENTINEL_MASTER,
password=self.REDIS_PASSWORD,
redis_class=redis.Redis)
return self._redis
def rewrite_redis_url(self):
"""\
        if REDIS_SERVER is just a host or ip address, we translate it to a
        redis url (redis://REDIS_SERVER) so that the client doesn't try to
        connect to localhost while you meant to connect to another server
:return:
"""
if (
self.REDIS_SERVER.startswith("unix://")
or self.REDIS_SERVER.startswith("redis://")
or self.REDIS_SERVER.startswith("rediss://")
):
return self.REDIS_SERVER
return "redis://{}/".format(self.REDIS_SERVER)
def __init__(self):
if self.REDIS_SENTINEL:
sentinels = [tuple(s.split(':')) for s in self.REDIS_SENTINEL.split(';')]
self._sentinel = redis.sentinel.Sentinel(sentinels,
db=self.REDIS_SENTINEL_DB,
socket_timeout=0.1,
sentinel_kwargs={"password": self.REDIS_SENTINEL_PASSWORD}
)
else:
self._redis = redis.Redis.from_url(self.rewrite_redis_url())
def get_async_redis_client(self):
conn = self.get_redis().connection_pool.get_connection("ping")
host, port, password = conn.host, conn.port, conn.password
r = aioredis.Redis(host=host, port=port, password=password)
return r.pubsub()
def get_host_identifier(self):
"""\
        we return an IPADDR:PID string to identify where this single-beat
        instance is running.
:return:
"""
if self._host_identifier:
return self._host_identifier
local_ip_addr = (
self.get_redis()
.connection_pool.get_connection("ping")
._sock.getsockname()[0]
)
self._host_identifier = "{}:{}".format(local_ip_addr, os.getpid())
return self._host_identifier
config = Config()
config.checks()
numeric_log_level = getattr(logging, config.LOG_LEVEL.upper(), None)
logging.basicConfig(level=numeric_log_level)
logger = logging.getLogger(__name__)
def get_process_identifier(args):
"""by looking at arguments we try to generate a proper identifier
>>> get_process_identifier(['python', 'echo.py', '1'])
'python_echo.py_1'
"""
return "_".join(args)
class State:
PAUSED = "PAUSED"
RUNNING = "RUNNING"
WAITING = "WAITING"
RESTARTING = "RESTARTING"
def is_process_alive(pid):
try:
os.kill(pid, 0)
return True
    except OSError:
        return False
class Process(object):
def __init__(self, args):
self.args = args
self.state = None
self.t1 = time.time()
self.identifier = config.IDENTIFIER or get_process_identifier(self.args[1:])
self.ioloop = asyncio.get_running_loop()
for signame in {"SIGINT", "SIGTERM"}:
sig = getattr(signal, signame)
self.ioloop.add_signal_handler(
sig, functools.partial(self.sigterm_handler, sig, self.ioloop)
)
self.async_redis = config.get_async_redis_client()
self.fence_token = 0
self.sprocess = None
self.pc = None
self.state = State.WAITING
self._periodic_callback_running = True
self.child_exit_cb = self.proc_exit_cb
def proc_exit_cb(self, exit_status):
"""When child exits we use the same exit status code"""
self._periodic_callback_running = False
sys.exit(exit_status)
def proc_exit_cb_noop(self, exit_status):
"""\
when we deliberately restart/stop the child process,
we don't want to exit ourselves, so we replace proc_exit_cb
with a noop one when restarting
:param exit_status:
:return:
"""
    def proc_exit_cb_restart(self, exit_status):
        """\
        this is used when we restart the process,
        it re-triggers the start
        """
        # run_until_complete would raise RuntimeError here because the loop is
        # already running when the exit callback fires, so schedule a task instead
        self.ioloop.create_task(self.spawn_process())
def proc_exit_cb_state_set(self, exit_status):
if self.state == State.PAUSED:
self.state = State.WAITING
def stdout_read_cb(self, data):
sys.stdout.write(data)
def stderr_read_cb(self, data):
sys.stderr.write(data)
async def timer_cb_paused(self):
pass
async def timer_cb_waiting(self):
if self.acquire_lock():
logger.info(f"acquired lock, {self.identifier} spawning child process")
return self.ioloop.create_task(self.spawn_process())
# couldn't acquire lock
if config.WAIT_MODE == "supervised":
logger.debug(
"already running, will exit after %s seconds" % config.WAIT_BEFORE_DIE
)
time.sleep(config.WAIT_BEFORE_DIE)
sys.exit()
def process_pid(self):
"""\
        when we are restarting, we want to keep sending the heartbeat, so no other
        single-beat node will pick the lock up.
        Hence we need a process id as an identifier - even for a short period of time.
:return:
"""
if self.sprocess:
return self.sprocess.pid
return -1
async def timer_cb_running(self):
rds = config.get_redis()
# read current fence token
redis_fence_token = rds.get(
"SINGLE_BEAT_{identifier}".format(identifier=self.identifier)
)
if redis_fence_token:
redis_fence_token = int(redis_fence_token.split(b":")[0])
else:
logger.error(
"fence token could not be read from Redis - assuming lock expired, trying to reacquire lock"
)
if self.acquire_lock():
logger.info("reacquired lock")
redis_fence_token = self.fence_token
else:
logger.error("unable to reacquire lock, terminating")
os.kill(os.getpid(), signal.SIGTERM)
logger.debug(
"expected fence token: {} fence token read from Redis: {}".format(
self.fence_token, redis_fence_token
)
)
if self.fence_token == redis_fence_token:
self.fence_token += 1
rds.set(
"SINGLE_BEAT_{identifier}".format(identifier=self.identifier),
"{}:{}:{}".format(
self.fence_token, config.HOST_IDENTIFIER, self.process_pid()
),
ex=config.LOCK_TIME,
)
else:
logger.error(
"fence token did not match - lock is held by another process, terminating"
)
# send sigterm to ourself and let the sigterm_handler do the rest
os.kill(os.getpid(), signal.SIGTERM)
async def timer_cb_restarting(self):
"""\
        when restarting we are doing exactly the same as when running - we don't
        want any other single-beat node to pick the lock up
:return:
"""
await self.timer_cb_running()
async def timer_cb(self):
logger.debug("timer called %s state=%s", time.time() - self.t1, self.state)
self.t1 = time.time()
fn = getattr(self, "timer_cb_{}".format(self.state.lower()))
await fn()
def acquire_lock(self):
rds = config.get_redis()
return rds.execute_command(
"SET",
"SINGLE_BEAT_{}".format(self.identifier),
"{}:{}:{}".format(self.fence_token, config.HOST_IDENTIFIER, 0),
"NX",
"EX",
config.INITIAL_LOCK_TIME,
)
def sigterm_handler(self, signum, loop):
"""When we get term signal
if we are waiting and got a sigterm, we just exit.
if we have a child running, we pass the signal first to the child
then we exit.
To exit we signal our main sleep/trigger loop on `self.run()`
:param signum:
:param ioloop:
:return:
"""
assert self.state in ("WAITING", "RUNNING", "PAUSED")
logger.debug("our state %s", self.state)
if self.state == "WAITING":
self._periodic_callback_running = False
if self.state == "RUNNING":
logger.debug(
"already running sending signal to child - %s", self.sprocess.pid
)
self.sprocess.send_signal(signum)
logger.debug("waiting for subprocess to finish")
self.ioloop.create_task(self.sprocess.wait())
self._periodic_callback_running = False
async def run(self):
while self._periodic_callback_running:
await self.timer_cb()
await asyncio.sleep(config.HEARTBEAT_INTERVAL)
async def _read_stream(self, stream, cb):
decoder = codecs.getincrementaldecoder('utf-8')(errors='strict')
while True:
line = await stream.read(100)
if line:
cb(decoder.decode(line))
else:
break
async def spawn_process(self):
cmd = self.args
env = os.environ
self.state = State.RUNNING
try:
self.sprocess = await asyncio.create_subprocess_exec(
*cmd,
env=env,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE
)
except FileNotFoundError:
"""
            if the file that we need to run doesn't exist
we immediately exit.
"""
logger.exception("file not found")
return self.child_exit_cb(1)
try:
            # asyncio.wait needs tasks/futures (bare coroutines are deprecated),
            # so wrap the stream readers explicitly
            await asyncio.wait(
                [
                    asyncio.ensure_future(self._read_stream(self.sprocess.stdout, self.forward_stdout)),
                    asyncio.ensure_future(self._read_stream(self.sprocess.stderr, self.forward_stderr)),
                ]
            )
self.child_exit_cb(self.sprocess.returncode)
except SystemExit as e:
os._exit(e.code)
def cli_command_info(self, msg):
info = ""
if self.sprocess:
if is_process_alive(self.sprocess.pid):
info = "pid: {}".format(self.sprocess.pid)
return info
def child_process_alive(self):
return not self.sprocess.protocol._process_exited
def cli_command_quit(self, msg):
"""\
kills the child and exits
"""
if self.state == State.RUNNING and self.sprocess and self.child_process_alive():
self.sprocess.kill()
else:
sys.exit(0)
def cli_command_pause(self, msg):
"""\
if we have a running child we kill it and set our state to paused
if we don't have a running child, we set our state to paused
this will pause all the nodes in single-beat cluster
        it's useful when you deploy some code and don't want your child to respawn
        unexpectedly
:param msg:
:return:
"""
info = ""
if self.state == State.RUNNING and self.sprocess and self.child_process_alive():
self.child_exit_cb = self.proc_exit_cb_noop
self.sprocess.kill()
info = "killed"
# TODO: check if process is really dead etc.
self.state = State.PAUSED
return info
def cli_command_resume(self, msg):
"""\
sets state to waiting - so we resume spawning children
"""
if self.state == State.PAUSED:
self.state = State.WAITING
def cli_command_stop(self, msg):
"""\
        stops the running child process - if it's running.
        it will re-spawn on one of the single-beat nodes after some time
:param msg:
:return:
"""
info = ""
if self.state == State.RUNNING and self.sprocess and self.sprocess.proc:
self.state = State.PAUSED
# TODO:
# self.sprocess.set_exit_callback(self.proc_exit_cb_state_set)
self.sprocess.kill()
info = "killed"
# TODO: check if process is really dead etc.
return info
def cli_command_restart(self, msg):
"""\
restart the subprocess
i. we set our state to RESTARTING - on restarting we still send heartbeat
ii. we kill the subprocess
iii. we start again
        iv. if it's started we set our state to RUNNING, else we set it to WAITING
:param msg:
:return:
"""
info = ""
if self.state == State.RUNNING and self.sprocess and self.sprocess.proc:
self.state = State.RESTARTING
self.child_exit_cb = self.proc_exit_cb_restart
self.sprocess.kill()
info = "killed"
# TODO: check if process is really dead etc.
return info
def pubsub_callback(self, msg):
logger.info("got command - %s", msg)
if msg["type"] != b"message":
return
try:
cmd = json.loads(msg["data"])
except:
logger.exception("exception on parsing command %s", msg)
return
fn = getattr(self, "cli_command_{}".format(cmd["cmd"]), None)
if not fn:
logger.info("cli_command_{} not found".format(cmd["cmd"]))
return
logger.info("got command - %s running %s", msg["data"], fn)
info = fn(cmd)
rds = config.get_redis()
logger.info("reply to %s", cmd["reply_channel"])
rds.publish(
cmd["reply_channel"],
json.dumps(
{
"identifier": config.get_host_identifier(),
"state": self.state,
"info": info or "",
}
),
)
async def wait_for_commands(self):
logger.info("subscribed to %s", "SB_{}".format(self.identifier))
await self.async_redis.subscribe("SB_{}".format(self.identifier))
logger.debug("subscribed to redis channel %s", "SB_{}".format(self.identifier))
async for msg in self.async_redis.listen():
self.pubsub_callback(msg)
def forward_stdout(self, buf):
self.stdout_read_cb(buf)
def forward_stderr(self, buf):
self.stderr_read_cb(buf)
async def run_process():
process = Process(sys.argv[1:])
await process.run()
def main():
asyncio.run(run_process())
if __name__ == "__main__":
main()
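# Editor's note: a self-contained sketch (not part of single-beat) of the lock
# protocol implemented by acquire_lock/timer_cb_running above: claim the key
# with SET NX EX, then refresh it only while the stored fence token still
# matches our own. Assumes a local Redis; the function name is hypothetical
# and it is never called from this module.
def _lock_protocol_demo(identifier="demo", ttl=10):
    rds = redis.Redis()
    key = "SINGLE_BEAT_{}".format(identifier)
    fence_token = 0
    # Claim: only succeeds if nobody holds the key (NX), with a TTL (EX)
    if not rds.execute_command(
        "SET", key, "{}:{}:{}".format(fence_token, socket.gethostname(), 0),
        "NX", "EX", ttl,
    ):
        return False
    # Refresh: re-read the stored token; a mismatch means another holder won
    stored = int(rds.get(key).split(b":")[0])
    if stored != fence_token:
        return False
    fence_token += 1
    rds.set(key, "{}:{}:{}".format(fence_token, socket.gethostname(), 0), ex=ttl)
    return True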
|
{
"content_hash": "c408bb5c02ea51675fdebaf18dedb01b",
"timestamp": "",
"source": "github",
"line_count": 508,
"max_line_length": 111,
"avg_line_length": 32.74409448818898,
"alnum_prop": 0.5611999519057352,
"repo_name": "ybrs/single-beat",
"id": "89aee9a82cb4ac101b90442c846a719b94eef8bf",
"size": "16634",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "singlebeat/beat.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24214"
},
{
"name": "Shell",
"bytes": "854"
}
],
"symlink_target": ""
}
|
import math
import unittest
from rx import Observable
from rx.testing import TestScheduler, ReactiveTest, is_prime, MockDisposable
from rx.disposables import Disposable, SerialDisposable
on_next = ReactiveTest.on_next
on_completed = ReactiveTest.on_completed
on_error = ReactiveTest.on_error
subscribe = ReactiveTest.subscribe
subscribed = ReactiveTest.subscribed
disposed = ReactiveTest.disposed
created = ReactiveTest.created
class RxException(Exception):
pass
# Helper function for raising exceptions within lambdas
def _raise(ex):
raise RxException(ex)
class TestDefaultIfEmpty(unittest.TestCase):
def test_default_if_empty_non_empty1(self):
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(on_next(280, 42), on_next(360, 43), on_completed(420))
def create():
return xs.default_if_empty()
results = scheduler.start(create)
results.messages.assert_equal(on_next(280, 42), on_next(360, 43), on_completed(420))
xs.subscriptions.assert_equal(subscribe(200, 420))
def test_default_if_empty_non_empty2(self):
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(on_next(280, 42), on_next(360, 43), on_completed(420))
def create():
return xs.default_if_empty(-1)
results = scheduler.start(create)
results.messages.assert_equal(on_next(280, 42), on_next(360, 43), on_completed(420))
xs.subscriptions.assert_equal(subscribe(200, 420))
def test_default_if_empty_empty1(self):
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(on_completed(420))
def create():
return xs.default_if_empty(None)
results = scheduler.start(create)
results.messages.assert_equal(on_next(420, None), on_completed(420))
xs.subscriptions.assert_equal(subscribe(200, 420))
def test_default_if_empty_empty2(self):
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(on_completed(420))
def create():
return xs.default_if_empty(-1)
results = scheduler.start(create)
results.messages.assert_equal(on_next(420, -1), on_completed(420))
xs.subscriptions.assert_equal(subscribe(200, 420))
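# Editor's note: a hedged usage sketch (not part of this test module) of the
# operator under test, assuming the RxPY 1.x Observable API imported above:
#
# Observable.empty().default_if_empty(42).subscribe(print)      # emits 42
# Observable.from_([1, 2]).default_if_empty(42).subscribe(print)  # emits 1, 2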
|
{
"content_hash": "ae96849c36900eaf68673d0721dda9a8",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 99,
"avg_line_length": 34.6231884057971,
"alnum_prop": 0.6596902469652575,
"repo_name": "dbrattli/RxPY",
"id": "206d9781bb96418fa27dcb4f0b37a3dc8adbcf3c",
"size": "2389",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_observable/test_defaultifempty.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1334787"
}
],
"symlink_target": ""
}
|
import collections
from . import machines
from . import syntax
__all__ = ['Graph', 'from_graph', 'write_dot', 'read_tgf', 'to_graph', 'Editor']
class Graph:
"""A directed graph. Both nodes and edges can have a `dict` of attributes.
Nodes can be any object that implements `__hash__` and `__eq__`.
If `g` is a `Graph` and `v` is a node, `v`'s attributes can be
accessed as `g.nodes[v]`. If `u` and `v` are nodes, edge (`u`,
`v`)'s attributes can be accessed as `g.edges[u][v]`.
"""
def __init__(self, attrs=None):
self.nodes = {}
self.edges = {}
if attrs is None: attrs = {}
self.attrs = attrs
def add_node(self, v, attrs=None):
"""Add node `v` to graph with attributes `attrs`."""
if attrs is None: attrs = {}
if v in self.nodes:
self.nodes[v].update(attrs)
else:
self.nodes[v] = attrs
def remove_node(self, v):
"""Remove node `v`, as well as any edges incident to `v`."""
del self.nodes[v]
del self.edges[v]
for u in self.nodes:
if u in self.edges and v in self.edges[u]:
del self.edges[u][v]
def add_edge(self, u, v, attrs=None):
"""Add edge from `u` to `v` to graph with attributes `attrs`."""
if attrs is None: attrs = {}
if u not in self.nodes: self.nodes[u] = {}
if v not in self.nodes: self.nodes[v] = {}
self.edges.setdefault(u, {})
self.edges[u].setdefault(v, [])
self.edges[u][v].append(attrs)
def has_edge(self, u, v):
"""Remove edge from `u` to `v`."""
return u in self.edges and v in self.edges[u] and len(self.edges[u][v]) > 0
    def get_edges(self, u, v):
        """Return the (possibly empty) list of attribute dicts for edges from `u` to `v`."""
self.edges.setdefault(u, {})
self.edges[u].setdefault(v, [])
return self.edges[u][v]
def only_path(self):
"""Finds the only path from the start node. If there is more than one,
raises ValueError."""
start = [v for v in self.nodes if self.nodes[v].get('start', False)]
if len(start) != 1:
raise ValueError("There must be exactly one start node")
nodes = []
edges = []
[v] = start
while True:
nodes.append(v)
u = v
vs = self.edges.get(u, ())
if len(vs) == 0:
break
elif len(vs) > 1:
raise ValueError("There must be exactly one path")
[v] = vs
if len(self.edges[u][v]) != 1:
raise ValueError("There must be exactly one path")
[e] = self.edges[u][v]
edges.append(e)
return Path(nodes, edges, self.nodes[v].get('accept', False))
def shortest_path(self):
"""Finds the shortest path from the start node to an accept node. If
there is more than one, chooses one arbitrarily."""
start = [v for v in self.nodes if self.nodes[v].get('start', False)]
if len(start) != 1:
raise ValueError("There must be exactly one start node")
frontier = collections.deque(start)
pred = {start[0]: None}
while len(frontier) > 0:
u = frontier.popleft()
if self.nodes[u].get('accept', False):
nodes = []
edges = []
while u is not None:
nodes.append(u)
if pred[u] is not None:
edges.append(self.edges[pred[u]][u][0])
u = pred[u]
nodes.reverse()
edges.reverse()
return Path(nodes, edges, True)
for v in self.edges.get(u, ()):
if v not in pred:
frontier.append(v)
pred[v] = u
raise ValueError("There is no accepting path")
def has_path(self):
"""Returns `True` iff there is a path from the start node to an accept node."""
try:
self.shortest_path()
return True
        except ValueError:
            return False
def __getitem__(self, u):
return self.edges[u]
def _repr_dot_(self, merge_parallel=True, index=None):
def repr_html(x):
if hasattr(x, '_repr_html_'):
return x._repr_html_()
else:
return str(x)
result = []
result.append('digraph {')
for key, val in self.attrs.items():
result.append(' {}={};'.format(key, val))
result.append(' node [fontname=Monospace,fontsize=10,shape=box,style=rounded,height=0,width=0,margin="0.055,0.042"];')
result.append(' edge [arrowhead=vee,arrowsize=0.5,fontname=Monospace,fontsize=9];')
# Draw nodes
result.append(' _START[shape=none,label=""];\n')
if index is None:
index = {}
else:
if not isinstance(index, dict):
raise TypeError('index must be a dict')
index.clear()
for i, q in enumerate(sorted(self.nodes, key=id)):
index[q] = i
attrs = {}
for key, val in self.nodes[q].items():
if key in ['label', 'style']:
attrs[key] = val
if 'label' not in attrs:
attrs['label'] = q
attrs['label'] = '<'+repr_html(attrs['label'])+'>'
if self.nodes[q].get('accept', False):
attrs['peripheries'] = 2
attrs = ','.join('{}={}'.format(key, val) for (key, val) in attrs.items())
result.append(' {}[{}];'.format(i, attrs))
# Draw edges to nowhere
for q in self.nodes:
i = index[q]
if self.nodes[q].get('start', False):
result.append(' _START -> {}'.format(i))
if self.nodes[q].get('incomplete', False):
result.append(' _DOTS_{}[shape=none,label=""];\n'.format(i))
result.append(' {} -> _DOTS_{}[dir=none,style=dotted]'.format(i, i))
# Organize nodes into ranks, if any
rank_nodes = collections.defaultdict(set)
has_rank = set()
for v in self.nodes:
if 'rank' in self.nodes[v]:
has_rank.add(v)
rank = self.nodes[v]['rank']
rank_nodes[rank].add(v)
if len(has_rank) > 0:
for rank in rank_nodes:
result.append(' {{ rank=same; {} }}'.format(' '.join(str(index[v]) for v in rank_nodes[rank])))
node_has_constraint = set()
rank_has_constraint = set()
for u in has_rank:
ur = self.nodes[u]['rank']
for v in self.edges.get(u, ()):
if v in has_rank:
vr = self.nodes[v]['rank']
if ur != vr:
if vr not in rank_has_constraint:
rank_has_constraint.add(vr)
node_has_constraint.add(v)
# Draw normal edges
for u in self.edges:
for v in self.edges[u]:
edges = []
for e in self.edges[u][v]:
attrs = {}
for key, val in e.items():
if key == 'label':
attrs['label'] = repr_html(e['label'])
elif key in ['style', 'color']:
attrs[key] = val
edges.append(attrs)
if merge_parallel:
attrs = {}
labels = []
for e in edges:
if 'label' in e:
labels.append(e['label'])
# In principle it's possible for parallel edges to have
# different attributes, but not for the cases where
# we currently use attributes.
attrs.update(e)
if labels:
labels = [f'<tr><td>{label}</td></tr>' for label in labels]
attrs['label'] = '<<table border="0" cellpadding="1">{}</table>>'.format(''.join(labels))
edges = [attrs]
else:
for attrs in edges:
attrs['label'] = f"<{attrs['label']}>"
for attrs in edges:
# Within-rank edges don't constrain position of v if v's position is already determined
if 'rank' in self.nodes[u] and 'rank' in self.nodes[v] and self.nodes[u]['rank'] == self.nodes[v]['rank'] and v in node_has_constraint:
attrs['constraint'] = 'false'
if attrs:
attrs = ','.join('{}={}'.format(key, val) for key, val in attrs.items())
result.append(' {} -> {}[{}];'.format(index[u], index[v], attrs))
else:
result.append(' {} -> {};'.format(index[u], index[v]))
result.append('}')
return '\n'.join(result)
def _ipython_display_(self):
from IPython.display import display # type: ignore
from .graphviz import run_dot
display(run_dot(self._repr_dot_()))
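# Editor's note: a short usage sketch (not part of the original module) of the
# Graph API documented above; node and label values are arbitrary examples and
# the function is never called here.
def _graph_demo():
    g = Graph()
    g.add_node('q1', {'start': True})
    g.add_node('q2', {'accept': True})
    g.add_edge('q1', 'q2', {'label': 'a'})
    assert g.has_edge('q1', 'q2')
    # only_path follows the unique start->accept chain q1 -> q2
    return g.only_path()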
def graph_to_json(g):
j = {'nodes': {}, 'edges': {}}
for attr in ['xmin', 'xmax', 'ymin', 'ymax']:
if attr in g.attrs:
j[attr] = g.attrs[attr]
for v in g.nodes:
        j['nodes'][str(v)] = {}
for attr in ['start', 'accept', 'x', 'y', 'startx', 'starty']:
if attr in g.nodes[v]:
j['nodes'][str(v)][attr] = g.nodes[v][attr]
for u in g.edges:
j['edges'][u] = {}
for v in g.edges[u]:
j['edges'][u][v] = []
for e in g.edges[u][v]:
attrs = {'label': str(e['label'])}
for attr in ['anchorx', 'anchory']:
if attr in e:
attrs[attr] = e[attr]
j['edges'][u][v].append(attrs)
return j
def json_to_graph(j):
g = Graph()
for v in j['nodes']:
g.add_node(str(v), {
'start': j['nodes'][v]['start'],
'accept': j['nodes'][v]['accept']})
for u in j['edges']:
for v in j['edges'][u]:
for e in j['edges'][u][v]:
g.add_edge(u, v,
{'label': syntax.str_to_transition(e['label'])})
return g
def read_tgf(filename):
"""Reads a file in Trivial Graph Format. Edge labels are read into the
`label` attribute."""
with open(filename) as file:
g = Graph()
states = {}
section = 0
for line in file:
line = line.strip()
if line == "":
continue
fields = line.split()
if fields == ["#"]:
section += 1
elif section == 0:
# Nodes
if len(fields) != 2:
raise ValueError(f"A node must have an id and a label (not {line})")
i, q = fields
q, attrs = syntax.str_to_state(q)
states[i] = q
g.add_node(q, attrs)
elif section == 1:
# Edges
if len(fields) != 3:
raise ValueError(f"An edge must have a tail, a head, and a label (not {line})")
i, j, t = fields
q, r = states[i], states[j]
t = syntax.str_to_transition(t)
g.add_edge(q, r, {'label':t})
return from_graph(g)
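# Editor's note: an illustrative TGF file (not from the original module) in the
# format read above -- one node per line (id then label), a lone '#', then one
# edge per line (tail id, head id, transition label). How start/accept states
# are marked in the node label is delegated to syntax.str_to_state, so the
# marker characters shown here are an assumption:
#
# 1 >q1
# 2 @q2
# #
# 1 2 a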
def from_graph(g):
"""Converts a `Graph` to a `Machine`."""
transitions = []
for q in g.edges:
for r in g.edges[q]:
for e in g.edges[q][r]:
t = e['label']
transitions.append(([[q]]+list(t.lhs), [[r]]+list(t.rhs)))
start_state = None
accept_states = set()
for q in g.nodes:
if g.nodes[q].get('start', False):
if start_state is not None:
raise ValueError("A Machine must have only one start state")
start_state = q
if g.nodes[q].get('accept', False):
accept_states.add(q)
if start_state is None:
raise ValueError("A Machine must have one start state")
return machines.from_transitions(transitions, start_state, accept_states)
def write_dot(x, filename):
"""Writes a `Machine` or `Graph` to file named `filename` in GraphViz
(DOT) format."""
if isinstance(x, machines.Machine):
x = to_graph(x)
if not isinstance(x, Graph):
raise TypeError("Only Machines and Graphs can be written as DOT files")
with open(filename, "w") as file:
file.write(x._repr_dot_())
def to_graph(m):
"""Converts a `Machine` to a `Graph`."""
g = Graph()
g.attrs['rankdir'] = 'LR'
q = m.get_start_state()
g.add_node(q, {'start': True})
for q in m.get_accept_states():
g.add_node(q, {'accept': True})
for t in m.get_transitions():
state = t[m.state]
[[q]] = state.lhs
[[r]] = state.rhs
t = t[:m.state] + t[m.state+1:]
g.add_edge(q, r, {'label': t})
return g
class Path:
def __init__(self, nodes, edges, accept):
self.nodes = nodes
self.edges = edges
self.accept = accept
def __len__(self):
return len(self.nodes)
def __getitem__(self, i):
return self.nodes[i]
def __str__(self):
return '\n'.join(map(str, self.nodes))
def _repr_html_(self):
html = ['<table style="font-family: Monospace, monospace;">\n']
for config in self.nodes:
if not isinstance(config, machines.Configuration):
raise TypeError('A Path can only displayed as HTML if its nodes are Configurations')
html.append(' <tr>')
for store in config.stores:
html.extend(['<td style="text-align: left">', store._repr_html_(), '</td>'])
html.append('</tr>\n')
html.append('</table>\n')
if self.accept:
html.append('<p>accept</p>')
else:
html.append('<p>reject</p>')
return ''.join(html)
def layout(g):
import pydot
from .graphviz import run_dot
def parse_string(s):
if s.startswith('"') and s.endswith('"'):
s = s[1:-1]
s = s.replace('\\\n','')
return s
node_index = {}
dot = g._repr_dot_(index=node_index, merge_parallel=False)
dot = run_dot(dot, format="dot")
dot = pydot.graph_from_dot_data(dot)[0]
bbox = parse_string(dot.get_bb()).split(',')
g.attrs['xmin'] = bbox[0]
g.attrs['ymin'] = bbox[1]
g.attrs['xmax'] = bbox[2]
g.attrs['ymax'] = bbox[3]
for v in g.nodes:
vid = node_index[v]
vdot = dot.get_node(str(vid))[0]
pos = parse_string(vdot.get_attributes()['pos'])
x, y = pos.split(',', 1)
g.nodes[v]['x'] = float(x)
g.nodes[v]['y'] = float(y)
if g.nodes[v].get('start', False):
sdot = dot.get_node('_START')[0]
pos = parse_string(sdot.get_attributes()['pos'])
x, y = pos.split(',', 1)
g.nodes[v]['startx'] = float(x)
g.nodes[v]['starty'] = float(y)
for u in g.edges:
uid = node_index[u]
for v in g.edges[u]:
vid = node_index[v]
edots = dot.get_edge(str(uid), str(vid))
for e, edot in zip(g.edges[u][v], edots):
pos = parse_string(edot.get_attributes()['pos'])
points = []
start = end = None
for pstr in pos.split():
fields = pstr.split(',')
if fields[0] not in ['s', 'e']:
points.append(tuple(map(float, fields)))
if len(points) % 2 == 1:
# Every third point is actually on the curve.
# Choose the middle one.
e['anchorx'] = points[len(points)//2][0]
e['anchory'] = points[len(points)//2][1]
else:
# Find the middle of the middle spline
i = len(points)//2-2
e['anchorx'] = (points[i][0] + points[i+1][0]*3 + points[i+2][0]*3 + points[i+3][0])/8
e['anchory'] = (points[i][1] + points[i+1][1]*3 + points[i+2][1]*3 + points[i+3][1])/8
return g
class Editor:
_editors = []
def __init__(self, m):
self.m = m
import importlib.resources
        self.src = importlib.resources.read_text(__package__, 'editor.js')
self.src = self.src + f'main({len(Editor._editors)});'
Editor._editors.append(self)
try:
import google.colab
import IPython.display
google.colab.output.register_callback('notebook.editor_load',
lambda ei: IPython.display.JSON(editor_load(ei)))
google.colab.output.register_callback('notebook.editor_save', editor_save)
except ImportError:
pass
def _ipython_display_(self):
# bad to have more than one of these per Editor object?
import IPython
IPython.display.display(IPython.display.Javascript(self.src))
def save(self, j):
g = json_to_graph(j)
# from_graph knows how to convert g to a Machine, but tries to
# guess store_types whereas we know what it should actually
# be. So we can do some more careful validation here.
self.m.transitions = []
for q in g.edges:
for r in g.edges[q]:
for e in g.edges[q][r]:
t = e['label']
lhs = list(t.lhs)
lhs[self.m.state:self.m.state] = [q]
rhs = list(t.rhs)
rhs[self.m.state:self.m.state] = [r]
try:
self.m.add_transition(lhs, rhs)
                    except Exception as err:
                        raise ValueError(f"Error in transition from {q} on {t} to {r} ({err})")
start_state = None
accept_states = set()
for q in g.nodes:
if g.nodes[q].get('start', False):
if start_state is not None:
raise ValueError("A Machine must have only one start state")
start_state = q
if g.nodes[q].get('accept', False):
accept_states.add(q)
if start_state is None:
raise ValueError("A Machine must have one start state")
self.m.set_start_state(start_state)
self.m.accept_configs.clear()
self.m.add_accept_states(accept_states)
def load(self):
g = to_graph(self.m)
layout(g)
return graph_to_json(g)
def editor_save(ei, g):
Editor._editors[ei].save(g)
def editor_load(ei):
return Editor._editors[ei].load()
|
{
"content_hash": "c2b794e0ec659a0f79e26a95d98792ee",
"timestamp": "",
"source": "github",
"line_count": 529,
"max_line_length": 155,
"avg_line_length": 36.226843100189036,
"alnum_prop": 0.48121477770820287,
"repo_name": "ND-CSE-30151/tock",
"id": "ecc5f8773016908c29a1115fb2944d9d174fe5b7",
"size": "19164",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tock/graphs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "31551"
},
{
"name": "Makefile",
"bytes": "247"
},
{
"name": "Nix",
"bytes": "363"
},
{
"name": "Python",
"bytes": "141846"
}
],
"symlink_target": ""
}
|
try:
# Python 2.7
from collections import OrderedDict
except:
# Python 2.6
from gluon.contrib.simplejson.ordered_dict import OrderedDict
from gluon import current
from gluon.storage import Storage
def config(settings):
"""
Template settings for CERT (Community Emergency Response Teams)
http://eden.sahanafoundation.org/wiki/BluePrintCERT
Demo only, not in Production
"""
T = current.T
# Pre-Populate
settings.base.prepopulate += ("CERT", "default/users")
# Theme
#settings.base.theme = "CERT"
settings.base.system_name = T("Sahana Disaster Management Platform")
settings.base.system_name_short = T("Sahana")
    # Hide the language toolbar
settings.L10n.display_toolbar = False
# Default timezone for users
settings.L10n.utc_offset = "-0600"
    # Use US-style dates in English
settings.L10n.date_format = "%m-%d-%Y"
# Start week on Sunday
settings.L10n.firstDOW = 0
# Number formats (defaults to ISO 31-0)
# Decimal separator for numbers (defaults to ,)
settings.L10n.decimal_separator = "."
# Thousands separator for numbers (defaults to space)
settings.L10n.thousands_separator = ","
# Default Country Code for telephone numbers
settings.L10n.default_country_code = 1
# Enable this to change the label for 'Mobile Phone'
settings.ui.label_mobile_phone = "Cell Phone"
# Enable this to change the label for 'Postcode'
settings.ui.label_postcode = "ZIP Code"
# PDF to Letter
settings.base.paper_size = T("Letter")
settings.hrm.multiple_orgs = False
settings.hrm.vol_experience = False
settings.hrm.use_description = None
settings.hrm.use_skills = False
settings.hrm.use_awards = False
settings.hrm.use_credentials = False
settings.msg.require_international_phone_numbers = False
settings.gis.geocode_imported_addresses = "google"
# Comment/uncomment modules here to disable/enable them
settings.modules = OrderedDict([
# Core modules which shouldn't be disabled
("default", Storage(
name_nice = T("Home"),
restricted = False, # Use ACLs to control access to this module
access = None, # All Users (inc Anonymous) can see this module in the default menu & access the controller
module_type = None # This item is not shown in the menu
)),
("admin", Storage(
name_nice = T("Administration"),
#description = "Site Administration",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu & access the controller
module_type = None # This item is handled separately for the menu
)),
("appadmin", Storage(
name_nice = T("Administration"),
#description = "Site Administration",
restricted = True,
module_type = None # No Menu
)),
("errors", Storage(
name_nice = T("Ticket Viewer"),
#description = "Needed for Breadcrumbs",
restricted = False,
module_type = None # No Menu
)),
("sync", Storage(
name_nice = T("Synchronization"),
#description = "Synchronization",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu & access the controller
module_type = None # This item is handled separately for the menu
)),
("gis", Storage(
name_nice = T("Map"),
#description = "Situation Awareness & Geospatial Analysis",
restricted = True,
module_type = 1, # 1st item in the menu
)),
("pr", Storage(
name_nice = T("Person Registry"),
#description = "Central point to record details on People",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu (access to controller is possible to all still)
module_type = 10
)),
("org", Storage(
name_nice = T("Organizations"),
#description = 'Lists "who is doing what & where". Allows relief agencies to coordinate their activities',
restricted = True,
module_type = 10
)),
# All modules below here should be possible to disable safely
("hrm", Storage(
name_nice = T("Staff"),
#description = "Human Resources Management",
restricted = True,
module_type = None,
)),
("vol", Storage(
name_nice = T("Volunteers"),
#description = "Human Resources Management",
restricted = True,
module_type = 2,
)),
("cms", Storage(
name_nice = T("Content Management"),
#description = "Content Management System",
restricted = True,
module_type = 10,
)),
("doc", Storage(
name_nice = T("Documents"),
#description = "A library of digital resources, such as photos, documents and reports",
restricted = True,
module_type = 10,
)),
("msg", Storage(
name_nice = T("Messaging"),
#description = "Sends & Receives Alerts via Email & SMS",
restricted = True,
# The user-visible functionality of this module isn't normally required. Rather it's main purpose is to be accessed from other modules.
module_type = None,
)),
#("project", Storage(
# name_nice = T("Projects"),
# #description = "Tracking of Projects, Activities and Tasks",
# restricted = True,
# module_type = 2
# )),
("scenario", Storage(
name_nice = T("Scenarios"),
#description = "Define Scenarios for allocation of appropriate Resources (Human, Assets & Facilities).",
restricted = True,
module_type = None,
)),
("event", Storage(
name_nice = T("Events"),
#description = "Activate Events (e.g. from Scenario templates) for allocation of appropriate Resources (Human, Assets & Facilities).",
restricted = True,
module_type = 10,
)),
("irs", Storage(
name_nice = T("Incidents"),
#description = "Incident Reporting System",
restricted = False,
module_type = 10
)),
])
# END =========================================================================
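# Editor's note: an illustrative sketch (not part of this template) of the
# entry shape used in settings.modules above; "asset" is only an example
# module name:
#
# ("asset", Storage(
#     name_nice = T("Assets"),
#     restricted = True,
#     module_type = 10,
# )),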
|
{
"content_hash": "a194b7395ef1c9a964025235d9e35d84",
"timestamp": "",
"source": "github",
"line_count": 177,
"max_line_length": 151,
"avg_line_length": 40.056497175141246,
"alnum_prop": 0.5451339915373766,
"repo_name": "sahana/Turkey",
"id": "989c2805cbb577f441ec93e49215acbaaf7ea3b1",
"size": "7115",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "modules/templates/CERT/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "727"
},
{
"name": "CSS",
"bytes": "3336714"
},
{
"name": "HTML",
"bytes": "1369269"
},
{
"name": "JavaScript",
"bytes": "20093511"
},
{
"name": "NSIS",
"bytes": "3934"
},
{
"name": "PHP",
"bytes": "15220"
},
{
"name": "Python",
"bytes": "31303565"
},
{
"name": "Ruby",
"bytes": "8291"
},
{
"name": "Shell",
"bytes": "5059"
},
{
"name": "XSLT",
"bytes": "3208049"
}
],
"symlink_target": ""
}
|
from .identifier import Identifier
from .declaration import Declaration
from .vardeclarator import VarDeclarator
from .parameter import Parameter
from .functiondef import FunctionDef
|
{
"content_hash": "d03482ee7415bdc8975458d50d83b153",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 40,
"avg_line_length": 36.6,
"alnum_prop": 0.8633879781420765,
"repo_name": "andaviaco/tronido",
"id": "e0859f728458e6ae5d22f86b2fca49fbf53b5151",
"size": "183",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/syntax/declarations/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "16338"
},
{
"name": "Python",
"bytes": "74269"
}
],
"symlink_target": ""
}
|
import mock
from oslo_config import cfg
from mistral.actions import std_actions
from mistral.db.v2 import api as db_api
from mistral import exceptions as exc
from mistral.services import workbooks as wb_service
from mistral.tests.unit.engine import base
from mistral.workflow import states
# Use the set_default method to set the value; otherwise, in certain test cases
# the change in value is not permanent.
cfg.CONF.set_default('auth_enable', False, group='pecan')
SIMPLE_WORKBOOK = """
---
version: '2.0'
name: wb1
workflows:
wf1:
type: direct
tasks:
t1:
action: std.echo output="Task 1"
on-success:
- t2
t2:
action: std.echo output="Task 2"
on-success:
- t3
t3:
action: std.echo output="Task 3"
"""
WITH_ITEMS_WORKBOOK = """
---
version: '2.0'
name: wb3
workflows:
wf1:
type: direct
tasks:
t1:
with-items: i in <% list(range(0, 3)) %>
action: std.echo output="Task 1.<% $.i %>"
publish:
v1: <% $.t1 %>
on-success:
- t2
t2:
action: std.echo output="Task 2"
"""
WITH_ITEMS_WORKBOOK_CONCURRENCY = """
---
version: '2.0'
name: wb3
workflows:
wf1:
type: direct
tasks:
t1:
with-items: i in <% list(range(0, 4)) %>
action: std.echo output="Task 1.<% $.i %>"
concurrency: 2
publish:
v1: <% $.t1 %>
on-success:
- t2
t2:
action: std.echo output="Task 2"
"""
JOIN_WORKBOOK = """
---
version: '2.0'
name: wb1
workflows:
wf1:
type: direct
tasks:
t1:
action: std.echo output="Task 1"
on-success:
- t3
t2:
action: std.echo output="Task 2"
on-success:
- t3
t3:
action: std.echo output="Task 3"
join: all
"""
class DirectWorkflowRerunTest(base.EngineTestCase):
@mock.patch.object(
std_actions.EchoAction,
'run',
mock.MagicMock(
side_effect=[
'Task 1', # Mock task1 success for initial run.
exc.ActionException(), # Mock task2 exception for initial run.
'Task 2', # Mock task2 success for rerun.
'Task 3' # Mock task3 success.
]
)
)
def test_rerun(self):
wb_service.create_workbook_v2(SIMPLE_WORKBOOK)
# Run workflow and fail task.
wf_ex = self.engine.start_workflow('wb1.wf1', {})
self._await(lambda: self.is_execution_error(wf_ex.id))
wf_ex = db_api.get_workflow_execution(wf_ex.id)
self.assertEqual(states.ERROR, wf_ex.state)
self.assertEqual(2, len(wf_ex.task_executions))
task_1_ex = self._assert_single_item(wf_ex.task_executions, name='t1')
task_2_ex = self._assert_single_item(wf_ex.task_executions, name='t2')
self.assertEqual(states.SUCCESS, task_1_ex.state)
self.assertEqual(states.ERROR, task_2_ex.state)
# Resume workflow and re-run failed task.
self.engine.rerun_workflow(wf_ex.id, task_2_ex.id)
wf_ex = db_api.get_workflow_execution(wf_ex.id)
self.assertEqual(states.RUNNING, wf_ex.state)
# Wait for the workflow to succeed.
self._await(lambda: self.is_execution_success(wf_ex.id))
wf_ex = db_api.get_workflow_execution(wf_ex.id)
self.assertEqual(states.SUCCESS, wf_ex.state)
self.assertEqual(3, len(wf_ex.task_executions))
task_1_ex = self._assert_single_item(wf_ex.task_executions, name='t1')
task_2_ex = self._assert_single_item(wf_ex.task_executions, name='t2')
task_3_ex = self._assert_single_item(wf_ex.task_executions, name='t3')
# Check action executions of task 1.
self.assertEqual(states.SUCCESS, task_1_ex.state)
task_1_action_exs = db_api.get_action_executions(
task_execution_id=task_1_ex.id)
self.assertEqual(1, len(task_1_action_exs))
self.assertEqual(states.SUCCESS, task_1_action_exs[0].state)
# Check action executions of task 2.
self.assertEqual(states.SUCCESS, task_2_ex.state)
task_2_action_exs = db_api.get_action_executions(
task_execution_id=task_2_ex.id)
self.assertEqual(2, len(task_2_action_exs))
self.assertEqual(states.ERROR, task_2_action_exs[0].state)
self.assertEqual(states.SUCCESS, task_2_action_exs[1].state)
# Check action executions of task 3.
self.assertEqual(states.SUCCESS, task_3_ex.state)
task_3_action_exs = db_api.get_action_executions(
task_execution_id=task_3_ex.id)
self.assertEqual(1, len(task_3_action_exs))
self.assertEqual(states.SUCCESS, task_3_action_exs[0].state)
@mock.patch.object(
std_actions.EchoAction,
'run',
mock.MagicMock(
side_effect=[
'Task 1', # Mock task1 success for initial run.
exc.ActionException() # Mock task2 exception for initial run.
]
)
)
def test_rerun_from_prev_step(self):
wb_service.create_workbook_v2(SIMPLE_WORKBOOK)
# Run workflow and fail task.
wf_ex = self.engine.start_workflow('wb1.wf1', {})
self._await(lambda: self.is_execution_error(wf_ex.id))
wf_ex = db_api.get_workflow_execution(wf_ex.id)
self.assertEqual(states.ERROR, wf_ex.state)
self.assertEqual(2, len(wf_ex.task_executions))
task_1_ex = self._assert_single_item(wf_ex.task_executions, name='t1')
task_2_ex = self._assert_single_item(wf_ex.task_executions, name='t2')
self.assertEqual(states.SUCCESS, task_1_ex.state)
self.assertEqual(states.ERROR, task_2_ex.state)
# Resume workflow and re-run failed task.
e = self.assertRaises(
exc.EngineException,
self.engine.rerun_workflow,
wf_ex.id,
task_1_ex.id
)
self.assertIn('not supported', str(e))
@mock.patch.object(
std_actions.EchoAction,
'run',
mock.MagicMock(
side_effect=[
exc.ActionException(), # Mock task1 exception for initial run.
'Task 1.1', # Mock task1 success for initial run.
exc.ActionException(), # Mock task1 exception for initial run.
'Task 1.0', # Mock task1 success for rerun.
'Task 1.2', # Mock task1 success for rerun.
'Task 2' # Mock task2 success.
]
)
)
def test_rerun_with_items(self):
wb_service.create_workbook_v2(WITH_ITEMS_WORKBOOK)
# Run workflow and fail task.
wf_ex = self.engine.start_workflow('wb3.wf1', {})
self._await(lambda: self.is_execution_error(wf_ex.id))
wf_ex = db_api.get_workflow_execution(wf_ex.id)
self.assertEqual(states.ERROR, wf_ex.state)
self.assertEqual(1, len(wf_ex.task_executions))
task_1_ex = self._assert_single_item(wf_ex.task_executions, name='t1')
self.assertEqual(states.ERROR, task_1_ex.state)
task_1_action_exs = db_api.get_action_executions(
task_execution_id=task_1_ex.id)
self.assertEqual(3, len(task_1_action_exs))
# Resume workflow and re-run failed task.
self.engine.rerun_workflow(wf_ex.id, task_1_ex.id, reset=False)
wf_ex = db_api.get_workflow_execution(wf_ex.id)
self.assertEqual(states.RUNNING, wf_ex.state)
self._await(lambda: self.is_execution_success(wf_ex.id), delay=10)
wf_ex = db_api.get_workflow_execution(wf_ex.id)
self.assertEqual(states.SUCCESS, wf_ex.state)
self.assertEqual(2, len(wf_ex.task_executions))
task_1_ex = self._assert_single_item(wf_ex.task_executions, name='t1')
task_2_ex = self._assert_single_item(wf_ex.task_executions, name='t2')
# Check action executions of task 1.
self.assertEqual(states.SUCCESS, task_1_ex.state)
task_1_action_exs = db_api.get_action_executions(
task_execution_id=task_1_ex.id)
# The single action execution that succeeded should not re-run.
self.assertEqual(5, len(task_1_action_exs))
self.assertListEqual(['Task 1.0', 'Task 1.1', 'Task 1.2'],
task_1_ex.published.get('v1'))
# Check action executions of task 2.
self.assertEqual(states.SUCCESS, task_2_ex.state)
task_2_action_exs = db_api.get_action_executions(
task_execution_id=task_2_ex.id)
self.assertEqual(1, len(task_2_action_exs))
@mock.patch.object(
std_actions.EchoAction,
'run',
mock.MagicMock(
side_effect=[
exc.ActionException(), # Mock task1 exception for initial run.
'Task 1.1', # Mock task1 success for initial run.
exc.ActionException(), # Mock task1 exception for initial run.
'Task 1.3', # Mock task1 success for initial run.
'Task 1.0', # Mock task1 success for rerun.
'Task 1.2', # Mock task1 success for rerun.
'Task 2' # Mock task2 success.
]
)
)
def test_rerun_with_items_concurrency(self):
wb_service.create_workbook_v2(WITH_ITEMS_WORKBOOK_CONCURRENCY)
# Run workflow and fail task.
wf_ex = self.engine.start_workflow('wb3.wf1', {})
self._await(lambda: self.is_execution_error(wf_ex.id))
wf_ex = db_api.get_workflow_execution(wf_ex.id)
self.assertEqual(states.ERROR, wf_ex.state)
self.assertEqual(1, len(wf_ex.task_executions))
task_1_ex = self._assert_single_item(wf_ex.task_executions, name='t1')
self.assertEqual(states.ERROR, task_1_ex.state)
task_1_action_exs = db_api.get_action_executions(
task_execution_id=task_1_ex.id
)
self.assertEqual(4, len(task_1_action_exs))
# Resume workflow and re-run failed task.
self.engine.rerun_workflow(wf_ex.id, task_1_ex.id, reset=False)
wf_ex = db_api.get_workflow_execution(wf_ex.id)
self.assertEqual(states.RUNNING, wf_ex.state)
self._await(lambda: self.is_execution_success(wf_ex.id), delay=10)
wf_ex = db_api.get_workflow_execution(wf_ex.id)
self.assertEqual(states.SUCCESS, wf_ex.state)
self.assertEqual(2, len(wf_ex.task_executions))
task_1_ex = self._assert_single_item(wf_ex.task_executions, name='t1')
task_2_ex = self._assert_single_item(wf_ex.task_executions, name='t2')
# Check action executions of task 1.
self.assertEqual(states.SUCCESS, task_1_ex.state)
task_1_action_exs = db_api.get_action_executions(
task_execution_id=task_1_ex.id)
# The action executions that succeeded should not re-run.
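        # 4 action executions from the initial run (2 failed, 2 succeeded)
        # plus 2 re-runs of the failed with-items entries.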
self.assertEqual(6, len(task_1_action_exs))
self.assertListEqual(['Task 1.0', 'Task 1.1', 'Task 1.2', 'Task 1.3'],
task_1_ex.published.get('v1'))
# Check action executions of task 2.
self.assertEqual(states.SUCCESS, task_2_ex.state)
task_2_action_exs = db_api.get_action_executions(
task_execution_id=task_2_ex.id)
self.assertEqual(1, len(task_2_action_exs))
@mock.patch.object(
std_actions.EchoAction,
'run',
mock.MagicMock(
side_effect=[
'Task 1', # Mock task1 success for initial run.
'Task 2', # Mock task2 success for initial run.
exc.ActionException(), # Mock task3 exception for initial run.
'Task 3' # Mock task3 success for rerun.
]
)
)
def test_rerun_on_join_task(self):
wb_service.create_workbook_v2(JOIN_WORKBOOK)
# Run workflow and fail task.
wf_ex = self.engine.start_workflow('wb1.wf1', {})
wf_ex = db_api.get_workflow_execution(wf_ex.id)
self._await(lambda: self.is_execution_error(wf_ex.id))
wf_ex = db_api.get_workflow_execution(wf_ex.id)
self.assertEqual(states.ERROR, wf_ex.state)
self.assertEqual(3, len(wf_ex.task_executions))
task_1_ex = self._assert_single_item(wf_ex.task_executions, name='t1')
task_2_ex = self._assert_single_item(wf_ex.task_executions, name='t2')
task_3_ex = self._assert_single_item(wf_ex.task_executions, name='t3')
self.assertEqual(states.SUCCESS, task_1_ex.state)
self.assertEqual(states.SUCCESS, task_2_ex.state)
self.assertEqual(states.ERROR, task_3_ex.state)
# Resume workflow and re-run failed task.
self.engine.rerun_workflow(wf_ex.id, task_3_ex.id)
wf_ex = db_api.get_workflow_execution(wf_ex.id)
self.assertEqual(states.RUNNING, wf_ex.state)
# Wait for the workflow to succeed.
self._await(lambda: self.is_execution_success(wf_ex.id))
wf_ex = db_api.get_workflow_execution(wf_ex.id)
self.assertEqual(states.SUCCESS, wf_ex.state)
self.assertEqual(3, len(wf_ex.task_executions))
task_1_ex = self._assert_single_item(wf_ex.task_executions, name='t1')
task_2_ex = self._assert_single_item(wf_ex.task_executions, name='t2')
task_3_ex = self._assert_single_item(wf_ex.task_executions, name='t3')
# Check action executions of task 1.
task_1_action_exs = db_api.get_action_executions(
task_execution_id=task_1_ex.id)
self.assertEqual(1, len(task_1_action_exs))
self.assertEqual(states.SUCCESS, task_1_action_exs[0].state)
# Check action executions of task 2.
task_2_action_exs = db_api.get_action_executions(
task_execution_id=task_2_ex.id)
self.assertEqual(1, len(task_2_action_exs))
self.assertEqual(states.SUCCESS, task_2_action_exs[0].state)
# Check action executions of task 3.
        task_3_action_exs = db_api.get_action_executions(
            task_execution_id=task_3_ex.id)
self.assertEqual(2, len(task_3_action_exs))
self.assertEqual(states.ERROR, task_3_action_exs[0].state)
self.assertEqual(states.SUCCESS, task_3_action_exs[1].state)
@mock.patch.object(
std_actions.EchoAction,
'run',
mock.MagicMock(
side_effect=[
exc.ActionException(), # Mock task1 exception for initial run.
exc.ActionException(), # Mock task2 exception for initial run.
                'Task 1', # Mock task1 success for rerun.
'Task 2', # Mock task2 success for rerun.
'Task 3' # Mock task3 success.
]
)
)
def test_rerun_join_with_branch_errors(self):
wb_service.create_workbook_v2(JOIN_WORKBOOK)
# Run workflow and fail task.
wf_ex = self.engine.start_workflow('wb1.wf1', {})
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_1_ex = self._assert_single_item(wf_ex.task_executions, name='t1')
task_2_ex = self._assert_single_item(wf_ex.task_executions, name='t2')
self._await(lambda: self.is_task_in_state(task_1_ex.id, states.ERROR))
self._await(lambda: self.is_task_in_state(task_2_ex.id, states.ERROR))
wf_ex = db_api.get_workflow_execution(wf_ex.id)
self.assertEqual(states.ERROR, wf_ex.state)
self.assertEqual(2, len(wf_ex.task_executions))
task_1_ex = self._assert_single_item(wf_ex.task_executions, name='t1')
task_2_ex = self._assert_single_item(wf_ex.task_executions, name='t2')
self.assertEqual(states.ERROR, task_1_ex.state)
self.assertEqual(states.ERROR, task_2_ex.state)
# Resume workflow and re-run failed task.
self.engine.rerun_workflow(wf_ex.id, task_1_ex.id)
wf_ex = db_api.get_workflow_execution(wf_ex.id)
self.assertEqual(states.RUNNING, wf_ex.state)
# Wait for the task to succeed.
task_1_ex = self._assert_single_item(wf_ex.task_executions, name='t1')
self._await(lambda: self.is_task_in_state(task_1_ex.id,
states.SUCCESS))
wf_ex = db_api.get_workflow_execution(wf_ex.id)
self.assertEqual(states.RUNNING, wf_ex.state)
self.assertEqual(3, len(wf_ex.task_executions))
task_1_ex = self._assert_single_item(wf_ex.task_executions, name='t1')
task_2_ex = self._assert_single_item(wf_ex.task_executions, name='t2')
task_3_ex = self._assert_single_item(wf_ex.task_executions, name='t3')
self.assertEqual(states.SUCCESS, task_1_ex.state)
self.assertEqual(states.ERROR, task_2_ex.state)
self.assertEqual(states.WAITING, task_3_ex.state)
# Resume workflow and re-run failed task.
self.engine.rerun_workflow(wf_ex.id, task_2_ex.id)
wf_ex = db_api.get_workflow_execution(wf_ex.id)
self.assertEqual(states.RUNNING, wf_ex.state)
# Wait for the workflow to succeed.
self._await(lambda: self.is_execution_success(wf_ex.id))
wf_ex = db_api.get_workflow_execution(wf_ex.id)
self.assertEqual(states.SUCCESS, wf_ex.state)
self.assertEqual(3, len(wf_ex.task_executions))
task_1_ex = self._assert_single_item(wf_ex.task_executions, name='t1')
task_2_ex = self._assert_single_item(wf_ex.task_executions, name='t2')
task_3_ex = self._assert_single_item(wf_ex.task_executions, name='t3')
# Check action executions of task 1.
task_1_action_exs = db_api.get_action_executions(
task_execution_id=task_1_ex.id)
self.assertEqual(2, len(task_1_action_exs))
self.assertEqual(states.ERROR, task_1_action_exs[0].state)
self.assertEqual(states.SUCCESS, task_1_action_exs[1].state)
# Check action executions of task 2.
task_2_action_exs = db_api.get_action_executions(
task_execution_id=task_2_ex.id)
self.assertEqual(2, len(task_2_action_exs))
self.assertEqual(states.ERROR, task_2_action_exs[0].state)
self.assertEqual(states.SUCCESS, task_2_action_exs[1].state)
# Check action executions of task 3.
        task_3_action_exs = db_api.get_action_executions(
            task_execution_id=task_3_ex.id)
self.assertEqual(1, len(task_3_action_exs))
self.assertEqual(states.SUCCESS, task_3_action_exs[0].state)
|
{
"content_hash": "efca41415576e8a88002efcf780e9fd1",
"timestamp": "",
"source": "github",
"line_count": 519,
"max_line_length": 79,
"avg_line_length": 36.190751445086704,
"alnum_prop": 0.5993717723473354,
"repo_name": "dennybaa/mistral",
"id": "d9a5cd58ba40677d9555d103da3890bbe59d9233",
"size": "19390",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mistral/tests/unit/engine/test_direct_workflow_rerun.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "951"
},
{
"name": "Python",
"bytes": "1037769"
},
{
"name": "Shell",
"bytes": "18657"
}
],
"symlink_target": ""
}
|
import tornado
from tornado.websocket import WebSocketHandler
from tornado.web import RequestHandler, Application, url
import tornadis
clients = []
class GetHandler(RequestHandler):
@tornado.gen.coroutine
def get(self):
self.render("websocket.html")
class WSHandler(WebSocketHandler):
@tornado.gen.coroutine
def initialize(self):
self.redis = tornadis.Client()
loop = tornado.ioloop.IOLoop.current()
loop.add_callback(self.watch_redis)
@tornado.gen.coroutine
def watch_redis(self):
while True:
response = yield self.redis.call('BLPOP', 'ws-queue', 0)
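            # BLPOP blocks until an item is pushed to the queue; the reply is
            # a (key, value) pair, so response[1] is the message payload.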
for client in clients:
client.write_message(response[1])
def open(self, *args):
clients.append(self)
@tornado.gen.coroutine
def on_message(self, message):
yield self.redis.call('LPUSH', 'ws-queue', message)
def on_close(self):
clients.remove(self)
app = Application([
url(r"/", GetHandler),
url(r"/ws", WSHandler)
])
if __name__ == '__main__':
app.listen(8888)
tornado.ioloop.IOLoop.current().start()
|
{
"content_hash": "343b780e14c7b39e5c557c9f0dec4a53",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 68,
"avg_line_length": 22.54,
"alnum_prop": 0.639751552795031,
"repo_name": "thefab/tornadis",
"id": "edea9a9466423f02dd4923d271d9f6b83c4773f3",
"size": "1127",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/websocket.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "850"
},
{
"name": "Python",
"bytes": "92753"
},
{
"name": "Shell",
"bytes": "76"
}
],
"symlink_target": ""
}
|
import MSService
import MSMessage
import MSSession
import MSModule
import MSTaskManager
import time
import math
class MSModuleAccount(MSModule.Module):
def __init__(self, network, dbMgr, svrDispatcher, sesTransfer):
super(MSModuleAccount, self).__init__(MSService.SERVICE_PLAYER_ACCOUNT, network, dbMgr, svrDispatcher, sesTransfer)
MSMessage.RequestManager.defineRequest(MSMessage.MSG_CS_REGISTER,
self.serviceID,
MSService.ACTION_PLAYER_REGISTER)
MSMessage.RequestManager.defineRequest(MSMessage.MSG_CS_LOGIN,
self.serviceID,
MSService.ACTION_PLAYER_LOGIN)
MSMessage.RequestManager.defineRequest(MSMessage.MSG_CS_COOKIE_LOGIN,
self.serviceID,
MSService.ACTION_PLAYER_COOKIE_LOGIN)
self.registerHandler(MSService.ACTION_PLAYER_REGISTER, self.handleRegister)
self.registerHandler(MSService.ACTION_PLAYER_LOGIN, self.handleLogin)
self.registerHandler(MSService.ACTION_PLAYER_COOKIE_LOGIN, self.handleCookieLogin)
svrDispatcher.register(MSService.SERVICE_PLAYER_ACCOUNT, self)
self.loginPlayer = []
# sub-class implement this method.
def onSessionOpen(self, session):
#self.transferCallback(session, MSModule.MSPLAYER_STAGE_REGISTER)
        print('session opened in Account %d' % session.getNetHandle())
# sub-class implement this method
def onSessionClose(self, session):
        print('session closed in Account %d' % session.getNetHandle())
def handleRegister(self, rqst, owner):
if self.sessions.get(owner, None) == None:
return
try:
# do account validation.
acc = rqst.data.get('acc',None)
acc = acc.encode('utf-8')
pwd = rqst.data.get('pwd', None)
pwd = pwd.encode('utf-8')
nick = rqst.data.get('nick', None)
for c in acc:
                if ord(c) > 127 or ord(c) == 32:
data = {'msg':'账号必须为英文数字组合'}
self.network.send(owner, MSMessage.packMessage(MSMessage.MSG_SC_REGISTER_DENIED, data))
return
for c in pwd:
                if ord(c) > 127 or ord(c) == 32:
data = {'msg':'密码必须为英文数字组合'}
self.network.send(owner, MSMessage.packMessage(MSMessage.MSG_SC_REGISTER_DENIED, data))
return
if nick == None:
data = {'msg':'昵称不能为空'}
self.network.send(owner, MSMessage.packMessage(MSMessage.MSG_SC_REGISTER_DENIED, data))
return
if len(acc) < 6 or len(acc) > 15:
data = {'msg':'账号长度不合法'}
self.network.send(owner, MSMessage.packMessage(MSMessage.MSG_SC_REGISTER_DENIED, data))
return
if len(pwd) < 6 or len(pwd) > 15:
data = {'msg':'密码长度不合法'}
self.network.send(owner, MSMessage.packMessage(MSMessage.MSG_SC_REGISTER_DENIED, data))
return
if len(nick) < 4 or len(nick) > 15:
data = {'msg':'昵称长度不合法'}
self.network.send(owner, MSMessage.packMessage(MSMessage.MSG_SC_REGISTER_DENIED, data))
return
# defensive operations on sql inception
result = self.database.registerPlayer(acc, pwd, nick)
if result == None:
data = {'msg':'账号或昵称重复'}
self.network.send(owner, MSMessage.packMessage(MSMessage.MSG_SC_REGISTER_DENIED, data))
else:
data = {'msg':'Success'}
self.network.send(owner, MSMessage.packMessage(MSMessage.MSG_SC_REGISTER_CONFIRM, data))
except:
data = {'msg':'注册失败, 未知错误'}
self.network.send(owner, MSMessage.packMessage(MSMessage.MSG_SC_REGISTER_DENIED, data))
def handleLogin(self, rqst, owner):
ses = self.sessions.get(owner, None)
if ses == None:
return
try:
# do account validation.
acc = rqst.data.get('acc',None)
pwd = rqst.data.get('pwd', None)
if acc == None or pwd == None:
data = {'msg':'账号或密码不能为空'}
self.network.send(owner, MSMessage.packMessage(MSMessage.MSG_SC_LOGIN_DENIED, data))
return
if len(acc) < 6 or len(acc) > 15:
data = {'msg':'账号长度不合法'}
self.network.send(owner, MSMessage.packMessage(MSMessage.MSG_SC_LOGIN_DENIED, data))
return
if len(pwd) < 6 or len(pwd) > 15:
data = {'msg':'密码长度不合法'}
self.network.send(owner, MSMessage.packMessage(MSMessage.MSG_SC_LOGIN_DENIED, data))
return
#
result = self.database.checkPlayer(acc, pwd)
if result == None:
data = {'msg':'密码不匹配'}
self.network.send(owner, MSMessage.packMessage(MSMessage.MSG_SC_LOGIN_DENIED, data))
else:
for v in self.loginPlayer:
if v == result['_id']:
data = {'msg':'当前玩家已经在线'}
self.network.send(owner, MSMessage.packMessage(MSMessage.MSG_SC_LOGIN_DENIED, data))
return
self.onConfirmLogin(result, ses, owner)
        except:
            # Do not re-raise here: fall through and send a generic failure
            # message so the client still gets a LOGIN_DENIED response.
            data = {'msg':'登录失败, 未知错误'}  # "login failed, unknown error"
            self.network.send(owner, MSMessage.packMessage(MSMessage.MSG_SC_LOGIN_DENIED, data))
def handleCookieLogin(self, rqst, owner):
ses = self.sessions.get(owner, None)
if ses == None:
return
try:
# do account validation.
cookie = rqst.data['cookie']
result = self.database.checkCookie(cookie)
if result == None:
data = {'msg':'Cookie 已过期'}
self.network.send(owner, MSMessage.packMessage(MSMessage.MSG_SC_LOGIN_DENIED, data))
else:
for v in self.loginPlayer:
if v == result['_id']:
data = {'msg':'当前玩家已经在线'}
self.network.send(owner, MSMessage.packMessage(MSMessage.MSG_SC_LOGIN_DENIED, data))
return
self.onConfirmLogin(result, ses, owner)
        except:
            # Do not re-raise here: fall through and send a generic failure
            # message so the client still gets a LOGIN_DENIED response.
            data = {'msg':'登录失败, 未知错误'}  # "login failed, unknown error"
            self.network.send(owner, MSMessage.packMessage(MSMessage.MSG_SC_LOGIN_DENIED, data))
def onConfirmLogin(self, result, ses, owner):
data = {'msg':'Login accepted', 'pid':str(result['_id']), 'uid':result['uid'], 'nick':result['Nick'], 'cookie':result['Cookie']}
self.network.send(owner, MSMessage.packMessage(MSMessage.MSG_SC_LOGIN_CONFIRM, data))
ses.data['_id'] = result['_id']
ses.data['Nick'] = result['Nick']
ses.data['uid'] = result['uid']
ses.data['LastSpGenTime'] = result['LastSpGenTime']
self.loginPlayer.append(result['_id'])
# do spirit gen.
curTime = time.time()
minutes = (curTime - result['LastSpGenTime'])/60
genSp = math.floor(minutes/5)
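        # Spirit regenerates at 1 point per 5 minutes (the 300-second tasks
        # below); e.g. 23 offline minutes give genSp = floor(23 / 5) = 4.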
if genSp >= 1:
prop = self.database.fetchPlayerProperty(ses.data['_id'])
if prop['Spirit'] < 100:
if prop['Spirit'] + genSp >= 100:
genSp = 100 - prop['Spirit']
prop['Spirit'] = genSp + prop['Spirit']
self.database.updateSpiritGenTime(ses.data['_id'], curTime)
ses.data['LastSpGenTime'] = curTime
self.database.updatePlayerProperty(ses.data['_id'], prop)
else:
self.database.updateSpiritGenTime(ses.data['_id'], curTime)
ses.data['LastSpGenTime'] = curTime
ses.data['SpiritGenTick'] = MSTaskManager.TaskManager.addSustainTask(300, self.onGenSpirit, ses)
else:
ses.data['SpiritGenTick'] = MSTaskManager.TaskManager.addDelayTask(300 - (curTime - result['LastSpGenTime']), self.delayGenSpirit, ses)
self.transferCallback(ses, MSModule.MSPLAYER_STAGE_GAME_ENTRANCE)
def delayGenSpirit(self, ses):
self.onGenSpirit(ses)
ses.data['SpiritGenTick'] = MSTaskManager.TaskManager.addSustainTask(300, self.onGenSpirit, ses)
def onGenSpirit(self, ses):
try:
pid = ses.data['_id']
data = self.database.fetchPlayerProperty(pid)
if data['Spirit'] < 100 :
data['Spirit'] += 1
curTime = time.time()
self.database.updatePlayerProperty(pid, data)
self.database.updateSpiritGenTime(pid, curTime)
chance = self.database.getPlayerPurchaseSpiritChance(pid)
data['chance'] = chance
data['LastSpGenTime'] = curTime
self.network.send(ses.getNetHandle(), MSMessage.packMessage(MSMessage.MSG_SC_RESTORE_SPIRIT, {'Spirit':1, 'LastSpGenTime':curTime, 'chance':data['chance']}))
except:
            print('ses failed gen spirit %s' % (ses.data['Nick']))
def handlePlayerDisconnected(self, session):
_id = session.data.get('_id', None)
if _id != None:
self.loginPlayer.remove(_id)
MSTaskManager.TaskManager.cancel(session.data['SpiritGenTick'])
|
{
"content_hash": "59739b9b1760b559f76e3843d8cb093a",
"timestamp": "",
"source": "github",
"line_count": 216,
"max_line_length": 161,
"avg_line_length": 35.68055555555556,
"alnum_prop": 0.6923575969897495,
"repo_name": "liaow10/MetalStrike",
"id": "24f8b99faa1361ef4f449daf58f0afeb97da9d5f",
"size": "7984",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/MSModuleAccount.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "103905"
},
{
"name": "Shell",
"bytes": "128"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from netmiko.cisco import CiscoIosSSH
from netmiko.cisco import CiscoAsaSSH
from netmiko.cisco import CiscoNxosSSH
from netmiko.cisco import CiscoXrSSH
from netmiko.cisco import CiscoWlcSSH
from netmiko.arista import AristaSSH
from netmiko.hp import HPProcurveSSH, HPComwareSSH
from netmiko.huawei import HuaweiSSH
from netmiko.f5 import F5LtmSSH
from netmiko.juniper import JuniperSSH
from netmiko.brocade import BrocadeVdxSSH
from netmiko.avaya import AvayaVspSSH
from netmiko.avaya import AvayaErsSSH
# The keys of this dictionary are the supported device_types
CLASS_MAPPER = {
'cisco_ios' : CiscoIosSSH,
'cisco_xe' : CiscoIosSSH,
'cisco_asa' : CiscoAsaSSH,
'cisco_nxos' : CiscoNxosSSH,
'cisco_xr' : CiscoXrSSH,
'cisco_wlc_ssh' : CiscoWlcSSH,
'arista_eos' : AristaSSH,
'hp_procurve' : HPProcurveSSH,
'hp_comware' : HPComwareSSH,
'huawei' : HuaweiSSH,
'f5_ltm' : F5LtmSSH,
'juniper' : JuniperSSH,
'brocade_vdx' : BrocadeVdxSSH,
'avaya_vsp' : AvayaVspSSH,
'avaya_ers' : AvayaErsSSH,
}
platforms = list(CLASS_MAPPER.keys())
platforms.sort()
def ConnectHandler(*args, **kwargs):
'''
Factory function that selects the proper class and instantiates the object based on device_type
Returns the object
'''
ConnectionClass = ssh_dispatcher(kwargs['device_type'])
return ConnectionClass(*args, **kwargs)
def ssh_dispatcher(device_type):
'''
Select the class to be instantiated based on vendor/platform
'''
return CLASS_MAPPER[device_type]
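# A minimal usage sketch (the device parameters below are illustrative, not
# part of this module):
#
#     net_connect = ConnectHandler(device_type='cisco_ios', ip='192.0.2.1',
#                                  username='admin', password='secret')
#
# ssh_dispatcher() maps 'cisco_ios' to CiscoIosSSH via CLASS_MAPPER, and the
# remaining keyword arguments are passed through to that class's constructor.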
|
{
"content_hash": "563c5af4bcd0d340fc92792a9031d08c",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 99,
"avg_line_length": 29.836363636363636,
"alnum_prop": 0.704448507007922,
"repo_name": "mileswdavis/netmiko",
"id": "117d0f838a92a989a5cac71c93a0c44b12712652",
"size": "1641",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "netmiko/ssh_dispatcher.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "106626"
},
{
"name": "Shell",
"bytes": "1201"
}
],
"symlink_target": ""
}
|
import os
import time
import pandas as pd
import numpy as np
import tsam.timeseriesaggregation as tsam
def test_accuracyIndicators():
hoursPerPeriod = 24
noTypicalPeriods = 8
raw = pd.read_csv(
os.path.join(os.path.dirname(__file__), "..", "examples", "testdata.csv"),
index_col=0,
)
aggregation1 = tsam.TimeSeriesAggregation(
raw,
noTypicalPeriods=noTypicalPeriods,
hoursPerPeriod=hoursPerPeriod,
clusterMethod="hierarchical",
)
aggregation2 = tsam.TimeSeriesAggregation(
raw,
noTypicalPeriods=noTypicalPeriods,
hoursPerPeriod=hoursPerPeriod,
clusterMethod="hierarchical",
sortValues=True,
)
# make sure that the sum of the attribute specific RMSEs is smaller for the normal time series clustering than for
# the duration curve clustering
np.testing.assert_array_less(
aggregation1.accuracyIndicators().loc[:, "RMSE"].sum(),
aggregation2.accuracyIndicators().loc[:, "RMSE"].sum(),
)
# make sure that the sum of the attribute specific duration curve RMSEs is smaller for the duration curve
# clustering than for the normal time series clustering
np.testing.assert_array_less(
aggregation2.accuracyIndicators().loc[:, "RMSE_duration"].sum(),
aggregation1.accuracyIndicators().loc[:, "RMSE_duration"].sum(),
)
# make sure that the same accounts for the total accuracy indicator
np.testing.assert_array_less(
aggregation1.totalAccuracyIndicators()["RMSE"],
aggregation2.totalAccuracyIndicators()["RMSE"],
)
# make sure that the same accounts for the total accuracy indicator
np.testing.assert_array_less(
aggregation2.totalAccuracyIndicators()["RMSE_duration"],
aggregation1.totalAccuracyIndicators()["RMSE_duration"],
)
if __name__ == "__main__":
test_accuracyIndicators()
|
{
"content_hash": "dbc3ca3fb55367ad2f1755296cd98497",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 118,
"avg_line_length": 30.61904761904762,
"alnum_prop": 0.6801451529289787,
"repo_name": "FZJ-IEK3-VSA/tsam",
"id": "4dc4a2a8f26b59cd9e2a3810a2d25de5109565e3",
"size": "1929",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_accuracyIndicators.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "159851"
}
],
"symlink_target": ""
}
|
from runtime import *
'''
evaluation order
'''
# https://github.com/PythonJS/PythonJS/issues/131
def main():
a = False and (False or True)
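	# 'and' short-circuits on the falsy left operand, so the right-hand
	# expression (False or True) is never evaluated and a stays False.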
assert( a==False )
main()
|
{
"content_hash": "008eba1e4a0a4c145b3bbb3136855654",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 49,
"avg_line_length": 16.8,
"alnum_prop": 0.6785714285714286,
"repo_name": "rusthon/Rusthon",
"id": "590b76fe57552943a3d9d9e64129034ab31ed6be",
"size": "168",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "regtests/lang/eval_order.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "23667"
},
{
"name": "HTML",
"bytes": "44433"
},
{
"name": "Perl",
"bytes": "66040"
},
{
"name": "Python",
"bytes": "379517"
},
{
"name": "Shell",
"bytes": "1124"
}
],
"symlink_target": ""
}
|
import re
from oslo_config import cfg
from oslo_log import log
from pecan import hooks
import six
from six.moves import http_client
from ironic.common import context
from ironic.common import policy
from ironic.conductor import rpcapi
from ironic.db import api as dbapi
LOG = log.getLogger(__name__)
CHECKED_DEPRECATED_POLICY_ARGS = False
def policy_deprecation_check():
global CHECKED_DEPRECATED_POLICY_ARGS
if not CHECKED_DEPRECATED_POLICY_ARGS:
enforcer = policy.get_enforcer()
substitution_dict = {
'user': 'user_id',
'domain_id': 'user_domain_id',
'domain_name': 'user_domain_id',
'tenant': 'project_name',
}
policy_rules = enforcer.file_rules.values()
for rule in policy_rules:
str_rule = six.text_type(rule)
for deprecated, replacement in substitution_dict.items():
if re.search(r'\b%s\b' % deprecated, str_rule):
LOG.warning(
"Deprecated argument %(deprecated)s is used in policy "
"file rule (%(rule)s), please use %(replacement)s "
"argument instead. The possibility to use deprecated "
"arguments will be removed in the Pike release.",
{'deprecated': deprecated, 'replacement': replacement,
'rule': str_rule})
if deprecated == 'domain_name':
LOG.warning(
"Please note that user_domain_id is an ID of the "
"user domain, while the deprecated domain_name is "
"its name. The policy rule has to be updated "
"accordingly.")
CHECKED_DEPRECATED_POLICY_ARGS = True
class ConfigHook(hooks.PecanHook):
"""Attach the config object to the request so controllers can get to it."""
def before(self, state):
state.request.cfg = cfg.CONF
class DBHook(hooks.PecanHook):
"""Attach the dbapi object to the request so controllers can get to it."""
def before(self, state):
state.request.dbapi = dbapi.get_instance()
class ContextHook(hooks.PecanHook):
"""Configures a request context and attaches it to the request."""
def __init__(self, public_api_routes):
self.public_api_routes = public_api_routes
super(ContextHook, self).__init__()
def before(self, state):
is_public_api = state.request.environ.get('is_public_api', False)
ctx = context.RequestContext.from_environ(state.request.environ,
is_public_api=is_public_api)
# Do not pass any token with context for noauth mode
if cfg.CONF.auth_strategy == 'noauth':
ctx.auth_token = None
creds = ctx.to_policy_values()
is_admin = policy.check('is_admin', creds, creds)
ctx.is_admin = is_admin
policy_deprecation_check()
state.request.context = ctx
def after(self, state):
if state.request.context == {}:
# An incorrect url path will not create RequestContext
return
# NOTE(lintan): RequestContext will generate a request_id if no one
# passing outside, so it always contain a request_id.
request_id = state.request.context.request_id
state.response.headers['Openstack-Request-Id'] = request_id
class RPCHook(hooks.PecanHook):
"""Attach the rpcapi object to the request so controllers can get to it."""
def before(self, state):
state.request.rpcapi = rpcapi.ConductorAPI()
class NoExceptionTracebackHook(hooks.PecanHook):
"""Workaround rpc.common: deserialize_remote_exception.
deserialize_remote_exception builds rpc exception traceback into error
message which is then sent to the client. Such behavior is a security
concern so this hook is aimed to cut-off traceback from the error message.
"""
# NOTE(max_lobur): 'after' hook used instead of 'on_error' because
# 'on_error' never fired for wsme+pecan pair. wsme @wsexpose decorator
# catches and handles all the errors, so 'on_error' dedicated for unhandled
# exceptions never fired.
def after(self, state):
# Omit empty body. Some errors may not have body at this level yet.
if not state.response.body:
return
# Do nothing if there is no error.
# Status codes in the range 200 (OK) to 399 (400 = BAD_REQUEST) are not
# an error.
if (http_client.OK <= state.response.status_int <
http_client.BAD_REQUEST):
return
json_body = state.response.json
# Do not remove traceback when traceback config is set
if cfg.CONF.debug_tracebacks_in_api:
return
faultstring = json_body.get('faultstring')
traceback_marker = 'Traceback (most recent call last):'
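        # e.g. 'Node not found\nTraceback (most recent call last):\n  ...'
        # is reduced to 'Node not found' by the cut below.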
if faultstring and traceback_marker in faultstring:
# Cut-off traceback.
faultstring = faultstring.split(traceback_marker, 1)[0]
# Remove trailing newlines and spaces if any.
json_body['faultstring'] = faultstring.rstrip()
# Replace the whole json. Cannot change original one because it's
# generated on the fly.
state.response.json = json_body
class PublicUrlHook(hooks.PecanHook):
"""Attach the right public_url to the request.
Attach the right public_url to the request so resources can create
links even when the API service is behind a proxy or SSL terminator.
"""
def before(self, state):
state.request.public_url = (cfg.CONF.api.public_endpoint or
state.request.host_url)
|
{
"content_hash": "85fdf65c2f22cf8c57af7edc2c334766",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 79,
"avg_line_length": 38.091503267973856,
"alnum_prop": 0.6177076183939602,
"repo_name": "pshchelo/ironic",
"id": "94e89d98f8f0235c87b20e459b2732257b371b2c",
"size": "6459",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "ironic/api/hooks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "349"
},
{
"name": "Python",
"bytes": "5675429"
},
{
"name": "Shell",
"bytes": "126426"
}
],
"symlink_target": ""
}
|
"""
PipelineLoader accepting a DataFrame as input.
"""
from functools import partial
from interface import implements
from numpy import (
ix_,
zeros,
)
from pandas import (
DataFrame,
DatetimeIndex,
Index,
Int64Index,
to_datetime
)
from zipline.lib.adjusted_array import AdjustedArray
from zipline.lib.adjustment import make_adjustment_from_labels
from zipline.utils.numpy_utils import (
as_column,
datetime64ns_dtype,
datetime64D_dtype
)
from .base import PipelineLoader
ADJUSTMENT_COLUMNS = Index([
'sid',
'value',
'kind',
'start_date',
'end_date',
'apply_date',
])
class DataFrameLoader(implements(PipelineLoader)):
"""
A PipelineLoader that reads its input from DataFrames.
Mostly useful for testing, but can also be used for real work if your data
fits in memory.
Parameters
----------
column : zipline.pipeline.data.BoundColumn
The column whose data is loadable by this loader.
baseline : pandas.DataFrame
A DataFrame with index of type DatetimeIndex and columns of type
Int64Index. Dates should be labelled with the first date on which a
value would be **available** to an algorithm. This means that OHLCV
data should generally be shifted back by a trading day before being
supplied to this class.
adjustments : pandas.DataFrame, default=None
A DataFrame with the following columns:
sid : int
value : any
kind : int (zipline.pipeline.loaders.frame.ADJUSTMENT_TYPES)
start_date : datetime64 (can be NaT)
end_date : datetime64 (must be set)
apply_date : datetime64 (must be set)
The default of None is interpreted as "no adjustments to the baseline".
"""
def __init__(self, column, baseline, adjustments=None):
self.column = column
if column.dtype in (datetime64ns_dtype, datetime64D_dtype):
            # For datetime columns, parse to timezone-aware UTC first and
            # then strip the timezone: tz-aware values cannot be cast
            # directly to a naive datetime64 dtype.
self.baseline = baseline.apply(to_datetime, utc=True)\
.apply(lambda x: x.dt.tz_localize(None))\
.astype(self.column.dtype).values
else:
self.baseline = baseline.values.astype(self.column.dtype)
self.dates = baseline.index
self.assets = baseline.columns
if adjustments is None:
adjustments = DataFrame(
index=DatetimeIndex([]),
columns=ADJUSTMENT_COLUMNS,
)
else:
# Ensure that columns are in the correct order.
adjustments = adjustments.reindex(ADJUSTMENT_COLUMNS, axis=1)
adjustments.sort_values(['apply_date', 'sid'], inplace=True)
self.adjustments = adjustments
self.adjustment_apply_dates = DatetimeIndex(adjustments.apply_date)
self.adjustment_end_dates = DatetimeIndex(adjustments.end_date)
self.adjustment_sids = Int64Index(adjustments.sid)
def format_adjustments(self, dates, assets):
"""
Build a dict of Adjustment objects in the format expected by
AdjustedArray.
Returns a dict of the form:
{
# Integer index into `dates` for the date on which we should
# apply the list of adjustments.
1 : [
Float64Multiply(first_row=2, last_row=4, col=3, value=0.5),
Float64Overwrite(first_row=3, last_row=5, col=1, value=2.0),
...
],
...
}
"""
make_adjustment = partial(make_adjustment_from_labels, dates, assets)
min_date, max_date = dates[[0, -1]]
# TODO: Consider porting this to Cython.
if len(self.adjustments) == 0:
return {}
# Mask for adjustments whose apply_dates are in the requested window of
# dates.
date_bounds = self.adjustment_apply_dates.slice_indexer(
min_date,
max_date,
)
dates_filter = zeros(len(self.adjustments), dtype='bool')
dates_filter[date_bounds] = True
# Ignore adjustments whose apply_date is in range, but whose end_date
# is out of range.
dates_filter &= (self.adjustment_end_dates >= min_date)
# Mask for adjustments whose sids are in the requested assets.
sids_filter = self.adjustment_sids.isin(assets.values)
adjustments_to_use = self.adjustments.loc[
dates_filter & sids_filter
].set_index('apply_date')
# For each apply_date on which we have an adjustment, compute
# the integer index of that adjustment's apply_date in `dates`.
# Then build a list of Adjustment objects for that apply_date.
# This logic relies on the sorting applied on the previous line.
out = {}
previous_apply_date = object()
for row in adjustments_to_use.itertuples():
# This expansion depends on the ordering of the DataFrame columns,
# defined above.
apply_date, sid, value, kind, start_date, end_date = row
if apply_date != previous_apply_date:
# Get the next apply date if no exact match.
row_loc = dates.get_loc(apply_date, method='bfill')
current_date_adjustments = out[row_loc] = []
previous_apply_date = apply_date
# Look up the approprate Adjustment constructor based on the value
# of `kind`.
current_date_adjustments.append(
make_adjustment(start_date, end_date, sid, kind, value)
)
return out
def load_adjusted_array(self, domain, columns, dates, sids, mask):
"""
Load data from our stored baseline.
"""
if len(columns) != 1:
raise ValueError(
"Can't load multiple columns with DataFrameLoader"
)
column = columns[0]
self._validate_input_column(column)
date_indexer = self.dates.get_indexer(dates)
assets_indexer = self.assets.get_indexer(sids)
# Boolean arrays with True on matched entries
good_dates = (date_indexer != -1)
good_assets = (assets_indexer != -1)
data = self.baseline[ix_(date_indexer, assets_indexer)]
mask = (good_assets & as_column(good_dates)) & mask
# Mask out requested columns/rows that didn't match.
data[~mask] = column.missing_value
return {
column: AdjustedArray(
# Pull out requested columns/rows from our baseline data.
data=data,
adjustments=self.format_adjustments(dates, sids),
missing_value=column.missing_value,
),
}
def _validate_input_column(self, column):
"""Make sure a passed column is our column.
"""
if column != self.column and column.unspecialize() != self.column:
raise ValueError("Can't load unknown column %s" % column)
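# A hypothetical usage sketch (the column and the data/trading_days/sids
# values are illustrative, not part of this module): build a baseline frame
# indexed by trading days with sid columns, then hand it to a Pipeline engine.
#
#     import pandas as pd
#     from zipline.pipeline.data import USEquityPricing
#
#     baseline = pd.DataFrame(data, index=trading_days, columns=sids)
#     loader = DataFrameLoader(USEquityPricing.close, baseline)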
|
{
"content_hash": "6e075d659ef7e212bb87288bd82c00cf",
"timestamp": "",
"source": "github",
"line_count": 199,
"max_line_length": 79,
"avg_line_length": 35.688442211055275,
"alnum_prop": 0.6064488876372852,
"repo_name": "humdings/zipline",
"id": "5a5b08dd64a37729665d140b7fce09460808b5de",
"size": "7102",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zipline/pipeline/loaders/frame.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Cython",
"bytes": "160078"
},
{
"name": "Python",
"bytes": "3371499"
},
{
"name": "Shell",
"bytes": "203"
}
],
"symlink_target": ""
}
|
"""Module implementing the master-side code.
This file only imports all LU's (and other classes) in order to re-export them
to clients of cmdlib.
"""
from ganeti.cmdlib.base import \
LogicalUnit, \
NoHooksLU, \
ResultWithJobs
from ganeti.cmdlib.cluster import \
LUClusterActivateMasterIp, \
LUClusterDeactivateMasterIp, \
LUClusterConfigQuery, \
LUClusterDestroy, \
LUClusterPostInit, \
LUClusterQuery, \
LUClusterRedistConf, \
LUClusterRename, \
LUClusterRepairDiskSizes, \
LUClusterSetParams, \
LUClusterRenewCrypto
from ganeti.cmdlib.cluster.verify import \
LUClusterVerify, \
LUClusterVerifyConfig, \
LUClusterVerifyGroup, \
LUClusterVerifyDisks
from ganeti.cmdlib.group import \
LUGroupAdd, \
LUGroupAssignNodes, \
LUGroupSetParams, \
LUGroupRemove, \
LUGroupRename, \
LUGroupEvacuate, \
LUGroupVerifyDisks
from ganeti.cmdlib.node import \
LUNodeAdd, \
LUNodeSetParams, \
LUNodePowercycle, \
LUNodeEvacuate, \
LUNodeMigrate, \
LUNodeModifyStorage, \
LUNodeQueryvols, \
LUNodeQueryStorage, \
LUNodeRemove, \
LURepairNodeStorage
from ganeti.cmdlib.instance import \
LUInstanceRename, \
LUInstanceRemove, \
LUInstanceMove, \
LUInstanceMultiAlloc, \
LUInstanceChangeGroup
from ganeti.cmdlib.instance_create import \
LUInstanceCreate
from ganeti.cmdlib.instance_storage import \
LUInstanceRecreateDisks, \
LUInstanceGrowDisk, \
LUInstanceReplaceDisks, \
LUInstanceActivateDisks, \
LUInstanceDeactivateDisks
from ganeti.cmdlib.instance_migration import \
LUInstanceFailover, \
LUInstanceMigrate
from ganeti.cmdlib.instance_operation import \
LUInstanceStartup, \
LUInstanceShutdown, \
LUInstanceReinstall, \
LUInstanceReboot, \
LUInstanceConsole
from ganeti.cmdlib.instance_set_params import \
LUInstanceSetParams
from ganeti.cmdlib.instance_query import \
LUInstanceQueryData
from ganeti.cmdlib.backup import \
LUBackupPrepare, \
LUBackupExport, \
LUBackupRemove
from ganeti.cmdlib.query import \
LUQuery, \
LUQueryFields
from ganeti.cmdlib.operating_system import \
LUOsDiagnose
from ganeti.cmdlib.tags import \
LUTagsGet, \
LUTagsSearch, \
LUTagsSet, \
LUTagsDel
from ganeti.cmdlib.network import \
LUNetworkAdd, \
LUNetworkRemove, \
LUNetworkSetParams, \
LUNetworkConnect, \
LUNetworkDisconnect
from ganeti.cmdlib.misc import \
LUOobCommand, \
LUExtStorageDiagnose, \
LURestrictedCommand
from ganeti.cmdlib.test import \
LUTestDelay, \
LUTestJqueue, \
LUTestAllocator
|
{
"content_hash": "995ecc25acb918aa4aac49e38fb48fc7",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 78,
"avg_line_length": 24.533980582524272,
"alnum_prop": 0.7696873763355758,
"repo_name": "dimara/ganeti",
"id": "ee024177bb75f93b597b476eab74f8c929eb4642",
"size": "3911",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/cmdlib/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Haskell",
"bytes": "2409763"
},
{
"name": "Python",
"bytes": "5842471"
},
{
"name": "Shell",
"bytes": "110549"
}
],
"symlink_target": ""
}
|
from copy import deepcopy
import torch
import progressbar
import math
import numpy as np
from torch.multiprocessing import Lock, Manager
from .PER_src import per_operator
def cut_data(seq, sizeSeq):
maxSeq = sizeSeq.max()
seq = seq[:, :maxSeq]
return seq
def beam_search(score_preds, nKeep, blankLabel):
T, P = score_preds.shape
beams = set([''])
pb_t_1 = {"": 1}
pnb_t_1 = {"": 0}
def getLastNumber(b):
return int(b.split(',')[-1])
for t in range(T):
nextBeams = set()
pb_t = {}
pnb_t = {}
for i_beam, b in enumerate(beams):
if b not in pb_t:
pb_t[b] = 0
pnb_t[b] = 0
if len(b) > 0:
pnb_t[b] += pnb_t_1[b] * score_preds[t, getLastNumber(b)]
pb_t[b] = (pnb_t_1[b] + pb_t_1[b]) * score_preds[t, blankLabel]
nextBeams.add(b)
for c in range(P):
if c == blankLabel:
continue
b_ = b + "," + str(c)
if b_ not in pb_t:
pb_t[b_] = 0
pnb_t[b_] = 0
if b != "" and getLastNumber(b) == c:
pnb_t[b_] += pb_t_1[b] * score_preds[t, c]
else:
pnb_t[b_] += (pb_t_1[b] + pnb_t_1[b]) * score_preds[t, c]
nextBeams.add(b_)
allPreds = [(pb_t[b] + pnb_t[b], b) for b in nextBeams]
allPreds.sort(reverse=True)
beams = [x[1] for x in allPreds[:nKeep]]
pb_t_1 = deepcopy(pb_t)
pnb_t_1 = deepcopy(pnb_t)
output = []
for score, x in allPreds[:nKeep]:
output.append((score, [int(y) for y in x.split(',') if len(y) > 0]))
return output
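# A minimal usage sketch for beam_search (the posteriors below are synthetic
# and the blank label is assumed to be index 0):
#
#     import numpy as np
#     probs = np.array([[0.6, 0.3, 0.1],
#                       [0.5, 0.1, 0.4]])   # T=2 frames, P=3 labels
#     hyps = beam_search(probs, nKeep=3, blankLabel=0)
#     # hyps[0] is the (score, labels) pair of the best hypothesis.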
def get_seq_PER(seqLabels, detectedLabels):
return per_operator.needleman_wunsch_align_score(seqLabels, detectedLabels,
-1, -1, 0,
normalize=True)
def prepare_data(data):
seq, sizeSeq, phone, sizePhone = data
seq = seq.cuda(non_blocking=True)
phone = phone.cuda(non_blocking=True)
sizeSeq = sizeSeq.cuda(non_blocking=True).view(-1)
sizePhone = sizePhone.cuda(non_blocking=True).view(-1)
seq = cut_data(seq, sizeSeq)
return seq, sizeSeq, phone, sizePhone
def get_local_per(pool, mutex, p_, gt_seq, BLANK_LABEL):
predSeq = np.array(beam_search(p_, 10, BLANK_LABEL)[0][1], dtype=np.int32)
per = get_seq_PER(gt_seq, predSeq)
mutex.acquire()
pool.append(per)
mutex.release()
def per_step(valLoader,
model,
criterion,
downsamplingFactor):
model.eval()
criterion.eval()
avgPER = 0
varPER = 0
nItems = 0
print("Starting the PER computation through beam search")
bar = progressbar.ProgressBar(maxval=len(valLoader))
bar.start()
for index, data in enumerate(valLoader):
bar.update(index)
with torch.no_grad():
seq, sizeSeq, phone, sizePhone = prepare_data(data)
c_feature = model(seq)
sizeSeq = sizeSeq / downsamplingFactor
predictions = torch.nn.functional.softmax(criterion.getPrediction(c_feature),
dim=2).cpu()
phone = phone.cpu()
sizeSeq = sizeSeq.cpu()
sizePhone = sizePhone.cpu()
mutex = Lock()
manager = Manager()
poolData = manager.list()
processes = []
for b in range(sizeSeq.size(0)):
l_ = min(sizeSeq[b] // 4, predictions.size(1))
s_ = sizePhone[b]
p = torch.multiprocessing.Process(target=get_local_per,
args=(poolData, mutex, predictions[b, :l_].view(l_, -1).numpy(),
phone[b, :s_].view(-1).numpy().astype(np.int32), criterion.BLANK_LABEL))
p.start()
processes.append(p)
for p in processes:
p.join()
avgPER += sum([x for x in poolData])
varPER += sum([x*x for x in poolData])
nItems += len(poolData)
bar.finish()
avgPER /= nItems
varPER /= nItems
varPER -= avgPER**2
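    # varPER is now E[per^2] - (E[per])^2, i.e. the variance of the
    # per-utterance PER; its square root below is the standard deviation.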
print(f"Average PER {avgPER}")
print(f"Standard deviation PER {math.sqrt(varPER)}")
|
{
"content_hash": "a4cfb86b087bb627ba03913980252c51",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 128,
"avg_line_length": 29.366013071895424,
"alnum_prop": 0.5036723792566214,
"repo_name": "facebookresearch/libri-light",
"id": "93eba74eb19daa1e225827b121dfabbb011fb63f",
"size": "4564",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "eval/PER_src/seq_alignment.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Cython",
"bytes": "3385"
},
{
"name": "Python",
"bytes": "207420"
},
{
"name": "Shell",
"bytes": "396"
}
],
"symlink_target": ""
}
|
try:
import xmlrpclib
pypi = xmlrpclib.ServerProxy('https://pypi.python.org/pypi')
except:
import xmlrpc.client
pypi = xmlrpc.client.ServerProxy('https://pypi.python.org/pypi')
import pip
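# NOTE: pip.get_installed_distributions() and pip.main() were removed in
# pip 10; this script assumes an older pip release.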
packages_to_upgrade = []
for dist in pip.get_installed_distributions():
try:
available = pypi.package_releases(dist.project_name)
if not available:
# Try to capitalize pkg name
available = pypi.package_releases(dist.project_name.capitalize())
except:
available = 'X'
    if not available:
        # No releases found at pypi; pip can't upgrade the package.
        continue
    elif available[0] == 'X':
        # pypi lookup failed; list the package anyway and let pip decide.
        msg = '(pypi not available)'
    elif available[0] != dist.version:
        # NOTE: this is a plain string comparison, so versions such as
        # '10.0' and '9.0' can be misordered.
        if available[0] < dist.version:
            # Installed version is newer than the available version.
            continue
        msg = '%s available' % (available[0])
    else:
        # Latest version is already installed.
        continue
    pkg_info = '%s %9s' % (dist.project_name, dist.version)
    pkg_msg = '%40s %s' % (pkg_info, msg)
    print(pkg_msg)
    packages_to_upgrade.append(dist.project_name)
if packages_to_upgrade:
    try:
        input = raw_input  # Python 2 fallback, mirroring the xmlrpc import above
    except NameError:
        pass
    answer = input("\nDo you want to upgrade these packages to the latest version? (Y/n) ")
    if answer.strip().lower() in ('', 'y', 'yes'):
        for package in packages_to_upgrade:
            pip.main(['install', package, '--upgrade'])
else:
    print("\nAll packages are at the latest available version (or higher)")
|
{
"content_hash": "2df67cd82db0a22ce5f460c61659d73c",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 81,
"avg_line_length": 30.142857142857142,
"alnum_prop": 0.6296547054840894,
"repo_name": "bcarroll/authmgr",
"id": "33d2791650b8bf7218cc2ce40662516c618b582d",
"size": "1543",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pip-upgrade.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "2105"
},
{
"name": "C",
"bytes": "470753"
},
{
"name": "C++",
"bytes": "139524"
},
{
"name": "CSS",
"bytes": "19326"
},
{
"name": "HTML",
"bytes": "54046"
},
{
"name": "JavaScript",
"bytes": "221397"
},
{
"name": "Mako",
"bytes": "9524"
},
{
"name": "PowerShell",
"bytes": "8175"
},
{
"name": "Python",
"bytes": "13215056"
},
{
"name": "Tcl",
"bytes": "1295070"
}
],
"symlink_target": ""
}
|
from telemetry.page import page_test
class NoOp(page_test.PageTest):
def __init__(self):
super(NoOp, self).__init__()
def ValidateAndMeasurePage(self, page, tab, results):
pass
|
{
"content_hash": "3e025fbb554db266e334e8e799e91521",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 55,
"avg_line_length": 19.3,
"alnum_prop": 0.6787564766839378,
"repo_name": "ds-hwang/chromium-crosswalk",
"id": "09639105e57d79a9f98f5a4909b462efb5bd72f6",
"size": "356",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tools/perf/measurements/no_op.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from .constant import Constant
__NR_exit = Constant('__NR_exit',1)
__NR_fork = Constant('__NR_fork',2)
__NR_read = Constant('__NR_read',3)
__NR_write = Constant('__NR_write',4)
__NR_open = Constant('__NR_open',5)
__NR_close = Constant('__NR_close',6)
__NR_waitpid = Constant('__NR_waitpid',7)
__NR_creat = Constant('__NR_creat',8)
__NR_link = Constant('__NR_link',9)
__NR_unlink = Constant('__NR_unlink',10)
__NR_execve = Constant('__NR_execve',11)
__NR_chdir = Constant('__NR_chdir',12)
__NR_time = Constant('__NR_time',13)
__NR_mknod = Constant('__NR_mknod',14)
__NR_chmod = Constant('__NR_chmod',15)
__NR_lchown = Constant('__NR_lchown',16)
__NR_break = Constant('__NR_break',17)
__NR_oldstat = Constant('__NR_oldstat',18)
__NR_lseek = Constant('__NR_lseek',19)
__NR_getpid = Constant('__NR_getpid',20)
__NR_mount = Constant('__NR_mount',21)
__NR_umount = Constant('__NR_umount',22)
__NR_setuid = Constant('__NR_setuid',23)
__NR_getuid = Constant('__NR_getuid',24)
__NR_stime = Constant('__NR_stime',25)
__NR_ptrace = Constant('__NR_ptrace',26)
__NR_alarm = Constant('__NR_alarm',27)
__NR_oldfstat = Constant('__NR_oldfstat',28)
__NR_pause = Constant('__NR_pause',29)
__NR_utime = Constant('__NR_utime',30)
__NR_stty = Constant('__NR_stty',31)
__NR_gtty = Constant('__NR_gtty',32)
__NR_access = Constant('__NR_access',33)
__NR_nice = Constant('__NR_nice',34)
__NR_ftime = Constant('__NR_ftime',35)
__NR_sync = Constant('__NR_sync',36)
__NR_kill = Constant('__NR_kill',37)
__NR_rename = Constant('__NR_rename',38)
__NR_mkdir = Constant('__NR_mkdir',39)
__NR_rmdir = Constant('__NR_rmdir',40)
__NR_dup = Constant('__NR_dup',41)
__NR_pipe = Constant('__NR_pipe',42)
__NR_times = Constant('__NR_times',43)
__NR_prof = Constant('__NR_prof',44)
__NR_brk = Constant('__NR_brk',45)
__NR_setgid = Constant('__NR_setgid',46)
__NR_getgid = Constant('__NR_getgid',47)
__NR_signal = Constant('__NR_signal',48)
__NR_geteuid = Constant('__NR_geteuid',49)
__NR_getegid = Constant('__NR_getegid',50)
__NR_acct = Constant('__NR_acct',51)
__NR_umount2 = Constant('__NR_umount2',52)
__NR_lock = Constant('__NR_lock',53)
__NR_ioctl = Constant('__NR_ioctl',54)
__NR_fcntl = Constant('__NR_fcntl',55)
__NR_mpx = Constant('__NR_mpx',56)
__NR_setpgid = Constant('__NR_setpgid',57)
__NR_ulimit = Constant('__NR_ulimit',58)
__NR_oldolduname = Constant('__NR_oldolduname',59)
__NR_umask = Constant('__NR_umask',60)
__NR_chroot = Constant('__NR_chroot',61)
__NR_ustat = Constant('__NR_ustat',62)
__NR_dup2 = Constant('__NR_dup2',63)
__NR_getppid = Constant('__NR_getppid',64)
__NR_getpgrp = Constant('__NR_getpgrp',65)
__NR_setsid = Constant('__NR_setsid',66)
__NR_sigaction = Constant('__NR_sigaction',67)
__NR_sgetmask = Constant('__NR_sgetmask',68)
__NR_ssetmask = Constant('__NR_ssetmask',69)
__NR_setreuid = Constant('__NR_setreuid',70)
__NR_setregid = Constant('__NR_setregid',71)
__NR_sigsuspend = Constant('__NR_sigsuspend',72)
__NR_sigpending = Constant('__NR_sigpending',73)
__NR_sethostname = Constant('__NR_sethostname',74)
__NR_setrlimit = Constant('__NR_setrlimit',75)
__NR_getrlimit = Constant('__NR_getrlimit',76)
__NR_getrusage = Constant('__NR_getrusage',77)
__NR_gettimeofday = Constant('__NR_gettimeofday',78)
__NR_settimeofday = Constant('__NR_settimeofday',79)
__NR_getgroups = Constant('__NR_getgroups',80)
__NR_setgroups = Constant('__NR_setgroups',81)
__NR_select = Constant('__NR_select',82)
__NR_symlink = Constant('__NR_symlink',83)
__NR_oldlstat = Constant('__NR_oldlstat',84)
__NR_readlink = Constant('__NR_readlink',85)
__NR_uselib = Constant('__NR_uselib',86)
__NR_swapon = Constant('__NR_swapon',87)
__NR_reboot = Constant('__NR_reboot',88)
__NR_readdir = Constant('__NR_readdir',89)
__NR_mmap = Constant('__NR_mmap',90)
__NR_munmap = Constant('__NR_munmap',91)
__NR_truncate = Constant('__NR_truncate',92)
__NR_ftruncate = Constant('__NR_ftruncate',93)
__NR_fchmod = Constant('__NR_fchmod',94)
__NR_fchown = Constant('__NR_fchown',95)
__NR_getpriority = Constant('__NR_getpriority',96)
__NR_setpriority = Constant('__NR_setpriority',97)
__NR_profil = Constant('__NR_profil',98)
__NR_statfs = Constant('__NR_statfs',99)
__NR_fstatfs = Constant('__NR_fstatfs',100)
__NR_ioperm = Constant('__NR_ioperm',101)
__NR_socketcall = Constant('__NR_socketcall',102)
__NR_syslog = Constant('__NR_syslog',103)
__NR_setitimer = Constant('__NR_setitimer',104)
__NR_getitimer = Constant('__NR_getitimer',105)
__NR_stat = Constant('__NR_stat',106)
__NR_lstat = Constant('__NR_lstat',107)
__NR_fstat = Constant('__NR_fstat',108)
__NR_olduname = Constant('__NR_olduname',109)
__NR_iopl = Constant('__NR_iopl',110)
__NR_vhangup = Constant('__NR_vhangup',111)
__NR_idle = Constant('__NR_idle',112)
__NR_vm86 = Constant('__NR_vm86',113)
__NR_wait4 = Constant('__NR_wait4',114)
__NR_swapoff = Constant('__NR_swapoff',115)
__NR_sysinfo = Constant('__NR_sysinfo',116)
__NR_ipc = Constant('__NR_ipc',117)
__NR_fsync = Constant('__NR_fsync',118)
__NR_sigreturn = Constant('__NR_sigreturn',119)
__NR_clone = Constant('__NR_clone',120)
__NR_setdomainname = Constant('__NR_setdomainname',121)
__NR_uname = Constant('__NR_uname',122)
__NR_modify_ldt = Constant('__NR_modify_ldt',123)
__NR_adjtimex = Constant('__NR_adjtimex',124)
__NR_mprotect = Constant('__NR_mprotect',125)
__NR_sigprocmask = Constant('__NR_sigprocmask',126)
__NR_create_module = Constant('__NR_create_module',127)
__NR_init_module = Constant('__NR_init_module',128)
__NR_delete_module = Constant('__NR_delete_module',129)
__NR_get_kernel_syms = Constant('__NR_get_kernel_syms',130)
__NR_quotactl = Constant('__NR_quotactl',131)
__NR_getpgid = Constant('__NR_getpgid',132)
__NR_fchdir = Constant('__NR_fchdir',133)
__NR_bdflush = Constant('__NR_bdflush',134)
__NR_sysfs = Constant('__NR_sysfs',135)
__NR_personality = Constant('__NR_personality',136)
__NR_afs_syscall = Constant('__NR_afs_syscall',137)
__NR_setfsuid = Constant('__NR_setfsuid',138)
__NR_setfsgid = Constant('__NR_setfsgid',139)
__NR__llseek = Constant('__NR__llseek',140)
__NR_getdents = Constant('__NR_getdents',141)
__NR__newselect = Constant('__NR__newselect',142)
__NR_flock = Constant('__NR_flock',143)
__NR_msync = Constant('__NR_msync',144)
__NR_readv = Constant('__NR_readv',145)
__NR_writev = Constant('__NR_writev',146)
__NR_getsid = Constant('__NR_getsid',147)
__NR_fdatasync = Constant('__NR_fdatasync',148)
__NR__sysctl = Constant('__NR__sysctl',149)
__NR_mlock = Constant('__NR_mlock',150)
__NR_munlock = Constant('__NR_munlock',151)
__NR_mlockall = Constant('__NR_mlockall',152)
__NR_munlockall = Constant('__NR_munlockall',153)
__NR_sched_setparam = Constant('__NR_sched_setparam',154)
__NR_sched_getparam = Constant('__NR_sched_getparam',155)
__NR_sched_setscheduler = Constant('__NR_sched_setscheduler',156)
__NR_sched_getscheduler = Constant('__NR_sched_getscheduler',157)
__NR_sched_yield = Constant('__NR_sched_yield',158)
__NR_sched_get_priority_max = Constant('__NR_sched_get_priority_max',159)
__NR_sched_get_priority_min = Constant('__NR_sched_get_priority_min',160)
__NR_sched_rr_get_interval = Constant('__NR_sched_rr_get_interval',161)
__NR_nanosleep = Constant('__NR_nanosleep',162)
__NR_mremap = Constant('__NR_mremap',163)
__NR_setresuid = Constant('__NR_setresuid',164)
__NR_getresuid = Constant('__NR_getresuid',165)
__NR_query_module = Constant('__NR_query_module',166)
__NR_poll = Constant('__NR_poll',167)
__NR_nfsservctl = Constant('__NR_nfsservctl',168)
__NR_setresgid = Constant('__NR_setresgid',169)
__NR_getresgid = Constant('__NR_getresgid',170)
__NR_prctl = Constant('__NR_prctl',171)
__NR_rt_sigreturn = Constant('__NR_rt_sigreturn',172)
__NR_rt_sigaction = Constant('__NR_rt_sigaction',173)
__NR_rt_sigprocmask = Constant('__NR_rt_sigprocmask',174)
__NR_rt_sigpending = Constant('__NR_rt_sigpending',175)
__NR_rt_sigtimedwait = Constant('__NR_rt_sigtimedwait',176)
__NR_rt_sigqueueinfo = Constant('__NR_rt_sigqueueinfo',177)
__NR_rt_sigsuspend = Constant('__NR_rt_sigsuspend',178)
__NR_pread = Constant('__NR_pread',179)
__NR_pwrite = Constant('__NR_pwrite',180)
__NR_chown = Constant('__NR_chown',181)
__NR_getcwd = Constant('__NR_getcwd',182)
__NR_capget = Constant('__NR_capget',183)
__NR_capset = Constant('__NR_capset',184)
__NR_sigaltstack = Constant('__NR_sigaltstack',185)
__NR_sendfile = Constant('__NR_sendfile',186)
__NR_getpmsg = Constant('__NR_getpmsg',187)
__NR_putpmsg = Constant('__NR_putpmsg',188)
__NR_vfork = Constant('__NR_vfork',189)
__NR_ugetrlimit = Constant('__NR_ugetrlimit',190)
__NR_readahead = Constant('__NR_readahead',191)
__NR_pciconfig_read = Constant('__NR_pciconfig_read',198)
__NR_pciconfig_write = Constant('__NR_pciconfig_write',199)
__NR_pciconfig_iobase = Constant('__NR_pciconfig_iobase',200)
__NR_multiplexer = Constant('__NR_multiplexer',201)
__NR_getdents64 = Constant('__NR_getdents64',202)
__NR_pivot_root = Constant('__NR_pivot_root',203)
__NR_madvise = Constant('__NR_madvise',205)
__NR_mincore = Constant('__NR_mincore',206)
__NR_gettid = Constant('__NR_gettid',207)
__NR_tkill = Constant('__NR_tkill',208)
__NR_setxattr = Constant('__NR_setxattr',209)
__NR_lsetxattr = Constant('__NR_lsetxattr',210)
__NR_fsetxattr = Constant('__NR_fsetxattr',211)
__NR_getxattr = Constant('__NR_getxattr',212)
__NR_lgetxattr = Constant('__NR_lgetxattr',213)
__NR_fgetxattr = Constant('__NR_fgetxattr',214)
__NR_listxattr = Constant('__NR_listxattr',215)
__NR_llistxattr = Constant('__NR_llistxattr',216)
__NR_flistxattr = Constant('__NR_flistxattr',217)
__NR_removexattr = Constant('__NR_removexattr',218)
__NR_lremovexattr = Constant('__NR_lremovexattr',219)
__NR_fremovexattr = Constant('__NR_fremovexattr',220)
__NR_futex = Constant('__NR_futex',221)
__NR_sched_setaffinity = Constant('__NR_sched_setaffinity',222)
__NR_sched_getaffinity = Constant('__NR_sched_getaffinity',223)
__NR_tuxcall = Constant('__NR_tuxcall',225)
__NR_io_setup = Constant('__NR_io_setup',227)
__NR_io_destroy = Constant('__NR_io_destroy',228)
__NR_io_getevents = Constant('__NR_io_getevents',229)
__NR_io_submit = Constant('__NR_io_submit',230)
__NR_io_cancel = Constant('__NR_io_cancel',231)
__NR_set_tid_address = Constant('__NR_set_tid_address',232)
__NR_fadvise64 = Constant('__NR_fadvise64',233)
__NR_exit_group = Constant('__NR_exit_group',234)
__NR_lookup_dcookie = Constant('__NR_lookup_dcookie',235)
__NR_epoll_create = Constant('__NR_epoll_create',236)
__NR_epoll_ctl = Constant('__NR_epoll_ctl',237)
__NR_epoll_wait = Constant('__NR_epoll_wait',238)
__NR_remap_file_pages = Constant('__NR_remap_file_pages',239)
__NR_timer_create = Constant('__NR_timer_create',240)
__NR_timer_settime = Constant('__NR_timer_settime',241)
__NR_timer_gettime = Constant('__NR_timer_gettime',242)
__NR_timer_getoverrun = Constant('__NR_timer_getoverrun',243)
__NR_timer_delete = Constant('__NR_timer_delete',244)
__NR_clock_settime = Constant('__NR_clock_settime',245)
__NR_clock_gettime = Constant('__NR_clock_gettime',246)
__NR_clock_getres = Constant('__NR_clock_getres',247)
__NR_clock_nanosleep = Constant('__NR_clock_nanosleep',248)
__NR_swapcontext = Constant('__NR_swapcontext',249)
__NR_tgkill = Constant('__NR_tgkill',250)
__NR_utimes = Constant('__NR_utimes',251)
__NR_statfs64 = Constant('__NR_statfs64',252)
__NR_fstatfs64 = Constant('__NR_fstatfs64',253)
__NR_rtas = Constant('__NR_rtas',255)
__NR_mbind = Constant('__NR_mbind',259)
__NR_get_mempolicy = Constant('__NR_get_mempolicy',260)
__NR_set_mempolicy = Constant('__NR_set_mempolicy',261)
__NR_mq_open = Constant('__NR_mq_open',262)
__NR_mq_unlink = Constant('__NR_mq_unlink',263)
__NR_mq_timedsend = Constant('__NR_mq_timedsend',264)
__NR_mq_timedreceive = Constant('__NR_mq_timedreceive',265)
__NR_mq_notify = Constant('__NR_mq_notify',266)
__NR_mq_getsetattr = Constant('__NR_mq_getsetattr',267)
__NR_kexec_load = Constant('__NR_kexec_load',268)
__NR_add_key = Constant('__NR_add_key',269)
__NR_request_key = Constant('__NR_request_key',270)
__NR_keyctl = Constant('__NR_keyctl',271)
__NR_waitid = Constant('__NR_waitid',272)
__NR_ioprio_set = Constant('__NR_ioprio_set',273)
__NR_ioprio_get = Constant('__NR_ioprio_get',274)
__NR_inotify_init = Constant('__NR_inotify_init',275)
__NR_inotify_add_watch = Constant('__NR_inotify_add_watch',276)
__NR_inotify_rm_watch = Constant('__NR_inotify_rm_watch',277)
__NR_spu_run = Constant('__NR_spu_run',278)
__NR_spu_create = Constant('__NR_spu_create',279)
__NR_pselect6 = Constant('__NR_pselect6',280)
__NR_ppoll = Constant('__NR_ppoll',281)
__NR_unshare = Constant('__NR_unshare',282)
__NR_splice = Constant('__NR_splice',283)
__NR_tee = Constant('__NR_tee',284)
__NR_vmsplice = Constant('__NR_vmsplice',285)
__NR_openat = Constant('__NR_openat',286)
__NR_mkdirat = Constant('__NR_mkdirat',287)
__NR_mknodat = Constant('__NR_mknodat',288)
__NR_fchownat = Constant('__NR_fchownat',289)
__NR_futimesat = Constant('__NR_futimesat',290)
__NR_newfstatat = Constant('__NR_newfstatat',291)
__NR_unlinkat = Constant('__NR_unlinkat',292)
__NR_renameat = Constant('__NR_renameat',293)
__NR_linkat = Constant('__NR_linkat',294)
__NR_symlinkat = Constant('__NR_symlinkat',295)
__NR_readlinkat = Constant('__NR_readlinkat',296)
__NR_fchmodat = Constant('__NR_fchmodat',297)
__NR_faccessat = Constant('__NR_faccessat',298)
__NR_get_robust_list = Constant('__NR_get_robust_list',299)
__NR_set_robust_list = Constant('__NR_set_robust_list',300)
__NR_move_pages = Constant('__NR_move_pages',301)
__NR_getcpu = Constant('__NR_getcpu',302)
__NR_epoll_pwait = Constant('__NR_epoll_pwait',303)
__NR_utimensat = Constant('__NR_utimensat',304)
__NR_signalfd = Constant('__NR_signalfd',305)
__NR_timerfd = Constant('__NR_timerfd',306)
__NR_eventfd = Constant('__NR_eventfd',307)
__NR_sync_file_range2 = Constant('__NR_sync_file_range2',308)
__NR_fallocate = Constant('__NR_fallocate',309)
__NR_subpage_prot = Constant('__NR_subpage_prot',310)
__NR_timerfd_settime = Constant('__NR_timerfd_settime',311)
__NR_timerfd_gettime = Constant('__NR_timerfd_gettime',312)
__SYS_NERR = Constant('__SYS_NERR',((129) + 1))
_SYS_TIME_H = Constant('_SYS_TIME_H',1)
SYS_access = Constant('SYS_access',33)
SYS_acct = Constant('SYS_acct',51)
SYS_add_key = Constant('SYS_add_key',269)
SYS_adjtimex = Constant('SYS_adjtimex',124)
SYS_afs_syscall = Constant('SYS_afs_syscall',137)
SYS_alarm = Constant('SYS_alarm',27)
SYS_bdflush = Constant('SYS_bdflush',134)
SYS_break = Constant('SYS_break',17)
SYS_brk = Constant('SYS_brk',45)
SYS_capget = Constant('SYS_capget',183)
SYS_capset = Constant('SYS_capset',184)
SYS_chdir = Constant('SYS_chdir',12)
SYS_chmod = Constant('SYS_chmod',15)
SYS_chown = Constant('SYS_chown',181)
SYS_chroot = Constant('SYS_chroot',61)
SYS_clock_getres = Constant('SYS_clock_getres',247)
SYS_clock_gettime = Constant('SYS_clock_gettime',246)
SYS_clock_nanosleep = Constant('SYS_clock_nanosleep',248)
SYS_clock_settime = Constant('SYS_clock_settime',245)
SYS_clone = Constant('SYS_clone',120)
SYS_close = Constant('SYS_close',6)
SYS_creat = Constant('SYS_creat',8)
SYS_create_module = Constant('SYS_create_module',127)
SYS_delete_module = Constant('SYS_delete_module',129)
SYS_dup = Constant('SYS_dup',41)
SYS_dup2 = Constant('SYS_dup2',63)
SYS_epoll_create = Constant('SYS_epoll_create',236)
SYS_epoll_ctl = Constant('SYS_epoll_ctl',237)
SYS_epoll_pwait = Constant('SYS_epoll_pwait',303)
SYS_epoll_wait = Constant('SYS_epoll_wait',238)
SYS_eventfd = Constant('SYS_eventfd',307)
SYS_execve = Constant('SYS_execve',11)
SYS_exit = Constant('SYS_exit',1)
SYS_exit_group = Constant('SYS_exit_group',234)
SYS_faccessat = Constant('SYS_faccessat',298)
SYS_fadvise64 = Constant('SYS_fadvise64',233)
SYS_fallocate = Constant('SYS_fallocate',309)
SYS_fchdir = Constant('SYS_fchdir',133)
SYS_fchmod = Constant('SYS_fchmod',94)
SYS_fchmodat = Constant('SYS_fchmodat',297)
SYS_fchown = Constant('SYS_fchown',95)
SYS_fchownat = Constant('SYS_fchownat',289)
SYS_fcntl = Constant('SYS_fcntl',55)
SYS_fdatasync = Constant('SYS_fdatasync',148)
SYS_fgetxattr = Constant('SYS_fgetxattr',214)
SYS_flistxattr = Constant('SYS_flistxattr',217)
SYS_flock = Constant('SYS_flock',143)
SYS_fork = Constant('SYS_fork',2)
SYS_fremovexattr = Constant('SYS_fremovexattr',220)
SYS_fsetxattr = Constant('SYS_fsetxattr',211)
SYS_fstat = Constant('SYS_fstat',108)
SYS_fstatfs = Constant('SYS_fstatfs',100)
SYS_fstatfs64 = Constant('SYS_fstatfs64',253)
SYS_fsync = Constant('SYS_fsync',118)
SYS_ftime = Constant('SYS_ftime',35)
SYS_ftruncate = Constant('SYS_ftruncate',93)
SYS_futex = Constant('SYS_futex',221)
SYS_futimesat = Constant('SYS_futimesat',290)
SYS_getcpu = Constant('SYS_getcpu',302)
SYS_getcwd = Constant('SYS_getcwd',182)
SYS_getdents = Constant('SYS_getdents',141)
SYS_getdents64 = Constant('SYS_getdents64',202)
SYS_getegid = Constant('SYS_getegid',50)
SYS_geteuid = Constant('SYS_geteuid',49)
SYS_getgid = Constant('SYS_getgid',47)
SYS_getgroups = Constant('SYS_getgroups',80)
SYS_getitimer = Constant('SYS_getitimer',105)
SYS_get_kernel_syms = Constant('SYS_get_kernel_syms',130)
SYS_get_mempolicy = Constant('SYS_get_mempolicy',260)
SYS_getpgid = Constant('SYS_getpgid',132)
SYS_getpgrp = Constant('SYS_getpgrp',65)
SYS_getpid = Constant('SYS_getpid',20)
SYS_getpmsg = Constant('SYS_getpmsg',187)
SYS_getppid = Constant('SYS_getppid',64)
SYS_getpriority = Constant('SYS_getpriority',96)
SYS_getresgid = Constant('SYS_getresgid',170)
SYS_getresuid = Constant('SYS_getresuid',165)
SYS_getrlimit = Constant('SYS_getrlimit',76)
SYS_get_robust_list = Constant('SYS_get_robust_list',299)
SYS_getrusage = Constant('SYS_getrusage',77)
SYS_getsid = Constant('SYS_getsid',147)
SYS_gettid = Constant('SYS_gettid',207)
SYS_gettimeofday = Constant('SYS_gettimeofday',78)
SYS_getuid = Constant('SYS_getuid',24)
SYS_getxattr = Constant('SYS_getxattr',212)
SYS_gtty = Constant('SYS_gtty',32)
SYS_idle = Constant('SYS_idle',112)
SYS_init_module = Constant('SYS_init_module',128)
SYS_inotify_add_watch = Constant('SYS_inotify_add_watch',276)
SYS_inotify_init = Constant('SYS_inotify_init',275)
SYS_inotify_rm_watch = Constant('SYS_inotify_rm_watch',277)
SYS_io_cancel = Constant('SYS_io_cancel',231)
SYS_ioctl = Constant('SYS_ioctl',54)
SYS_io_destroy = Constant('SYS_io_destroy',228)
SYS_io_getevents = Constant('SYS_io_getevents',229)
SYS_ioperm = Constant('SYS_ioperm',101)
SYS_iopl = Constant('SYS_iopl',110)
SYS_ioprio_get = Constant('SYS_ioprio_get',274)
SYS_ioprio_set = Constant('SYS_ioprio_set',273)
SYS_io_setup = Constant('SYS_io_setup',227)
SYS_io_submit = Constant('SYS_io_submit',230)
SYS_ipc = Constant('SYS_ipc',117)
SYS_kexec_load = Constant('SYS_kexec_load',268)
SYS_keyctl = Constant('SYS_keyctl',271)
SYS_kill = Constant('SYS_kill',37)
SYS_lchown = Constant('SYS_lchown',16)
SYS_lgetxattr = Constant('SYS_lgetxattr',213)
SYS_link = Constant('SYS_link',9)
SYS_linkat = Constant('SYS_linkat',294)
SYS_listxattr = Constant('SYS_listxattr',215)
SYS_llistxattr = Constant('SYS_llistxattr',216)
SYS__llseek = Constant('SYS__llseek',140)
SYS_lock = Constant('SYS_lock',53)
SYS_lookup_dcookie = Constant('SYS_lookup_dcookie',235)
SYS_lremovexattr = Constant('SYS_lremovexattr',219)
SYS_lseek = Constant('SYS_lseek',19)
SYS_lsetxattr = Constant('SYS_lsetxattr',210)
SYS_lstat = Constant('SYS_lstat',107)
SYS_madvise = Constant('SYS_madvise',205)
SYS_mbind = Constant('SYS_mbind',259)
SYS_mincore = Constant('SYS_mincore',206)
SYS_mkdir = Constant('SYS_mkdir',39)
SYS_mkdirat = Constant('SYS_mkdirat',287)
SYS_mknod = Constant('SYS_mknod',14)
SYS_mknodat = Constant('SYS_mknodat',288)
SYS_mlock = Constant('SYS_mlock',150)
SYS_mlockall = Constant('SYS_mlockall',152)
SYS_mmap = Constant('SYS_mmap',90)
SYS_modify_ldt = Constant('SYS_modify_ldt',123)
SYS_mount = Constant('SYS_mount',21)
SYS_move_pages = Constant('SYS_move_pages',301)
SYS_mprotect = Constant('SYS_mprotect',125)
SYS_mpx = Constant('SYS_mpx',56)
SYS_mq_getsetattr = Constant('SYS_mq_getsetattr',267)
SYS_mq_notify = Constant('SYS_mq_notify',266)
SYS_mq_open = Constant('SYS_mq_open',262)
SYS_mq_timedreceive = Constant('SYS_mq_timedreceive',265)
SYS_mq_timedsend = Constant('SYS_mq_timedsend',264)
SYS_mq_unlink = Constant('SYS_mq_unlink',263)
SYS_mremap = Constant('SYS_mremap',163)
SYS_msync = Constant('SYS_msync',144)
SYS_multiplexer = Constant('SYS_multiplexer',201)
SYS_munlock = Constant('SYS_munlock',151)
SYS_munlockall = Constant('SYS_munlockall',153)
SYS_munmap = Constant('SYS_munmap',91)
SYS_nanosleep = Constant('SYS_nanosleep',162)
SYS_newfstatat = Constant('SYS_newfstatat',291)
SYS__newselect = Constant('SYS__newselect',142)
SYS_nfsservctl = Constant('SYS_nfsservctl',168)
SYS_nice = Constant('SYS_nice',34)
SYS_oldfstat = Constant('SYS_oldfstat',28)
SYS_oldlstat = Constant('SYS_oldlstat',84)
SYS_oldolduname = Constant('SYS_oldolduname',59)
SYS_oldstat = Constant('SYS_oldstat',18)
SYS_olduname = Constant('SYS_olduname',109)
SYS_open = Constant('SYS_open',5)
SYS_openat = Constant('SYS_openat',286)
SYS_pause = Constant('SYS_pause',29)
SYS_pciconfig_iobase = Constant('SYS_pciconfig_iobase',200)
SYS_pciconfig_read = Constant('SYS_pciconfig_read',198)
SYS_pciconfig_write = Constant('SYS_pciconfig_write',199)
SYS_personality = Constant('SYS_personality',136)
SYS_pipe = Constant('SYS_pipe',42)
SYS_pivot_root = Constant('SYS_pivot_root',203)
SYS_poll = Constant('SYS_poll',167)
SYS_ppoll = Constant('SYS_ppoll',281)
SYS_prctl = Constant('SYS_prctl',171)
SYS_pread = Constant('SYS_pread',179)
SYS_prof = Constant('SYS_prof',44)
SYS_profil = Constant('SYS_profil',98)
SYS_pselect6 = Constant('SYS_pselect6',280)
SYS_ptrace = Constant('SYS_ptrace',26)
SYS_putpmsg = Constant('SYS_putpmsg',188)
SYS_pwrite = Constant('SYS_pwrite',180)
SYS_query_module = Constant('SYS_query_module',166)
SYS_quotactl = Constant('SYS_quotactl',131)
SYS_read = Constant('SYS_read',3)
SYS_readahead = Constant('SYS_readahead',191)
SYS_readdir = Constant('SYS_readdir',89)
SYS_readlink = Constant('SYS_readlink',85)
SYS_readlinkat = Constant('SYS_readlinkat',296)
SYS_readv = Constant('SYS_readv',145)
SYS_reboot = Constant('SYS_reboot',88)
SYS_remap_file_pages = Constant('SYS_remap_file_pages',239)
SYS_removexattr = Constant('SYS_removexattr',218)
SYS_rename = Constant('SYS_rename',38)
SYS_renameat = Constant('SYS_renameat',293)
SYS_request_key = Constant('SYS_request_key',270)
SYS_rmdir = Constant('SYS_rmdir',40)
SYS_rtas = Constant('SYS_rtas',255)
SYS_rt_sigaction = Constant('SYS_rt_sigaction',173)
SYS_rt_sigpending = Constant('SYS_rt_sigpending',175)
SYS_rt_sigprocmask = Constant('SYS_rt_sigprocmask',174)
SYS_rt_sigqueueinfo = Constant('SYS_rt_sigqueueinfo',177)
SYS_rt_sigreturn = Constant('SYS_rt_sigreturn',172)
SYS_rt_sigsuspend = Constant('SYS_rt_sigsuspend',178)
SYS_rt_sigtimedwait = Constant('SYS_rt_sigtimedwait',176)
SYS_sched_getaffinity = Constant('SYS_sched_getaffinity',223)
SYS_sched_getparam = Constant('SYS_sched_getparam',155)
SYS_sched_get_priority_max = Constant('SYS_sched_get_priority_max',159)
SYS_sched_get_priority_min = Constant('SYS_sched_get_priority_min',160)
SYS_sched_getscheduler = Constant('SYS_sched_getscheduler',157)
SYS_sched_rr_get_interval = Constant('SYS_sched_rr_get_interval',161)
SYS_sched_setaffinity = Constant('SYS_sched_setaffinity',222)
SYS_sched_setparam = Constant('SYS_sched_setparam',154)
SYS_sched_setscheduler = Constant('SYS_sched_setscheduler',156)
SYS_sched_yield = Constant('SYS_sched_yield',158)
SYS_select = Constant('SYS_select',82)
SYS_sendfile = Constant('SYS_sendfile',186)
SYS_setdomainname = Constant('SYS_setdomainname',121)
SYS_setfsgid = Constant('SYS_setfsgid',139)
SYS_setfsuid = Constant('SYS_setfsuid',138)
SYS_setgid = Constant('SYS_setgid',46)
SYS_setgroups = Constant('SYS_setgroups',81)
SYS_sethostname = Constant('SYS_sethostname',74)
SYS_setitimer = Constant('SYS_setitimer',104)
SYS_set_mempolicy = Constant('SYS_set_mempolicy',261)
SYS_setpgid = Constant('SYS_setpgid',57)
SYS_setpriority = Constant('SYS_setpriority',97)
SYS_setregid = Constant('SYS_setregid',71)
SYS_setresgid = Constant('SYS_setresgid',169)
SYS_setresuid = Constant('SYS_setresuid',164)
SYS_setreuid = Constant('SYS_setreuid',70)
SYS_setrlimit = Constant('SYS_setrlimit',75)
SYS_set_robust_list = Constant('SYS_set_robust_list',300)
SYS_setsid = Constant('SYS_setsid',66)
SYS_set_tid_address = Constant('SYS_set_tid_address',232)
SYS_settimeofday = Constant('SYS_settimeofday',79)
SYS_setuid = Constant('SYS_setuid',23)
SYS_setxattr = Constant('SYS_setxattr',209)
SYS_sgetmask = Constant('SYS_sgetmask',68)
SYS_sigaction = Constant('SYS_sigaction',67)
SYS_sigaltstack = Constant('SYS_sigaltstack',185)
SYS_signal = Constant('SYS_signal',48)
SYS_signalfd = Constant('SYS_signalfd',305)
SYS_sigpending = Constant('SYS_sigpending',73)
SYS_sigprocmask = Constant('SYS_sigprocmask',126)
SYS_sigreturn = Constant('SYS_sigreturn',119)
SYS_sigsuspend = Constant('SYS_sigsuspend',72)
SYS_socketcall = Constant('SYS_socketcall',102)
SYS_splice = Constant('SYS_splice',283)
SYS_spu_create = Constant('SYS_spu_create',279)
SYS_spu_run = Constant('SYS_spu_run',278)
SYS_ssetmask = Constant('SYS_ssetmask',69)
SYS_stat = Constant('SYS_stat',106)
SYS_statfs = Constant('SYS_statfs',99)
SYS_statfs64 = Constant('SYS_statfs64',252)
SYS_stime = Constant('SYS_stime',25)
SYS_stty = Constant('SYS_stty',31)
SYS_subpage_prot = Constant('SYS_subpage_prot',310)
SYS_swapcontext = Constant('SYS_swapcontext',249)
SYS_swapoff = Constant('SYS_swapoff',115)
SYS_swapon = Constant('SYS_swapon',87)
SYS_symlink = Constant('SYS_symlink',83)
SYS_symlinkat = Constant('SYS_symlinkat',295)
SYS_sync = Constant('SYS_sync',36)
SYS_sync_file_range2 = Constant('SYS_sync_file_range2',308)
SYS__sysctl = Constant('SYS__sysctl',149)
SYS_sysfs = Constant('SYS_sysfs',135)
SYS_sysinfo = Constant('SYS_sysinfo',116)
SYS_syslog = Constant('SYS_syslog',103)
SYS_tee = Constant('SYS_tee',284)
SYS_tgkill = Constant('SYS_tgkill',250)
SYS_time = Constant('SYS_time',13)
SYS_timer_create = Constant('SYS_timer_create',240)
SYS_timer_delete = Constant('SYS_timer_delete',244)
SYS_timerfd = Constant('SYS_timerfd',306)
SYS_timerfd_gettime = Constant('SYS_timerfd_gettime',312)
SYS_timerfd_settime = Constant('SYS_timerfd_settime',311)
SYS_timer_getoverrun = Constant('SYS_timer_getoverrun',243)
SYS_timer_gettime = Constant('SYS_timer_gettime',242)
SYS_timer_settime = Constant('SYS_timer_settime',241)
SYS_times = Constant('SYS_times',43)
SYS_tkill = Constant('SYS_tkill',208)
SYS_truncate = Constant('SYS_truncate',92)
SYS_tuxcall = Constant('SYS_tuxcall',225)
SYS_ugetrlimit = Constant('SYS_ugetrlimit',190)
SYS_ulimit = Constant('SYS_ulimit',58)
SYS_umask = Constant('SYS_umask',60)
SYS_umount = Constant('SYS_umount',22)
SYS_umount2 = Constant('SYS_umount2',52)
SYS_uname = Constant('SYS_uname',122)
SYS_unlink = Constant('SYS_unlink',10)
SYS_unlinkat = Constant('SYS_unlinkat',292)
SYS_unshare = Constant('SYS_unshare',282)
SYS_uselib = Constant('SYS_uselib',86)
SYS_ustat = Constant('SYS_ustat',62)
SYS_utime = Constant('SYS_utime',30)
SYS_utimensat = Constant('SYS_utimensat',304)
SYS_utimes = Constant('SYS_utimes',251)
SYS_vfork = Constant('SYS_vfork',189)
SYS_vhangup = Constant('SYS_vhangup',111)
SYS_vm86 = Constant('SYS_vm86',113)
SYS_vmsplice = Constant('SYS_vmsplice',285)
SYS_wait4 = Constant('SYS_wait4',114)
SYS_waitid = Constant('SYS_waitid',272)
SYS_waitpid = Constant('SYS_waitpid',7)
SYS_write = Constant('SYS_write',4)
SYS_writev = Constant('SYS_writev',146)
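# Each value above is a Constant('NAME', number). A minimal sketch of such a
# helper (hypothetical; pwndbg ships its own implementation) is an int
# subclass that remembers its symbolic name:
#
#   class Constant(int):
#       def __new__(cls, name, value):
#           obj = super(Constant, cls).__new__(cls, value)
#           obj.name = name
#           return obj
#
# so that SYS_write == 4 while SYS_write.name == 'SYS_write'.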
|
{
"content_hash": "295413fc0ce892373e1c4bf41c8233d0",
"timestamp": "",
"source": "github",
"line_count": 602,
"max_line_length": 73,
"avg_line_length": 45.34551495016611,
"alnum_prop": 0.7001611839695215,
"repo_name": "anthraxx/pwndbg",
"id": "13f5fb13769b3acc27dc61ef1c0bdfe9d37bc329",
"size": "27345",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "pwndbg/constants/powerpc64.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "584"
},
{
"name": "C",
"bytes": "113"
},
{
"name": "Go",
"bytes": "58"
},
{
"name": "Makefile",
"bytes": "1302"
},
{
"name": "Python",
"bytes": "1823698"
},
{
"name": "Shell",
"bytes": "5952"
}
],
"symlink_target": ""
}
|
import unittest, sys, time
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_import as h2i, h2o_glm, h2o_common, h2o_exec as h2e
import h2o_print
DO_GLM = True
LOG_MACHINE_STATS = True
print "Assumes you ran ../build_for_clone.py in this directory"
print "Using h2o-nodes.json. Also the sandbox dir"
class releaseTest(h2o_common.ReleaseCommon, unittest.TestCase):
def sub_c2_nongz_fvec_long(self):
# a kludge
h2o.setup_benchmark_log()
avgMichalSize = 237270000
bucket = 'home-0xdiag-datasets'
### importFolderPath = 'more1_1200_link'
importFolderPath = 'manyfiles-nflx'
print "Using non-gz'ed files in", importFolderPath
csvFilenameList= [
("*[1][0-4][0-9].dat", "file_50_A.dat", 50 * avgMichalSize, 1800),
# ("*[1][0-9][0-9].dat", "file_100_A.dat", 100 * avgMichalSize, 3600),
]
if LOG_MACHINE_STATS:
benchmarkLogging = ['cpu', 'disk', 'network']
else:
benchmarkLogging = []
pollTimeoutSecs = 120
retryDelaySecs = 10
for trial, (csvFilepattern, csvFilename, totalBytes, timeoutSecs) in enumerate(csvFilenameList):
csvPathname = importFolderPath + "/" + csvFilepattern
# double import still causing problems?
# (importResult, importPattern) = h2i.import_only(bucket=bucket, path=csvPathname, schema='local')
# importFullList = importResult['files']
# importFailList = importResult['fails']
# print "\n Problem if this is not empty: importFailList:", h2o.dump_json(importFailList)
# this accumulates performance stats into a benchmark log over multiple runs
# good for tracking whether we're getting slower or faster
h2o.cloudPerfH2O.change_logfile(csvFilename)
h2o.cloudPerfH2O.message("")
h2o.cloudPerfH2O.message("Parse " + csvFilename + " Start--------------------------------")
start = time.time()
parseResult = h2i.import_parse(bucket=bucket, path=csvPathname, schema='local',
hex_key=csvFilename + ".hex", timeoutSecs=timeoutSecs,
retryDelaySecs=retryDelaySecs,
pollTimeoutSecs=pollTimeoutSecs,
benchmarkLogging=benchmarkLogging)
elapsed = time.time() - start
print "Parse #", trial, "completed in", "%6.2f" % elapsed, "seconds.", \
"%d pct. of timeout" % ((elapsed*100)/timeoutSecs)
print "Parse result['destination_key']:", parseResult['destination_key']
h2o_cmd.columnInfoFromInspect(parseResult['destination_key'], exceptionOnMissingValues=False)
if totalBytes is not None:
fileMBS = (totalBytes/1e6)/elapsed
msg = '{!s} jvms, {!s}GB heap, {:s} {:s} {:6.2f} MB/sec for {:.2f} secs'.format(
len(h2o.nodes), h2o.nodes[0].java_heap_GB, csvFilepattern, csvFilename, fileMBS, elapsed)
print msg
h2o.cloudPerfH2O.message(msg)
if DO_GLM:
# these are all the columns that are enums in the dataset...too many for GLM!
x = range(542) # don't include the output column
# remove the output too! (378)
ignore_x = []
for i in [3,4,5,6,7,8,9,10,11,14,16,17,18,19,20,424,425,426,540,541]:
x.remove(i)
ignore_x.append(i)
# plus 1 because we are no longer 0 offset
x = ",".join(map(lambda x: "C" + str(x+1), x))
ignore_x = ",".join(map(lambda x: "C" + str(x+1), ignore_x))
GLMkwargs = {
'ignored_cols': ignore_x,
'family': 'binomial',
'response': 'C379',
'max_iter': 4,
'n_folds': 1,
'alpha': 0.2,
'lambda': 1e-5
}
# are the unparsed keys slowing down exec?
h2i.delete_keys_at_all_nodes(pattern="manyfile")
# convert to binomial
execExpr="A.hex=%s" % parseResult['destination_key']
h2e.exec_expr(execExpr=execExpr, timeoutSecs=180)
execExpr="A.hex[,%s]=(A.hex[,%s]!=%s)" % ('379', '379', 0)
h2e.exec_expr(execExpr=execExpr, timeoutSecs=180)
aHack = {'destination_key': "A.hex"}
start = time.time()
glm = h2o_cmd.runGLM(parseResult=aHack, timeoutSecs=timeoutSecs, **GLMkwargs)
elapsed = time.time() - start
h2o.check_sandbox_for_errors()
h2o_glm.simpleCheckGLM(self, glm, None, **GLMkwargs)
msg = '{:d} jvms, {:d}GB heap, {:s} {:s} GLM: {:6.2f} secs'.format(
len(h2o.nodes), h2o.nodes[0].java_heap_GB, csvFilepattern, csvFilename, elapsed)
print msg
h2o.cloudPerfH2O.message(msg)
h2o_cmd.checkKeyDistribution()
#***********************************************************************
# these will be tracked individual by jenkins, which is nice
#***********************************************************************
def test_B_c2_nongz_fvec_long(self):
h2o.beta_features = True
self.sub_c2_nongz_fvec_long()
if __name__ == '__main__':
h2o.unit_main()
|
{
"content_hash": "84c5e6b0c01869483623067b61889b93",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 114,
"avg_line_length": 46.19047619047619,
"alnum_prop": 0.5077319587628866,
"repo_name": "woobe/h2o",
"id": "4781591ecd2550c60a30c8b2cdda41f2e05b0e3a",
"size": "5820",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py/testdir_release/c2/test_c2_nongz_fvec.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
import argparse, glob, os, sys, csv
import numpy as np
from matplotlib import pyplot as plt
from importlib import import_module
from scipy.interpolate import interp1d
from sklearn.metrics import roc_curve, roc_auc_score
label_file = "./data/outcome.txt"
target_names = []
with open(label_file, "rb") as vfile:
for line in vfile.readlines():
line = line.strip()
if not line: continue
target_names.append(line.split(',')[1])
def avg_cal_dis(dir, module):
test_files = glob.glob(dir + "/testing-data-*.csv")
print "Calculating Calibration/Discrimination for " + module.title() + "..."
# count = 0
total_cal = []
total_dis = []
for testfile in test_files:
start_idx = testfile.find(dir + "/testing-data-") + len(dir + "/testing-data-")
stop_idx = testfile.find('.csv')
id = testfile[start_idx:stop_idx]
pfile = dir + "/" + module.prefix() + "-params-" + str(id)
trainfile = dir + "/training-data-completed-" + str(id) + ".csv"
if os.path.exists(testfile) and os.path.exists(pfile) and os.path.exists(trainfile):
# count = count + 1
print "Calibration/Discrimination for test set " + id + " ----------------------------------"
cal, dis = module.eval(testfile, trainfile, pfile, 1)
# total_cal += cal
# total_dis += dis
total_cal.append(cal)
total_dis.append(dis)
avg_cal = np.mean(np.array(total_cal), axis=0)
avg_dis = np.mean(np.array(total_dis), axis=0)
std_cal = np.std(np.array(total_cal), axis=0)
std_dis = np.std(np.array(total_dis), axis=0)
print module.title() + " Calibration/Discrimination Average ********************************************"
print "Calibration : " + str(avg_cal)
print "Discrimination: " + str(avg_dis)
print module.title() + " Calibration/Discrimination Error ********************************************"
print "Calibration : " + str(std_cal)
print "Discrimination: " + str(std_dis)
def cal_plots(dir, module):
test_files = glob.glob(dir + "/testing-data-*.csv")
print "Calculating calibration plots for " + module.title() + "..."
for testfile in test_files:
start_idx = testfile.find(dir + "/testing-data-") + len(dir + "/testing-data-")
stop_idx = testfile.find('.csv')
id = testfile[start_idx:stop_idx]
pfile = dir + "/" + module.prefix() + "-params-" + str(id)
trainfile = dir + "/training-data-completed-" + str(id) + ".csv"
if os.path.exists(testfile) and os.path.exists(pfile) and os.path.exists(trainfile):
print "Calibration for test set " + id + " ----------------------------------"
out_file = "./out/calstats-" + id + ".txt"
plot_file = "./out/calplot-" + id + ".pdf"
module.eval(testfile, trainfile, pfile, 2, test_file=testfile, out_file=out_file, plot_file=plot_file)
print "********************************************"
print "Saved calibration plot and Hosmer-Lemeshow goodness of fit for " + module.title() + " in out folder."
def avg_report(dir, module):
test_files = glob.glob(dir + "/testing-data-*.csv")
print "Calculating average report for " + module.title() + "..."
count = 0
total_prec = []
total_rec = []
total_f1 = []
for testfile in test_files:
start_idx = testfile.find(dir + "/testing-data-") + len(dir + "/testing-data-")
stop_idx = testfile.find('.csv')
id = testfile[start_idx:stop_idx]
pfile = dir + "/" + module.prefix() + "-params-" + str(id)
trainfile = dir + "/training-data-completed-" + str(id) + ".csv"
if os.path.exists(testfile) and os.path.exists(pfile) and os.path.exists(trainfile):
count = count + 1
print "Report for test set " + id + " ----------------------------------"
p, r, f, _ = module.eval(testfile, trainfile, pfile, 3)
total_prec.append(p)
total_rec.append(r)
total_f1.append(f)
avg_prec = np.mean(np.array(total_prec), axis=0)
avg_rec = np.mean(np.array(total_rec), axis=0)
avg_f1 = np.mean(np.array(total_f1), axis=0)
std_prec = np.std(np.array(total_prec), axis=0)
std_rec = np.std(np.array(total_rec), axis=0)
std_f1 = np.std(np.array(total_f1), axis=0)
tot_prec_mean = (avg_prec[0] + avg_prec[1])/2
tot_rec_mean = (avg_rec[0]+avg_rec[1])/2
tot_f1_mean = (avg_f1[0]+avg_f1[1])/2
tot_prec_std = (std_prec[0] + std_prec[1])/2
tot_rec_std = (std_rec[0]+std_rec[1])/2
tot_f1_std = (std_f1[0]+std_f1[1])/2
print "Average report for " + module.title() + " ********************************************"
print "{:10s} {:10s} {:10s} {:10s}".format("", "precision", "recall", "f1-score")
print "{:10s} {:2.2f} {:2.2f} {:2.2f}".format(target_names[0], avg_prec[0], avg_rec[0], avg_f1[0])
print "{:10s} {:2.2f} {:2.2f} {:2.2f}".format(target_names[1], avg_prec[1], avg_rec[1], avg_f1[1])
print "{:10s} {:2.2f} {:2.2f} {:2.2f}".format("Total", tot_prec_mean, tot_rec_mean, tot_f1_mean)
print
print "Standard Deviation report for " + module.title() + " ********************************************"
print "{:10s} {:10s} {:10s} {:10s}".format("", "precision", "recall", "f1-score")
print "{:10s} {:2.2f} {:2.2f} {:2.2f}".format(target_names[0], std_prec[0], std_rec[0], std_f1[0])
print "{:10s} {:2.2f} {:2.2f} {:2.2f}".format(target_names[1], std_prec[1], std_rec[1], std_f1[1])
print "{:10s} {:2.2f} {:2.2f} {:2.2f}".format("Total", tot_prec_std, tot_rec_std, tot_f1_std)
print
print "Summary ********************************************"
print "Total,"+str(tot_prec_mean)+","+str(tot_rec_mean)+","+str(tot_f1_mean)+","+str(tot_prec_std)+","+str(tot_rec_std)+","+str(tot_f1_std)
def roc_plots(dir, module):
test_files = glob.glob(dir + "/testing-data-*.csv")
print "Calculating ROC curves for " + module.title() + "..."
count = 0
all_prob = []
all_y = []
total_fpr = np.array([])
total_tpr = np.array([])
total_auc = 0
plt.clf()
fig = plt.figure()
for testfile in test_files:
start_idx = testfile.find(dir + "/testing-data-") + len(dir + "/testing-data-")
stop_idx = testfile.find('.csv')
id = testfile[start_idx:stop_idx]
pfile = dir + "/" + module.prefix() + "-params-" + str(id)
trainfile = dir + "/training-data-completed-" + str(id) + ".csv"
if os.path.exists(testfile) and os.path.exists(pfile) and os.path.exists(trainfile):
print "Report for test set " + id + " ----------------------------------"
fpr, tpr, auc = module.eval(testfile, trainfile, pfile, 4, pltshow=False)
p, y = module.pred(testfile, trainfile, pfile)
all_prob.extend(p)
all_y.extend(y)
# if fpr.size < 3: continue
if total_fpr.size:
if total_fpr[0].size != fpr.size: continue
total_fpr = np.append(total_fpr, np.array([fpr]), axis=0)
total_tpr = np.append(total_tpr, np.array([tpr]), axis=0)
else:
total_fpr = np.array([fpr])
total_tpr = np.array([tpr])
total_auc += auc
count += 1
ave_auc = (total_auc)/(count)
print "********************************************"
ave_fpr = np.mean(total_fpr, axis=0)
ave_tpr = np.mean(total_tpr, axis=0)
# std_fpr = np.std(total_fpr, axis=0)
std_tpr = np.std(total_tpr, axis=0)
# The AUC of the aggregated curve
all_auc = roc_auc_score(all_y, all_prob)
# f2 = interp1d(ave_fpr, ave_tpr, kind='cubic')
plt.plot(ave_fpr, ave_tpr, c="grey")
# print f2(ave_fpr)
# plt.plot(ave_fpr, f2(ave_fpr), c="red")
plt.plot([0, 1], [0, 1], 'k--')
plt.fill_between(ave_fpr, ave_tpr-std_tpr, ave_tpr+std_tpr, alpha=0.5)
plt.xlim([-0.1, 1.1])
plt.ylim([-0.1, 1.1])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
print "Average area under the ROC curve for " + module.title() + ": " + str(ave_auc)
print "Area under the aggregated ROC curve for " + module.title() + ": " + str(all_auc)
fig.savefig('./out/roc.pdf')
print "Saved ROC curve to ./out/roc.pdf"
with open("./out/roc.csv", "wb") as rfile:
writer = csv.writer(rfile, delimiter=",")
writer.writerow(["Y", "P"])
for i in range(0, len(all_prob)):
writer.writerow([all_y[i], all_prob[i]])
print "Saved aggregated ROC data to ./out/roc.csv"
def avg_conf_mat(dir, module):
test_files = glob.glob(dir + "/testing-data-*.csv")
print "Calculating average report for " + module.title() + "..."
count = 0
total_n_hit = 0
total_n_false_alarm = 0
total_n_miss = 0
total_n_correct_rej = 0
for testfile in test_files:
start_idx = testfile.find(dir + "/testing-data-") + len(dir + "/testing-data-")
stop_idx = testfile.find('.csv')
id = testfile[start_idx:stop_idx]
pfile = dir + "/" + module.prefix() + "-params-" + str(id)
trainfile = dir + "/training-data-completed-" + str(id) + ".csv"
if os.path.exists(testfile) and os.path.exists(pfile) and os.path.exists(trainfile):
count = count + 1
print "Confusion matrix for test set " + id + " ------------------------------"
n_hit, n_false_alarm, n_miss, n_correct_rej = module.eval(testfile, trainfile, pfile, 5)
total_n_hit += n_hit
total_n_false_alarm += n_false_alarm
total_n_miss += n_miss
total_n_correct_rej += n_correct_rej
avg_n_hit = total_n_hit/(1.0*count)
avg_n_false_alarm = total_n_false_alarm/(1.0*count)
avg_n_miss = total_n_miss/(1.0*count)
avg_n_correct_rej = total_n_correct_rej/(1.0*count)
print "Average confusion matrix for " + module.title() + " ********************************************"
print "{:25s} {:20s} {:20s}".format("", "Output " + target_names[1], "Output " + target_names[0])
print "{:25s} {:2.2f}{:17s}{:2.2f}".format("Predicted " + target_names[1], avg_n_hit,"", avg_n_false_alarm)
print "{:25s} {:2.2f}{:17s}{:2.2f}".format("Predicted " + target_names[0], avg_n_miss,"", avg_n_correct_rej)
def list_misses(dir, module):
test_files = glob.glob(dir + "/testing-data-*.csv")
print "Miss-classifications for predictor " + module.title() + "..."
count = 0
for testfile in test_files:
start_idx = testfile.find(dir + "/testing-data-") + len(dir + "/testing-data-")
stop_idx = testfile.find('.csv')
id = testfile[start_idx:stop_idx]
pfile = dir + "/" + module.prefix() + "-params-" + str(id)
trainfile = dir + "/training-data-completed-" + str(id) + ".csv"
if os.path.exists(testfile) and os.path.exists(pfile) and os.path.exists(trainfile):
idx = module.miss(testfile, trainfile, pfile)
count += len(idx)
print "********************************************"
print "Total miss-classifications for " + module.title() + ":",count
def evaluate(base, name, predictor, method):
dir = os.path.join(base, "models", name)
module_path = os.path.abspath(predictor)
module_filename = "eval"
sys.path.insert(0, module_path)
module = import_module(module_filename)
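# import_module("eval") resolves to the predictor folder's own eval.py because
# that folder was pushed to the front of sys.path; the module is expected to
# expose the title(), prefix(), eval(), pred() and miss() hooks used above.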
if not os.path.exists("./out"): os.makedirs("./out")
# Average calibrations and discriminations
if method == "caldis":
avg_cal_dis(dir, module)
# Plot each method on same calibration plot
elif method == "calplot":
cal_plots(dir, module)
# Average precision, recall, and F1 scores
elif method == "report":
avg_report(dir, module)
# Plot each method on same ROC plot
elif method == "roc":
roc_plots(dir, module)
# Average confusion matrix
elif method == "confusion":
avg_conf_mat(dir, module)
elif method == "misses":
list_misses(dir, module)
# Method not defined:
else:
raise Exception("Invalid method given")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Evaluate the model with given method(s)
parser.add_argument('-B', '--base_dir', nargs=1, default=["./"],
help="Base directory")
parser.add_argument('-N', '--name', nargs=1, default=["test"],
help="Model name")
parser.add_argument('-p', '--predictor', nargs=1, default=["nnet"],
help="Folder containing predictor to evaluate")
parser.add_argument('-m', '--method', nargs=1, default=["report"],
help="Evaluation method: caldis, calplot, report, roc, confusion, misses")
args = parser.parse_args()
evaluate(args.base_dir[0], args.name[0], args.predictor[0], args.method[0])
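# Example invocation (illustrative), using the argparse defaults above:
#   python eval.py -B ./ -N test -p nnet -m report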
|
{
"content_hash": "9ca2a8868ff3cc73bec1519e3d48a00e",
"timestamp": "",
"source": "github",
"line_count": 276,
"max_line_length": 143,
"avg_line_length": 47.36231884057971,
"alnum_prop": 0.5516370869033048,
"repo_name": "broadinstitute/ebola-predictor",
"id": "e2e4ce44eb31cccc40246be6f8981a55d6fb5c6e",
"size": "13072",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "eval.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "273299"
},
{
"name": "R",
"bytes": "3987"
}
],
"symlink_target": ""
}
|
"""
Exceptions common to OpenStack projects
"""
import itertools
import logging
class ProcessExecutionError(IOError):
def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None,
description=None):
if description is None:
description = "Unexpected error while running command."
if exit_code is None:
exit_code = '-'
message = "%s\nCommand: %s\nExit code: %s\nStdout: %r\nStderr: %r" % (
description, cmd, exit_code, stdout, stderr)
IOError.__init__(self, message)
class Error(Exception):
def __init__(self, message=None):
super(Error, self).__init__(message)
class ApiError(Error):
def __init__(self, message='Unknown', code='Unknown'):
self.message = message
self.code = code
super(ApiError, self).__init__('%s: %s' % (code, message))
class NotFound(Error):
pass
class UnknownScheme(Error):
msg = "Unknown scheme '%s' found in URI"
def __init__(self, scheme):
msg = self.__class__.msg % scheme
super(UnknownScheme, self).__init__(msg)
class BadStoreUri(Error):
msg = "The Store URI %s was malformed. Reason: %s"
def __init__(self, uri, reason):
msg = self.__class__.msg % (uri, reason)
super(BadStoreUri, self).__init__(msg)
class Duplicate(Error):
pass
class NotAuthorized(Error):
pass
class NotEmpty(Error):
pass
class Invalid(Error):
pass
class BadInputError(Exception):
"""Error resulting from a client sending bad input to a server"""
pass
class MissingArgumentError(Error):
pass
class DatabaseMigrationError(Error):
pass
class ClientConnectionError(Exception):
"""Error resulting from a client connecting to a server"""
pass
def wrap_exception(f):
def _wrap(*args, **kw):
try:
return f(*args, **kw)
except Exception, e:
if not isinstance(e, Error):
#exc_type, exc_value, exc_traceback = sys.exc_info()
logging.exception('Uncaught exception')
#logging.error(traceback.extract_stack(exc_traceback))
raise Error(str(e))
raise
_wrap.func_name = f.func_name
return _wrap
class OpenstackException(Exception):
"""
Base Exception
To correctly use this class, inherit from it and define
a 'message' property. That message will get printf'd
with the keyword arguments provided to the constructor.
"""
message = "An unknown exception occurred"
def __init__(self, **kwargs):
try:
self._error_string = self.message % kwargs
except Exception:
# at least get the core message out if something happened
self._error_string = self.message
def __str__(self):
return self._error_string
class MalformedRequestBody(OpenstackException):
message = "Malformed message body: %(reason)s"
class InvalidContentType(OpenstackException):
message = "Invalid content type %(content_type)s"
|
{
"content_hash": "c8121c0cb9711dcdc32e7048c41a2293",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 78,
"avg_line_length": 23.49618320610687,
"alnum_prop": 0.6195581546458739,
"repo_name": "xchenum/quantum",
"id": "e5da94b9496518923bf69507a22065589e7f4bfa",
"size": "3753",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "quantum/openstack/common/exception.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "67928"
},
{
"name": "Python",
"bytes": "1989469"
},
{
"name": "Shell",
"bytes": "7869"
}
],
"symlink_target": ""
}
|
def sayHello():
print 'Hello, world'
version = 0.1
### This is the end of mymodule.py
|
{
"content_hash": "e8ab207be306da75ef47885205565815",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 34,
"avg_line_length": 18.2,
"alnum_prop": 0.6483516483516484,
"repo_name": "DeercoderPractice/exp-code",
"id": "a4a0d0e44b6b00d7378782ab12e3901bfb17918e",
"size": "290",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Python/Code/mymodule.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "18606"
},
{
"name": "C",
"bytes": "1184"
},
{
"name": "C++",
"bytes": "4132"
},
{
"name": "CMake",
"bytes": "9154"
},
{
"name": "DIGITAL Command Language",
"bytes": "191608"
},
{
"name": "HTML",
"bytes": "660863"
},
{
"name": "JavaScript",
"bytes": "287"
},
{
"name": "Makefile",
"bytes": "39816"
},
{
"name": "Objective-C",
"bytes": "1378"
},
{
"name": "PostScript",
"bytes": "74807"
},
{
"name": "Python",
"bytes": "79942"
},
{
"name": "Shell",
"bytes": "31610"
},
{
"name": "TeX",
"bytes": "907511"
}
],
"symlink_target": ""
}
|
from waflib import Utils,Task,Options,Logs,Errors
from waflib.TaskGen import before_method,after_method,feature
from waflib.Tools import ccroot
from waflib.Configure import conf
import os,tempfile
ccroot.USELIB_VARS['cs']=set(['CSFLAGS','ASSEMBLIES','RESOURCES'])
ccroot.lib_patterns['csshlib']=['%s']
@feature('cs')
@before_method('process_source')
def apply_cs(self):
cs_nodes=[]
no_nodes=[]
for x in self.to_nodes(self.source):
if x.name.endswith('.cs'):
cs_nodes.append(x)
else:
no_nodes.append(x)
self.source=no_nodes
bintype=getattr(self,'bintype',self.gen.endswith('.dll')and'library'or'exe')
self.cs_task=tsk=self.create_task('mcs',cs_nodes,self.path.find_or_declare(self.gen))
tsk.env.CSTYPE='/target:%s'%bintype
tsk.env.OUT='/out:%s'%tsk.outputs[0].abspath()
self.env.append_value('CSFLAGS','/platform:%s'%getattr(self,'platform','anycpu'))
inst_to=getattr(self,'install_path',bintype=='exe'and'${BINDIR}'or'${LIBDIR}')
if inst_to:
mod=getattr(self,'chmod',bintype=='exe'and Utils.O755 or Utils.O644)
self.install_task=self.bld.install_files(inst_to,self.cs_task.outputs[:],env=self.env,chmod=mod)
@feature('cs')
@after_method('apply_cs')
def use_cs(self):
names=self.to_list(getattr(self,'use',[]))
get=self.bld.get_tgen_by_name
for x in names:
try:
y=get(x)
except Errors.WafError:
self.env.append_value('CSFLAGS','/reference:%s'%x)
continue
y.post()
tsk=getattr(y,'cs_task',None)or getattr(y,'link_task',None)
if not tsk:
self.bld.fatal('cs task has no link task for use %r'%self)
self.cs_task.dep_nodes.extend(tsk.outputs)
self.cs_task.set_run_after(tsk)
self.env.append_value('CSFLAGS','/reference:%s'%tsk.outputs[0].abspath())
@feature('cs')
@after_method('apply_cs','use_cs')
def debug_cs(self):
csdebug=getattr(self,'csdebug',self.env.CSDEBUG)
if not csdebug:
return
node=self.cs_task.outputs[0]
if self.env.CS_NAME=='mono':
out=node.parent.find_or_declare(node.name+'.mdb')
else:
out=node.change_ext('.pdb')
self.cs_task.outputs.append(out)
try:
self.install_task.source.append(out)
except AttributeError:
pass
if csdebug=='pdbonly':
val=['/debug+','/debug:pdbonly']
elif csdebug=='full':
val=['/debug+','/debug:full']
else:
val=['/debug-']
self.env.append_value('CSFLAGS',val)
class mcs(Task.Task):
color='YELLOW'
run_str='${MCS} ${CSTYPE} ${CSFLAGS} ${ASS_ST:ASSEMBLIES} ${RES_ST:RESOURCES} ${OUT} ${SRC}'
def exec_command(self,cmd,**kw):
bld=self.generator.bld
try:
if not kw.get('cwd',None):
kw['cwd']=bld.cwd
except AttributeError:
bld.cwd=kw['cwd']=bld.variant_dir
try:
tmp=None
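# Response-file workaround: if the joined command line would exceed the
# ~8 KiB limit, write the quoted arguments (one per line) to a temp file and
# invoke the compiler with "@<tmpfile>" instead, cleaning the file up after.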
if isinstance(cmd,list)and len(' '.join(cmd))>=8192:
program=cmd[0]
cmd=[self.quote_response_command(x)for x in cmd]
(fd,tmp)=tempfile.mkstemp()
os.write(fd,'\r\n'.join(i.replace('\\','\\\\')for i in cmd[1:]))
os.close(fd)
cmd=[program,'@'+tmp]
ret=self.generator.bld.exec_command(cmd,**kw)
finally:
if tmp:
try:
os.remove(tmp)
except OSError:
pass
return ret
def quote_response_command(self,flag):
if flag.lower()=='/noconfig':
return''
if flag.find(' ')>-1:
for x in('/r:','/reference:','/resource:','/lib:','/out:'):
if flag.startswith(x):
flag='%s"%s"'%(x,'","'.join(flag[len(x):].split(',')))
break
else:
flag='"%s"'%flag
return flag
def configure(conf):
csc=getattr(Options.options,'cscbinary',None)
if csc:
conf.env.MCS=csc
conf.find_program(['csc','mcs','gmcs'],var='MCS')
conf.env.ASS_ST='/r:%s'
conf.env.RES_ST='/resource:%s'
conf.env.CS_NAME='csc'
if str(conf.env.MCS).lower().find('mcs')>-1:
conf.env.CS_NAME='mono'
def options(opt):
opt.add_option('--with-csc-binary',type='string',dest='cscbinary')
class fake_csshlib(Task.Task):
color='YELLOW'
inst_to=None
def runnable_status(self):
for x in self.outputs:
x.sig=Utils.h_file(x.abspath())
return Task.SKIP_ME
@conf
def read_csshlib(self,name,paths=[]):
return self(name=name,features='fake_lib',lib_paths=paths,lib_type='csshlib')
|
{
"content_hash": "2ce68004bd3fa7e49e2da7c8c2c874b3",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 98,
"avg_line_length": 31.40625,
"alnum_prop": 0.6703980099502488,
"repo_name": "romejoe/CByteStream",
"id": "a78e13821da2853034623d6d5ddf7f6669eaaca9",
"size": "4165",
"binary": false,
"copies": "198",
"ref": "refs/heads/master",
"path": ".waf-1.7.15-9c6c439a6416a92b3e844736c4ef3c7b/waflib/Tools/cs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1970"
},
{
"name": "C++",
"bytes": "47"
},
{
"name": "Python",
"bytes": "379338"
}
],
"symlink_target": ""
}
|
import threading
# Monkeypatch xmlsec.initialize() to only run once (https://github.com/ansible/ansible-tower/issues/3241).
xmlsec_init_lock = threading.Lock()
xmlsec_initialized = False
import dm.xmlsec.binding # noqa
original_xmlsec_initialize = dm.xmlsec.binding.initialize
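# Lock-plus-flag guard: every caller serializes on the lock, but only the
# first actually runs the real initialize(); later calls become no-ops.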
def xmlsec_initialize(*args, **kwargs):
global xmlsec_init_lock, xmlsec_initialized, original_xmlsec_initialize
with xmlsec_init_lock:
if not xmlsec_initialized:
original_xmlsec_initialize(*args, **kwargs)
xmlsec_initialized = True
dm.xmlsec.binding.initialize = xmlsec_initialize
default_app_config = 'awx.sso.apps.SSOConfig'
|
{
"content_hash": "12ecf671a8176b03ae5edd2231e83c6b",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 106,
"avg_line_length": 29.636363636363637,
"alnum_prop": 0.7392638036809815,
"repo_name": "wwitzel3/awx",
"id": "aa65d65a11066de7663fa0cff792cffa21d76297",
"size": "720",
"binary": false,
"copies": "2",
"ref": "refs/heads/devel",
"path": "awx/sso/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "303046"
},
{
"name": "Dockerfile",
"bytes": "5713"
},
{
"name": "HTML",
"bytes": "496559"
},
{
"name": "JavaScript",
"bytes": "3513112"
},
{
"name": "Makefile",
"bytes": "21133"
},
{
"name": "PowerShell",
"bytes": "10176"
},
{
"name": "Python",
"bytes": "3904288"
},
{
"name": "Shell",
"bytes": "13833"
}
],
"symlink_target": ""
}
|
'''
Created on Jan 2, 2016
@author: Dustin
'''
import cv2
def save_all_interesections_as_white_stones(img):
'''
After the intersections have been properly identified, this will crop the image into
361 individual images, one for each intersection.
'''
w, h = 18,18
count = 0
global SAVE_WHITE_STONE_IMAGES, INTERSECTIONS
if SAVE_WHITE_STONE_IMAGES:
if len(INTERSECTIONS) == pow(BOARD_SIZE,2):
for inter in INTERSECTIONS:
x1 = inter[0] - (w / 2)
y1 = inter[1] - (h / 2)
x2 = inter[0] + (w / 2)
y2 = inter[1] + (h / 2)
cv2.imwrite(POS_WHITE_TRAINING_IMAGES_DIR+"white_"+str(count)+".jpg", crop(img,x1,y1,x2,y2))
count+=1
print("Just produced "+ str(count)+" white stone images")
SAVE_WHITE_STONE_IMAGES = False
def save_all_intersections_as_neg_images(img):
'''
After the intersections have been properly identified, this will crop the image into
361 individual images, one for each intersection.
'''
w, h = 18,18
count = 0
global SAVE_NEG_IMAGES, INTERSECTIONS
if SAVE_NEG_IMAGES:
if len(INTERSECTIONS) == pow(BOARD_SIZE,2):
for inter in INTERSECTIONS:
x1 = inter[0] - (w / 2)
y1 = inter[1] - (h / 2)
x2 = inter[0] + (w / 2)
y2 = inter[1] + (h / 2)
cv2.imwrite(NEG_TRAINING_IMAGES_DIR+"neg_"+str(count)+".jpg", crop(img,x1,y1,x2,y2))
count+=1
print("Just produced "+ str(count)+" negative images")
SAVE_NEG_IMAGES = False
def save_all_intersections_as_white_stones(img):
'''
After the intersections have been properly identified, this will crop the image into
361 individual images, one for each intersection.
'''
w, h = 14,14
count = 0
global SAVE_WHITE_STONE_IMAGES, INTERSECTIONS
if SAVE_WHITE_STONE_IMAGES:
if len(INTERSECTIONS) == pow(BOARD_SIZE,2):
for inter in INTERSECTIONS:
x1 = inter[0] - (w / 2)
y1 = inter[1] - (h / 2)
x2 = inter[0] + (w / 2)
y2 = inter[1] + (h / 2)
cv2.imwrite(POS_WHITE_TRAINING_IMAGES_DIR+"white_"+str(count)+".jpg", crop(img,x1,y1,x2,y2))
count+=1
print("Just produced "+ str(count)+" white stone images")
SAVE_WHITE_STONE_IMAGES = False
|
{
"content_hash": "f84a03a1db65b3527878419cda917640",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 108,
"avg_line_length": 37.014925373134325,
"alnum_prop": 0.5479838709677419,
"repo_name": "dtdannen/pocket-sai",
"id": "974d2b89921f3c5380c5784fc26c261c256543f3",
"size": "2480",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main/classifiers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "73427"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from django.conf import settings # noqa
from django import http
from django.test.utils import override_settings # noqa
from mox import IsA # noqa
from novaclient.v1_1 import servers
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
class ServerWrapperTests(test.TestCase):
def test_get_base_attribute(self):
server = api.nova.Server(self.servers.first(), self.request)
self.assertEqual(server.id, self.servers.first().id)
def test_image_name(self):
image = self.images.first()
self.mox.StubOutWithMock(api.glance, 'image_get')
api.glance.image_get(IsA(http.HttpRequest),
image.id).AndReturn(image)
self.mox.ReplayAll()
server = api.nova.Server(self.servers.first(), self.request)
self.assertEqual(server.image_name, image.name)
class ComputeApiTests(test.APITestCase):
def test_server_reboot(self):
server = self.servers.first()
HARDNESS = servers.REBOOT_HARD
novaclient = self.stub_novaclient()
novaclient.servers = self.mox.CreateMockAnything()
novaclient.servers.reboot(server.id, HARDNESS)
self.mox.ReplayAll()
ret_val = api.nova.server_reboot(self.request, server.id)
self.assertIsNone(ret_val)
def test_server_soft_reboot(self):
server = self.servers.first()
HARDNESS = servers.REBOOT_SOFT
novaclient = self.stub_novaclient()
novaclient.servers = self.mox.CreateMockAnything()
novaclient.servers.reboot(server.id, HARDNESS)
self.mox.ReplayAll()
ret_val = api.nova.server_reboot(self.request, server.id, HARDNESS)
self.assertIsNone(ret_val)
def test_server_vnc_console(self):
server = self.servers.first()
console = self.servers.vnc_console_data
console_type = console["console"]["type"]
novaclient = self.stub_novaclient()
novaclient.servers = self.mox.CreateMockAnything()
novaclient.servers.get_vnc_console(server.id,
console_type).AndReturn(console)
self.mox.ReplayAll()
ret_val = api.nova.server_vnc_console(self.request,
server.id,
console_type)
self.assertIsInstance(ret_val, api.nova.VNCConsole)
def test_server_spice_console(self):
server = self.servers.first()
console = self.servers.spice_console_data
console_type = console["console"]["type"]
novaclient = self.stub_novaclient()
novaclient.servers = self.mox.CreateMockAnything()
novaclient.servers.get_spice_console(server.id,
console_type).AndReturn(console)
self.mox.ReplayAll()
ret_val = api.nova.server_spice_console(self.request,
server.id,
console_type)
self.assertIsInstance(ret_val, api.nova.SPICEConsole)
def test_server_list(self):
servers = self.servers.list()
novaclient = self.stub_novaclient()
novaclient.servers = self.mox.CreateMockAnything()
novaclient.servers.list(True, {'all_tenants': True}).AndReturn(servers)
self.mox.ReplayAll()
ret_val, has_more = api.nova.server_list(self.request,
all_tenants=True)
for server in ret_val:
self.assertIsInstance(server, api.nova.Server)
def test_server_list_pagination(self):
page_size = getattr(settings, 'API_RESULT_PAGE_SIZE', 20)
servers = self.servers.list()
novaclient = self.stub_novaclient()
novaclient.servers = self.mox.CreateMockAnything()
novaclient.servers.list(True,
{'all_tenants': True,
'marker': None,
'limit': page_size + 1}).AndReturn(servers)
self.mox.ReplayAll()
ret_val, has_more = api.nova.server_list(self.request,
{'marker': None,
'paginate': True},
all_tenants=True)
for server in ret_val:
self.assertIsInstance(server, api.nova.Server)
self.assertFalse(has_more)
@override_settings(API_RESULT_PAGE_SIZE=1)
def test_server_list_pagination_more(self):
page_size = getattr(settings, 'API_RESULT_PAGE_SIZE', 1)
servers = self.servers.list()
novaclient = self.stub_novaclient()
novaclient.servers = self.mox.CreateMockAnything()
novaclient.servers.list(True,
{'all_tenants': True,
'marker': None,
'limit': page_size + 1}) \
.AndReturn(servers[:page_size + 1])
self.mox.ReplayAll()
ret_val, has_more = api.nova.server_list(self.request,
{'marker': None,
'paginate': True},
all_tenants=True)
for server in ret_val:
self.assertIsInstance(server, api.nova.Server)
self.assertEqual(page_size, len(ret_val))
self.assertTrue(has_more)
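# The two pagination tests above exercise the usual "has more" probe: request
# page_size + 1 rows, hand back only page_size of them, and let the extra
# row's presence drive has_more.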
def test_usage_get(self):
novaclient = self.stub_novaclient()
novaclient.usage = self.mox.CreateMockAnything()
novaclient.usage.get(self.tenant.id,
'start',
'end').AndReturn(self.usages.first())
self.mox.ReplayAll()
ret_val = api.nova.usage_get(self.request, self.tenant.id,
'start', 'end')
self.assertIsInstance(ret_val, api.nova.NovaUsage)
def test_usage_list(self):
usages = self.usages.list()
novaclient = self.stub_novaclient()
novaclient.usage = self.mox.CreateMockAnything()
novaclient.usage.list('start', 'end', True).AndReturn(usages)
self.mox.ReplayAll()
ret_val = api.nova.usage_list(self.request, 'start', 'end')
for usage in ret_val:
self.assertIsInstance(usage, api.nova.NovaUsage)
def test_server_get(self):
server = self.servers.first()
novaclient = self.stub_novaclient()
novaclient.servers = self.mox.CreateMockAnything()
novaclient.servers.get(server.id).AndReturn(server)
self.mox.ReplayAll()
ret_val = api.nova.server_get(self.request, server.id)
self.assertIsInstance(ret_val, api.nova.Server)
def test_absolute_limits_handle_unlimited(self):
values = {"maxTotalCores": -1, "maxTotalInstances": 10}
limits = self.mox.CreateMockAnything()
limits.absolute = []
for key, val in values.iteritems():
limit = self.mox.CreateMockAnything()
limit.name = key
limit.value = val
limits.absolute.append(limit)
novaclient = self.stub_novaclient()
novaclient.limits = self.mox.CreateMockAnything()
novaclient.limits.get(reserved=True).AndReturn(limits)
self.mox.ReplayAll()
ret_val = api.nova.tenant_absolute_limits(self.request, reserved=True)
expected_results = {"maxTotalCores": float("inf"),
"maxTotalInstances": 10}
for key in expected_results.keys():
self.assertEqual(ret_val[key], expected_results[key])
|
{
"content_hash": "13981072891ecdfbd0c744a96dc6dabe",
"timestamp": "",
"source": "github",
"line_count": 197,
"max_line_length": 79,
"avg_line_length": 39.35532994923858,
"alnum_prop": 0.5757771185347608,
"repo_name": "kaiweifan/horizon",
"id": "19ec38575ea1dd3ea41fd2b6b32803fee1681303",
"size": "8624",
"binary": false,
"copies": "11",
"ref": "refs/heads/vip2",
"path": "openstack_dashboard/test/api_tests/nova_tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "160827"
},
{
"name": "JavaScript",
"bytes": "360901"
},
{
"name": "Python",
"bytes": "2832603"
},
{
"name": "Shell",
"bytes": "12986"
}
],
"symlink_target": ""
}
|
from django.conf.urls import patterns
from django.conf.urls import url
urlpatterns = patterns('geonition_client.views',
#javascript API for the REST
url(r'^geonition.js',
'javascript_api',
name="api_javascript"),
#test page for the REST API
url(r'^test.html',
'test_api',
name="api_test"),
#get a csfr token for REST clients
url(r'^csrf',
'csrf',
name="api_csrf"),
)
|
{
"content_hash": "75ca957ac0b81f4de5b7d9625a37923b",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 48,
"avg_line_length": 26.82608695652174,
"alnum_prop": 0.4538087520259319,
"repo_name": "geonition/django_geonition_client",
"id": "a9222c0328e7cde9cae26586478b875c46bb9800",
"size": "641",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "geonition_client/urls.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "2213"
},
{
"name": "Python",
"bytes": "4355"
}
],
"symlink_target": ""
}
|
from typing import Any, Optional, TYPE_CHECKING
from azure.mgmt.core import AsyncARMPipelineClient
from azure.profiles import KnownProfiles, ProfileDefinition
from azure.profiles.multiapiclient import MultiApiClientMixin
from .._serialization import Deserializer, Serializer
from ._configuration import ComputeManagementClientConfiguration
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class _SDKClient(object):
def __init__(self, *args, **kwargs):
"""This is a fake class to support current implemetation of MultiApiClientMixin."
Will be removed in final version of multiapi azure-core based client
"""
pass
class ComputeManagementClient(MultiApiClientMixin, _SDKClient):
"""Compute Client.
This client contains multiple API versions, to help you deal with all of the Azure clouds
(Azure Stack, Azure Government, Azure China, etc.).
By default, it uses the latest API version available on public Azure.
For production, you should stick to a particular api-version and/or profile.
The profile sets a mapping between an operation group and its API version.
The api-version parameter sets the default API version if the operation
group is not described in the profile.
:param credential: Credential needed for the client to connect to Azure. Required.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms part of the URI for every service call. Required.
:type subscription_id: str
:param api_version: API version to use if no profile is provided, or if missing in profile.
:type api_version: str
:param base_url: Service URL
:type base_url: str
:param profile: A profile definition, from KnownProfiles to dict.
:type profile: azure.profiles.KnownProfiles
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
"""
DEFAULT_API_VERSION = '2022-08-01'
_PROFILE_TAG = "azure.mgmt.compute.ComputeManagementClient"
LATEST_PROFILE = ProfileDefinition({
_PROFILE_TAG: {
None: DEFAULT_API_VERSION,
'cloud_service_operating_systems': '2022-04-04',
'cloud_service_role_instances': '2022-04-04',
'cloud_service_roles': '2022-04-04',
'cloud_services': '2022-04-04',
'cloud_services_update_domain': '2022-04-04',
'community_galleries': '2022-03-03',
'community_gallery_image_versions': '2022-03-03',
'community_gallery_images': '2022-03-03',
'disk_accesses': '2022-07-02',
'disk_encryption_sets': '2022-07-02',
'disk_restore_point': '2022-07-02',
'disks': '2022-07-02',
'galleries': '2022-03-03',
'gallery_application_versions': '2022-03-03',
'gallery_applications': '2022-03-03',
'gallery_image_versions': '2022-03-03',
'gallery_images': '2022-03-03',
'gallery_sharing_profile': '2022-03-03',
'resource_skus': '2021-07-01',
'shared_galleries': '2022-03-03',
'shared_gallery_image_versions': '2022-03-03',
'shared_gallery_images': '2022-03-03',
'snapshots': '2022-07-02',
}},
_PROFILE_TAG + " latest"
)
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
api_version: Optional[str] = None,
base_url: str = "https://management.azure.com",
profile: KnownProfiles = KnownProfiles.default,
**kwargs # type: Any
) -> None:
self._config = ComputeManagementClientConfiguration(credential, subscription_id, **kwargs)
self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
super(ComputeManagementClient, self).__init__(
api_version=api_version,
profile=profile
)
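# Example (illustrative): pin the whole client to one API version, as the
# class docstring recommends for production use:
#   client = ComputeManagementClient(credential, subscription_id,
#                                    api_version="2022-08-01")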
@classmethod
def _models_dict(cls, api_version):
return {k: v for k, v in cls.models(api_version).__dict__.items() if isinstance(v, type)}
@classmethod
def models(cls, api_version=DEFAULT_API_VERSION):
"""Module depends on the API version:
* 2015-06-15: :mod:`v2015_06_15.models<azure.mgmt.compute.v2015_06_15.models>`
* 2016-03-30: :mod:`v2016_03_30.models<azure.mgmt.compute.v2016_03_30.models>`
* 2016-04-30-preview: :mod:`v2016_04_30_preview.models<azure.mgmt.compute.v2016_04_30_preview.models>`
* 2017-03-30: :mod:`v2017_03_30.models<azure.mgmt.compute.v2017_03_30.models>`
* 2017-09-01: :mod:`v2017_09_01.models<azure.mgmt.compute.v2017_09_01.models>`
* 2017-12-01: :mod:`v2017_12_01.models<azure.mgmt.compute.v2017_12_01.models>`
* 2018-04-01: :mod:`v2018_04_01.models<azure.mgmt.compute.v2018_04_01.models>`
* 2018-06-01: :mod:`v2018_06_01.models<azure.mgmt.compute.v2018_06_01.models>`
* 2018-09-30: :mod:`v2018_09_30.models<azure.mgmt.compute.v2018_09_30.models>`
* 2018-10-01: :mod:`v2018_10_01.models<azure.mgmt.compute.v2018_10_01.models>`
* 2019-03-01: :mod:`v2019_03_01.models<azure.mgmt.compute.v2019_03_01.models>`
* 2019-04-01: :mod:`v2019_04_01.models<azure.mgmt.compute.v2019_04_01.models>`
* 2019-07-01: :mod:`v2019_07_01.models<azure.mgmt.compute.v2019_07_01.models>`
* 2019-11-01: :mod:`v2019_11_01.models<azure.mgmt.compute.v2019_11_01.models>`
* 2019-12-01: :mod:`v2019_12_01.models<azure.mgmt.compute.v2019_12_01.models>`
* 2020-05-01: :mod:`v2020_05_01.models<azure.mgmt.compute.v2020_05_01.models>`
* 2020-06-01: :mod:`v2020_06_01.models<azure.mgmt.compute.v2020_06_01.models>`
* 2020-06-30: :mod:`v2020_06_30.models<azure.mgmt.compute.v2020_06_30.models>`
* 2020-09-30: :mod:`v2020_09_30.models<azure.mgmt.compute.v2020_09_30.models>`
* 2020-10-01-preview: :mod:`v2020_10_01_preview.models<azure.mgmt.compute.v2020_10_01_preview.models>`
* 2020-12-01: :mod:`v2020_12_01.models<azure.mgmt.compute.v2020_12_01.models>`
* 2021-03-01: :mod:`v2021_03_01.models<azure.mgmt.compute.v2021_03_01.models>`
* 2021-04-01: :mod:`v2021_04_01.models<azure.mgmt.compute.v2021_04_01.models>`
* 2021-07-01: :mod:`v2021_07_01.models<azure.mgmt.compute.v2021_07_01.models>`
* 2021-08-01: :mod:`v2021_08_01.models<azure.mgmt.compute.v2021_08_01.models>`
* 2021-10-01: :mod:`v2021_10_01.models<azure.mgmt.compute.v2021_10_01.models>`
* 2021-11-01: :mod:`v2021_11_01.models<azure.mgmt.compute.v2021_11_01.models>`
* 2021-12-01: :mod:`v2021_12_01.models<azure.mgmt.compute.v2021_12_01.models>`
* 2022-01-03: :mod:`v2022_01_03.models<azure.mgmt.compute.v2022_01_03.models>`
* 2022-03-01: :mod:`v2022_03_01.models<azure.mgmt.compute.v2022_03_01.models>`
* 2022-03-02: :mod:`v2022_03_02.models<azure.mgmt.compute.v2022_03_02.models>`
* 2022-03-03: :mod:`v2022_03_03.models<azure.mgmt.compute.v2022_03_03.models>`
* 2022-04-04: :mod:`v2022_04_04.models<azure.mgmt.compute.v2022_04_04.models>`
* 2022-07-02: :mod:`v2022_07_02.models<azure.mgmt.compute.v2022_07_02.models>`
* 2022-08-01: :mod:`v2022_08_01.models<azure.mgmt.compute.v2022_08_01.models>`
"""
if api_version == '2015-06-15':
from ..v2015_06_15 import models
return models
elif api_version == '2016-03-30':
from ..v2016_03_30 import models
return models
elif api_version == '2016-04-30-preview':
from ..v2016_04_30_preview import models
return models
elif api_version == '2017-03-30':
from ..v2017_03_30 import models
return models
elif api_version == '2017-09-01':
from ..v2017_09_01 import models
return models
elif api_version == '2017-12-01':
from ..v2017_12_01 import models
return models
elif api_version == '2018-04-01':
from ..v2018_04_01 import models
return models
elif api_version == '2018-06-01':
from ..v2018_06_01 import models
return models
elif api_version == '2018-09-30':
from ..v2018_09_30 import models
return models
elif api_version == '2018-10-01':
from ..v2018_10_01 import models
return models
elif api_version == '2019-03-01':
from ..v2019_03_01 import models
return models
elif api_version == '2019-04-01':
from ..v2019_04_01 import models
return models
elif api_version == '2019-07-01':
from ..v2019_07_01 import models
return models
elif api_version == '2019-11-01':
from ..v2019_11_01 import models
return models
elif api_version == '2019-12-01':
from ..v2019_12_01 import models
return models
elif api_version == '2020-05-01':
from ..v2020_05_01 import models
return models
elif api_version == '2020-06-01':
from ..v2020_06_01 import models
return models
elif api_version == '2020-06-30':
from ..v2020_06_30 import models
return models
elif api_version == '2020-09-30':
from ..v2020_09_30 import models
return models
elif api_version == '2020-10-01-preview':
from ..v2020_10_01_preview import models
return models
elif api_version == '2020-12-01':
from ..v2020_12_01 import models
return models
elif api_version == '2021-03-01':
from ..v2021_03_01 import models
return models
elif api_version == '2021-04-01':
from ..v2021_04_01 import models
return models
elif api_version == '2021-07-01':
from ..v2021_07_01 import models
return models
elif api_version == '2021-08-01':
from ..v2021_08_01 import models
return models
elif api_version == '2021-10-01':
from ..v2021_10_01 import models
return models
elif api_version == '2021-11-01':
from ..v2021_11_01 import models
return models
elif api_version == '2021-12-01':
from ..v2021_12_01 import models
return models
elif api_version == '2022-01-03':
from ..v2022_01_03 import models
return models
elif api_version == '2022-03-01':
from ..v2022_03_01 import models
return models
elif api_version == '2022-03-02':
from ..v2022_03_02 import models
return models
elif api_version == '2022-03-03':
from ..v2022_03_03 import models
return models
elif api_version == '2022-04-04':
from ..v2022_04_04 import models
return models
elif api_version == '2022-07-02':
from ..v2022_07_02 import models
return models
elif api_version == '2022-08-01':
from ..v2022_08_01 import models
return models
raise ValueError("API version {} is not available".format(api_version))
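    # Usage sketch (illustrative): resolve the models module for a pinned
    # API version and instantiate a model class from it. ``VirtualMachine``
    # is assumed to exist in the chosen version's models module.
    #
    #     models = ComputeManagementClient.models(api_version="2022-08-01")
    #     vm = models.VirtualMachine(location="eastus")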
@property
def availability_sets(self):
"""Instance depends on the API version:
* 2015-06-15: :class:`AvailabilitySetsOperations<azure.mgmt.compute.v2015_06_15.aio.operations.AvailabilitySetsOperations>`
* 2016-03-30: :class:`AvailabilitySetsOperations<azure.mgmt.compute.v2016_03_30.aio.operations.AvailabilitySetsOperations>`
* 2016-04-30-preview: :class:`AvailabilitySetsOperations<azure.mgmt.compute.v2016_04_30_preview.aio.operations.AvailabilitySetsOperations>`
* 2017-03-30: :class:`AvailabilitySetsOperations<azure.mgmt.compute.v2017_03_30.aio.operations.AvailabilitySetsOperations>`
* 2017-12-01: :class:`AvailabilitySetsOperations<azure.mgmt.compute.v2017_12_01.aio.operations.AvailabilitySetsOperations>`
* 2018-04-01: :class:`AvailabilitySetsOperations<azure.mgmt.compute.v2018_04_01.aio.operations.AvailabilitySetsOperations>`
* 2018-06-01: :class:`AvailabilitySetsOperations<azure.mgmt.compute.v2018_06_01.aio.operations.AvailabilitySetsOperations>`
* 2018-10-01: :class:`AvailabilitySetsOperations<azure.mgmt.compute.v2018_10_01.aio.operations.AvailabilitySetsOperations>`
* 2019-03-01: :class:`AvailabilitySetsOperations<azure.mgmt.compute.v2019_03_01.aio.operations.AvailabilitySetsOperations>`
* 2019-07-01: :class:`AvailabilitySetsOperations<azure.mgmt.compute.v2019_07_01.aio.operations.AvailabilitySetsOperations>`
* 2019-12-01: :class:`AvailabilitySetsOperations<azure.mgmt.compute.v2019_12_01.aio.operations.AvailabilitySetsOperations>`
* 2020-06-01: :class:`AvailabilitySetsOperations<azure.mgmt.compute.v2020_06_01.aio.operations.AvailabilitySetsOperations>`
* 2020-12-01: :class:`AvailabilitySetsOperations<azure.mgmt.compute.v2020_12_01.aio.operations.AvailabilitySetsOperations>`
* 2021-03-01: :class:`AvailabilitySetsOperations<azure.mgmt.compute.v2021_03_01.aio.operations.AvailabilitySetsOperations>`
* 2021-04-01: :class:`AvailabilitySetsOperations<azure.mgmt.compute.v2021_04_01.aio.operations.AvailabilitySetsOperations>`
* 2021-07-01: :class:`AvailabilitySetsOperations<azure.mgmt.compute.v2021_07_01.aio.operations.AvailabilitySetsOperations>`
* 2021-11-01: :class:`AvailabilitySetsOperations<azure.mgmt.compute.v2021_11_01.aio.operations.AvailabilitySetsOperations>`
* 2022-03-01: :class:`AvailabilitySetsOperations<azure.mgmt.compute.v2022_03_01.aio.operations.AvailabilitySetsOperations>`
* 2022-08-01: :class:`AvailabilitySetsOperations<azure.mgmt.compute.v2022_08_01.aio.operations.AvailabilitySetsOperations>`
"""
api_version = self._get_api_version('availability_sets')
if api_version == '2015-06-15':
from ..v2015_06_15.aio.operations import AvailabilitySetsOperations as OperationClass
elif api_version == '2016-03-30':
from ..v2016_03_30.aio.operations import AvailabilitySetsOperations as OperationClass
elif api_version == '2016-04-30-preview':
from ..v2016_04_30_preview.aio.operations import AvailabilitySetsOperations as OperationClass
elif api_version == '2017-03-30':
from ..v2017_03_30.aio.operations import AvailabilitySetsOperations as OperationClass
elif api_version == '2017-12-01':
from ..v2017_12_01.aio.operations import AvailabilitySetsOperations as OperationClass
elif api_version == '2018-04-01':
from ..v2018_04_01.aio.operations import AvailabilitySetsOperations as OperationClass
elif api_version == '2018-06-01':
from ..v2018_06_01.aio.operations import AvailabilitySetsOperations as OperationClass
elif api_version == '2018-10-01':
from ..v2018_10_01.aio.operations import AvailabilitySetsOperations as OperationClass
elif api_version == '2019-03-01':
from ..v2019_03_01.aio.operations import AvailabilitySetsOperations as OperationClass
elif api_version == '2019-07-01':
from ..v2019_07_01.aio.operations import AvailabilitySetsOperations as OperationClass
elif api_version == '2019-12-01':
from ..v2019_12_01.aio.operations import AvailabilitySetsOperations as OperationClass
elif api_version == '2020-06-01':
from ..v2020_06_01.aio.operations import AvailabilitySetsOperations as OperationClass
elif api_version == '2020-12-01':
from ..v2020_12_01.aio.operations import AvailabilitySetsOperations as OperationClass
elif api_version == '2021-03-01':
from ..v2021_03_01.aio.operations import AvailabilitySetsOperations as OperationClass
elif api_version == '2021-04-01':
from ..v2021_04_01.aio.operations import AvailabilitySetsOperations as OperationClass
elif api_version == '2021-07-01':
from ..v2021_07_01.aio.operations import AvailabilitySetsOperations as OperationClass
elif api_version == '2021-11-01':
from ..v2021_11_01.aio.operations import AvailabilitySetsOperations as OperationClass
elif api_version == '2022-03-01':
from ..v2022_03_01.aio.operations import AvailabilitySetsOperations as OperationClass
elif api_version == '2022-08-01':
from ..v2022_08_01.aio.operations import AvailabilitySetsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'availability_sets'".format(api_version))
self._config.api_version = api_version
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
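    # Every operation-group property below follows this same dispatch pattern:
    # resolve the effective API version from the active profile, import the
    # matching versioned operations class, and wire it to the shared pipeline
    # and serializers. Illustrative async usage (call names assumed, not
    # generated here):
    #
    #     async for aset in client.availability_sets.list(resource_group_name="rg"):
    #         print(aset.name)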
@property
def capacity_reservation_groups(self):
"""Instance depends on the API version:
* 2021-04-01: :class:`CapacityReservationGroupsOperations<azure.mgmt.compute.v2021_04_01.aio.operations.CapacityReservationGroupsOperations>`
* 2021-07-01: :class:`CapacityReservationGroupsOperations<azure.mgmt.compute.v2021_07_01.aio.operations.CapacityReservationGroupsOperations>`
* 2021-11-01: :class:`CapacityReservationGroupsOperations<azure.mgmt.compute.v2021_11_01.aio.operations.CapacityReservationGroupsOperations>`
* 2022-03-01: :class:`CapacityReservationGroupsOperations<azure.mgmt.compute.v2022_03_01.aio.operations.CapacityReservationGroupsOperations>`
* 2022-08-01: :class:`CapacityReservationGroupsOperations<azure.mgmt.compute.v2022_08_01.aio.operations.CapacityReservationGroupsOperations>`
"""
api_version = self._get_api_version('capacity_reservation_groups')
if api_version == '2021-04-01':
from ..v2021_04_01.aio.operations import CapacityReservationGroupsOperations as OperationClass
elif api_version == '2021-07-01':
from ..v2021_07_01.aio.operations import CapacityReservationGroupsOperations as OperationClass
elif api_version == '2021-11-01':
from ..v2021_11_01.aio.operations import CapacityReservationGroupsOperations as OperationClass
elif api_version == '2022-03-01':
from ..v2022_03_01.aio.operations import CapacityReservationGroupsOperations as OperationClass
elif api_version == '2022-08-01':
from ..v2022_08_01.aio.operations import CapacityReservationGroupsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'capacity_reservation_groups'".format(api_version))
self._config.api_version = api_version
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def capacity_reservations(self):
"""Instance depends on the API version:
* 2021-04-01: :class:`CapacityReservationsOperations<azure.mgmt.compute.v2021_04_01.aio.operations.CapacityReservationsOperations>`
* 2021-07-01: :class:`CapacityReservationsOperations<azure.mgmt.compute.v2021_07_01.aio.operations.CapacityReservationsOperations>`
* 2021-11-01: :class:`CapacityReservationsOperations<azure.mgmt.compute.v2021_11_01.aio.operations.CapacityReservationsOperations>`
* 2022-03-01: :class:`CapacityReservationsOperations<azure.mgmt.compute.v2022_03_01.aio.operations.CapacityReservationsOperations>`
* 2022-08-01: :class:`CapacityReservationsOperations<azure.mgmt.compute.v2022_08_01.aio.operations.CapacityReservationsOperations>`
"""
api_version = self._get_api_version('capacity_reservations')
if api_version == '2021-04-01':
from ..v2021_04_01.aio.operations import CapacityReservationsOperations as OperationClass
elif api_version == '2021-07-01':
from ..v2021_07_01.aio.operations import CapacityReservationsOperations as OperationClass
elif api_version == '2021-11-01':
from ..v2021_11_01.aio.operations import CapacityReservationsOperations as OperationClass
elif api_version == '2022-03-01':
from ..v2022_03_01.aio.operations import CapacityReservationsOperations as OperationClass
elif api_version == '2022-08-01':
from ..v2022_08_01.aio.operations import CapacityReservationsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'capacity_reservations'".format(api_version))
self._config.api_version = api_version
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def cloud_service_operating_systems(self):
"""Instance depends on the API version:
* 2021-03-01: :class:`CloudServiceOperatingSystemsOperations<azure.mgmt.compute.v2021_03_01.aio.operations.CloudServiceOperatingSystemsOperations>`
* 2022-04-04: :class:`CloudServiceOperatingSystemsOperations<azure.mgmt.compute.v2022_04_04.aio.operations.CloudServiceOperatingSystemsOperations>`
"""
api_version = self._get_api_version('cloud_service_operating_systems')
if api_version == '2021-03-01':
from ..v2021_03_01.aio.operations import CloudServiceOperatingSystemsOperations as OperationClass
elif api_version == '2022-04-04':
from ..v2022_04_04.aio.operations import CloudServiceOperatingSystemsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'cloud_service_operating_systems'".format(api_version))
self._config.api_version = api_version
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def cloud_service_role_instances(self):
"""Instance depends on the API version:
* 2020-10-01-preview: :class:`CloudServiceRoleInstancesOperations<azure.mgmt.compute.v2020_10_01_preview.aio.operations.CloudServiceRoleInstancesOperations>`
* 2021-03-01: :class:`CloudServiceRoleInstancesOperations<azure.mgmt.compute.v2021_03_01.aio.operations.CloudServiceRoleInstancesOperations>`
* 2022-04-04: :class:`CloudServiceRoleInstancesOperations<azure.mgmt.compute.v2022_04_04.aio.operations.CloudServiceRoleInstancesOperations>`
"""
api_version = self._get_api_version('cloud_service_role_instances')
if api_version == '2020-10-01-preview':
from ..v2020_10_01_preview.aio.operations import CloudServiceRoleInstancesOperations as OperationClass
elif api_version == '2021-03-01':
from ..v2021_03_01.aio.operations import CloudServiceRoleInstancesOperations as OperationClass
elif api_version == '2022-04-04':
from ..v2022_04_04.aio.operations import CloudServiceRoleInstancesOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'cloud_service_role_instances'".format(api_version))
self._config.api_version = api_version
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def cloud_service_roles(self):
"""Instance depends on the API version:
* 2020-10-01-preview: :class:`CloudServiceRolesOperations<azure.mgmt.compute.v2020_10_01_preview.aio.operations.CloudServiceRolesOperations>`
* 2021-03-01: :class:`CloudServiceRolesOperations<azure.mgmt.compute.v2021_03_01.aio.operations.CloudServiceRolesOperations>`
* 2022-04-04: :class:`CloudServiceRolesOperations<azure.mgmt.compute.v2022_04_04.aio.operations.CloudServiceRolesOperations>`
"""
api_version = self._get_api_version('cloud_service_roles')
if api_version == '2020-10-01-preview':
from ..v2020_10_01_preview.aio.operations import CloudServiceRolesOperations as OperationClass
elif api_version == '2021-03-01':
from ..v2021_03_01.aio.operations import CloudServiceRolesOperations as OperationClass
elif api_version == '2022-04-04':
from ..v2022_04_04.aio.operations import CloudServiceRolesOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'cloud_service_roles'".format(api_version))
self._config.api_version = api_version
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def cloud_services(self):
"""Instance depends on the API version:
* 2020-10-01-preview: :class:`CloudServicesOperations<azure.mgmt.compute.v2020_10_01_preview.aio.operations.CloudServicesOperations>`
* 2021-03-01: :class:`CloudServicesOperations<azure.mgmt.compute.v2021_03_01.aio.operations.CloudServicesOperations>`
* 2022-04-04: :class:`CloudServicesOperations<azure.mgmt.compute.v2022_04_04.aio.operations.CloudServicesOperations>`
"""
api_version = self._get_api_version('cloud_services')
if api_version == '2020-10-01-preview':
from ..v2020_10_01_preview.aio.operations import CloudServicesOperations as OperationClass
elif api_version == '2021-03-01':
from ..v2021_03_01.aio.operations import CloudServicesOperations as OperationClass
elif api_version == '2022-04-04':
from ..v2022_04_04.aio.operations import CloudServicesOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'cloud_services'".format(api_version))
self._config.api_version = api_version
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def cloud_services_update_domain(self):
"""Instance depends on the API version:
* 2020-10-01-preview: :class:`CloudServicesUpdateDomainOperations<azure.mgmt.compute.v2020_10_01_preview.aio.operations.CloudServicesUpdateDomainOperations>`
* 2021-03-01: :class:`CloudServicesUpdateDomainOperations<azure.mgmt.compute.v2021_03_01.aio.operations.CloudServicesUpdateDomainOperations>`
* 2022-04-04: :class:`CloudServicesUpdateDomainOperations<azure.mgmt.compute.v2022_04_04.aio.operations.CloudServicesUpdateDomainOperations>`
"""
api_version = self._get_api_version('cloud_services_update_domain')
if api_version == '2020-10-01-preview':
from ..v2020_10_01_preview.aio.operations import CloudServicesUpdateDomainOperations as OperationClass
elif api_version == '2021-03-01':
from ..v2021_03_01.aio.operations import CloudServicesUpdateDomainOperations as OperationClass
elif api_version == '2022-04-04':
from ..v2022_04_04.aio.operations import CloudServicesUpdateDomainOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'cloud_services_update_domain'".format(api_version))
self._config.api_version = api_version
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def community_galleries(self):
"""Instance depends on the API version:
* 2021-07-01: :class:`CommunityGalleriesOperations<azure.mgmt.compute.v2021_07_01.aio.operations.CommunityGalleriesOperations>`
* 2022-01-03: :class:`CommunityGalleriesOperations<azure.mgmt.compute.v2022_01_03.aio.operations.CommunityGalleriesOperations>`
* 2022-03-03: :class:`CommunityGalleriesOperations<azure.mgmt.compute.v2022_03_03.aio.operations.CommunityGalleriesOperations>`
"""
api_version = self._get_api_version('community_galleries')
if api_version == '2021-07-01':
from ..v2021_07_01.aio.operations import CommunityGalleriesOperations as OperationClass
elif api_version == '2022-01-03':
from ..v2022_01_03.aio.operations import CommunityGalleriesOperations as OperationClass
elif api_version == '2022-03-03':
from ..v2022_03_03.aio.operations import CommunityGalleriesOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'community_galleries'".format(api_version))
self._config.api_version = api_version
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def community_gallery_image_versions(self):
"""Instance depends on the API version:
* 2021-07-01: :class:`CommunityGalleryImageVersionsOperations<azure.mgmt.compute.v2021_07_01.aio.operations.CommunityGalleryImageVersionsOperations>`
* 2022-01-03: :class:`CommunityGalleryImageVersionsOperations<azure.mgmt.compute.v2022_01_03.aio.operations.CommunityGalleryImageVersionsOperations>`
* 2022-03-03: :class:`CommunityGalleryImageVersionsOperations<azure.mgmt.compute.v2022_03_03.aio.operations.CommunityGalleryImageVersionsOperations>`
"""
api_version = self._get_api_version('community_gallery_image_versions')
if api_version == '2021-07-01':
from ..v2021_07_01.aio.operations import CommunityGalleryImageVersionsOperations as OperationClass
elif api_version == '2022-01-03':
from ..v2022_01_03.aio.operations import CommunityGalleryImageVersionsOperations as OperationClass
elif api_version == '2022-03-03':
from ..v2022_03_03.aio.operations import CommunityGalleryImageVersionsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'community_gallery_image_versions'".format(api_version))
self._config.api_version = api_version
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def community_gallery_images(self):
"""Instance depends on the API version:
* 2021-07-01: :class:`CommunityGalleryImagesOperations<azure.mgmt.compute.v2021_07_01.aio.operations.CommunityGalleryImagesOperations>`
* 2022-01-03: :class:`CommunityGalleryImagesOperations<azure.mgmt.compute.v2022_01_03.aio.operations.CommunityGalleryImagesOperations>`
* 2022-03-03: :class:`CommunityGalleryImagesOperations<azure.mgmt.compute.v2022_03_03.aio.operations.CommunityGalleryImagesOperations>`
"""
api_version = self._get_api_version('community_gallery_images')
if api_version == '2021-07-01':
from ..v2021_07_01.aio.operations import CommunityGalleryImagesOperations as OperationClass
elif api_version == '2022-01-03':
from ..v2022_01_03.aio.operations import CommunityGalleryImagesOperations as OperationClass
elif api_version == '2022-03-03':
from ..v2022_03_03.aio.operations import CommunityGalleryImagesOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'community_gallery_images'".format(api_version))
self._config.api_version = api_version
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def dedicated_host_groups(self):
"""Instance depends on the API version:
* 2019-03-01: :class:`DedicatedHostGroupsOperations<azure.mgmt.compute.v2019_03_01.aio.operations.DedicatedHostGroupsOperations>`
* 2019-07-01: :class:`DedicatedHostGroupsOperations<azure.mgmt.compute.v2019_07_01.aio.operations.DedicatedHostGroupsOperations>`
* 2019-12-01: :class:`DedicatedHostGroupsOperations<azure.mgmt.compute.v2019_12_01.aio.operations.DedicatedHostGroupsOperations>`
* 2020-06-01: :class:`DedicatedHostGroupsOperations<azure.mgmt.compute.v2020_06_01.aio.operations.DedicatedHostGroupsOperations>`
* 2020-12-01: :class:`DedicatedHostGroupsOperations<azure.mgmt.compute.v2020_12_01.aio.operations.DedicatedHostGroupsOperations>`
* 2021-03-01: :class:`DedicatedHostGroupsOperations<azure.mgmt.compute.v2021_03_01.aio.operations.DedicatedHostGroupsOperations>`
* 2021-04-01: :class:`DedicatedHostGroupsOperations<azure.mgmt.compute.v2021_04_01.aio.operations.DedicatedHostGroupsOperations>`
* 2021-07-01: :class:`DedicatedHostGroupsOperations<azure.mgmt.compute.v2021_07_01.aio.operations.DedicatedHostGroupsOperations>`
* 2021-11-01: :class:`DedicatedHostGroupsOperations<azure.mgmt.compute.v2021_11_01.aio.operations.DedicatedHostGroupsOperations>`
* 2022-03-01: :class:`DedicatedHostGroupsOperations<azure.mgmt.compute.v2022_03_01.aio.operations.DedicatedHostGroupsOperations>`
* 2022-08-01: :class:`DedicatedHostGroupsOperations<azure.mgmt.compute.v2022_08_01.aio.operations.DedicatedHostGroupsOperations>`
"""
api_version = self._get_api_version('dedicated_host_groups')
if api_version == '2019-03-01':
from ..v2019_03_01.aio.operations import DedicatedHostGroupsOperations as OperationClass
elif api_version == '2019-07-01':
from ..v2019_07_01.aio.operations import DedicatedHostGroupsOperations as OperationClass
elif api_version == '2019-12-01':
from ..v2019_12_01.aio.operations import DedicatedHostGroupsOperations as OperationClass
elif api_version == '2020-06-01':
from ..v2020_06_01.aio.operations import DedicatedHostGroupsOperations as OperationClass
elif api_version == '2020-12-01':
from ..v2020_12_01.aio.operations import DedicatedHostGroupsOperations as OperationClass
elif api_version == '2021-03-01':
from ..v2021_03_01.aio.operations import DedicatedHostGroupsOperations as OperationClass
elif api_version == '2021-04-01':
from ..v2021_04_01.aio.operations import DedicatedHostGroupsOperations as OperationClass
elif api_version == '2021-07-01':
from ..v2021_07_01.aio.operations import DedicatedHostGroupsOperations as OperationClass
elif api_version == '2021-11-01':
from ..v2021_11_01.aio.operations import DedicatedHostGroupsOperations as OperationClass
elif api_version == '2022-03-01':
from ..v2022_03_01.aio.operations import DedicatedHostGroupsOperations as OperationClass
elif api_version == '2022-08-01':
from ..v2022_08_01.aio.operations import DedicatedHostGroupsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'dedicated_host_groups'".format(api_version))
self._config.api_version = api_version
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def dedicated_hosts(self):
"""Instance depends on the API version:
* 2019-03-01: :class:`DedicatedHostsOperations<azure.mgmt.compute.v2019_03_01.aio.operations.DedicatedHostsOperations>`
* 2019-07-01: :class:`DedicatedHostsOperations<azure.mgmt.compute.v2019_07_01.aio.operations.DedicatedHostsOperations>`
* 2019-12-01: :class:`DedicatedHostsOperations<azure.mgmt.compute.v2019_12_01.aio.operations.DedicatedHostsOperations>`
* 2020-06-01: :class:`DedicatedHostsOperations<azure.mgmt.compute.v2020_06_01.aio.operations.DedicatedHostsOperations>`
* 2020-12-01: :class:`DedicatedHostsOperations<azure.mgmt.compute.v2020_12_01.aio.operations.DedicatedHostsOperations>`
* 2021-03-01: :class:`DedicatedHostsOperations<azure.mgmt.compute.v2021_03_01.aio.operations.DedicatedHostsOperations>`
* 2021-04-01: :class:`DedicatedHostsOperations<azure.mgmt.compute.v2021_04_01.aio.operations.DedicatedHostsOperations>`
* 2021-07-01: :class:`DedicatedHostsOperations<azure.mgmt.compute.v2021_07_01.aio.operations.DedicatedHostsOperations>`
* 2021-11-01: :class:`DedicatedHostsOperations<azure.mgmt.compute.v2021_11_01.aio.operations.DedicatedHostsOperations>`
* 2022-03-01: :class:`DedicatedHostsOperations<azure.mgmt.compute.v2022_03_01.aio.operations.DedicatedHostsOperations>`
* 2022-08-01: :class:`DedicatedHostsOperations<azure.mgmt.compute.v2022_08_01.aio.operations.DedicatedHostsOperations>`
"""
api_version = self._get_api_version('dedicated_hosts')
if api_version == '2019-03-01':
from ..v2019_03_01.aio.operations import DedicatedHostsOperations as OperationClass
elif api_version == '2019-07-01':
from ..v2019_07_01.aio.operations import DedicatedHostsOperations as OperationClass
elif api_version == '2019-12-01':
from ..v2019_12_01.aio.operations import DedicatedHostsOperations as OperationClass
elif api_version == '2020-06-01':
from ..v2020_06_01.aio.operations import DedicatedHostsOperations as OperationClass
elif api_version == '2020-12-01':
from ..v2020_12_01.aio.operations import DedicatedHostsOperations as OperationClass
elif api_version == '2021-03-01':
from ..v2021_03_01.aio.operations import DedicatedHostsOperations as OperationClass
elif api_version == '2021-04-01':
from ..v2021_04_01.aio.operations import DedicatedHostsOperations as OperationClass
elif api_version == '2021-07-01':
from ..v2021_07_01.aio.operations import DedicatedHostsOperations as OperationClass
elif api_version == '2021-11-01':
from ..v2021_11_01.aio.operations import DedicatedHostsOperations as OperationClass
elif api_version == '2022-03-01':
from ..v2022_03_01.aio.operations import DedicatedHostsOperations as OperationClass
elif api_version == '2022-08-01':
from ..v2022_08_01.aio.operations import DedicatedHostsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'dedicated_hosts'".format(api_version))
self._config.api_version = api_version
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def disk_accesses(self):
"""Instance depends on the API version:
* 2020-05-01: :class:`DiskAccessesOperations<azure.mgmt.compute.v2020_05_01.aio.operations.DiskAccessesOperations>`
* 2020-06-30: :class:`DiskAccessesOperations<azure.mgmt.compute.v2020_06_30.aio.operations.DiskAccessesOperations>`
* 2020-09-30: :class:`DiskAccessesOperations<azure.mgmt.compute.v2020_09_30.aio.operations.DiskAccessesOperations>`
* 2020-12-01: :class:`DiskAccessesOperations<azure.mgmt.compute.v2020_12_01.aio.operations.DiskAccessesOperations>`
* 2021-04-01: :class:`DiskAccessesOperations<azure.mgmt.compute.v2021_04_01.aio.operations.DiskAccessesOperations>`
* 2021-08-01: :class:`DiskAccessesOperations<azure.mgmt.compute.v2021_08_01.aio.operations.DiskAccessesOperations>`
* 2021-12-01: :class:`DiskAccessesOperations<azure.mgmt.compute.v2021_12_01.aio.operations.DiskAccessesOperations>`
* 2022-03-02: :class:`DiskAccessesOperations<azure.mgmt.compute.v2022_03_02.aio.operations.DiskAccessesOperations>`
* 2022-07-02: :class:`DiskAccessesOperations<azure.mgmt.compute.v2022_07_02.aio.operations.DiskAccessesOperations>`
"""
api_version = self._get_api_version('disk_accesses')
if api_version == '2020-05-01':
from ..v2020_05_01.aio.operations import DiskAccessesOperations as OperationClass
elif api_version == '2020-06-30':
from ..v2020_06_30.aio.operations import DiskAccessesOperations as OperationClass
elif api_version == '2020-09-30':
from ..v2020_09_30.aio.operations import DiskAccessesOperations as OperationClass
elif api_version == '2020-12-01':
from ..v2020_12_01.aio.operations import DiskAccessesOperations as OperationClass
elif api_version == '2021-04-01':
from ..v2021_04_01.aio.operations import DiskAccessesOperations as OperationClass
elif api_version == '2021-08-01':
from ..v2021_08_01.aio.operations import DiskAccessesOperations as OperationClass
elif api_version == '2021-12-01':
from ..v2021_12_01.aio.operations import DiskAccessesOperations as OperationClass
elif api_version == '2022-03-02':
from ..v2022_03_02.aio.operations import DiskAccessesOperations as OperationClass
elif api_version == '2022-07-02':
from ..v2022_07_02.aio.operations import DiskAccessesOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'disk_accesses'".format(api_version))
self._config.api_version = api_version
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def disk_encryption_sets(self):
"""Instance depends on the API version:
* 2019-07-01: :class:`DiskEncryptionSetsOperations<azure.mgmt.compute.v2019_07_01.aio.operations.DiskEncryptionSetsOperations>`
* 2019-11-01: :class:`DiskEncryptionSetsOperations<azure.mgmt.compute.v2019_11_01.aio.operations.DiskEncryptionSetsOperations>`
* 2020-05-01: :class:`DiskEncryptionSetsOperations<azure.mgmt.compute.v2020_05_01.aio.operations.DiskEncryptionSetsOperations>`
* 2020-06-30: :class:`DiskEncryptionSetsOperations<azure.mgmt.compute.v2020_06_30.aio.operations.DiskEncryptionSetsOperations>`
* 2020-09-30: :class:`DiskEncryptionSetsOperations<azure.mgmt.compute.v2020_09_30.aio.operations.DiskEncryptionSetsOperations>`
* 2020-12-01: :class:`DiskEncryptionSetsOperations<azure.mgmt.compute.v2020_12_01.aio.operations.DiskEncryptionSetsOperations>`
* 2021-04-01: :class:`DiskEncryptionSetsOperations<azure.mgmt.compute.v2021_04_01.aio.operations.DiskEncryptionSetsOperations>`
* 2021-08-01: :class:`DiskEncryptionSetsOperations<azure.mgmt.compute.v2021_08_01.aio.operations.DiskEncryptionSetsOperations>`
* 2021-12-01: :class:`DiskEncryptionSetsOperations<azure.mgmt.compute.v2021_12_01.aio.operations.DiskEncryptionSetsOperations>`
* 2022-03-02: :class:`DiskEncryptionSetsOperations<azure.mgmt.compute.v2022_03_02.aio.operations.DiskEncryptionSetsOperations>`
* 2022-07-02: :class:`DiskEncryptionSetsOperations<azure.mgmt.compute.v2022_07_02.aio.operations.DiskEncryptionSetsOperations>`
"""
api_version = self._get_api_version('disk_encryption_sets')
if api_version == '2019-07-01':
from ..v2019_07_01.aio.operations import DiskEncryptionSetsOperations as OperationClass
elif api_version == '2019-11-01':
from ..v2019_11_01.aio.operations import DiskEncryptionSetsOperations as OperationClass
elif api_version == '2020-05-01':
from ..v2020_05_01.aio.operations import DiskEncryptionSetsOperations as OperationClass
elif api_version == '2020-06-30':
from ..v2020_06_30.aio.operations import DiskEncryptionSetsOperations as OperationClass
elif api_version == '2020-09-30':
from ..v2020_09_30.aio.operations import DiskEncryptionSetsOperations as OperationClass
elif api_version == '2020-12-01':
from ..v2020_12_01.aio.operations import DiskEncryptionSetsOperations as OperationClass
elif api_version == '2021-04-01':
from ..v2021_04_01.aio.operations import DiskEncryptionSetsOperations as OperationClass
elif api_version == '2021-08-01':
from ..v2021_08_01.aio.operations import DiskEncryptionSetsOperations as OperationClass
elif api_version == '2021-12-01':
from ..v2021_12_01.aio.operations import DiskEncryptionSetsOperations as OperationClass
elif api_version == '2022-03-02':
from ..v2022_03_02.aio.operations import DiskEncryptionSetsOperations as OperationClass
elif api_version == '2022-07-02':
from ..v2022_07_02.aio.operations import DiskEncryptionSetsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'disk_encryption_sets'".format(api_version))
self._config.api_version = api_version
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def disk_restore_point(self):
"""Instance depends on the API version:
* 2020-09-30: :class:`DiskRestorePointOperations<azure.mgmt.compute.v2020_09_30.aio.operations.DiskRestorePointOperations>`
* 2020-12-01: :class:`DiskRestorePointOperations<azure.mgmt.compute.v2020_12_01.aio.operations.DiskRestorePointOperations>`
* 2021-04-01: :class:`DiskRestorePointOperations<azure.mgmt.compute.v2021_04_01.aio.operations.DiskRestorePointOperations>`
* 2021-08-01: :class:`DiskRestorePointOperations<azure.mgmt.compute.v2021_08_01.aio.operations.DiskRestorePointOperations>`
* 2021-12-01: :class:`DiskRestorePointOperations<azure.mgmt.compute.v2021_12_01.aio.operations.DiskRestorePointOperations>`
* 2022-03-02: :class:`DiskRestorePointOperations<azure.mgmt.compute.v2022_03_02.aio.operations.DiskRestorePointOperations>`
* 2022-07-02: :class:`DiskRestorePointOperations<azure.mgmt.compute.v2022_07_02.aio.operations.DiskRestorePointOperations>`
"""
api_version = self._get_api_version('disk_restore_point')
if api_version == '2020-09-30':
from ..v2020_09_30.aio.operations import DiskRestorePointOperations as OperationClass
elif api_version == '2020-12-01':
from ..v2020_12_01.aio.operations import DiskRestorePointOperations as OperationClass
elif api_version == '2021-04-01':
from ..v2021_04_01.aio.operations import DiskRestorePointOperations as OperationClass
elif api_version == '2021-08-01':
from ..v2021_08_01.aio.operations import DiskRestorePointOperations as OperationClass
elif api_version == '2021-12-01':
from ..v2021_12_01.aio.operations import DiskRestorePointOperations as OperationClass
elif api_version == '2022-03-02':
from ..v2022_03_02.aio.operations import DiskRestorePointOperations as OperationClass
elif api_version == '2022-07-02':
from ..v2022_07_02.aio.operations import DiskRestorePointOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'disk_restore_point'".format(api_version))
self._config.api_version = api_version
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def disks(self):
"""Instance depends on the API version:
* 2016-04-30-preview: :class:`DisksOperations<azure.mgmt.compute.v2016_04_30_preview.aio.operations.DisksOperations>`
* 2017-03-30: :class:`DisksOperations<azure.mgmt.compute.v2017_03_30.aio.operations.DisksOperations>`
* 2018-04-01: :class:`DisksOperations<azure.mgmt.compute.v2018_04_01.aio.operations.DisksOperations>`
* 2018-06-01: :class:`DisksOperations<azure.mgmt.compute.v2018_06_01.aio.operations.DisksOperations>`
* 2018-09-30: :class:`DisksOperations<azure.mgmt.compute.v2018_09_30.aio.operations.DisksOperations>`
* 2019-03-01: :class:`DisksOperations<azure.mgmt.compute.v2019_03_01.aio.operations.DisksOperations>`
* 2019-07-01: :class:`DisksOperations<azure.mgmt.compute.v2019_07_01.aio.operations.DisksOperations>`
* 2019-11-01: :class:`DisksOperations<azure.mgmt.compute.v2019_11_01.aio.operations.DisksOperations>`
* 2020-05-01: :class:`DisksOperations<azure.mgmt.compute.v2020_05_01.aio.operations.DisksOperations>`
* 2020-06-30: :class:`DisksOperations<azure.mgmt.compute.v2020_06_30.aio.operations.DisksOperations>`
* 2020-09-30: :class:`DisksOperations<azure.mgmt.compute.v2020_09_30.aio.operations.DisksOperations>`
* 2020-12-01: :class:`DisksOperations<azure.mgmt.compute.v2020_12_01.aio.operations.DisksOperations>`
* 2021-04-01: :class:`DisksOperations<azure.mgmt.compute.v2021_04_01.aio.operations.DisksOperations>`
* 2021-08-01: :class:`DisksOperations<azure.mgmt.compute.v2021_08_01.aio.operations.DisksOperations>`
* 2021-12-01: :class:`DisksOperations<azure.mgmt.compute.v2021_12_01.aio.operations.DisksOperations>`
* 2022-03-02: :class:`DisksOperations<azure.mgmt.compute.v2022_03_02.aio.operations.DisksOperations>`
* 2022-07-02: :class:`DisksOperations<azure.mgmt.compute.v2022_07_02.aio.operations.DisksOperations>`
"""
api_version = self._get_api_version('disks')
if api_version == '2016-04-30-preview':
from ..v2016_04_30_preview.aio.operations import DisksOperations as OperationClass
elif api_version == '2017-03-30':
from ..v2017_03_30.aio.operations import DisksOperations as OperationClass
elif api_version == '2018-04-01':
from ..v2018_04_01.aio.operations import DisksOperations as OperationClass
elif api_version == '2018-06-01':
from ..v2018_06_01.aio.operations import DisksOperations as OperationClass
elif api_version == '2018-09-30':
from ..v2018_09_30.aio.operations import DisksOperations as OperationClass
elif api_version == '2019-03-01':
from ..v2019_03_01.aio.operations import DisksOperations as OperationClass
elif api_version == '2019-07-01':
from ..v2019_07_01.aio.operations import DisksOperations as OperationClass
elif api_version == '2019-11-01':
from ..v2019_11_01.aio.operations import DisksOperations as OperationClass
elif api_version == '2020-05-01':
from ..v2020_05_01.aio.operations import DisksOperations as OperationClass
elif api_version == '2020-06-30':
from ..v2020_06_30.aio.operations import DisksOperations as OperationClass
elif api_version == '2020-09-30':
from ..v2020_09_30.aio.operations import DisksOperations as OperationClass
elif api_version == '2020-12-01':
from ..v2020_12_01.aio.operations import DisksOperations as OperationClass
elif api_version == '2021-04-01':
from ..v2021_04_01.aio.operations import DisksOperations as OperationClass
elif api_version == '2021-08-01':
from ..v2021_08_01.aio.operations import DisksOperations as OperationClass
elif api_version == '2021-12-01':
from ..v2021_12_01.aio.operations import DisksOperations as OperationClass
elif api_version == '2022-03-02':
from ..v2022_03_02.aio.operations import DisksOperations as OperationClass
elif api_version == '2022-07-02':
from ..v2022_07_02.aio.operations import DisksOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'disks'".format(api_version))
self._config.api_version = api_version
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def galleries(self):
"""Instance depends on the API version:
* 2018-06-01: :class:`GalleriesOperations<azure.mgmt.compute.v2018_06_01.aio.operations.GalleriesOperations>`
* 2019-03-01: :class:`GalleriesOperations<azure.mgmt.compute.v2019_03_01.aio.operations.GalleriesOperations>`
* 2019-07-01: :class:`GalleriesOperations<azure.mgmt.compute.v2019_07_01.aio.operations.GalleriesOperations>`
* 2019-12-01: :class:`GalleriesOperations<azure.mgmt.compute.v2019_12_01.aio.operations.GalleriesOperations>`
* 2020-09-30: :class:`GalleriesOperations<azure.mgmt.compute.v2020_09_30.aio.operations.GalleriesOperations>`
* 2021-07-01: :class:`GalleriesOperations<azure.mgmt.compute.v2021_07_01.aio.operations.GalleriesOperations>`
* 2021-10-01: :class:`GalleriesOperations<azure.mgmt.compute.v2021_10_01.aio.operations.GalleriesOperations>`
* 2022-01-03: :class:`GalleriesOperations<azure.mgmt.compute.v2022_01_03.aio.operations.GalleriesOperations>`
* 2022-03-03: :class:`GalleriesOperations<azure.mgmt.compute.v2022_03_03.aio.operations.GalleriesOperations>`
"""
api_version = self._get_api_version('galleries')
if api_version == '2018-06-01':
from ..v2018_06_01.aio.operations import GalleriesOperations as OperationClass
elif api_version == '2019-03-01':
from ..v2019_03_01.aio.operations import GalleriesOperations as OperationClass
elif api_version == '2019-07-01':
from ..v2019_07_01.aio.operations import GalleriesOperations as OperationClass
elif api_version == '2019-12-01':
from ..v2019_12_01.aio.operations import GalleriesOperations as OperationClass
elif api_version == '2020-09-30':
from ..v2020_09_30.aio.operations import GalleriesOperations as OperationClass
elif api_version == '2021-07-01':
from ..v2021_07_01.aio.operations import GalleriesOperations as OperationClass
elif api_version == '2021-10-01':
from ..v2021_10_01.aio.operations import GalleriesOperations as OperationClass
elif api_version == '2022-01-03':
from ..v2022_01_03.aio.operations import GalleriesOperations as OperationClass
elif api_version == '2022-03-03':
from ..v2022_03_03.aio.operations import GalleriesOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'galleries'".format(api_version))
self._config.api_version = api_version
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def gallery_application_versions(self):
"""Instance depends on the API version:
* 2019-03-01: :class:`GalleryApplicationVersionsOperations<azure.mgmt.compute.v2019_03_01.aio.operations.GalleryApplicationVersionsOperations>`
* 2019-07-01: :class:`GalleryApplicationVersionsOperations<azure.mgmt.compute.v2019_07_01.aio.operations.GalleryApplicationVersionsOperations>`
* 2019-12-01: :class:`GalleryApplicationVersionsOperations<azure.mgmt.compute.v2019_12_01.aio.operations.GalleryApplicationVersionsOperations>`
* 2020-09-30: :class:`GalleryApplicationVersionsOperations<azure.mgmt.compute.v2020_09_30.aio.operations.GalleryApplicationVersionsOperations>`
* 2021-07-01: :class:`GalleryApplicationVersionsOperations<azure.mgmt.compute.v2021_07_01.aio.operations.GalleryApplicationVersionsOperations>`
* 2021-10-01: :class:`GalleryApplicationVersionsOperations<azure.mgmt.compute.v2021_10_01.aio.operations.GalleryApplicationVersionsOperations>`
* 2022-01-03: :class:`GalleryApplicationVersionsOperations<azure.mgmt.compute.v2022_01_03.aio.operations.GalleryApplicationVersionsOperations>`
* 2022-03-03: :class:`GalleryApplicationVersionsOperations<azure.mgmt.compute.v2022_03_03.aio.operations.GalleryApplicationVersionsOperations>`
"""
api_version = self._get_api_version('gallery_application_versions')
if api_version == '2019-03-01':
from ..v2019_03_01.aio.operations import GalleryApplicationVersionsOperations as OperationClass
elif api_version == '2019-07-01':
from ..v2019_07_01.aio.operations import GalleryApplicationVersionsOperations as OperationClass
elif api_version == '2019-12-01':
from ..v2019_12_01.aio.operations import GalleryApplicationVersionsOperations as OperationClass
elif api_version == '2020-09-30':
from ..v2020_09_30.aio.operations import GalleryApplicationVersionsOperations as OperationClass
elif api_version == '2021-07-01':
from ..v2021_07_01.aio.operations import GalleryApplicationVersionsOperations as OperationClass
elif api_version == '2021-10-01':
from ..v2021_10_01.aio.operations import GalleryApplicationVersionsOperations as OperationClass
elif api_version == '2022-01-03':
from ..v2022_01_03.aio.operations import GalleryApplicationVersionsOperations as OperationClass
elif api_version == '2022-03-03':
from ..v2022_03_03.aio.operations import GalleryApplicationVersionsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'gallery_application_versions'".format(api_version))
self._config.api_version = api_version
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def gallery_applications(self):
"""Instance depends on the API version:
* 2019-03-01: :class:`GalleryApplicationsOperations<azure.mgmt.compute.v2019_03_01.aio.operations.GalleryApplicationsOperations>`
* 2019-07-01: :class:`GalleryApplicationsOperations<azure.mgmt.compute.v2019_07_01.aio.operations.GalleryApplicationsOperations>`
* 2019-12-01: :class:`GalleryApplicationsOperations<azure.mgmt.compute.v2019_12_01.aio.operations.GalleryApplicationsOperations>`
* 2020-09-30: :class:`GalleryApplicationsOperations<azure.mgmt.compute.v2020_09_30.aio.operations.GalleryApplicationsOperations>`
* 2021-07-01: :class:`GalleryApplicationsOperations<azure.mgmt.compute.v2021_07_01.aio.operations.GalleryApplicationsOperations>`
* 2021-10-01: :class:`GalleryApplicationsOperations<azure.mgmt.compute.v2021_10_01.aio.operations.GalleryApplicationsOperations>`
* 2022-01-03: :class:`GalleryApplicationsOperations<azure.mgmt.compute.v2022_01_03.aio.operations.GalleryApplicationsOperations>`
* 2022-03-03: :class:`GalleryApplicationsOperations<azure.mgmt.compute.v2022_03_03.aio.operations.GalleryApplicationsOperations>`
"""
api_version = self._get_api_version('gallery_applications')
if api_version == '2019-03-01':
from ..v2019_03_01.aio.operations import GalleryApplicationsOperations as OperationClass
elif api_version == '2019-07-01':
from ..v2019_07_01.aio.operations import GalleryApplicationsOperations as OperationClass
elif api_version == '2019-12-01':
from ..v2019_12_01.aio.operations import GalleryApplicationsOperations as OperationClass
elif api_version == '2020-09-30':
from ..v2020_09_30.aio.operations import GalleryApplicationsOperations as OperationClass
elif api_version == '2021-07-01':
from ..v2021_07_01.aio.operations import GalleryApplicationsOperations as OperationClass
elif api_version == '2021-10-01':
from ..v2021_10_01.aio.operations import GalleryApplicationsOperations as OperationClass
elif api_version == '2022-01-03':
from ..v2022_01_03.aio.operations import GalleryApplicationsOperations as OperationClass
elif api_version == '2022-03-03':
from ..v2022_03_03.aio.operations import GalleryApplicationsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'gallery_applications'".format(api_version))
self._config.api_version = api_version
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def gallery_image_versions(self):
"""Instance depends on the API version:
* 2018-06-01: :class:`GalleryImageVersionsOperations<azure.mgmt.compute.v2018_06_01.aio.operations.GalleryImageVersionsOperations>`
* 2019-03-01: :class:`GalleryImageVersionsOperations<azure.mgmt.compute.v2019_03_01.aio.operations.GalleryImageVersionsOperations>`
* 2019-07-01: :class:`GalleryImageVersionsOperations<azure.mgmt.compute.v2019_07_01.aio.operations.GalleryImageVersionsOperations>`
* 2019-12-01: :class:`GalleryImageVersionsOperations<azure.mgmt.compute.v2019_12_01.aio.operations.GalleryImageVersionsOperations>`
* 2020-09-30: :class:`GalleryImageVersionsOperations<azure.mgmt.compute.v2020_09_30.aio.operations.GalleryImageVersionsOperations>`
* 2021-07-01: :class:`GalleryImageVersionsOperations<azure.mgmt.compute.v2021_07_01.aio.operations.GalleryImageVersionsOperations>`
* 2021-10-01: :class:`GalleryImageVersionsOperations<azure.mgmt.compute.v2021_10_01.aio.operations.GalleryImageVersionsOperations>`
* 2022-01-03: :class:`GalleryImageVersionsOperations<azure.mgmt.compute.v2022_01_03.aio.operations.GalleryImageVersionsOperations>`
* 2022-03-03: :class:`GalleryImageVersionsOperations<azure.mgmt.compute.v2022_03_03.aio.operations.GalleryImageVersionsOperations>`
"""
api_version = self._get_api_version('gallery_image_versions')
if api_version == '2018-06-01':
from ..v2018_06_01.aio.operations import GalleryImageVersionsOperations as OperationClass
elif api_version == '2019-03-01':
from ..v2019_03_01.aio.operations import GalleryImageVersionsOperations as OperationClass
elif api_version == '2019-07-01':
from ..v2019_07_01.aio.operations import GalleryImageVersionsOperations as OperationClass
elif api_version == '2019-12-01':
from ..v2019_12_01.aio.operations import GalleryImageVersionsOperations as OperationClass
elif api_version == '2020-09-30':
from ..v2020_09_30.aio.operations import GalleryImageVersionsOperations as OperationClass
elif api_version == '2021-07-01':
from ..v2021_07_01.aio.operations import GalleryImageVersionsOperations as OperationClass
elif api_version == '2021-10-01':
from ..v2021_10_01.aio.operations import GalleryImageVersionsOperations as OperationClass
elif api_version == '2022-01-03':
from ..v2022_01_03.aio.operations import GalleryImageVersionsOperations as OperationClass
elif api_version == '2022-03-03':
from ..v2022_03_03.aio.operations import GalleryImageVersionsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'gallery_image_versions'".format(api_version))
self._config.api_version = api_version
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def gallery_images(self):
"""Instance depends on the API version:
* 2018-06-01: :class:`GalleryImagesOperations<azure.mgmt.compute.v2018_06_01.aio.operations.GalleryImagesOperations>`
* 2019-03-01: :class:`GalleryImagesOperations<azure.mgmt.compute.v2019_03_01.aio.operations.GalleryImagesOperations>`
* 2019-07-01: :class:`GalleryImagesOperations<azure.mgmt.compute.v2019_07_01.aio.operations.GalleryImagesOperations>`
* 2019-12-01: :class:`GalleryImagesOperations<azure.mgmt.compute.v2019_12_01.aio.operations.GalleryImagesOperations>`
* 2020-09-30: :class:`GalleryImagesOperations<azure.mgmt.compute.v2020_09_30.aio.operations.GalleryImagesOperations>`
* 2021-07-01: :class:`GalleryImagesOperations<azure.mgmt.compute.v2021_07_01.aio.operations.GalleryImagesOperations>`
* 2021-10-01: :class:`GalleryImagesOperations<azure.mgmt.compute.v2021_10_01.aio.operations.GalleryImagesOperations>`
* 2022-01-03: :class:`GalleryImagesOperations<azure.mgmt.compute.v2022_01_03.aio.operations.GalleryImagesOperations>`
* 2022-03-03: :class:`GalleryImagesOperations<azure.mgmt.compute.v2022_03_03.aio.operations.GalleryImagesOperations>`
"""
api_version = self._get_api_version('gallery_images')
if api_version == '2018-06-01':
from ..v2018_06_01.aio.operations import GalleryImagesOperations as OperationClass
elif api_version == '2019-03-01':
from ..v2019_03_01.aio.operations import GalleryImagesOperations as OperationClass
elif api_version == '2019-07-01':
from ..v2019_07_01.aio.operations import GalleryImagesOperations as OperationClass
elif api_version == '2019-12-01':
from ..v2019_12_01.aio.operations import GalleryImagesOperations as OperationClass
elif api_version == '2020-09-30':
from ..v2020_09_30.aio.operations import GalleryImagesOperations as OperationClass
elif api_version == '2021-07-01':
from ..v2021_07_01.aio.operations import GalleryImagesOperations as OperationClass
elif api_version == '2021-10-01':
from ..v2021_10_01.aio.operations import GalleryImagesOperations as OperationClass
elif api_version == '2022-01-03':
from ..v2022_01_03.aio.operations import GalleryImagesOperations as OperationClass
elif api_version == '2022-03-03':
from ..v2022_03_03.aio.operations import GalleryImagesOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'gallery_images'".format(api_version))
self._config.api_version = api_version
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def gallery_sharing_profile(self):
"""Instance depends on the API version:
* 2020-09-30: :class:`GallerySharingProfileOperations<azure.mgmt.compute.v2020_09_30.aio.operations.GallerySharingProfileOperations>`
* 2021-07-01: :class:`GallerySharingProfileOperations<azure.mgmt.compute.v2021_07_01.aio.operations.GallerySharingProfileOperations>`
* 2021-10-01: :class:`GallerySharingProfileOperations<azure.mgmt.compute.v2021_10_01.aio.operations.GallerySharingProfileOperations>`
* 2022-01-03: :class:`GallerySharingProfileOperations<azure.mgmt.compute.v2022_01_03.aio.operations.GallerySharingProfileOperations>`
* 2022-03-03: :class:`GallerySharingProfileOperations<azure.mgmt.compute.v2022_03_03.aio.operations.GallerySharingProfileOperations>`
"""
api_version = self._get_api_version('gallery_sharing_profile')
if api_version == '2020-09-30':
from ..v2020_09_30.aio.operations import GallerySharingProfileOperations as OperationClass
elif api_version == '2021-07-01':
from ..v2021_07_01.aio.operations import GallerySharingProfileOperations as OperationClass
elif api_version == '2021-10-01':
from ..v2021_10_01.aio.operations import GallerySharingProfileOperations as OperationClass
elif api_version == '2022-01-03':
from ..v2022_01_03.aio.operations import GallerySharingProfileOperations as OperationClass
elif api_version == '2022-03-03':
from ..v2022_03_03.aio.operations import GallerySharingProfileOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'gallery_sharing_profile'".format(api_version))
self._config.api_version = api_version
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def images(self):
"""Instance depends on the API version:
* 2016-04-30-preview: :class:`ImagesOperations<azure.mgmt.compute.v2016_04_30_preview.aio.operations.ImagesOperations>`
* 2017-03-30: :class:`ImagesOperations<azure.mgmt.compute.v2017_03_30.aio.operations.ImagesOperations>`
* 2017-12-01: :class:`ImagesOperations<azure.mgmt.compute.v2017_12_01.aio.operations.ImagesOperations>`
* 2018-04-01: :class:`ImagesOperations<azure.mgmt.compute.v2018_04_01.aio.operations.ImagesOperations>`
* 2018-06-01: :class:`ImagesOperations<azure.mgmt.compute.v2018_06_01.aio.operations.ImagesOperations>`
* 2018-10-01: :class:`ImagesOperations<azure.mgmt.compute.v2018_10_01.aio.operations.ImagesOperations>`
* 2019-03-01: :class:`ImagesOperations<azure.mgmt.compute.v2019_03_01.aio.operations.ImagesOperations>`
* 2019-07-01: :class:`ImagesOperations<azure.mgmt.compute.v2019_07_01.aio.operations.ImagesOperations>`
* 2019-12-01: :class:`ImagesOperations<azure.mgmt.compute.v2019_12_01.aio.operations.ImagesOperations>`
* 2020-06-01: :class:`ImagesOperations<azure.mgmt.compute.v2020_06_01.aio.operations.ImagesOperations>`
* 2020-12-01: :class:`ImagesOperations<azure.mgmt.compute.v2020_12_01.aio.operations.ImagesOperations>`
* 2021-03-01: :class:`ImagesOperations<azure.mgmt.compute.v2021_03_01.aio.operations.ImagesOperations>`
* 2021-04-01: :class:`ImagesOperations<azure.mgmt.compute.v2021_04_01.aio.operations.ImagesOperations>`
* 2021-07-01: :class:`ImagesOperations<azure.mgmt.compute.v2021_07_01.aio.operations.ImagesOperations>`
* 2021-11-01: :class:`ImagesOperations<azure.mgmt.compute.v2021_11_01.aio.operations.ImagesOperations>`
* 2022-03-01: :class:`ImagesOperations<azure.mgmt.compute.v2022_03_01.aio.operations.ImagesOperations>`
* 2022-08-01: :class:`ImagesOperations<azure.mgmt.compute.v2022_08_01.aio.operations.ImagesOperations>`
"""
api_version = self._get_api_version('images')
if api_version == '2016-04-30-preview':
from ..v2016_04_30_preview.aio.operations import ImagesOperations as OperationClass
elif api_version == '2017-03-30':
from ..v2017_03_30.aio.operations import ImagesOperations as OperationClass
elif api_version == '2017-12-01':
from ..v2017_12_01.aio.operations import ImagesOperations as OperationClass
elif api_version == '2018-04-01':
from ..v2018_04_01.aio.operations import ImagesOperations as OperationClass
elif api_version == '2018-06-01':
from ..v2018_06_01.aio.operations import ImagesOperations as OperationClass
elif api_version == '2018-10-01':
from ..v2018_10_01.aio.operations import ImagesOperations as OperationClass
elif api_version == '2019-03-01':
from ..v2019_03_01.aio.operations import ImagesOperations as OperationClass
elif api_version == '2019-07-01':
from ..v2019_07_01.aio.operations import ImagesOperations as OperationClass
elif api_version == '2019-12-01':
from ..v2019_12_01.aio.operations import ImagesOperations as OperationClass
elif api_version == '2020-06-01':
from ..v2020_06_01.aio.operations import ImagesOperations as OperationClass
elif api_version == '2020-12-01':
from ..v2020_12_01.aio.operations import ImagesOperations as OperationClass
elif api_version == '2021-03-01':
from ..v2021_03_01.aio.operations import ImagesOperations as OperationClass
elif api_version == '2021-04-01':
from ..v2021_04_01.aio.operations import ImagesOperations as OperationClass
elif api_version == '2021-07-01':
from ..v2021_07_01.aio.operations import ImagesOperations as OperationClass
elif api_version == '2021-11-01':
from ..v2021_11_01.aio.operations import ImagesOperations as OperationClass
elif api_version == '2022-03-01':
from ..v2022_03_01.aio.operations import ImagesOperations as OperationClass
elif api_version == '2022-08-01':
from ..v2022_08_01.aio.operations import ImagesOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'images'".format(api_version))
self._config.api_version = api_version
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
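
    # A short usage sketch for a resolved group (``client`` as constructed
    # in the sketch above; the output handling is illustrative):
    #
    #     async for image in client.images.list():  # managed images
    #         print(image.name)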

    @property
def log_analytics(self):
"""Instance depends on the API version:
* 2017-12-01: :class:`LogAnalyticsOperations<azure.mgmt.compute.v2017_12_01.aio.operations.LogAnalyticsOperations>`
* 2018-04-01: :class:`LogAnalyticsOperations<azure.mgmt.compute.v2018_04_01.aio.operations.LogAnalyticsOperations>`
* 2018-06-01: :class:`LogAnalyticsOperations<azure.mgmt.compute.v2018_06_01.aio.operations.LogAnalyticsOperations>`
* 2018-10-01: :class:`LogAnalyticsOperations<azure.mgmt.compute.v2018_10_01.aio.operations.LogAnalyticsOperations>`
* 2019-03-01: :class:`LogAnalyticsOperations<azure.mgmt.compute.v2019_03_01.aio.operations.LogAnalyticsOperations>`
* 2019-07-01: :class:`LogAnalyticsOperations<azure.mgmt.compute.v2019_07_01.aio.operations.LogAnalyticsOperations>`
* 2019-12-01: :class:`LogAnalyticsOperations<azure.mgmt.compute.v2019_12_01.aio.operations.LogAnalyticsOperations>`
* 2020-06-01: :class:`LogAnalyticsOperations<azure.mgmt.compute.v2020_06_01.aio.operations.LogAnalyticsOperations>`
* 2020-12-01: :class:`LogAnalyticsOperations<azure.mgmt.compute.v2020_12_01.aio.operations.LogAnalyticsOperations>`
* 2021-03-01: :class:`LogAnalyticsOperations<azure.mgmt.compute.v2021_03_01.aio.operations.LogAnalyticsOperations>`
* 2021-04-01: :class:`LogAnalyticsOperations<azure.mgmt.compute.v2021_04_01.aio.operations.LogAnalyticsOperations>`
* 2021-07-01: :class:`LogAnalyticsOperations<azure.mgmt.compute.v2021_07_01.aio.operations.LogAnalyticsOperations>`
* 2021-11-01: :class:`LogAnalyticsOperations<azure.mgmt.compute.v2021_11_01.aio.operations.LogAnalyticsOperations>`
* 2022-03-01: :class:`LogAnalyticsOperations<azure.mgmt.compute.v2022_03_01.aio.operations.LogAnalyticsOperations>`
* 2022-08-01: :class:`LogAnalyticsOperations<azure.mgmt.compute.v2022_08_01.aio.operations.LogAnalyticsOperations>`
"""
api_version = self._get_api_version('log_analytics')
if api_version == '2017-12-01':
from ..v2017_12_01.aio.operations import LogAnalyticsOperations as OperationClass
elif api_version == '2018-04-01':
from ..v2018_04_01.aio.operations import LogAnalyticsOperations as OperationClass
elif api_version == '2018-06-01':
from ..v2018_06_01.aio.operations import LogAnalyticsOperations as OperationClass
elif api_version == '2018-10-01':
from ..v2018_10_01.aio.operations import LogAnalyticsOperations as OperationClass
elif api_version == '2019-03-01':
from ..v2019_03_01.aio.operations import LogAnalyticsOperations as OperationClass
elif api_version == '2019-07-01':
from ..v2019_07_01.aio.operations import LogAnalyticsOperations as OperationClass
elif api_version == '2019-12-01':
from ..v2019_12_01.aio.operations import LogAnalyticsOperations as OperationClass
elif api_version == '2020-06-01':
from ..v2020_06_01.aio.operations import LogAnalyticsOperations as OperationClass
elif api_version == '2020-12-01':
from ..v2020_12_01.aio.operations import LogAnalyticsOperations as OperationClass
elif api_version == '2021-03-01':
from ..v2021_03_01.aio.operations import LogAnalyticsOperations as OperationClass
elif api_version == '2021-04-01':
from ..v2021_04_01.aio.operations import LogAnalyticsOperations as OperationClass
elif api_version == '2021-07-01':
from ..v2021_07_01.aio.operations import LogAnalyticsOperations as OperationClass
elif api_version == '2021-11-01':
from ..v2021_11_01.aio.operations import LogAnalyticsOperations as OperationClass
elif api_version == '2022-03-01':
from ..v2022_03_01.aio.operations import LogAnalyticsOperations as OperationClass
elif api_version == '2022-08-01':
from ..v2022_08_01.aio.operations import LogAnalyticsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'log_analytics'".format(api_version))
self._config.api_version = api_version
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))

    @property
def operations(self):
"""Instance depends on the API version:
* 2017-12-01: :class:`Operations<azure.mgmt.compute.v2017_12_01.aio.operations.Operations>`
* 2018-04-01: :class:`Operations<azure.mgmt.compute.v2018_04_01.aio.operations.Operations>`
* 2018-06-01: :class:`Operations<azure.mgmt.compute.v2018_06_01.aio.operations.Operations>`
* 2018-10-01: :class:`Operations<azure.mgmt.compute.v2018_10_01.aio.operations.Operations>`
* 2019-03-01: :class:`Operations<azure.mgmt.compute.v2019_03_01.aio.operations.Operations>`
* 2019-07-01: :class:`Operations<azure.mgmt.compute.v2019_07_01.aio.operations.Operations>`
* 2019-12-01: :class:`Operations<azure.mgmt.compute.v2019_12_01.aio.operations.Operations>`
* 2020-06-01: :class:`Operations<azure.mgmt.compute.v2020_06_01.aio.operations.Operations>`
* 2020-12-01: :class:`Operations<azure.mgmt.compute.v2020_12_01.aio.operations.Operations>`
* 2021-03-01: :class:`Operations<azure.mgmt.compute.v2021_03_01.aio.operations.Operations>`
* 2021-04-01: :class:`Operations<azure.mgmt.compute.v2021_04_01.aio.operations.Operations>`
* 2021-07-01: :class:`Operations<azure.mgmt.compute.v2021_07_01.aio.operations.Operations>`
* 2021-11-01: :class:`Operations<azure.mgmt.compute.v2021_11_01.aio.operations.Operations>`
* 2022-03-01: :class:`Operations<azure.mgmt.compute.v2022_03_01.aio.operations.Operations>`
* 2022-08-01: :class:`Operations<azure.mgmt.compute.v2022_08_01.aio.operations.Operations>`
"""
api_version = self._get_api_version('operations')
if api_version == '2017-12-01':
from ..v2017_12_01.aio.operations import Operations as OperationClass
elif api_version == '2018-04-01':
from ..v2018_04_01.aio.operations import Operations as OperationClass
elif api_version == '2018-06-01':
from ..v2018_06_01.aio.operations import Operations as OperationClass
elif api_version == '2018-10-01':
from ..v2018_10_01.aio.operations import Operations as OperationClass
elif api_version == '2019-03-01':
from ..v2019_03_01.aio.operations import Operations as OperationClass
elif api_version == '2019-07-01':
from ..v2019_07_01.aio.operations import Operations as OperationClass
elif api_version == '2019-12-01':
from ..v2019_12_01.aio.operations import Operations as OperationClass
elif api_version == '2020-06-01':
from ..v2020_06_01.aio.operations import Operations as OperationClass
elif api_version == '2020-12-01':
from ..v2020_12_01.aio.operations import Operations as OperationClass
elif api_version == '2021-03-01':
from ..v2021_03_01.aio.operations import Operations as OperationClass
elif api_version == '2021-04-01':
from ..v2021_04_01.aio.operations import Operations as OperationClass
elif api_version == '2021-07-01':
from ..v2021_07_01.aio.operations import Operations as OperationClass
elif api_version == '2021-11-01':
from ..v2021_11_01.aio.operations import Operations as OperationClass
elif api_version == '2022-03-01':
from ..v2022_03_01.aio.operations import Operations as OperationClass
elif api_version == '2022-08-01':
from ..v2022_08_01.aio.operations import Operations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'operations'".format(api_version))
self._config.api_version = api_version
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))

    @property
def proximity_placement_groups(self):
"""Instance depends on the API version:
* 2018-04-01: :class:`ProximityPlacementGroupsOperations<azure.mgmt.compute.v2018_04_01.aio.operations.ProximityPlacementGroupsOperations>`
* 2018-06-01: :class:`ProximityPlacementGroupsOperations<azure.mgmt.compute.v2018_06_01.aio.operations.ProximityPlacementGroupsOperations>`
* 2018-10-01: :class:`ProximityPlacementGroupsOperations<azure.mgmt.compute.v2018_10_01.aio.operations.ProximityPlacementGroupsOperations>`
* 2019-03-01: :class:`ProximityPlacementGroupsOperations<azure.mgmt.compute.v2019_03_01.aio.operations.ProximityPlacementGroupsOperations>`
* 2019-07-01: :class:`ProximityPlacementGroupsOperations<azure.mgmt.compute.v2019_07_01.aio.operations.ProximityPlacementGroupsOperations>`
* 2019-12-01: :class:`ProximityPlacementGroupsOperations<azure.mgmt.compute.v2019_12_01.aio.operations.ProximityPlacementGroupsOperations>`
* 2020-06-01: :class:`ProximityPlacementGroupsOperations<azure.mgmt.compute.v2020_06_01.aio.operations.ProximityPlacementGroupsOperations>`
* 2020-12-01: :class:`ProximityPlacementGroupsOperations<azure.mgmt.compute.v2020_12_01.aio.operations.ProximityPlacementGroupsOperations>`
* 2021-03-01: :class:`ProximityPlacementGroupsOperations<azure.mgmt.compute.v2021_03_01.aio.operations.ProximityPlacementGroupsOperations>`
* 2021-04-01: :class:`ProximityPlacementGroupsOperations<azure.mgmt.compute.v2021_04_01.aio.operations.ProximityPlacementGroupsOperations>`
* 2021-07-01: :class:`ProximityPlacementGroupsOperations<azure.mgmt.compute.v2021_07_01.aio.operations.ProximityPlacementGroupsOperations>`
* 2021-11-01: :class:`ProximityPlacementGroupsOperations<azure.mgmt.compute.v2021_11_01.aio.operations.ProximityPlacementGroupsOperations>`
* 2022-03-01: :class:`ProximityPlacementGroupsOperations<azure.mgmt.compute.v2022_03_01.aio.operations.ProximityPlacementGroupsOperations>`
* 2022-08-01: :class:`ProximityPlacementGroupsOperations<azure.mgmt.compute.v2022_08_01.aio.operations.ProximityPlacementGroupsOperations>`
"""
api_version = self._get_api_version('proximity_placement_groups')
if api_version == '2018-04-01':
from ..v2018_04_01.aio.operations import ProximityPlacementGroupsOperations as OperationClass
elif api_version == '2018-06-01':
from ..v2018_06_01.aio.operations import ProximityPlacementGroupsOperations as OperationClass
elif api_version == '2018-10-01':
from ..v2018_10_01.aio.operations import ProximityPlacementGroupsOperations as OperationClass
elif api_version == '2019-03-01':
from ..v2019_03_01.aio.operations import ProximityPlacementGroupsOperations as OperationClass
elif api_version == '2019-07-01':
from ..v2019_07_01.aio.operations import ProximityPlacementGroupsOperations as OperationClass
elif api_version == '2019-12-01':
from ..v2019_12_01.aio.operations import ProximityPlacementGroupsOperations as OperationClass
elif api_version == '2020-06-01':
from ..v2020_06_01.aio.operations import ProximityPlacementGroupsOperations as OperationClass
elif api_version == '2020-12-01':
from ..v2020_12_01.aio.operations import ProximityPlacementGroupsOperations as OperationClass
elif api_version == '2021-03-01':
from ..v2021_03_01.aio.operations import ProximityPlacementGroupsOperations as OperationClass
elif api_version == '2021-04-01':
from ..v2021_04_01.aio.operations import ProximityPlacementGroupsOperations as OperationClass
elif api_version == '2021-07-01':
from ..v2021_07_01.aio.operations import ProximityPlacementGroupsOperations as OperationClass
elif api_version == '2021-11-01':
from ..v2021_11_01.aio.operations import ProximityPlacementGroupsOperations as OperationClass
elif api_version == '2022-03-01':
from ..v2022_03_01.aio.operations import ProximityPlacementGroupsOperations as OperationClass
elif api_version == '2022-08-01':
from ..v2022_08_01.aio.operations import ProximityPlacementGroupsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'proximity_placement_groups'".format(api_version))
self._config.api_version = api_version
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))

    @property
def resource_skus(self):
"""Instance depends on the API version:
* 2017-03-30: :class:`ResourceSkusOperations<azure.mgmt.compute.v2017_03_30.aio.operations.ResourceSkusOperations>`
* 2017-09-01: :class:`ResourceSkusOperations<azure.mgmt.compute.v2017_09_01.aio.operations.ResourceSkusOperations>`
* 2019-04-01: :class:`ResourceSkusOperations<azure.mgmt.compute.v2019_04_01.aio.operations.ResourceSkusOperations>`
* 2021-07-01: :class:`ResourceSkusOperations<azure.mgmt.compute.v2021_07_01.aio.operations.ResourceSkusOperations>`
"""
api_version = self._get_api_version('resource_skus')
if api_version == '2017-03-30':
from ..v2017_03_30.aio.operations import ResourceSkusOperations as OperationClass
elif api_version == '2017-09-01':
from ..v2017_09_01.aio.operations import ResourceSkusOperations as OperationClass
elif api_version == '2019-04-01':
from ..v2019_04_01.aio.operations import ResourceSkusOperations as OperationClass
elif api_version == '2021-07-01':
from ..v2021_07_01.aio.operations import ResourceSkusOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'resource_skus'".format(api_version))
self._config.api_version = api_version
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
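
    # Some groups, such as resource_skus, stopped receiving new API
    # versions; resolution still succeeds as long as the profile maps the
    # group to one of the versions listed above. Illustrative sketch:
    #
    #     async for sku in client.resource_skus.list():
    #         print(sku.name, sku.locations)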

    @property
def restore_point_collections(self):
"""Instance depends on the API version:
* 2021-03-01: :class:`RestorePointCollectionsOperations<azure.mgmt.compute.v2021_03_01.aio.operations.RestorePointCollectionsOperations>`
* 2021-04-01: :class:`RestorePointCollectionsOperations<azure.mgmt.compute.v2021_04_01.aio.operations.RestorePointCollectionsOperations>`
* 2021-07-01: :class:`RestorePointCollectionsOperations<azure.mgmt.compute.v2021_07_01.aio.operations.RestorePointCollectionsOperations>`
* 2021-11-01: :class:`RestorePointCollectionsOperations<azure.mgmt.compute.v2021_11_01.aio.operations.RestorePointCollectionsOperations>`
* 2022-03-01: :class:`RestorePointCollectionsOperations<azure.mgmt.compute.v2022_03_01.aio.operations.RestorePointCollectionsOperations>`
* 2022-08-01: :class:`RestorePointCollectionsOperations<azure.mgmt.compute.v2022_08_01.aio.operations.RestorePointCollectionsOperations>`
"""
api_version = self._get_api_version('restore_point_collections')
if api_version == '2021-03-01':
from ..v2021_03_01.aio.operations import RestorePointCollectionsOperations as OperationClass
elif api_version == '2021-04-01':
from ..v2021_04_01.aio.operations import RestorePointCollectionsOperations as OperationClass
elif api_version == '2021-07-01':
from ..v2021_07_01.aio.operations import RestorePointCollectionsOperations as OperationClass
elif api_version == '2021-11-01':
from ..v2021_11_01.aio.operations import RestorePointCollectionsOperations as OperationClass
elif api_version == '2022-03-01':
from ..v2022_03_01.aio.operations import RestorePointCollectionsOperations as OperationClass
elif api_version == '2022-08-01':
from ..v2022_08_01.aio.operations import RestorePointCollectionsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'restore_point_collections'".format(api_version))
self._config.api_version = api_version
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))

    @property
def restore_points(self):
"""Instance depends on the API version:
* 2021-03-01: :class:`RestorePointsOperations<azure.mgmt.compute.v2021_03_01.aio.operations.RestorePointsOperations>`
* 2021-04-01: :class:`RestorePointsOperations<azure.mgmt.compute.v2021_04_01.aio.operations.RestorePointsOperations>`
* 2021-07-01: :class:`RestorePointsOperations<azure.mgmt.compute.v2021_07_01.aio.operations.RestorePointsOperations>`
* 2021-11-01: :class:`RestorePointsOperations<azure.mgmt.compute.v2021_11_01.aio.operations.RestorePointsOperations>`
* 2022-03-01: :class:`RestorePointsOperations<azure.mgmt.compute.v2022_03_01.aio.operations.RestorePointsOperations>`
* 2022-08-01: :class:`RestorePointsOperations<azure.mgmt.compute.v2022_08_01.aio.operations.RestorePointsOperations>`
"""
api_version = self._get_api_version('restore_points')
if api_version == '2021-03-01':
from ..v2021_03_01.aio.operations import RestorePointsOperations as OperationClass
elif api_version == '2021-04-01':
from ..v2021_04_01.aio.operations import RestorePointsOperations as OperationClass
elif api_version == '2021-07-01':
from ..v2021_07_01.aio.operations import RestorePointsOperations as OperationClass
elif api_version == '2021-11-01':
from ..v2021_11_01.aio.operations import RestorePointsOperations as OperationClass
elif api_version == '2022-03-01':
from ..v2022_03_01.aio.operations import RestorePointsOperations as OperationClass
elif api_version == '2022-08-01':
from ..v2022_08_01.aio.operations import RestorePointsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'restore_points'".format(api_version))
self._config.api_version = api_version
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))

    @property
def shared_galleries(self):
"""Instance depends on the API version:
* 2020-09-30: :class:`SharedGalleriesOperations<azure.mgmt.compute.v2020_09_30.aio.operations.SharedGalleriesOperations>`
* 2021-07-01: :class:`SharedGalleriesOperations<azure.mgmt.compute.v2021_07_01.aio.operations.SharedGalleriesOperations>`
* 2022-01-03: :class:`SharedGalleriesOperations<azure.mgmt.compute.v2022_01_03.aio.operations.SharedGalleriesOperations>`
* 2022-03-03: :class:`SharedGalleriesOperations<azure.mgmt.compute.v2022_03_03.aio.operations.SharedGalleriesOperations>`
"""
api_version = self._get_api_version('shared_galleries')
if api_version == '2020-09-30':
from ..v2020_09_30.aio.operations import SharedGalleriesOperations as OperationClass
elif api_version == '2021-07-01':
from ..v2021_07_01.aio.operations import SharedGalleriesOperations as OperationClass
elif api_version == '2022-01-03':
from ..v2022_01_03.aio.operations import SharedGalleriesOperations as OperationClass
elif api_version == '2022-03-03':
from ..v2022_03_03.aio.operations import SharedGalleriesOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'shared_galleries'".format(api_version))
self._config.api_version = api_version
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))

    @property
def shared_gallery_image_versions(self):
"""Instance depends on the API version:
* 2020-09-30: :class:`SharedGalleryImageVersionsOperations<azure.mgmt.compute.v2020_09_30.aio.operations.SharedGalleryImageVersionsOperations>`
* 2021-07-01: :class:`SharedGalleryImageVersionsOperations<azure.mgmt.compute.v2021_07_01.aio.operations.SharedGalleryImageVersionsOperations>`
* 2022-01-03: :class:`SharedGalleryImageVersionsOperations<azure.mgmt.compute.v2022_01_03.aio.operations.SharedGalleryImageVersionsOperations>`
* 2022-03-03: :class:`SharedGalleryImageVersionsOperations<azure.mgmt.compute.v2022_03_03.aio.operations.SharedGalleryImageVersionsOperations>`
"""
api_version = self._get_api_version('shared_gallery_image_versions')
if api_version == '2020-09-30':
from ..v2020_09_30.aio.operations import SharedGalleryImageVersionsOperations as OperationClass
elif api_version == '2021-07-01':
from ..v2021_07_01.aio.operations import SharedGalleryImageVersionsOperations as OperationClass
elif api_version == '2022-01-03':
from ..v2022_01_03.aio.operations import SharedGalleryImageVersionsOperations as OperationClass
elif api_version == '2022-03-03':
from ..v2022_03_03.aio.operations import SharedGalleryImageVersionsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'shared_gallery_image_versions'".format(api_version))
self._config.api_version = api_version
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))

    @property
def shared_gallery_images(self):
"""Instance depends on the API version:
* 2020-09-30: :class:`SharedGalleryImagesOperations<azure.mgmt.compute.v2020_09_30.aio.operations.SharedGalleryImagesOperations>`
* 2021-07-01: :class:`SharedGalleryImagesOperations<azure.mgmt.compute.v2021_07_01.aio.operations.SharedGalleryImagesOperations>`
* 2022-01-03: :class:`SharedGalleryImagesOperations<azure.mgmt.compute.v2022_01_03.aio.operations.SharedGalleryImagesOperations>`
* 2022-03-03: :class:`SharedGalleryImagesOperations<azure.mgmt.compute.v2022_03_03.aio.operations.SharedGalleryImagesOperations>`
"""
api_version = self._get_api_version('shared_gallery_images')
if api_version == '2020-09-30':
from ..v2020_09_30.aio.operations import SharedGalleryImagesOperations as OperationClass
elif api_version == '2021-07-01':
from ..v2021_07_01.aio.operations import SharedGalleryImagesOperations as OperationClass
elif api_version == '2022-01-03':
from ..v2022_01_03.aio.operations import SharedGalleryImagesOperations as OperationClass
elif api_version == '2022-03-03':
from ..v2022_03_03.aio.operations import SharedGalleryImagesOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'shared_gallery_images'".format(api_version))
self._config.api_version = api_version
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))

    @property
def snapshots(self):
"""Instance depends on the API version:
* 2016-04-30-preview: :class:`SnapshotsOperations<azure.mgmt.compute.v2016_04_30_preview.aio.operations.SnapshotsOperations>`
* 2017-03-30: :class:`SnapshotsOperations<azure.mgmt.compute.v2017_03_30.aio.operations.SnapshotsOperations>`
* 2018-04-01: :class:`SnapshotsOperations<azure.mgmt.compute.v2018_04_01.aio.operations.SnapshotsOperations>`
* 2018-06-01: :class:`SnapshotsOperations<azure.mgmt.compute.v2018_06_01.aio.operations.SnapshotsOperations>`
* 2018-09-30: :class:`SnapshotsOperations<azure.mgmt.compute.v2018_09_30.aio.operations.SnapshotsOperations>`
* 2019-03-01: :class:`SnapshotsOperations<azure.mgmt.compute.v2019_03_01.aio.operations.SnapshotsOperations>`
* 2019-07-01: :class:`SnapshotsOperations<azure.mgmt.compute.v2019_07_01.aio.operations.SnapshotsOperations>`
* 2019-11-01: :class:`SnapshotsOperations<azure.mgmt.compute.v2019_11_01.aio.operations.SnapshotsOperations>`
* 2020-05-01: :class:`SnapshotsOperations<azure.mgmt.compute.v2020_05_01.aio.operations.SnapshotsOperations>`
* 2020-06-30: :class:`SnapshotsOperations<azure.mgmt.compute.v2020_06_30.aio.operations.SnapshotsOperations>`
* 2020-09-30: :class:`SnapshotsOperations<azure.mgmt.compute.v2020_09_30.aio.operations.SnapshotsOperations>`
* 2020-12-01: :class:`SnapshotsOperations<azure.mgmt.compute.v2020_12_01.aio.operations.SnapshotsOperations>`
* 2021-04-01: :class:`SnapshotsOperations<azure.mgmt.compute.v2021_04_01.aio.operations.SnapshotsOperations>`
* 2021-08-01: :class:`SnapshotsOperations<azure.mgmt.compute.v2021_08_01.aio.operations.SnapshotsOperations>`
* 2021-12-01: :class:`SnapshotsOperations<azure.mgmt.compute.v2021_12_01.aio.operations.SnapshotsOperations>`
* 2022-03-02: :class:`SnapshotsOperations<azure.mgmt.compute.v2022_03_02.aio.operations.SnapshotsOperations>`
* 2022-07-02: :class:`SnapshotsOperations<azure.mgmt.compute.v2022_07_02.aio.operations.SnapshotsOperations>`
"""
api_version = self._get_api_version('snapshots')
if api_version == '2016-04-30-preview':
from ..v2016_04_30_preview.aio.operations import SnapshotsOperations as OperationClass
elif api_version == '2017-03-30':
from ..v2017_03_30.aio.operations import SnapshotsOperations as OperationClass
elif api_version == '2018-04-01':
from ..v2018_04_01.aio.operations import SnapshotsOperations as OperationClass
elif api_version == '2018-06-01':
from ..v2018_06_01.aio.operations import SnapshotsOperations as OperationClass
elif api_version == '2018-09-30':
from ..v2018_09_30.aio.operations import SnapshotsOperations as OperationClass
elif api_version == '2019-03-01':
from ..v2019_03_01.aio.operations import SnapshotsOperations as OperationClass
elif api_version == '2019-07-01':
from ..v2019_07_01.aio.operations import SnapshotsOperations as OperationClass
elif api_version == '2019-11-01':
from ..v2019_11_01.aio.operations import SnapshotsOperations as OperationClass
elif api_version == '2020-05-01':
from ..v2020_05_01.aio.operations import SnapshotsOperations as OperationClass
elif api_version == '2020-06-30':
from ..v2020_06_30.aio.operations import SnapshotsOperations as OperationClass
elif api_version == '2020-09-30':
from ..v2020_09_30.aio.operations import SnapshotsOperations as OperationClass
elif api_version == '2020-12-01':
from ..v2020_12_01.aio.operations import SnapshotsOperations as OperationClass
elif api_version == '2021-04-01':
from ..v2021_04_01.aio.operations import SnapshotsOperations as OperationClass
elif api_version == '2021-08-01':
from ..v2021_08_01.aio.operations import SnapshotsOperations as OperationClass
elif api_version == '2021-12-01':
from ..v2021_12_01.aio.operations import SnapshotsOperations as OperationClass
elif api_version == '2022-03-02':
from ..v2022_03_02.aio.operations import SnapshotsOperations as OperationClass
elif api_version == '2022-07-02':
from ..v2022_07_02.aio.operations import SnapshotsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'snapshots'".format(api_version))
self._config.api_version = api_version
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
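
    # Disk-RP groups such as snapshots follow their own version line (e.g.
    # 2022-07-02), distinct from the core compute line (2022-08-01). A
    # custom profile can pin groups individually; a sketch assuming the
    # standard azure.profiles shape (the label string is arbitrary):
    #
    #     from azure.profiles import ProfileDefinition
    #
    #     custom = ProfileDefinition(
    #         {"azure.mgmt.compute.ComputeManagementClient": {
    #             None: "2022-08-01",         # default for most groups
    #             "snapshots": "2022-07-02",  # disk RP line
    #         }},
    #         "custom-compute-profile",
    #     )
    #     client = ComputeManagementClient(credential, sub_id, profile=custom)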

    @property
def ssh_public_keys(self):
"""Instance depends on the API version:
* 2019-12-01: :class:`SshPublicKeysOperations<azure.mgmt.compute.v2019_12_01.aio.operations.SshPublicKeysOperations>`
* 2020-06-01: :class:`SshPublicKeysOperations<azure.mgmt.compute.v2020_06_01.aio.operations.SshPublicKeysOperations>`
* 2020-12-01: :class:`SshPublicKeysOperations<azure.mgmt.compute.v2020_12_01.aio.operations.SshPublicKeysOperations>`
* 2021-03-01: :class:`SshPublicKeysOperations<azure.mgmt.compute.v2021_03_01.aio.operations.SshPublicKeysOperations>`
* 2021-04-01: :class:`SshPublicKeysOperations<azure.mgmt.compute.v2021_04_01.aio.operations.SshPublicKeysOperations>`
* 2021-07-01: :class:`SshPublicKeysOperations<azure.mgmt.compute.v2021_07_01.aio.operations.SshPublicKeysOperations>`
* 2021-11-01: :class:`SshPublicKeysOperations<azure.mgmt.compute.v2021_11_01.aio.operations.SshPublicKeysOperations>`
* 2022-03-01: :class:`SshPublicKeysOperations<azure.mgmt.compute.v2022_03_01.aio.operations.SshPublicKeysOperations>`
* 2022-08-01: :class:`SshPublicKeysOperations<azure.mgmt.compute.v2022_08_01.aio.operations.SshPublicKeysOperations>`
"""
api_version = self._get_api_version('ssh_public_keys')
if api_version == '2019-12-01':
from ..v2019_12_01.aio.operations import SshPublicKeysOperations as OperationClass
elif api_version == '2020-06-01':
from ..v2020_06_01.aio.operations import SshPublicKeysOperations as OperationClass
elif api_version == '2020-12-01':
from ..v2020_12_01.aio.operations import SshPublicKeysOperations as OperationClass
elif api_version == '2021-03-01':
from ..v2021_03_01.aio.operations import SshPublicKeysOperations as OperationClass
elif api_version == '2021-04-01':
from ..v2021_04_01.aio.operations import SshPublicKeysOperations as OperationClass
elif api_version == '2021-07-01':
from ..v2021_07_01.aio.operations import SshPublicKeysOperations as OperationClass
elif api_version == '2021-11-01':
from ..v2021_11_01.aio.operations import SshPublicKeysOperations as OperationClass
elif api_version == '2022-03-01':
from ..v2022_03_01.aio.operations import SshPublicKeysOperations as OperationClass
elif api_version == '2022-08-01':
from ..v2022_08_01.aio.operations import SshPublicKeysOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'ssh_public_keys'".format(api_version))
self._config.api_version = api_version
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))

    @property
def usage(self):
"""Instance depends on the API version:
* 2015-06-15: :class:`UsageOperations<azure.mgmt.compute.v2015_06_15.aio.operations.UsageOperations>`
* 2016-03-30: :class:`UsageOperations<azure.mgmt.compute.v2016_03_30.aio.operations.UsageOperations>`
* 2016-04-30-preview: :class:`UsageOperations<azure.mgmt.compute.v2016_04_30_preview.aio.operations.UsageOperations>`
* 2017-03-30: :class:`UsageOperations<azure.mgmt.compute.v2017_03_30.aio.operations.UsageOperations>`
* 2017-12-01: :class:`UsageOperations<azure.mgmt.compute.v2017_12_01.aio.operations.UsageOperations>`
* 2018-04-01: :class:`UsageOperations<azure.mgmt.compute.v2018_04_01.aio.operations.UsageOperations>`
* 2018-06-01: :class:`UsageOperations<azure.mgmt.compute.v2018_06_01.aio.operations.UsageOperations>`
* 2018-10-01: :class:`UsageOperations<azure.mgmt.compute.v2018_10_01.aio.operations.UsageOperations>`
* 2019-03-01: :class:`UsageOperations<azure.mgmt.compute.v2019_03_01.aio.operations.UsageOperations>`
* 2019-07-01: :class:`UsageOperations<azure.mgmt.compute.v2019_07_01.aio.operations.UsageOperations>`
* 2019-12-01: :class:`UsageOperations<azure.mgmt.compute.v2019_12_01.aio.operations.UsageOperations>`
* 2020-06-01: :class:`UsageOperations<azure.mgmt.compute.v2020_06_01.aio.operations.UsageOperations>`
* 2020-12-01: :class:`UsageOperations<azure.mgmt.compute.v2020_12_01.aio.operations.UsageOperations>`
* 2021-03-01: :class:`UsageOperations<azure.mgmt.compute.v2021_03_01.aio.operations.UsageOperations>`
* 2021-04-01: :class:`UsageOperations<azure.mgmt.compute.v2021_04_01.aio.operations.UsageOperations>`
* 2021-07-01: :class:`UsageOperations<azure.mgmt.compute.v2021_07_01.aio.operations.UsageOperations>`
* 2021-11-01: :class:`UsageOperations<azure.mgmt.compute.v2021_11_01.aio.operations.UsageOperations>`
* 2022-03-01: :class:`UsageOperations<azure.mgmt.compute.v2022_03_01.aio.operations.UsageOperations>`
* 2022-08-01: :class:`UsageOperations<azure.mgmt.compute.v2022_08_01.aio.operations.UsageOperations>`
"""
api_version = self._get_api_version('usage')
if api_version == '2015-06-15':
from ..v2015_06_15.aio.operations import UsageOperations as OperationClass
elif api_version == '2016-03-30':
from ..v2016_03_30.aio.operations import UsageOperations as OperationClass
elif api_version == '2016-04-30-preview':
from ..v2016_04_30_preview.aio.operations import UsageOperations as OperationClass
elif api_version == '2017-03-30':
from ..v2017_03_30.aio.operations import UsageOperations as OperationClass
elif api_version == '2017-12-01':
from ..v2017_12_01.aio.operations import UsageOperations as OperationClass
elif api_version == '2018-04-01':
from ..v2018_04_01.aio.operations import UsageOperations as OperationClass
elif api_version == '2018-06-01':
from ..v2018_06_01.aio.operations import UsageOperations as OperationClass
elif api_version == '2018-10-01':
from ..v2018_10_01.aio.operations import UsageOperations as OperationClass
elif api_version == '2019-03-01':
from ..v2019_03_01.aio.operations import UsageOperations as OperationClass
elif api_version == '2019-07-01':
from ..v2019_07_01.aio.operations import UsageOperations as OperationClass
elif api_version == '2019-12-01':
from ..v2019_12_01.aio.operations import UsageOperations as OperationClass
elif api_version == '2020-06-01':
from ..v2020_06_01.aio.operations import UsageOperations as OperationClass
elif api_version == '2020-12-01':
from ..v2020_12_01.aio.operations import UsageOperations as OperationClass
elif api_version == '2021-03-01':
from ..v2021_03_01.aio.operations import UsageOperations as OperationClass
elif api_version == '2021-04-01':
from ..v2021_04_01.aio.operations import UsageOperations as OperationClass
elif api_version == '2021-07-01':
from ..v2021_07_01.aio.operations import UsageOperations as OperationClass
elif api_version == '2021-11-01':
from ..v2021_11_01.aio.operations import UsageOperations as OperationClass
elif api_version == '2022-03-01':
from ..v2022_03_01.aio.operations import UsageOperations as OperationClass
elif api_version == '2022-08-01':
from ..v2022_08_01.aio.operations import UsageOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'usage'".format(api_version))
self._config.api_version = api_version
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
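
    # Compute usage is reported per location; a brief sketch (the location
    # value is illustrative):
    #
    #     async for item in client.usage.list("westus2"):
    #         print(item.name.value, item.current_value, item.limit)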

    @property
def virtual_machine_extension_images(self):
"""Instance depends on the API version:
* 2015-06-15: :class:`VirtualMachineExtensionImagesOperations<azure.mgmt.compute.v2015_06_15.aio.operations.VirtualMachineExtensionImagesOperations>`
* 2016-03-30: :class:`VirtualMachineExtensionImagesOperations<azure.mgmt.compute.v2016_03_30.aio.operations.VirtualMachineExtensionImagesOperations>`
* 2016-04-30-preview: :class:`VirtualMachineExtensionImagesOperations<azure.mgmt.compute.v2016_04_30_preview.aio.operations.VirtualMachineExtensionImagesOperations>`
* 2017-03-30: :class:`VirtualMachineExtensionImagesOperations<azure.mgmt.compute.v2017_03_30.aio.operations.VirtualMachineExtensionImagesOperations>`
* 2017-12-01: :class:`VirtualMachineExtensionImagesOperations<azure.mgmt.compute.v2017_12_01.aio.operations.VirtualMachineExtensionImagesOperations>`
* 2018-04-01: :class:`VirtualMachineExtensionImagesOperations<azure.mgmt.compute.v2018_04_01.aio.operations.VirtualMachineExtensionImagesOperations>`
* 2018-06-01: :class:`VirtualMachineExtensionImagesOperations<azure.mgmt.compute.v2018_06_01.aio.operations.VirtualMachineExtensionImagesOperations>`
* 2018-10-01: :class:`VirtualMachineExtensionImagesOperations<azure.mgmt.compute.v2018_10_01.aio.operations.VirtualMachineExtensionImagesOperations>`
* 2019-03-01: :class:`VirtualMachineExtensionImagesOperations<azure.mgmt.compute.v2019_03_01.aio.operations.VirtualMachineExtensionImagesOperations>`
* 2019-07-01: :class:`VirtualMachineExtensionImagesOperations<azure.mgmt.compute.v2019_07_01.aio.operations.VirtualMachineExtensionImagesOperations>`
* 2019-12-01: :class:`VirtualMachineExtensionImagesOperations<azure.mgmt.compute.v2019_12_01.aio.operations.VirtualMachineExtensionImagesOperations>`
* 2020-06-01: :class:`VirtualMachineExtensionImagesOperations<azure.mgmt.compute.v2020_06_01.aio.operations.VirtualMachineExtensionImagesOperations>`
* 2020-12-01: :class:`VirtualMachineExtensionImagesOperations<azure.mgmt.compute.v2020_12_01.aio.operations.VirtualMachineExtensionImagesOperations>`
* 2021-03-01: :class:`VirtualMachineExtensionImagesOperations<azure.mgmt.compute.v2021_03_01.aio.operations.VirtualMachineExtensionImagesOperations>`
* 2021-04-01: :class:`VirtualMachineExtensionImagesOperations<azure.mgmt.compute.v2021_04_01.aio.operations.VirtualMachineExtensionImagesOperations>`
* 2021-07-01: :class:`VirtualMachineExtensionImagesOperations<azure.mgmt.compute.v2021_07_01.aio.operations.VirtualMachineExtensionImagesOperations>`
* 2021-11-01: :class:`VirtualMachineExtensionImagesOperations<azure.mgmt.compute.v2021_11_01.aio.operations.VirtualMachineExtensionImagesOperations>`
* 2022-03-01: :class:`VirtualMachineExtensionImagesOperations<azure.mgmt.compute.v2022_03_01.aio.operations.VirtualMachineExtensionImagesOperations>`
* 2022-08-01: :class:`VirtualMachineExtensionImagesOperations<azure.mgmt.compute.v2022_08_01.aio.operations.VirtualMachineExtensionImagesOperations>`
"""
api_version = self._get_api_version('virtual_machine_extension_images')
if api_version == '2015-06-15':
from ..v2015_06_15.aio.operations import VirtualMachineExtensionImagesOperations as OperationClass
elif api_version == '2016-03-30':
from ..v2016_03_30.aio.operations import VirtualMachineExtensionImagesOperations as OperationClass
elif api_version == '2016-04-30-preview':
from ..v2016_04_30_preview.aio.operations import VirtualMachineExtensionImagesOperations as OperationClass
elif api_version == '2017-03-30':
from ..v2017_03_30.aio.operations import VirtualMachineExtensionImagesOperations as OperationClass
elif api_version == '2017-12-01':
from ..v2017_12_01.aio.operations import VirtualMachineExtensionImagesOperations as OperationClass
elif api_version == '2018-04-01':
from ..v2018_04_01.aio.operations import VirtualMachineExtensionImagesOperations as OperationClass
elif api_version == '2018-06-01':
from ..v2018_06_01.aio.operations import VirtualMachineExtensionImagesOperations as OperationClass
elif api_version == '2018-10-01':
from ..v2018_10_01.aio.operations import VirtualMachineExtensionImagesOperations as OperationClass
elif api_version == '2019-03-01':
from ..v2019_03_01.aio.operations import VirtualMachineExtensionImagesOperations as OperationClass
elif api_version == '2019-07-01':
from ..v2019_07_01.aio.operations import VirtualMachineExtensionImagesOperations as OperationClass
elif api_version == '2019-12-01':
from ..v2019_12_01.aio.operations import VirtualMachineExtensionImagesOperations as OperationClass
elif api_version == '2020-06-01':
from ..v2020_06_01.aio.operations import VirtualMachineExtensionImagesOperations as OperationClass
elif api_version == '2020-12-01':
from ..v2020_12_01.aio.operations import VirtualMachineExtensionImagesOperations as OperationClass
elif api_version == '2021-03-01':
from ..v2021_03_01.aio.operations import VirtualMachineExtensionImagesOperations as OperationClass
elif api_version == '2021-04-01':
from ..v2021_04_01.aio.operations import VirtualMachineExtensionImagesOperations as OperationClass
elif api_version == '2021-07-01':
from ..v2021_07_01.aio.operations import VirtualMachineExtensionImagesOperations as OperationClass
elif api_version == '2021-11-01':
from ..v2021_11_01.aio.operations import VirtualMachineExtensionImagesOperations as OperationClass
elif api_version == '2022-03-01':
from ..v2022_03_01.aio.operations import VirtualMachineExtensionImagesOperations as OperationClass
elif api_version == '2022-08-01':
from ..v2022_08_01.aio.operations import VirtualMachineExtensionImagesOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'virtual_machine_extension_images'".format(api_version))
self._config.api_version = api_version
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))

    @property
def virtual_machine_extensions(self):
"""Instance depends on the API version:
* 2015-06-15: :class:`VirtualMachineExtensionsOperations<azure.mgmt.compute.v2015_06_15.aio.operations.VirtualMachineExtensionsOperations>`
* 2016-03-30: :class:`VirtualMachineExtensionsOperations<azure.mgmt.compute.v2016_03_30.aio.operations.VirtualMachineExtensionsOperations>`
* 2016-04-30-preview: :class:`VirtualMachineExtensionsOperations<azure.mgmt.compute.v2016_04_30_preview.aio.operations.VirtualMachineExtensionsOperations>`
* 2017-03-30: :class:`VirtualMachineExtensionsOperations<azure.mgmt.compute.v2017_03_30.aio.operations.VirtualMachineExtensionsOperations>`
* 2017-12-01: :class:`VirtualMachineExtensionsOperations<azure.mgmt.compute.v2017_12_01.aio.operations.VirtualMachineExtensionsOperations>`
* 2018-04-01: :class:`VirtualMachineExtensionsOperations<azure.mgmt.compute.v2018_04_01.aio.operations.VirtualMachineExtensionsOperations>`
* 2018-06-01: :class:`VirtualMachineExtensionsOperations<azure.mgmt.compute.v2018_06_01.aio.operations.VirtualMachineExtensionsOperations>`
* 2018-10-01: :class:`VirtualMachineExtensionsOperations<azure.mgmt.compute.v2018_10_01.aio.operations.VirtualMachineExtensionsOperations>`
* 2019-03-01: :class:`VirtualMachineExtensionsOperations<azure.mgmt.compute.v2019_03_01.aio.operations.VirtualMachineExtensionsOperations>`
* 2019-07-01: :class:`VirtualMachineExtensionsOperations<azure.mgmt.compute.v2019_07_01.aio.operations.VirtualMachineExtensionsOperations>`
* 2019-12-01: :class:`VirtualMachineExtensionsOperations<azure.mgmt.compute.v2019_12_01.aio.operations.VirtualMachineExtensionsOperations>`
* 2020-06-01: :class:`VirtualMachineExtensionsOperations<azure.mgmt.compute.v2020_06_01.aio.operations.VirtualMachineExtensionsOperations>`
* 2020-12-01: :class:`VirtualMachineExtensionsOperations<azure.mgmt.compute.v2020_12_01.aio.operations.VirtualMachineExtensionsOperations>`
* 2021-03-01: :class:`VirtualMachineExtensionsOperations<azure.mgmt.compute.v2021_03_01.aio.operations.VirtualMachineExtensionsOperations>`
* 2021-04-01: :class:`VirtualMachineExtensionsOperations<azure.mgmt.compute.v2021_04_01.aio.operations.VirtualMachineExtensionsOperations>`
* 2021-07-01: :class:`VirtualMachineExtensionsOperations<azure.mgmt.compute.v2021_07_01.aio.operations.VirtualMachineExtensionsOperations>`
* 2021-11-01: :class:`VirtualMachineExtensionsOperations<azure.mgmt.compute.v2021_11_01.aio.operations.VirtualMachineExtensionsOperations>`
* 2022-03-01: :class:`VirtualMachineExtensionsOperations<azure.mgmt.compute.v2022_03_01.aio.operations.VirtualMachineExtensionsOperations>`
* 2022-08-01: :class:`VirtualMachineExtensionsOperations<azure.mgmt.compute.v2022_08_01.aio.operations.VirtualMachineExtensionsOperations>`
"""
api_version = self._get_api_version('virtual_machine_extensions')
if api_version == '2015-06-15':
from ..v2015_06_15.aio.operations import VirtualMachineExtensionsOperations as OperationClass
elif api_version == '2016-03-30':
from ..v2016_03_30.aio.operations import VirtualMachineExtensionsOperations as OperationClass
elif api_version == '2016-04-30-preview':
from ..v2016_04_30_preview.aio.operations import VirtualMachineExtensionsOperations as OperationClass
elif api_version == '2017-03-30':
from ..v2017_03_30.aio.operations import VirtualMachineExtensionsOperations as OperationClass
elif api_version == '2017-12-01':
from ..v2017_12_01.aio.operations import VirtualMachineExtensionsOperations as OperationClass
elif api_version == '2018-04-01':
from ..v2018_04_01.aio.operations import VirtualMachineExtensionsOperations as OperationClass
elif api_version == '2018-06-01':
from ..v2018_06_01.aio.operations import VirtualMachineExtensionsOperations as OperationClass
elif api_version == '2018-10-01':
from ..v2018_10_01.aio.operations import VirtualMachineExtensionsOperations as OperationClass
elif api_version == '2019-03-01':
from ..v2019_03_01.aio.operations import VirtualMachineExtensionsOperations as OperationClass
elif api_version == '2019-07-01':
from ..v2019_07_01.aio.operations import VirtualMachineExtensionsOperations as OperationClass
elif api_version == '2019-12-01':
from ..v2019_12_01.aio.operations import VirtualMachineExtensionsOperations as OperationClass
elif api_version == '2020-06-01':
from ..v2020_06_01.aio.operations import VirtualMachineExtensionsOperations as OperationClass
elif api_version == '2020-12-01':
from ..v2020_12_01.aio.operations import VirtualMachineExtensionsOperations as OperationClass
elif api_version == '2021-03-01':
from ..v2021_03_01.aio.operations import VirtualMachineExtensionsOperations as OperationClass
elif api_version == '2021-04-01':
from ..v2021_04_01.aio.operations import VirtualMachineExtensionsOperations as OperationClass
elif api_version == '2021-07-01':
from ..v2021_07_01.aio.operations import VirtualMachineExtensionsOperations as OperationClass
elif api_version == '2021-11-01':
from ..v2021_11_01.aio.operations import VirtualMachineExtensionsOperations as OperationClass
elif api_version == '2022-03-01':
from ..v2022_03_01.aio.operations import VirtualMachineExtensionsOperations as OperationClass
elif api_version == '2022-08-01':
from ..v2022_08_01.aio.operations import VirtualMachineExtensionsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'virtual_machine_extensions'".format(api_version))
self._config.api_version = api_version
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))

    @property
def virtual_machine_images(self):
"""Instance depends on the API version:
* 2015-06-15: :class:`VirtualMachineImagesOperations<azure.mgmt.compute.v2015_06_15.aio.operations.VirtualMachineImagesOperations>`
* 2016-03-30: :class:`VirtualMachineImagesOperations<azure.mgmt.compute.v2016_03_30.aio.operations.VirtualMachineImagesOperations>`
* 2016-04-30-preview: :class:`VirtualMachineImagesOperations<azure.mgmt.compute.v2016_04_30_preview.aio.operations.VirtualMachineImagesOperations>`
* 2017-03-30: :class:`VirtualMachineImagesOperations<azure.mgmt.compute.v2017_03_30.aio.operations.VirtualMachineImagesOperations>`
* 2017-12-01: :class:`VirtualMachineImagesOperations<azure.mgmt.compute.v2017_12_01.aio.operations.VirtualMachineImagesOperations>`
* 2018-04-01: :class:`VirtualMachineImagesOperations<azure.mgmt.compute.v2018_04_01.aio.operations.VirtualMachineImagesOperations>`
* 2018-06-01: :class:`VirtualMachineImagesOperations<azure.mgmt.compute.v2018_06_01.aio.operations.VirtualMachineImagesOperations>`
* 2018-10-01: :class:`VirtualMachineImagesOperations<azure.mgmt.compute.v2018_10_01.aio.operations.VirtualMachineImagesOperations>`
* 2019-03-01: :class:`VirtualMachineImagesOperations<azure.mgmt.compute.v2019_03_01.aio.operations.VirtualMachineImagesOperations>`
* 2019-07-01: :class:`VirtualMachineImagesOperations<azure.mgmt.compute.v2019_07_01.aio.operations.VirtualMachineImagesOperations>`
* 2019-12-01: :class:`VirtualMachineImagesOperations<azure.mgmt.compute.v2019_12_01.aio.operations.VirtualMachineImagesOperations>`
* 2020-06-01: :class:`VirtualMachineImagesOperations<azure.mgmt.compute.v2020_06_01.aio.operations.VirtualMachineImagesOperations>`
* 2020-12-01: :class:`VirtualMachineImagesOperations<azure.mgmt.compute.v2020_12_01.aio.operations.VirtualMachineImagesOperations>`
* 2021-03-01: :class:`VirtualMachineImagesOperations<azure.mgmt.compute.v2021_03_01.aio.operations.VirtualMachineImagesOperations>`
* 2021-04-01: :class:`VirtualMachineImagesOperations<azure.mgmt.compute.v2021_04_01.aio.operations.VirtualMachineImagesOperations>`
* 2021-07-01: :class:`VirtualMachineImagesOperations<azure.mgmt.compute.v2021_07_01.aio.operations.VirtualMachineImagesOperations>`
* 2021-11-01: :class:`VirtualMachineImagesOperations<azure.mgmt.compute.v2021_11_01.aio.operations.VirtualMachineImagesOperations>`
* 2022-03-01: :class:`VirtualMachineImagesOperations<azure.mgmt.compute.v2022_03_01.aio.operations.VirtualMachineImagesOperations>`
* 2022-08-01: :class:`VirtualMachineImagesOperations<azure.mgmt.compute.v2022_08_01.aio.operations.VirtualMachineImagesOperations>`
"""
api_version = self._get_api_version('virtual_machine_images')
if api_version == '2015-06-15':
from ..v2015_06_15.aio.operations import VirtualMachineImagesOperations as OperationClass
elif api_version == '2016-03-30':
from ..v2016_03_30.aio.operations import VirtualMachineImagesOperations as OperationClass
elif api_version == '2016-04-30-preview':
from ..v2016_04_30_preview.aio.operations import VirtualMachineImagesOperations as OperationClass
elif api_version == '2017-03-30':
from ..v2017_03_30.aio.operations import VirtualMachineImagesOperations as OperationClass
elif api_version == '2017-12-01':
from ..v2017_12_01.aio.operations import VirtualMachineImagesOperations as OperationClass
elif api_version == '2018-04-01':
from ..v2018_04_01.aio.operations import VirtualMachineImagesOperations as OperationClass
elif api_version == '2018-06-01':
from ..v2018_06_01.aio.operations import VirtualMachineImagesOperations as OperationClass
elif api_version == '2018-10-01':
from ..v2018_10_01.aio.operations import VirtualMachineImagesOperations as OperationClass
elif api_version == '2019-03-01':
from ..v2019_03_01.aio.operations import VirtualMachineImagesOperations as OperationClass
elif api_version == '2019-07-01':
from ..v2019_07_01.aio.operations import VirtualMachineImagesOperations as OperationClass
elif api_version == '2019-12-01':
from ..v2019_12_01.aio.operations import VirtualMachineImagesOperations as OperationClass
elif api_version == '2020-06-01':
from ..v2020_06_01.aio.operations import VirtualMachineImagesOperations as OperationClass
elif api_version == '2020-12-01':
from ..v2020_12_01.aio.operations import VirtualMachineImagesOperations as OperationClass
elif api_version == '2021-03-01':
from ..v2021_03_01.aio.operations import VirtualMachineImagesOperations as OperationClass
elif api_version == '2021-04-01':
from ..v2021_04_01.aio.operations import VirtualMachineImagesOperations as OperationClass
elif api_version == '2021-07-01':
from ..v2021_07_01.aio.operations import VirtualMachineImagesOperations as OperationClass
elif api_version == '2021-11-01':
from ..v2021_11_01.aio.operations import VirtualMachineImagesOperations as OperationClass
elif api_version == '2022-03-01':
from ..v2022_03_01.aio.operations import VirtualMachineImagesOperations as OperationClass
elif api_version == '2022-08-01':
from ..v2022_08_01.aio.operations import VirtualMachineImagesOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'virtual_machine_images'".format(api_version))
self._config.api_version = api_version
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
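
    # Unlike the paged groups above, virtual_machine_images.list returns a
    # plain list and must be awaited on the aio client; the publisher/offer/
    # sku values below are illustrative:
    #
    #     images = await client.virtual_machine_images.list(
    #         "westus2", "Canonical", "UbuntuServer", "18.04-LTS")
    #     print([i.name for i in images])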

    @property
def virtual_machine_images_edge_zone(self):
"""Instance depends on the API version:
* 2020-12-01: :class:`VirtualMachineImagesEdgeZoneOperations<azure.mgmt.compute.v2020_12_01.aio.operations.VirtualMachineImagesEdgeZoneOperations>`
* 2021-03-01: :class:`VirtualMachineImagesEdgeZoneOperations<azure.mgmt.compute.v2021_03_01.aio.operations.VirtualMachineImagesEdgeZoneOperations>`
* 2021-04-01: :class:`VirtualMachineImagesEdgeZoneOperations<azure.mgmt.compute.v2021_04_01.aio.operations.VirtualMachineImagesEdgeZoneOperations>`
* 2021-07-01: :class:`VirtualMachineImagesEdgeZoneOperations<azure.mgmt.compute.v2021_07_01.aio.operations.VirtualMachineImagesEdgeZoneOperations>`
* 2021-11-01: :class:`VirtualMachineImagesEdgeZoneOperations<azure.mgmt.compute.v2021_11_01.aio.operations.VirtualMachineImagesEdgeZoneOperations>`
* 2022-03-01: :class:`VirtualMachineImagesEdgeZoneOperations<azure.mgmt.compute.v2022_03_01.aio.operations.VirtualMachineImagesEdgeZoneOperations>`
* 2022-08-01: :class:`VirtualMachineImagesEdgeZoneOperations<azure.mgmt.compute.v2022_08_01.aio.operations.VirtualMachineImagesEdgeZoneOperations>`
"""
api_version = self._get_api_version('virtual_machine_images_edge_zone')
if api_version == '2020-12-01':
from ..v2020_12_01.aio.operations import VirtualMachineImagesEdgeZoneOperations as OperationClass
elif api_version == '2021-03-01':
from ..v2021_03_01.aio.operations import VirtualMachineImagesEdgeZoneOperations as OperationClass
elif api_version == '2021-04-01':
from ..v2021_04_01.aio.operations import VirtualMachineImagesEdgeZoneOperations as OperationClass
elif api_version == '2021-07-01':
from ..v2021_07_01.aio.operations import VirtualMachineImagesEdgeZoneOperations as OperationClass
elif api_version == '2021-11-01':
from ..v2021_11_01.aio.operations import VirtualMachineImagesEdgeZoneOperations as OperationClass
elif api_version == '2022-03-01':
from ..v2022_03_01.aio.operations import VirtualMachineImagesEdgeZoneOperations as OperationClass
elif api_version == '2022-08-01':
from ..v2022_08_01.aio.operations import VirtualMachineImagesEdgeZoneOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'virtual_machine_images_edge_zone'".format(api_version))
self._config.api_version = api_version
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))

    @property
def virtual_machine_run_commands(self):
"""Instance depends on the API version:
* 2017-03-30: :class:`VirtualMachineRunCommandsOperations<azure.mgmt.compute.v2017_03_30.aio.operations.VirtualMachineRunCommandsOperations>`
* 2017-12-01: :class:`VirtualMachineRunCommandsOperations<azure.mgmt.compute.v2017_12_01.aio.operations.VirtualMachineRunCommandsOperations>`
* 2018-04-01: :class:`VirtualMachineRunCommandsOperations<azure.mgmt.compute.v2018_04_01.aio.operations.VirtualMachineRunCommandsOperations>`
* 2018-06-01: :class:`VirtualMachineRunCommandsOperations<azure.mgmt.compute.v2018_06_01.aio.operations.VirtualMachineRunCommandsOperations>`
* 2018-10-01: :class:`VirtualMachineRunCommandsOperations<azure.mgmt.compute.v2018_10_01.aio.operations.VirtualMachineRunCommandsOperations>`
* 2019-03-01: :class:`VirtualMachineRunCommandsOperations<azure.mgmt.compute.v2019_03_01.aio.operations.VirtualMachineRunCommandsOperations>`
* 2019-07-01: :class:`VirtualMachineRunCommandsOperations<azure.mgmt.compute.v2019_07_01.aio.operations.VirtualMachineRunCommandsOperations>`
* 2019-12-01: :class:`VirtualMachineRunCommandsOperations<azure.mgmt.compute.v2019_12_01.aio.operations.VirtualMachineRunCommandsOperations>`
* 2020-06-01: :class:`VirtualMachineRunCommandsOperations<azure.mgmt.compute.v2020_06_01.aio.operations.VirtualMachineRunCommandsOperations>`
* 2020-12-01: :class:`VirtualMachineRunCommandsOperations<azure.mgmt.compute.v2020_12_01.aio.operations.VirtualMachineRunCommandsOperations>`
* 2021-03-01: :class:`VirtualMachineRunCommandsOperations<azure.mgmt.compute.v2021_03_01.aio.operations.VirtualMachineRunCommandsOperations>`
* 2021-04-01: :class:`VirtualMachineRunCommandsOperations<azure.mgmt.compute.v2021_04_01.aio.operations.VirtualMachineRunCommandsOperations>`
* 2021-07-01: :class:`VirtualMachineRunCommandsOperations<azure.mgmt.compute.v2021_07_01.aio.operations.VirtualMachineRunCommandsOperations>`
* 2021-11-01: :class:`VirtualMachineRunCommandsOperations<azure.mgmt.compute.v2021_11_01.aio.operations.VirtualMachineRunCommandsOperations>`
* 2022-03-01: :class:`VirtualMachineRunCommandsOperations<azure.mgmt.compute.v2022_03_01.aio.operations.VirtualMachineRunCommandsOperations>`
* 2022-08-01: :class:`VirtualMachineRunCommandsOperations<azure.mgmt.compute.v2022_08_01.aio.operations.VirtualMachineRunCommandsOperations>`
"""
api_version = self._get_api_version('virtual_machine_run_commands')
if api_version == '2017-03-30':
from ..v2017_03_30.aio.operations import VirtualMachineRunCommandsOperations as OperationClass
elif api_version == '2017-12-01':
from ..v2017_12_01.aio.operations import VirtualMachineRunCommandsOperations as OperationClass
elif api_version == '2018-04-01':
from ..v2018_04_01.aio.operations import VirtualMachineRunCommandsOperations as OperationClass
elif api_version == '2018-06-01':
from ..v2018_06_01.aio.operations import VirtualMachineRunCommandsOperations as OperationClass
elif api_version == '2018-10-01':
from ..v2018_10_01.aio.operations import VirtualMachineRunCommandsOperations as OperationClass
elif api_version == '2019-03-01':
from ..v2019_03_01.aio.operations import VirtualMachineRunCommandsOperations as OperationClass
elif api_version == '2019-07-01':
from ..v2019_07_01.aio.operations import VirtualMachineRunCommandsOperations as OperationClass
elif api_version == '2019-12-01':
from ..v2019_12_01.aio.operations import VirtualMachineRunCommandsOperations as OperationClass
elif api_version == '2020-06-01':
from ..v2020_06_01.aio.operations import VirtualMachineRunCommandsOperations as OperationClass
elif api_version == '2020-12-01':
from ..v2020_12_01.aio.operations import VirtualMachineRunCommandsOperations as OperationClass
elif api_version == '2021-03-01':
from ..v2021_03_01.aio.operations import VirtualMachineRunCommandsOperations as OperationClass
elif api_version == '2021-04-01':
from ..v2021_04_01.aio.operations import VirtualMachineRunCommandsOperations as OperationClass
elif api_version == '2021-07-01':
from ..v2021_07_01.aio.operations import VirtualMachineRunCommandsOperations as OperationClass
elif api_version == '2021-11-01':
from ..v2021_11_01.aio.operations import VirtualMachineRunCommandsOperations as OperationClass
elif api_version == '2022-03-01':
from ..v2022_03_01.aio.operations import VirtualMachineRunCommandsOperations as OperationClass
elif api_version == '2022-08-01':
from ..v2022_08_01.aio.operations import VirtualMachineRunCommandsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'virtual_machine_run_commands'".format(api_version))
self._config.api_version = api_version
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def virtual_machine_scale_set_extensions(self):
"""Instance depends on the API version:
* 2017-03-30: :class:`VirtualMachineScaleSetExtensionsOperations<azure.mgmt.compute.v2017_03_30.aio.operations.VirtualMachineScaleSetExtensionsOperations>`
* 2017-12-01: :class:`VirtualMachineScaleSetExtensionsOperations<azure.mgmt.compute.v2017_12_01.aio.operations.VirtualMachineScaleSetExtensionsOperations>`
* 2018-04-01: :class:`VirtualMachineScaleSetExtensionsOperations<azure.mgmt.compute.v2018_04_01.aio.operations.VirtualMachineScaleSetExtensionsOperations>`
* 2018-06-01: :class:`VirtualMachineScaleSetExtensionsOperations<azure.mgmt.compute.v2018_06_01.aio.operations.VirtualMachineScaleSetExtensionsOperations>`
* 2018-10-01: :class:`VirtualMachineScaleSetExtensionsOperations<azure.mgmt.compute.v2018_10_01.aio.operations.VirtualMachineScaleSetExtensionsOperations>`
* 2019-03-01: :class:`VirtualMachineScaleSetExtensionsOperations<azure.mgmt.compute.v2019_03_01.aio.operations.VirtualMachineScaleSetExtensionsOperations>`
* 2019-07-01: :class:`VirtualMachineScaleSetExtensionsOperations<azure.mgmt.compute.v2019_07_01.aio.operations.VirtualMachineScaleSetExtensionsOperations>`
* 2019-12-01: :class:`VirtualMachineScaleSetExtensionsOperations<azure.mgmt.compute.v2019_12_01.aio.operations.VirtualMachineScaleSetExtensionsOperations>`
* 2020-06-01: :class:`VirtualMachineScaleSetExtensionsOperations<azure.mgmt.compute.v2020_06_01.aio.operations.VirtualMachineScaleSetExtensionsOperations>`
* 2020-12-01: :class:`VirtualMachineScaleSetExtensionsOperations<azure.mgmt.compute.v2020_12_01.aio.operations.VirtualMachineScaleSetExtensionsOperations>`
* 2021-03-01: :class:`VirtualMachineScaleSetExtensionsOperations<azure.mgmt.compute.v2021_03_01.aio.operations.VirtualMachineScaleSetExtensionsOperations>`
* 2021-04-01: :class:`VirtualMachineScaleSetExtensionsOperations<azure.mgmt.compute.v2021_04_01.aio.operations.VirtualMachineScaleSetExtensionsOperations>`
* 2021-07-01: :class:`VirtualMachineScaleSetExtensionsOperations<azure.mgmt.compute.v2021_07_01.aio.operations.VirtualMachineScaleSetExtensionsOperations>`
* 2021-11-01: :class:`VirtualMachineScaleSetExtensionsOperations<azure.mgmt.compute.v2021_11_01.aio.operations.VirtualMachineScaleSetExtensionsOperations>`
* 2022-03-01: :class:`VirtualMachineScaleSetExtensionsOperations<azure.mgmt.compute.v2022_03_01.aio.operations.VirtualMachineScaleSetExtensionsOperations>`
* 2022-08-01: :class:`VirtualMachineScaleSetExtensionsOperations<azure.mgmt.compute.v2022_08_01.aio.operations.VirtualMachineScaleSetExtensionsOperations>`
"""
api_version = self._get_api_version('virtual_machine_scale_set_extensions')
if api_version == '2017-03-30':
from ..v2017_03_30.aio.operations import VirtualMachineScaleSetExtensionsOperations as OperationClass
elif api_version == '2017-12-01':
from ..v2017_12_01.aio.operations import VirtualMachineScaleSetExtensionsOperations as OperationClass
elif api_version == '2018-04-01':
from ..v2018_04_01.aio.operations import VirtualMachineScaleSetExtensionsOperations as OperationClass
elif api_version == '2018-06-01':
from ..v2018_06_01.aio.operations import VirtualMachineScaleSetExtensionsOperations as OperationClass
elif api_version == '2018-10-01':
from ..v2018_10_01.aio.operations import VirtualMachineScaleSetExtensionsOperations as OperationClass
elif api_version == '2019-03-01':
from ..v2019_03_01.aio.operations import VirtualMachineScaleSetExtensionsOperations as OperationClass
elif api_version == '2019-07-01':
from ..v2019_07_01.aio.operations import VirtualMachineScaleSetExtensionsOperations as OperationClass
elif api_version == '2019-12-01':
from ..v2019_12_01.aio.operations import VirtualMachineScaleSetExtensionsOperations as OperationClass
elif api_version == '2020-06-01':
from ..v2020_06_01.aio.operations import VirtualMachineScaleSetExtensionsOperations as OperationClass
elif api_version == '2020-12-01':
from ..v2020_12_01.aio.operations import VirtualMachineScaleSetExtensionsOperations as OperationClass
elif api_version == '2021-03-01':
from ..v2021_03_01.aio.operations import VirtualMachineScaleSetExtensionsOperations as OperationClass
elif api_version == '2021-04-01':
from ..v2021_04_01.aio.operations import VirtualMachineScaleSetExtensionsOperations as OperationClass
elif api_version == '2021-07-01':
from ..v2021_07_01.aio.operations import VirtualMachineScaleSetExtensionsOperations as OperationClass
elif api_version == '2021-11-01':
from ..v2021_11_01.aio.operations import VirtualMachineScaleSetExtensionsOperations as OperationClass
elif api_version == '2022-03-01':
from ..v2022_03_01.aio.operations import VirtualMachineScaleSetExtensionsOperations as OperationClass
elif api_version == '2022-08-01':
from ..v2022_08_01.aio.operations import VirtualMachineScaleSetExtensionsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'virtual_machine_scale_set_extensions'".format(api_version))
self._config.api_version = api_version
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def virtual_machine_scale_set_rolling_upgrades(self):
"""Instance depends on the API version:
* 2017-03-30: :class:`VirtualMachineScaleSetRollingUpgradesOperations<azure.mgmt.compute.v2017_03_30.aio.operations.VirtualMachineScaleSetRollingUpgradesOperations>`
* 2017-12-01: :class:`VirtualMachineScaleSetRollingUpgradesOperations<azure.mgmt.compute.v2017_12_01.aio.operations.VirtualMachineScaleSetRollingUpgradesOperations>`
* 2018-04-01: :class:`VirtualMachineScaleSetRollingUpgradesOperations<azure.mgmt.compute.v2018_04_01.aio.operations.VirtualMachineScaleSetRollingUpgradesOperations>`
* 2018-06-01: :class:`VirtualMachineScaleSetRollingUpgradesOperations<azure.mgmt.compute.v2018_06_01.aio.operations.VirtualMachineScaleSetRollingUpgradesOperations>`
* 2018-10-01: :class:`VirtualMachineScaleSetRollingUpgradesOperations<azure.mgmt.compute.v2018_10_01.aio.operations.VirtualMachineScaleSetRollingUpgradesOperations>`
* 2019-03-01: :class:`VirtualMachineScaleSetRollingUpgradesOperations<azure.mgmt.compute.v2019_03_01.aio.operations.VirtualMachineScaleSetRollingUpgradesOperations>`
* 2019-07-01: :class:`VirtualMachineScaleSetRollingUpgradesOperations<azure.mgmt.compute.v2019_07_01.aio.operations.VirtualMachineScaleSetRollingUpgradesOperations>`
* 2019-12-01: :class:`VirtualMachineScaleSetRollingUpgradesOperations<azure.mgmt.compute.v2019_12_01.aio.operations.VirtualMachineScaleSetRollingUpgradesOperations>`
* 2020-06-01: :class:`VirtualMachineScaleSetRollingUpgradesOperations<azure.mgmt.compute.v2020_06_01.aio.operations.VirtualMachineScaleSetRollingUpgradesOperations>`
* 2020-12-01: :class:`VirtualMachineScaleSetRollingUpgradesOperations<azure.mgmt.compute.v2020_12_01.aio.operations.VirtualMachineScaleSetRollingUpgradesOperations>`
* 2021-03-01: :class:`VirtualMachineScaleSetRollingUpgradesOperations<azure.mgmt.compute.v2021_03_01.aio.operations.VirtualMachineScaleSetRollingUpgradesOperations>`
* 2021-04-01: :class:`VirtualMachineScaleSetRollingUpgradesOperations<azure.mgmt.compute.v2021_04_01.aio.operations.VirtualMachineScaleSetRollingUpgradesOperations>`
* 2021-07-01: :class:`VirtualMachineScaleSetRollingUpgradesOperations<azure.mgmt.compute.v2021_07_01.aio.operations.VirtualMachineScaleSetRollingUpgradesOperations>`
* 2021-11-01: :class:`VirtualMachineScaleSetRollingUpgradesOperations<azure.mgmt.compute.v2021_11_01.aio.operations.VirtualMachineScaleSetRollingUpgradesOperations>`
* 2022-03-01: :class:`VirtualMachineScaleSetRollingUpgradesOperations<azure.mgmt.compute.v2022_03_01.aio.operations.VirtualMachineScaleSetRollingUpgradesOperations>`
* 2022-08-01: :class:`VirtualMachineScaleSetRollingUpgradesOperations<azure.mgmt.compute.v2022_08_01.aio.operations.VirtualMachineScaleSetRollingUpgradesOperations>`
"""
api_version = self._get_api_version('virtual_machine_scale_set_rolling_upgrades')
if api_version == '2017-03-30':
from ..v2017_03_30.aio.operations import VirtualMachineScaleSetRollingUpgradesOperations as OperationClass
elif api_version == '2017-12-01':
from ..v2017_12_01.aio.operations import VirtualMachineScaleSetRollingUpgradesOperations as OperationClass
elif api_version == '2018-04-01':
from ..v2018_04_01.aio.operations import VirtualMachineScaleSetRollingUpgradesOperations as OperationClass
elif api_version == '2018-06-01':
from ..v2018_06_01.aio.operations import VirtualMachineScaleSetRollingUpgradesOperations as OperationClass
elif api_version == '2018-10-01':
from ..v2018_10_01.aio.operations import VirtualMachineScaleSetRollingUpgradesOperations as OperationClass
elif api_version == '2019-03-01':
from ..v2019_03_01.aio.operations import VirtualMachineScaleSetRollingUpgradesOperations as OperationClass
elif api_version == '2019-07-01':
from ..v2019_07_01.aio.operations import VirtualMachineScaleSetRollingUpgradesOperations as OperationClass
elif api_version == '2019-12-01':
from ..v2019_12_01.aio.operations import VirtualMachineScaleSetRollingUpgradesOperations as OperationClass
elif api_version == '2020-06-01':
from ..v2020_06_01.aio.operations import VirtualMachineScaleSetRollingUpgradesOperations as OperationClass
elif api_version == '2020-12-01':
from ..v2020_12_01.aio.operations import VirtualMachineScaleSetRollingUpgradesOperations as OperationClass
elif api_version == '2021-03-01':
from ..v2021_03_01.aio.operations import VirtualMachineScaleSetRollingUpgradesOperations as OperationClass
elif api_version == '2021-04-01':
from ..v2021_04_01.aio.operations import VirtualMachineScaleSetRollingUpgradesOperations as OperationClass
elif api_version == '2021-07-01':
from ..v2021_07_01.aio.operations import VirtualMachineScaleSetRollingUpgradesOperations as OperationClass
elif api_version == '2021-11-01':
from ..v2021_11_01.aio.operations import VirtualMachineScaleSetRollingUpgradesOperations as OperationClass
elif api_version == '2022-03-01':
from ..v2022_03_01.aio.operations import VirtualMachineScaleSetRollingUpgradesOperations as OperationClass
elif api_version == '2022-08-01':
from ..v2022_08_01.aio.operations import VirtualMachineScaleSetRollingUpgradesOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'virtual_machine_scale_set_rolling_upgrades'".format(api_version))
self._config.api_version = api_version
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def virtual_machine_scale_set_vm_extensions(self):
"""Instance depends on the API version:
* 2019-07-01: :class:`VirtualMachineScaleSetVMExtensionsOperations<azure.mgmt.compute.v2019_07_01.aio.operations.VirtualMachineScaleSetVMExtensionsOperations>`
* 2019-12-01: :class:`VirtualMachineScaleSetVMExtensionsOperations<azure.mgmt.compute.v2019_12_01.aio.operations.VirtualMachineScaleSetVMExtensionsOperations>`
* 2020-06-01: :class:`VirtualMachineScaleSetVMExtensionsOperations<azure.mgmt.compute.v2020_06_01.aio.operations.VirtualMachineScaleSetVMExtensionsOperations>`
* 2020-12-01: :class:`VirtualMachineScaleSetVMExtensionsOperations<azure.mgmt.compute.v2020_12_01.aio.operations.VirtualMachineScaleSetVMExtensionsOperations>`
* 2021-03-01: :class:`VirtualMachineScaleSetVMExtensionsOperations<azure.mgmt.compute.v2021_03_01.aio.operations.VirtualMachineScaleSetVMExtensionsOperations>`
* 2021-04-01: :class:`VirtualMachineScaleSetVMExtensionsOperations<azure.mgmt.compute.v2021_04_01.aio.operations.VirtualMachineScaleSetVMExtensionsOperations>`
* 2021-07-01: :class:`VirtualMachineScaleSetVMExtensionsOperations<azure.mgmt.compute.v2021_07_01.aio.operations.VirtualMachineScaleSetVMExtensionsOperations>`
* 2021-11-01: :class:`VirtualMachineScaleSetVMExtensionsOperations<azure.mgmt.compute.v2021_11_01.aio.operations.VirtualMachineScaleSetVMExtensionsOperations>`
* 2022-03-01: :class:`VirtualMachineScaleSetVMExtensionsOperations<azure.mgmt.compute.v2022_03_01.aio.operations.VirtualMachineScaleSetVMExtensionsOperations>`
* 2022-08-01: :class:`VirtualMachineScaleSetVMExtensionsOperations<azure.mgmt.compute.v2022_08_01.aio.operations.VirtualMachineScaleSetVMExtensionsOperations>`
"""
api_version = self._get_api_version('virtual_machine_scale_set_vm_extensions')
if api_version == '2019-07-01':
from ..v2019_07_01.aio.operations import VirtualMachineScaleSetVMExtensionsOperations as OperationClass
elif api_version == '2019-12-01':
from ..v2019_12_01.aio.operations import VirtualMachineScaleSetVMExtensionsOperations as OperationClass
elif api_version == '2020-06-01':
from ..v2020_06_01.aio.operations import VirtualMachineScaleSetVMExtensionsOperations as OperationClass
elif api_version == '2020-12-01':
from ..v2020_12_01.aio.operations import VirtualMachineScaleSetVMExtensionsOperations as OperationClass
elif api_version == '2021-03-01':
from ..v2021_03_01.aio.operations import VirtualMachineScaleSetVMExtensionsOperations as OperationClass
elif api_version == '2021-04-01':
from ..v2021_04_01.aio.operations import VirtualMachineScaleSetVMExtensionsOperations as OperationClass
elif api_version == '2021-07-01':
from ..v2021_07_01.aio.operations import VirtualMachineScaleSetVMExtensionsOperations as OperationClass
elif api_version == '2021-11-01':
from ..v2021_11_01.aio.operations import VirtualMachineScaleSetVMExtensionsOperations as OperationClass
elif api_version == '2022-03-01':
from ..v2022_03_01.aio.operations import VirtualMachineScaleSetVMExtensionsOperations as OperationClass
elif api_version == '2022-08-01':
from ..v2022_08_01.aio.operations import VirtualMachineScaleSetVMExtensionsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'virtual_machine_scale_set_vm_extensions'".format(api_version))
self._config.api_version = api_version
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def virtual_machine_scale_set_vm_run_commands(self):
"""Instance depends on the API version:
* 2020-06-01: :class:`VirtualMachineScaleSetVMRunCommandsOperations<azure.mgmt.compute.v2020_06_01.aio.operations.VirtualMachineScaleSetVMRunCommandsOperations>`
* 2020-12-01: :class:`VirtualMachineScaleSetVMRunCommandsOperations<azure.mgmt.compute.v2020_12_01.aio.operations.VirtualMachineScaleSetVMRunCommandsOperations>`
* 2021-03-01: :class:`VirtualMachineScaleSetVMRunCommandsOperations<azure.mgmt.compute.v2021_03_01.aio.operations.VirtualMachineScaleSetVMRunCommandsOperations>`
* 2021-04-01: :class:`VirtualMachineScaleSetVMRunCommandsOperations<azure.mgmt.compute.v2021_04_01.aio.operations.VirtualMachineScaleSetVMRunCommandsOperations>`
* 2021-07-01: :class:`VirtualMachineScaleSetVMRunCommandsOperations<azure.mgmt.compute.v2021_07_01.aio.operations.VirtualMachineScaleSetVMRunCommandsOperations>`
* 2021-11-01: :class:`VirtualMachineScaleSetVMRunCommandsOperations<azure.mgmt.compute.v2021_11_01.aio.operations.VirtualMachineScaleSetVMRunCommandsOperations>`
* 2022-03-01: :class:`VirtualMachineScaleSetVMRunCommandsOperations<azure.mgmt.compute.v2022_03_01.aio.operations.VirtualMachineScaleSetVMRunCommandsOperations>`
* 2022-08-01: :class:`VirtualMachineScaleSetVMRunCommandsOperations<azure.mgmt.compute.v2022_08_01.aio.operations.VirtualMachineScaleSetVMRunCommandsOperations>`
"""
api_version = self._get_api_version('virtual_machine_scale_set_vm_run_commands')
if api_version == '2020-06-01':
from ..v2020_06_01.aio.operations import VirtualMachineScaleSetVMRunCommandsOperations as OperationClass
elif api_version == '2020-12-01':
from ..v2020_12_01.aio.operations import VirtualMachineScaleSetVMRunCommandsOperations as OperationClass
elif api_version == '2021-03-01':
from ..v2021_03_01.aio.operations import VirtualMachineScaleSetVMRunCommandsOperations as OperationClass
elif api_version == '2021-04-01':
from ..v2021_04_01.aio.operations import VirtualMachineScaleSetVMRunCommandsOperations as OperationClass
elif api_version == '2021-07-01':
from ..v2021_07_01.aio.operations import VirtualMachineScaleSetVMRunCommandsOperations as OperationClass
elif api_version == '2021-11-01':
from ..v2021_11_01.aio.operations import VirtualMachineScaleSetVMRunCommandsOperations as OperationClass
elif api_version == '2022-03-01':
from ..v2022_03_01.aio.operations import VirtualMachineScaleSetVMRunCommandsOperations as OperationClass
elif api_version == '2022-08-01':
from ..v2022_08_01.aio.operations import VirtualMachineScaleSetVMRunCommandsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'virtual_machine_scale_set_vm_run_commands'".format(api_version))
self._config.api_version = api_version
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def virtual_machine_scale_set_vms(self):
"""Instance depends on the API version:
* 2015-06-15: :class:`VirtualMachineScaleSetVMsOperations<azure.mgmt.compute.v2015_06_15.aio.operations.VirtualMachineScaleSetVMsOperations>`
* 2016-03-30: :class:`VirtualMachineScaleSetVMsOperations<azure.mgmt.compute.v2016_03_30.aio.operations.VirtualMachineScaleSetVMsOperations>`
* 2016-04-30-preview: :class:`VirtualMachineScaleSetVMsOperations<azure.mgmt.compute.v2016_04_30_preview.aio.operations.VirtualMachineScaleSetVMsOperations>`
* 2017-03-30: :class:`VirtualMachineScaleSetVMsOperations<azure.mgmt.compute.v2017_03_30.aio.operations.VirtualMachineScaleSetVMsOperations>`
* 2017-12-01: :class:`VirtualMachineScaleSetVMsOperations<azure.mgmt.compute.v2017_12_01.aio.operations.VirtualMachineScaleSetVMsOperations>`
* 2018-04-01: :class:`VirtualMachineScaleSetVMsOperations<azure.mgmt.compute.v2018_04_01.aio.operations.VirtualMachineScaleSetVMsOperations>`
* 2018-06-01: :class:`VirtualMachineScaleSetVMsOperations<azure.mgmt.compute.v2018_06_01.aio.operations.VirtualMachineScaleSetVMsOperations>`
* 2018-10-01: :class:`VirtualMachineScaleSetVMsOperations<azure.mgmt.compute.v2018_10_01.aio.operations.VirtualMachineScaleSetVMsOperations>`
* 2019-03-01: :class:`VirtualMachineScaleSetVMsOperations<azure.mgmt.compute.v2019_03_01.aio.operations.VirtualMachineScaleSetVMsOperations>`
* 2019-07-01: :class:`VirtualMachineScaleSetVMsOperations<azure.mgmt.compute.v2019_07_01.aio.operations.VirtualMachineScaleSetVMsOperations>`
* 2019-12-01: :class:`VirtualMachineScaleSetVMsOperations<azure.mgmt.compute.v2019_12_01.aio.operations.VirtualMachineScaleSetVMsOperations>`
* 2020-06-01: :class:`VirtualMachineScaleSetVMsOperations<azure.mgmt.compute.v2020_06_01.aio.operations.VirtualMachineScaleSetVMsOperations>`
* 2020-12-01: :class:`VirtualMachineScaleSetVMsOperations<azure.mgmt.compute.v2020_12_01.aio.operations.VirtualMachineScaleSetVMsOperations>`
* 2021-03-01: :class:`VirtualMachineScaleSetVMsOperations<azure.mgmt.compute.v2021_03_01.aio.operations.VirtualMachineScaleSetVMsOperations>`
* 2021-04-01: :class:`VirtualMachineScaleSetVMsOperations<azure.mgmt.compute.v2021_04_01.aio.operations.VirtualMachineScaleSetVMsOperations>`
* 2021-07-01: :class:`VirtualMachineScaleSetVMsOperations<azure.mgmt.compute.v2021_07_01.aio.operations.VirtualMachineScaleSetVMsOperations>`
* 2021-11-01: :class:`VirtualMachineScaleSetVMsOperations<azure.mgmt.compute.v2021_11_01.aio.operations.VirtualMachineScaleSetVMsOperations>`
* 2022-03-01: :class:`VirtualMachineScaleSetVMsOperations<azure.mgmt.compute.v2022_03_01.aio.operations.VirtualMachineScaleSetVMsOperations>`
* 2022-08-01: :class:`VirtualMachineScaleSetVMsOperations<azure.mgmt.compute.v2022_08_01.aio.operations.VirtualMachineScaleSetVMsOperations>`
"""
api_version = self._get_api_version('virtual_machine_scale_set_vms')
if api_version == '2015-06-15':
from ..v2015_06_15.aio.operations import VirtualMachineScaleSetVMsOperations as OperationClass
elif api_version == '2016-03-30':
from ..v2016_03_30.aio.operations import VirtualMachineScaleSetVMsOperations as OperationClass
elif api_version == '2016-04-30-preview':
from ..v2016_04_30_preview.aio.operations import VirtualMachineScaleSetVMsOperations as OperationClass
elif api_version == '2017-03-30':
from ..v2017_03_30.aio.operations import VirtualMachineScaleSetVMsOperations as OperationClass
elif api_version == '2017-12-01':
from ..v2017_12_01.aio.operations import VirtualMachineScaleSetVMsOperations as OperationClass
elif api_version == '2018-04-01':
from ..v2018_04_01.aio.operations import VirtualMachineScaleSetVMsOperations as OperationClass
elif api_version == '2018-06-01':
from ..v2018_06_01.aio.operations import VirtualMachineScaleSetVMsOperations as OperationClass
elif api_version == '2018-10-01':
from ..v2018_10_01.aio.operations import VirtualMachineScaleSetVMsOperations as OperationClass
elif api_version == '2019-03-01':
from ..v2019_03_01.aio.operations import VirtualMachineScaleSetVMsOperations as OperationClass
elif api_version == '2019-07-01':
from ..v2019_07_01.aio.operations import VirtualMachineScaleSetVMsOperations as OperationClass
elif api_version == '2019-12-01':
from ..v2019_12_01.aio.operations import VirtualMachineScaleSetVMsOperations as OperationClass
elif api_version == '2020-06-01':
from ..v2020_06_01.aio.operations import VirtualMachineScaleSetVMsOperations as OperationClass
elif api_version == '2020-12-01':
from ..v2020_12_01.aio.operations import VirtualMachineScaleSetVMsOperations as OperationClass
elif api_version == '2021-03-01':
from ..v2021_03_01.aio.operations import VirtualMachineScaleSetVMsOperations as OperationClass
elif api_version == '2021-04-01':
from ..v2021_04_01.aio.operations import VirtualMachineScaleSetVMsOperations as OperationClass
elif api_version == '2021-07-01':
from ..v2021_07_01.aio.operations import VirtualMachineScaleSetVMsOperations as OperationClass
elif api_version == '2021-11-01':
from ..v2021_11_01.aio.operations import VirtualMachineScaleSetVMsOperations as OperationClass
elif api_version == '2022-03-01':
from ..v2022_03_01.aio.operations import VirtualMachineScaleSetVMsOperations as OperationClass
elif api_version == '2022-08-01':
from ..v2022_08_01.aio.operations import VirtualMachineScaleSetVMsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'virtual_machine_scale_set_vms'".format(api_version))
self._config.api_version = api_version
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def virtual_machine_scale_sets(self):
"""Instance depends on the API version:
* 2015-06-15: :class:`VirtualMachineScaleSetsOperations<azure.mgmt.compute.v2015_06_15.aio.operations.VirtualMachineScaleSetsOperations>`
* 2016-03-30: :class:`VirtualMachineScaleSetsOperations<azure.mgmt.compute.v2016_03_30.aio.operations.VirtualMachineScaleSetsOperations>`
* 2016-04-30-preview: :class:`VirtualMachineScaleSetsOperations<azure.mgmt.compute.v2016_04_30_preview.aio.operations.VirtualMachineScaleSetsOperations>`
* 2017-03-30: :class:`VirtualMachineScaleSetsOperations<azure.mgmt.compute.v2017_03_30.aio.operations.VirtualMachineScaleSetsOperations>`
* 2017-12-01: :class:`VirtualMachineScaleSetsOperations<azure.mgmt.compute.v2017_12_01.aio.operations.VirtualMachineScaleSetsOperations>`
* 2018-04-01: :class:`VirtualMachineScaleSetsOperations<azure.mgmt.compute.v2018_04_01.aio.operations.VirtualMachineScaleSetsOperations>`
* 2018-06-01: :class:`VirtualMachineScaleSetsOperations<azure.mgmt.compute.v2018_06_01.aio.operations.VirtualMachineScaleSetsOperations>`
* 2018-10-01: :class:`VirtualMachineScaleSetsOperations<azure.mgmt.compute.v2018_10_01.aio.operations.VirtualMachineScaleSetsOperations>`
* 2019-03-01: :class:`VirtualMachineScaleSetsOperations<azure.mgmt.compute.v2019_03_01.aio.operations.VirtualMachineScaleSetsOperations>`
* 2019-07-01: :class:`VirtualMachineScaleSetsOperations<azure.mgmt.compute.v2019_07_01.aio.operations.VirtualMachineScaleSetsOperations>`
* 2019-12-01: :class:`VirtualMachineScaleSetsOperations<azure.mgmt.compute.v2019_12_01.aio.operations.VirtualMachineScaleSetsOperations>`
* 2020-06-01: :class:`VirtualMachineScaleSetsOperations<azure.mgmt.compute.v2020_06_01.aio.operations.VirtualMachineScaleSetsOperations>`
* 2020-12-01: :class:`VirtualMachineScaleSetsOperations<azure.mgmt.compute.v2020_12_01.aio.operations.VirtualMachineScaleSetsOperations>`
* 2021-03-01: :class:`VirtualMachineScaleSetsOperations<azure.mgmt.compute.v2021_03_01.aio.operations.VirtualMachineScaleSetsOperations>`
* 2021-04-01: :class:`VirtualMachineScaleSetsOperations<azure.mgmt.compute.v2021_04_01.aio.operations.VirtualMachineScaleSetsOperations>`
* 2021-07-01: :class:`VirtualMachineScaleSetsOperations<azure.mgmt.compute.v2021_07_01.aio.operations.VirtualMachineScaleSetsOperations>`
* 2021-11-01: :class:`VirtualMachineScaleSetsOperations<azure.mgmt.compute.v2021_11_01.aio.operations.VirtualMachineScaleSetsOperations>`
* 2022-03-01: :class:`VirtualMachineScaleSetsOperations<azure.mgmt.compute.v2022_03_01.aio.operations.VirtualMachineScaleSetsOperations>`
* 2022-08-01: :class:`VirtualMachineScaleSetsOperations<azure.mgmt.compute.v2022_08_01.aio.operations.VirtualMachineScaleSetsOperations>`
"""
api_version = self._get_api_version('virtual_machine_scale_sets')
if api_version == '2015-06-15':
from ..v2015_06_15.aio.operations import VirtualMachineScaleSetsOperations as OperationClass
elif api_version == '2016-03-30':
from ..v2016_03_30.aio.operations import VirtualMachineScaleSetsOperations as OperationClass
elif api_version == '2016-04-30-preview':
from ..v2016_04_30_preview.aio.operations import VirtualMachineScaleSetsOperations as OperationClass
elif api_version == '2017-03-30':
from ..v2017_03_30.aio.operations import VirtualMachineScaleSetsOperations as OperationClass
elif api_version == '2017-12-01':
from ..v2017_12_01.aio.operations import VirtualMachineScaleSetsOperations as OperationClass
elif api_version == '2018-04-01':
from ..v2018_04_01.aio.operations import VirtualMachineScaleSetsOperations as OperationClass
elif api_version == '2018-06-01':
from ..v2018_06_01.aio.operations import VirtualMachineScaleSetsOperations as OperationClass
elif api_version == '2018-10-01':
from ..v2018_10_01.aio.operations import VirtualMachineScaleSetsOperations as OperationClass
elif api_version == '2019-03-01':
from ..v2019_03_01.aio.operations import VirtualMachineScaleSetsOperations as OperationClass
elif api_version == '2019-07-01':
from ..v2019_07_01.aio.operations import VirtualMachineScaleSetsOperations as OperationClass
elif api_version == '2019-12-01':
from ..v2019_12_01.aio.operations import VirtualMachineScaleSetsOperations as OperationClass
elif api_version == '2020-06-01':
from ..v2020_06_01.aio.operations import VirtualMachineScaleSetsOperations as OperationClass
elif api_version == '2020-12-01':
from ..v2020_12_01.aio.operations import VirtualMachineScaleSetsOperations as OperationClass
elif api_version == '2021-03-01':
from ..v2021_03_01.aio.operations import VirtualMachineScaleSetsOperations as OperationClass
elif api_version == '2021-04-01':
from ..v2021_04_01.aio.operations import VirtualMachineScaleSetsOperations as OperationClass
elif api_version == '2021-07-01':
from ..v2021_07_01.aio.operations import VirtualMachineScaleSetsOperations as OperationClass
elif api_version == '2021-11-01':
from ..v2021_11_01.aio.operations import VirtualMachineScaleSetsOperations as OperationClass
elif api_version == '2022-03-01':
from ..v2022_03_01.aio.operations import VirtualMachineScaleSetsOperations as OperationClass
elif api_version == '2022-08-01':
from ..v2022_08_01.aio.operations import VirtualMachineScaleSetsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'virtual_machine_scale_sets'".format(api_version))
self._config.api_version = api_version
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def virtual_machine_sizes(self):
"""Instance depends on the API version:
* 2015-06-15: :class:`VirtualMachineSizesOperations<azure.mgmt.compute.v2015_06_15.aio.operations.VirtualMachineSizesOperations>`
* 2016-03-30: :class:`VirtualMachineSizesOperations<azure.mgmt.compute.v2016_03_30.aio.operations.VirtualMachineSizesOperations>`
* 2016-04-30-preview: :class:`VirtualMachineSizesOperations<azure.mgmt.compute.v2016_04_30_preview.aio.operations.VirtualMachineSizesOperations>`
* 2017-03-30: :class:`VirtualMachineSizesOperations<azure.mgmt.compute.v2017_03_30.aio.operations.VirtualMachineSizesOperations>`
* 2017-12-01: :class:`VirtualMachineSizesOperations<azure.mgmt.compute.v2017_12_01.aio.operations.VirtualMachineSizesOperations>`
* 2018-04-01: :class:`VirtualMachineSizesOperations<azure.mgmt.compute.v2018_04_01.aio.operations.VirtualMachineSizesOperations>`
* 2018-06-01: :class:`VirtualMachineSizesOperations<azure.mgmt.compute.v2018_06_01.aio.operations.VirtualMachineSizesOperations>`
* 2018-10-01: :class:`VirtualMachineSizesOperations<azure.mgmt.compute.v2018_10_01.aio.operations.VirtualMachineSizesOperations>`
* 2019-03-01: :class:`VirtualMachineSizesOperations<azure.mgmt.compute.v2019_03_01.aio.operations.VirtualMachineSizesOperations>`
* 2019-07-01: :class:`VirtualMachineSizesOperations<azure.mgmt.compute.v2019_07_01.aio.operations.VirtualMachineSizesOperations>`
* 2019-12-01: :class:`VirtualMachineSizesOperations<azure.mgmt.compute.v2019_12_01.aio.operations.VirtualMachineSizesOperations>`
* 2020-06-01: :class:`VirtualMachineSizesOperations<azure.mgmt.compute.v2020_06_01.aio.operations.VirtualMachineSizesOperations>`
* 2020-12-01: :class:`VirtualMachineSizesOperations<azure.mgmt.compute.v2020_12_01.aio.operations.VirtualMachineSizesOperations>`
* 2021-03-01: :class:`VirtualMachineSizesOperations<azure.mgmt.compute.v2021_03_01.aio.operations.VirtualMachineSizesOperations>`
* 2021-04-01: :class:`VirtualMachineSizesOperations<azure.mgmt.compute.v2021_04_01.aio.operations.VirtualMachineSizesOperations>`
* 2021-07-01: :class:`VirtualMachineSizesOperations<azure.mgmt.compute.v2021_07_01.aio.operations.VirtualMachineSizesOperations>`
* 2021-11-01: :class:`VirtualMachineSizesOperations<azure.mgmt.compute.v2021_11_01.aio.operations.VirtualMachineSizesOperations>`
* 2022-03-01: :class:`VirtualMachineSizesOperations<azure.mgmt.compute.v2022_03_01.aio.operations.VirtualMachineSizesOperations>`
* 2022-08-01: :class:`VirtualMachineSizesOperations<azure.mgmt.compute.v2022_08_01.aio.operations.VirtualMachineSizesOperations>`
"""
api_version = self._get_api_version('virtual_machine_sizes')
if api_version == '2015-06-15':
from ..v2015_06_15.aio.operations import VirtualMachineSizesOperations as OperationClass
elif api_version == '2016-03-30':
from ..v2016_03_30.aio.operations import VirtualMachineSizesOperations as OperationClass
elif api_version == '2016-04-30-preview':
from ..v2016_04_30_preview.aio.operations import VirtualMachineSizesOperations as OperationClass
elif api_version == '2017-03-30':
from ..v2017_03_30.aio.operations import VirtualMachineSizesOperations as OperationClass
elif api_version == '2017-12-01':
from ..v2017_12_01.aio.operations import VirtualMachineSizesOperations as OperationClass
elif api_version == '2018-04-01':
from ..v2018_04_01.aio.operations import VirtualMachineSizesOperations as OperationClass
elif api_version == '2018-06-01':
from ..v2018_06_01.aio.operations import VirtualMachineSizesOperations as OperationClass
elif api_version == '2018-10-01':
from ..v2018_10_01.aio.operations import VirtualMachineSizesOperations as OperationClass
elif api_version == '2019-03-01':
from ..v2019_03_01.aio.operations import VirtualMachineSizesOperations as OperationClass
elif api_version == '2019-07-01':
from ..v2019_07_01.aio.operations import VirtualMachineSizesOperations as OperationClass
elif api_version == '2019-12-01':
from ..v2019_12_01.aio.operations import VirtualMachineSizesOperations as OperationClass
elif api_version == '2020-06-01':
from ..v2020_06_01.aio.operations import VirtualMachineSizesOperations as OperationClass
elif api_version == '2020-12-01':
from ..v2020_12_01.aio.operations import VirtualMachineSizesOperations as OperationClass
elif api_version == '2021-03-01':
from ..v2021_03_01.aio.operations import VirtualMachineSizesOperations as OperationClass
elif api_version == '2021-04-01':
from ..v2021_04_01.aio.operations import VirtualMachineSizesOperations as OperationClass
elif api_version == '2021-07-01':
from ..v2021_07_01.aio.operations import VirtualMachineSizesOperations as OperationClass
elif api_version == '2021-11-01':
from ..v2021_11_01.aio.operations import VirtualMachineSizesOperations as OperationClass
elif api_version == '2022-03-01':
from ..v2022_03_01.aio.operations import VirtualMachineSizesOperations as OperationClass
elif api_version == '2022-08-01':
from ..v2022_08_01.aio.operations import VirtualMachineSizesOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'virtual_machine_sizes'".format(api_version))
self._config.api_version = api_version
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def virtual_machines(self):
"""Instance depends on the API version:
* 2015-06-15: :class:`VirtualMachinesOperations<azure.mgmt.compute.v2015_06_15.aio.operations.VirtualMachinesOperations>`
* 2016-03-30: :class:`VirtualMachinesOperations<azure.mgmt.compute.v2016_03_30.aio.operations.VirtualMachinesOperations>`
* 2016-04-30-preview: :class:`VirtualMachinesOperations<azure.mgmt.compute.v2016_04_30_preview.aio.operations.VirtualMachinesOperations>`
* 2017-03-30: :class:`VirtualMachinesOperations<azure.mgmt.compute.v2017_03_30.aio.operations.VirtualMachinesOperations>`
* 2017-12-01: :class:`VirtualMachinesOperations<azure.mgmt.compute.v2017_12_01.aio.operations.VirtualMachinesOperations>`
* 2018-04-01: :class:`VirtualMachinesOperations<azure.mgmt.compute.v2018_04_01.aio.operations.VirtualMachinesOperations>`
* 2018-06-01: :class:`VirtualMachinesOperations<azure.mgmt.compute.v2018_06_01.aio.operations.VirtualMachinesOperations>`
* 2018-10-01: :class:`VirtualMachinesOperations<azure.mgmt.compute.v2018_10_01.aio.operations.VirtualMachinesOperations>`
* 2019-03-01: :class:`VirtualMachinesOperations<azure.mgmt.compute.v2019_03_01.aio.operations.VirtualMachinesOperations>`
* 2019-07-01: :class:`VirtualMachinesOperations<azure.mgmt.compute.v2019_07_01.aio.operations.VirtualMachinesOperations>`
* 2019-12-01: :class:`VirtualMachinesOperations<azure.mgmt.compute.v2019_12_01.aio.operations.VirtualMachinesOperations>`
* 2020-06-01: :class:`VirtualMachinesOperations<azure.mgmt.compute.v2020_06_01.aio.operations.VirtualMachinesOperations>`
* 2020-12-01: :class:`VirtualMachinesOperations<azure.mgmt.compute.v2020_12_01.aio.operations.VirtualMachinesOperations>`
* 2021-03-01: :class:`VirtualMachinesOperations<azure.mgmt.compute.v2021_03_01.aio.operations.VirtualMachinesOperations>`
* 2021-04-01: :class:`VirtualMachinesOperations<azure.mgmt.compute.v2021_04_01.aio.operations.VirtualMachinesOperations>`
* 2021-07-01: :class:`VirtualMachinesOperations<azure.mgmt.compute.v2021_07_01.aio.operations.VirtualMachinesOperations>`
* 2021-11-01: :class:`VirtualMachinesOperations<azure.mgmt.compute.v2021_11_01.aio.operations.VirtualMachinesOperations>`
* 2022-03-01: :class:`VirtualMachinesOperations<azure.mgmt.compute.v2022_03_01.aio.operations.VirtualMachinesOperations>`
* 2022-08-01: :class:`VirtualMachinesOperations<azure.mgmt.compute.v2022_08_01.aio.operations.VirtualMachinesOperations>`
"""
api_version = self._get_api_version('virtual_machines')
if api_version == '2015-06-15':
from ..v2015_06_15.aio.operations import VirtualMachinesOperations as OperationClass
elif api_version == '2016-03-30':
from ..v2016_03_30.aio.operations import VirtualMachinesOperations as OperationClass
elif api_version == '2016-04-30-preview':
from ..v2016_04_30_preview.aio.operations import VirtualMachinesOperations as OperationClass
elif api_version == '2017-03-30':
from ..v2017_03_30.aio.operations import VirtualMachinesOperations as OperationClass
elif api_version == '2017-12-01':
from ..v2017_12_01.aio.operations import VirtualMachinesOperations as OperationClass
elif api_version == '2018-04-01':
from ..v2018_04_01.aio.operations import VirtualMachinesOperations as OperationClass
elif api_version == '2018-06-01':
from ..v2018_06_01.aio.operations import VirtualMachinesOperations as OperationClass
elif api_version == '2018-10-01':
from ..v2018_10_01.aio.operations import VirtualMachinesOperations as OperationClass
elif api_version == '2019-03-01':
from ..v2019_03_01.aio.operations import VirtualMachinesOperations as OperationClass
elif api_version == '2019-07-01':
from ..v2019_07_01.aio.operations import VirtualMachinesOperations as OperationClass
elif api_version == '2019-12-01':
from ..v2019_12_01.aio.operations import VirtualMachinesOperations as OperationClass
elif api_version == '2020-06-01':
from ..v2020_06_01.aio.operations import VirtualMachinesOperations as OperationClass
elif api_version == '2020-12-01':
from ..v2020_12_01.aio.operations import VirtualMachinesOperations as OperationClass
elif api_version == '2021-03-01':
from ..v2021_03_01.aio.operations import VirtualMachinesOperations as OperationClass
elif api_version == '2021-04-01':
from ..v2021_04_01.aio.operations import VirtualMachinesOperations as OperationClass
elif api_version == '2021-07-01':
from ..v2021_07_01.aio.operations import VirtualMachinesOperations as OperationClass
elif api_version == '2021-11-01':
from ..v2021_11_01.aio.operations import VirtualMachinesOperations as OperationClass
elif api_version == '2022-03-01':
from ..v2022_03_01.aio.operations import VirtualMachinesOperations as OperationClass
elif api_version == '2022-08-01':
from ..v2022_08_01.aio.operations import VirtualMachinesOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'virtual_machines'".format(api_version))
self._config.api_version = api_version
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
async def close(self):
await self._client.close()
async def __aenter__(self):
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details):
await self._client.__aexit__(*exc_details)
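# ---------------------------------------------------------------------------
# Editor's sketch (not part of the generated file): a minimal illustration of
# how the version-dispatching properties above are typically consumed. The
# client is pinned to one api_version, and each operation-group property then
# lazily imports the matching versioned OperationClass. The credential and
# subscription id below are placeholders, and the example assumes the
# azure-identity package is installed.
if __name__ == "__main__":
    import asyncio

    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.compute.aio import ComputeManagementClient

    async def _demo() -> None:
        async with DefaultAzureCredential() as credential:
            async with ComputeManagementClient(
                credential, "<subscription-id>", api_version="2022-08-01"
            ) as client:
                # Resolves to v2022_08_01.aio.operations.VirtualMachinesOperations.
                async for vm in client.virtual_machines.list_all():
                    print(vm.name)

    asyncio.run(_demo())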
|
{
"content_hash": "c56dbc429b4785f2e2142a144a9dde5e",
"timestamp": "",
"source": "github",
"line_count": 2307,
"max_line_length": 182,
"avg_line_length": 78.42045947117468,
"alnum_prop": 0.7142541289880386,
"repo_name": "Azure/azure-sdk-for-python",
"id": "5eddff3dd99e78b1ab91bff9d406b00abba39e4f",
"size": "181390",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/compute/azure-mgmt-compute/azure/mgmt/compute/aio/_compute_management_client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
from __future__ import annotations
import io
import logging
import socket
import sys
import time
import typing
import warnings
from test import LONG_TIMEOUT, SHORT_TIMEOUT
from threading import Event
from unittest import mock
from urllib.parse import urlencode
import pytest
from dummyserver.server import HAS_IPV6_AND_DNS, NoIPv6Warning
from dummyserver.testcase import HTTPDummyServerTestCase, SocketDummyServerTestCase
from urllib3 import HTTPConnectionPool, encode_multipart_formdata
from urllib3._collections import HTTPHeaderDict
from urllib3.connection import _get_default_user_agent
from urllib3.exceptions import (
ConnectTimeoutError,
DecodeError,
EmptyPoolError,
MaxRetryError,
NameResolutionError,
NewConnectionError,
ReadTimeoutError,
UnrewindableBodyError,
)
from urllib3.fields import _TYPE_FIELD_VALUE_TUPLE
from urllib3.util import SKIP_HEADER, SKIPPABLE_HEADERS
from urllib3.util.retry import RequestHistory, Retry
from urllib3.util.timeout import _TYPE_TIMEOUT, Timeout
from .. import INVALID_SOURCE_ADDRESSES, TARPIT_HOST, VALID_SOURCE_ADDRESSES
from ..port_helpers import find_unused_port
pytestmark = pytest.mark.flaky
log = logging.getLogger("urllib3.connectionpool")
log.setLevel(logging.NOTSET)
log.addHandler(logging.StreamHandler(sys.stdout))
def wait_for_socket(ready_event: Event) -> None:
ready_event.wait()
ready_event.clear()
class TestConnectionPoolTimeouts(SocketDummyServerTestCase):
def test_timeout_float(self) -> None:
block_event = Event()
ready_event = self.start_basic_handler(block_send=block_event, num=2)
with HTTPConnectionPool(self.host, self.port, retries=False) as pool:
wait_for_socket(ready_event)
with pytest.raises(ReadTimeoutError):
pool.request("GET", "/", timeout=SHORT_TIMEOUT)
block_event.set() # Release block
# Shouldn't raise this time
wait_for_socket(ready_event)
block_event.set() # Pre-release block
pool.request("GET", "/", timeout=LONG_TIMEOUT)
def test_conn_closed(self) -> None:
block_event = Event()
self.start_basic_handler(block_send=block_event, num=1)
with HTTPConnectionPool(
self.host, self.port, timeout=SHORT_TIMEOUT, retries=False
) as pool:
conn = pool._get_conn()
pool._put_conn(conn)
try:
with pytest.raises(ReadTimeoutError):
pool.urlopen("GET", "/")
if not conn.is_closed:
with pytest.raises(socket.error):
conn.sock.recv(1024) # type: ignore[attr-defined]
finally:
pool._put_conn(conn)
block_event.set()
def test_timeout(self) -> None:
# Requests should time out when expected
block_event = Event()
ready_event = self.start_basic_handler(block_send=block_event, num=3)
# Pool-global timeout
short_timeout = Timeout(read=SHORT_TIMEOUT)
with HTTPConnectionPool(
self.host, self.port, timeout=short_timeout, retries=False
) as pool:
wait_for_socket(ready_event)
block_event.clear()
with pytest.raises(ReadTimeoutError):
pool.request("GET", "/")
block_event.set() # Release request
# Request-specific timeouts should raise errors
with HTTPConnectionPool(
self.host, self.port, timeout=short_timeout, retries=False
) as pool:
wait_for_socket(ready_event)
now = time.time()
with pytest.raises(ReadTimeoutError):
pool.request("GET", "/", timeout=LONG_TIMEOUT)
delta = time.time() - now
message = "timeout was pool-level SHORT_TIMEOUT rather than request-level LONG_TIMEOUT"
assert delta >= LONG_TIMEOUT, message
block_event.set() # Release request
# Timeout passed directly to request should raise a request timeout
wait_for_socket(ready_event)
with pytest.raises(ReadTimeoutError):
pool.request("GET", "/", timeout=SHORT_TIMEOUT)
block_event.set() # Release request
def test_connect_timeout(self) -> None:
url = "/"
host, port = TARPIT_HOST, 80
timeout = Timeout(connect=SHORT_TIMEOUT)
# Pool-global timeout
with HTTPConnectionPool(host, port, timeout=timeout) as pool:
conn = pool._get_conn()
with pytest.raises(ConnectTimeoutError):
pool._make_request(conn, "GET", url)
# Retries
retries = Retry(connect=0)
with pytest.raises(MaxRetryError):
pool.request("GET", url, retries=retries)
# Request-specific connection timeouts
big_timeout = Timeout(read=LONG_TIMEOUT, connect=LONG_TIMEOUT)
with HTTPConnectionPool(host, port, timeout=big_timeout, retries=False) as pool:
conn = pool._get_conn()
with pytest.raises(ConnectTimeoutError):
pool._make_request(conn, "GET", url, timeout=timeout)
pool._put_conn(conn)
with pytest.raises(ConnectTimeoutError):
pool.request("GET", url, timeout=timeout)
def test_total_applies_connect(self) -> None:
host, port = TARPIT_HOST, 80
timeout = Timeout(total=None, connect=SHORT_TIMEOUT)
with HTTPConnectionPool(host, port, timeout=timeout) as pool:
conn = pool._get_conn()
try:
with pytest.raises(ConnectTimeoutError):
pool._make_request(conn, "GET", "/")
finally:
conn.close()
timeout = Timeout(connect=3, read=5, total=SHORT_TIMEOUT)
with HTTPConnectionPool(host, port, timeout=timeout) as pool:
conn = pool._get_conn()
try:
with pytest.raises(ConnectTimeoutError):
pool._make_request(conn, "GET", "/")
finally:
conn.close()
def test_total_timeout(self) -> None:
block_event = Event()
ready_event = self.start_basic_handler(block_send=block_event, num=2)
wait_for_socket(ready_event)
# This will get the socket to raise an EAGAIN on the read
timeout = Timeout(connect=3, read=SHORT_TIMEOUT)
with HTTPConnectionPool(
self.host, self.port, timeout=timeout, retries=False
) as pool:
with pytest.raises(ReadTimeoutError):
pool.request("GET", "/")
block_event.set()
wait_for_socket(ready_event)
block_event.clear()
# The connect should succeed and this should hit the read timeout
timeout = Timeout(connect=3, read=5, total=SHORT_TIMEOUT)
with HTTPConnectionPool(
self.host, self.port, timeout=timeout, retries=False
) as pool:
with pytest.raises(ReadTimeoutError):
pool.request("GET", "/")
def test_create_connection_timeout(self) -> None:
self.start_basic_handler(block_send=Event(), num=0) # needed for self.port
timeout = Timeout(connect=SHORT_TIMEOUT, total=LONG_TIMEOUT)
with HTTPConnectionPool(
TARPIT_HOST, self.port, timeout=timeout, retries=False
) as pool:
conn = pool._new_conn()
with pytest.raises(ConnectTimeoutError):
conn.connect()
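# Editor's sketch (assumption-labeled, not part of the original suite): a
# compact illustration of the Timeout semantics the class above exercises.
# A pool-level timeout applies to every request; a per-request timeout
# overrides it for that call only, and Timeout(total=...) caps connect and
# read combined. The host/port values are placeholders; the helper is never
# invoked by the tests.
def _example_timeout_usage(host: str, port: int) -> None:
    pool_timeout = Timeout(connect=2.0, read=7.0)
    with HTTPConnectionPool(host, port, timeout=pool_timeout, retries=False) as pool:
        pool.request("GET", "/")  # uses connect=2.0, read=7.0 from the pool
        # Overrides the pool default for this single call.
        pool.request("GET", "/", timeout=Timeout(total=3.0))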
class TestConnectionPool(HTTPDummyServerTestCase):
def test_get(self) -> None:
with HTTPConnectionPool(self.host, self.port) as pool:
r = pool.request("GET", "/specific_method", fields={"method": "GET"})
assert r.status == 200, r.data
def test_post_url(self) -> None:
with HTTPConnectionPool(self.host, self.port) as pool:
r = pool.request("POST", "/specific_method", fields={"method": "POST"})
assert r.status == 200, r.data
def test_urlopen_put(self) -> None:
with HTTPConnectionPool(self.host, self.port) as pool:
r = pool.urlopen("PUT", "/specific_method?method=PUT")
assert r.status == 200, r.data
def test_wrong_specific_method(self) -> None:
# To make sure the dummy server is actually returning failed responses
with HTTPConnectionPool(self.host, self.port) as pool:
r = pool.request("GET", "/specific_method", fields={"method": "POST"})
assert r.status == 400, r.data
with HTTPConnectionPool(self.host, self.port) as pool:
r = pool.request("POST", "/specific_method", fields={"method": "GET"})
assert r.status == 400, r.data
def test_upload(self) -> None:
data = "I'm in ur multipart form-data, hazing a cheezburgr"
fields: dict[str, _TYPE_FIELD_VALUE_TUPLE] = {
"upload_param": "filefield",
"upload_filename": "lolcat.txt",
"filefield": ("lolcat.txt", data),
}
fields["upload_size"] = len(data) # type: ignore
with HTTPConnectionPool(self.host, self.port) as pool:
r = pool.request("POST", "/upload", fields=fields)
assert r.status == 200, r.data
def test_one_name_multiple_values(self) -> None:
fields = [("foo", "a"), ("foo", "b")]
with HTTPConnectionPool(self.host, self.port) as pool:
# urlencode
r = pool.request("GET", "/echo", fields=fields)
assert r.data == b"foo=a&foo=b"
# multipart
r = pool.request("POST", "/echo", fields=fields)
assert r.data.count(b'name="foo"') == 2
def test_request_method_body(self) -> None:
with HTTPConnectionPool(self.host, self.port) as pool:
body = b"hi"
r = pool.request("POST", "/echo", body=body)
assert r.data == body
fields = [("hi", "hello")]
with pytest.raises(TypeError):
pool.request("POST", "/echo", body=body, fields=fields)
def test_unicode_upload(self) -> None:
fieldname = "myfile"
filename = "\xe2\x99\xa5.txt"
data = "\xe2\x99\xa5".encode()
size = len(data)
fields: dict[str, _TYPE_FIELD_VALUE_TUPLE] = {
"upload_param": fieldname,
"upload_filename": filename,
fieldname: (filename, data),
}
fields["upload_size"] = size # type: ignore
with HTTPConnectionPool(self.host, self.port) as pool:
r = pool.request("POST", "/upload", fields=fields)
assert r.status == 200, r.data
def test_nagle(self) -> None:
"""Test that connections have TCP_NODELAY turned on"""
# This test needs to be here in order to be run. socket.create_connection actually tries
# to connect to the host provided so we need a dummyserver to be running.
with HTTPConnectionPool(self.host, self.port) as pool:
conn = pool._get_conn()
try:
pool._make_request(conn, "GET", "/")
tcp_nodelay_setting = conn.sock.getsockopt( # type: ignore[attr-defined]
socket.IPPROTO_TCP, socket.TCP_NODELAY
)
assert tcp_nodelay_setting
finally:
conn.close()
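    # Editor's note (hedged sketch): the TCP_NODELAY assertion above holds
    # because urllib3's HTTPConnection ships a default socket option that
    # disables Nagle's algorithm, roughly:
    #
    #     HTTPConnection.default_socket_options = [
    #         (socket.IPPROTO_TCP, socket.TCP_NODELAY, 1),
    #     ]
    #
    # Supplying socket_options to the pool replaces this default, which is
    # what the next two tests exercise.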
@pytest.mark.parametrize(
"socket_options",
[
[(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)],
((socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),),
],
)
def test_socket_options(self, socket_options: tuple[int, int, int]) -> None:
"""Test that connections accept socket options."""
# This test needs to be here in order to be run. socket.create_connection actually tries to
# connect to the host provided so we need a dummyserver to be running.
with HTTPConnectionPool(
self.host,
self.port,
socket_options=socket_options,
) as pool:
# Get the socket of a new connection.
s = pool._new_conn()._new_conn() # type: ignore[attr-defined]
try:
using_keepalive = (
s.getsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE) > 0
)
assert using_keepalive
finally:
s.close()
@pytest.mark.parametrize("socket_options", [None, []])
def test_disable_default_socket_options(
self, socket_options: list[int] | None
) -> None:
"""Test that passing None or empty list disables all socket options."""
# This test needs to be here in order to be run. socket.create_connection actually tries
# to connect to the host provided so we need a dummyserver to be running.
with HTTPConnectionPool(
self.host, self.port, socket_options=socket_options
) as pool:
s = pool._new_conn()._new_conn() # type: ignore[attr-defined]
try:
using_nagle = s.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY) == 0
assert using_nagle
finally:
s.close()
def test_defaults_are_applied(self) -> None:
"""Test that modifying the default socket options works."""
# This test needs to be here in order to be run. socket.create_connection actually tries
# to connect to the host provided so we need a dummyserver to be running.
with HTTPConnectionPool(self.host, self.port) as pool:
# Get the HTTPConnection instance
conn = pool._new_conn()
try:
# Update the default socket options
assert conn.socket_options is not None
conn.socket_options += [(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)] # type: ignore[operator]
s = conn._new_conn() # type: ignore[attr-defined]
nagle_disabled = (
s.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY) > 0
)
using_keepalive = (
s.getsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE) > 0
)
assert nagle_disabled
assert using_keepalive
finally:
conn.close()
s.close()
def test_connection_error_retries(self) -> None:
"""ECONNREFUSED error should raise a connection error, with retries"""
port = find_unused_port()
with HTTPConnectionPool(self.host, port) as pool:
with pytest.raises(MaxRetryError) as e:
pool.request("GET", "/", retries=Retry(connect=3))
assert type(e.value.reason) == NewConnectionError
def test_timeout_success(self) -> None:
timeout = Timeout(connect=3, read=5, total=None)
with HTTPConnectionPool(self.host, self.port, timeout=timeout) as pool:
pool.request("GET", "/")
# This should not raise a "Timeout already started" error
pool.request("GET", "/")
with HTTPConnectionPool(self.host, self.port, timeout=timeout) as pool:
# This should also not raise a "Timeout already started" error
pool.request("GET", "/")
timeout = Timeout(total=None)
with HTTPConnectionPool(self.host, self.port, timeout=timeout) as pool:
pool.request("GET", "/")
socket_timeout_reuse_testdata = pytest.mark.parametrize(
["timeout", "expect_settimeout_calls"],
[
(1, (1, 1)),
(None, (None, None)),
(Timeout(read=4), (None, 4)),
(Timeout(read=4, connect=5), (5, 4)),
(Timeout(connect=6), (6, None)),
],
)
@socket_timeout_reuse_testdata
def test_socket_timeout_updated_on_reuse_constructor(
self,
timeout: _TYPE_TIMEOUT,
expect_settimeout_calls: typing.Sequence[float | None],
) -> None:
with HTTPConnectionPool(self.host, self.port, timeout=timeout) as pool:
# Make a request to create a new connection.
pool.urlopen("GET", "/")
# Grab the connection and mock the inner socket.
assert pool.pool is not None
conn = pool.pool.get_nowait()
conn_sock = mock.Mock(wraps=conn.sock)
conn.sock = conn_sock
pool._put_conn(conn)
# Assert that sock.settimeout() is called with the new connect timeout, then the read timeout.
pool.urlopen("GET", "/", timeout=timeout)
conn_sock.settimeout.assert_has_calls(
[mock.call(x) for x in expect_settimeout_calls]
)
@socket_timeout_reuse_testdata
def test_socket_timeout_updated_on_reuse_parameter(
self,
timeout: _TYPE_TIMEOUT,
expect_settimeout_calls: typing.Sequence[float | None],
) -> None:
with HTTPConnectionPool(self.host, self.port) as pool:
# Make a request to create a new connection.
pool.urlopen("GET", "/", timeout=LONG_TIMEOUT)
# Grab the connection and mock the inner socket.
assert pool.pool is not None
conn = pool.pool.get_nowait()
conn_sock = mock.Mock(wraps=conn.sock)
conn.sock = conn_sock
pool._put_conn(conn)
# Assert that sock.settimeout() is called with the new connect timeout, then the read timeout.
pool.urlopen("GET", "/", timeout=timeout)
conn_sock.settimeout.assert_has_calls(
[mock.call(x) for x in expect_settimeout_calls]
)
def test_tunnel(self) -> None:
        # Note: the actual httplib.py has no tests for this functionality.
timeout = Timeout(total=None)
with HTTPConnectionPool(self.host, self.port, timeout=timeout) as pool:
conn = pool._get_conn()
try:
conn.set_tunnel(self.host, self.port)
with mock.patch.object(
conn, "_tunnel", create=True, return_value=None
) as conn_tunnel:
pool._make_request(conn, "GET", "/")
conn_tunnel.assert_called_once_with()
finally:
conn.close()
# test that it's not called when tunnel is not set
timeout = Timeout(total=None)
with HTTPConnectionPool(self.host, self.port, timeout=timeout) as pool:
conn = pool._get_conn()
try:
with mock.patch.object(
conn, "_tunnel", create=True, return_value=None
) as conn_tunnel:
pool._make_request(conn, "GET", "/")
assert not conn_tunnel.called
finally:
conn.close()
def test_redirect(self) -> None:
with HTTPConnectionPool(self.host, self.port) as pool:
r = pool.request("GET", "/redirect", fields={"target": "/"}, redirect=False)
assert r.status == 303
r = pool.request("GET", "/redirect", fields={"target": "/"})
assert r.status == 200
assert r.data == b"Dummy server!"
def test_bad_connect(self) -> None:
with HTTPConnectionPool("badhost.invalid", self.port) as pool:
with pytest.raises(MaxRetryError) as e:
pool.request("GET", "/", retries=5)
assert type(e.value.reason) == NameResolutionError
def test_keepalive(self) -> None:
with HTTPConnectionPool(self.host, self.port, block=True, maxsize=1) as pool:
r = pool.request("GET", "/keepalive?close=0")
r = pool.request("GET", "/keepalive?close=0")
assert r.status == 200
assert pool.num_connections == 1
assert pool.num_requests == 2
def test_keepalive_close(self) -> None:
with HTTPConnectionPool(
self.host, self.port, block=True, maxsize=1, timeout=2
) as pool:
r = pool.request(
"GET", "/keepalive?close=1", retries=0, headers={"Connection": "close"}
)
assert pool.num_connections == 1
# The dummyserver will have responded with Connection:close,
# and httplib will properly cleanup the socket.
# We grab the HTTPConnection object straight from the Queue,
# because _get_conn() is where the check & reset occurs
assert pool.pool is not None
conn = pool.pool.get()
assert conn.sock is None
pool._put_conn(conn)
# Now with keep-alive
r = pool.request(
"GET",
"/keepalive?close=0",
retries=0,
headers={"Connection": "keep-alive"},
)
# The dummyserver responded with Connection:keep-alive, the connection
# persists.
conn = pool.pool.get()
assert conn.sock is not None
pool._put_conn(conn)
# Another request asking the server to close the connection. This one
# should get cleaned up for the next request.
r = pool.request(
"GET", "/keepalive?close=1", retries=0, headers={"Connection": "close"}
)
assert r.status == 200
conn = pool.pool.get()
assert conn.sock is None
pool._put_conn(conn)
# Next request
r = pool.request("GET", "/keepalive?close=0")
def test_post_with_urlencode(self) -> None:
with HTTPConnectionPool(self.host, self.port) as pool:
data = {"banana": "hammock", "lol": "cat"}
r = pool.request("POST", "/echo", fields=data, encode_multipart=False)
assert r.data.decode("utf-8") == urlencode(data)
def test_post_with_multipart(self) -> None:
with HTTPConnectionPool(self.host, self.port) as pool:
data = {"banana": "hammock", "lol": "cat"}
r = pool.request("POST", "/echo", fields=data, encode_multipart=True)
body = r.data.split(b"\r\n")
encoded_data = encode_multipart_formdata(data)[0]
expected_body = encoded_data.split(b"\r\n")
# TODO: Get rid of extra parsing stuff when you can specify
# a custom boundary to encode_multipart_formdata
"""
We need to loop the return lines because a timestamp is attached
from within encode_multipart_formdata. When the server echos back
the data, it has the timestamp from when the data was encoded, which
is not equivalent to when we run encode_multipart_formdata on
the data again.
"""
for i, line in enumerate(body):
if line.startswith(b"--"):
continue
assert body[i] == expected_body[i]
def test_post_with_multipart__iter__(self) -> None:
with HTTPConnectionPool(self.host, self.port) as pool:
data = {"hello": "world"}
r = pool.request(
"POST",
"/echo",
fields=data,
preload_content=False,
multipart_boundary="boundary",
encode_multipart=True,
)
chunks = [chunk for chunk in r]
assert chunks == [
b"--boundary\r\n",
b'Content-Disposition: form-data; name="hello"\r\n',
b"\r\n",
b"world\r\n",
b"--boundary--\r\n",
]
def test_check_gzip(self) -> None:
with HTTPConnectionPool(self.host, self.port) as pool:
r = pool.request(
"GET", "/encodingrequest", headers={"accept-encoding": "gzip"}
)
assert r.headers.get("content-encoding") == "gzip"
assert r.data == b"hello, world!"
def test_check_deflate(self) -> None:
with HTTPConnectionPool(self.host, self.port) as pool:
r = pool.request(
"GET", "/encodingrequest", headers={"accept-encoding": "deflate"}
)
assert r.headers.get("content-encoding") == "deflate"
assert r.data == b"hello, world!"
def test_bad_decode(self) -> None:
with HTTPConnectionPool(self.host, self.port) as pool:
with pytest.raises(DecodeError):
pool.request(
"GET",
"/encodingrequest",
headers={"accept-encoding": "garbage-deflate"},
)
with pytest.raises(DecodeError):
pool.request(
"GET",
"/encodingrequest",
headers={"accept-encoding": "garbage-gzip"},
)
def test_connection_count(self) -> None:
with HTTPConnectionPool(self.host, self.port, maxsize=1) as pool:
pool.request("GET", "/")
pool.request("GET", "/")
pool.request("GET", "/")
assert pool.num_connections == 1
assert pool.num_requests == 3
def test_connection_count_bigpool(self) -> None:
with HTTPConnectionPool(self.host, self.port, maxsize=16) as http_pool:
http_pool.request("GET", "/")
http_pool.request("GET", "/")
http_pool.request("GET", "/")
assert http_pool.num_connections == 1
assert http_pool.num_requests == 3
def test_partial_response(self) -> None:
with HTTPConnectionPool(self.host, self.port, maxsize=1) as pool:
req_data = {"lol": "cat"}
resp_data = urlencode(req_data).encode("utf-8")
r = pool.request("GET", "/echo", fields=req_data, preload_content=False)
assert r.read(5) == resp_data[:5]
assert r.read() == resp_data[5:]
def test_lazy_load_twice(self) -> None:
# This test is sad and confusing. Need to figure out what's
# going on with partial reads and socket reuse.
with HTTPConnectionPool(
self.host, self.port, block=True, maxsize=1, timeout=2
) as pool:
payload_size = 1024 * 2
first_chunk = 512
boundary = "foo"
req_data = {"count": "a" * payload_size}
resp_data = encode_multipart_formdata(req_data, boundary=boundary)[0]
req2_data = {"count": "b" * payload_size}
resp2_data = encode_multipart_formdata(req2_data, boundary=boundary)[0]
r1 = pool.request(
"POST",
"/echo",
fields=req_data,
multipart_boundary=boundary,
preload_content=False,
)
assert r1.read(first_chunk) == resp_data[:first_chunk]
try:
r2 = pool.request(
"POST",
"/echo",
fields=req2_data,
multipart_boundary=boundary,
preload_content=False,
pool_timeout=0.001,
)
# This branch should generally bail here, but maybe someday it will
# work? Perhaps by some sort of magic. Consider it a TODO.
assert r2.read(first_chunk) == resp2_data[:first_chunk]
assert r1.read() == resp_data[first_chunk:]
assert r2.read() == resp2_data[first_chunk:]
assert pool.num_requests == 2
except EmptyPoolError:
assert r1.read() == resp_data[first_chunk:]
assert pool.num_requests == 1
assert pool.num_connections == 1
def test_for_double_release(self) -> None:
MAXSIZE = 5
# Check default state
with HTTPConnectionPool(self.host, self.port, maxsize=MAXSIZE) as pool:
assert pool.num_connections == 0
assert pool.pool is not None
assert pool.pool.qsize() == MAXSIZE
# Make an empty slot for testing
pool.pool.get()
assert pool.pool.qsize() == MAXSIZE - 1
# Check state after simple request
pool.urlopen("GET", "/")
assert pool.pool.qsize() == MAXSIZE - 1
# Check state without release
pool.urlopen("GET", "/", preload_content=False)
assert pool.pool.qsize() == MAXSIZE - 2
pool.urlopen("GET", "/")
assert pool.pool.qsize() == MAXSIZE - 2
# Check state after read
pool.urlopen("GET", "/").data
assert pool.pool.qsize() == MAXSIZE - 2
pool.urlopen("GET", "/")
assert pool.pool.qsize() == MAXSIZE - 2
def test_release_conn_parameter(self) -> None:
MAXSIZE = 5
with HTTPConnectionPool(self.host, self.port, maxsize=MAXSIZE) as pool:
assert pool.pool is not None
assert pool.pool.qsize() == MAXSIZE
# Make request without releasing connection
pool.request("GET", "/", release_conn=False, preload_content=False)
assert pool.pool.qsize() == MAXSIZE - 1
def test_dns_error(self) -> None:
with HTTPConnectionPool(
"thishostdoesnotexist.invalid", self.port, timeout=0.001
) as pool:
with pytest.raises(MaxRetryError):
pool.request("GET", "/test", retries=2)
@pytest.mark.parametrize("char", [" ", "\r", "\n", "\x00"])
def test_invalid_method_not_allowed(self, char: str) -> None:
with pytest.raises(ValueError):
with HTTPConnectionPool(self.host, self.port) as pool:
pool.request("GET" + char, "/")
def test_percent_encode_invalid_target_chars(self) -> None:
with HTTPConnectionPool(self.host, self.port) as pool:
r = pool.request("GET", "/echo_params?q=\r&k=\n \n")
assert r.data == b"[('k', '\\n \\n'), ('q', '\\r')]"
def test_source_address(self) -> None:
for addr, is_ipv6 in VALID_SOURCE_ADDRESSES:
if is_ipv6 and not HAS_IPV6_AND_DNS:
warnings.warn("No IPv6 support: skipping.", NoIPv6Warning)
continue
with HTTPConnectionPool(
self.host, self.port, source_address=addr, retries=False
) as pool:
r = pool.request("GET", "/source_address")
assert r.data == addr[0].encode()
@pytest.mark.parametrize(
"invalid_source_address, is_ipv6", INVALID_SOURCE_ADDRESSES
)
def test_source_address_error(
self, invalid_source_address: tuple[str, int], is_ipv6: bool
) -> None:
with HTTPConnectionPool(
self.host, self.port, source_address=invalid_source_address, retries=False
) as pool:
if is_ipv6:
with pytest.raises(NameResolutionError):
pool.request("GET", f"/source_address?{invalid_source_address}")
else:
with pytest.raises(NewConnectionError):
pool.request("GET", f"/source_address?{invalid_source_address}")
def test_stream_keepalive(self) -> None:
x = 2
with HTTPConnectionPool(self.host, self.port) as pool:
for _ in range(x):
response = pool.request(
"GET",
"/chunked",
headers={"Connection": "keep-alive"},
preload_content=False,
retries=False,
)
for chunk in response.stream():
assert chunk == b"123"
assert pool.num_connections == 1
assert pool.num_requests == x
def test_read_chunked_short_circuit(self) -> None:
with HTTPConnectionPool(self.host, self.port) as pool:
response = pool.request("GET", "/chunked", preload_content=False)
response.read()
with pytest.raises(StopIteration):
next(response.read_chunked())
def test_read_chunked_on_closed_response(self) -> None:
with HTTPConnectionPool(self.host, self.port) as pool:
response = pool.request("GET", "/chunked", preload_content=False)
response.close()
with pytest.raises(StopIteration):
next(response.read_chunked())
def test_chunked_gzip(self) -> None:
with HTTPConnectionPool(self.host, self.port) as pool:
response = pool.request(
"GET", "/chunked_gzip", preload_content=False, decode_content=True
)
assert b"123" * 4 == response.read()
def test_cleanup_on_connection_error(self) -> None:
"""
Test that connections are recycled to the pool on
connection errors where no http response is received.
"""
poolsize = 3
with HTTPConnectionPool(
self.host, self.port, maxsize=poolsize, block=True
) as http:
assert http.pool is not None
assert http.pool.qsize() == poolsize
            # Force a MaxRetryError by requesting a redirecting URL with
            # retries=0. No response is handed back to the caller, so the
            # conn won't be implicitly returned to the pool.
with pytest.raises(MaxRetryError):
http.request(
"GET",
"/redirect",
fields={"target": "/"},
release_conn=False,
retries=0,
)
r = http.request(
"GET",
"/redirect",
fields={"target": "/"},
release_conn=False,
retries=1,
)
r.release_conn()
# the pool should still contain poolsize elements
assert http.pool.qsize() == http.pool.maxsize
def test_mixed_case_hostname(self) -> None:
with HTTPConnectionPool("LoCaLhOsT", self.port) as pool:
response = pool.request("GET", f"http://LoCaLhOsT:{self.port}/")
assert response.status == 200
def test_preserves_path_dot_segments(self) -> None:
"""ConnectionPool preserves dot segments in the URI"""
with HTTPConnectionPool(self.host, self.port) as pool:
response = pool.request("GET", "/echo_uri/seg0/../seg2")
assert response.data == b"/echo_uri/seg0/../seg2"
def test_default_user_agent_header(self) -> None:
"""ConnectionPool has a default user agent"""
default_ua = _get_default_user_agent()
custom_ua = "I'm not a web scraper, what are you talking about?"
custom_ua2 = "Yet Another User Agent"
with HTTPConnectionPool(self.host, self.port) as pool:
# Use default user agent if no user agent was specified.
r = pool.request("GET", "/headers")
request_headers = r.json()
assert request_headers.get("User-Agent") == _get_default_user_agent()
# Prefer the request user agent over the default.
headers = {"UsEr-AGENt": custom_ua}
r = pool.request("GET", "/headers", headers=headers)
request_headers = r.json()
assert request_headers.get("User-Agent") == custom_ua
# Do not modify pool headers when using the default user agent.
pool_headers = {"foo": "bar"}
pool.headers = pool_headers
r = pool.request("GET", "/headers")
request_headers = r.json()
assert request_headers.get("User-Agent") == default_ua
assert "User-Agent" not in pool_headers
pool.headers.update({"User-Agent": custom_ua2})
r = pool.request("GET", "/headers")
request_headers = r.json()
assert request_headers.get("User-Agent") == custom_ua2
@pytest.mark.parametrize(
"headers",
[
None,
{},
{"User-Agent": "key"},
{"user-agent": "key"},
{b"uSeR-AgEnT": b"key"},
{b"user-agent": "key"},
],
)
@pytest.mark.parametrize("chunked", [True, False])
def test_user_agent_header_not_sent_twice(
self, headers: dict[str, str] | None, chunked: bool
) -> None:
with HTTPConnectionPool(self.host, self.port) as pool:
r = pool.request("GET", "/headers", headers=headers, chunked=chunked)
request_headers = r.json()
if not headers:
assert request_headers["User-Agent"].startswith("python-urllib3/")
assert "key" not in request_headers["User-Agent"]
else:
assert request_headers["User-Agent"] == "key"
def test_no_user_agent_header(self) -> None:
"""ConnectionPool can suppress sending a user agent header"""
custom_ua = "I'm not a web scraper, what are you talking about?"
with HTTPConnectionPool(self.host, self.port) as pool:
# Suppress user agent in the request headers.
no_ua_headers = {"User-Agent": SKIP_HEADER}
r = pool.request("GET", "/headers", headers=no_ua_headers)
request_headers = r.json()
assert "User-Agent" not in request_headers
assert no_ua_headers["User-Agent"] == SKIP_HEADER
# Suppress user agent in the pool headers.
pool.headers = no_ua_headers
r = pool.request("GET", "/headers")
request_headers = r.json()
assert "User-Agent" not in request_headers
assert no_ua_headers["User-Agent"] == SKIP_HEADER
# Request headers override pool headers.
pool_headers = {"User-Agent": custom_ua}
pool.headers = pool_headers
r = pool.request("GET", "/headers", headers=no_ua_headers)
request_headers = r.json()
assert "User-Agent" not in request_headers
assert no_ua_headers["User-Agent"] == SKIP_HEADER
assert pool_headers.get("User-Agent") == custom_ua
@pytest.mark.parametrize(
"accept_encoding",
[
"Accept-Encoding",
"accept-encoding",
b"Accept-Encoding",
b"accept-encoding",
None,
],
)
@pytest.mark.parametrize("host", ["Host", "host", b"Host", b"host", None])
@pytest.mark.parametrize(
"user_agent", ["User-Agent", "user-agent", b"User-Agent", b"user-agent", None]
)
@pytest.mark.parametrize("chunked", [True, False])
def test_skip_header(
self,
accept_encoding: str | None,
host: str | None,
user_agent: str | None,
chunked: bool,
) -> None:
headers = {}
if accept_encoding is not None:
headers[accept_encoding] = SKIP_HEADER
if host is not None:
headers[host] = SKIP_HEADER
if user_agent is not None:
headers[user_agent] = SKIP_HEADER
with HTTPConnectionPool(self.host, self.port) as pool:
r = pool.request("GET", "/headers", headers=headers, chunked=chunked)
request_headers = r.json()
if accept_encoding is None:
assert "Accept-Encoding" in request_headers
else:
assert accept_encoding not in request_headers
if host is None:
assert "Host" in request_headers
else:
assert host not in request_headers
if user_agent is None:
assert "User-Agent" in request_headers
else:
assert user_agent not in request_headers
@pytest.mark.parametrize("header", ["Content-Length", "content-length"])
@pytest.mark.parametrize("chunked", [True, False])
def test_skip_header_non_supported(self, header: str, chunked: bool) -> None:
with HTTPConnectionPool(self.host, self.port) as pool:
with pytest.raises(
ValueError,
match="urllib3.util.SKIP_HEADER only supports 'Accept-Encoding', 'Host', 'User-Agent'",
) as e:
pool.request(
"GET", "/headers", headers={header: SKIP_HEADER}, chunked=chunked
)
# Ensure that the error message stays up to date with 'SKIP_HEADER_SUPPORTED_HEADERS'
assert all(
("'" + header.title() + "'") in str(e.value)
for header in SKIPPABLE_HEADERS
)
@pytest.mark.parametrize("chunked", [True, False])
@pytest.mark.parametrize("pool_request", [True, False])
@pytest.mark.parametrize("header_type", [dict, HTTPHeaderDict])
def test_headers_not_modified_by_request(
self,
chunked: bool,
pool_request: bool,
header_type: type[dict[str, str] | HTTPHeaderDict],
) -> None:
# Test that the .request*() methods of ConnectionPool and HTTPConnection
# don't modify the given 'headers' structure, instead they should
# make their own internal copies at request time.
headers = header_type()
headers["key"] = "val"
with HTTPConnectionPool(self.host, self.port) as pool:
pool.headers = headers
if pool_request:
pool.request("GET", "/headers", chunked=chunked)
else:
conn = pool._get_conn()
conn.request("GET", "/headers", chunked=chunked)
assert pool.headers == {"key": "val"}
assert isinstance(pool.headers, header_type)
with HTTPConnectionPool(self.host, self.port) as pool:
if pool_request:
pool.request("GET", "/headers", headers=headers, chunked=chunked)
else:
conn = pool._get_conn()
conn.request("GET", "/headers", headers=headers, chunked=chunked)
assert headers == {"key": "val"}
def test_request_chunked_is_deprecated(
self,
) -> None:
with HTTPConnectionPool(self.host, self.port) as pool:
conn = pool._get_conn()
with pytest.warns(DeprecationWarning) as w:
conn.request_chunked("GET", "/headers") # type: ignore[attr-defined]
assert len(w) == 1 and str(w[0].message) == (
"HTTPConnection.request_chunked() is deprecated and will be removed in urllib3 v2.1.0. "
"Instead use HTTPConnection.request(..., chunked=True)."
)
resp = conn.getresponse()
assert resp.status == 200
assert resp.json()["Transfer-Encoding"] == "chunked"
def test_bytes_header(self) -> None:
with HTTPConnectionPool(self.host, self.port) as pool:
headers = {"User-Agent": "test header"}
r = pool.request("GET", "/headers", headers=headers)
request_headers = r.json()
assert "User-Agent" in request_headers
assert request_headers["User-Agent"] == "test header"
@pytest.mark.parametrize(
"user_agent", ["Schönefeld/1.18.0", "Schönefeld/1.18.0".encode("iso-8859-1")]
)
def test_user_agent_non_ascii_user_agent(self, user_agent: str) -> None:
with HTTPConnectionPool(self.host, self.port, retries=False) as pool:
r = pool.urlopen(
"GET",
"/headers",
headers={"User-Agent": user_agent},
)
request_headers = r.json()
assert "User-Agent" in request_headers
assert request_headers["User-Agent"] == "Schönefeld/1.18.0"
class TestRetry(HTTPDummyServerTestCase):
def test_max_retry(self) -> None:
with HTTPConnectionPool(self.host, self.port) as pool:
with pytest.raises(MaxRetryError):
pool.request("GET", "/redirect", fields={"target": "/"}, retries=0)
def test_disabled_retry(self) -> None:
"""Disabled retries should disable redirect handling."""
with HTTPConnectionPool(self.host, self.port) as pool:
r = pool.request("GET", "/redirect", fields={"target": "/"}, retries=False)
assert r.status == 303
r = pool.request(
"GET",
"/redirect",
fields={"target": "/"},
retries=Retry(redirect=False),
)
assert r.status == 303
with HTTPConnectionPool(
"thishostdoesnotexist.invalid", self.port, timeout=0.001
) as pool:
with pytest.raises(NameResolutionError):
pool.request("GET", "/test", retries=False)
def test_read_retries(self) -> None:
"""Should retry for status codes in the forcelist"""
with HTTPConnectionPool(self.host, self.port) as pool:
retry = Retry(read=1, status_forcelist=[418])
resp = pool.request(
"GET",
"/successful_retry",
headers={"test-name": "test_read_retries"},
retries=retry,
)
assert resp.status == 200
def test_read_total_retries(self) -> None:
"""HTTP response w/ status code in the forcelist should be retried"""
with HTTPConnectionPool(self.host, self.port) as pool:
headers = {"test-name": "test_read_total_retries"}
retry = Retry(total=1, status_forcelist=[418])
resp = pool.request(
"GET", "/successful_retry", headers=headers, retries=retry
)
assert resp.status == 200
def test_retries_wrong_forcelist(self) -> None:
"""HTTP response w/ status code not in forcelist shouldn't be retried"""
with HTTPConnectionPool(self.host, self.port) as pool:
retry = Retry(total=1, status_forcelist=[202])
resp = pool.request(
"GET",
"/successful_retry",
headers={"test-name": "test_wrong_forcelist"},
retries=retry,
)
assert resp.status == 418
def test_default_method_forcelist_retried(self) -> None:
"""urllib3 should retry methods in the default method forcelist"""
with HTTPConnectionPool(self.host, self.port) as pool:
retry = Retry(total=1, status_forcelist=[418])
resp = pool.request(
"OPTIONS",
"/successful_retry",
headers={"test-name": "test_default_forcelist"},
retries=retry,
)
assert resp.status == 200
def test_retries_wrong_method_list(self) -> None:
"""Method not in our allowed list should not be retried, even if code matches"""
with HTTPConnectionPool(self.host, self.port) as pool:
headers = {"test-name": "test_wrong_allowed_method"}
retry = Retry(total=1, status_forcelist=[418], allowed_methods=["POST"])
resp = pool.request(
"GET", "/successful_retry", headers=headers, retries=retry
)
assert resp.status == 418
def test_read_retries_unsuccessful(self) -> None:
with HTTPConnectionPool(self.host, self.port) as pool:
headers = {"test-name": "test_read_retries_unsuccessful"}
resp = pool.request("GET", "/successful_retry", headers=headers, retries=1)
assert resp.status == 418
def test_retry_reuse_safe(self) -> None:
"""It should be possible to reuse a Retry object across requests"""
with HTTPConnectionPool(self.host, self.port) as pool:
headers = {"test-name": "test_retry_safe"}
retry = Retry(total=1, status_forcelist=[418])
resp = pool.request(
"GET", "/successful_retry", headers=headers, retries=retry
)
assert resp.status == 200
with HTTPConnectionPool(self.host, self.port) as pool:
resp = pool.request(
"GET", "/successful_retry", headers=headers, retries=retry
)
assert resp.status == 200
def test_retry_return_in_response(self) -> None:
with HTTPConnectionPool(self.host, self.port) as pool:
headers = {"test-name": "test_retry_return_in_response"}
retry = Retry(total=2, status_forcelist=[418])
resp = pool.request(
"GET", "/successful_retry", headers=headers, retries=retry
)
assert resp.status == 200
assert resp.retries is not None
assert resp.retries.total == 1
assert resp.retries.history == (
RequestHistory("GET", "/successful_retry", None, 418, None),
)
def test_retry_redirect_history(self) -> None:
with HTTPConnectionPool(self.host, self.port) as pool:
resp = pool.request("GET", "/redirect", fields={"target": "/"})
assert resp.status == 200
assert resp.retries is not None
assert resp.retries.history == (
RequestHistory("GET", "/redirect?target=%2F", None, 303, "/"),
)
def test_multi_redirect_history(self) -> None:
with HTTPConnectionPool(self.host, self.port) as pool:
r = pool.request(
"GET",
"/multi_redirect",
fields={"redirect_codes": "303,302,200"},
redirect=False,
)
assert r.status == 303
assert r.retries is not None
assert r.retries.history == tuple()
with HTTPConnectionPool(self.host, self.port) as pool:
r = pool.request(
"GET",
"/multi_redirect",
retries=10,
fields={"redirect_codes": "303,302,301,307,302,200"},
)
assert r.status == 200
assert r.data == b"Done redirecting"
expected = [
(303, "/multi_redirect?redirect_codes=302,301,307,302,200"),
(302, "/multi_redirect?redirect_codes=301,307,302,200"),
(301, "/multi_redirect?redirect_codes=307,302,200"),
(307, "/multi_redirect?redirect_codes=302,200"),
(302, "/multi_redirect?redirect_codes=200"),
]
assert r.retries is not None
actual = [
(history.status, history.redirect_location)
for history in r.retries.history
]
assert actual == expected
class TestRetryAfter(HTTPDummyServerTestCase):
def test_retry_after(self) -> None:
# Request twice in a second to get a 429 response.
with HTTPConnectionPool(self.host, self.port) as pool:
r = pool.request(
"GET",
"/retry_after",
fields={"status": "429 Too Many Requests"},
retries=False,
)
r = pool.request(
"GET",
"/retry_after",
fields={"status": "429 Too Many Requests"},
retries=False,
)
assert r.status == 429
r = pool.request(
"GET",
"/retry_after",
fields={"status": "429 Too Many Requests"},
retries=True,
)
assert r.status == 200
# Request twice in a second to get a 503 response.
r = pool.request(
"GET",
"/retry_after",
fields={"status": "503 Service Unavailable"},
retries=False,
)
r = pool.request(
"GET",
"/retry_after",
fields={"status": "503 Service Unavailable"},
retries=False,
)
assert r.status == 503
r = pool.request(
"GET",
"/retry_after",
fields={"status": "503 Service Unavailable"},
retries=True,
)
assert r.status == 200
# Ignore Retry-After header on status which is not defined in
# Retry.RETRY_AFTER_STATUS_CODES.
r = pool.request(
"GET",
"/retry_after",
fields={"status": "418 I'm a teapot"},
retries=True,
)
assert r.status == 418
def test_redirect_after(self) -> None:
with HTTPConnectionPool(self.host, self.port) as pool:
r = pool.request("GET", "/redirect_after", retries=False)
assert r.status == 303
t = time.time()
r = pool.request("GET", "/redirect_after")
assert r.status == 200
delta = time.time() - t
assert delta >= 1
t = time.time()
timestamp = t + 2
r = pool.request("GET", "/redirect_after?date=" + str(timestamp))
assert r.status == 200
delta = time.time() - t
assert delta >= 1
# Retry-After is past
t = time.time()
timestamp = t - 1
r = pool.request("GET", "/redirect_after?date=" + str(timestamp))
delta = time.time() - t
assert r.status == 200
assert delta < 1
class TestFileBodiesOnRetryOrRedirect(HTTPDummyServerTestCase):
def test_retries_put_filehandle(self) -> None:
"""HTTP PUT retry with a file-like object should not timeout"""
with HTTPConnectionPool(self.host, self.port, timeout=0.1) as pool:
retry = Retry(total=3, status_forcelist=[418])
# httplib reads in 8k chunks; use a larger content length
content_length = 65535
data = b"A" * content_length
uploaded_file = io.BytesIO(data)
headers = {
"test-name": "test_retries_put_filehandle",
"Content-Length": str(content_length),
}
resp = pool.urlopen(
"PUT",
"/successful_retry",
headers=headers,
retries=retry,
body=uploaded_file,
assert_same_host=False,
redirect=False,
)
assert resp.status == 200
def test_redirect_put_file(self) -> None:
"""PUT with file object should work with a redirection response"""
with HTTPConnectionPool(self.host, self.port, timeout=0.1) as pool:
retry = Retry(total=3, status_forcelist=[418])
# httplib reads in 8k chunks; use a larger content length
content_length = 65535
data = b"A" * content_length
uploaded_file = io.BytesIO(data)
headers = {
"test-name": "test_redirect_put_file",
"Content-Length": str(content_length),
}
url = "/redirect?target=/echo&status=307"
resp = pool.urlopen(
"PUT",
url,
headers=headers,
retries=retry,
body=uploaded_file,
assert_same_host=False,
redirect=True,
)
assert resp.status == 200
assert resp.data == data
def test_redirect_with_failed_tell(self) -> None:
"""Abort request if failed to get a position from tell()"""
class BadTellObject(io.BytesIO):
def tell(self) -> typing.NoReturn:
raise OSError
body = BadTellObject(b"the data")
url = "/redirect?target=/successful_retry"
# httplib uses fileno if Content-Length isn't supplied,
# which is unsupported by BytesIO.
headers = {"Content-Length": "8"}
with HTTPConnectionPool(self.host, self.port, timeout=0.1) as pool:
with pytest.raises(
UnrewindableBodyError, match="Unable to record file position for"
):
pool.urlopen("PUT", url, headers=headers, body=body)
class TestRetryPoolSize(HTTPDummyServerTestCase):
def test_pool_size_retry(self) -> None:
retries = Retry(total=1, raise_on_status=False, status_forcelist=[404])
with HTTPConnectionPool(
self.host, self.port, maxsize=10, retries=retries, block=True
) as pool:
pool.urlopen("GET", "/not_found", preload_content=False)
assert pool.num_connections == 1
class TestRedirectPoolSize(HTTPDummyServerTestCase):
def test_pool_size_redirect(self) -> None:
retries = Retry(
total=1, raise_on_status=False, status_forcelist=[404], redirect=True
)
with HTTPConnectionPool(
self.host, self.port, maxsize=10, retries=retries, block=True
) as pool:
pool.urlopen("GET", "/redirect", preload_content=False)
assert pool.num_connections == 1
|
{
"content_hash": "5222679a24de5f119f6b75e1e7162c7f",
"timestamp": "",
"source": "github",
"line_count": 1446,
"max_line_length": 110,
"avg_line_length": 39.45089903181189,
"alnum_prop": 0.5554640114994916,
"repo_name": "urllib3/urllib3",
"id": "6aea007a21887696fee0310ee5a45bcdce24481b",
"size": "57049",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "test/with_dummyserver/test_connectionpool.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "963829"
},
{
"name": "Shell",
"bytes": "326"
}
],
"symlink_target": ""
}
|
"""mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
from home import views as view_home
from django.conf import settings
from django.views.generic.base import RedirectView, TemplateView
urlpatterns = [
# Root-level redirects for common browser requests
url(r'^favicon\.ico$', RedirectView.as_view(url=settings.STATIC_URL + 'img/compressed/favicon.ico'), name='favicon.ico'),
url(r'^robots\.txt$', TemplateView.as_view(template_name='robots.txt', content_type='text/plain'), name='robots.txt'),
]
urlpatterns += [
url(r'^$', view_home.home, name='home'),
url(r'^blog/', include('blog.urls', namespace="blog")),
url(r'^ckeditor/', include('ckeditor_uploader.urls')),
url(r'^admin/', admin.site.urls),
]
if settings.DEBUG:
try:
from django.conf.urls.static import static
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
    # An ImportError here should only occur when debug mode is enabled
    # for production-style testing.
except ImportError as e:
        import logging
        logger = logging.getLogger(__name__)
        logger.warning(e)
|
{
"content_hash": "6cbc4603eec6a989fbe1c6cf6bc85778",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 125,
"avg_line_length": 37.3125,
"alnum_prop": 0.6957007258514796,
"repo_name": "janusnic/dj-21v",
"id": "58bb549bdc1f8c1cab850995f8870d2f9dff1d60",
"size": "1791",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "unit_08/mysite/mysite/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "607197"
},
{
"name": "HTML",
"bytes": "352620"
},
{
"name": "JavaScript",
"bytes": "4098502"
},
{
"name": "Python",
"bytes": "1906453"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
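# Creature template for an elderly female Zabrak noble NPC; custom tweaks
# belong between the BEGIN/END MODIFICATIONS markers below.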
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_dressed_noble_old_zabrak_female_02.iff"
result.attribute_template_id = 9
result.stfName("npc_name","zabrak_base_female")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
{
"content_hash": "552dc8049d1ac9a66fd11cf8439b80d1",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 80,
"avg_line_length": 24.384615384615383,
"alnum_prop": 0.7003154574132492,
"repo_name": "anhstudios/swganh",
"id": "6f9ba529164ae7d27b6302b250a010df71af867c",
"size": "462",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/mobile/shared_dressed_noble_old_zabrak_female_02.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
}
|
import os
import pickle
from ffthompy.tensorsLowRank.homogenisation import (homog_Ga_full_potential,
homog_GaNi_full_potential,
homog_Ga_sparse,
homog_GaNi_sparse)
from examples.lowRankTensorApproximations.setting import get_material_coef, kind_list, get_default_parameters
import itertools
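# Time the full CG solver against the low-rank (sparse) solver on a stochastic
# material: for each grid size N the solution rank is increased until the
# homogenised property matches the full solution within err_tol, and the
# timings, ranks and memory ratios are pickled for plotting.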
print('running time-comparison for stochastic material...')
kinds = {'2': 0,
'3': 2,}
N_lists = {'2': [45, 135, 320, 405, 640, 1215, 2560,3645,5120],
'3': [5, 15, 45, 80, 135, 175, 225, 305, 375]}
err_tol_list=[1e-4, 1e-6]
method=1 # 0-Ga, 1-GaNi
data_folder = "data_for_plot/time"
for dim, material in itertools.product([2, 3], [2, 4]):
if not os.path.exists('{}/dim_{}/mat_{}/'.format(data_folder,dim, material)):
os.makedirs('{}/dim_{}/mat_{}/'.format(data_folder,dim, material))
N_list = N_lists['{}'.format(dim)]
kind=kinds['{}'.format(dim)]
full_time_list = [None]*len(N_list)
sparse_time_list = [[None]*len(N_list), [None]*len(N_list)]
rank_list = [[None]*len(N_list), [None]*len(N_list)]
memory_list = [[None]*len(N_list), [None]*len(N_list)]
for i, N in enumerate(N_list):
# PARAMETERS ##############################################################
pars, pars_sparse=get_default_parameters(dim, N, material, kind)
pars.solver.update(dict(tol=1e-6))
# generating material coefficients
if method in ['Ga',0]:
Aga, Agani, Agas, Aganis=get_material_coef(material, pars, pars_sparse)
print('\n== Full solution with potential by CG (Ga) ===========')
resP_Ga=homog_Ga_full_potential(Aga, pars)
print('mean of solution={}'.format(resP_Ga.Fu.mean()))
print('homogenised properties (component 11) = {}'.format(resP_Ga.AH))
            full_time_list[i]=resP_Ga.time
            resP=resP_Ga  # alias so the rank loop below can compare against the full solution
elif method in ['GaNi',1]:
Aga, Agani, Agas, Aganis=get_material_coef(material, pars, pars_sparse, ga=False)
print('\n== Full solution with potential by CG (GaNi)===========')
resP=homog_GaNi_full_potential(Agani, Aga, pars)
print('mean of solution={}'.format(resP.Fu.mean()))
print('homogenised properties (component 11) = {}'.format(resP.AH))
else:
raise ValueError()
full_time_list[i]=resP.time
for counter, err_tol in enumerate(err_tol_list):
for r in range(4, N+1, 2):
pars_sparse.solver.update(dict(rank=r)) # rank of solution vector
print('\n== format={}, N={}, dim={}, material={}, rank={}, err_tol={} ===='.format(pars_sparse.kind,
N, dim, material, pars_sparse.solver['rank'], err_tol))
# PROBLEM DEFINITION ######################################################
if method in ['Ga',0]:
print('\n== SPARSE solver with preconditioner (Ga) =======================')
resS=homog_Ga_sparse(Agas, pars_sparse)
print('mean of solution={}'.format(resS.Fu.mean()))
print('homogenised properties (component 11) = {}'.format(resS.AH))
print('norm(resP)={}'.format(resS.solver['norm_res']))
elif method in ['GaNi',1]:
print('\n== SPARSE solver with preconditioner (GaNi) =======================')
resS=homog_GaNi_sparse(Aganis, Agas, pars_sparse)
print('mean of solution={}'.format(resS.Fu.mean()))
print('homogenised properties (component 11) = {}'.format(resS.AH))
print('iterations={}'.format(resS.solver['kit']))
print('norm(resP)={}'.format(resS.solver['norm_res']))
print('memory efficiency = {0}/{1} = {2}'.format(resS.Fu.memory, resP.Fu.val.size, resS.Fu.memory/resP.Fu.val.size))
print("solution discrepancy", (resS.AH - resP.AH)/resP.AH)
if (resS.AH - resP.AH)/resP.AH <= err_tol:
rank_list[counter][i]=r
sparse_time_list[counter][i]=resS.time
memory_list[counter][i]=resS.Fu.memory/resP.Fu.val.size # memory efficiency
print("tensorsLowRank solver time:",sparse_time_list[counter])
print("full solver time:",full_time_list)
print("rank:",rank_list[counter])
break
print("tensorsLowRank solver time:",sparse_time_list)
print("full solver time:",full_time_list)
print("rank:",rank_list)
pickle.dump(N_list, open("{}/dim_{}/mat_{}/N_list_{}.p"
.format(data_folder,dim, material,kind_list[kind]), "wb"))
pickle.dump(full_time_list, open("{}/dim_{}/mat_{}/full_time_list_{}.p"
.format(data_folder,dim, material,kind_list[kind]), "wb"))
pickle.dump(sparse_time_list[0], open(("{}/dim_{}/mat_{}/sparse_time_list_{}_"+"{:.0e}".format(err_tol_list[0])+'.p')
.format(data_folder,dim, material,kind_list[kind]), "wb"))
pickle.dump(sparse_time_list[1], open(("{}/dim_{}/mat_{}/sparse_time_list_{}_"+"{:.0e}".format(err_tol_list[1])+'.p')
.format(data_folder,dim, material,kind_list[kind]), "wb"))
pickle.dump(rank_list[0], open(("{}/dim_{}/mat_{}/rank_list_{}_"+"{:.0e}".format(err_tol_list[0])+'.p')
.format(data_folder,dim, material,kind_list[kind]), "wb"))
pickle.dump(rank_list[1], open(("{}/dim_{}/mat_{}/rank_list_{}_"+"{:.0e}".format(err_tol_list[1])+'.p')
.format(data_folder,dim, material,kind_list[kind]), "wb"))
print('END')
|
{
"content_hash": "99c378ee41107ade8a145afcbca664a0",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 136,
"avg_line_length": 52.82142857142857,
"alnum_prop": 0.5206220419202163,
"repo_name": "vondrejc/FFTHomPy",
"id": "81e8b2ce30475ae7e904b8ae47224d75ef7ebc50",
"size": "5916",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/lowRankTensorApproximations/diffusion_comp_time_stochastic_material.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "332845"
}
],
"symlink_target": ""
}
|
from django.conf.urls import url
from api.eating.views import EatingAPIView
from api.date.views import UserDateAPIView, AdminDateAPIView
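# Eating endpoint plus per-day date views for regular users and admins;
# the date URLs capture year/month/day as named groups.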
urlpatterns = [
url(r'^eating/$', EatingAPIView.as_view()),
url(r'^user/date/(?P<year>\d+)/(?P<month>\d+)/(?P<day>\d+)/$', UserDateAPIView.as_view()),
url(r'^admin/date/(?P<year>\d+)/(?P<month>\d+)/(?P<day>\d+)/$', AdminDateAPIView.as_view()),
]
|
{
"content_hash": "04a7c03dd0d505f9f3f1a2bae27ae45d",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 96,
"avg_line_length": 36.27272727272727,
"alnum_prop": 0.6541353383458647,
"repo_name": "jupiny/MIDASChallenge2017",
"id": "5711945d741bc3e9e21345491b50a9faf31f73d3",
"size": "399",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "midas_web_solution/api/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1597"
},
{
"name": "HTML",
"bytes": "65927"
},
{
"name": "JavaScript",
"bytes": "29024"
},
{
"name": "Makefile",
"bytes": "110"
},
{
"name": "Python",
"bytes": "31157"
}
],
"symlink_target": ""
}
|
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import os,sys,errno,re,shutil
try:
import cPickle
except ImportError:
import pickle as cPickle
from waflib import Runner,TaskGen,Utils,ConfigSet,Task,Logs,Options,Context,Errors
import waflib.Node
CACHE_DIR='c4che'
CACHE_SUFFIX='_cache.py'
INSTALL=1337
UNINSTALL=-1337
SAVED_ATTRS='root node_deps raw_deps task_sigs'.split()
CFG_FILES='cfg_files'
POST_AT_ONCE=0
POST_LAZY=1
POST_BOTH=2
class BuildContext(Context.Context):
'''executes the build'''
cmd='build'
variant=''
def __init__(self,**kw):
super(BuildContext,self).__init__(**kw)
self.is_install=0
self.top_dir=kw.get('top_dir',Context.top_dir)
self.run_dir=kw.get('run_dir',Context.run_dir)
self.post_mode=POST_AT_ONCE
self.out_dir=kw.get('out_dir',Context.out_dir)
self.cache_dir=kw.get('cache_dir',None)
if not self.cache_dir:
self.cache_dir=self.out_dir+os.sep+CACHE_DIR
self.all_envs={}
self.task_sigs={}
self.node_deps={}
self.raw_deps={}
self.cache_dir_contents={}
self.task_gen_cache_names={}
self.launch_dir=Context.launch_dir
self.jobs=Options.options.jobs
self.targets=Options.options.targets
self.keep=Options.options.keep
self.cache_global=Options.cache_global
self.nocache=Options.options.nocache
self.progress_bar=Options.options.progress_bar
self.deps_man=Utils.defaultdict(list)
self.current_group=0
self.groups=[]
self.group_names={}
def get_variant_dir(self):
if not self.variant:
return self.out_dir
return os.path.join(self.out_dir,self.variant)
variant_dir=property(get_variant_dir,None)
def __call__(self,*k,**kw):
kw['bld']=self
ret=TaskGen.task_gen(*k,**kw)
self.task_gen_cache_names={}
self.add_to_group(ret,group=kw.get('group',None))
return ret
def rule(self,*k,**kw):
def f(rule):
ret=self(*k,**kw)
ret.rule=rule
return ret
return f
def __copy__(self):
raise Errors.WafError('build contexts are not supposed to be copied')
def install_files(self,*k,**kw):
pass
def install_as(self,*k,**kw):
pass
def symlink_as(self,*k,**kw):
pass
def load_envs(self):
node=self.root.find_node(self.cache_dir)
if not node:
raise Errors.WafError('The project was not configured: run "waf configure" first!')
lst=node.ant_glob('**/*%s'%CACHE_SUFFIX,quiet=True)
if not lst:
raise Errors.WafError('The cache directory is empty: reconfigure the project')
for x in lst:
name=x.path_from(node).replace(CACHE_SUFFIX,'').replace('\\','/')
env=ConfigSet.ConfigSet(x.abspath())
self.all_envs[name]=env
for f in env[CFG_FILES]:
newnode=self.root.find_resource(f)
try:
h=Utils.h_file(newnode.abspath())
except(IOError,AttributeError):
Logs.error('cannot find %r'%f)
h=Utils.SIG_NIL
newnode.sig=h
def init_dirs(self):
if not(os.path.isabs(self.top_dir)and os.path.isabs(self.out_dir)):
raise Errors.WafError('The project was not configured: run "waf configure" first!')
self.path=self.srcnode=self.root.find_dir(self.top_dir)
self.bldnode=self.root.make_node(self.variant_dir)
self.bldnode.mkdir()
def execute(self):
self.restore()
if not self.all_envs:
self.load_envs()
self.execute_build()
def execute_build(self):
Logs.info("Waf: Entering directory `%s'"%self.variant_dir)
self.recurse([self.run_dir])
self.pre_build()
self.timer=Utils.Timer()
if self.progress_bar:
sys.stderr.write(Logs.colors.cursor_off)
try:
self.compile()
finally:
if self.progress_bar==1:
c=len(self.returned_tasks)or 1
self.to_log(self.progress_line(c,c,Logs.colors.BLUE,Logs.colors.NORMAL))
print('')
sys.stdout.flush()
sys.stderr.write(Logs.colors.cursor_on)
Logs.info("Waf: Leaving directory `%s'"%self.variant_dir)
self.post_build()
def restore(self):
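		# Reload the saved build configuration and the pickled build cache
		# (task signatures, node and raw dependencies) from the previous run.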
try:
env=ConfigSet.ConfigSet(os.path.join(self.cache_dir,'build.config.py'))
except(IOError,OSError):
pass
else:
if env['version']<Context.HEXVERSION:
raise Errors.WafError('Version mismatch! reconfigure the project')
for t in env['tools']:
self.setup(**t)
f=None
try:
dbfn=os.path.join(self.variant_dir,Context.DBFILE)
try:
f=open(dbfn,'rb')
except(IOError,EOFError):
Logs.debug('build: Could not load the build cache %s (missing)'%dbfn)
else:
try:
waflib.Node.pickle_lock.acquire()
waflib.Node.Nod3=self.node_class
try:
data=cPickle.load(f)
					except Exception as e:
Logs.debug('build: Could not pickle the build cache %s: %r'%(dbfn,e))
else:
for x in SAVED_ATTRS:
setattr(self,x,data[x])
finally:
waflib.Node.pickle_lock.release()
finally:
if f:
f.close()
self.init_dirs()
def store(self):
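		# Persist the cache atomically: pickle SAVED_ATTRS to DBFILE.tmp,
		# keep the old file's ownership where possible, then rename over it.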
data={}
for x in SAVED_ATTRS:
data[x]=getattr(self,x)
db=os.path.join(self.variant_dir,Context.DBFILE)
try:
waflib.Node.pickle_lock.acquire()
waflib.Node.Nod3=self.node_class
f=None
try:
f=open(db+'.tmp','wb')
cPickle.dump(data,f,-1)
finally:
if f:
f.close()
finally:
waflib.Node.pickle_lock.release()
try:
st=os.stat(db)
os.unlink(db)
if not Utils.is_win32:
os.chown(db+'.tmp',st.st_uid,st.st_gid)
except(AttributeError,OSError):
pass
os.rename(db+'.tmp',db)
def compile(self):
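		# Consume the build iterator with a parallel runner; the cache is
		# stored even if the build is interrupted or ends with an error.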
Logs.debug('build: compile()')
self.producer=Runner.Parallel(self,self.jobs)
self.producer.biter=self.get_build_iterator()
self.returned_tasks=[]
try:
self.producer.start()
except KeyboardInterrupt:
self.store()
raise
else:
if self.producer.dirty:
self.store()
if self.producer.error:
raise Errors.BuildError(self.producer.error)
def setup(self,tool,tooldir=None,funs=None):
if isinstance(tool,list):
for i in tool:self.setup(i,tooldir)
return
module=Context.load_tool(tool,tooldir)
if hasattr(module,"setup"):module.setup(self)
def get_env(self):
try:
return self.all_envs[self.variant]
except KeyError:
return self.all_envs['']
def set_env(self,val):
self.all_envs[self.variant]=val
env=property(get_env,set_env)
def add_manual_dependency(self,path,value):
if path is None:
raise ValueError('Invalid input')
if isinstance(path,waflib.Node.Node):
node=path
elif os.path.isabs(path):
node=self.root.find_resource(path)
else:
node=self.path.find_resource(path)
if isinstance(value,list):
self.deps_man[id(node)].extend(value)
else:
self.deps_man[id(node)].append(value)
def launch_node(self):
try:
return self.p_ln
except AttributeError:
self.p_ln=self.root.find_dir(self.launch_dir)
return self.p_ln
def hash_env_vars(self,env,vars_lst):
if not env.table:
env=env.parent
if not env:
return Utils.SIG_NIL
idx=str(id(env))+str(vars_lst)
try:
cache=self.cache_env
except AttributeError:
cache=self.cache_env={}
else:
try:
return self.cache_env[idx]
except KeyError:
pass
lst=[env[a]for a in vars_lst]
ret=Utils.h_list(lst)
Logs.debug('envhash: %s %r',Utils.to_hex(ret),lst)
cache[idx]=ret
return ret
def get_tgen_by_name(self,name):
cache=self.task_gen_cache_names
if not cache:
for g in self.groups:
for tg in g:
try:
cache[tg.name]=tg
except AttributeError:
pass
try:
return cache[name]
except KeyError:
raise Errors.WafError('Could not find a task generator for the name %r'%name)
def progress_line(self,state,total,col1,col2):
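		# Render one line of the textual progress bar: counters, percentage,
		# a spinner and the elapsed time, padded to the terminal width.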
n=len(str(total))
Utils.rot_idx+=1
ind=Utils.rot_chr[Utils.rot_idx%4]
pc=(100.*state)/total
eta=str(self.timer)
fs="[%%%dd/%%%dd][%%s%%2d%%%%%%s][%s]["%(n,n,ind)
left=fs%(state,total,col1,pc,col2)
right='][%s%s%s]'%(col1,eta,col2)
cols=Logs.get_term_cols()-len(left)-len(right)+2*len(col1)+2*len(col2)
if cols<7:cols=7
ratio=((cols*state)//total)-1
bar=('='*ratio+'>').ljust(cols)
msg=Utils.indicator%(left,bar,right)
return msg
def declare_chain(self,*k,**kw):
return TaskGen.declare_chain(*k,**kw)
def pre_build(self):
for m in getattr(self,'pre_funs',[]):
m(self)
def post_build(self):
for m in getattr(self,'post_funs',[]):
m(self)
def add_pre_fun(self,meth):
try:
self.pre_funs.append(meth)
except AttributeError:
self.pre_funs=[meth]
def add_post_fun(self,meth):
try:
self.post_funs.append(meth)
except AttributeError:
self.post_funs=[meth]
def get_group(self,x):
if not self.groups:
self.add_group()
if x is None:
return self.groups[self.current_group]
if x in self.group_names:
return self.group_names[x]
return self.groups[x]
def add_to_group(self,tgen,group=None):
assert(isinstance(tgen,TaskGen.task_gen)or isinstance(tgen,Task.TaskBase))
tgen.bld=self
self.get_group(group).append(tgen)
def get_group_name(self,g):
if not isinstance(g,list):
g=self.groups[g]
for x in self.group_names:
if id(self.group_names[x])==id(g):
return x
return''
def get_group_idx(self,tg):
se=id(tg)
for i in range(len(self.groups)):
for t in self.groups[i]:
if id(t)==se:
return i
return None
def add_group(self,name=None,move=True):
if name and name in self.group_names:
Logs.error('add_group: name %s already present'%name)
g=[]
self.group_names[name]=g
self.groups.append(g)
if move:
self.current_group=len(self.groups)-1
def set_group(self,idx):
if isinstance(idx,str):
g=self.group_names[idx]
for i in range(len(self.groups)):
if id(g)==id(self.groups[i]):
self.current_group=i
else:
self.current_group=idx
def total(self):
total=0
for group in self.groups:
for tg in group:
try:
total+=len(tg.tasks)
except AttributeError:
total+=1
return total
def get_targets(self):
to_post=[]
min_grp=0
for name in self.targets.split(','):
tg=self.get_tgen_by_name(name)
if not tg:
raise Errors.WafError('target %r does not exist'%name)
m=self.get_group_idx(tg)
if m>min_grp:
min_grp=m
to_post=[tg]
elif m==min_grp:
to_post.append(tg)
return(min_grp,to_post)
def get_all_task_gen(self):
lst=[]
for g in self.groups:
lst.extend(g)
return lst
def post_group(self):
if self.targets=='*':
for tg in self.groups[self.cur]:
try:
f=tg.post
except AttributeError:
pass
else:
f()
elif self.targets:
if self.cur<self._min_grp:
for tg in self.groups[self.cur]:
try:
f=tg.post
except AttributeError:
pass
else:
f()
else:
for tg in self._exact_tg:
tg.post()
else:
ln=self.launch_node()
if ln.is_child_of(self.bldnode):
Logs.warn('Building from the build directory, forcing --targets=*')
ln=self.srcnode
elif not ln.is_child_of(self.srcnode):
Logs.warn('CWD %s is not under %s, forcing --targets=* (run distclean?)'%(ln.abspath(),self.srcnode.abspath()))
ln=self.srcnode
for tg in self.groups[self.cur]:
try:
f=tg.post
except AttributeError:
pass
else:
if tg.path.is_child_of(ln):
f()
def get_tasks_group(self,idx):
tasks=[]
for tg in self.groups[idx]:
try:
tasks.extend(tg.tasks)
except AttributeError:
tasks.append(tg)
return tasks
def get_build_iterator(self):
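		# Yield batches of runnable tasks group by group; task generators are
		# posted up front or lazily per group, depending on post_mode.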
self.cur=0
if self.targets and self.targets!='*':
(self._min_grp,self._exact_tg)=self.get_targets()
if self.post_mode!=POST_LAZY:
while self.cur<len(self.groups):
self.post_group()
self.cur+=1
self.cur=0
while self.cur<len(self.groups):
if self.post_mode!=POST_AT_ONCE:
self.post_group()
tasks=self.get_tasks_group(self.cur)
Task.set_file_constraints(tasks)
Task.set_precedence_constraints(tasks)
self.cur_tasks=tasks
self.cur+=1
if not tasks:
continue
yield tasks
while 1:
yield[]
class inst(Task.Task):
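	# Installation task: copies, renames or symlinks build outputs at install
	# time; missing sources may be resolved late by posting other generators.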
color='CYAN'
def uid(self):
lst=[self.dest,self.path]+self.source
return Utils.h_list(repr(lst))
def post(self):
buf=[]
for x in self.source:
if isinstance(x,waflib.Node.Node):
y=x
else:
y=self.path.find_resource(x)
if not y:
if Logs.verbose:
Logs.warn('Could not find %s immediately (may cause broken builds)'%x)
idx=self.generator.bld.get_group_idx(self)
for tg in self.generator.bld.groups[idx]:
if not isinstance(tg,inst)and id(tg)!=id(self):
tg.post()
y=self.path.find_resource(x)
if y:
break
else:
raise Errors.WafError('Could not find %r in %r'%(x,self.path))
buf.append(y)
self.inputs=buf
def runnable_status(self):
ret=super(inst,self).runnable_status()
if ret==Task.SKIP_ME:
return Task.RUN_ME
return ret
def __str__(self):
return''
def run(self):
return self.generator.exec_task()
def get_install_path(self,destdir=True):
dest=Utils.subst_vars(self.dest,self.env)
dest=dest.replace('/',os.sep)
if destdir and Options.options.destdir:
dest=os.path.join(Options.options.destdir,os.path.splitdrive(dest)[1].lstrip(os.sep))
return dest
def exec_install_files(self):
destpath=self.get_install_path()
if not destpath:
raise Errors.WafError('unknown installation path %r'%self.generator)
for x,y in zip(self.source,self.inputs):
if self.relative_trick:
destfile=os.path.join(destpath,y.path_from(self.path))
Utils.check_dir(os.path.dirname(destfile))
else:
destfile=os.path.join(destpath,y.name)
self.generator.bld.do_install(y.abspath(),destfile,self.chmod)
def exec_install_as(self):
destfile=self.get_install_path()
self.generator.bld.do_install(self.inputs[0].abspath(),destfile,self.chmod)
def exec_symlink_as(self):
destfile=self.get_install_path()
src=self.link
if self.relative_trick:
src=os.path.relpath(src,os.path.dirname(destfile))
self.generator.bld.do_link(src,destfile)
class InstallContext(BuildContext):
'''installs the targets on the system'''
cmd='install'
def __init__(self,**kw):
super(InstallContext,self).__init__(**kw)
self.uninstall=[]
self.is_install=INSTALL
def do_install(self,src,tgt,chmod=Utils.O644):
d,_=os.path.split(tgt)
if not d:
raise Errors.WafError('Invalid installation given %r->%r'%(src,tgt))
Utils.check_dir(d)
srclbl=src.replace(self.srcnode.abspath()+os.sep,'')
if not Options.options.force:
try:
st1=os.stat(tgt)
st2=os.stat(src)
except OSError:
pass
else:
if st1.st_mtime+2>=st2.st_mtime and st1.st_size==st2.st_size:
if not self.progress_bar:
Logs.info('- install %s (from %s)'%(tgt,srclbl))
return False
if not self.progress_bar:
Logs.info('+ install %s (from %s)'%(tgt,srclbl))
try:
os.remove(tgt)
except OSError:
pass
try:
shutil.copy2(src,tgt)
os.chmod(tgt,chmod)
except IOError:
try:
os.stat(src)
except(OSError,IOError):
Logs.error('File %r does not exist'%src)
raise Errors.WafError('Could not install the file %r'%tgt)
def do_link(self,src,tgt):
d,_=os.path.split(tgt)
Utils.check_dir(d)
link=False
if not os.path.islink(tgt):
link=True
elif os.readlink(tgt)!=src:
link=True
if link:
try:os.remove(tgt)
except OSError:pass
if not self.progress_bar:
Logs.info('+ symlink %s (to %s)'%(tgt,src))
os.symlink(src,tgt)
else:
if not self.progress_bar:
Logs.info('- symlink %s (to %s)'%(tgt,src))
def run_task_now(self,tsk,postpone):
tsk.post()
if not postpone:
if tsk.runnable_status()==Task.ASK_LATER:
				raise Errors.WafError('cannot post the task %r'%tsk)
tsk.run()
def install_files(self,dest,files,env=None,chmod=Utils.O644,relative_trick=False,cwd=None,add=True,postpone=True):
tsk=inst(env=env or self.env)
tsk.bld=self
tsk.path=cwd or self.path
tsk.chmod=chmod
if isinstance(files,waflib.Node.Node):
tsk.source=[files]
else:
tsk.source=Utils.to_list(files)
tsk.dest=dest
tsk.exec_task=tsk.exec_install_files
tsk.relative_trick=relative_trick
if add:self.add_to_group(tsk)
self.run_task_now(tsk,postpone)
return tsk
def install_as(self,dest,srcfile,env=None,chmod=Utils.O644,cwd=None,add=True,postpone=True):
tsk=inst(env=env or self.env)
tsk.bld=self
tsk.path=cwd or self.path
tsk.chmod=chmod
tsk.source=[srcfile]
tsk.dest=dest
tsk.exec_task=tsk.exec_install_as
if add:self.add_to_group(tsk)
self.run_task_now(tsk,postpone)
return tsk
def symlink_as(self,dest,src,env=None,cwd=None,add=True,postpone=True,relative_trick=False):
if Utils.is_win32:
return
tsk=inst(env=env or self.env)
tsk.bld=self
tsk.dest=dest
tsk.path=cwd or self.path
tsk.source=[]
tsk.link=src
tsk.relative_trick=relative_trick
tsk.exec_task=tsk.exec_symlink_as
if add:self.add_to_group(tsk)
self.run_task_now(tsk,postpone)
return tsk
class UninstallContext(InstallContext):
'''removes the targets installed'''
cmd='uninstall'
def __init__(self,**kw):
super(UninstallContext,self).__init__(**kw)
self.is_install=UNINSTALL
def do_install(self,src,tgt,chmod=Utils.O644):
if not self.progress_bar:
Logs.info('- remove %s'%tgt)
self.uninstall.append(tgt)
try:
os.remove(tgt)
		except OSError as e:
if e.errno!=errno.ENOENT:
if not getattr(self,'uninstall_error',None):
self.uninstall_error=True
Logs.warn('build: some files could not be uninstalled (retry with -vv to list them)')
if Logs.verbose>1:
Logs.warn('Could not remove %s (error code %r)'%(e.filename,e.errno))
while tgt:
tgt=os.path.dirname(tgt)
try:
os.rmdir(tgt)
except OSError:
break
def do_link(self,src,tgt):
try:
if not self.progress_bar:
Logs.info('- unlink %s'%tgt)
os.remove(tgt)
except OSError:
pass
while tgt:
tgt=os.path.dirname(tgt)
try:
os.rmdir(tgt)
except OSError:
break
def execute(self):
try:
def runnable_status(self):
return Task.SKIP_ME
setattr(Task.Task,'runnable_status_back',Task.Task.runnable_status)
setattr(Task.Task,'runnable_status',runnable_status)
super(UninstallContext,self).execute()
finally:
setattr(Task.Task,'runnable_status',Task.Task.runnable_status_back)
class CleanContext(BuildContext):
'''cleans the project'''
cmd='clean'
def execute(self):
self.restore()
if not self.all_envs:
self.load_envs()
self.recurse([self.run_dir])
try:
self.clean()
finally:
self.store()
def clean(self):
Logs.debug('build: clean called')
if self.bldnode!=self.srcnode:
lst=[]
for e in self.all_envs.values():
lst.extend(self.root.find_or_declare(f)for f in e[CFG_FILES])
for n in self.bldnode.ant_glob('**/*',excl='.lock* *conf_check_*/** config.log c4che/*',quiet=True):
if n in lst:
continue
n.delete()
self.root.children={}
for v in'node_deps task_sigs raw_deps'.split():
setattr(self,v,{})
class ListContext(BuildContext):
'''lists the targets to execute'''
cmd='list'
def execute(self):
self.restore()
if not self.all_envs:
self.load_envs()
self.recurse([self.run_dir])
self.pre_build()
self.timer=Utils.Timer()
for g in self.groups:
for tg in g:
try:
f=tg.post
except AttributeError:
pass
else:
f()
try:
self.get_tgen_by_name('')
except Exception:
pass
lst=list(self.task_gen_cache_names.keys())
lst.sort()
for k in lst:
Logs.pprint('GREEN',k)
class StepContext(BuildContext):
'''executes tasks in a step-by-step fashion, for debugging'''
cmd='step'
def __init__(self,**kw):
super(StepContext,self).__init__(**kw)
self.files=Options.options.files
def compile(self):
if not self.files:
Logs.warn('Add a pattern for the debug build, for example "waf step --files=main.c,app"')
BuildContext.compile(self)
return
targets=None
if self.targets and self.targets!='*':
targets=self.targets.split(',')
for g in self.groups:
for tg in g:
if targets and tg.name not in targets:
continue
try:
f=tg.post
except AttributeError:
pass
else:
f()
for pat in self.files.split(','):
matcher=self.get_matcher(pat)
for tg in g:
if isinstance(tg,Task.TaskBase):
lst=[tg]
else:
lst=tg.tasks
for tsk in lst:
do_exec=False
for node in getattr(tsk,'inputs',[]):
if matcher(node,output=False):
do_exec=True
break
for node in getattr(tsk,'outputs',[]):
if matcher(node,output=True):
do_exec=True
break
if do_exec:
ret=tsk.run()
Logs.info('%s -> exit %r'%(str(tsk),ret))
def get_matcher(self,pat):
inn=True
out=True
if pat.startswith('in:'):
out=False
pat=pat.replace('in:','')
elif pat.startswith('out:'):
inn=False
pat=pat.replace('out:','')
anode=self.root.find_node(pat)
pattern=None
if not anode:
if not pat.startswith('^'):
pat='^.+?%s'%pat
if not pat.endswith('$'):
pat='%s$'%pat
pattern=re.compile(pat)
def match(node,output):
if output==True and not out:
return False
if output==False and not inn:
return False
if anode:
return anode==node
else:
return pattern.match(node.abspath())
return match
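# Pattern semantics, illustrated (the --files values below are hypothetical):
#   'in:main.c'  -> match only task *inputs*;  'out:app' -> only *outputs*
#   'main.c'     -> no node resolves, so the pattern is wrapped into the regex
#                   '^.+?main.c$' and tested against node.abspath()
#   'build/app'  -> if find_node() resolves it, exact node identity is used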
BuildContext.store=Utils.nogc(BuildContext.store)
BuildContext.restore=Utils.nogc(BuildContext.restore)
|
{
"content_hash": "7621f87d55bbf7ff1222dcdc9ba2ac80",
"timestamp": "",
"source": "github",
"line_count": 769,
"max_line_length": 115,
"avg_line_length": 27.4629388816645,
"alnum_prop": 0.6688290165254037,
"repo_name": "pipsiscool/audacity",
"id": "70330ef35d748e8d715dbd79a0c3f58736b4c4f5",
"size": "21119",
"binary": false,
"copies": "58",
"ref": "refs/heads/master",
"path": "lib-src/lv2/sord/waflib/Build.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "141298"
},
{
"name": "Awk",
"bytes": "2350"
},
{
"name": "C",
"bytes": "16931951"
},
{
"name": "C++",
"bytes": "21277015"
},
{
"name": "CMake",
"bytes": "102838"
},
{
"name": "CSS",
"bytes": "87696"
},
{
"name": "Common Lisp",
"bytes": "533537"
},
{
"name": "Groff",
"bytes": "65243"
},
{
"name": "HTML",
"bytes": "2177363"
},
{
"name": "Inno Setup",
"bytes": "19531"
},
{
"name": "Java",
"bytes": "84589"
},
{
"name": "M",
"bytes": "6242"
},
{
"name": "Makefile",
"bytes": "141297"
},
{
"name": "Matlab",
"bytes": "2467"
},
{
"name": "NewLisp",
"bytes": "2831"
},
{
"name": "Objective-C",
"bytes": "17554"
},
{
"name": "Pascal",
"bytes": "17208"
},
{
"name": "Perl",
"bytes": "129212"
},
{
"name": "Prolog",
"bytes": "939"
},
{
"name": "Python",
"bytes": "3636067"
},
{
"name": "QMake",
"bytes": "971"
},
{
"name": "R",
"bytes": "305850"
},
{
"name": "Shell",
"bytes": "6354469"
},
{
"name": "Smarty",
"bytes": "172490"
},
{
"name": "TeX",
"bytes": "146115"
}
],
"symlink_target": ""
}
|
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
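# _b() is the usual generated-code shim: identity on Python 2, latin-1 encode
# on Python 3, so the serialized descriptor below is a byte string on both.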
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from tensorflow.core.protobuf import config_pb2 as tensorflow_dot_core_dot_protobuf_dot_config__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='tensorflow/core/protobuf/tensorflow_server.proto',
package='tensorflow',
syntax='proto3',
serialized_pb=_b('\n0tensorflow/core/protobuf/tensorflow_server.proto\x12\ntensorflow\x1a%tensorflow/core/protobuf/config.proto\"r\n\x06JobDef\x12\x0c\n\x04name\x18\x01 \x01(\t\x12,\n\x05tasks\x18\x02 \x03(\x0b\x32\x1d.tensorflow.JobDef.TasksEntry\x1a,\n\nTasksEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"-\n\nClusterDef\x12\x1f\n\x03job\x18\x01 \x03(\x0b\x32\x12.tensorflow.JobDef\"\xa5\x01\n\tServerDef\x12\'\n\x07\x63luster\x18\x01 \x01(\x0b\x32\x16.tensorflow.ClusterDef\x12\x10\n\x08job_name\x18\x02 \x01(\t\x12\x12\n\ntask_index\x18\x03 \x01(\x05\x12\x37\n\x16\x64\x65\x66\x61ult_session_config\x18\x04 \x01(\x0b\x32\x17.tensorflow.ConfigProto\x12\x10\n\x08protocol\x18\x05 \x01(\tB/\n\x1aorg.tensorflow.distruntimeB\x0cServerProtosP\x01\xf8\x01\x01\x62\x06proto3')
,
dependencies=[tensorflow_dot_core_dot_protobuf_dot_config__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_JOBDEF_TASKSENTRY = _descriptor.Descriptor(
name='TasksEntry',
full_name='tensorflow.JobDef.TasksEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='tensorflow.JobDef.TasksEntry.key', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='tensorflow.JobDef.TasksEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=173,
serialized_end=217,
)
_JOBDEF = _descriptor.Descriptor(
name='JobDef',
full_name='tensorflow.JobDef',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='tensorflow.JobDef.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tasks', full_name='tensorflow.JobDef.tasks', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_JOBDEF_TASKSENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=103,
serialized_end=217,
)
_CLUSTERDEF = _descriptor.Descriptor(
name='ClusterDef',
full_name='tensorflow.ClusterDef',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='job', full_name='tensorflow.ClusterDef.job', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=219,
serialized_end=264,
)
_SERVERDEF = _descriptor.Descriptor(
name='ServerDef',
full_name='tensorflow.ServerDef',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='cluster', full_name='tensorflow.ServerDef.cluster', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='job_name', full_name='tensorflow.ServerDef.job_name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='task_index', full_name='tensorflow.ServerDef.task_index', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='default_session_config', full_name='tensorflow.ServerDef.default_session_config', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='protocol', full_name='tensorflow.ServerDef.protocol', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=267,
serialized_end=432,
)
_JOBDEF_TASKSENTRY.containing_type = _JOBDEF
_JOBDEF.fields_by_name['tasks'].message_type = _JOBDEF_TASKSENTRY
_CLUSTERDEF.fields_by_name['job'].message_type = _JOBDEF
_SERVERDEF.fields_by_name['cluster'].message_type = _CLUSTERDEF
_SERVERDEF.fields_by_name['default_session_config'].message_type = tensorflow_dot_core_dot_protobuf_dot_config__pb2._CONFIGPROTO
DESCRIPTOR.message_types_by_name['JobDef'] = _JOBDEF
DESCRIPTOR.message_types_by_name['ClusterDef'] = _CLUSTERDEF
DESCRIPTOR.message_types_by_name['ServerDef'] = _SERVERDEF
JobDef = _reflection.GeneratedProtocolMessageType('JobDef', (_message.Message,), dict(
TasksEntry = _reflection.GeneratedProtocolMessageType('TasksEntry', (_message.Message,), dict(
DESCRIPTOR = _JOBDEF_TASKSENTRY,
__module__ = 'tensorflow.core.protobuf.tensorflow_server_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.JobDef.TasksEntry)
))
,
DESCRIPTOR = _JOBDEF,
__module__ = 'tensorflow.core.protobuf.tensorflow_server_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.JobDef)
))
_sym_db.RegisterMessage(JobDef)
_sym_db.RegisterMessage(JobDef.TasksEntry)
ClusterDef = _reflection.GeneratedProtocolMessageType('ClusterDef', (_message.Message,), dict(
DESCRIPTOR = _CLUSTERDEF,
__module__ = 'tensorflow.core.protobuf.tensorflow_server_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.ClusterDef)
))
_sym_db.RegisterMessage(ClusterDef)
ServerDef = _reflection.GeneratedProtocolMessageType('ServerDef', (_message.Message,), dict(
DESCRIPTOR = _SERVERDEF,
__module__ = 'tensorflow.core.protobuf.tensorflow_server_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.ServerDef)
))
_sym_db.RegisterMessage(ServerDef)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\032org.tensorflow.distruntimeB\014ServerProtosP\001\370\001\001'))
_JOBDEF_TASKSENTRY.has_options = True
_JOBDEF_TASKSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
# @@protoc_insertion_point(module_scope)
|
{
"content_hash": "a937b135a427ad09eb89c041d579230a",
"timestamp": "",
"source": "github",
"line_count": 235,
"max_line_length": 817,
"avg_line_length": 37.13191489361702,
"alnum_prop": 0.7110932844373138,
"repo_name": "ryfeus/lambda-packs",
"id": "cd66ddb5c07e06d8aa59121266644039c6af986e",
"size": "8845",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "Keras_tensorflow/source/tensorflow/core/protobuf/tensorflow_server_pb2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9768343"
},
{
"name": "C++",
"bytes": "76566960"
},
{
"name": "CMake",
"bytes": "191097"
},
{
"name": "CSS",
"bytes": "153538"
},
{
"name": "Cuda",
"bytes": "61768"
},
{
"name": "Cython",
"bytes": "3110222"
},
{
"name": "Fortran",
"bytes": "110284"
},
{
"name": "HTML",
"bytes": "248658"
},
{
"name": "JavaScript",
"bytes": "62920"
},
{
"name": "MATLAB",
"bytes": "17384"
},
{
"name": "Makefile",
"bytes": "152150"
},
{
"name": "Python",
"bytes": "549307737"
},
{
"name": "Roff",
"bytes": "26398"
},
{
"name": "SWIG",
"bytes": "142"
},
{
"name": "Shell",
"bytes": "7790"
},
{
"name": "Smarty",
"bytes": "4090"
},
{
"name": "TeX",
"bytes": "152062"
},
{
"name": "XSLT",
"bytes": "305540"
}
],
"symlink_target": ""
}
|
import os
import re
import subprocess
from charmhelpers.core.hookenv import (
log,
INFO,
)
from charmhelpers.contrib.hardening.audits.file import (
FilePermissionAudit,
DirectoryPermissionAudit,
NoReadWriteForOther,
TemplatedFile,
)
from charmhelpers.contrib.hardening.audits.apache import DisabledModuleAudit
from charmhelpers.contrib.hardening.apache import TEMPLATES_DIR
from charmhelpers.contrib.hardening import utils
def get_audits():
"""Get Apache hardening config audits.
    :returns: list of audits
"""
if subprocess.call(['which', 'apache2'], stdout=subprocess.PIPE) != 0:
log("Apache server does not appear to be installed on this node - "
"skipping apache hardening", level=INFO)
return []
context = ApacheConfContext()
settings = utils.get_settings('apache')
audits = [
FilePermissionAudit(paths='/etc/apache2/apache2.conf', user='root',
group='root', mode=0o0640),
TemplatedFile(os.path.join(settings['common']['apache_dir'],
'mods-available/alias.conf'),
context,
TEMPLATES_DIR,
mode=0o0755,
user='root',
service_actions=[{'service': 'apache2',
'actions': ['restart']}]),
TemplatedFile(os.path.join(settings['common']['apache_dir'],
'conf-enabled/hardening.conf'),
context,
TEMPLATES_DIR,
mode=0o0640,
user='root',
service_actions=[{'service': 'apache2',
'actions': ['restart']}]),
DirectoryPermissionAudit(settings['common']['apache_dir'],
user='root',
group='root',
mode=0o640),
DisabledModuleAudit(settings['hardening']['modules_to_disable']),
NoReadWriteForOther(settings['common']['apache_dir']),
]
return audits
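# Hedged sketch of how these audits are typically consumed (the method name
# follows charmhelpers' BaseAudit convention; it is not defined in this module):
#   for audit in get_audits():
#       audit.ensure_compliance()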
class ApacheConfContext(object):
"""Defines the set of key/value pairs to set in a apache config file.
This context, when called, will return a dictionary containing the
key/value pairs of setting to specify in the
/etc/apache/conf-enabled/hardening.conf file.
"""
def __call__(self):
settings = utils.get_settings('apache')
ctxt = settings['hardening']
out = subprocess.check_output(['apache2', '-v'])
ctxt['apache_version'] = re.search(r'.+version: Apache/(.+?)\s.+',
out).group(1)
ctxt['apache_icondir'] = '/usr/share/apache2/icons/'
ctxt['traceenable'] = settings['hardening']['traceenable']
return ctxt
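# Illustrative: template rendering calls the context object to obtain its
# variables (the version value below is hypothetical):
#   ctxt = ApacheConfContext()()
#   ctxt['apache_version']   # e.g. '2.4.18', scraped from `apache2 -v`
#   ctxt['apache_icondir']   # '/usr/share/apache2/icons/'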
|
{
"content_hash": "606841fa75bc81ade909f6edc930b737",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 76,
"avg_line_length": 34.726190476190474,
"alnum_prop": 0.5539938292766541,
"repo_name": "coreycb/charm-keystone",
"id": "51b636f77fd71bcfc4af15bedafdb4a669c016d8",
"size": "3497",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "charmhelpers/contrib/hardening/apache/checks/config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "589"
},
{
"name": "Python",
"bytes": "1174593"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
}
|
"""Support for EnOcean sensors."""
import logging
import voluptuous as vol
from homeassistant.components import enocean
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_DEVICE_CLASS,
CONF_ID,
CONF_NAME,
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_TEMPERATURE,
TEMP_CELSIUS,
POWER_WATT,
)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
CONF_MAX_TEMP = "max_temp"
CONF_MIN_TEMP = "min_temp"
CONF_RANGE_FROM = "range_from"
CONF_RANGE_TO = "range_to"
DEFAULT_NAME = "EnOcean sensor"
DEVICE_CLASS_POWER = "powersensor"
SENSOR_TYPES = {
DEVICE_CLASS_HUMIDITY: {
"name": "Humidity",
"unit": "%",
"icon": "mdi:water-percent",
"class": DEVICE_CLASS_HUMIDITY,
},
DEVICE_CLASS_POWER: {
"name": "Power",
"unit": POWER_WATT,
"icon": "mdi:power-plug",
"class": DEVICE_CLASS_POWER,
},
DEVICE_CLASS_TEMPERATURE: {
"name": "Temperature",
"unit": TEMP_CELSIUS,
"icon": "mdi:thermometer",
"class": DEVICE_CLASS_TEMPERATURE,
},
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_ID): vol.All(cv.ensure_list, [vol.Coerce(int)]),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_DEVICE_CLASS, default=DEVICE_CLASS_POWER): cv.string,
vol.Optional(CONF_MAX_TEMP, default=40): vol.Coerce(int),
vol.Optional(CONF_MIN_TEMP, default=0): vol.Coerce(int),
vol.Optional(CONF_RANGE_FROM, default=255): cv.positive_int,
vol.Optional(CONF_RANGE_TO, default=0): cv.positive_int,
}
)
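# Illustrative configuration.yaml entry for this platform (device id and scales
# are hypothetical; only temperature sensors use the min/max and range options):
#   sensor:
#     - platform: enocean
#       id: [0x01, 0x90, 0x84, 0x5C]
#       name: Living room
#       device_class: temperature
#       min_temp: 0
#       max_temp: 40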
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up an EnOcean sensor device."""
dev_id = config.get(CONF_ID)
dev_name = config.get(CONF_NAME)
dev_class = config.get(CONF_DEVICE_CLASS)
if dev_class == DEVICE_CLASS_TEMPERATURE:
temp_min = config.get(CONF_MIN_TEMP)
temp_max = config.get(CONF_MAX_TEMP)
range_from = config.get(CONF_RANGE_FROM)
range_to = config.get(CONF_RANGE_TO)
add_entities(
[
EnOceanTemperatureSensor(
dev_id, dev_name, temp_min, temp_max, range_from, range_to
)
]
)
elif dev_class == DEVICE_CLASS_HUMIDITY:
add_entities([EnOceanHumiditySensor(dev_id, dev_name)])
elif dev_class == DEVICE_CLASS_POWER:
add_entities([EnOceanPowerSensor(dev_id, dev_name)])
class EnOceanSensor(enocean.EnOceanDevice):
"""Representation of an EnOcean sensor device such as a power meter."""
def __init__(self, dev_id, dev_name, sensor_type):
"""Initialize the EnOcean sensor device."""
super().__init__(dev_id, dev_name)
self._sensor_type = sensor_type
self._device_class = SENSOR_TYPES[self._sensor_type]["class"]
self._dev_name = "{} {}".format(
SENSOR_TYPES[self._sensor_type]["name"], dev_name
)
self._unit_of_measurement = SENSOR_TYPES[self._sensor_type]["unit"]
self._icon = SENSOR_TYPES[self._sensor_type]["icon"]
self._state = None
@property
def name(self):
"""Return the name of the device."""
return self._dev_name
@property
def icon(self):
"""Icon to use in the frontend."""
return self._icon
@property
def device_class(self):
"""Return the device class of the sensor."""
return self._device_class
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return self._unit_of_measurement
def value_changed(self, packet):
"""Update the internal state of the sensor."""
class EnOceanPowerSensor(EnOceanSensor):
"""Representation of an EnOcean power sensor.
EEPs (EnOcean Equipment Profiles):
- A5-12-01 (Automated Meter Reading, Electricity)
"""
def __init__(self, dev_id, dev_name):
"""Initialize the EnOcean power sensor device."""
super().__init__(dev_id, dev_name, DEVICE_CLASS_POWER)
def value_changed(self, packet):
"""Update the internal state of the sensor."""
if packet.rorg != 0xA5:
return
packet.parse_eep(0x12, 0x01)
if packet.parsed["DT"]["raw_value"] == 1:
# this packet reports the current value
raw_val = packet.parsed["MR"]["raw_value"]
divisor = packet.parsed["DIV"]["raw_value"]
self._state = raw_val / (10 ** divisor)
self.schedule_update_ha_state()
class EnOceanTemperatureSensor(EnOceanSensor):
"""Representation of an EnOcean temperature sensor device.
EEPs (EnOcean Equipment Profiles):
- A5-02-01 to A5-02-1B All 8 Bit Temperature Sensors of A5-02
- A5-10-01 to A5-10-14 (Room Operating Panels)
- A5-04-01 (Temp. and Humidity Sensor, Range 0°C to +40°C and 0% to 100%)
- A5-04-02 (Temp. and Humidity Sensor, Range -20°C to +60°C and 0% to 100%)
- A5-10-10 (Temp. and Humidity Sensor and Set Point)
- A5-10-12 (Temp. and Humidity Sensor, Set Point and Occupancy Control)
- 10 Bit Temp. Sensors are not supported (A5-02-20, A5-02-30)
For the following EEPs the scales must be set to "0 to 250":
- A5-04-01
- A5-04-02
- A5-10-10 to A5-10-14
"""
def __init__(self, dev_id, dev_name, scale_min, scale_max, range_from, range_to):
"""Initialize the EnOcean temperature sensor device."""
super().__init__(dev_id, dev_name, DEVICE_CLASS_TEMPERATURE)
self._scale_min = scale_min
self._scale_max = scale_max
self.range_from = range_from
self.range_to = range_to
def value_changed(self, packet):
"""Update the internal state of the sensor."""
if packet.data[0] != 0xA5:
return
temp_scale = self._scale_max - self._scale_min
temp_range = self.range_to - self.range_from
raw_val = packet.data[3]
temperature = temp_scale / temp_range * (raw_val - self.range_from)
temperature += self._scale_min
self._state = round(temperature, 1)
self.schedule_update_ha_state()
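# Worked example of the linear mapping above (hypothetical A5-02-05 device:
# scale 0..40 deg C over raw range 255..0), with raw_val = 128:
#   temp_scale = 40 - 0 = 40,  temp_range = 0 - 255 = -255
#   temperature = 40 / -255 * (128 - 255) + 0 = 19.92 -> rounded to 19.9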
class EnOceanHumiditySensor(EnOceanSensor):
"""Representation of an EnOcean humidity sensor device.
EEPs (EnOcean Equipment Profiles):
- A5-04-01 (Temp. and Humidity Sensor, Range 0°C to +40°C and 0% to 100%)
- A5-04-02 (Temp. and Humidity Sensor, Range -20°C to +60°C and 0% to 100%)
- A5-10-10 to A5-10-14 (Room Operating Panels)
"""
def __init__(self, dev_id, dev_name):
"""Initialize the EnOcean humidity sensor device."""
super().__init__(dev_id, dev_name, DEVICE_CLASS_HUMIDITY)
def value_changed(self, packet):
"""Update the internal state of the sensor."""
if packet.rorg != 0xA5:
return
humidity = packet.data[2] * 100 / 250
self._state = round(humidity, 1)
self.schedule_update_ha_state()
|
{
"content_hash": "54c9979aae9146eca90c22f42ac8091c",
"timestamp": "",
"source": "github",
"line_count": 216,
"max_line_length": 85,
"avg_line_length": 33.199074074074076,
"alnum_prop": 0.6148375400920374,
"repo_name": "joopert/home-assistant",
"id": "2e6b5bdb986da638ee3a69070fffcdad88b1b653",
"size": "7179",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "homeassistant/components/enocean/sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18670593"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
}
|
""" self.run with
python setup.py install; nosetests -v --nocapture tests/cm_basic/test_face.py:Test_face.test_001
nosetests -v --nocapture tests/cm_basic/test_face.py
or
nosetests -v tests/cm_basic/test_face.py
"""
from __future__ import print_function
from cloudmesh_client.common.Shell import Shell
from cloudmesh_client.util import HEADING
from cloudmesh_client.util import banner
from cloudmesh_client.common.dotdict import dotdict
# noinspection PyMethodMayBeStatic,PyMethodMayBeStatic,PyPep8Naming
class Test_face(object):
""" """
data = dotdict({
"name": "ABC"
})
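    # dotdict allows attribute-style access, so the test below can use
    # self.data.name and self.data["name"] interchangeably (both == "ABC").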
def run(self, command):
command = command.format(**self.data)
banner(command, c="-")
print(command)
parameter = command.split(" ")
shell_command = parameter[0]
args = parameter[1:]
result = Shell.execute(shell_command, args)
print(result)
return str(result)
def setup(self):
pass
def test_001(self):
HEADING("simple test")
result = self.run("echo {name}")
        assert self.data.name in result
|
{
"content_hash": "0c0b4ae90844b31c56ce5b73172a6252",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 96,
"avg_line_length": 23.229166666666668,
"alnum_prop": 0.6502242152466368,
"repo_name": "cloudmesh/ansible-cloudmesh-face",
"id": "10da33d252e91e153532a234f16eac4cf6f3058f",
"size": "1115",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_face.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4296"
},
{
"name": "Shell",
"bytes": "6700"
}
],
"symlink_target": ""
}
|
"""IOThomeinspector URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.conf import settings
from django.contrib import admin
from two_factor.admin import AdminSiteOTPRequired
from django.contrib.auth.views import (
logout,
password_reset,
password_reset_done,
password_reset_confirm,
password_reset_complete)
from django.views.static import serve
from django.conf.urls.static import static
from IOThomeinspector.views import HomePageView, team_view
from two_factor.urls import LoginView, SetupView
admin.site.__class__ = AdminSiteOTPRequired
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^$', HomePageView, name='home'),
url(r'', include('two_factor.urls', 'two_factor')),
url(
regex=r'^account/login/$',
view=LoginView.as_view(),
name='login',
),
url(
regex=r'^account/two_factor/setup/$',
view=SetupView.as_view(),
name='setup',
),
url(r'^registration/', include('registration.backends.hmac.urls')),
url(r'^logout/$', logout, name='logout'),
url(r'^profile/', include('userprofile.urls')),
url(r'^user/password/reset/$',
password_reset,
{'post_reset_redirect': '/user/password/reset/done/'},
name="password_reset"),
url(r'^user/password/reset/done/$',
password_reset_done),
url(r'^user/password/reset/(?P<uidb64>[0-9A-Za-z]+)-(?P<token>.+)/$',
password_reset_confirm,
{'post_reset_redirect': '/user/password/done/'},
name='password_reset_confirm'),
url(r'^user/password/done/$',
password_reset_complete),
url(r'^team/$', team_view, name='team')
]
|
{
"content_hash": "e674a577297b5160c109bc1cd4a6b2ed",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 79,
"avg_line_length": 36.285714285714285,
"alnum_prop": 0.6618547681539807,
"repo_name": "rveeblefetzer/IOT-home-inspector",
"id": "7c0f5d7a07bb3682e698cbd022f52f9c41f8e910",
"size": "2286",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "IOThomeinspector/IOThomeinspector/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6415"
},
{
"name": "HTML",
"bytes": "26044"
},
{
"name": "Python",
"bytes": "52128"
}
],
"symlink_target": ""
}
|
import logging
import os
import re
import sys
import time
import liveandletdie
import pytest
import requests
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import (
    NoSuchElementException,
    TimeoutException,
    UnexpectedAlertPresentException,
    WebDriverException,
)
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.ui import WebDriverWait
from authomatic.six.moves.urllib import parse
from tests.functional_tests import fixtures
import constants
from tests.functional_tests import config
requests.packages.urllib3.disable_warnings()
ME = os.path.dirname(__file__)
VIRTUALENV_NAME = os.path.basename(os.environ.get('VIRTUAL_ENV', ''))
LOG_PATH = os.path.join(ME, 'login-py{0}{1}.log'.format(sys.version_info[0],
sys.version_info[1]))
PROJECT_DIR = os.path.abspath(os.path.join(ME, '../..'))
EXAMPLES_DIR = os.path.join(PROJECT_DIR, 'examples')
PROVIDERS = sorted([(k, v) for k, v in fixtures.ASSEMBLED_CONFIG.items()
if k in config.INCLUDE_PROVIDERS])
PROVIDERS_IDS = [k for k, v in PROVIDERS]
PROVIDER_NAME_WIDTH = len(max(PROVIDERS_IDS, key=lambda x: len(x)))
# CHECK_URL = 'https://authomatic.com'
ALL_APPS = {
'Django': liveandletdie.Django(
os.path.join(EXAMPLES_DIR, 'django/functional_test'),
host=config.HOST,
port=config.PORT,
check_url=config.HOST_ALIAS,
),
'Flask': liveandletdie.Flask(
os.path.join(EXAMPLES_DIR, 'flask/functional_test/main.py'),
host=config.HOST,
port=config.PORT,
check_url=config.HOST_ALIAS,
ssl=True,
),
'Pyramid': liveandletdie.WsgirefSimpleServer(
os.path.join(EXAMPLES_DIR, 'pyramid/functional_test/main.py'),
host=config.HOST,
port=config.PORT,
check_url=config.HOST_ALIAS
),
}
APPS = dict((k, v) for k, v in ALL_APPS.items() if
k.lower() in config.INCLUDE_FRAMEWORKS)
file_handler = logging.FileHandler(LOG_PATH, mode='w')
file_handler.setFormatter(logging.Formatter('%(asctime)s %(message)s', '%x %X'))
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logger.addHandler(file_handler)
def teardown_module():
if hasattr(config, 'teardown') and hasattr(config.teardown, '__call__'):
config.teardown()
def log(indent, provider_name, message):
tab_width = 4
logger.info(u'({venv}) {provider: <{padding}}{indent}{message}'.format(
venv=VIRTUALENV_NAME,
provider=provider_name,
padding=PROVIDER_NAME_WIDTH + 3,
indent=u' ' * tab_width * indent,
message=message
))
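# Example of an emitted record (virtualenv and provider names hypothetical):
#   (py27) facebook      Attempt 1
# Provider names are left-padded to the longest name plus three columns, then
# the message is indented four spaces per `indent` level.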
@pytest.fixture('module')
def browser(request):
"""Starts and stops the server for each app in APPS"""
_browser = config.get_browser()
_browser.set_window_size(800, 600)
_browser.set_window_position(1024 - 800 - 10, 40)
# _browser.implicitly_wait(4)
request.addfinalizer(lambda: _browser.quit())
return _browser
@pytest.fixture('module', APPS)
def app(request):
"""Starts and stops the server for each app in APPS"""
_app = APPS[request.param]
_app.name = request.param
try:
# Run the live server.
_app.live(kill_port=True)
except Exception as e:
# Skip test if not started.
        pytest.exit(str(e))
request.addfinalizer(lambda: _app.die())
return _app
def login(request, browser, app, attempt=1):
"""Runs for each provider."""
success = False
provider_name, provider = request.param
log(1, provider_name, 'Attempt {0}'.format(attempt))
def wait(indent, seconds):
seconds = seconds or 0
seconds = seconds * config.WAIT_MULTIPLIER
if seconds < config.MIN_WAIT:
seconds = config.MIN_WAIT
if seconds:
log(indent, provider_name, u'(waiting {0} seconds)'.format(seconds))
# log(0, provider_name, u' waiting {0} seconds '
# .format(seconds).center(60, '#'))
time.sleep(seconds)
def human_interaction_needed(xpath, seconds=0):
log(2, provider_name, 'Checking if human interaction is needed')
try:
wait(2, seconds)
el = browser.find_element_by_xpath(xpath)
if el.is_displayed():
print('Human interaction is needed (captcha or similar)!')
print('Go to the browser, do the interaction and hit "c".')
if os.environ.get('TRAVIS'):
message = ('Human interaction needed, '
'but not possible on Travis CI!')
log(3, provider_name, message)
pytest.fail(message)
return
log(3, provider_name, 'Entering PDB for human interaction')
import pdb; pdb.set_trace()
log(3, provider_name, 'Returned from PDB')
return
except NoSuchElementException:
pass
log(3, provider_name, 'Not needed')
try:
provider['name'] = provider_name
conf = fixtures.get_configuration(provider_name)
# Andy types the login handler url to the address bar.
url = parse.urljoin(app.check_url, 'login/' + provider['_path'])
# Andy authenticates by the provider.
login_url = provider.get('login_url')
login_xpath = provider.get('login_xpath')
password_xpath = provider.get('password_xpath')
pre_login_xpaths = provider.get('pre_login_xpaths')
# Go to login URL to log in
if login_url:
log(2, provider_name, 'Going to login URL: {0}'.format(login_url))
browser.get(login_url)
else:
browser.get(url)
# Handle alerts
try:
alert_wait = provider.get('alert_wait_seconds', 0)
WebDriverWait(browser, alert_wait)\
.until(expected_conditions.alert_is_present())
if alert_wait:
log(2, provider_name, 'Waiting {0} seconds for alert'
.format(alert_wait))
alert = browser.switch_to_alert()
log(2, provider_name, 'Accepting alert: {0}'.format(alert.text))
alert.accept()
except TimeoutException:
pass
# Pause for getting login and password xpaths
if request.config.getoption("--pause"):
log(2, provider_name, 'Pausing to pdb')
import pdb; pdb.set_trace()
if login_xpath:
if pre_login_xpaths:
for xpath in pre_login_xpaths:
log(2, provider_name,
'Finding pre-login element {0}'.format(xpath))
pre_login = browser.find_element_by_xpath(xpath)
log(3, provider_name,
                        'Clicking on pre-login element {0}'.format(xpath))
pre_login.click()
log(2, provider_name, 'Finding login input {0}'.format(login_xpath))
login_element = browser.find_element_by_xpath(login_xpath)
log(3, provider_name, 'Filling out login')
login_element.send_keys(conf.user_login)
enter_after_login_input = provider.get('enter_after_login_input')
if enter_after_login_input:
log(3, provider_name, 'Hitting ENTER after login input')
login_element.send_keys(Keys.ENTER)
hi = provider.get('human_interaction_before_password')
if hi:
human_interaction_needed(*hi)
log(2, provider_name,
'Finding password input {0}'.format(password_xpath))
password_element = browser.find_element_by_xpath(password_xpath)
log(3, provider_name, 'Filling out password')
password_element.send_keys(conf.user_password)
wait(2, provider.get('before_login_enter_wait'))
log(2, provider_name, 'Hitting ENTER')
password_element.send_keys(Keys.ENTER)
wait(2, provider.get('after_login_wait_seconds'))
if login_url:
# Return back from login URL
log(2, provider_name, 'Going back from login URL to: {0}'
.format(url))
browser.get(url)
# Andy authorizes this app to access his protected resources.
consent_xpaths = provider.get('consent_xpaths')
if consent_xpaths:
for xpath in consent_xpaths:
try:
wait(2, provider.get('consent_wait_seconds'))
log(2, provider_name,
'Finding consent button {0}'.format(xpath))
button = browser.find_element_by_xpath(xpath)
log(3, provider_name, 'Clicking consent button')
button.click()
except NoSuchElementException as e:
log(3, provider_name,
'Consent button not found! '
'(provider probably remembers consent)')
wait(2, provider.get('after_consent_wait_seconds'))
try:
log(2, provider_name, 'Finding result element')
browser.find_element_by_id('login-result')
log(3, provider_name, 'Result element found')
success = True
except NoSuchElementException:
log(3, provider_name, 'Result element not found!')
except WebDriverException as e:
if request.config.getoption('--login-error-pdb'):
log(2, provider_name, 'Entering PDB session')
import pdb; pdb.set_trace()
try:
log(2, provider_name,
'Finding result element after error {0}'.format(e.msg))
browser.find_element_by_id('login-result')
log(3, provider_name, 'Result element found')
success = True
except NoSuchElementException:
log(3, provider_name, 'Result element not found!')
if success:
log(0, provider_name, 'SUCCESS')
else:
if attempt < config.MAX_LOGIN_ATTEMPTS:
login(request, browser, app, attempt + 1)
else:
log(1, provider_name,
'Giving up after {0} attempts!'.format(attempt))
# import pdb; pdb.set_trace()
pytest.fail('Login by provider "{0}" failed!'.format(provider_name))
return provider
@pytest.fixture(scope='module', params=PROVIDERS, ids=PROVIDERS_IDS)
def provider(request, browser, app):
provider_name, provider = request.param
logout_url = provider.get('logout_url')
if logout_url:
log(0, provider_name, 'Logging out at {0}'.format(logout_url))
browser.get(logout_url)
cookies = browser.get_cookies()
if cookies:
log(0, provider_name, 'Deleting {0} cookies'.format(len(cookies)))
browser.delete_all_cookies()
log(0, request.param[0], 'Logging in')
return login(request, browser, app)
class Base(object):
def skip_if_openid(self, provider):
if provider.get('openid_identifier'):
pytest.skip("OpenID provider has no credentials.")
class TestCredentials(Base):
@pytest.fixture()
def fixture(self, app, provider, browser):
self.skip_if_openid(provider)
def f(property_name, coerce=None):
id_ = 'original-credentials-{0}'.format(property_name)
value = browser.find_element_by_id(id_).text or None
expected = provider['credentials'][property_name]
if expected is True:
assert value
else:
try:
unicode
except NameError:
class unicode(object): pass
if coerce is not None and isinstance(expected, (str, unicode)):
expected = coerce(expected)
value = coerce(value)
assert value == expected
return f
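    # Reading the fixture above: when the expected value in the provider
    # fixtures is literally True, only presence is asserted; otherwise the
    # rendered page value must equal it (after optional coercion, e.g. float).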
def test_refresh_response(self, fixture):
# status = browser.find_element_by_id('refresh-status').text
# assert status == '200'
fixture('refresh_status')
def test_token_type(self, fixture):
fixture('token_type')
def test_provider_type_id(self, fixture):
fixture('provider_type_id')
def test__expiration_time(self, fixture):
fixture('_expiration_time', float)
def test_consumer_key(self, fixture):
fixture('consumer_key')
def test_provider_id(self, fixture):
fixture('provider_id')
def test_consumer_secret(self, fixture):
fixture('consumer_secret')
def test_token(self, fixture):
fixture('token')
def test_token_secret(self, fixture):
fixture('token_secret')
def test__expire_in(self, fixture):
fixture('_expire_in', float)
def test_provider_name(self, fixture):
fixture('provider_name')
def test_refresh_token(self, fixture):
fixture('refresh_token')
def test_provider_type(self, fixture):
fixture('provider_type')
class TestCredentialsChange(Base):
@pytest.fixture()
def fixture(self, app, provider, browser):
self.skip_if_openid(provider)
refresh_status = browser.find_element_by_id('original-credentials-'
'refresh_status').text
supports_refresh = refresh_status != \
constants.CREDENTIALS_REFRESH_NOT_SUPPORTED
def f(property_name, coerce=None):
if not supports_refresh:
pytest.skip("Doesn't support credentials refresh.")
changed_values = provider.get('credentials_refresh_change')
if not changed_values:
pytest.skip("Credentials refresh values not specified.")
else:
original_id = 'original-credentials-{0}'.format(property_name)
changed_id = 'refreshed-credentials-{0}'.format(property_name)
original_val = browser.find_element_by_id(original_id).text\
or None
changed_val = browser.find_element_by_id(changed_id).text\
or None
if coerce is not None:
original_val = coerce(original_val)
changed_val = coerce(changed_val)
expected = changed_values[property_name]
if expected is not None:
assert (original_val == changed_val) is expected
return f
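    # Reading the fixture above: credentials_refresh_change maps each property
    # to True when a refresh must leave it unchanged, False when it must
    # change, and None when the comparison should be skipped.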
def test_token_type(self, fixture):
fixture('token_type')
def test_provider_type_id(self, fixture):
fixture('provider_type_id')
def test__expiration_time(self, fixture):
fixture('_expiration_time', float)
def test_consumer_key(self, fixture):
fixture('consumer_key')
def test_provider_id(self, fixture):
fixture('provider_id')
def test_consumer_secret(self, fixture):
fixture('consumer_secret')
def test_token(self, fixture):
fixture('token')
def test_token_secret(self, fixture):
fixture('token_secret')
def test__expire_in(self, fixture):
fixture('_expire_in', float)
def test_provider_name(self, fixture):
fixture('provider_name')
def test_refresh_token(self, fixture):
fixture('refresh_token')
def test_provider_type(self, fixture):
fixture('provider_type')
class TestUser(Base):
@pytest.fixture()
def fixture(self, app, provider, browser):
def f(property_name):
value = browser.find_element_by_id(property_name).text or None
expected = provider['user'][property_name]
if isinstance(expected, type(re.compile(''))):
assert expected.match(value)
else:
assert value == expected
return f
def test_id(self, fixture):
fixture('id')
def test_email(self, fixture):
fixture('email')
def test_username(self, fixture):
fixture('username')
def test_name(self, fixture):
fixture('name')
def test_first_name(self, fixture):
fixture('first_name')
def test_last_name(self, fixture):
fixture('last_name')
def test_nickname(self, fixture):
fixture('nickname')
def test_birth_date(self, fixture):
fixture('birth_date')
def test_city(self, fixture):
fixture('city')
def test_country(self, fixture):
fixture('country')
def test_gender(self, fixture):
fixture('gender')
def test_link(self, fixture):
fixture('link')
def test_locale(self, fixture):
fixture('locale')
def test_location(self, fixture):
fixture('location')
def test_phone(self, fixture):
fixture('phone')
def test_picture(self, fixture):
fixture('picture')
def test_postal_code(self, fixture):
fixture('postal_code')
def test_timezone(self, fixture):
fixture('timezone')
def test_content_should_contain(self, app, provider, browser):
self.skip_if_openid(provider)
content = browser.find_element_by_id('content').text
for item in provider['content_should_contain']:
assert item in content
def test_content_should_not_contain(self, app, provider, browser):
self.skip_if_openid(provider)
content = browser.find_element_by_id('content').text.lower()
for item in provider['content_should_not_contain']:
if item:
assert item.lower() not in content
def test_provider_support(self, app, provider):
self.skip_if_openid(provider)
sua = provider['class_'].supported_user_attributes
tested = dict((k, getattr(sua, k)) for k in sua._fields)
        expected = dict((k, bool(v)) for k, v in provider['user'].items() if
                        k != 'content')
assert tested == expected
|
{
"content_hash": "ede0133fb96bbb40c36ca85f6c175b5b",
"timestamp": "",
"source": "github",
"line_count": 563,
"max_line_length": 80,
"avg_line_length": 32.18827708703375,
"alnum_prop": 0.5911599161240482,
"repo_name": "dougchestnut/authomatic",
"id": "6b285942003ba4e4e6d78617a9d9643be9082bc7",
"size": "18141",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "tests/functional_tests/test_providers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "308736"
},
{
"name": "CoffeeScript",
"bytes": "26033"
},
{
"name": "HTML",
"bytes": "15525"
},
{
"name": "JavaScript",
"bytes": "4916"
},
{
"name": "Python",
"bytes": "448568"
},
{
"name": "Ruby",
"bytes": "655"
},
{
"name": "Shell",
"bytes": "1898"
}
],
"symlink_target": ""
}
|
u"""
Send a SOAP request to the :term:`BCSS` server
using Gaëtan Delannay's :term:`appy` toolkit.
test4: Using a class AnyMarshaller and a slightly
modified `xml_parser.py` to support ANY elements.
"""
import sys
from cStringIO import StringIO
import logging
logger = logging.getLogger(__name__)
from appy import Object
from appy.shared.dav import Resource
from appy.shared.xml_parser import XmlUnmarshaller, XmlMarshaller
from xml.dom.minidom import parseString
#~ from django.conf import settings
# simulate a Django `settings` module:
settings = Object(SITE=Object(
bcss_user_params=dict(
UserID='123456',
Email='info@exemple.be',
OrgUnit='0123456',
MatrixID=17,
MatrixSubID=1)))
class AnyMarshaller(XmlMarshaller):
"""
An XmlMarshaller who expects an attribute `_any` on its
root instance which is expected to contain a string to be
written after the other child elements.
"""
fieldsToExclude = ['_any']
def marshallSpecificElements(self, instance, res):
res.write(instance._any)
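# Illustrative use of the `_any` escape hatch: main() below pre-marshals the
# <AllocationRequest> payload with a plain XmlMarshaller, stores the resulting
# string in ssdnReq._any, and lets AnyMarshaller splice it in verbatim after
# the regular child elements of <SSDNRequest>.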
def assert_equivalent(xs1, xs2):
print "xs1: -------------------"
print xs1
print "xs2: -------------------"
print xs2
print "-------------------"
dom1 = parseString(xs1)
dom2 = parseString(xs2)
if dom1 != dom2:
open('s1.xml', 'w').write(xs1)
open('s2.xml', 'w').write(xs2)
sys.exit()
def main():
allocationReq1 = """<ns1:AllocationRequest xmlns:ns1="http://www.smals.be/XSD/OCMW_CPAS/HeatingAllocationRequest" xmlns:com="http://www.smals.be/XSD/OCMW_CPAS/HeatingAllocationDataTypes"><ns1:ComputeAllocation><ns1:PrimaryBeneficiary><ns1:SSIN>67031703807</ns1:SSIN><ns1:Category>1</ns1:Category></ns1:PrimaryBeneficiary><ns1:Invoice><com:Amount>8390100</com:Amount><com:Quantity>1000</com:Quantity><com:HousingNumber>1</com:HousingNumber><com:DeliveryDate>2011-09-19</com:DeliveryDate><com:UnitFuel>3</com:UnitFuel></ns1:Invoice></ns1:ComputeAllocation><ns1:OCMW_CPAS><ns1:KboBceNumber>0212344876</ns1:KboBceNumber></ns1:OCMW_CPAS></ns1:AllocationRequest>"""
ssin = '67031703807'
allocationReq = Object(
ComputeAllocation=Object(
Invoice=Object(
Amount="8390100",
Quantity=1000,
HousingNumber=1,
DeliveryDate="2011-09-19",
UnitFuel=3),
PrimaryBeneficiary=Object(SSIN=ssin, Category="1"),
),
OCMW_CPAS=Object(KboBceNumber='0212344876')
)
ns = dict(
ns1="http://www.smals.be/XSD/OCMW_CPAS/HeatingAllocationRequest",
com="http://www.smals.be/XSD/OCMW_CPAS/HeatingAllocationDataTypes")
nst = dict()
nst.update(AllocationRequest='ns1')
nst.update(ComputeAllocation='ns1')
nst.update(OCMW_CPAS='ns1')
nst.update(KboBceNumber='ns1')
nst.update(Invoice='ns1')
nst.update(Quantity='com')
nst.update(HousingNumber='com')
nst.update(DeliveryDate='com')
nst.update(UnitFuel='com')
m = XmlMarshaller(namespaces=ns, namespacedTags=nst,
dumpXmlPrologue=False, rootTag="AllocationRequest")
allocationReq2 = m.marshall(allocationReq)
#~ assert_equivalent(allocationReq1,allocationReq2)
contenu1 = """<SSDNRequest xmlns="http://www.ksz-bcss.fgov.be/XSD/SSDN/Service">
<RequestContext><AuthorizedUser><UserID>00901732883</UserID><Email>info@oshz.eupen.net</Email><OrgUnit>0212344876</OrgUnit><MatrixID>17</MatrixID><MatrixSubID>1</MatrixSubID></AuthorizedUser><Message><Reference>630230001126766</Reference><TimeRequest>20110921T105230</TimeRequest></Message></RequestContext><ServiceRequest><ServiceId>OCMWCPASHeatingAllocation</ServiceId><Version>20090409</Version>
%s
</ServiceRequest></SSDNRequest>""" % allocationReq1
ssdnReq = Object(
RequestContext=Object(
AuthorizedUser=Object(**settings.SITE.bcss_user_params),
Message=Object(Reference='630230001126766',
TimeRequest='20110921T105230')
),
ServiceRequest=Object(
ServiceId="OCMWCPASHeatingAllocation",
Version="20090409"
),
_any=allocationReq2
)
m = AnyMarshaller(dumpXmlPrologue=False,
rootTag='SSDNRequest',
defaultNamespace="http://www.ksz-bcss.fgov.be/XSD/SSDN/Service")
contenu2 = m.marshall(ssdnReq)
assert_equivalent(contenu1, contenu2)
body = Object(
#~ xmlString="<![CDATA[%s]]>" % contenu)
xmlString=contenu2)
raise Exception("ok jusqu'ici")
server = Resource(
'https://bcssksz-services-test.smals.be/connectors/webservice/KSZBCSSWebServiceConnectorPort', measure=True)
res = server.soap(
body, namespace="http://ksz-bcss.fgov.be/connectors/WebServiceConnector")
print res.code
print res.data
s = str(res.data.xmlString)
#~ s = res.data.xmlString.replace('"UTF-8"','"utf-8"')
#~ s = s.replace('?>','?>\n')
print s
reply = XmlUnmarshaller().parse(s)
import pdb
pdb.set_trace()
if __name__ == '__main__':
main()
|
{
"content_hash": "b3c47e5e318528a5bf0e4aa57716c550",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 663,
"avg_line_length": 34.25333333333333,
"alnum_prop": 0.6584274036590113,
"repo_name": "lino-framework/lino",
"id": "60cfc184ae0bba372e56343b3b4f9f76913b249e",
"size": "5163",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "lino/sandbox/bcss/test4.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "704"
},
{
"name": "CSS",
"bytes": "1281825"
},
{
"name": "Emacs Lisp",
"bytes": "277895"
},
{
"name": "HTML",
"bytes": "928037"
},
{
"name": "Hack",
"bytes": "3416"
},
{
"name": "JavaScript",
"bytes": "1128493"
},
{
"name": "PHP",
"bytes": "53997"
},
{
"name": "Python",
"bytes": "2601694"
},
{
"name": "Shell",
"bytes": "4469"
},
{
"name": "TSQL",
"bytes": "2427"
}
],
"symlink_target": ""
}
|
import json
import os
from django.conf import settings
from django.core.files.storage import default_storage as storage
from mock import patch
from nose.tools import eq_
from pyquery import PyQuery as pq
import amo
import amo.tests
from amo.tests import assert_required, formset, initial
from amo.urlresolvers import reverse
from addons.models import BlacklistedSlug
from applications.models import AppVersion
from devhub.views import packager_path
class TestPackager(amo.tests.TestCase):
fixtures = ['base/apps', 'base/users', 'base/appversion',
'base/addon_3615']
def setUp(self):
self.url = reverse('devhub.package_addon')
ctx = self.client.get(self.url).context['compat_forms']
self.compat_form = initial(ctx.initial_forms[1])
def _form_data(self, data={}, compat_forms=None):
"""Build the initial data set for the form."""
initial_data = {'author_name': 'author',
'contributors': '',
'description': '',
'name': 'My Addon',
'package_name': 'my_addon',
'id': 'foo@bar.com',
'version': '1.2.3'}
if not compat_forms:
compat_forms = [self.compat_form]
initial_data.update(formset(*compat_forms))
if data:
initial_data.update(data)
return initial_data
def test_login_optional(self):
eq_(self.client.get(self.url).status_code, 200)
self.client.login(username='regular@mozilla.com', password='password')
eq_(self.client.get(self.url).status_code, 200)
def test_form_initial(self):
"""Ensure that the initial forms for each application are present."""
r = self.client.get(self.url)
eq_(r.status_code, 200)
rows = pq(r.content)('#supported-apps li.row')
classes = [a.short for a in amo.APP_USAGE]
eq_(rows.length, len(classes))
for app_class, label in zip(classes, rows('label.app')):
assert pq(label).hasClass(app_class), (
'Label for application %r not found' % app_class)
def test_success(self):
"""
Test that a proper set of data will pass validation, pass through
to the success view, and check if the .zip file exists.
"""
self.compat_form = {'enabled': 'on', 'min_ver': '86', 'max_ver': '114'}
data = self._form_data()
pkg_name = data['package_name']
r = self.client.post(self.url, data, follow=True)
self.assertRedirects(r, reverse('devhub.package_addon_success',
args=[pkg_name]), 302)
eq_(r.status_code, 200)
d = pq(r.content)('#packager-download')
eq_(d.attr('data-downloadurl'),
reverse('devhub.package_addon_json', args=[pkg_name]))
assert storage.exists(packager_path(pkg_name)), (
'Package was not created.')
pkg = self.client.get(reverse('devhub.package_addon_download',
args=[pkg_name]))
eq_(pkg.status_code, 200)
eq_(pkg['content-type'], 'application/zip')
eq_(pkg[settings.XSENDFILE_HEADER], packager_path(pkg_name))
def test_name_required(self):
r = self.client.post(self.url, self._form_data({'package_name': ''}))
self.assertFormError(r, 'basic_form', 'package_name',
'This field is required.')
def test_name_trademarks(self):
"""Test that the add-on name cannot contain Mozilla trademarks."""
r = self.client.post(self.url, self._form_data({'name': 'Mozilla <3'}))
self.assertFormError(r, 'basic_form', 'name',
'Add-on names should not contain Mozilla trademarks.')
def test_name_taken(self):
"""Test that the add-on name is not already taken."""
data = self._form_data({'name': 'Delicious Bookmarks'})
r = self.client.post(self.url, data)
self.assertFormError(r, 'basic_form', 'name',
'This name is already in use. Please choose another.')
def test_name_minlength(self):
data = self._form_data({'name': 'abcd'})
r = self.client.post(self.url, data)
self.assertFormError(r, 'basic_form', 'name',
'Ensure this value has at least 5 characters (it has 4).')
def test_name_maxlength(self):
data = self._form_data({'name': 'x' * 51})
r = self.client.post(self.url, data)
self.assertFormError(r, 'basic_form', 'name',
'Ensure this value has at most 50 characters (it has 51).')
def test_package_name_required(self):
r = self.client.post(self.url, self._form_data({'package_name': ''}))
self.assertFormError(r, 'basic_form', 'package_name',
'This field is required.')
def test_package_name_minlength(self):
data = self._form_data({'package_name': 'abcd'})
r = self.client.post(self.url, data)
self.assertFormError(r, 'basic_form', 'package_name',
'Ensure this value has at least 5 characters (it has 4).')
def test_package_name_maxlength(self):
data = self._form_data({'package_name': 'x' * 51})
r = self.client.post(self.url, data)
self.assertFormError(r, 'basic_form', 'package_name',
'Ensure this value has at most 50 characters (it has 51).')
def test_package_name_format(self):
error = ('Enter a valid package name consisting of letters, numbers, '
'or underscores.')
r = self.client.post(self.url,
self._form_data({'package_name': 'addon name'}))
self.assertFormError(r, 'basic_form', 'package_name', error)
r = self.client.post(self.url,
self._form_data({'package_name': 'addon-name'}))
self.assertFormError(r, 'basic_form', 'package_name', error)
def test_package_name_uppercase(self):
r = self.client.post(self.url,
self._form_data({'package_name': 'ADDON_NAME'}))
eq_(r.context['basic_form'].errors, {})
def test_package_name_taken(self):
r = self.client.post(self.url,
self._form_data({'package_name': 'a3615'}))
self.assertFormError(r, 'basic_form', 'package_name',
'This package name is already in use.')
def test_package_name_blacklisted(self):
BlacklistedSlug.objects.create(name='slap_tickle')
r = self.client.post(self.url,
self._form_data({'package_name': 'slap_tickle'}))
self.assertFormError(r, 'basic_form', 'package_name',
'The package name cannot be: slap_tickle.')
def test_version(self):
"""Test that the add-on version is properly validated."""
r = self.client.post(self.url,
self._form_data({'version': 'invalid version'}))
self.assertFormError(r, 'basic_form', 'version',
'The version string is invalid.')
def test_id(self):
"""Test that the add-on id is properly validated."""
r = self.client.post(self.url, self._form_data({'id': 'invalid id'}))
self.assertFormError(
r, 'basic_form', 'id',
'The add-on ID must be a UUID string or an email address.')
def test_firefox_required(self):
"""Ensure that at least one target application is required."""
self.compat_form = {}
r = self.client.post(self.url, self._form_data())
eq_(r.context['compat_forms'].non_form_errors(),
['Firefox is a required target application.'])
def test_enabled_apps_version_required(self):
"""Min/Max Version fields should be required for enabled apps."""
forms = [self.compat_form, {'enabled': 'on'}]
r = self.client.post(self.url, self._form_data(compat_forms=forms))
assert_required(r.context['compat_forms'].errors[1]['min_ver'][0])
assert_required(r.context['compat_forms'].errors[1]['max_ver'][0])
def test_version_order(self):
"""Test that the min version is lte the max version."""
self.compat_form['enabled'] = 'on'
self.compat_form['min_ver'] = '114'
self.compat_form['max_ver'] = '86'
r = self.client.post(self.url, self._form_data())
eq_(r.context['compat_forms'].errors[0]['__all__'][0],
'Min version must be less than Max version.')
@patch.object(amo, 'DEFAULT_MINVER', '3.6')
def test_default_firefox_minver(self):
eq_(len(AppVersion.objects.filter(application__id=amo.FIREFOX.id,
version='3.6')), 1)
r = self.client.get(self.url)
eq_(r.status_code, 200)
s = pq(r.content)('select#id_form-0-min_ver option[selected]').text()
eq_(s, '3.6')
@patch.object(amo, 'DEFAULT_MINVER', '999.0')
def test_no_default_firefox_minver(self):
r = self.client.get(self.url)
eq_(r.status_code, 200)
s = pq(r.content)('select#id_form-0-min_ver option[selected]').text()
assert s != '3.6', (
            'The Firefox minVer default should not be set for an unknown version.')
@patch.object(amo, 'DEFAULT_MINVER', '3.6')
def test_no_default_firefox_minver_on_post(self):
self.compat_form['min_ver'] = '114'
r = self.client.post(self.url, self._form_data())
s = pq(r.content)('select#id_form-0-min_ver option[selected]').text()
assert s != '3.6', (
'The Firefox minVer default should not be set on POST.')
class TestPackagerDownload(amo.tests.TestCase):
fixtures = ['base/apps', 'base/users', 'base/appversion',
'base/addon_3615']
def setUp(self):
self.url = lambda f: reverse('devhub.package_addon_json', args=[f])
def _prep_mock_package(self, name):
"""Prep a fake package to be downloaded."""
path = packager_path(name)
with storage.open(path, mode='w') as package:
package.write('ready')
return path
def _unprep_package(self, name):
package = packager_path(name)
if storage.exists(package):
storage.delete(package)
def test_package_pending(self):
"""
Test that an unavailable message is returned when the file isn't ready
to be downloaded yet.
"""
self._unprep_package('foobar')
r = self.client.get(self.url('foobar'))
# Ensure a deleted file returns an empty message.
eq_(r.content, 'null')
def test_package_success(self):
"""Ensure a completed file returns the file data."""
dst = self._prep_mock_package('foobar')
r = self.client.get(self.url('foobar'))
data = json.loads(r.content)
# Size in kB.
eq_(data['size'], round(storage.open(dst).size / 1024, 1))
eq_(data['filename'], os.path.basename(dst))
eq_(data['download_url'], reverse('devhub.package_addon_download',
args=['foobar']))
assert data['download_url'].endswith('.zip'), (
'Expected filename to end with .zip.')
pkg = self.client.get(data['download_url'])
eq_(pkg.status_code, 200)
eq_(pkg['content-type'], 'application/zip')
eq_(pkg[settings.XSENDFILE_HEADER], dst)
self._unprep_package('foobar')
def test_login_optional(self):
self._prep_mock_package('foobar')
url = self.url('foobar')
eq_(self.client.get(url).status_code, 200)
self.client.login(username='regular@mozilla.com', password='password')
eq_(self.client.get(url).status_code, 200)
self._unprep_package('foobar')
|
{
"content_hash": "7fb4e85b944c8206ac82bdfd3d0b7dc5",
"timestamp": "",
"source": "github",
"line_count": 286,
"max_line_length": 79,
"avg_line_length": 41.48601398601399,
"alnum_prop": 0.5808680994521702,
"repo_name": "clouserw/olympia",
"id": "d735f0763d6f0a7251e6588091bf7ce0d4a8486b",
"size": "11865",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "apps/devhub/tests/test_views_packager.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
""" Rally command: info
Samples:
$ rally info find create_meter_and_get_stats
CeilometerStats.create_meter_and_get_stats (benchmark scenario).
Test creating a meter and fetching its statistics.
Meter is first created and then statistics is fetched for the same
using GET /v2/meters/(meter_name)/statistics.
Parameters:
- name_length: length of generated (random) part of meter name
- kwargs: contains optional arguments to create a meter
$ rally info find Authenticate
Authenticate (benchmark scenario group).
This class should contain authentication mechanism.
Benchmark scenarios:
---------------------------------------------------------
Name Description
---------------------------------------------------------
Authenticate.keystone
Authenticate.validate_cinder Check Cinder Client ...
Authenticate.validate_glance Check Glance Client ...
Authenticate.validate_heat Check Heat Client ...
$ rally info find some_non_existing_benchmark
Failed to find any docs for query: 'some_non_existing_benchmark'
"""
from __future__ import print_function
import inspect
from rally.cli import cliutils
from rally.common.plugin import discover
from rally.common import utils
from rally.deployment import engine
from rally.deployment.serverprovider import provider
from rally import exceptions
from rally import plugins
from rally.task import scenario
from rally.task import sla
class InfoCommands(object):
"""This command allows you to get quick doc of some rally entities.
Available for scenario groups, scenarios, SLA, deploy engines and
server providers.
Usage:
$ rally info find <query>
To get information about main concepts of Rally as well as to list entities
you can query docs for, type one of the following:
$ rally info BenchmarkScenarios
$ rally info SLA
$ rally info DeploymentEngines
$ rally info ServerProviders
"""
@cliutils.args("--query", dest="query", type=str, help="Search query.")
@plugins.ensure_plugins_are_loaded
def find(self, query):
"""Search for an entity that matches the query and print info about it.
:param query: search query.
"""
info = self._find_info(query)
if info:
print(info)
else:
substitutions = self._find_substitution(query)
if len(substitutions) == 1:
print(self._find_info(substitutions[0]))
else:
print("Failed to find any docs for query: '%s'" % query)
if substitutions:
print("Did you mean one of these?\n\t%s" %
"\n\t".join(substitutions))
return 1
@plugins.ensure_plugins_are_loaded
def list(self):
"""List main entities in Rally for which rally info find works.
Lists benchmark scenario groups, deploy engines and server providers.
"""
self.BenchmarkScenarios()
self.SLA()
self.DeploymentEngines()
self.ServerProviders()
@plugins.ensure_plugins_are_loaded
def BenchmarkScenarios(self):
"""Get information about benchmark scenarios available in Rally."""
def scenarios_filter(scenario_cls):
return any(scenario.Scenario.is_scenario(scenario_cls, m)
for m in dir(scenario_cls))
scenarios = self._get_descriptions(scenario.Scenario, scenarios_filter)
info = (self._make_header("Rally - Benchmark scenarios") +
"\n\n"
"Benchmark scenarios are what Rally actually uses to test "
"the performance of an OpenStack deployment.\nEach Benchmark "
"scenario implements a sequence of atomic operations "
"(server calls) to simulate\ninteresing user/operator/"
"client activity in some typical use case, usually that of "
"a specific OpenStack\nproject. Iterative execution of this "
"sequence produces some kind of load on the target cloud.\n"
"Benchmark scenarios play the role of building blocks in "
"benchmark task configuration files."
"\n\n"
"Scenarios in Rally are put together in groups. Each "
"scenario group is concentrated on some specific \nOpenStack "
"functionality. For example, the 'NovaServers' scenario "
"group contains scenarios that employ\nseveral basic "
"operations available in Nova."
"\n\n" +
self._compose_table("List of Benchmark scenario groups",
scenarios) +
"To get information about benchmark scenarios inside "
"each scenario group, run:\n"
" $ rally info find <ScenarioGroupName>\n\n")
print(info)
@plugins.ensure_plugins_are_loaded
def SLA(self):
"""Get information about SLA available in Rally."""
sla_descrs = self._get_descriptions(sla.SLA)
# NOTE(msdubov): Add config option names to the "Name" column
for i in range(len(sla_descrs)):
description = sla_descrs[i]
sla_cls = sla.SLA.get(description[0])
sla_descrs[i] = (sla_cls.get_name(), description[1])
info = (self._make_header("Rally - SLA checks "
"(Service-Level Agreements)") +
"\n\n"
"SLA in Rally enable quick and easy checks of "
"whether the results of a particular\nbenchmark task have "
"passed certain success criteria."
"\n\n"
"SLA checks can be configured in the 'sla' section of "
"benchmark task configuration\nfiles, used to launch new "
"tasks by the 'rally task start <config_file>' command.\n"
"For each SLA check you would like to use, you should put "
"its name as a key and the\ntarget check parameter as an "
"associated value, e.g.:\n\n"
" sla:\n"
" max_seconds_per_iteration: 4\n"
" failure_rate:\n"
" max: 1"
"\n\n" +
self._compose_table("List of SLA checks", sla_descrs) +
"To get information about specific SLA checks, run:\n"
" $ rally info find <sla_check_name>\n")
print(info)
@plugins.ensure_plugins_are_loaded
def DeploymentEngines(self):
"""Get information about deploy engines available in Rally."""
engines = self._get_descriptions(engine.Engine)
info = (self._make_header("Rally - Deployment engines") +
"\n\n"
"Rally is an OpenStack benchmarking system. Before starting "
"benchmarking with Rally,\nyou obviously have either to "
"deploy a new OpenStack cloud or to register an existing\n"
"one in Rally. Deployment engines in Rally are essentially "
"plugins that control the\nprocess of deploying some "
"OpenStack distribution, say, with DevStack or FUEL, and\n"
"register these deployments in Rally before any benchmarking "
"procedures against them\ncan take place."
"\n\n"
"A typical use case in Rally would be when you first "
"register a deployment using the\n'rally deployment create' "
"command and then reference this deployment by uuid "
"when\nstarting a benchmark task with 'rally task start'. "
"The 'rally deployment create'\ncommand awaits a deployment "
"configuration file as its parameter. This file may look "
"like:\n"
"{\n"
" \"type\": \"ExistingCloud\",\n"
" \"auth_url\": \"http://example.net:5000/v2.0/\",\n"
" \"admin\": { <credentials> },\n"
" ...\n"
"}"
"\n\n" +
self._compose_table("List of Deployment engines", engines) +
"To get information about specific Deployment engines, run:\n"
" $ rally info find <DeploymentEngineName>\n")
print(info)
@plugins.ensure_plugins_are_loaded
def ServerProviders(self):
"""Get information about server providers available in Rally."""
providers = self._get_descriptions(provider.ProviderFactory)
info = (self._make_header("Rally - Server providers") +
"\n\n"
"Rally is an OpenStack benchmarking system. Before starting "
"benchmarking with Rally,\nyou obviously have either to "
"deploy a new OpenStack cloud or to register an existing\n"
"one in Rally with one of the Deployment engines. These "
"deployment engines, in turn,\nmay need Server "
"providers to manage virtual machines used for "
"OpenStack deployment\nand its following benchmarking. The "
"key feature of server providers is that they\nprovide a "
"unified interface for interacting with different "
"virtualization\ntechnologies (LXS, Virsh etc.)."
"\n\n"
"Server providers are usually referenced in deployment "
"configuration files\npassed to the 'rally deployment create'"
" command, e.g.:\n"
"{\n"
" \"type\": \"DevstackEngine\",\n"
" \"provider\": {\n"
" \"type\": \"ExistingServers\",\n"
" \"credentials\": [{\"user\": \"root\",\n"
" \"host\": \"10.2.0.8\"}]\n"
" }\n"
"}"
"\n\n" +
self._compose_table("List of Server providers", providers) +
"To get information about specific Server providers, run:\n"
" $ rally info find <ServerProviderName>\n")
print(info)
def _get_descriptions(self, base_cls, subclass_filter=None):
descriptions = []
subclasses = discover.itersubclasses(base_cls)
if subclass_filter:
subclasses = filter(subclass_filter, subclasses)
for entity in subclasses:
name = entity.get_name()
doc = utils.parse_docstring(entity.__doc__)
description = doc["short_description"] or ""
descriptions.append((name, description))
descriptions.sort(key=lambda d: d[0])
return descriptions
def _find_info(self, query):
return (self._get_scenario_group_info(query) or
self._get_scenario_info(query) or
self._get_sla_info(query) or
self._get_deploy_engine_info(query) or
self._get_server_provider_info(query))
def _find_substitution(self, query):
max_distance = min(3, len(query) / 4)
scenarios = scenario.Scenario.list_benchmark_scenarios()
scenario_groups = list(set(s.split(".")[0] for s in scenarios))
scenario_methods = list(set(s.split(".")[1] for s in scenarios))
sla_info = [cls.get_name() for cls in sla.SLA.get_all()]
deploy_engines = [cls.get_name() for cls in
engine.Engine.get_all()]
server_providers = [cls.get_name() for cls in
provider.ProviderFactory.get_all()]
candidates = (scenarios + scenario_groups + scenario_methods +
sla_info + deploy_engines + server_providers)
suggestions = []
# NOTE(msdubov): Incorrect query may either have typos or be truncated.
for candidate in candidates:
if ((utils.distance(query, candidate) <= max_distance or
candidate.startswith(query))):
suggestions.append(candidate)
return suggestions
def _get_scenario_group_info(self, query):
try:
scenario_group = scenario.Scenario.get_by_name(query)
if not any(scenario.Scenario.is_scenario(scenario_group, m)
for m in dir(scenario_group)):
return None
info = self._make_header("%s (benchmark scenario group)" %
scenario_group.get_name())
info += "\n\n"
info += utils.format_docstring(scenario_group.__doc__)
scenarios = scenario_group.list_benchmark_scenarios()
descriptions = []
for scenario_name in scenarios:
cls, method_name = scenario_name.split(".")
if hasattr(scenario_group, method_name):
scenario_inst = getattr(scenario_group, method_name)
doc = utils.parse_docstring(scenario_inst.__doc__)
descr = doc["short_description"] or ""
descriptions.append((scenario_name, descr))
info += self._compose_table("Benchmark scenarios", descriptions)
return info
except exceptions.NoSuchScenario:
return None
def _get_scenario_info(self, query):
try:
scenario_inst = scenario.Scenario.get_scenario_by_name(query)
scenario_gr_name = utils.get_method_class(scenario_inst).get_name()
header = ("%(scenario_group)s.%(scenario_name)s "
"(benchmark scenario)" %
{"scenario_group": scenario_gr_name,
"scenario_name": scenario_inst.__name__})
info = self._make_header(header)
info += "\n\n"
doc = utils.parse_docstring(scenario_inst.__doc__)
if not doc["short_description"]:
return None
info += doc["short_description"] + "\n\n"
if doc["long_description"]:
info += doc["long_description"] + "\n\n"
if doc["params"]:
args = inspect.getargspec(scenario_inst)
if args.defaults:
default_values = dict(zip(args.args[-len(args.defaults):],
args.defaults))
else:
default_values = {}
info += "Parameters:\n"
for param in doc["params"]:
info += " - %(name)s: %(doc)s" % param
name = param["name"]
if name in default_values:
if default_values[name] is not None:
info += " [Default: %s]" % default_values[name]
else:
info += " [optional]"
info += "\n"
if doc["returns"]:
info += "Returns: %s" % doc["returns"]
return info
except exceptions.NoSuchScenario:
return None
def _get_sla_info(self, query):
try:
found_sla = sla.SLA.get(query)
header = "%s (SLA)" % found_sla.get_name()
info = self._make_header(header)
info += "\n\n"
info += utils.format_docstring(found_sla.__doc__) + "\n"
return info
except exceptions.PluginNotFound:
return None
def _get_deploy_engine_info(self, query):
try:
deploy_engine = engine.Engine.get(query)
header = "%s (deploy engine)" % deploy_engine.get_name()
info = self._make_header(header)
info += "\n\n"
info += utils.format_docstring(deploy_engine.__doc__)
return info
except exceptions.PluginNotFound:
return None
def _get_server_provider_info(self, query):
try:
server_provider = provider.ProviderFactory.get(query)
header = "%s (server provider)" % server_provider.get_name()
info = self._make_header(header)
info += "\n\n"
info += utils.format_docstring(server_provider.__doc__)
return info
except exceptions.PluginNotFound:
return None
def _make_header(self, string):
header = "-" * (len(string) + 2) + "\n"
header += " " + string + " \n"
header += "-" * (len(string) + 2)
return header
def _compose_table(self, title, descriptions):
table = " " + title + ":\n"
        first_column_len = max(len(d[0]) for d in descriptions) + cliutils.MARGIN
        second_column_len = max(len(d[1]) for d in descriptions) + cliutils.MARGIN
table += "-" * (first_column_len + second_column_len + 1) + "\n"
table += (" Name" + " " * (first_column_len - len("Name")) +
"Description\n")
table += "-" * (first_column_len + second_column_len + 1) + "\n"
for (name, descr) in descriptions:
table += " " + name
table += " " * (first_column_len - len(name))
table += descr + "\n"
table += "-" * (first_column_len + second_column_len + 1) + "\n"
table += "\n"
return table
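# Minimal usage sketch (an illustration, not part of the original module),
# assuming Rally and its plugins are importable: the CLI entry points above
# can also be driven directly from Python.
if __name__ == "__main__":
    commands = InfoCommands()
    # Equivalent to `$ rally info find Authenticate` on the command line.
    commands.find("Authenticate")
    # Equivalent to `$ rally info BenchmarkScenarios`.
    commands.BenchmarkScenarios()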
|
{
"content_hash": "ec030b912f8d057f6b166680a6cdd16d",
"timestamp": "",
"source": "github",
"line_count": 393,
"max_line_length": 79,
"avg_line_length": 44.478371501272264,
"alnum_prop": 0.5504004576659038,
"repo_name": "go-bears/rally",
"id": "8e9fbe72be5b89817e0304fb3a79d44d8ffb3665",
"size": "18110",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rally/cli/commands/info.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "46737"
},
{
"name": "Python",
"bytes": "2552449"
},
{
"name": "Shell",
"bytes": "43329"
}
],
"symlink_target": ""
}
|
from abc import ABCMeta, abstractmethod
from os import path
import six
import abc
import json
import logging
@six.add_metaclass(abc.ABCMeta)
class Parser(object):
def __init__(self, config):
self.logger = logging.getLogger('parser')
if config is None:
raise Exception("[Parser]: json config not found on filesystem")
        # Expand a leading "~/" in each configured candidate path and keep
        # only the paths that actually exist on the filesystem.
        cf = [path.expanduser(config[key]) for key in config.keys()]
        cfiles = [candidate for candidate in cf if path.exists(candidate)]
        if not cfiles:
raise Exception("Json config not found on filesystem")
self.config = cfiles[0]
self.logger.info("[Parser] Processing config file: [%s]" % self.config)
try:
with open(self.config, 'r') as f:
self.raw_json = json.load(f)
except Exception as js_except:
self.logger.error("Parser.__init__(): [%s]" % js_except.__str__())
raise js_except
@abstractmethod
def parse(self):
        raise NotImplementedError("[ERR] Base Class - You have to derive and implement your own specialized version.")
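# Illustrative sketch (not part of the original module): a concrete parser
# implements parse() and inherits the config-resolution logic above. The
# class name and config path below are hypothetical.
class ExampleLayoutParser(Parser):
    def parse(self):
        # self.raw_json was loaded by Parser.__init__ from the first
        # configured file that exists on disk.
        return self.raw_json
# Hypothetical usage:
#   parser = ExampleLayoutParser({"default": "~/.config/tmux-layout/layout.json"})
#   layout = parser.parse()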
|
{
"content_hash": "9d0ec0ce8fa98eb3a9bb5eb856122581",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 116,
"avg_line_length": 28.520833333333332,
"alnum_prop": 0.6530314097881665,
"repo_name": "fmount/tmux-layout",
"id": "8b1d6e428bab2ac54a78d3a03db61f0716e681bb",
"size": "1994",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/menu/utils/parser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7301"
},
{
"name": "Shell",
"bytes": "466"
}
],
"symlink_target": ""
}
|
"""
A selection of names to be used in tested documents.
"""
import six # TODO: remove here and then run all doctests
import django
django.setup()
from lino.api.shell import *
from django.utils import translation
from django.utils.encoding import force_str
from django.test import Client
from django.db import connection, reset_queries as reset_sql_queries
import json
from bs4 import BeautifulSoup
from rstgen import table
import rstgen
from rstgen import attrtable
from atelier.utils import unindent, rmu, sixprint
from lino.utils import AttrDict
from lino.utils import i2d
from etgen.html import E, tostring, to_rst
from lino.utils.diag import analyzer
from lino.utils import diag
from lino.utils.sql import sql_summary
from lino.core import actors, kernel
from lino.core.menus import find_menu_item
from lino.sphinxcontrib.actordoc import menuselection_text
from pprint import pprint
from lino.utils.diag import visible_for
from lino.core.utils import full_model_name
from lino.core.utils import PseudoRequest
from lino.core.site import html2text
from lino.core.menus import Menu
from lino.core.actions import ShowTable
test_client = Client()
# naming it simply "client" caused conflict with a
# `lino_welfare.pcsw.models.Client`
import collections
HttpQuery = collections.namedtuple(
'HttpQuery',
['username', 'url_base', 'json_fields', 'expected_rows', 'kwargs'])
settings.SITE.is_testing = True
def get_json_dict(username, uri, an='detail', **kwargs):
url = '/api/{0}?fmt=json&an={1}'.format(uri, an)
for k, v in kwargs.items():
url += "&{}={}".format(k, v)
test_client.force_login(rt.login(username).user)
res = test_client.get(url, REMOTE_USER=username)
assert res.status_code == 200
return json.loads(res.content.decode())
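# Hedged doctest sketch (an illustration, not from the original module); the
# username and actor URI below are hypothetical example values:
#
# >>> d = get_json_dict('robin', 'users/AllUsers/1')  # doctest: +SKIP
# >>> 'data' in d  # doctest: +SKIP
# True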
def get_json_soup(username, uri, fieldname, **kwargs):
"""Being authentified as `username`, perform a web request to `uri` of
the test client.
"""
d = get_json_dict(username, uri, **kwargs)
html = d['data'][fieldname]
return BeautifulSoup(html, 'lxml')
def post_json_dict(username, url, data, **extra):
"""Send a POST with given username, url and data. The client is
expected to respond with a JSON encoded response. Parse the
response's content (which is expected to contain a dict), convert
this dict to an AttrDict before returning it.
"""
test_client.force_login(rt.login(username).user)
res = test_client.post(url, data, REMOTE_USER=username, **extra)
if res.status_code != 200:
raise Exception("{} gave status code {} instead of 200".format(
url, res.status_code))
return AttrDict(json.loads(res.content.decode()))
def check_json_result(response, expected_keys=None, msg=''):
"""Checks the result of response which is expected to return a
JSON-encoded dictionary with the expected_keys.
"""
# print("20150129 response is %r" % response.content)
if response.status_code != 200:
raise Exception(
"Response status ({0}) was {1} instead of 200".format(
msg, response.status_code))
try:
result = json.loads(response.content.decode())
except ValueError as e:
raise Exception("{0} in {1}".format(e, response.content))
if expected_keys is not None:
if set(result.keys()) != set(expected_keys.split()):
raise Exception("'{0}' != '{1}'".format(
' '.join(list(result.keys())), expected_keys))
return result
def demo_get(
username, url_base, json_fields=None,
expected_rows=None, **kwargs):
case = HttpQuery(username, url_base, json_fields,
expected_rows, kwargs)
# Django test client does not like future pseudo-unicode strings
# See #870
url = str(settings.SITE.buildurl(case.url_base, **case.kwargs))
# print(20160329, url)
if False:
msg = 'Using remote authentication, but no user credentials found.'
try:
response = test_client.get(url)
raise Exception("Expected '%s'" % msg)
except Exception:
pass
#~ self.tc.assertEqual(str(e),msg)
#~ if str(e) != msg:
#~ raise Exception("Expected %r but got %r" % (msg,str(e)))
if False:
# removed 20161202 because (1) it was relatively useless and
# (2) caused a PermissionDenied warning
response = test_client.get(url, REMOTE_USER=str('foo'))
if response.status_code != 403:
raise Exception(
"Status code %s other than 403 for anonymous on GET %s" % (
response.status_code, url))
    ses = rt.login(username)
test_client.force_login(ses.user)
response = test_client.get(url, REMOTE_USER=username)
# try:
if True:
# user = settings.SITE.user_model.objects.get(
# username=case.username)
result = check_json_result(
response, case.json_fields,
"GET %s for user %s" % (url, ses.user))
num = case.expected_rows
if num is not None:
if not isinstance(num, tuple):
num = [num]
if result['count'] not in num:
msg = "%s got %s rows instead of %s" % (
url, result['count'], num)
raise Exception(msg)
# except Exception as e:
# print("%s:\n%s" % (url, e))
# raise
def show_menu_path(spec, language=None):
"""
Print the menu path of the given actor or action.
Deprecated. You should rather use
:meth:`lino.core.requests.BaseRequest.show_menu_path` which
automatically sets the language of the user and works for any user
type.
"""
user_type = rt.models.users.UserTypes.get_by_value('900')
mi = user_type.find_menu_item(spec)
if mi is None:
raise Exception("Invalid spec {0}".format(spec))
if language:
with translation.override(language):
print(menuselection_text(mi))
else:
print(menuselection_text(mi))
# items = [mi]
# p = mi.parent
# while p:
# items.insert(0, p)
# p = p.parent
# return " --> ".join([i.label for i in items])
def noblanklines(s):
"""Remove blank lines from output. This is used to increase
    readability when some expected output would otherwise contain
    disturbing `<BLANKLINE>` markers which are not relevant to the test
itself.
"""
return '\n'.join([ln for ln in s.splitlines() if ln.strip()])
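# Example (an illustration, not from the original module): blank and
# whitespace-only lines are dropped, everything else is kept verbatim.
#
# >>> noblanklines("a\n\n  \nb")
# 'a\nb'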
def show_choices(username, url, show_count=False):
"""Print the choices returned via web client."""
test_client.force_login(rt.login(username).user)
response = test_client.get(url, REMOTE_USER=username)
if response.status_code != 200:
raise Exception(
"Response status ({0}) was {1} instead of 200".format(
url, response.status_code))
result = json.loads(response.content.decode())
for r in result['rows']:
print(r['text'])
# print(r['value'], r['text'])
if show_count:
print("{} rows".format(result['count']))
from django.db.models import Model
from lino.core.actions import Action
from lino.core.tables import AbstractTable
from lino.core.boundaction import BoundAction
def show_workflow(actions, all=False, language=None):
"""
Show the given actions as a table. Usage example in
:ref:`avanti.specs.cal`.
"""
def doit():
cells = []
cols = ["Action name", "Verbose name", "Help text",
"Target state", "Required states"] # , "Required roles"]
for a in actions:
ht = a.help_text or ''
if ht or all:
# required_roles = ' '.join(
# [str(r) for r in a.required_roles])
cells.append(
[a.action_name, a.label, unindent(ht),
a.target_state, a.required_states or '',
# required_roles
])
print(table(cols, cells).strip())
if language:
with translation.override(language):
return doit()
return doit()
def show_fields(model, fieldnames=None, columns=False, all=None):
"""
Print an overview description of the specified fields of the
specified model.
If model is an action or table, print the parameter fields of that
action or table.
If model is a table and you want the columns instead of the
parameter fields, then specify `columns=True`.
By default this shows only fields which have a help text. If you
specify `all=True`, then also fields that have no help text will
be shown.
"""
cells = []
cols = ["Internal name", "Verbose name", "Help text"]
if all is None:
all = fieldnames is not None
if isinstance(model, BoundAction):
get_field = model.action.parameters.get
if fieldnames is None:
fieldnames = model.action.params_layout
elif isinstance(model, Action):
get_field = model.parameters.get
if fieldnames is None:
fieldnames = model.params_layout.main
elif issubclass(model, Model):
get_field = model._meta.get_field
# get_field = model.get_data_elem
if fieldnames is None:
fieldnames = [f.name for f in model._meta.get_fields()]
elif issubclass(model, AbstractTable):
if columns:
get_field = model.get_data_elem
if fieldnames is None:
fieldnames = model.column_names
# get_handle().list_layout.main.columns
else:
get_field = model.parameters.get
if fieldnames is None:
fieldnames = model.params_layout.main
if isinstance(fieldnames, str):
fieldnames = fieldnames.split()
for n in fieldnames:
fld = get_field(n)
if fld is not None and hasattr(fld, 'verbose_name'):
ht = fld.help_text or ''
if ht or all:
cells.append([n,
fld.verbose_name,
unindent(ht)])
print(table(cols, cells).strip())
def show_fields_by_type(fldtype):
"""Print a list of all fields (in all models) that have the specified type.
"""
from lino.core.utils import (sorted_models_list)
items = []
for model in sorted_models_list():
flds = []
for f in model._meta.fields:
if isinstance(f, fldtype):
name = f.name
verbose_name = force_str(f.verbose_name).strip()
txt = "{verbose_name} ({name})".format(**locals())
flds.append(txt)
if len(flds):
txt = "{model} : {fields}".format(
model=full_model_name(model), fields=", ".join(flds))
items.append(txt)
print(rstgen.ul(items))
def show_columns(*args, **kwargs):
"""Like :func:`show_fields` but with `columns` defaulting to True.
"""
kwargs.update(columns=True)
return show_fields(*args, **kwargs)
def py2rst(x, doctestfmt=True):
return diag.py2rst(x, doctestfmt)
def show_dialog_actions():
return analyzer.show_dialog_actions(True)
def walk_menu_items(username=None, severe=True):
"""Walk through all menu items which run a :class:`ShowTable
<lino.core.actions.ShowTable>` action, showing how many data rows
the grid contains.
"""
def doit(ar):
if ar is None:
user_type = None
else:
user_type = ar.user.user_type
test_client.force_login(ar.user)
mnu = settings.SITE.get_site_menu(user_type)
items = []
for mi in mnu.walk_items():
if mi.bound_action:
if isinstance(mi.bound_action.action, ShowTable):
mt = mi.bound_action.actor
url = 'api/{}/{}'.format(mt.app_label, mt.__name__)
url = str(settings.SITE.buildurl(url, fmt='json'))
item = menuselection_text(mi) + " : "
try:
response = test_client.get(url, REMOTE_USER=str(username))
result = check_json_result(
response, None,
"GET %s for user %s" % (url, username))
item += str(result['count'])
except Exception as e:
if severe:
raise
else:
item += str(e)
items.append(item)
s = rstgen.ul(items)
print(s)
if settings.SITE.user_types_module:
ar = settings.SITE.login(username)
with translation.override(ar.user.language):
doit(ar)
else:
doit(None)
def show_sql_queries():
"""
Print the SQL queries which have been made since last call.
Usage example: :ref:`specs.noi.sql`.
"""
for qry in connection.queries:
sql = qry['sql'].strip()
print(sql.replace('"', ''))
# reset_sql_queries()
def show_sql_summary(**kwargs):
"""Print a summary of the SQL queries which have been made since last
call.
Usage example: :ref:`specs.tera.sql`.
"""
def func():
for qry in connection.queries:
try:
yield "({time}) {sql};".format(**qry)
except KeyError as e:
yield "{} : {}".format(qry, e)
sql_summary(func(), **kwargs)
# reset_sql_queries()
def add_call_logger(owner, name):
"""Replace the callable named name on owner by a wrapper which
additionally prints a message on each call.
"""
func = getattr(owner, name)
msg = "{}() on {} was called".format(name, owner)
def w(*args, **kwargs):
print(msg)
return func(*args, **kwargs)
setattr(owner, name, w)
def str2languages(txt):
"""
Return a list of all translations for the given translatable text.
"""
lst = []
for lng in settings.SITE.languages:
with translation.override(lng.django_code):
lst.append(str(txt))
return lst
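# Illustrative sketch (not from the original module): for a site configured
# with, say, languages="en de fr", each translatable string yields one entry
# per language. The lazy-translated string below is a hypothetical example.
#
# >>> from django.utils.translation import gettext_lazy as _
# >>> str2languages(_("January"))  # doctest: +SKIP
# ['January', 'Januar', 'janvier']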
def show_choicelist(cls):
"""
Similar to :func:`rt.show`, but the `text` is shown in all
languages instead of just the current language.
"""
headers = ["value", "name"] + [lng.name for lng in settings.SITE.languages]
rows = []
for i in cls.get_list_items():
row = [i.value, i.name] + str2languages(i.text)
rows.append(row)
print(table(headers, rows))
def show_choicelists():
"""
Show all the choicelists defined in this application.
"""
headers = ["name", "#items", "preferred_width"] + [lng.name for lng in settings.SITE.languages]
rows = []
for i in sorted(kernel.CHOICELISTS.values(), key=lambda s: str(s)):
row = [str(i), len(i.choices), i.preferred_width] + str2languages(i.verbose_name_plural)
rows.append(row)
print(table(headers, rows))
def show_permissions(*args):
print(visible_for(*args))
def show_quick_search_fields(*args):
for m in args:
print(str(m._meta.verbose_name_plural))
for fld in m.quick_search_fields:
print("- {} ({})".format(fld.verbose_name, fld.name))
def pprint_json_string(s):
"""
Used to doctest json values and have them be python 2/3 passable.
:param s: json string
"""
print(json.dumps(json.loads(s),
indent=2,
sort_keys=True,
separators=(",", ": ")))
def show_dashboard(username, **options):
"""Show the dashboard of the given user.
Useful options:
- ignore_links=True
For more options, see
https://pypi.org/project/html2text/ and
https://github.com/Alir3z4/html2text/blob/master/docs/usage.md
Note that this is currently not much used because the result is difficult to
maintain. One reason for this is that :func:`naturaltime` (from
:mod:`django.contrib.humanize.templatetags.humanize`) ignores demo_date and
therefore produces results that depend on the current date/time.
"""
request = PseudoRequest(username)
ui = settings.SITE.kernel.text_renderer.front_end
html = settings.SITE.get_main_html(request, extjs=ui)
print(html2text(html, **options))
def menu2rst(mnu, level=1):
"""Recursive utility used by :func:`show_menu`.
"""
if not isinstance(mnu, Menu):
return str(mnu.label)
has_submenus = False
for i in mnu.items:
if isinstance(i, Menu):
has_submenus = True
items = [menu2rst(mi, level + 1) for mi in mnu.items]
if has_submenus:
s = rstgen.ul(items).strip() + '\n'
if mnu.label is not None:
s = str(mnu.label) + ' :\n\n' + s
else:
s = ', '.join(items)
if mnu.label is not None:
s = str(mnu.label) + ' : ' + s
return s
def show_menu(username, language=None, stripped=True, level=1):
"""
Render the main menu for the given user as a reStructuredText formatted
bullet list.
:language: explicitly select another language than that
specified in the requesting user's :attr:`language
<lino.modlib.users.models.User.language>` field.
:stripped: remove lots of blanklines which are necessary for
reStructuredText but disturbing in a doctest
snippet.
"""
ar = rt.login(username)
user = ar.get_user()
if language is None:
language = user.language
with translation.override(language):
mnu = settings.SITE.get_site_menu(user.user_type)
s = menu2rst(mnu, level)
if stripped:
for ln in s.splitlines():
if ln.strip():
print(ln)
else:
print(s)
|
{
"content_hash": "5cd7467b019e60b888fa7574346db844",
"timestamp": "",
"source": "github",
"line_count": 554,
"max_line_length": 99,
"avg_line_length": 32.18231046931408,
"alnum_prop": 0.6010432441527848,
"repo_name": "lino-framework/lino",
"id": "f6c8ff260c34a903c547be69d2bec6daaf64ddf5",
"size": "17970",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lino/api/doctest.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "704"
},
{
"name": "CSS",
"bytes": "1281825"
},
{
"name": "Emacs Lisp",
"bytes": "277895"
},
{
"name": "HTML",
"bytes": "928037"
},
{
"name": "Hack",
"bytes": "3416"
},
{
"name": "JavaScript",
"bytes": "1128493"
},
{
"name": "PHP",
"bytes": "53997"
},
{
"name": "Python",
"bytes": "2601694"
},
{
"name": "Shell",
"bytes": "4469"
},
{
"name": "TSQL",
"bytes": "2427"
}
],
"symlink_target": ""
}
|
import re
import sqlite3
from mock import Mock, call, patch
from pytest import fixture, mark, raises
import nosqlite
@fixture(scope="module")
def db(request):
_db = sqlite3.connect(':memory:')
request.addfinalizer(_db.close)
return _db
@fixture(scope="module")
def collection(db, request):
return nosqlite.Collection(db, 'foo', create=False)
class TestConnection(object):
def test_connect(self):
conn = nosqlite.Connection(':memory:')
assert conn.db.isolation_level is None
@patch('nosqlite.sqlite3')
def test_context_manager_closes_connection(self, sqlite):
with nosqlite.Connection() as conn:
pass
assert conn.db.close.called
@patch('nosqlite.sqlite3')
@patch('nosqlite.Collection')
def test_getitem_returns_collection(self, mock_collection, sqlite):
sqlite.connect.return_value = sqlite
mock_collection.return_value = mock_collection
conn = nosqlite.Connection()
assert 'foo' not in conn._collections
assert conn['foo'] == mock_collection
@patch('nosqlite.sqlite3')
def test_getitem_returns_cached_collection(self, sqlite):
conn = nosqlite.Connection()
conn._collections['foo'] = 'bar'
assert conn['foo'] == 'bar'
@patch('nosqlite.sqlite3')
def test_drop_collection(self, sqlite):
conn = nosqlite.Connection()
conn.drop_collection('foo')
conn.db.execute.assert_called_with('drop table if exists foo')
@patch('nosqlite.sqlite3')
def test_getattr_returns_attribute(self, sqlite):
conn = nosqlite.Connection()
assert conn.__getattr__('db') in conn.__dict__.values()
@patch('nosqlite.sqlite3')
def test_getattr_returns_collection(self, sqlite):
conn = nosqlite.Connection()
foo = conn.__getattr__('foo')
assert foo not in conn.__dict__.values()
assert isinstance(foo, nosqlite.Collection)
class TestCollection(object):
def setup(self):
self.db = sqlite3.connect(':memory:')
self.collection = nosqlite.Collection(self.db, 'foo', create=False)
def teardown(self):
self.db.close()
def unformat_sql(self, sql):
return re.sub(r'[\s]+', ' ', sql.strip().replace('\n', ''))
def test_create(self):
collection = nosqlite.Collection(Mock(), 'foo', create=False)
collection.create()
collection.db.execute.assert_any_call("""
create table if not exists foo (
id integer primary key autoincrement,
data text not null
)
""")
def test_clear(self):
collection = nosqlite.Collection(Mock(), 'foo')
collection.clear()
collection.db.execute.assert_any_call('delete from foo')
def test_exists_when_absent(self):
assert not self.collection.exists()
def test_exists_when_present(self):
self.collection.create()
assert self.collection.exists()
def test_insert_actually_updates(self):
doc = {'_id': 1, 'foo': 'bar'}
self.collection.update = Mock()
self.collection.insert(doc)
self.collection.update.assert_called_with(doc)
def test_insert(self):
doc = {'foo': 'bar'}
self.collection.create()
inserted = self.collection.insert(doc)
assert inserted['_id'] == 1
def test_save_calls_update(self):
with patch.object(self.collection, 'update'):
doc = {'foo': 'bar'}
self.collection.save(doc)
self.collection.update.assert_called_with(doc)
def test_update_actually_inserts(self):
doc = {'foo': 'bar'}
self.collection.insert = Mock()
self.collection.update(doc)
self.collection.insert.assert_called_with(doc)
def test_update(self):
doc = {'foo': 'bar'}
self.collection.create()
doc = self.collection.insert(doc)
doc['foo'] = 'baz'
updated = self.collection.update(doc)
assert updated['foo'] == 'baz'
def test_delete_calls_remove(self):
with patch.object(self.collection, 'remove'):
doc = {'foo': 'bar'}
self.collection.delete(doc)
self.collection.remove.assert_called_with(doc)
def test_remove_raises_when_no_id(self):
with raises(AssertionError):
self.collection.remove({'foo': 'bar'})
def test_remove(self):
self.collection.create()
doc = self.collection.insert({'foo': 'bar'})
assert 1 == int(self.collection.db.execute("select count(1) from foo").fetchone()[0])
self.collection.remove(doc)
assert 0 == int(self.collection.db.execute("select count(1) from foo").fetchone()[0])
@mark.parametrize('strdoc,doc', [
('{"foo": "bar"}', {'_id': 1, 'foo': 'bar'}),
(u'{"foo": "☃"}', {'_id': 1, 'foo': u'☃'}),
])
def test_load(self, strdoc, doc):
assert doc == self.collection._load(1, strdoc)
def test_find(self):
query = {'foo': 'bar'}
documents = [
(1, {'foo': 'bar', 'baz': 'qux'}), # Will match
(2, {'foo': 'bar', 'bar': 'baz'}), # Will match
(2, {'foo': 'baz', 'bar': 'baz'}), # Will not match
(3, {'baz': 'qux'}), # Will not match
]
collection = nosqlite.Collection(Mock(), 'foo', create=False)
collection.db.execute.return_value = collection.db
collection.db.fetchall.return_value = documents
collection._load = lambda id, data: data
ret = collection.find(query)
assert len(ret) == 2
def test_find_honors_limit(self):
query = {'foo': 'bar'}
documents = [
(1, {'foo': 'bar', 'baz': 'qux'}), # Will match
(2, {'foo': 'bar', 'bar': 'baz'}), # Will match
(2, {'foo': 'baz', 'bar': 'baz'}), # Will not match
(3, {'baz': 'qux'}), # Will not match
]
collection = nosqlite.Collection(Mock(), 'foo', create=False)
collection.db.execute.return_value = collection.db
collection.db.fetchall.return_value = documents
collection._load = lambda id, data: data
ret = collection.find(query, limit=1)
assert len(ret) == 1
def test_apply_query_and_type(self):
query = {'$and': [{'foo': 'bar'}, {'baz': 'qux'}]}
assert self.collection._apply_query(query, {'foo': 'bar', 'baz': 'qux'})
assert not self.collection._apply_query(query, {'foo': 'bar', 'baz': 'foo'})
def test_apply_query_or_type(self):
query = {'$or': [{'foo': 'bar'}, {'baz': 'qux'}]}
assert self.collection._apply_query(query, {'foo': 'bar', 'abc': 'xyz'})
assert self.collection._apply_query(query, {'baz': 'qux', 'abc': 'xyz'})
assert not self.collection._apply_query(query, {'abc': 'xyz'})
def test_apply_query_not_type(self):
query = {'$not': {'foo': 'bar'}}
assert self.collection._apply_query(query, {'foo': 'baz'})
assert not self.collection._apply_query(query, {'foo': 'bar'})
def test_apply_query_nor_type(self):
query = {'$nor': [{'foo': 'bar'}, {'baz': 'qux'}]}
assert self.collection._apply_query(query, {'foo': 'baz', 'baz': 'bar'})
assert not self.collection._apply_query(query, {'foo': 'bar'})
assert not self.collection._apply_query(query, {'baz': 'qux'})
assert not self.collection._apply_query(query, {'foo': 'bar', 'baz': 'qux'})
def test_apply_query_gt_operator(self):
query = {'foo': {'$gt': 5}}
assert self.collection._apply_query(query, {'foo': 10})
assert not self.collection._apply_query(query, {'foo': 4})
def test_apply_query_gte_operator(self):
query = {'foo': {'$gte': 5}}
assert self.collection._apply_query(query, {'foo': 5})
assert not self.collection._apply_query(query, {'foo': 4})
def test_apply_query_lt_operator(self):
query = {'foo': {'$lt': 5}}
assert self.collection._apply_query(query, {'foo': 4})
assert not self.collection._apply_query(query, {'foo': 10})
def test_apply_query_lte_operator(self):
query = {'foo': {'$lte': 5}}
assert self.collection._apply_query(query, {'foo': 5})
assert not self.collection._apply_query(query, {'foo': 10})
def test_apply_query_eq_operator(self):
query = {'foo': {'$eq': 5}}
assert self.collection._apply_query(query, {'foo': 5})
assert not self.collection._apply_query(query, {'foo': 4})
assert not self.collection._apply_query(query, {'foo': 'bar'})
def test_apply_query_in_operator(self):
query = {'foo': {'$in': [1, 2, 3]}}
assert self.collection._apply_query(query, {'foo': 1})
assert not self.collection._apply_query(query, {'foo': 4})
assert not self.collection._apply_query(query, {'foo': 'bar'})
def test_apply_query_in_operator_raises(self):
query = {'foo': {'$in': 5}}
with raises(nosqlite.MalformedQueryException):
self.collection._apply_query(query, {'foo': 1})
def test_apply_query_nin_operator(self):
query = {'foo': {'$nin': [1, 2, 3]}}
assert self.collection._apply_query(query, {'foo': 4})
assert self.collection._apply_query(query, {'foo': 'bar'})
assert not self.collection._apply_query(query, {'foo': 1})
def test_apply_query_nin_operator_raises(self):
query = {'foo': {'$nin': 5}}
with raises(nosqlite.MalformedQueryException):
self.collection._apply_query(query, {'foo': 1})
def test_apply_query_ne_operator(self):
query = {'foo': {'$ne': 5}}
assert self.collection._apply_query(query, {'foo': 1})
assert self.collection._apply_query(query, {'foo': 'bar'})
assert not self.collection._apply_query(query, {'foo': 5})
def test_apply_query_all_operator(self):
query = {'foo': {'$all': [1, 2, 3]}}
assert self.collection._apply_query(query, {'foo': range(10)})
assert not self.collection._apply_query(query, {'foo': ['bar', 'baz']})
assert not self.collection._apply_query(query, {'foo': 3})
def test_apply_query_all_operator_raises(self):
query = {'foo': {'$all': 3}}
with raises(nosqlite.MalformedQueryException):
self.collection._apply_query(query, {'foo': 'bar'})
def test_apply_query_mod_operator(self):
query = {'foo': {'$mod': [2, 0]}}
assert self.collection._apply_query(query, {'foo': 4})
assert not self.collection._apply_query(query, {'foo': 3})
assert not self.collection._apply_query(query, {'foo': 'bar'})
def test_apply_query_mod_operator_raises(self):
query = {'foo': {'$mod': 2}}
with raises(nosqlite.MalformedQueryException):
self.collection._apply_query(query, {'foo': 5})
def test_apply_query_honors_multiple_operators(self):
query = {'foo': {'$gte': 0, '$lte': 10, '$mod': [2, 0]}}
assert self.collection._apply_query(query, {'foo': 4})
assert not self.collection._apply_query(query, {'foo': 3})
assert not self.collection._apply_query(query, {'foo': 15})
assert not self.collection._apply_query(query, {'foo': 'foo'})
def test_apply_query_honors_logical_and_operators(self):
# 'bar' must be 'baz', and 'foo' must be an even number 0-10 or an odd number > 10
query = {
'bar': 'baz',
'$or': [
{'foo': {'$gte': 0, '$lte': 10, '$mod': [2, 0]}},
{'foo': {'$gt': 10, '$mod': [2, 1]}},
]
}
assert self.collection._apply_query(query, {'bar': 'baz', 'foo': 4})
assert self.collection._apply_query(query, {'bar': 'baz', 'foo': 15})
assert not self.collection._apply_query(query, {'bar': 'baz', 'foo': 14})
assert not self.collection._apply_query(query, {'bar': 'qux', 'foo': 4})
def test_apply_query_exists(self):
query_exists = {'foo': {'$exists': True}}
query_not_exists = {'foo': {'$exists': False}}
assert self.collection._apply_query(query_exists, {'foo': 'bar'})
assert self.collection._apply_query(query_not_exists, {'bar': 'baz'})
assert not self.collection._apply_query(query_exists, {'baz': 'bar'})
assert not self.collection._apply_query(query_not_exists, {'foo': 'bar'})
def test_apply_query_exists_raises(self):
query = {'foo': {'$exists': 'foo'}}
with raises(nosqlite.MalformedQueryException):
self.collection._apply_query(query, {'foo': 'bar'})
def test_get_operator_fn_improper_op(self):
with raises(nosqlite.MalformedQueryException):
self.collection._get_operator_fn('foo')
def test_get_operator_fn_valid_op(self):
assert self.collection._get_operator_fn('$in') == nosqlite._in
def test_get_operator_fn_no_op(self):
with raises(nosqlite.MalformedQueryException):
self.collection._get_operator_fn('$foo')
def test_find_and_modify(self):
update = {'foo': 'bar'}
docs = [
{'foo': 'foo'},
{'baz': 'qux'},
]
with patch.object(self.collection, 'find'):
with patch.object(self.collection, 'update'):
self.collection.find.return_value = docs
self.collection.find_and_modify(update=update)
self.collection.update.assert_has_calls([
call({'foo': 'bar'}),
call({'foo': 'bar', 'baz': 'qux'}),
])
def test_count(self):
with patch.object(self.collection, 'find'):
self.collection.find.return_value = range(10)
assert self.collection.count() == 10
def test_distinct(self):
docs = [
{'foo': 'bar'},
{'foo': 'baz'},
{'foo': 10},
{'bar': 'foo'}
]
self.collection.find = lambda: docs
assert set(('bar', 'baz', 10)) == self.collection.distinct('foo')
def test_rename_raises_for_collision(self):
nosqlite.Collection(self.db, 'bar') # Create a collision point
self.collection.create()
with raises(AssertionError):
self.collection.rename('bar')
def test_rename(self):
self.collection.create()
assert self.collection.exists()
self.collection.rename('bar')
assert self.collection.name == 'bar'
assert self.collection.exists()
assert not nosqlite.Collection(self.db, 'foo', create=False).exists()
class TestFindOne(object):
def test_returns_None_if_collection_does_not_exist(self, collection):
assert collection.find_one({}) is None
def test_returns_None_if_document_is_not_found(self, collection):
collection.create()
assert collection.find_one({}) is None
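# Hedged end-to-end sketch (not part of the original test suite): the public
# API that the unit tests above exercise piecewise, assuming the nosqlite
# module follows the Connection/Collection interface used in this file.
def _example_usage():
    with nosqlite.Connection(':memory:') as conn:
        docs = conn.docs  # attribute access yields a Collection named "docs"
        docs.insert({'foo': 4, 'bar': 'baz'})
        docs.insert({'foo': 15, 'bar': 'baz'})
        # Same operator semantics as covered by the _apply_query tests above.
        return docs.find({'foo': {'$gte': 0, '$lte': 10, '$mod': [2, 0]}})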
|
{
"content_hash": "ad8ca8401c5f4eb926d23e9426e0f005",
"timestamp": "",
"source": "github",
"line_count": 426,
"max_line_length": 93,
"avg_line_length": 35.15023474178404,
"alnum_prop": 0.5788700414051022,
"repo_name": "shaunduncan/nosqlite",
"id": "339cb4e136d302c918f1a9089ae1c3ac9d54a215",
"size": "14994",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34274"
}
],
"symlink_target": ""
}
|
import os
import scrapy
from scrapy.selector import Selector
from scrapy.pipelines.images import ImagesPipeline
from scrapy.exceptions import DropItem
class CSDNImagesPipeline(ImagesPipeline):
    def file_path(self, request, response=None, info=None):
        # handle such image (with watermark) url:
        # http://img.blog.csdn.net/20140917165912117?watermark/2/text/aHR0cDovL2Jsb2cuY3Nkbi5uZXQvaWFpdGk=/font/5a6L5L2T/fontsize/400/fill/I0JBQkFCMA==/dissolve/70/gravity/SouthEast
        url = request.url
        if '?' in url:  # strip the watermark query, keep a ".png" extension
            url = url.split('?')[0] + '.png'
        return os.sep.join(url[7:].split('/'))  # drop "http://", map the path
def get_media_requests(self, item, info):
for image_url in item['image_urls']:
yield scrapy.Request(image_url)
def item_completed(self, results, item, info):
if not [x['url'] for ok, x in results if ok]:
raise DropItem("Item contains no images")
return item
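# Hedged configuration sketch (not part of the original file): Scrapy runs a
# pipeline only when it is registered in the project's settings.py. The
# module path matches this repository's layout; IMAGES_STORE is the standard
# Scrapy setting naming the download directory, and its value below is a
# placeholder.
#
# ITEM_PIPELINES = {
#     'csdnspider.pipelines.CSDNImagesPipeline': 1,
# }
# IMAGES_STORE = '/path/to/image/store'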
|
{
"content_hash": "85ddbe3c1fab764ba3f8f9e92031283a",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 184,
"avg_line_length": 42.04545454545455,
"alnum_prop": 0.6897297297297297,
"repo_name": "sighingnow/Spider-Utils",
"id": "01fef1d74682eee2c27bb5176555318ed4d9102a",
"size": "1118",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "csdnspider/csdnspider/pipelines.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "149069"
},
{
"name": "Python",
"bytes": "31757"
}
],
"symlink_target": ""
}
|
"""Template tags for working with Recurly.js"""
from django import template
from django.template import Library, Node, Variable, loader
from django_recurly.utils import recurly
from django_recurly.models import Account
from django_recurly.helpers.recurlyjs import get_config, get_subscription_form, get_billing_info_update_form
register = template.Library()
@register.inclusion_tag('django_recurly/base_script.html', takes_context=True)
def recurly_script_block(context, plan_code):
return {
'user': context['user'],
'plan_code': plan_code
}
@register.simple_tag
def recurly_config():
return get_config()
@register.simple_tag(takes_context=True)
def subscription_form(context, plan_code, target_element="#recurly-container", protected_params=None, unprotected_params=None):
    from django_recurly.utils import dict_merge
    # Avoid mutable default arguments: the dicts are created per call.
    protected_params = protected_params or {}
    unprotected_params = unprotected_params or {}
    user = context['user']
account = None
if user.is_authenticated():
try:
# Grab the recurly account details (could be different from app user details)
account = user.recurly_accounts.get().get_account()
except Account.DoesNotExist:
# Pre-populate the form fields with user data
account = recurly.Account(**user._wrapped.__dict__)
# TODO: (IW) Simplify
if 'account' in unprotected_params:
unprotected_params["account"] = dict_merge(account.to_dict(js=True), unprotected_params["account"])
else:
unprotected_params["account"] = account.to_dict(js=True)
return get_subscription_form(plan_code=plan_code, user=user, protected_params=protected_params, unprotected_params=unprotected_params)
@register.simple_tag(takes_context=True)
def billing_info_update_form(context, target_element="#recurly-container", protected_params=None, unprotected_params=None):
    from django_recurly.utils import dict_merge
    # Avoid mutable default arguments: the dicts are created per call.
    protected_params = protected_params or {}
    unprotected_params = unprotected_params or {}
    user = context['user']
account = None
if user.is_authenticated():
try:
# Grab the recurly account details (could be different from app user details)
account = user.recurly_accounts.get().get_account()
except Account.DoesNotExist:
# Pre-populate the form fields with user data
account = recurly.Account(**user._wrapped.__dict__)
#TODO: (IW) Simplify
if 'account' in unprotected_params:
unprotected_params["account"] = dict_merge(account.to_dict(js=True), unprotected_params["account"])
else:
unprotected_params["account"] = account.to_dict(js=True)
return get_billing_info_update_form(user=user, account=account)
@register.simple_tag(takes_context=True)
def has_active_account(context):
user = context['user']
if not user:
return False
try:
        return user.recurly_accounts.get().is_active()
except Account.DoesNotExist:
return False
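# Hedged template usage sketch (not part of the original file); the plan code
# below is a placeholder:
#
#   {% load recurly_js %}
#   {% recurly_config %}
#   {% subscription_form "gold-plan" %}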
|
{
"content_hash": "bc94e9d3d344a6236b6a65a1cd00aebc",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 138,
"avg_line_length": 34.285714285714285,
"alnum_prop": 0.6798611111111111,
"repo_name": "pakal/django-recurly",
"id": "d978a1699546f4431526e6f8b0f223ef0e49d9b2",
"size": "2880",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_recurly/templatetags/recurly_js.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "1721"
},
{
"name": "JavaScript",
"bytes": "293"
},
{
"name": "Python",
"bytes": "131805"
}
],
"symlink_target": ""
}
|
import warnings
from six import string_types
from django.db import models
class Indexed(object):
@classmethod
def indexed_get_parent(cls, require_model=True):
for base in cls.__bases__:
if issubclass(base, Indexed) and (issubclass(base, models.Model) or require_model is False):
return base
@classmethod
def indexed_get_content_type(cls):
# Work out content type
content_type = (cls._meta.app_label + '_' + cls.__name__).lower()
# Get parent content type
parent = cls.indexed_get_parent()
if parent:
parent_content_type = parent.indexed_get_content_type()
return parent_content_type + '_' + content_type
else:
return content_type
@classmethod
def indexed_get_toplevel_content_type(cls):
# Get parent content type
parent = cls.indexed_get_parent()
if parent:
return parent.indexed_get_content_type()
else:
# At toplevel, return this content type
return (cls._meta.app_label + '_' + cls.__name__).lower()
@classmethod
def get_search_fields(cls):
search_fields = {}
for field in cls.search_fields:
search_fields[(type(field), field.field_name)] = field
return list(search_fields.values())
@classmethod
def get_searchable_search_fields(cls):
return [
field for field in cls.get_search_fields()
if isinstance(field, SearchField)
]
@classmethod
def get_filterable_search_fields(cls):
return [
field for field in cls.get_search_fields()
if isinstance(field, FilterField)
]
@classmethod
def get_indexed_objects(cls):
return cls.objects.all()
def get_indexed_instance(self):
"""
If the indexed model uses multi table inheritance, override this method
to return the instance in its most specific class so it reindexes properly.
"""
return self
search_fields = ()
def get_indexed_models():
return [
model for model in models.get_models()
if issubclass(model, Indexed) and not model._meta.abstract
]
def class_is_indexed(cls):
return issubclass(cls, Indexed) and issubclass(cls, models.Model) and not cls._meta.abstract
class BaseField(object):
suffix = ''
def __init__(self, field_name, **kwargs):
self.field_name = field_name
self.kwargs = kwargs
def get_field(self, cls):
return cls._meta.get_field_by_name(self.field_name)[0]
def get_attname(self, cls):
try:
field = self.get_field(cls)
return field.attname
except models.fields.FieldDoesNotExist:
return self.field_name
def get_index_name(self, cls):
return self.get_attname(cls) + self.suffix
def get_type(self, cls):
if 'type' in self.kwargs:
return self.kwargs['type']
try:
field = self.get_field(cls)
return field.get_internal_type()
except models.fields.FieldDoesNotExist:
return 'CharField'
def get_value(self, obj):
try:
field = self.get_field(obj.__class__)
return field._get_val_from_obj(obj)
except models.fields.FieldDoesNotExist:
value = getattr(obj, self.field_name, None)
if hasattr(value, '__call__'):
value = value()
return value
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.field_name)
class SearchField(BaseField):
def __init__(self, field_name, boost=None, partial_match=False, **kwargs):
super(SearchField, self).__init__(field_name, **kwargs)
self.boost = boost
self.partial_match = partial_match
class FilterField(BaseField):
suffix = '_filter'
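# Hedged usage sketch (not part of the original module): a model opts into
# search by also subclassing Indexed and declaring search_fields with the
# field classes defined above. "BlogPage" and its fields are hypothetical,
# so the example is left commented out.
#
# class BlogPage(models.Model, Indexed):
#     title = models.CharField(max_length=255)
#     live = models.BooleanField(default=False)
#
#     search_fields = (
#         SearchField('title', boost=2, partial_match=True),
#         FilterField('live'),
#     )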
|
{
"content_hash": "f41dd9837fdd4542bbb4f8a8cf40c6ff",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 104,
"avg_line_length": 28.302158273381295,
"alnum_prop": 0.5945602440264361,
"repo_name": "chimeno/wagtail",
"id": "bbe9dbf6afd16b6f560016d8d3b3e0869bb49a44",
"size": "3934",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "wagtail/wagtailsearch/index.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "138709"
},
{
"name": "HTML",
"bytes": "226717"
},
{
"name": "JavaScript",
"bytes": "55764"
},
{
"name": "Makefile",
"bytes": "548"
},
{
"name": "Python",
"bytes": "1032057"
},
{
"name": "Ruby",
"bytes": "1275"
},
{
"name": "Shell",
"bytes": "11292"
}
],
"symlink_target": ""
}
|
"""
WSGI config for sinek_tasarim project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "sinek_tasarim.settings")
application = get_wsgi_application()
|
{
"content_hash": "cfe5c53d22aece1dc194931e087c20f8",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 78,
"avg_line_length": 25.1875,
"alnum_prop": 0.771712158808933,
"repo_name": "hanakamer/sinek-tasarim",
"id": "d99ab9132c21f520cfbe7a55f3427468b46b4c03",
"size": "403",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sinek_tasarim/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2010"
},
{
"name": "HTML",
"bytes": "2865"
},
{
"name": "Python",
"bytes": "13682"
}
],
"symlink_target": ""
}
|
import ctypes
class DynamicArray():
def __init__(self):
self.n = 0
self.capacity = 1
self.A = self.make_array(self.capacity)
def __len__(self):
return self.n
def __getitem__(self, k):
if not 0 <= k < self.n:
            raise IndexError('k is out of bounds!')
return self.A[k]
def append(self, item):
#if the size of array is equal to the cap. call the resize function to double size
if self.n == self.capacity:
#resize the array of cap is full
self._resize(2*self.capacity)
#at position n, add the new item (n because we start at 0)
#if n = 9, 0-8 is full and the 9th spot is the next to fill
self.A[self.n] = item
#increase the size by one to keep track
self.n += 1
def _resize(self, new_capacity):
#make a new array, calling this function
#kind of like C and C++, initialize new array
B = self.make_array(new_capacity)
#move all the values from A to B temporarily
for k in range(self.n):
B[k] = self.A[k]
#set A to B to get the values back into A
self.A = B
#set the capacity of the array to the new capacity
self.capacity = new_capacity
def make_array(self, new_cap):
return (new_cap * ctypes.py_object)()
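# Quick usage sketch (an addition for illustration): appends trigger the
# doubling resizes transparently; reads go through __getitem__.
if __name__ == '__main__':
    arr = DynamicArray()
    for i in range(5):
        arr.append(i)  # capacity grows 1 -> 2 -> 4 -> 8
    print(len(arr), arr[4])  # prints: 5 4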
|
{
"content_hash": "df89eef2d82ebbc44da5e7108060818f",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 90,
"avg_line_length": 30.333333333333332,
"alnum_prop": 0.5714285714285714,
"repo_name": "kozlowsm/Python",
"id": "1e8111dac1a23a5867fd78040d32e2a7c48a0a9a",
"size": "1365",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "DynamicArray.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7470"
}
],
"symlink_target": ""
}
|
try:
import unittest2 as unittest
except ImportError:
import unittest
from cassandra.query import BatchStatement
from cassandra.cluster import Cluster
from tests.integration import use_singledc, PROTOCOL_VERSION
def setup_module():
use_singledc()
class ClientWarningTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
if PROTOCOL_VERSION < 4:
return
cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION)
cls.session = cls.cluster.connect()
cls.session.execute("CREATE TABLE IF NOT EXISTS test1rf.client_warning (k int, v0 int, v1 int, PRIMARY KEY (k, v0))")
cls.prepared = cls.session.prepare("INSERT INTO test1rf.client_warning (k, v0, v1) VALUES (?, ?, ?)")
cls.warn_batch = BatchStatement()
# 213 = 5 * 1024 / (4+4 + 4+4 + 4+4)
# thresh_kb/ (min param size)
for x in range(213):
cls.warn_batch.add(cls.prepared, (x, x, 1))
@classmethod
def tearDownClass(cls):
if PROTOCOL_VERSION < 4:
return
cls.cluster.shutdown()
def setUp(self):
if PROTOCOL_VERSION < 4:
raise unittest.SkipTest(
"Native protocol 4,0+ is required for client warnings, currently using %r"
% (PROTOCOL_VERSION,))
def test_warning_basic(self):
"""
Test to validate that client warnings can be surfaced
@since 2.6.0
@jira_ticket PYTHON-315
@expected_result valid warnings returned
@test_assumptions
- batch_size_warn_threshold_in_kb: 5
@test_category queries:client_warning
"""
future = self.session.execute_async(self.warn_batch)
future.result()
self.assertEqual(len(future.warnings), 1)
self.assertRegexpMatches(future.warnings[0], 'Batch.*exceeding.*')
def test_warning_with_trace(self):
"""
Test to validate client warning with tracing
@since 2.6.0
@jira_ticket PYTHON-315
@expected_result valid warnings returned
@test_assumptions
- batch_size_warn_threshold_in_kb: 5
@test_category queries:client_warning
"""
future = self.session.execute_async(self.warn_batch, trace=True)
future.result()
self.assertEqual(len(future.warnings), 1)
self.assertRegexpMatches(future.warnings[0], 'Batch.*exceeding.*')
self.assertIsNotNone(future._query_trace)
def test_warning_with_custom_payload(self):
"""
Test to validate client warning with custom payload
@since 2.6.0
@jira_ticket PYTHON-315
@expected_result valid warnings returned
@test_assumptions
- batch_size_warn_threshold_in_kb: 5
@test_category queries:client_warning
"""
payload = {'key': b'value'}
future = self.session.execute_async(self.warn_batch, custom_payload=payload)
future.result()
self.assertEqual(len(future.warnings), 1)
self.assertRegexpMatches(future.warnings[0], 'Batch.*exceeding.*')
self.assertDictEqual(future.custom_payload, payload)
def test_warning_with_trace_and_custom_payload(self):
"""
        Test to validate client warning with tracing and custom payload
@since 2.6.0
@jira_ticket PYTHON-315
@expected_result valid warnings returned
@test_assumptions
- batch_size_warn_threshold_in_kb: 5
@test_category queries:client_warning
"""
payload = {'key': b'value'}
future = self.session.execute_async(self.warn_batch, trace=True, custom_payload=payload)
future.result()
self.assertEqual(len(future.warnings), 1)
self.assertRegexpMatches(future.warnings[0], 'Batch.*exceeding.*')
self.assertIsNotNone(future._query_trace)
self.assertDictEqual(future.custom_payload, payload)
|
{
"content_hash": "67ff483feaffcb81da6b6915f695f8bb",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 125,
"avg_line_length": 34.05172413793103,
"alnum_prop": 0.6288607594936709,
"repo_name": "jregovic/python-driver",
"id": "14405a8df1db48e330ee531d0013e3d899bd1216",
"size": "4531",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tests/integration/standard/test_client_warnings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "28918"
},
{
"name": "Python",
"bytes": "1710751"
}
],
"symlink_target": ""
}
|
"""fuelclient.objects sub-module contains classes that mirror
functionality from nailgun objects.
"""
from fuelclient.objects.base import BaseObject
from fuelclient.objects.environment import Environment
from fuelclient.objects.node import Node
from fuelclient.objects.node import NodeCollection
from fuelclient.objects.release import Release
from fuelclient.objects.task import DeployTask
from fuelclient.objects.task import SnapshotTask
from fuelclient.objects.task import Task
|
{
"content_hash": "958afbe059980f1fc817f83b2ce54d36",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 61,
"avg_line_length": 40.083333333333336,
"alnum_prop": 0.8565488565488566,
"repo_name": "koder-ua/nailgun-fcert",
"id": "0917a7632a6f708a99630d6f536a746093aaa88f",
"size": "1091",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "fuelclient/fuelclient/objects/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "99349"
},
{
"name": "JavaScript",
"bytes": "551854"
},
{
"name": "Python",
"bytes": "2643199"
},
{
"name": "Ruby",
"bytes": "33345"
},
{
"name": "Shell",
"bytes": "29681"
}
],
"symlink_target": ""
}
|
from django.conf.urls import patterns, url
from people import views
urlpatterns = patterns('',
# url(r'^$', views.IndexView.as_view(), name='index'),
# url(r'^(?P<pk>\d+)/$', views.DetailView.as_view(), name='detail'),
# url(r'^(?P<pk>\d+)/results/$', views.ResultsView.as_view(), name='results'),
url(r'^signup/$', views.signup_view, name='signup'),
url(r'^login/$', views.login_view, name="login"),
url(r'^add_avatar/$', views.add_avatar, name="add_avatar"),
url(r'^get_avatar/$', views.get_avatar, name="get_avatar"),
)
|
{
"content_hash": "2eccae01f755b0e21a76e3c87d33a922",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 79,
"avg_line_length": 38,
"alnum_prop": 0.6428571428571429,
"repo_name": "youtaya/knight",
"id": "812bf6b1e796e58198a1afec4bd31e24513629fa",
"size": "532",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fuzzybee/people/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "883"
},
{
"name": "HTML",
"bytes": "6338"
},
{
"name": "JavaScript",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "25337"
}
],
"symlink_target": ""
}
|