repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
commtrack/commtrack-core | apps/buildmanager/models.py | 3 | 24577 | import os, sys
import logging
import traceback
from django.conf import settings
from datetime import datetime
import time
# make things easier so people don't have to install pygments
try:
from pygments import highlight
from pygments.lexers import HtmlLexer
from pygments.formatters import HtmlFormatter
pygments_found=True
except ImportError:
pygments_found=False
from zipstream import ZipStream
try:
from cStringIO import StringIO
except:
from StringIO import StringIO
from django.db import models
from django.db.models.signals import post_save
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from django.core.urlresolvers import reverse
from domain.models import Domain
from django.contrib.auth.models import User
from hq.utils import build_url
from requestlogger.models import RequestLog
from xformmanager.models import FormDefModel
from xformmanager.manager import XFormManager
from buildmanager import xformvalidator
from buildmanager.jar import validate_jar, extract_xforms
from buildmanager.exceptions import BuildError
BUILDFILES_PATH = settings.RAPIDSMS_APPS['buildmanager']['buildpath']
class Project (models.Model):
    """
    A project is a high level container for a given build project. A project
    can contain a history of builds
    """
    domain = models.ForeignKey(Domain)
    name = models.CharField(max_length=255)
    description = models.CharField(max_length=512, null=True, blank=True)
    # the optional project id in a different server (e.g. the build server)
    project_id = models.CharField(max_length=20, null=True, blank=True)
    @property
    def downloads(self):
        '''Get all the downloads associated with this project, across
           builds.'''
        return BuildDownload.objects.filter(build__project=self)
    def get_non_released_builds(self):
        '''Get all non-released builds for this project, newest first
           (by package creation date).'''
        return self.builds.exclude(status="release").order_by('-package_created')
    def get_released_builds(self):
        '''Get all released builds for a project, newest release first.'''
        return self.builds.filter(status="release").order_by('-released')
    def get_latest_released_build(self):
        '''Gets the latest released build for a project, based on the
           released date.  Returns None (implicitly) when no build has
           been released yet.'''
        releases = self.get_released_builds()
        if releases:
            return releases[0]
    def get_latest_jar_url(self):
        '''Get the URL for the latest released jar file, None if no builds
           have been released'''
        build = self.get_latest_released_build()
        if build:
            return reverse('get_latest_buildfile',
                           args=(self.id,
                                 build.get_jar_filename()))
        return None
    def get_latest_jad_url(self):
        '''Get the URL for the latest released jad file, None if no builds
           have been released'''
        build = self.get_latest_released_build()
        if build:
            return reverse('get_latest_buildfile',
                           args=(self.id,
                                 build.get_jad_filename()))
        return None
    def get_buildURL(self):
        """Hard coded build url for our build server (TeamCity project
           overview page for this project's build type)."""
        return 'http://build.dimagi.com:250/viewType.html?buildTypeId=bt%s' % self.project_id
    def num_builds(self):
        '''Get the number of builds associated with this project'''
        return self.builds.all().count()
    def __unicode__(self):
        return unicode(self.name)
UNKNOWN_IP = "0.0.0.0"
BUILD_STATUS = (
('build', 'Standard Build'),
('release', 'Release'),
)
class ProjectBuild(models.Model):
'''When a jad/jar is built, it should correspond to a unique ReleasePackage
With all corresponding meta information on release info and build
information such that it can be traced back to a url/build info in source
control.'''
project = models.ForeignKey(Project, related_name="builds")
uploaded_by = models.ForeignKey(User, related_name="builds_uploaded")
status = models.CharField(max_length=64, choices=BUILD_STATUS, default="build")
build_number = models.PositiveIntegerField(help_text="the teamcity build number")
revision_number = models.CharField(max_length=255, null=True, blank=True,
help_text="the source control revision number")
version = models.CharField(max_length=20, null=True, blank=True,
help_text = 'the "release" version. e.g. 2.0.1')
package_created = models.DateTimeField()
jar_file = models.FilePathField(_('JAR File Location'),
match='.*\.jar$',
recursive=True,
path=BUILDFILES_PATH,
max_length=255)
jad_file = models.FilePathField(_('JAD File Location'),
match='.*\.jad$',
recursive=True,
path=BUILDFILES_PATH,
max_length=255)
description = models.CharField(max_length=512, null=True, blank=True)
# release info
released = models.DateTimeField(null=True, blank=True)
released_by = models.ForeignKey(User, null=True, blank=True, related_name="builds_released")
def __unicode__(self):
return "%s build: %s. jad: %s, jar: %s" %\
(self.project, self.build_number, self.jad_file, self.jar_file)
def __str__(self):
return unicode(self).encode('utf-8')
def get_display_string(self):
'''Like calling str() but with a url attached'''
return "%s\nurl on server: %s" % (str(self),
build_url(reverse('show_build',
args=(self.id,))))
def get_jar_download_count(self):
return len(self.downloads.filter(type="jar"))
def get_jad_download_count(self):
return len(self.downloads.filter(type="jad"))
@property
def upload_information(self):
'''Get the upload request information associated with this,
if it is present.'''
try:
return BuildUpload.objects.get(build=self).log
except BuildUpload.DoesNotExist:
return None
def save(self):
"""Override save to provide some simple enforcement of uniqueness to the build numbers
generated by the submission of the build"""
if ProjectBuild.objects.filter(project=self.project).filter(build_number=self.build_number).count() > 0 and self.id == None:
raise Exception ("Error, the build number must be unique for this project build: " + str(self.build_number) + " project: " + str(self.project.id))
else:
super(ProjectBuild, self).save()
def get_jar_size(self):
return os.path.getsize(self.jar_file)
def get_jad_size(self):
return os.path.getsize(self.jad_file)
def get_jar_filename(self):
'''Returns the name (no paths) of the jar file'''
return os.path.basename(self.jar_file)
def get_jad_filename(self):
'''Returns the name (no paths) of the jad file'''
return os.path.basename(self.jad_file)
def get_zip_filename(self):
'''Returns the name (no paths) of the zip file, which will include the version number infromation'''
fname = os.path.basename(self.jar_file)
basename = os.path.splitext(fname)[0]
zipfilename = basename + "-build" + str(self.build_number) + ".zip"
return zipfilename
def get_jar_filestream(self):
try:
fin = open(self.jar_file,'r')
return fin
except Exception, e:
logging.error("Unable to open jarfile", extra={"exception": e,
"jar_file": self.jar_file,
"build_number": self.build_number,
"project_id": self.project.id})
def get_jad_filestream(self, mode='r'):
try:
fin = open(self.jad_file, mode)
return fin
except Exception, e:
logging.error("Unable to open jadfile", extra={"exception": e,
"jad_file": self.jad_file,
"build_number": self.build_number,
"project_id": self.project.id})
def get_zip_filestream(self):
try:
zpath = str(os.path.dirname(self.jar_file) + "/")
buf = StringIO()
zp = ZipStream(zpath)
for data in zp:
buf.write(data)
#print data
buf.flush()
buf.seek(0)
return buf.read()
except Exception, e:
logging.error("Unable to open create ZipStream", extra={"exception": e,
"build_number": self.build_number,
"project_id": self.project.id})
def get_jad_contents(self):
'''Returns the contents of the jad as text.'''
file = self.get_jad_filestream()
lines = []
for line in file:
lines.append(line.strip())
return "<br>".join(lines)
def get_jad_properties(self):
'''Reads the properties of the jad file and returns a dict'''
file = self.get_jad_filestream()
sep = ': '
proplines = [line.strip() for line in file.readlines() if line.strip()]
jad_properties = {}
for propln in proplines:
i = propln.find(sep)
if i == -1:
pass #log error?
(propname, propvalue) = (propln[:i], propln[i+len(sep):])
jad_properties[propname] = propvalue
return jad_properties
def write_jad(self, properties):
'''Write a property dictionary back to the jad file'''
ordered = ['MIDlet-Name', 'MIDlet-Version', 'MIDlet-Vendor', 'MIDlet-Jar-URL',
'MIDlet-Jar-Size', 'MIDlet-Info-URL', 'MIDlet-1']
for po in ordered:
if po not in properties.keys():
pass #log error -- required property is missing?
unordered = [propname for propname in properties.keys() if propname not in ordered]
ordered.extend(sorted(unordered))
proplines = ['%s: %s\n' % (propname, properties[propname]) for propname in ordered]
file = self.get_jad_filestream('w')
file.write(''.join(proplines))
file.close()
def add_jad_properties(self, propdict):
'''Add properties to the jad file'''
props = self.get_jad_properties()
props.update(propdict)
self.write_jad(props)
def get_xform_html_summary(self):
'''This is used by the view. It is pretty cool, but perhaps misplaced.'''
to_return = []
for form in self.xforms.all():
try:
to_return.append(form.get_link())
except Exception, e:
# we don't care about this
pass
if to_return:
return "<br>".join(to_return)
else:
return "No X-Forms found"
def get_zip_downloadurl(self):
"""do a reverse to get the urls for the given project/buildnumber for the direct zipfile download"""
return reverse('get_buildfile',
args=(self.project.id,
self.build_number,
self.get_zip_filename()))
def get_jar_downloadurl(self):
"""do a reverse to get the urls for the given project/buildnumber for the direct download"""
return reverse('get_buildfile',
args=(self.project.id,
self.build_number,
os.path.basename(self.jar_file)))
def get_jad_downloadurl(self):
"""do a reverse to get the urls for the given project/buildnumber for the direct download"""
return reverse('get_buildfile',
args=(self.project.id,
self.build_number,
os.path.basename(self.jad_file)))
def get_buildURL(self):
"""Hard coded build url for our build server"""
return 'http://build.dimagi.com:250/viewLog.html?buildTypeId=bt%s&buildNumber=%s' % \
(self.project.project_id, self.build_number)
def set_jadfile(self, filename, filestream):
"""Simple utility function to save the uploaded file to the right location and set the property of the model"""
try:
new_file_name = os.path.join(self._get_destination(), filename)
fout = open(new_file_name, 'w')
fout.write( filestream.read() )
fout.close()
self.jad_file = new_file_name
except Exception, e:
logging.error("Error, saving jadfile failed", extra={"exception":e, "jad_filename":filename})
def set_jarfile(self, filename, filestream):
"""Simple utility function to save the uploaded file to the right location and set the property of the model"""
try:
new_file_name = os.path.join(self._get_destination(), filename)
fout = open(new_file_name, 'wb')
fout.write( filestream.read() )
fout.close()
self.jar_file = new_file_name
except Exception, e:
logging.error("Error, saving jarfile failed", extra={"exception":e, "jar_filename":filename})
def _get_destination(self):
"""The directory this build saves its data to. Defined in
the config and then /xforms/<project_id>/<build_id>/ is
appended. If it doesn't exist, the directory is
created by this method."""
destinationpath = os.path.join(BUILDFILES_PATH,
str(self.project.id),
str(self.build_number))
if not os.path.exists(destinationpath):
os.makedirs(destinationpath)
return destinationpath
def validate_jar(self, include_xforms=False):
'''Validates this build's jar file. By default, does NOT validate
the jar's xforms.'''
validate_jar(self.jar_file, include_xforms)
def validate_xforms(self):
'''Validates this build's xforms.'''
errors = []
for form in self.xforms.all():
try:
xformvalidator.validate(form.file_location)
except Exception, e:
errors.append(e)
if errors:
raise BuildError("Problem validating xforms for %s!" % self, errors)
def check_and_release_xforms(self):
'''Checks this build's xforms against the xformmanager and releases
them, if they pass compatibility tests'''
errors = []
to_skip = []
to_register = []
for form in self.xforms.all():
try:
formdef = xformvalidator.validate(form.file_location)
modelform = FormDefModel.get_model(formdef.target_namespace,
self.project.domain,
formdef.version)
if modelform:
# if the model form exists we must ensure it is compatible
# with the version we are trying to release
existing_formdef = modelform.to_formdef()
differences = existing_formdef.get_differences(formdef)
if differences.is_empty():
# this is all good
to_skip.append(form)
else:
raise BuildError("""Schema %s is not compatible with %s.
Because of the following differences:
%s
You must update your version number!"""
% (existing_formdef, formdef, differences))
else:
# this must be registered
to_register.append(form)
except Exception, e:
errors.append(e)
if errors:
raise BuildError("Problem validating xforms for %s!" % self, errors)
# finally register
manager = XFormManager()
# TODO: we need transaction management
for form in to_register:
try:
formdefmodel = manager.add_schema(form.get_file_name(),
form.as_filestream(),
self.project.domain)
upload_info = self.upload_information
if upload_info:
formdefmodel.submit_ip = upload_info.ip
user = upload_info.user
else:
formdefmodel.submit_ip = UNKNOWN_IP
user = self.uploaded_by
formdefmodel.uploaded_by = user
formdefmodel.bytes_received = form.size
formdefmodel.form_display_name = form.get_file_name()
formdefmodel.save()
except Exception, e:
# log the error with the stack, otherwise this is hard to track down
info = sys.exc_info()
logging.error("Error registering form in build manager: %s\n%s" % \
(e, traceback.print_tb(info[2])))
errors.append(e)
if errors:
raise BuildError("Problem registering xforms for %s!" % self, errors)
def set_jad_released(self):
'''Set the appropriate 'release' properties in the jad'''
self.add_jad_properties({
'Build-Number': '*' + str(self.get_release_number()), #remove * once we get a real build number
'Released-on': time.strftime('%Y-%b-%d %H:%M', time.gmtime())
})
#FIXME!
def get_release_number(self):
'''return an incrementing build number per released build, unique across all builds for a given commcare project'''
import random
return random.randint(1000, 9999) #return a high random number until we get the incrementing plugged in
def release(self, user):
'''Release a build. This does a number of things:
1. Validates the Jar. The specifics of this are still in flux but at the very
least it should be extractable, and there should be at least one xform.
2. Ensures all the xforms have valid xmlns, version, and uiversion attributes
3. Checks if xforms with the same xmlns and version are registered already
If so: ensures the current forms are compatible with the registered forms
If not: registers the forms
4. Updates the build status to be released, sets the released and
released_by properties
This method will raise an exception if, for any reason above, the build cannot
be released.'''
if self.status == "release":
raise BuildError("Tried to release an already released build!")
else:
# TODO: we need transaction management. Any of these steps can raise exceptions
self.validate_jar()
self.validate_xforms()
self.check_and_release_xforms()
self.set_jad_released()
self.status = "release"
self.released = datetime.now()
self.released_by = user
self.save()
logging.error("%s just released build %s! We just thought you might want to be keeping tabs..." %
(user, self.get_display_string()))
def extract_and_link_xforms(sender, instance, created, **kwargs):
    '''Extracts all xforms from this build's jar and creates
       references on disk and model objects for them.
       post_save signal receiver for ProjectBuild (connected below);
       `instance` is the saved ProjectBuild.'''
    # only do this the first time we save, not on updates
    if not created:
        return
    try:
        xforms = extract_xforms(instance.jar_file, instance._get_destination())
        for form in xforms:
            # NOTE(review): form_model is never read -- objects.create() is
            # called only for its side effect of persisting the BuildForm
            form_model = BuildForm.objects.create(build=instance, file_location=form)
        num_created = len(instance.xforms.all())
        if num_created == 0:
            logging.warn("Build %s didn't have any linked xforms! Why not?!" % instance)
    except Exception, e:
        # best-effort: extraction failure must not abort the save that
        # triggered this signal, so just log it
        logging.error("Problem extracting xforms for build: %s, the error is: %s" %\
                      (instance, e))
post_save.connect(extract_and_link_xforms, sender=ProjectBuild)
class BuildForm(models.Model):
    """Class representing the location of a single build's xform on
       the file system."""
    build = models.ForeignKey(ProjectBuild, related_name="xforms")
    file_location = models.FilePathField(_('Xform Location'),
                                         recursive=True,
                                         path=BUILDFILES_PATH,
                                         max_length=255)
    def get_file_name(self):
        '''Get a readable file name for this xform'''
        return os.path.basename(self.file_location)
    @property
    def size(self):
        # size of the xform file on disk, in bytes
        return os.path.getsize(self.file_location)
    def get_url(self):
        '''Get the url where you can view this form'''
        return reverse('get_build_xform', args=(self.id,))
    def as_filestream(self):
        '''Gets a raw handle to the form as a file stream.  Returns None
           (and logs) on failure; the caller must close the handle.'''
        try:
            fin = open(self.file_location,'r')
            return fin
        except Exception, e:
            logging.error("Unable to open xform: %s" % self,
                          extra={"exception": e })
    def get_text(self):
        '''Gets the body of the xform, as text.  Returns None (and logs)
           if the file cannot be read.'''
        try:
            file = self.as_filestream()
            text = file.read()
            file.close()
            return text
        except Exception, e:
            logging.error("Unable to open xform: %s" % self,
                          extra={"exception": e })
    def to_html(self):
        '''Gets the body of the xform, as pretty printed text
           (syntax-highlighted when pygments is installed, see the
           optional import at the top of the module).'''
        raw_body = self.get_text()
        if pygments_found:
            return highlight(raw_body, HtmlLexer(), HtmlFormatter())
        return raw_body
    def get_link(self):
        '''A clickable html displayable version of this for use in templates'''
        return '<a href=%s target=_blank>%s</a>' % (self.get_url(), self.get_file_name())
    def __unicode__(self):
        return "%s: %s" % (self.build, self.get_file_name())
BUILD_FILE_TYPE = (
('jad', '.jad file'),
('jar', '.jar file'),
)
class BuildUpload(models.Model):
    """Represents an instance of the upload of a build.  One-to-one with
       both the build and the request log of the upload."""
    build = models.ForeignKey(ProjectBuild, unique=True)
    log = models.ForeignKey(RequestLog, unique=True)
class BuildDownload(models.Model):
    """Represents an instance of a download of a build file. Included are the
       type of file, the build id, and the request log."""
    # 'jad' or 'jar', see BUILD_FILE_TYPE above
    type = models.CharField(max_length=3, choices=BUILD_FILE_TYPE)
    build = models.ForeignKey(ProjectBuild, related_name="downloads")
    log = models.ForeignKey(RequestLog, unique=True)
    def __unicode__(self):
        return "%s download for build %s. Request: %s" %\
               (self.type, self.build, self.log)
| bsd-3-clause |
mozilla/addons-server | src/olympia/amo/tests/test_helpers.py | 1 | 15332 | # -*- coding: utf-8 -*-
import mimetypes
import os
from datetime import datetime, timedelta
from unittest.mock import Mock, patch
from django.conf import settings
from django.core.files.uploadedfile import SimpleUploadedFile
from django.urls import NoReverseMatch
from django.test.client import RequestFactory
from django.test.utils import override_settings
from django.utils.encoding import force_bytes
import pytest
from pyquery import PyQuery
import olympia
from olympia import amo
from olympia.amo import urlresolvers, utils
from olympia.amo.reverse import set_url_prefix
from olympia.amo.templatetags import jinja_helpers
from olympia.amo.tests import SQUOTE_ESCAPED, TestCase, reverse_ns
from olympia.amo.utils import ImageCheck
ADDONS_TEST_FILES = os.path.join(
os.path.dirname(olympia.__file__), 'devhub', 'tests', 'addons'
)
pytestmark = pytest.mark.django_db
def render(s, context=None):
    """Compile template string *s* with the project jinja environment and
    render it with *context* (an empty mapping when omitted)."""
    ctx = {} if context is None else context
    template = utils.from_string(s)
    return template.render(ctx)
def test_strip_controls():
# We want control codes like \x0c to disappear.
assert 'I ove you' == jinja_helpers.strip_controls('I \x0cove you')
def test_finalize():
"""We want None to show up as ''. We do this in JINJA_CONFIG."""
assert '' == render('{{ x }}', {'x': None})
def test_slugify_spaces():
"""We want slugify to preserve spaces, but not at either end."""
assert utils.slugify(' b ar ') == 'b-ar'
assert utils.slugify(' b ar ', spaces=True) == 'b ar'
assert utils.slugify(' b ar ', spaces=True) == 'b ar'
def test_page_title():
request = Mock()
title = 'Oh hai!'
s = render('{{ page_title("%s") }}' % title, {'request': request})
assert s == '%s :: Add-ons for Firefox' % title
# Check the dirty unicodes.
s = render(
'{{ page_title(x) }}',
{'request': request, 'x': force_bytes('\u05d0\u05d5\u05e1\u05e3')},
)
def test_page_title_markup():
"""If the title passed to page_title is a jinja2 Markup object, don't cast
it back to a string or it'll get double escaped. See issue #1062."""
request = Mock()
# Markup isn't double escaped.
res = render(
'{{ page_title("{0}"|format_html("It\'s all text")) }}', {'request': request}
)
assert res == f'It{SQUOTE_ESCAPED}s all text :: Add-ons for Firefox'
def test_template_escaping():
"""Test that tests various formatting scenarios we're using in our
templates and makes sure they're working as expected.
"""
# Simple HTML in a translatable string
expected = '<a href="...">This is a test</a>'
assert render('{{ _(\'<a href="...">This is a test</a>\') }}') == expected
# Simple HTML in a translatable string, with |format_html works
# as expected
expected = '<a href="...">This is a test</a>'
original = '{{ _(\'<a href="...">{0}</a>\')|format_html("This is a test") }}'
assert render(original) == expected
# The html provided in the translatable string won't be escaped
# but all arguments are.
expected = '<a href="...">This is a <h1>test</h1></a>'
original = (
'{{ _(\'<a href="...">{0}</a>\')|format_html("This is a <h1>test</h1>") }}'
)
assert render(original) == expected
# Unless marked explicitly as safe
expected = '<a href="...">This is a <h1>test</h1></a>'
original = (
'{{ _(\'<a href="...">{0}</a>\')'
'|format_html("This is a <h1>test</h1>"|safe) }}'
)
assert render(original) == expected
# Document how newstyle gettext behaves, everything that get's passed in
# like that needs to be escaped!
expected = '<script></script>'
assert render('{{ _(foo) }}', {'foo': '<script></script>'}) != expected
assert render('{{ _(foo|escape) }}', {'foo': '<script></script>'}) == expected
# Various tests for gettext related helpers and make sure they work
# properly just as `_()` does.
expected = '<b>5 users</b>'
assert (
render(
"{{ ngettext('<b>{0} user</b>', '<b>{0} users</b>', 2)" '|format_html(5) }}'
)
== expected
)
# You could also mark the whole output as |safe but note that this
# still escapes the arguments of |format_html unless explicitly
# marked as safe
expected = '<b><script> users</b>'
assert (
render(
"{{ ngettext('<b>{0} user</b>', '<b>{0} users</b>', 2)"
'|format_html("<script>")|safe }}'
)
== expected
)
@patch('olympia.amo.templatetags.jinja_helpers.reverse')
def test_url(mock_reverse):
render('{{ url("viewname", 1, z=2) }}')
mock_reverse.assert_called_with(
'viewname', args=(1,), kwargs={'z': 2}, add_prefix=True
)
render('{{ url("viewname", 1, z=2, host="myhost") }}')
mock_reverse.assert_called_with(
'viewname', args=(1,), kwargs={'z': 2}, add_prefix=True
)
def test_drf_url():
fragment = '{{ drf_url("addon-detail", pk="a3615") }}'
rf = RequestFactory()
request = rf.get('/hello/')
rendered = render(fragment, context={'request': request})
# As no /vX/ in the request, RESTFRAMEWORK['DEFAULT_VERSION'] is used.
assert rendered == jinja_helpers.absolutify(
reverse_ns('addon-detail', args=['a3615'])
)
with pytest.raises(NoReverseMatch):
# Without a request it can't resolve the name correctly.
render(fragment, context={})
def test_urlparams():
url = '/en-US/firefox/themes/category'
c = {
'base': url,
'base_frag': url + '#hash',
'base_query': url + '?x=y',
'sort': 'name',
'frag': 'frag',
}
# Adding a query.
s = render('{{ base_frag|urlparams(sort=sort) }}', c)
assert s == '%s?sort=name#hash' % url
# Adding a fragment.
s = render('{{ base|urlparams(frag) }}', c)
assert s == '%s#frag' % url
# Replacing a fragment.
s = render('{{ base_frag|urlparams(frag) }}', c)
assert s == '%s#frag' % url
# Adding query and fragment.
s = render('{{ base_frag|urlparams(frag, sort=sort) }}', c)
assert s == '%s?sort=name#frag' % url
# Adding query with existing params.
s = render('{{ base_query|urlparams(frag, sort=sort) }}', c)
amo.tests.assert_url_equal(s, '%s?sort=name&x=y#frag' % url)
# Replacing a query param.
s = render('{{ base_query|urlparams(frag, x="z") }}', c)
assert s == '%s?x=z#frag' % url
# Params with value of None get dropped.
s = render('{{ base|urlparams(sort=None) }}', c)
assert s == url
# Removing a query
s = render('{{ base_query|urlparams(x=None) }}', c)
assert s == url
def test_urlparams_unicode():
url = '/xx?evil=reco\ufffd\ufffd\ufffd\u02f5'
utils.urlparams(url)
def test_urlparams_returns_safe_string():
s = render('{{ "https://foo.com/"|urlparams(param="help+me") }}', {})
assert s == 'https://foo.com/?param=help%2Bme'
s = render('{{ "https://foo.com/"|urlparams(param="obiwankénobi") }}', {})
assert s == 'https://foo.com/?param=obiwank%C3%A9nobi'
s = render('{{ "https://foo.com/"|urlparams(param=42) }}', {})
assert s == 'https://foo.com/?param=42'
s = render('{{ "https://foo.com/"|urlparams(param="") }}', {})
assert s == 'https://foo.com/?param='
s = render('{{ "https://foo.com/"|urlparams(param="help%2Bme") }}', {})
assert s == 'https://foo.com/?param=help%2Bme'
s = render('{{ "https://foo.com/"|urlparams(param="a%20b") }}', {})
assert s == 'https://foo.com/?param=a+b'
s = render('{{ "https://foo.com/"|urlparams(param="%AAA") }}', {})
assert s == 'https://foo.com/?param=%AAA'
string = render(
'{{ unsafe_url|urlparams }}',
{
'unsafe_url': "http://url.with?foo=<script>alert('awesome')</script>"
'&baa=that'
},
)
assert string == (
'http://url.with?foo=%3Cscript%3Ealert%28%27awesome%27%29%3C%2Fscript%3E'
'&baa=that'
)
string = render(
'{{ "http://safe.url?baa=that"|urlparams(foo=unsafe_param) }}',
{'unsafe_param': "<script>alert('awesome')</script>"},
)
assert string == (
'http://safe.url?baa=that'
'&foo=%3Cscript%3Ealert%28%27awesome%27%29%3C%2Fscript%3E'
)
def test_isotime():
time = datetime(2009, 12, 25, 10, 11, 12)
s = render('{{ d|isotime }}', {'d': time})
assert s == '2009-12-25T10:11:12Z'
s = render('{{ d|isotime }}', {'d': None})
assert s == ''
def test_epoch():
time = datetime(2009, 12, 25, 10, 11, 12)
s = render('{{ d|epoch }}', {'d': time})
assert s == '1261735872'
s = render('{{ d|epoch }}', {'d': None})
assert s == ''
def test_locale_url():
rf = RequestFactory()
request = rf.get('/de', SCRIPT_NAME='/z')
prefixer = urlresolvers.Prefixer(request)
set_url_prefix(prefixer)
s = render('{{ locale_url("mobile") }}')
assert s == '/z/de/mobile'
def test_external_url():
    """|external_url should route through the configured bounce redirector."""
    # Use override_settings (already used elsewhere in this module) instead
    # of a hand-rolled save/try/finally/restore dance: it restores the
    # settings reliably and reads much more clearly.
    with override_settings(REDIRECT_URL='http://example.net',
                           REDIRECT_SECRET_KEY='sekrit'):
        myurl = 'http://example.com'
        s = render('{{ "%s"|external_url }}' % myurl)
        assert s == urlresolvers.get_outgoing_url(myurl)
@patch('olympia.amo.templatetags.jinja_helpers.urlresolvers.get_outgoing_url')
def test_linkify_bounce_url_callback(mock_get_outgoing_url):
mock_get_outgoing_url.return_value = 'bar'
res = urlresolvers.linkify_bounce_url_callback({(None, 'href'): 'foo'})
# Make sure get_outgoing_url was called.
assert res == {(None, 'href'): 'bar'}
mock_get_outgoing_url.assert_called_with('foo')
@patch(
'olympia.amo.templatetags.jinja_helpers.urlresolvers.linkify_bounce_url_callback'
)
def test_linkify_with_outgoing_text_links(mock_linkify_bounce_url_callback):
def side_effect(attrs, new=False):
attrs[(None, 'href')] = 'bar'
return attrs
mock_linkify_bounce_url_callback.side_effect = side_effect
res = urlresolvers.linkify_with_outgoing('a text http://example.com link')
# Use PyQuery because the attributes could be rendered in any order.
doc = PyQuery(res)
assert doc('a[href="bar"][rel="nofollow"]')[0].text == 'http://example.com'
@patch(
'olympia.amo.templatetags.jinja_helpers.urlresolvers.linkify_bounce_url_callback'
)
def test_linkify_with_outgoing_markup_links(mock_linkify_bounce_url_callback):
def side_effect(attrs, new=False):
attrs[(None, 'href')] = 'bar'
return attrs
mock_linkify_bounce_url_callback.side_effect = side_effect
res = urlresolvers.linkify_with_outgoing(
'a markup <a href="http://example.com">link</a> with text'
)
# Use PyQuery because the attributes could be rendered in any order.
doc = PyQuery(res)
assert doc('a[href="bar"][rel="nofollow"]')[0].text == 'link'
def get_image_path(name):
    """Absolute path to test image *name* under src/olympia/amo/tests/images."""
    return os.path.join(settings.ROOT, 'src', 'olympia', 'amo', 'tests', 'images', name)
def get_uploaded_file(name):
    """Return a SimpleUploadedFile wrapping the named test image.

    The source file is read inside a ``with`` block so the handle is
    closed (the previous implementation leaked an open file handle).
    """
    with open(get_image_path(name), mode='rb') as source:
        data = source.read()
    return SimpleUploadedFile(name, data, content_type=mimetypes.guess_type(name)[0])
def get_addon_file(name):
    """Absolute path to test add-on file *name* (see ADDONS_TEST_FILES above)."""
    return os.path.join(ADDONS_TEST_FILES, name)
class TestAnimatedImages(TestCase):
    """ImageCheck detection of animated vs. static images and non-images."""

    def _is_animated(self, name):
        # Open inside a `with` so the handle is closed (the previous
        # implementation leaked one open file per check).
        with open(get_image_path(name), mode='rb') as fobj:
            return ImageCheck(fobj).is_animated()

    def test_animated_images(self):
        assert self._is_animated('animated.png')
        assert not self._is_animated('non-animated.png')
        assert self._is_animated('animated.gif')
        assert not self._is_animated('non-animated.gif')

    def test_junk(self):
        # This source file is not an image at all.
        with open(__file__, 'rb') as fobj:
            assert not ImageCheck(fobj).is_image()
        with open(get_image_path('non-animated.gif'), mode='rb') as fobj:
            assert ImageCheck(fobj).is_image()
def test_jinja_trans_monkeypatch():
# This tests the monkeypatch in manage.py that prevents localizers from
# taking us down.
render('{% trans come_on=1 %}% (come_on)s{% endtrans %}')
render('{% trans come_on=1 %}%(come_on){% endtrans %}')
render('{% trans come_on=1 %}%(come_on)z{% endtrans %}')
@pytest.mark.parametrize(
'url,site,expected',
[
('', None, settings.EXTERNAL_SITE_URL),
('', '', settings.EXTERNAL_SITE_URL),
(None, None, settings.EXTERNAL_SITE_URL),
('foo', None, f'{settings.EXTERNAL_SITE_URL}/foo'),
('foobar', 'http://amo.com', 'http://amo.com/foobar'),
('abc', 'https://localhost', 'https://localhost/abc'),
('http://addons.mozilla.org', None, 'http://addons.mozilla.org'),
('https://addons.mozilla.org', None, 'https://addons.mozilla.org'),
('https://amo.com', 'https://addons.mozilla.org', 'https://amo.com'),
('woo', 'www', 'woo'),
],
)
def test_absolutify(url, site, expected):
"""Make sure we correct join a base URL and a possibly relative URL."""
assert jinja_helpers.absolutify(url, site) == expected
def test_timesince():
month_ago = datetime.now() - timedelta(days=30)
assert jinja_helpers.timesince(month_ago) == '1 month ago'
assert jinja_helpers.timesince(None) == ''
def test_timeuntil():
a_month_in_the_future = datetime.now() + timedelta(days=31)
assert jinja_helpers.timeuntil(a_month_in_the_future) == '1 month'
a_week_in_the_future = datetime.now() + timedelta(days=14, hours=1)
assert jinja_helpers.timeuntil(a_week_in_the_future) == '2 weeks'
def test_format_unicode():
# This makes sure there's no UnicodeEncodeError when doing the string
# interpolation.
assert render('{{ "foo {0}"|format_html("baré") }}') == 'foo baré'
class TestStoragePath(TestCase):
@override_settings(ADDONS_PATH=None, MEDIA_ROOT='/path/')
def test_without_settings(self):
del settings.ADDONS_PATH
path = jinja_helpers.user_media_path('addons')
assert path == '/path/addons'
@override_settings(ADDONS_PATH='/another/path/')
def test_with_settings(self):
path = jinja_helpers.user_media_path('addons')
assert path == '/another/path/'
class TestMediaUrl(TestCase):
@override_settings(USERPICS_URL=None)
def test_without_settings(self):
del settings.USERPICS_URL
settings.MEDIA_URL = '/mediapath/'
url = jinja_helpers.user_media_url('userpics')
assert url == '/mediapath/userpics/'
# Fixture template: whitespace between tags inside the {% spaceless %} block
# should be stripped, while text inside attributes and tag bodies is kept.
SPACELESS_TEMPLATE = """
<div> <div>outside</div>
<b>tag</b> <em>is fine</em>
{% spaceless %}
<div prop=" inside props is left alone ">not</div>
<i>space </i> <span>between
</span>
{% endspaceless %}
<div>outside again </div>
</div>
"""
# Expected rendering of SPACELESS_TEMPLATE.
SPACELESS_RESULT = """
<div> <div>outside</div>
<b>tag</b> <em>is fine</em>
<div prop=" inside props is left alone ">not</div><i>space </i><span>between
</span><div>outside again </div>
</div>"""
def test_spaceless_extension():
    """The spaceless extension collapses only inter-tag whitespace."""
    assert render(SPACELESS_TEMPLATE) == SPACELESS_RESULT
| bsd-3-clause |
jcupitt/sorl-thumbnail | sorl/thumbnail/kvstores/cached_db_kvstore.py | 10 | 2035 | from django.core.cache import cache, InvalidCacheBackendError
from sorl.thumbnail.compat import get_cache
from sorl.thumbnail.kvstores.base import KVStoreBase
from sorl.thumbnail.conf import settings
from sorl.thumbnail.models import KVStore as KVStoreModel
class EMPTY_VALUE(object):
    """Sentinel cached in place of a value known to be absent from the DB."""
class KVStore(KVStoreBase):
    """Key/value store persisted in the database, fronted by the Django cache."""

    def __init__(self):
        super(KVStore, self).__init__()

    @property
    def cache(self):
        """Resolve the configured cache backend lazily.

        Falls back to the default cache when THUMBNAIL_CACHE names an
        invalid backend.
        """
        try:
            kv_cache = get_cache(settings.THUMBNAIL_CACHE)
        except InvalidCacheBackendError:
            kv_cache = cache
        return kv_cache

    def clear(self, delete_thumbnails=False):
        """
        We can clear the database more efficiently using the prefix here rather
        than calling :meth:`_delete_raw`.
        """
        prefix = settings.THUMBNAIL_KEY_PREFIX
        for key in self._find_keys_raw(prefix):
            self.cache.delete(key)
        KVStoreModel.objects.filter(key__startswith=prefix).delete()
        if delete_thumbnails:
            self.delete_all_thumbnail_files()

    def _get_raw(self, key):
        """Return the value for ``key``, consulting the cache before the DB.

        Misses are negatively cached with the EMPTY_VALUE sentinel so a
        repeatedly-missing key does not hit the database every time.
        """
        value = self.cache.get(key)
        if value is None:
            try:
                value = KVStoreModel.objects.get(key=key).value
            except KVStoreModel.DoesNotExist:
                # we set the cache to prevent further db lookups
                value = EMPTY_VALUE
            self.cache.set(key, value, settings.THUMBNAIL_CACHE_TIMEOUT)
        if value == EMPTY_VALUE:
            return None
        return value

    def _set_raw(self, key, value):
        """Write ``key``/``value`` to the DB row and refresh the cache.

        BUG FIX: the previous implementation only called get_or_create with
        ``defaults``, so an existing row kept its stale value while the cache
        received the new one; now the row is updated when it already exists.
        """
        kvstore_value, created = KVStoreModel.objects.get_or_create(
            key=key, defaults={'value': value})
        if not created:
            kvstore_value.value = value
            kvstore_value.save()
        self.cache.set(key, value, settings.THUMBNAIL_CACHE_TIMEOUT)

    def _delete_raw(self, *keys):
        """Remove ``keys`` from both the DB and the cache."""
        KVStoreModel.objects.filter(key__in=keys).delete()
        for key in keys:
            self.cache.delete(key)

    def _find_keys_raw(self, prefix):
        """Return all stored keys starting with ``prefix`` (DB only)."""
        qs = KVStoreModel.objects.filter(key__startswith=prefix)
        return qs.values_list('key', flat=True)
| bsd-3-clause |
pearsonlab/nipype | nipype/interfaces/semtools/filtering/tests/test_auto_GenerateTestImage.py | 12 | 1315 | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from .....testing import assert_equal
from ..featuredetection import GenerateTestImage
def test_GenerateTestImage_inputs():
    """Every input trait's metadata must match the auto-generated expectation."""
    input_map = dict(args=dict(argstr='%s',
    ),
    environ=dict(nohash=True,
    usedefault=True,
    ),
    ignore_exception=dict(nohash=True,
    usedefault=True,
    ),
    inputVolume=dict(argstr='--inputVolume %s',
    ),
    lowerBoundOfOutputVolume=dict(argstr='--lowerBoundOfOutputVolume %f',
    ),
    outputVolume=dict(argstr='--outputVolume %s',
    hash_files=False,
    ),
    outputVolumeSize=dict(argstr='--outputVolumeSize %f',
    ),
    terminal_output=dict(nohash=True,
    ),
    upperBoundOfOutputVolume=dict(argstr='--upperBoundOfOutputVolume %f',
    ),
    )
    inputs = GenerateTestImage.input_spec()
    # Generator-style nose test: yield one assertion per trait/metadata pair.
    for key, metadata in list(input_map.items()):
        for metakey, value in list(metadata.items()):
            yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_GenerateTestImage_outputs():
    """Every output trait's metadata must match the auto-generated expectation."""
    output_map = dict(outputVolume=dict(),
    )
    outputs = GenerateTestImage.output_spec()
    for key, metadata in list(output_map.items()):
        for metakey, value in list(metadata.items()):
            yield assert_equal, getattr(outputs.traits()[key], metakey), value
| bsd-3-clause |
chiefspace/udemy-rest-api | udemy_rest_api_section5/env/lib/python3.4/site-packages/pip/__init__.py | 7 | 11392 | #!/usr/bin/env python
import os
import optparse
import sys
import re
import errno
# Debian virtual environment (venv) support. When inside a venv, we have to
# add all the devendorized wheels to sys.path from inside the venv, otherwise
# the devendorized packages won't be found. Only do this in a venv so it
# doesn't affect global pip operation. venv determination is a bit of a black
# art, but this algorithm should work in both Python 2 (virtualenv-only) and
# Python 3 (pyvenv and virtualenv). - barry@debian.org 2014-06-03
base_prefix = getattr(sys, 'base_prefix', None)
real_prefix = getattr(sys, 'real_prefix', None)
if base_prefix is None:
    # Python 2 has no base_prefix at all. It also has no pyvenv. Fall back
    # to checking real_prefix.
    if real_prefix is None:
        # We are not in a venv.
        in_venv = False
    else:
        # We're in a Python 2 virtualenv created venv, but real_prefix should
        # never be the same as sys.prefix.
        assert sys.prefix != real_prefix
        in_venv = True
elif sys.prefix != base_prefix:
    # We're in a Python 3, pyvenv created venv.
    in_venv = True
elif real_prefix is None:
    # We're in Python 3, outside a venv, but base better equal prefix.
    assert sys.prefix == base_prefix
    in_venv = False
else:
    # We're in a Python 3, virtualenv created venv.
    assert real_prefix != sys.prefix
    in_venv = True
# The wheel directory lives inside the venv when we're in one, otherwise
# it is the Debian system-wide location.
if in_venv:
    wheel_dir = os.path.join(sys.prefix, 'lib', 'python-wheels')
else:
    wheel_dir = '/usr/share/python-wheels'
# We'll add all the wheels we find to the front of sys.path so that they're
# found first, even if the same dependencies are available in site-packages.
try:
    for filename in os.listdir(wheel_dir):
        if os.path.splitext(filename)[1] == '.whl':
            sys.path.insert(0, os.path.join(wheel_dir, filename))
# FileNotFoundError doesn't exist in Python 2, but ignore it anyway.
except OSError as error:
    if error.errno != errno.ENOENT:
        raise
from pip.exceptions import InstallationError, CommandError, PipError
from pip.log import logger
from pip.util import get_installed_distributions, get_prog
from pip.vcs import git, mercurial, subversion, bazaar # noqa
from pip.baseparser import ConfigOptionParser, UpdatingDefaultsHelpFormatter
from pip.commands import commands, get_summaries, get_similar_commands
# This fixes a peculiarity when importing via __import__ - as we are
# initialising the pip module, "from pip import cmdoptions" is recursive
# and appears not to work properly in that situation.
import pip.cmdoptions
cmdoptions = pip.cmdoptions
# The version as used in the setup.py and the docs conf.py
__version__ = "1.5.4"
def autocomplete():
    """Command and option completion for the main option parser (and options)
    and its subcommands (and options).

    Enable by sourcing one of the completion shell scripts (bash or zsh).
    Reads COMP_WORDS/COMP_CWORD from the environment, prints candidate
    completions, and exits; returns immediately (None) when completion is
    not active.
    """
    # Don't complete if user hasn't sourced bash_completion file.
    if 'PIP_AUTO_COMPLETE' not in os.environ:
        return
    cwords = os.environ['COMP_WORDS'].split()[1:]
    cword = int(os.environ['COMP_CWORD'])
    try:
        current = cwords[cword - 1]
    except IndexError:
        current = ''
    subcommands = [cmd for cmd, summary in get_summaries()]
    options = []
    # find the subcommand token, if any, among the words typed so far
    try:
        subcommand_name = [w for w in cwords if w in subcommands][0]
    except IndexError:
        subcommand_name = None
    parser = create_main_parser()
    # subcommand options
    if subcommand_name:
        # special case: 'help' subcommand has no options
        if subcommand_name == 'help':
            sys.exit(1)
        # special case: list locally installed dists for uninstall command
        if subcommand_name == 'uninstall' and not current.startswith('-'):
            installed = []
            lc = current.lower()
            for dist in get_installed_distributions(local_only=True):
                if dist.key.startswith(lc) and dist.key not in cwords[1:]:
                    installed.append(dist.key)
            # if there are no dists installed, fall back to option completion
            if installed:
                for dist in installed:
                    print(dist)
                sys.exit(1)
        subcommand = commands[subcommand_name]()
        options += [(opt.get_opt_string(), opt.nargs)
                    for opt in subcommand.parser.option_list_all
                    if opt.help != optparse.SUPPRESS_HELP]
        # filter out previously specified options from available options
        prev_opts = [x.split('=')[0] for x in cwords[1:cword - 1]]
        options = [(x, v) for (x, v) in options if x not in prev_opts]
        # filter options by current input
        options = [(k, v) for k, v in options if k.startswith(current)]
        for option in options:
            opt_label = option[0]
            # append '=' to options which require args
            if option[1]:
                opt_label += '='
            print(opt_label)
    else:
        # show main parser options only when necessary
        # (FIX: the old "or current.startswith('--')" clause was redundant —
        # any token starting with '--' already starts with '-')
        if current.startswith('-'):
            opts = [i.option_list for i in parser.option_groups]
            opts.append(parser.option_list)
            opts = (o for it in opts for o in it)
            subcommands += [i.get_opt_string() for i in opts
                            if i.help != optparse.SUPPRESS_HELP]
        print(' '.join([x for x in subcommands if x.startswith(current)]))
    sys.exit(1)
def create_main_parser():
    """Build the top-level ConfigOptionParser used by the pip CLI."""
    parser = ConfigOptionParser(
        usage='\n%prog <command> [options]',
        add_help_option=False,
        formatter=UpdatingDefaultsHelpFormatter(),
        name='global',
        prog=get_prog(),
    )
    parser.disable_interspersed_args()
    pip_pkg_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    parser.version = 'pip %s from %s (python %s)' % (
        __version__, pip_pkg_dir, sys.version[:3])
    # attach the general option group
    general_options = cmdoptions.make_option_group(
        cmdoptions.general_group, parser)
    parser.add_option_group(general_options)
    parser.main = True  # so the help formatter knows
    # build the command listing shown in the parser description
    summaries = get_summaries()
    listing = [''] + ['%-27s %s' % (name, summary)
                      for name, summary in summaries]
    parser.description = '\n'.join(listing)
    return parser
def parseopts(args):
    """Split ``args`` into ``(cmd_name, cmd_args)``.

    ``cmd_name`` is the lower-cased subcommand; ``cmd_args`` is ``args``
    minus the subcommand token. Prints version/help and exits when
    requested; raises CommandError for an unknown subcommand.
    """
    parser = create_main_parser()
    # Note: parser calls disable_interspersed_args(), so the result of this
    # call is to split the initial args into the general options before the
    # subcommand and everything else.
    # For example:
    #  args: ['--timeout=5', 'install', '--user', 'INITools']
    #  general_options: ['--timeout==5']
    #  args_else: ['install', '--user', 'INITools']
    general_options, args_else = parser.parse_args(args)
    # --version
    if general_options.version:
        sys.stdout.write(parser.version)
        sys.stdout.write(os.linesep)
        sys.exit()
    # pip || pip help -> print_help()
    if not args_else or (args_else[0] == 'help' and len(args_else) == 1):
        parser.print_help()
        sys.exit()
    # the subcommand name
    cmd_name = args_else[0].lower()
    # all the args without the subcommand
    cmd_args = args[:]
    # BUG FIX: remove the token exactly as the user typed it. Removing the
    # lower-cased form raised ValueError for mixed-case input (e.g.
    # ``pip Install``), since ``args`` holds the original casing.
    cmd_args.remove(args_else[0])
    if cmd_name not in commands:
        guess = get_similar_commands(cmd_name)
        msg = ['unknown command "%s"' % cmd_name]
        if guess:
            msg.append('maybe you meant "%s"' % guess)
        raise CommandError(' - '.join(msg))
    return cmd_name, cmd_args
def main(initial_args=None):
    """Console entry point: parse the command line and run the subcommand."""
    if initial_args is None:
        initial_args = sys.argv[1:]
    autocomplete()
    try:
        cmd_name, cmd_args = parseopts(initial_args)
    except PipError:
        # sys.exc_info keeps this compatible with very old interpreters.
        err = sys.exc_info()[1]
        sys.stderr.write("ERROR: %s" % err)
        sys.stderr.write(os.linesep)
        sys.exit(1)
    command = commands[cmd_name]()
    return command.main(cmd_args)
def bootstrap():
    """
    Bootstrapping function to be called from install-pip.py script.
    """
    packages = ['pip']
    # Only pull in setuptools when it is not already importable.
    try:
        import setuptools  # noqa
    except ImportError:
        packages.append('setuptools')
    return main(['install', '--upgrade'] + packages + sys.argv[1:])
############################################################
## Writing freeze files
class FrozenRequirement(object):
    """One requirement line (plus comments) for ``pip freeze`` output."""

    def __init__(self, name, req, editable, comments=()):
        self.name = name
        self.req = req
        self.editable = editable
        self.comments = comments

    # Version suffixes that indicate an svn checkout: ``-r<rev>`` or a
    # ``-20YYMMDD`` date stamp.
    _rev_re = re.compile(r'-r(\d+)$')
    _date_re = re.compile(r'-(20\d\d\d\d\d\d)$')

    @classmethod
    def from_dist(cls, dist, dependency_links, find_tags=False):
        """Build a FrozenRequirement from an installed distribution.

        VCS checkouts become editable (``-e``) requirements; everything
        else is frozen as ``name==version``, with a special case for
        svn-revision-stamped versions.
        """
        location = os.path.normcase(os.path.abspath(dist.location))
        comments = []
        # local import to avoid a circular dependency at module load time
        from pip.vcs import vcs, get_src_requirement
        if vcs.get_backend_name(location):
            # Installed from a VCS checkout: try to reconstruct the URL.
            editable = True
            try:
                req = get_src_requirement(dist, location, find_tags)
            except InstallationError:
                ex = sys.exc_info()[1]
                logger.warn("Error when trying to get requirement for VCS system %s, falling back to uneditable format" % ex)
                req = None
            if req is None:
                # Fall back to a pinned, non-editable requirement.
                logger.warn('Could not determine repository location of %s' % location)
                comments.append('## !! Could not determine repository location')
                req = dist.as_requirement()
                editable = False
        else:
            editable = False
            req = dist.as_requirement()
            specs = req.specs
            # as_requirement() always pins to exactly one '==' version
            assert len(specs) == 1 and specs[0][0] in ["==", "==="]
            version = specs[0][1]
            ver_match = cls._rev_re.search(version)
            date_match = cls._date_re.search(version)
            if ver_match or date_match:
                # Version carries an svn revision/date stamp; try to turn it
                # back into an editable svn URL via dependency_links.
                svn_backend = vcs.get_backend('svn')
                if svn_backend:
                    svn_location = svn_backend(
                    ).get_location(dist, dependency_links)
                if not svn_location:
                    logger.warn(
                        'Warning: cannot find svn location for %s' % req)
                    comments.append('## FIXME: could not find svn URL in dependency_links for this package:')
                else:
                    comments.append('# Installing as editable to satisfy requirement %s:' % req)
                    if ver_match:
                        rev = ver_match.group(1)
                    else:
                        rev = '{%s}' % date_match.group(1)
                    editable = True
                    req = '%s@%s#egg=%s' % (svn_location, rev, cls.egg_name(dist))
        return cls(dist.project_name, req, editable, comments)

    @staticmethod
    def egg_name(dist):
        """Return the egg name with any trailing ``-pyX.Y`` tag stripped."""
        name = dist.egg_name()
        match = re.search(r'-py\d\.\d$', name)
        if match:
            name = name[:match.start()]
        return name

    def __str__(self):
        # Render comments first, then the (possibly editable) requirement.
        req = self.req
        if self.editable:
            req = '-e %s' % req
        return '\n'.join(list(self.comments) + [str(req)]) + '\n'
if __name__ == '__main__':
    # FIX: don't shadow the builtin ``exit`` with the return value of main().
    status = main()
    if status:
        sys.exit(status)
| gpl-2.0 |
tastynoodle/django | django/contrib/gis/db/backends/spatialite/base.py | 119 | 3209 | import sys
from ctypes.util import find_library
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db.backends.sqlite3.base import (Database,
DatabaseWrapper as SQLiteDatabaseWrapper, SQLiteCursorWrapper)
from django.contrib.gis.db.backends.spatialite.client import SpatiaLiteClient
from django.contrib.gis.db.backends.spatialite.creation import SpatiaLiteCreation
from django.contrib.gis.db.backends.spatialite.introspection import SpatiaLiteIntrospection
from django.contrib.gis.db.backends.spatialite.operations import SpatiaLiteOperations
from django.utils import six
class DatabaseWrapper(SQLiteDatabaseWrapper):
    """SQLite backend wrapper that loads the SpatiaLite C extension."""

    def __init__(self, *args, **kwargs):
        # Before we get too far, make sure pysqlite 2.5+ is installed.
        if Database.version_info < (2, 5, 0):
            raise ImproperlyConfigured('Only versions of pysqlite 2.5+ are '
                                       'compatible with SpatiaLite and GeoDjango.')

        # Trying to find the location of the SpatiaLite library.
        # Here we are figuring out the path to the SpatiaLite library
        # (`libspatialite`). If it's not in the system library path (e.g., it
        # cannot be found by `ctypes.util.find_library`), then it may be set
        # manually in the settings via the `SPATIALITE_LIBRARY_PATH` setting.
        self.spatialite_lib = getattr(settings, 'SPATIALITE_LIBRARY_PATH',
                                      find_library('spatialite'))
        if not self.spatialite_lib:
            raise ImproperlyConfigured('Unable to locate the SpatiaLite library. '
                                       'Make sure it is in your library path, or set '
                                       'SPATIALITE_LIBRARY_PATH in your settings.'
                                       )
        super(DatabaseWrapper, self).__init__(*args, **kwargs)
        # Swap in the SpatiaLite-aware helper classes.
        self.ops = SpatiaLiteOperations(self)
        self.client = SpatiaLiteClient(self)
        self.creation = SpatiaLiteCreation(self)
        self.introspection = SpatiaLiteIntrospection(self)

    def get_new_connection(self, conn_params):
        """Open a SQLite connection and load the SpatiaLite extension into it.

        Raises ImproperlyConfigured when pysqlite was built without
        extension-loading support or the extension itself fails to load.
        """
        conn = super(DatabaseWrapper, self).get_new_connection(conn_params)
        # Enabling extension loading on the SQLite connection.
        try:
            conn.enable_load_extension(True)
        except AttributeError:
            raise ImproperlyConfigured(
                'The pysqlite library does not support C extension loading. '
                'Both SQLite and pysqlite must be configured to allow '
                'the loading of extensions to use SpatiaLite.')
        # Loading the SpatiaLite library extension on the connection, and returning
        # the created cursor.
        cur = conn.cursor(factory=SQLiteCursorWrapper)
        try:
            cur.execute("SELECT load_extension(%s)", (self.spatialite_lib,))
        except Exception as msg:
            new_msg = (
                'Unable to load the SpatiaLite library extension '
                '"%s" because: %s') % (self.spatialite_lib, msg)
            # re-raise with the original traceback (Python 2/3 compatible)
            six.reraise(ImproperlyConfigured, ImproperlyConfigured(new_msg), sys.exc_info()[2])
        cur.close()
        return conn
| bsd-3-clause |
niavlys/kivy | kivy/core/audio/audio_ffpyplayer.py | 39 | 5984 | '''
FFmpeg based audio player
=========================
To use, you need to install ffpyplayer and have a compiled ffmpeg shared
library.
https://github.com/matham/ffpyplayer
The docs there describe how to set this up. But briefly, first you need to
compile ffmpeg using the shared flags while disabling the static flags (you'll
probably have to set the fPIC flag, e.g. CFLAGS=-fPIC). Here's some
instructions: https://trac.ffmpeg.org/wiki/CompilationGuide. For Windows, you
can download compiled GPL binaries from http://ffmpeg.zeranoe.com/builds/.
Similarly, you should download SDL.
Now, you should have an ffmpeg and an sdl directory. In each, you should have an include,
bin, and lib directory, where e.g. for Windows, lib contains the .dll.a files,
while bin contains the actual dlls. The include directory holds the headers.
The bin directory is only needed if the shared libraries are not already on
the path. In the environment define FFMPEG_ROOT and SDL_ROOT, each pointing to
the ffmpeg, and SDL directories, respectively. (If you're using SDL2,
the include directory will contain a directory called SDL2, which then holds
the headers).
Once defined, download the ffpyplayer git and run
python setup.py build_ext --inplace
Finally, before running you need to ensure that ffpyplayer is in python's path.
..Note::
When kivy exits by closing the window while the audio is playing,
it appears that the __del__ method of SoundFFPy
is not called. Because of this the SoundFFPy object is not
properly deleted when kivy exits. The consequence is that because
MediaPlayer creates internal threads which do not have their daemon
flag set, when the main thread exits it'll hang and wait for the other
MediaPlayer threads to exit. But since __del__ is not called to delete the
MediaPlayer object, those threads will remain alive hanging kivy. What this
means is that you have to be sure to delete the MediaPlayer object before
kivy exits by setting it to None.
'''
__all__ = ('SoundFFPy', )
try:
    import ffpyplayer
    from ffpyplayer.player import MediaPlayer
    from ffpyplayer.tools import set_log_callback, loglevels,\
        get_log_callback, formats_in
except ImportError:
    # FIX: narrowed from a bare ``except:`` (which caught and re-raised
    # everything, including KeyboardInterrupt, to no effect). Kept as a hook
    # for a friendlier "install ffpyplayer" message in the future.
    raise
from kivy.clock import Clock
from kivy.logger import Logger
from kivy.core.audio import Sound, SoundLoader
from kivy.weakmethod import WeakMethod
import time
Logger.info('SoundFFPy: Using ffpyplayer {}'.format(ffpyplayer.version))
# Map ffpyplayer log-level names onto the closest kivy Logger methods.
logger_func = {'quiet': Logger.critical, 'panic': Logger.critical,
               'fatal': Logger.critical, 'error': Logger.error,
               'warning': Logger.warning, 'info': Logger.info,
               'verbose': Logger.debug, 'debug': Logger.debug}
def _log_callback(message, level):
    """Forward a non-empty ffpyplayer log message to the matching kivy logger."""
    text = message.strip()
    if not text:
        return
    logger_func[level]('ffpyplayer: {}'.format(text))
class SoundFFPy(Sound):
    """Audio provider implemented on top of ffpyplayer's MediaPlayer."""

    @staticmethod
    def extensions():
        """Return the container/codec formats ffpyplayer can demux."""
        return formats_in

    def __init__(self, **kwargs):
        self._ffplayer = None
        self.quitted = False
        self._log_callback_set = False
        self._state = ''
        self.state = 'stop'
        # WeakMethod so the player's C-side callback does not keep us alive.
        self._callback_ref = WeakMethod(self._player_callback)
        if not get_log_callback():
            set_log_callback(_log_callback)
            self._log_callback_set = True
        super(SoundFFPy, self).__init__(**kwargs)

    def __del__(self):
        self.unload()
        if self._log_callback_set:
            set_log_callback(None)

    def _player_callback(self, selector, value):
        """Handle MediaPlayer events; runs on ffpyplayer's internal threads,
        so all real work is deferred to the kivy main thread via Clock."""
        if self._ffplayer is None:
            return
        if selector == 'quit':
            def close(*args):
                self.quitted = True
                self.unload()
            Clock.schedule_once(close, 0)
        elif selector == 'eof':
            Clock.schedule_once(self._do_eos, 0)

    def load(self):
        """Create the MediaPlayer for ``self.source`` (audio only, paused)
        and wait briefly until its metadata (duration) becomes available."""
        self.unload()
        ff_opts = {'vn': True, 'sn': True}  # only audio
        self._ffplayer = MediaPlayer(self.source,
                                     callback=self._callback_ref,
                                     loglevel='info', ff_opts=ff_opts)
        player = self._ffplayer
        player.set_volume(self.volume)
        player.toggle_pause()
        self._state = 'paused'
        # wait until loaded or failed, shouldn't take long, but just to make
        # sure metadata is available.
        # BUG FIX: this loop used time.clock(), which measures CPU time on
        # Unix — the sleeping loop accrues almost none, making the 10 s cap
        # ineffective — and was removed in Python 3.8. Wall-clock time.time()
        # implements the intended timeout.
        s = time.time()
        while ((not player.get_metadata()['duration'])
               and not self.quitted and time.time() - s < 10.):
            time.sleep(0.005)

    def unload(self):
        """Drop the player and reset state; safe to call repeatedly."""
        if self._ffplayer:
            self._ffplayer = None
        self._state = ''
        self.state = 'stop'
        self.quitted = False

    def play(self):
        """Start (or resume) playback, loading the source on first use."""
        if self._state == 'playing':
            super(SoundFFPy, self).play()
            return
        if not self._ffplayer:
            self.load()
        self._ffplayer.toggle_pause()
        self._state = 'playing'
        self.state = 'play'
        super(SoundFFPy, self).play()

    def stop(self):
        """Pause the underlying player and report the stopped state."""
        if self._ffplayer and self._state == 'playing':
            self._ffplayer.toggle_pause()
            self._state = 'paused'
        self.state = 'stop'
        super(SoundFFPy, self).stop()

    def seek(self, position):
        """Seek to an absolute ``position`` in seconds (no-op when unloaded)."""
        if self._ffplayer is None:
            return
        self._ffplayer.seek(position, relative=False)

    def get_pos(self):
        """Return the current playback position in seconds (0 when unloaded)."""
        if self._ffplayer is not None:
            return self._ffplayer.get_pts()
        return 0

    def on_volume(self, instance, volume):
        if self._ffplayer is not None:
            self._ffplayer.set_volume(volume)

    def _get_length(self):
        if self._ffplayer is None:
            return super(SoundFFPy, self)._get_length()
        return self._ffplayer.get_metadata()['duration']

    def _do_eos(self, *args):
        # End-of-stream: either stop or restart from the top when looping.
        if not self.loop:
            self.stop()
        else:
            self.seek(0.)
SoundLoader.register(SoundFFPy)
| mit |
GoogleCloudPlatform/bigquery-utils | tools/cloud_functions/gcs_event_based_ingest/tests/conftest.py | 1 | 20146 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration tests for gcs_ocn_bq_ingest"""
import json
import os
import time
import uuid
from typing import List
import pytest
from google.cloud import bigquery
from google.cloud import error_reporting
from google.cloud import storage
import gcs_ocn_bq_ingest.common.ordering
import gcs_ocn_bq_ingest.common.utils
TEST_DIR = os.path.realpath(os.path.dirname(__file__))
LOAD_JOB_POLLING_TIMEOUT = 10 # seconds
@pytest.fixture(scope="module")
def bq() -> bigquery.Client:
    """BigQuery Client"""
    return bigquery.Client(location="US")
@pytest.fixture(scope="module")
def gcs() -> storage.Client:
    """GCS Client"""
    return storage.Client()
@pytest.fixture(scope="module")
def error() -> error_reporting.Client:
    """Error Reporting Client"""
    return error_reporting.Client()
@pytest.fixture
def gcs_bucket(request, gcs) -> storage.bucket.Bucket:
    """Versioned GCS bucket for test artifacts; fully emptied and deleted on teardown."""
    bucket = gcs.create_bucket(str(uuid.uuid4()))
    bucket.versioning_enabled = True
    bucket.patch()
    # override default field delimiter at bucket level
    load_config_json = {
        "fieldDelimiter": "|",
    }
    load_json_blob: storage.Blob = bucket.blob("_config/load.json")
    load_json_blob.upload_from_string(json.dumps(load_config_json))
    def teardown():
        load_json_blob.delete()
        # versioning must be off before versioned objects can be purged
        bucket.versioning_enabled = False
        bucket.patch()
        for obj in gcs.list_blobs(bucket_or_name=bucket, versions=True):
            obj.delete()
        bucket.delete(force=True)
    request.addfinalizer(teardown)
    return bucket
@pytest.fixture
def mock_env(gcs, monkeypatch):
    """Environment variable mocks emulating the Cloud Function runtime."""
    # Infer project from ADC of gcs client.
    monkeypatch.setenv("GCP_PROJECT", gcs.project)
    monkeypatch.setenv("FUNCTION_NAME", "integration-test")
    monkeypatch.setenv("FUNCTION_TIMEOUT_SEC", "540")
    monkeypatch.setenv("BQ_PROJECT", gcs.project)
@pytest.fixture
def ordered_mock_env(mock_env, monkeypatch):
    """Environment variable mocks with ordered per-table loading enabled."""
    monkeypatch.setenv("ORDER_PER_TABLE", "TRUE")
@pytest.fixture
def dest_dataset(request, bq, mock_env, monkeypatch):
    """Create a uniquely-named US dataset; deleted (with contents) on teardown."""
    random_dataset = (f"test_bq_ingest_gcf_"
                      f"{str(uuid.uuid4())[:8].replace('-','_')}")
    dataset = bigquery.Dataset(f"{os.getenv('GCP_PROJECT')}"
                               f".{random_dataset}")
    dataset.location = "US"
    bq.create_dataset(dataset)
    # point the ingest function's state table at this throwaway dataset
    monkeypatch.setenv("BQ_LOAD_STATE_TABLE",
                       f"{dataset.dataset_id}.serverless_bq_loads")
    print(f"created dataset {dataset.dataset_id}")
    def teardown():
        bq.delete_dataset(dataset, delete_contents=True, not_found_ok=True)
    request.addfinalizer(teardown)
    return dataset
@pytest.fixture
def dest_table(request, bq, mock_env, dest_dataset) -> bigquery.Table:
    """Create an empty nation-schema table in dest_dataset; dropped on teardown."""
    with open(os.path.join(TEST_DIR, "resources",
                           "nation_schema.json")) as schema_file:
        schema = gcs_ocn_bq_ingest.common.utils.dict_to_bq_schema(
            json.load(schema_file))
    table = bigquery.Table(
        f"{os.environ.get('GCP_PROJECT')}"
        f".{dest_dataset.dataset_id}.cf_test_nation_"
        f"{str(uuid.uuid4()).replace('-','_')}",
        schema=schema,
    )
    table = bq.create_table(table)
    def teardown():
        bq.delete_table(table, not_found_ok=True)
    request.addfinalizer(teardown)
    return table
@pytest.fixture(scope="function")
def gcs_data(request, gcs_bucket, dest_dataset,
             dest_table) -> storage.blob.Blob:
    """Upload the nation test data (two parts + _SUCCESS) for dest_table.

    Returns the _SUCCESS blob, which is uploaded last and triggers the load.
    """
    data_objs = []
    for test_file in ["part-m-00000", "part-m-00001", "_SUCCESS"]:
        data_obj: storage.blob.Blob = gcs_bucket.blob("/".join([
            f"{dest_dataset.project}.{dest_dataset.dataset_id}",
            dest_table.table_id, test_file
        ]))
        data_obj.upload_from_filename(
            os.path.join(TEST_DIR, "resources", "test-data", "nation",
                         test_file))
        data_objs.append(data_obj)
    def teardown():
        for do in data_objs:
            # BUG FIX: the original tested the bound method ``do.exists``
            # (always truthy) instead of calling it, so delete() could raise
            # NotFound during teardown for already-removed blobs.
            if do.exists():
                do.delete()
    request.addfinalizer(teardown)
    return data_objs[-1]
@pytest.fixture(scope="function")
def gcs_data_under_sub_dirs(request, gcs_bucket, dest_dataset,
                            dest_table) -> storage.blob.Blob:
    """Upload nation test data nested under extra sub-directories (foo/bar/baz).

    Returns the _SUCCESS blob uploaded last.
    """
    data_objs = []
    for test_file in ["part-m-00000", "part-m-00001", "_SUCCESS"]:
        data_obj: storage.blob.Blob = gcs_bucket.blob("/".join([
            f"{dest_dataset.project}.{dest_dataset.dataset_id}",
            dest_table.table_id, "foo", "bar", "baz", test_file
        ]))
        data_obj.upload_from_filename(
            os.path.join(TEST_DIR, "resources", "test-data", "nation",
                         test_file))
        data_objs.append(data_obj)
    def teardown():
        for do in data_objs:
            if do.exists():
                do.delete()
    request.addfinalizer(teardown)
    return data_objs[-1]
@pytest.fixture(scope="function")
def gcs_truncating_load_config(request, gcs_bucket, dest_dataset,
                               dest_table) -> storage.blob.Blob:
    """Upload a per-table load.json config that forces WRITE_TRUNCATE."""
    config_obj: storage.blob.Blob = gcs_bucket.blob("/".join([
        dest_dataset.dataset_id,
        dest_table.table_id,
        "_config",
        "load.json",
    ]))
    config_obj.upload_from_string(
        json.dumps({"writeDisposition": "WRITE_TRUNCATE"}))
    def teardown():
        if config_obj.exists():
            config_obj.delete()
    request.addfinalizer(teardown)
    return config_obj
@pytest.fixture(scope="function")
def gcs_batched_data(request, gcs_bucket, dest_dataset,
                     dest_table) -> List[storage.blob.Blob]:
    """
    Upload two identical batches (batch0/batch1) of nation data and return
    the two _SUCCESS blobs (one per batch).
    """
    data_objs = []
    for batch in ["batch0", "batch1"]:
        for test_file in ["part-m-00000", "part-m-00001", "_SUCCESS"]:
            data_obj: storage.blob.Blob = gcs_bucket.blob("/".join([
                dest_dataset.dataset_id, dest_table.table_id, batch, test_file
            ]))
            data_obj.upload_from_filename(
                os.path.join(TEST_DIR, "resources", "test-data", "nation",
                             test_file))
            data_objs.append(data_obj)
    def teardown():
        for do in data_objs:
            if do.exists():
                do.delete()
    request.addfinalizer(teardown)
    # the _SUCCESS blob of batch1 and batch0 respectively
    return [data_objs[-1], data_objs[-4]]
@pytest.fixture
def gcs_external_config(request, gcs_bucket, dest_dataset,
                        dest_table) -> List[storage.blob.Blob]:
    """Upload _config/bq_transform.sql and _config/external.json for dest_table.

    Together these make the ingest function load via an external table plus
    a transformation query instead of a plain load job.
    """
    config_objs = []
    sql_obj = gcs_bucket.blob("/".join([
        f"{dest_dataset.project}.{dest_dataset.dataset_id}",
        dest_table.table_id,
        "_config",
        "bq_transform.sql",
    ]))
    # temp_ext is the external table the ingest function creates over the data
    sql = "INSERT {dest_dataset}.{dest_table} SELECT * FROM temp_ext"
    sql_obj.upload_from_string(sql)
    config_obj = gcs_bucket.blob("/".join([
        f"{dest_dataset.project}.{dest_dataset.dataset_id}",
        dest_table.table_id, "_config", "external.json"
    ]))
    with open(os.path.join(TEST_DIR, "resources",
                           "nation_schema.json")) as schema:
        fields = json.load(schema)
    config = {
        "schema": {
            "fields": fields
        },
        "csvOptions": {
            "allowJaggedRows": False,
            "allowQuotedNewlines": False,
            "encoding": "UTF-8",
            "fieldDelimiter": "|",
            "skipLeadingRows": 0,
        },
        "sourceFormat": "CSV",
        # placeholder; the ingest function substitutes the real GCS URIs
        "sourceUris": ["REPLACEME"],
    }
    config_obj.upload_from_string(json.dumps(config))
    config_objs.append(sql_obj)
    config_objs.append(config_obj)
    def teardown():
        for do in config_objs:
            if do.exists():
                do.delete()
    request.addfinalizer(teardown)
    return config_objs
@pytest.fixture(scope="function")
def gcs_partitioned_data(request, gcs_bucket, dest_dataset,
                         dest_partitioned_table) -> List[storage.blob.Blob]:
    """Upload NYC 311 data for two hourly partition decorators ($YYYYMMDDHH).

    Returns the two _SUCCESS blobs (latest partition first).
    """
    data_objs = []
    for partition in ["$2017041101", "$2017041102"]:
        for test_file in ["nyc_311.csv", "_SUCCESS"]:
            data_obj: storage.blob.Blob = gcs_bucket.blob("/".join([
                dest_dataset.dataset_id, dest_partitioned_table.table_id,
                partition, test_file
            ]))
            data_obj.upload_from_filename(
                os.path.join(TEST_DIR, "resources", "test-data", "nyc_311",
                             partition, test_file))
            data_objs.append(data_obj)
    def teardown():
        for dobj in data_objs:
            # we expect some backfill files to be removed by the cloud function.
            if dobj.exists():
                dobj.delete()
    request.addfinalizer(teardown)
    return [data_objs[-1], data_objs[-3]]
@pytest.fixture(scope="function")
def dest_partitioned_table(request, bq: bigquery.Client, mock_env,
                           dest_dataset) -> bigquery.Table:
    """Create an hourly time-partitioned NYC 311 table; dropped on teardown.

    The schema is copied from the public new_york_311 dataset.
    """
    public_table: bigquery.Table = bq.get_table(
        bigquery.TableReference.from_string(
            "bigquery-public-data.new_york_311.311_service_requests"))
    schema = public_table.schema
    table: bigquery.Table = bigquery.Table(
        f"{os.environ.get('GCP_PROJECT')}"
        f".{dest_dataset.dataset_id}.cf_test_nyc_311_"
        f"{str(uuid.uuid4()).replace('-','_')}",
        schema=schema,
    )
    # partition by hour on created_date to match the $YYYYMMDDHH decorators
    table.time_partitioning = bigquery.TimePartitioning()
    table.time_partitioning.type_ = bigquery.TimePartitioningType.HOUR
    table.time_partitioning.field = "created_date"
    table = bq.create_table(table)
    def teardown():
        bq.delete_table(table, not_found_ok=True)
    request.addfinalizer(teardown)
    return table
def bq_wait_for_rows(bq_client: bigquery.Client, table: bigquery.Table,
                     expected_num_rows: int):
    """Poll the tables.get API until ``table`` holds the expected row count.

    Returns as soon as the count matches; raises AssertionError when the
    table overshoots the expectation or the polling window elapses. This is
    mostly an optimization to speed up the test suite without making it
    flaky.
    """
    deadline = time.monotonic() + LOAD_JOB_POLLING_TIMEOUT
    observed_num_rows = 0
    while time.monotonic() < deadline:
        observed_num_rows = bq_client.get_table(table).num_rows
        if observed_num_rows == expected_num_rows:
            return
        if observed_num_rows > expected_num_rows:
            raise AssertionError(
                f"{table.project}.{table.dataset_id}.{table.table_id} has"
                f"{observed_num_rows} rows. expected {expected_num_rows} rows.")
    raise AssertionError(
        f"Timed out after {LOAD_JOB_POLLING_TIMEOUT} seconds waiting for "
        f"{table.project}.{table.dataset_id}.{table.table_id} to "
        f"reach {expected_num_rows} rows."
        f"last poll returned {observed_num_rows} rows.")
@pytest.fixture
def dest_ordered_update_table(request, gcs, gcs_bucket, bq, mock_env,
                              dest_dataset) -> bigquery.Table:
    """Create the ordering-test table seeded with one row and a _bqlock blob.

    The pre-existing _bqlock (holding a finished job id) simulates a prior
    subscriber-loop run; the subscriber is expected to clean it up.
    """
    with open(os.path.join(TEST_DIR, "resources",
                           "ordering_schema.json")) as schema_file:
        schema = gcs_ocn_bq_ingest.common.utils.dict_to_bq_schema(
            json.load(schema_file))
    table = bigquery.Table(
        f"{os.environ.get('GCP_PROJECT')}.{dest_dataset.dataset_id}"
        f".cf_test_ordering_{str(uuid.uuid4()).replace('-','_')}",
        schema=schema,
    )
    table = bq.create_table(table)
    # Our test query only updates on a single row so we need to populate
    # original row.
    # This can be used to simulate an existing _bqlock from a prior run of the
    # subscriber loop with a job that has succeeded.
    job: bigquery.LoadJob = bq.load_table_from_json(
        [{
            "id": 1,
            "alpha_update": ""
        }],
        table,
        job_id_prefix=gcs_ocn_bq_ingest.common.constants.DEFAULT_JOB_PREFIX)
    # The subscriber will be responsible for cleaning up this file.
    bqlock_obj: storage.blob.Blob = gcs_bucket.blob("/".join([
        f"{dest_dataset.project}.{dest_dataset.dataset_id}", table.table_id,
        "_bqlock"
    ]))
    bqlock_obj.upload_from_string(job.job_id)
    def teardown():
        bq.delete_table(table, not_found_ok=True)
        if bqlock_obj.exists():
            bqlock_obj.delete()
    request.addfinalizer(teardown)
    return table
@pytest.fixture(scope="function")
def gcs_ordered_update_data(
        request, gcs_bucket, dest_dataset,
        dest_ordered_update_table) -> List[storage.blob.Blob]:
    """Upload ordered incremental chunks 00-03 for the ordering test.

    Chunk 00 is an empty _SUCCESS marker (already-processed batch); chunks
    01-03 each carry data.csv + _SUCCESS. Returns the _SUCCESS blobs only.
    """
    data_objs = []
    older_success_blob: storage.blob.Blob = gcs_bucket.blob("/".join([
        f"{dest_dataset.project}.{dest_dataset.dataset_id}",
        dest_ordered_update_table.table_id, "00", "_SUCCESS"
    ]))
    older_success_blob.upload_from_string("")
    data_objs.append(older_success_blob)
    chunks = {
        "01",
        "02",
        "03",
    }
    for chunk in chunks:
        for test_file in ["data.csv", "_SUCCESS"]:
            data_obj: storage.blob.Blob = gcs_bucket.blob("/".join([
                f"{dest_dataset.project}.{dest_dataset.dataset_id}",
                dest_ordered_update_table.table_id, chunk, test_file
            ]))
            data_obj.upload_from_filename(
                os.path.join(TEST_DIR, "resources", "test-data", "ordering",
                             chunk, test_file))
            data_objs.append(data_obj)
    def teardown():
        for dobj in data_objs:
            if dobj.exists():
                dobj.delete()
    request.addfinalizer(teardown)
    return list(filter(lambda do: do.name.endswith("_SUCCESS"), data_objs))
@pytest.fixture(scope="function")
def gcs_backlog(request, gcs, gcs_bucket,
                gcs_ordered_update_data) -> List[storage.blob.Blob]:
    """Seed the GCS backlog as if the backlog publisher had already run for
    every uploaded ``_SUCCESS`` blob, then return those ``_SUCCESS`` blobs.
    All created backlog blobs are deleted on teardown.
    """
    data_objs = []
    # We will deal with the last incremental in the test itself to test the
    # behavior of a new backlog subscriber.
    for success_blob in gcs_ordered_update_data:
        gcs_ocn_bq_ingest.common.ordering.backlog_publisher(gcs, success_blob)
        # Derive the backlog entry for this success marker and create it
        # explicitly (empty content; only the blob name matters).
        backlog_blob = \
            gcs_ocn_bq_ingest.common.ordering.success_blob_to_backlog_blob(
                success_blob
            )
        backlog_blob.upload_from_string("")
        data_objs.append(backlog_blob)
    def teardown():
        for dobj in data_objs:
            if dobj.exists():
                dobj.delete()
    request.addfinalizer(teardown)
    # NOTE(review): data_objs holds only backlog blobs here; the filter
    # presumably matches because backlog blob names are derived from
    # "_SUCCESS" blobs — confirm against success_blob_to_backlog_blob.
    return list(filter(lambda do: do.name.endswith("_SUCCESS"), data_objs))
@pytest.fixture
def gcs_external_update_config(request, gcs_bucket, dest_dataset,
                               dest_ordered_update_table) -> storage.Blob:
    """Upload the ``_config`` pair (transform SQL + external table JSON) for
    the ordered-update table plus a ``_BACKFILL`` trigger blob.
    Returns the ``_BACKFILL`` blob; everything uploaded is deleted on
    teardown.
    """
    config_objs = []
    sql_obj = gcs_bucket.blob("/".join([
        f"{dest_dataset.project}.{dest_dataset.dataset_id}",
        dest_ordered_update_table.table_id,
        "_config",
        "bq_transform.sql",
    ]))
    # {dest_dataset}/{dest_table} are template placeholders filled in by the
    # ingest function; temp_ext is the external table it defines over the
    # incoming data (see external.json below).
    sql = """
    UPDATE {dest_dataset}.{dest_table} dest
    SET alpha_update = CONCAT(dest.alpha_update, src.alpha_update)
    FROM temp_ext src
    WHERE dest.id = src.id
    """
    sql_obj.upload_from_string(sql)
    config_obj = gcs_bucket.blob("/".join([
        f"{dest_dataset.project}.{dest_dataset.dataset_id}",
        dest_ordered_update_table.table_id, "_config", "external.json"
    ]))
    with open(os.path.join(TEST_DIR, "resources",
                           "ordering_schema.json")) as schema:
        fields = json.load(schema)
    config = {
        "schema": {
            "fields": fields
        },
        "csvOptions": {
            "allowJaggedRows": False,
            "allowQuotedNewlines": False,
            "encoding": "UTF-8",
            "fieldDelimiter": "|",
            "skipLeadingRows": 0,
        },
        "sourceFormat": "CSV",
        # Placeholder — presumably substituted with real GCS URIs by the
        # ingest function at load time; confirm against the function code.
        "sourceUris": ["REPLACEME"],
    }
    config_obj.upload_from_string(json.dumps(config))
    # Empty blob whose *name* triggers the backfill subscriber.
    backfill_blob = gcs_bucket.blob("/".join([
        f"{dest_dataset.project}.{dest_dataset.dataset_id}",
        dest_ordered_update_table.table_id,
        gcs_ocn_bq_ingest.common.constants.BACKFILL_FILENAME
    ]))
    backfill_blob.upload_from_string("")
    config_objs.append(sql_obj)
    config_objs.append(config_obj)
    config_objs.append(backfill_blob)
    def teardown():
        for do in config_objs:
            if do.exists():
                do.delete()
    request.addfinalizer(teardown)
    return backfill_blob
# NOTE: this fixture previously carried
# @pytest.mark.usefixtures("bq", "gcs_bucket", "dest_dataset",
#                          "dest_partitioned_table")
# but marks cannot be applied to fixtures (pytest ignores/warns, and newer
# versions error).  It was also redundant: the same fixtures are already
# requested as parameters below, so the mark is simply dropped.
@pytest.fixture
def gcs_external_partitioned_config(
        request, bq, gcs_bucket, dest_dataset,
        dest_partitioned_table) -> List[storage.blob.Blob]:
    """Upload the ``_config`` pair (transform SQL + external table JSON) for
    the partitioned destination table.
    Returns both config blobs; they are deleted on teardown.
    """
    config_objs = []
    sql_obj = gcs_bucket.blob("/".join([
        dest_dataset.dataset_id,
        dest_partitioned_table.table_id,
        "_config",
        "bq_transform.sql",
    ]))
    # {dest_dataset}/{dest_table} are placeholders filled in by the ingest
    # function; temp_ext is the external table defined by external.json.
    sql = "INSERT {dest_dataset}.{dest_table} SELECT * FROM temp_ext;"
    sql_obj.upload_from_string(sql)
    config_obj = gcs_bucket.blob("/".join([
        dest_dataset.dataset_id, dest_partitioned_table.table_id, "_config",
        "external.json"
    ]))
    # Borrow the schema of a public dataset's table for the external config.
    public_table: bigquery.Table = bq.get_table(
        bigquery.TableReference.from_string(
            "bigquery-public-data.new_york_311.311_service_requests"))
    config = {
        "schema": public_table.to_api_repr()['schema'],
        "csvOptions": {
            "allowJaggedRows": False,
            "allowQuotedNewlines": False,
            "encoding": "UTF-8",
            "fieldDelimiter": "|",
            "skipLeadingRows": 0,
        },
        "sourceFormat": "CSV",
        "sourceUris": ["REPLACEME"],
    }
    config_obj.upload_from_string(json.dumps(config))
    config_objs.append(sql_obj)
    config_objs.append(config_obj)

    def teardown():
        for do in config_objs:
            # BUG FIX: this read `if do.exists:` — a bound method, which is
            # always truthy — so deletion was attempted unconditionally and
            # could raise NotFound.  Call exists() like the sibling fixtures.
            if do.exists():
                do.delete()

    request.addfinalizer(teardown)
    return config_objs
@pytest.fixture
def no_use_error_reporting(monkeypatch):
    """Disable reporting via the Error Reporting API for the duration of a
    test by setting USE_ERROR_REPORTING_API=False (monkeypatch restores the
    environment afterwards)."""
    monkeypatch.setenv("USE_ERROR_REPORTING_API", "False")
@pytest.fixture
def gcs_external_config_bad_statement(
        request, gcs_bucket, dest_dataset, dest_table,
        no_use_error_reporting) -> List[storage.blob.Blob]:
    """Upload a ``_config`` pair whose transform SQL deliberately fails:
    the second INSERT divides by zero, so ingestion should error out.
    Error Reporting is disabled via the no_use_error_reporting fixture.
    Returns both config blobs; they are deleted on teardown.
    """
    config_objs = []
    sql_obj = gcs_bucket.blob("/".join([
        f"{dest_dataset.project}.{dest_dataset.dataset_id}",
        dest_table.table_id,
        "_config",
        "bq_transform.sql",
    ]))
    # First statement is valid; the second fails at execution time (1/0).
    sql = ("INSERT {dest_dataset}.{dest_table} SELECT * FROM temp_ext;\n"
           "INSERT {dest_dataset}.{dest_table} SELECT 1/0;")
    sql_obj.upload_from_string(sql)
    config_obj = gcs_bucket.blob("/".join([
        f"{dest_dataset.project}.{dest_dataset.dataset_id}",
        dest_table.table_id, "_config", "external.json"
    ]))
    with open(os.path.join(TEST_DIR, "resources",
                           "nation_schema.json")) as schema:
        fields = json.load(schema)
    config = {
        "schema": {
            "fields": fields
        },
        "csvOptions": {
            "allowJaggedRows": False,
            "allowQuotedNewlines": False,
            "encoding": "UTF-8",
            "fieldDelimiter": "|",
            "skipLeadingRows": 0,
        },
        "sourceFormat": "CSV",
        # Placeholder — presumably substituted by the ingest function.
        "sourceUris": ["REPLACEME"],
    }
    config_obj.upload_from_string(json.dumps(config))
    config_objs.append(sql_obj)
    config_objs.append(config_obj)
    def teardown():
        for do in config_objs:
            if do.exists():
                do.delete()
    request.addfinalizer(teardown)
    return config_objs
| apache-2.0 |
JioEducation/edx-platform | common/test/acceptance/pages/lms/track_selection.py | 110 | 1942 | """Track selection page"""
from bok_choy.page_object import PageObject
from . import BASE_URL
from .dashboard import DashboardPage
from .pay_and_verify import PaymentAndVerificationFlow
class TrackSelectionPage(PageObject):
    """Interact with the track selection page.
    This page can be accessed at `/course_modes/choose/{course_id}/`.
    """
    def __init__(self, browser, course_id):
        """Initialize the page.
        Arguments:
            browser (Browser): The browser instance.
            course_id (unicode): The course in which the user is enrolling.
        """
        super(TrackSelectionPage, self).__init__(browser)
        self._course_id = course_id
    @property
    def url(self):
        """Return the URL corresponding to the track selection page."""
        return "{base}/course_modes/choose/{course_id}/".format(
            base=BASE_URL,
            course_id=self._course_id
        )
    def is_browser_on_page(self):
        """Check if the track selection page has loaded."""
        return self.q(css=".wrapper-register-choose").is_present()
    def enroll(self, mode="honor"):
        """Interact with one of the enrollment buttons on the page.
        Keyword Arguments:
            mode (str): Can be "honor" or "verified"
        Raises:
            ValueError
        """
        # Guard clause: reject unknown modes before touching the page.
        if mode not in ("honor", "verified"):
            raise ValueError("Mode must be either 'honor' or 'verified'.")
        if mode == "honor":
            self.q(css="input[name='honor_mode']").click()
            return DashboardPage(self.browser).wait_for_page()
        # Check the first contribution option, then click the enroll button
        self.q(css=".contribution-option > input").first.click()
        self.q(css="input[name='verified_mode']").click()
        return PaymentAndVerificationFlow(self.browser, self._course_id).wait_for_page()
| agpl-3.0 |
AntouanK/rethinkdb | external/v8_3.30.33.16/build/gyp/test/variables/commands/gyptest-commands-repeated.py | 330 | 1313 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Test variable expansion of '<!()' syntax commands where they are evaluated
more then once..
"""
import TestGyp
# Use the 'gypd' generator so the fully-expanded gyp data can be compared
# against checked-in golden files as plain text.
test = TestGyp.TestGyp(format='gypd')
expect = test.read('commands-repeated.gyp.stdout').replace('\r\n', '\n')
# Run gyp with variable-expansion debugging and compare its debug output
# against the expected stdout (line numbers ignored).
test.run_gyp('commands-repeated.gyp',
             '--debug', 'variables',
             stdout=expect, ignore_line_numbers=True)
# Verify the commands-repeated.gypd against the checked-in expected contents.
#
# Normally, we should canonicalize line endings in the expected
# contents file setting the Subversion svn:eol-style to native,
# but that would still fail if multiple systems are sharing a single
# workspace on a network-mounted file system.  Consequently, we
# massage the Windows line endings ('\r\n') in the output to the
# checked-in UNIX endings ('\n').
contents = test.read('commands-repeated.gypd').replace('\r\n', '\n')
expect = test.read('commands-repeated.gypd.golden').replace('\r\n', '\n')
if not test.match(contents, expect):
  print "Unexpected contents of `commands-repeated.gypd'"
  test.diff(expect, contents, 'commands-repeated.gypd ')
  test.fail_test()
test.pass_test()
rc/sfepy | examples/homogenization/nonlinear_hyperelastic_mM.py | 2 | 6215 | import numpy as nm
import six
from sfepy import data_dir
from sfepy.base.base import Struct, output
from sfepy.terms.terms_hyperelastic_ul import HyperElasticULFamilyData
from sfepy.homogenization.micmac import get_homog_coefs_nonlinear
import sfepy.linalg as la
from sfepy.discrete.evaluate import Evaluator
# Module-level scratch shared between the hooks below: caches the last
# homogenized material ('homog_mat'), its shape ('homog_mat_shape') and the
# time step it was computed for ('time').
hyperelastic_data = {}
def post_process(out, pb, state, extend=False):
    """Post-process hook: add averaged Cauchy stress and Green strain to the
    output and, optionally, trigger micro-scale recovery.
    Skips all work when `state` is a dict (nothing to evaluate in that case).
    """
    if isinstance(state, dict):
        pass
    else:
        # Flag value 2 makes get_homog_mat() return the cached homogenized
        # material instead of recomputing it during these evaluations.
        pb.update_materials_flag = 2
        stress = pb.evaluate('ev_integrate_mat.1.Omega(solid.S, u)',
                             mode='el_avg')
        out['cauchy_stress'] = Struct(name='output_data',
                                      mode='cell',
                                      data=stress,
                                      dofs=None)
        strain = pb.evaluate('ev_integrate_mat.1.Omega(solid.E, u)',
                             mode='el_avg')
        out['green_strain'] = Struct(name='output_data',
                                     mode='cell',
                                     data=strain,
                                     dofs=None)
        pb.update_materials_flag = 0
        if pb.conf.options.get('recover_micro', False):
            happ = pb.homogen_app
            if pb.ts.step == 0:
                # First step: record which micro problem indices belong to
                # the recovery region cells so their state gets cached.
                rname = pb.conf.options.recovery_region
                rcells = pb.domain.regions[rname].get_cells()
                sh = hyperelastic_data['homog_mat_shape']
                happ.app_options.store_micro_idxs = sh[1] * rcells
            else:
                # Later steps: collect cached micro coordinates and run the
                # micro problem's recovery hook on the recovery region.
                hpb = happ.problem
                recovery_hook = hpb.conf.options.get('recovery_hook', None)
                if recovery_hook is not None:
                    recovery_hook = hpb.conf.get_function(recovery_hook)
                    rname = pb.conf.options.recovery_region
                    rcoors = []
                    for ii in happ.app_options.store_micro_idxs:
                        key = happ.get_micro_cache_key('coors', ii, pb.ts.step)
                        if key in happ.micro_state_cache:
                            rcoors.append(happ.micro_state_cache[key])
                    recovery_hook(hpb, rcoors, pb.domain.regions[rname], pb.ts)
    return out
def get_homog_mat(ts, coors, mode, term=None, problem=None, **kwargs):
    """Material function returning homogenized coefficients in quadrature
    points.
    Behavior is controlled by problem.update_materials_flag:
      2 -> return the material cached in `hyperelastic_data` (no recompute);
      0 -> do nothing (returns None, keeping previous material);
      otherwise recompute via the nonlinear micro problem.
    """
    if problem.update_materials_flag == 2 and mode == 'qp':
        out = hyperelastic_data['homog_mat']
        # Copy each array so callers cannot mutate the cache in place.
        return {k: nm.array(v) for k, v in six.iteritems(out)}
    elif problem.update_materials_flag == 0 or not mode == 'qp':
        return
    output('get_homog_mat')
    dim = problem.domain.mesh.dim
    update_var = problem.conf.options.mesh_update_variables[0]
    state_u = problem.equations.variables[update_var]
    state_u.field.clear_mappings()
    family_data = problem.family_data(state_u, term.region,
                                      term.integral, term.integration)
    # Deformation gradient per quadrature point, flattened over cells.
    mtx_f = family_data.mtx_f.reshape((coors.shape[0],)
                                      + family_data.mtx_f.shape[-2:])
    if hasattr(problem, 'mtx_f_prev'):
        # Relative deformation gradient w.r.t. the previous stored state.
        rel_mtx_f = la.dot_sequences(mtx_f, nm.linalg.inv(problem.mtx_f_prev),
                                     'AB')
    else:
        rel_mtx_f = mtx_f
    problem.mtx_f_prev = mtx_f.copy()
    macro_data = {'mtx_e': rel_mtx_f - nm.eye(dim)}  # '*' - macro strain
    out = get_homog_coefs_nonlinear(ts, coors, mode, macro_data,
                                    term=term, problem=problem,
                                    iteration=problem.iiter, **kwargs)
    # Green strain E = (F^T F - I) / 2.
    out['E'] = 0.5 * (la.dot_sequences(mtx_f, mtx_f, 'ATB') - nm.eye(dim))
    # Refresh the cache used when update_materials_flag == 2.
    hyperelastic_data['time'] = ts.step
    hyperelastic_data['homog_mat_shape'] = family_data.det_f.shape[:2]
    hyperelastic_data['homog_mat'] = \
        {k: nm.array(v) for k, v in six.iteritems(out)}
    return out
def ulf_iteration_hook(pb, nls, vec, it, err, err0):
    """Nonlinear-solver iteration hook: advance the updated-Lagrangian state
    and force a recomputation of the homogenized material."""
    Evaluator.new_ulf_iteration(pb, nls, vec, it, err, err0)
    pb.iiter = it
    # Temporarily enable material updates so get_homog_mat() recomputes.
    pb.update_materials_flag = True
    pb.update_materials()
    pb.update_materials_flag = False
class MyEvaluator(Evaluator):
    """Custom evaluator whose residual is evaluated at the zero vector
    (the ULF formulation puts the state into the materials instead)."""
    def eval_residual(self, vec, is_full=False):
        if not is_full:
            vec = self.problem.equations.make_full_vec(vec)
        # vec * 0 keeps the array's shape/dtype while zeroing the state.
        vec_r = self.problem.equations.eval_residuals(vec * 0)
        return vec_r
def ulf_init(pb):
    """Pre-process hook: attach ULF family data to the problem and
    initialize the displacement variable and update flags."""
    pb.family_data = HyperElasticULFamilyData()
    pb_vars = pb.get_variables()
    pb_vars['u'].init_data()
    pb.update_materials_flag = True
    pb.iiter = 0
# Problem options: wire the hooks above into the solver and point to the
# micro-scale (homogenization) problem definition.
options = {
    'output_dir': 'output',
    'mesh_update_variables': ['u'],
    'nls_iter_hook': ulf_iteration_hook,
    'pre_process_hook': ulf_init,
    'micro_filename': 'examples/homogenization/nonlinear_homogenization.py',
    'recover_micro': True,
    'recovery_region': 'Recovery',
    'post_process_hook': post_process,
    'user_evaluator': MyEvaluator,
}
# The material is supplied by the 'get_homog' function (see `functions`).
materials = {
    'solid': 'get_homog',
}
fields = {
    'displacement': ('real', 'vector', 'Omega', 1),
}
variables = {
    'u': ('unknown field', 'displacement'),
    'v': ('test field', 'displacement', 'u'),
}
filename_mesh = data_dir + '/meshes/2d/its2D.mesh'
regions = {
    'Omega': 'all',
    'Left': ('vertices in (x < 0.001)', 'facet'),
    'Bottom': ('vertices in (y < 0.001 )', 'facet'),
    'Recovery': ('cell 49, 81', 'cell'),
}
# Dirichlet conditions: left edge fixed, bottom edge rotated by move_bottom.
ebcs = {
    'l': ('Left', {'u.all': 0.0}),
    'b': ('Bottom', {'u.all': 'move_bottom'}),
}
# Center of the rotation applied by move_bottom().
centre = nm.array([0, 0], dtype=nm.float64)
def move_bottom(ts, coor, **kwargs):
    """Displacement of the 'Bottom' region: rotate the given coordinates
    about `centre` by an angle growing with the time step (3 * ts.step;
    units depend on rotation_matrix2d's convention — presumably degrees)."""
    from sfepy.linalg import rotation_matrix2d
    angle = 3 * ts.step
    print('angle:', angle)
    offsets = coor[:, 0:2] - centre
    rot = rotation_matrix2d(angle)
    # Displacement = rotated position minus original position.
    return nm.dot(offsets, rot) - offsets
# Register the Python callables referenced by name elsewhere in this file.
functions = {
    'move_bottom': (move_bottom,),
    'get_homog': (get_homog_mat,),
}
equations = {
    'balance_of_forces':
    """dw_nonsym_elastic.1.Omega(solid.A, v, u)
     = - dw_lin_prestress.1.Omega(solid.S, v)""",
}
# Direct linear solver + Newton iterations inside a simple time stepper.
solvers = {
    'ls': ('ls.scipy_direct', {}),
    'newton': ('nls.newton', {
        'eps_a': 1e-3,
        'eps_r': 1e-3,
        'i_max': 20,
    }),
    'ts': ('ts.simple', {
        't0': 0,
        't1': 1,
        'n_step': 3 + 1,
        'verbose': 1,
    })
}
pombreda/django-hotclub | libs/external_libs/gdata.py-1.0.13/src/gdata/spreadsheet/service.py | 10 | 15970 | #!/usr/bin/python
#
# Copyright (C) 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SpreadsheetsService extends the GDataService to streamline Google
Spreadsheets operations.
GBaseService: Provides methods to query feeds and manipulate items. Extends
GDataService.
DictionaryToParamList: Function which converts a dictionary into a list of
URL arguments (represented as strings). This is a
utility function used in CRUD operations.
"""
__author__ = 'api.laurabeth@gmail.com (Laura Beth Lincoln)'
import gdata
import atom.service
import gdata.service
import gdata.spreadsheet
import atom
class Error(Exception):
  """Base class for exceptions in this module."""
  pass
class RequestError(Error):
  # Raised for failed requests to the Spreadsheets service.
  pass
class SpreadsheetsService(gdata.service.GDataService):
  """Client for the Google Spreadsheets service.

  Wraps the generic GDataService with spreadsheet-, worksheet-, cell- and
  list-feed operations (CRUD plus batch updates).
  """
  def __init__(self, email=None, password=None, source=None,
               server='spreadsheets.google.com',
               additional_headers=None):
    # 'wise' is the ClientLogin service code for Google Spreadsheets.
    gdata.service.GDataService.__init__(self, email=email, password=password,
                                        service='wise', source=source,
                                        server=server,
                                        additional_headers=additional_headers)
  def GetSpreadsheetsFeed(self, key=None, query=None, visibility='private',
                          projection='full'):
    """Gets a spreadsheets feed or a specific entry if a key is defined
    Args:
      key: string (optional) The spreadsheet key defined in /ccc?key=
      query: DocumentQuery (optional) Query parameters
    Returns:
      If there is no key, then a SpreadsheetsSpreadsheetsFeed.
      If there is a key, then a SpreadsheetsSpreadsheet.
    """
    uri = ('http://%s/feeds/spreadsheets/%s/%s'
           % (self.server, visibility, projection))
    if key is not None:
      uri = '%s/%s' % (uri, key)
    if query != None:
      query.feed = uri
      uri = query.ToUri()
    if key:
      return self.Get(uri,
          converter=gdata.spreadsheet.SpreadsheetsSpreadsheetFromString)
    else:
      return self.Get(uri,
          converter=gdata.spreadsheet.SpreadsheetsSpreadsheetsFeedFromString)
  def GetWorksheetsFeed(self, key, wksht_id=None, query=None,
                        visibility='private', projection='full'):
    """Gets a worksheets feed or a specific entry if a wksht is defined
    Args:
      key: string The spreadsheet key defined in /ccc?key=
      wksht_id: string (optional) The id for a specific worksheet entry
      query: DocumentQuery (optional) Query parameters
    Returns:
      If there is no wksht_id, then a SpreadsheetsWorksheetsFeed.
      If there is a wksht_id, then a SpreadsheetsWorksheet.
    """
    uri = ('http://%s/feeds/worksheets/%s/%s/%s'
           % (self.server, key, visibility, projection))
    if wksht_id != None:
      uri = '%s/%s' % (uri, wksht_id)
    if query != None:
      query.feed = uri
      uri = query.ToUri()
    if wksht_id:
      return self.Get(uri,
          converter=gdata.spreadsheet.SpreadsheetsWorksheetFromString)
    else:
      return self.Get(uri,
          converter=gdata.spreadsheet.SpreadsheetsWorksheetsFeedFromString)
  def AddWorksheet(self, title, row_count, col_count, key):
    """Creates a new worksheet in the desired spreadsheet.
    The new worksheet is appended to the end of the list of worksheets. The
    new worksheet will only have the available number of columns and cells
    specified.
    Args:
      title: str The title which will be displayed in the list of worksheets.
      row_count: int or str The number of rows in the new worksheet.
      col_count: int or str The number of columns in the new worksheet.
      key: str The spreadsheet key to the spreadsheet to which the new
          worksheet should be added.
    Returns:
      A SpreadsheetsWorksheet if the new worksheet was created successfully.
    """
    new_worksheet = gdata.spreadsheet.SpreadsheetsWorksheet(
        title=atom.Title(text=title),
        row_count=gdata.spreadsheet.RowCount(text=str(row_count)),
        col_count=gdata.spreadsheet.ColCount(text=str(col_count)))
    return self.Post(new_worksheet,
        'http://%s/feeds/worksheets/%s/private/full' % (self.server, key),
        converter=gdata.spreadsheet.SpreadsheetsWorksheetFromString)
  def UpdateWorksheet(self, worksheet_entry, url=None):
    """Changes the size and/or title of the desired worksheet.
    Args:
      worksheet_entry: SpreadsheetWorksheet The new contents of the
          worksheet.
      url: str (optional) The URL to which the edited worksheet entry should
          be sent. If the url is None, the edit URL from the worksheet will
          be used.
    Returns:
      A SpreadsheetsWorksheet with the new information about the worksheet.
    """
    target_url = url or worksheet_entry.GetEditLink().href
    return self.Put(worksheet_entry, target_url,
        converter=gdata.spreadsheet.SpreadsheetsWorksheetFromString)
  def DeleteWorksheet(self, worksheet_entry=None, url=None):
    """Removes the desired worksheet from the spreadsheet
    Args:
      worksheet_entry: SpreadsheetWorksheet (optional) The worksheet to
          be deleted. If this is none, then the DELETE request is sent to
          the url specified in the url parameter.
      url: str (optional) The URL to which the DELETE request should be
          sent. If left as None, the worksheet's edit URL is used.
    Returns:
      True if the worksheet was deleted successfully.
    """
    if url:
      target_url = url
    else:
      target_url = worksheet_entry.GetEditLink().href
    return self.Delete(target_url)
  def GetCellsFeed(self, key, wksht_id='default', cell=None, query=None,
                   visibility='private', projection='full'):
    """Gets a cells feed or a specific entry if a cell is defined
    Args:
      key: string The spreadsheet key defined in /ccc?key=
      wksht_id: string The id for a specific worksheet entry
      cell: string (optional) The R1C1 address of the cell
      query: DocumentQuery (optional) Query parameters
    Returns:
      If there is no cell, then a SpreadsheetsCellsFeed.
      If there is a cell, then a SpreadsheetsCell.
    """
    uri = ('http://%s/feeds/cells/%s/%s/%s/%s'
           % (self.server, key, wksht_id, visibility, projection))
    if cell != None:
      uri = '%s/%s' % (uri, cell)
    if query != None:
      query.feed = uri
      uri = query.ToUri()
    if cell:
      return self.Get(uri,
          converter=gdata.spreadsheet.SpreadsheetsCellFromString)
    else:
      return self.Get(uri,
          converter=gdata.spreadsheet.SpreadsheetsCellsFeedFromString)
  def GetListFeed(self, key, wksht_id='default', row_id=None, query=None,
                  visibility='private', projection='full'):
    """Gets a list feed or a specific entry if a row_id is defined
    Args:
      key: string The spreadsheet key defined in /ccc?key=
      wksht_id: string The id for a specific worksheet entry
      row_id: string (optional) The row_id of a row in the list
      query: DocumentQuery (optional) Query parameters
    Returns:
      If there is no row_id, then a SpreadsheetsListFeed.
      If there is a row_id, then a SpreadsheetsList.
    """
    uri = ('http://%s/feeds/list/%s/%s/%s/%s'
           % (self.server, key, wksht_id, visibility, projection))
    if row_id is not None:
      uri = '%s/%s' % (uri, row_id)
    if query is not None:
      query.feed = uri
      uri = query.ToUri()
    if row_id:
      return self.Get(uri,
          converter=gdata.spreadsheet.SpreadsheetsListFromString)
    else:
      return self.Get(uri,
          converter=gdata.spreadsheet.SpreadsheetsListFeedFromString)
  def UpdateCell(self, row, col, inputValue, key, wksht_id='default'):
    """Updates an existing cell.
    Args:
      row: int The row the cell to be edited is in
      col: int The column the cell to be edited is in
      inputValue: str the new value of the cell
      key: str The key of the spreadsheet in which this cell resides.
      wksht_id: str The ID of the worksheet which holds this cell.
    Returns:
      The updated cell entry, or None if the fetched entry had no edit
      link (e.g. read-only access).
    """
    row = str(row)
    col = str(col)
    # make the new cell
    new_cell = gdata.spreadsheet.Cell(row=row, col=col, inputValue=inputValue)
    # get the edit uri and PUT
    cell = 'R%sC%s' % (row, col)
    entry = self.GetCellsFeed(key, wksht_id, cell)
    for a_link in entry.link:
      if a_link.rel == 'edit':
        entry.cell = new_cell
        return self.Put(entry, a_link.href,
            converter=gdata.spreadsheet.SpreadsheetsCellFromString)
  def _GenerateCellsBatchUrl(self, spreadsheet_key, worksheet_id):
    """Builds the batch-request URL for a worksheet's cells feed."""
    return ('http://spreadsheets.google.com/feeds/cells/%s/%s/'
            'private/full/batch' % (spreadsheet_key, worksheet_id))
  def ExecuteBatch(self, batch_feed, url=None, spreadsheet_key=None,
                   worksheet_id=None,
                   converter=gdata.spreadsheet.SpreadsheetsCellsFeedFromString):
    """Sends a batch request feed to the server.
    The batch request needs to be sent to the batch URL for a particular
    worksheet. You can specify the worksheet by providing the spreadsheet_key
    and worksheet_id, or by sending the URL from the cells feed's batch link.
    Args:
      batch_feed: gdata.spreadsheet.SpreadsheetsCellFeed A feed containing
          BatchEntry elements which contain the desired CRUD operation and
          any necessary data to modify a cell.
      url: str (optional) The batch URL for the cells feed to which these
          changes should be applied. This can be found by calling
          cells_feed.GetBatchLink().href.
      spreadsheet_key: str (optional) Used to generate the batch request URL
          if the url argument is None. If using the spreadsheet key to
          generate the URL, the worksheet id is also required.
      worksheet_id: str (optional) Used if the url is not provided, it is
          part of the batch feed target URL. This is used with the spreadsheet
          key.
      converter: Function (optional) Function to be executed on the server's
          response. This function should take one string as a parameter. The
          default value is SpreadsheetsCellsFeedFromString which will turn
          the result into a gdata.spreadsheet.SpreadsheetsCellsFeed object.
    Returns:
      A gdata.BatchFeed containing the results.
    """
    if url is None:
      url = self._GenerateCellsBatchUrl(spreadsheet_key, worksheet_id)
    return self.Post(batch_feed, url, converter=converter)
  def InsertRow(self, row_data, key, wksht_id='default'):
    """Inserts a new row with the provided data
    Args:
      row_data: dict A dictionary of column header to row data
      key: str The spreadsheet key for the spreadsheet receiving the row.
      wksht_id: str The ID of the worksheet receiving the row.
    Returns:
      The inserted row
    """
    new_entry = gdata.spreadsheet.SpreadsheetsList()
    for k, v in row_data.iteritems():
      new_custom = gdata.spreadsheet.Custom()
      new_custom.column = k
      new_custom.text = v
      new_entry.custom[new_custom.column] = new_custom
    # Generate the post URL for the worksheet which will receive the new entry.
    post_url = 'http://spreadsheets.google.com/feeds/list/%s/%s/private/full'%(
        key, wksht_id)
    return self.Post(new_entry, post_url,
        converter=gdata.spreadsheet.SpreadsheetsListFromString)
  def UpdateRow(self, entry, new_row_data):
    """Updates a row with the provided data
    Args:
      entry: gdata.spreadsheet.SpreadsheetsList The entry to be updated
      new_row_data: dict A dictionary of column header to row data
    Returns:
      The updated row, or None if the entry had no edit link.
    """
    # Replace (not merge) the entry's custom column data with the new values.
    entry.custom = {}
    for k, v in new_row_data.iteritems():
      new_custom = gdata.spreadsheet.Custom()
      new_custom.column = k
      new_custom.text = v
      entry.custom[k] = new_custom
    for a_link in entry.link:
      if a_link.rel == 'edit':
        return self.Put(entry, a_link.href,
            converter=gdata.spreadsheet.SpreadsheetsListFromString)
  def DeleteRow(self, entry):
    """Deletes a row, the provided entry
    Args:
      entry: gdata.spreadsheet.SpreadsheetsList The row to be deleted
    Returns:
      The delete response, or None if the entry had no edit link.
    """
    for a_link in entry.link:
      if a_link.rel == 'edit':
        return self.Delete(a_link.href)
class DocumentQuery(gdata.service.Query):
  """Query for spreadsheet/worksheet feeds; exposes the 'title' and
  'title-exact' URL parameters as properties backed by the Query dict."""
  def _GetTitleQuery(self):
    return self['title']
  def _SetTitleQuery(self, document_query):
    self['title'] = document_query
  title = property(_GetTitleQuery, _SetTitleQuery,
      doc="""The title query parameter""")
  def _GetTitleExactQuery(self):
    return self['title-exact']
  def _SetTitleExactQuery(self, document_query):
    self['title-exact'] = document_query
  title_exact = property(_GetTitleExactQuery, _SetTitleExactQuery,
      doc="""The title-exact query parameter""")
class CellQuery(gdata.service.Query):
  """Query for cells feeds; exposes the cell-range URL parameters
  (min/max row and column, range, return-empty) as properties backed by
  the Query dict."""
  def _GetMinRowQuery(self):
    return self['min-row']
  def _SetMinRowQuery(self, cell_query):
    self['min-row'] = cell_query
  min_row = property(_GetMinRowQuery, _SetMinRowQuery,
      doc="""The min-row query parameter""")
  def _GetMaxRowQuery(self):
    return self['max-row']
  def _SetMaxRowQuery(self, cell_query):
    self['max-row'] = cell_query
  max_row = property(_GetMaxRowQuery, _SetMaxRowQuery,
      doc="""The max-row query parameter""")
  def _GetMinColQuery(self):
    return self['min-col']
  def _SetMinColQuery(self, cell_query):
    self['min-col'] = cell_query
  min_col = property(_GetMinColQuery, _SetMinColQuery,
      doc="""The min-col query parameter""")
  def _GetMaxColQuery(self):
    return self['max-col']
  def _SetMaxColQuery(self, cell_query):
    self['max-col'] = cell_query
  max_col = property(_GetMaxColQuery, _SetMaxColQuery,
      doc="""The max-col query parameter""")
  def _GetRangeQuery(self):
    return self['range']
  def _SetRangeQuery(self, cell_query):
    self['range'] = cell_query
  range = property(_GetRangeQuery, _SetRangeQuery,
      doc="""The range query parameter""")
  def _GetReturnEmptyQuery(self):
    return self['return-empty']
  def _SetReturnEmptyQuery(self, cell_query):
    self['return-empty'] = cell_query
  return_empty = property(_GetReturnEmptyQuery, _SetReturnEmptyQuery,
      doc="""The return-empty query parameter""")
class ListQuery(gdata.service.Query):
  """Query for list feeds; exposes the 'sq' (structured query), 'orderby'
  and 'reverse' URL parameters as properties backed by the Query dict."""
  def _GetSpreadsheetQuery(self):
    return self['sq']
  def _SetSpreadsheetQuery(self, list_query):
    self['sq'] = list_query
  sq = property(_GetSpreadsheetQuery, _SetSpreadsheetQuery,
      doc="""The sq query parameter""")
  def _GetOrderByQuery(self):
    return self['orderby']
  def _SetOrderByQuery(self, list_query):
    self['orderby'] = list_query
  orderby = property(_GetOrderByQuery, _SetOrderByQuery,
      doc="""The orderby query parameter""")
  def _GetReverseQuery(self):
    return self['reverse']
  def _SetReverseQuery(self, list_query):
    self['reverse'] = list_query
  reverse = property(_GetReverseQuery, _SetReverseQuery,
      doc="""The reverse query parameter""")
| mit |
randynobx/ansible | lib/ansible/modules/cloud/vmware/vmware_local_user_manager.py | 70 | 6582 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright IBM Corp. 2016
# Author(s): Andreas Nafpliotis <nafpliot@de.ibm.com>
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: vmware_local_user_manager
short_description: Manage local users on an ESXi host
description:
- Manage local users on an ESXi host
version_added: "2.2"
author: Andreas Nafpliotis
notes:
- Tested on ESXi 6.0
- Be sure that the ESXi user used for login, has the appropriate rights to create / delete / edit users
requirements:
- "python >= 2.6"
- PyVmomi installed
options:
local_user_name:
description:
- The local user name to be changed
required: True
local_user_password:
description:
- The password to be set
required: False
local_user_description:
description:
- Description for the user
required: False
state:
description:
- Indicate desired state of the user. If the user already exists when C(state=present), the user info is updated
choices: ['present', 'absent']
default: present
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
# Example vmware_local_user_manager command from Ansible Playbooks
- name: Add local user to ESXi
local_action:
module: vmware_local_user_manager
hostname: esxi_hostname
username: root
password: vmware
local_user_name: foo
'''
RETURN = '''# '''
# pyVmomi is optional at import time so the module can load and report a
# friendly error from main() instead of crashing with ImportError.
try:
    from pyVmomi import vim, vmodl
    HAS_PYVMOMI = True
except ImportError:
    HAS_PYVMOMI = False
class VMwareLocalUserManager(object):
    """Manages a single local user account on an ESXi host.

    Reads the desired user name/password/description/state from the Ansible
    module parameters, compares against the host's current state, and
    creates, updates or removes the account accordingly.  Every public
    ``state_*`` method terminates the module via exit_json/fail_json.
    """

    def __init__(self, module):
        """Connect to the host and cache the module parameters.
        Args:
            module: AnsibleModule with the vmware argument spec applied.
        """
        self.module = module
        self.content = connect_to_api(self.module)
        self.local_user_name = self.module.params['local_user_name']
        self.local_user_password = self.module.params['local_user_password']
        self.local_user_description = self.module.params['local_user_description']
        self.state = self.module.params['state']

    def process_state(self):
        """Dispatch on (desired state, current state) to the right handler."""
        try:
            local_account_manager_states = {
                'absent': {
                    'present': self.state_remove_user,
                    'absent': self.state_exit_unchanged,
                },
                'present': {
                    'present': self.state_update_user,
                    'absent': self.state_create_user,
                }
            }
            local_account_manager_states[self.state][self.check_local_user_manager_state()]()
        except vmodl.RuntimeFault as runtime_fault:
            self.module.fail_json(msg=runtime_fault.msg)
        except vmodl.MethodFault as method_fault:
            self.module.fail_json(msg=method_fault.msg)
        except Exception as e:
            # Catch-all so any unexpected error is reported through Ansible
            # instead of a raw traceback.
            self.module.fail_json(msg=str(e))

    def check_local_user_manager_state(self):
        """Return 'present' if the user exists on the host, else 'absent'."""
        user_account = self.find_user_account()
        if not user_account:
            return 'absent'
        else:
            return 'present'

    def find_user_account(self):
        """Look up the user by exact name (users only, no groups).
        Returns the (possibly empty) search result from the host's user
        directory.
        """
        searchStr = self.local_user_name
        exactMatch = True
        findUsers = True
        findGroups = False
        user_account = self.content.userDirectory.RetrieveUserGroups(None, searchStr, None, None, exactMatch, findUsers, findGroups)
        return user_account

    def create_account_spec(self):
        """Build the AccountSpecification from the module parameters."""
        account_spec = vim.host.LocalAccountManager.AccountSpecification()
        account_spec.id = self.local_user_name
        account_spec.password = self.local_user_password
        account_spec.description = self.local_user_description
        return account_spec

    def state_create_user(self):
        """Create the user on the host and exit the module."""
        account_spec = self.create_account_spec()
        try:
            # CreateUser's return value is not useful; the previous code
            # bound it to an unused `task` variable.
            self.content.accountManager.CreateUser(account_spec)
            self.module.exit_json(changed=True)
        except vmodl.RuntimeFault as runtime_fault:
            self.module.fail_json(msg=runtime_fault.msg)
        except vmodl.MethodFault as method_fault:
            self.module.fail_json(msg=method_fault.msg)

    def state_update_user(self):
        """Update the existing user's password/description and exit."""
        account_spec = self.create_account_spec()
        try:
            self.content.accountManager.UpdateUser(account_spec)
            self.module.exit_json(changed=True)
        except vmodl.RuntimeFault as runtime_fault:
            self.module.fail_json(msg=runtime_fault.msg)
        except vmodl.MethodFault as method_fault:
            self.module.fail_json(msg=method_fault.msg)

    def state_remove_user(self):
        """Remove the user from the host and exit."""
        try:
            self.content.accountManager.RemoveUser(self.local_user_name)
            self.module.exit_json(changed=True)
        except vmodl.RuntimeFault as runtime_fault:
            self.module.fail_json(msg=runtime_fault.msg)
        except vmodl.MethodFault as method_fault:
            self.module.fail_json(msg=method_fault.msg)

    def state_exit_unchanged(self):
        """Nothing to do: report no change."""
        self.module.exit_json(changed=False)
def main():
    """Ansible entry point: build the argument spec, validate pyVmomi is
    available, and run the local-user state machine."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(dict(local_user_name=dict(required=True, type='str'),
                              local_user_password=dict(required=False, type='str', no_log=True),
                              local_user_description=dict(required=False, type='str'),
                              state=dict(default='present', choices=['present', 'absent'], type='str')))
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
    # Fail early with a clear message if the optional dependency is missing.
    if not HAS_PYVMOMI:
        module.fail_json(msg='pyvmomi is required for this module')
    vmware_local_user_manager = VMwareLocalUserManager(module)
    vmware_local_user_manager.process_state()
from ansible.module_utils.vmware import *
from ansible.module_utils.basic import *
# Standard Ansible module entry point: only run when executed as a script.
if __name__ == '__main__':
    main()
| gpl-3.0 |
535521469/crawler_sth | scrapyd/app.py | 1 | 1586 | from twisted.application.service import Application
from twisted.application.internet import TimerService, TCPServer
from twisted.web import server
from twisted.python import log
from scrapy.utils.misc import load_object
from .interfaces import IEggStorage, IPoller, ISpiderScheduler, IEnvironment
from .launcher import Launcher
from .eggstorage import FilesystemEggStorage
from .scheduler import SpiderScheduler
from .poller import QueuePoller
from .environ import Environment
from .website import Root
from .config import Config
def application(config):
    """Assemble and return the Twisted Application object for Scrapyd.

    Wires together the poller, egg storage, scheduler and environment
    components, the configurable process launcher, a 5-second polling
    timer and the HTTP web console, all driven by ``config``.
    """
    app = Application("Scrapyd")
    http_port = config.getint('http_port', 6800)
    bind_address = config.get('bind_address', '0.0.0.0')

    # Core components, registered on the app so services can look them up
    # through their interfaces.
    poller = QueuePoller(config)
    app.setComponent(IPoller, poller)
    app.setComponent(IEggStorage, FilesystemEggStorage(config))
    app.setComponent(ISpiderScheduler, SpiderScheduler(config))
    app.setComponent(IEnvironment, Environment(config))

    # The launcher class is configurable; fall back to the bundled one.
    launcher_class = load_object(config.get('launcher', 'scrapyd.launcher.Launcher'))
    launcher = launcher_class(config, app)

    timer = TimerService(5, poller.poll)
    webservice = TCPServer(http_port, server.Site(Root(config, app)),
                           interface=bind_address)
    log.msg(format="Scrapyd web console available at http://%(bind_address)s:%(http_port)s/",
            bind_address=bind_address, http_port=http_port)

    for service in (launcher, timer, webservice):
        service.setServiceParent(app)
    return app
| bsd-3-clause |
DirtyUnicorns/android_kernel_samsung_trlte | tools/perf/python/twatch.py | 7370 | 1334 | #! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <acme@redhat.com>
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import perf
def main():
cpus = perf.cpu_map()
threads = perf.thread_map()
evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
wakeup_events = 1, watermark = 1,
sample_id_all = 1,
sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU | perf.SAMPLE_TID)
evsel.open(cpus = cpus, threads = threads);
evlist = perf.evlist(cpus, threads)
evlist.add(evsel)
evlist.mmap()
while True:
evlist.poll(timeout = -1)
for cpu in cpus:
event = evlist.read_on_cpu(cpu)
if not event:
continue
print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
event.sample_pid,
event.sample_tid),
print event
if __name__ == '__main__':
main()
| gpl-2.0 |
icodemachine/Stem | test/unit/doctest.py | 9 | 3334 | """
Tests examples from our documentation.
"""
from __future__ import absolute_import
import doctest
import os
import unittest
import stem.descriptor.router_status_entry
import stem.util.connection
import stem.util.str_tools
import stem.util.system
import stem.version
import test.util
try:
# added in python 3.3
from unittest.mock import Mock, patch
except ImportError:
from mock import Mock, patch
EXPECTED_CIRCUIT_STATUS = """\
20 EXTENDED $718BCEA286B531757ACAFF93AE04910EA73DE617=KsmoinOK,$649F2D0ACF418F7CFC6539AB2257EB2D5297BAFA=Eskimo BUILD_FLAGS=NEED_CAPACITY PURPOSE=GENERAL TIME_CREATED=2012-12-06T13:51:11.433755
19 BUILT $718BCEA286B531757ACAFF93AE04910EA73DE617=KsmoinOK,$30BAB8EE7606CBD12F3CC269AE976E0153E7A58D=Pascal1,$2765D8A8C4BBA3F89585A9FFE0E8575615880BEB=Anthracite PURPOSE=GENERAL TIME_CREATED=2012-12-06T13:50:56.969938\
"""
class TestDocumentation(unittest.TestCase):
  """Runs every doctest embedded in stem's own source modules, stubbing
  out the modules whose examples would otherwise need a live tor or a
  config file."""

  def test_examples(self):
    stem_dir = os.path.join(test.util.STEM_BASE, 'stem')
    is_failed = False

    for path in stem.util.system.files_with_suffix(stem_dir, '.py'):
      # module_relative=False because we hand doctest absolute paths.
      args = {'module_relative': False}
      test_run = None

      if path.endswith('/stem/util/conf.py'):
        # conf.py's examples call get_config(); stub it out so the doctest
        # never touches a real config file.
        with patch('stem.util.conf.get_config') as get_config_mock:
          config = Mock()
          config.load.return_value = None
          get_config_mock.return_value = config

          test_run = doctest.testfile(path, **args)
      elif path.endswith('/stem/descriptor/router_status_entry.py'):
        # Examples reference a private helper; expose it as a doctest global.
        args['globs'] = {
            '_base64_to_hex': stem.descriptor.router_status_entry._base64_to_hex,
        }
        test_run = doctest.testfile(path, **args)
      elif path.endswith('/stem/util/connection.py'):
        args['globs'] = {
            'expand_ipv6_address': stem.util.connection.expand_ipv6_address,
        }
        test_run = doctest.testfile(path, **args)
      elif path.endswith('/stem/util/str_tools.py'):
        # str_tools examples use several helpers; inject them all.
        args['globs'] = {
            '_to_camel_case': stem.util.str_tools._to_camel_case,
            'crop': stem.util.str_tools.crop,
            'size_label': stem.util.str_tools.size_label,
            'time_label': stem.util.str_tools.time_label,
            'time_labels': stem.util.str_tools.time_labels,
            'short_time_label': stem.util.str_tools.short_time_label,
            'parse_short_time_label': stem.util.str_tools.parse_short_time_label,
        }
        test_run = doctest.testfile(path, **args)
      elif path.endswith('/stem/response/__init__.py'):
        pass  # the escaped slashes seem to be confusing doctest
      elif path.endswith('/stem/control.py'):
        # control.py examples talk to a controller; fake one that replays
        # canned circuit data.
        controller = Mock()
        controller.extend_circuit.side_effect = [19, 20]
        controller.get_info.side_effect = lambda arg: {
            'circuit-status': EXPECTED_CIRCUIT_STATUS,
        }[arg]

        args['globs'] = {'controller': controller}
        test_run = doctest.testfile(path, **args)
      elif path.endswith('/stem/version.py'):
        # Pin the reported tor version so the example output is stable.
        with patch('stem.version.get_system_tor_version', Mock(return_value = stem.version.Version('0.2.1.30'))):
          test_run = doctest.testfile(path, **args)
      else:
        test_run = doctest.testfile(path, **args)

      if test_run and test_run.failed > 0:
        is_failed = True

    if is_failed:
      self.fail('doctests encountered errors')
| lgpl-3.0 |
shakamunyi/tensorflow | tensorflow/contrib/learn/python/learn/tests/stability_test.py | 4 | 4977 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Estimator regression tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.learn_io import data_feeder
def _get_input_fn(x, y, batch_size=None):
    """Return (input_builder, feed_dict_fn) for the given data.

    Builds a training data feeder over ``x``/``y`` (regression: no class
    count) and exposes its builder and feed-dict factory.
    """
    feeder = data_feeder.setup_train_data_feeder(
        x, y, n_classes=None, batch_size=batch_size)
    return feeder.input_builder, feeder.get_feed_dict_fn()
# We use a null optimizer since we can't get deterministic results out of
# supervisor's mulitple threads.
class _NullOptimizer(tf.train.Optimizer):
  """Optimizer that applies no updates at all.

  Used so repeated training runs are bit-for-bit reproducible: gradients
  are computed but never applied to the variables.
  """

  def __init__(self):
    super(_NullOptimizer, self).__init__(use_locking=False, name='Null')

  def _apply_dense(self, grad, var):
    # Discard the dense gradient update.
    return tf.no_op()

  def _apply_sparse(self, grad, var):
    # Discard the sparse gradient update.
    return tf.no_op()

  def _prepare(self):
    # No per-step preparation needed.
    pass
_NULL_OPTIMIZER = _NullOptimizer()
class StabilityTest(tf.test.TestCase):
  """Tests that estimators are reproducible given fixed seeds."""

  def testRandomStability(self):
    # The same graph-level seed must yield identical random tensors.
    my_seed = 42
    minval = -0.3333
    maxval = 0.3333
    with tf.Graph().as_default() as g:
      with self.test_session(graph=g) as session:
        g.seed = my_seed
        x = tf.random_uniform([10, 10], minval=minval, maxval=maxval)
        val1 = session.run(x)
    with tf.Graph().as_default() as g:
      with self.test_session(graph=g) as session:
        g.seed = my_seed
        x = tf.random_uniform([10, 10], minval=minval, maxval=maxval)
        val2 = session.run(x)
    self.assertAllClose(val1, val2)

  def testLinearRegression(self):
    my_seed = 42
    config = tf.contrib.learn.RunConfig(tf_random_seed=my_seed)
    boston = tf.contrib.learn.datasets.load_boston()
    columns = [tf.contrib.layers.real_valued_column('', dimension=13)]

    # Train two regressors from identical seeds; weights, biases and
    # predictions must all match.
    with tf.Graph().as_default() as g1:
      random.seed(my_seed)
      g1.seed = my_seed
      tf.contrib.framework.create_global_step()
      regressor1 = tf.contrib.learn.LinearRegressor(optimizer=_NULL_OPTIMIZER,
                                                    feature_columns=columns,
                                                    config=config)
      regressor1.fit(x=boston.data, y=boston.target, steps=1)

    with tf.Graph().as_default() as g2:
      random.seed(my_seed)
      g2.seed = my_seed
      tf.contrib.framework.create_global_step()
      regressor2 = tf.contrib.learn.LinearRegressor(optimizer=_NULL_OPTIMIZER,
                                                    feature_columns=columns,
                                                    config=config)
      regressor2.fit(x=boston.data, y=boston.target, steps=1)

    self.assertAllClose(regressor1.weights_, regressor2.weights_)
    self.assertAllClose(regressor1.bias_, regressor2.bias_)
    self.assertAllClose(
        list(regressor1.predict(boston.data, as_iterable=True)),
        list(regressor2.predict(boston.data, as_iterable=True)), atol=1e-05)

  def testDNNRegression(self):
    my_seed = 42
    config = tf.contrib.learn.RunConfig(tf_random_seed=my_seed)
    boston = tf.contrib.learn.datasets.load_boston()
    columns = [tf.contrib.layers.real_valued_column('', dimension=13)]

    with tf.Graph().as_default() as g1:
      random.seed(my_seed)
      g1.seed = my_seed
      tf.contrib.framework.create_global_step()
      regressor1 = tf.contrib.learn.DNNRegressor(
          hidden_units=[10], feature_columns=columns,
          optimizer=_NULL_OPTIMIZER, config=config)
      regressor1.fit(x=boston.data, y=boston.target, steps=1)

    with tf.Graph().as_default() as g2:
      random.seed(my_seed)
      g2.seed = my_seed
      tf.contrib.framework.create_global_step()
      regressor2 = tf.contrib.learn.DNNRegressor(
          hidden_units=[10], feature_columns=columns,
          optimizer=_NULL_OPTIMIZER, config=config)
      regressor2.fit(x=boston.data, y=boston.target, steps=1)

    for w1, w2 in zip(regressor1.weights_, regressor2.weights_):
      self.assertAllClose(w1, w2)
    # BUG FIX: the original zipped regressor2's biases against themselves
    # (zip(regressor2.bias_, regressor2.bias_)), so this assertion could
    # never fail.  Compare regressor1 against regressor2 as intended.
    for b1, b2 in zip(regressor1.bias_, regressor2.bias_):
      self.assertAllClose(b1, b2)
    self.assertAllClose(
        list(regressor1.predict(boston.data, as_iterable=True)),
        list(regressor2.predict(boston.data, as_iterable=True)), atol=1e-05)


if __name__ == '__main__':
  tf.test.main()
| apache-2.0 |
reidwooten99/botbot-web | botbot/apps/plugins/runner.py | 2 | 11330 | # pylint: disable=W0212
import json
import logging
from datetime import datetime
from django.utils.timezone import utc
import re
import redis
import botbot_plugins.plugins
from botbot_plugins.base import PrivateMessage
from django.core.cache import cache
from django.conf import settings
from django.utils.importlib import import_module
from django_statsd.clients import statsd
from botbot.apps.bots import models as bots_models
from botbot.apps.plugins.utils import convert_nano_timestamp, log_on_error
from .plugin import RealPluginMixin
CACHE_TIMEOUT_2H = 7200
LOG = logging.getLogger('botbot.plugin_runner')
class Line(object):
    """
    All the methods and data necessary for a plugin to act on a line

    Wraps one raw IRC packet (JSON-decoded from the Redis queue) and lazily
    resolves the associated ChatBot and Channel models with caching.
    """
    def __init__(self, packet, app):
        # ``packet`` is the decoded message from the IRC bot; ``app`` is the
        # PluginRunner (currently unused here beyond the signature).
        self.full_text = packet['Content']
        self.text = packet['Content']
        self.user = packet['User']

        # Private attributes not accessible to external plugins
        self._chatbot_id = packet['ChatBotId']
        self._raw = packet['Raw']
        self._channel_name = packet['Channel'].strip()
        self._command = packet['Command']
        self._is_message = packet['Command'] == 'PRIVMSG'
        self._host = packet['Host']
        # 'Received' is a nanosecond timestamp from the Go side.
        self._received = convert_nano_timestamp(packet['Received'])

        # NOTE: may rewrite self.text and self._channel_name (for PMs).
        self.is_direct_message = self.check_direct_message()

    @property
    def _chatbot(self):
        """Simple caching for ChatBot model"""
        # Instance attribute first, then the shared Django cache, then a
        # database lookup (which repopulates the cache).
        if not hasattr(self, '_chatbot_cache'):
            cache_key = 'chatbot:{0}'.format(self._chatbot_id)
            chatbot = cache.get(cache_key)
            if not chatbot:
                chatbot = bots_models.ChatBot.objects.get(id=self._chatbot_id)
                cache.set(cache_key, chatbot, CACHE_TIMEOUT_2H)
            self._chatbot_cache = chatbot
        return self._chatbot_cache

    @property
    def _channel(self):
        """Simple caching for Channel model"""
        # May return None for non-"#" targets (e.g. private messages);
        # callers must handle that.
        if not hasattr(self, '_channel_cache'):
            cache_key = 'channel:{0}-{1}'.format(self._chatbot_id, self._channel_name)
            channel = cache.get(cache_key)
            if not channel and self._channel_name.startswith("#"):
                channel = self._chatbot.channel_set.get(
                    name=self._channel_name)
                cache.set(cache_key, channel, CACHE_TIMEOUT_2H)
            """
            The following logging is to help out in sentry. For some
            channels, we are getting occasional issues with the
            ``channel_set.get()`` lookup above
            """
            LOG.debug(channel)
            LOG.debug(self._channel_name)
            LOG.debug(cache_key)
            LOG.debug("%s", ", ".join(self._chatbot.channel_set.values_list('name', flat=True)))

            self._channel_cache = channel
        return self._channel_cache

    @property
    def _active_plugin_slugs(self):
        # Set of plugin slugs enabled for this channel; empty when the
        # channel could not be resolved.
        if not hasattr(self, '_active_plugin_slugs_cache'):
            if self._channel:
                self._active_plugin_slugs_cache = self._channel.active_plugin_slugs
            else:
                self._active_plugin_slugs_cache = set()
        return self._active_plugin_slugs_cache

    def check_direct_message(self):
        """
        If message is addressed to the bot, strip the bot's nick
        and return the rest of the message. Otherwise, return False.
        """
        nick = self._chatbot.nick
        # Private message
        if self._channel_name == nick:
            LOG.debug('Private message detected')
            # Set channel as user, so plugins reply by PM to correct user
            self._channel_name = self.user
            return True
        if len(nick) == 1:
            # support @<plugin> or !<plugin>
            regex = ur'^{0}(.*)'.format(re.escape(nick))
        else:
            # support <nick>: <plugin>
            regex = ur'^{0}[:\s](.*)'.format(re.escape(nick))
        match = re.match(regex, self.full_text, re.IGNORECASE)
        if match:
            LOG.debug('Direct message detected')
            # Strip the bot's nick so plugins see only the command text.
            self.text = match.groups()[0].lstrip()
            return True
        return False

    def __str__(self):
        return self.full_text

    def __repr__(self):
        return str(self)
class PluginRunner(object):
    """
    Registration and routing for plugins
    Calls to plugins are done via greenlets

    Consumes lines from the Redis queue 'q' and dispatches each one to the
    plugin methods whose registered route rules match.
    """
    def __init__(self, use_gevent=False):
        # gevent is imported lazily; its presence (as self.gevent) is used
        # later as the switch between greenlet and synchronous dispatch.
        if use_gevent:
            import gevent
            self.gevent = gevent
        self.bot_bus = redis.StrictRedis.from_url(
            settings.REDIS_PLUGIN_QUEUE_URL)
        self.storage = redis.StrictRedis.from_url(
            settings.REDIS_PLUGIN_STORAGE_URL)

        # plugins that listen to everything coming over the wire
        self.firehose_router = {}

        # plugins that listen to all messages (aka PRIVMSG)
        self.messages_router = {}

        # plugins that listen on direct messages (starting with bot nick)
        self.mentions_router = {}

    def register_all_plugins(self):
        """Iterate over all plugins and register them with the app"""
        # Core plugins ship with this app; the rest come from the external
        # botbot_plugins package.
        for core_plugin in ['help', 'logger']:
            mod = import_module('botbot.apps.plugins.core.{}'.format(core_plugin))
            plugin = mod.Plugin()
            self.register(plugin)

        for mod in botbot_plugins.plugins.__all__:
            plugin = import_module('botbot_plugins.plugins.' + mod).Plugin()
            self.register(plugin)

    def register(self, plugin):
        """
        Introspects the Plugin class instance provided for methods
        that need to be registered with the internal app routers.
        """
        for key in dir(plugin):
            try:
                # the config attr bombs if accessed here because it tries
                # to access an attribute from the dummyapp
                attr = getattr(plugin, key)
            except AttributeError:
                continue
            # route_rule is (router_name, regex) attached by a decorator on
            # the plugin method.
            if (not key.startswith('__') and
                    getattr(attr, 'route_rule', None)):
                LOG.info('Route: %s.%s listens to %s for matches to %s',
                         plugin.slug, key, attr.route_rule[0],
                         attr.route_rule[1])
                getattr(self, attr.route_rule[0] + '_router').setdefault(
                    plugin.slug, []).append((attr.route_rule[1], attr, plugin))

    def listen(self):
        """Listens for incoming messages on the Redis queue"""
        # Runs forever; any per-line failure is logged and the loop continues.
        while 1:
            val = None
            try:
                # Blocking pop with a 1s timeout so the loop stays responsive.
                val = self.bot_bus.blpop('q', 1)

                # Track q length
                ql = self.bot_bus.llen('q')
                statsd.gauge(".".join(["plugins", "q"]), ql)

                if val:
                    _, val = val
                    LOG.debug('Recieved: %s', val)
                    line = Line(json.loads(val), self)

                    # Calculate the transport latency between go and the plugins.
                    delta = datetime.utcnow().replace(tzinfo=utc) - line._received
                    statsd.timing(".".join(["plugins", "latency"]),
                                  delta.total_seconds() * 1000)

                    self.dispatch(line)
            except Exception:
                LOG.error("Line Dispatch Failed", exc_info=True, extra={
                    "line": val
                })

    def dispatch(self, line):
        """Given a line, dispatch it to the right plugins & functions."""
        # This is a pared down version of the `check_for_plugin_route_matches`
        # method for firehose plugins (no regexing or return values)
        active_firehose_plugins = line._active_plugin_slugs.intersection(
            self.firehose_router.viewkeys())
        for plugin_slug in active_firehose_plugins:
            for _, func, plugin in self.firehose_router[plugin_slug]:
                # firehose gets everything, no rule matching
                LOG.info('Match: %s.%s', plugin_slug, func.__name__)
                with statsd.timer(".".join(["plugins", plugin_slug])):
                    # FIXME: This will not have correct timing if go back to
                    # gevent.
                    channel_plugin = self.setup_plugin_for_channel(
                        plugin.__class__, line)
                    new_func = log_on_error(LOG, getattr(channel_plugin,
                                                         func.__name__))
                    if hasattr(self, 'gevent'):
                        self.gevent.Greenlet.spawn(new_func, line)
                    else:
                        channel_plugin.respond(new_func(line))

        # pass line to other routers
        if line._is_message:
            self.check_for_plugin_route_matches(line, self.messages_router)

        if line.is_direct_message:
            self.check_for_plugin_route_matches(line, self.mentions_router)

    def setup_plugin_for_channel(self, fake_plugin_class, line):
        """Given a dummy plugin class, initialize it for the line's channel"""
        # Mix the external plugin class with RealPluginMixin so it gains
        # storage/respond behavior bound to this channel.
        class RealPlugin(RealPluginMixin, fake_plugin_class):
            pass
        plugin = RealPlugin(slug=fake_plugin_class.__module__.split('.')[-1],
                            channel=line._channel,
                            chatbot_id=line._chatbot_id,
                            app=self)
        return plugin

    def check_for_plugin_route_matches(self, line, router):
        """Checks the active plugins' routes and calls functions on matches"""
        # get the active routes for this channel
        active_slugs = line._active_plugin_slugs.intersection(router.viewkeys())
        for plugin_slug in active_slugs:
            for rule, func, plugin in router[plugin_slug]:
                match = re.match(rule, line.text, re.IGNORECASE)
                if match:
                    LOG.info('Match: %s.%s', plugin_slug, func.__name__)
                    with statsd.timer(".".join(["plugins", plugin_slug])):
                        # FIXME: This will not have correct timing if go back to
                        # gevent.
                        # Instantiate a plugin specific to this channel
                        channel_plugin = self.setup_plugin_for_channel(
                            plugin.__class__, line)
                        # get the method from the channel-specific plugin
                        new_func = log_on_error(LOG, getattr(channel_plugin,
                                                             func.__name__))
                        if hasattr(self, 'gevent'):
                            # Named regex groups become keyword arguments.
                            grnlt = self.gevent.Greenlet(new_func, line,
                                                         **match.groupdict())
                            grnlt.link_value(channel_plugin.greenlet_respond)
                            grnlt.start()
                        else:
                            channel_plugin.respond(new_func(line,
                                                            **match.groupdict()))
def start_plugins(*args, **kwargs):
    """Entry point for the management command.

    Builds the PluginRunner, registers every plugin, then blocks forever
    consuming the Redis line queue.
    """
    LOG.info('Starting plugins. Gevent=%s', kwargs['use_gevent'])
    runner = PluginRunner(**kwargs)
    runner.register_all_plugins()
    runner.listen()
| mit |
aurelijusb/arangodb | 3rdParty/V8-4.3.61/.ycm_extra_conf.py | 31 | 5867 | # Copyright 2015 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Autocompletion config for YouCompleteMe in V8.
#
# USAGE:
#
# 1. Install YCM [https://github.com/Valloric/YouCompleteMe]
# (Googlers should check out [go/ycm])
#
# 2. Profit
#
#
# Usage notes:
#
# * You must use ninja & clang to build V8.
#
# * You must have run gyp_v8 and built V8 recently.
#
#
# Hacking notes:
#
# * The purpose of this script is to construct an accurate enough command line
# for YCM to pass to clang so it can build and extract the symbols.
#
# * Right now, we only pull the -I and -D flags. That seems to be sufficient
# for everything I've used it for.
#
# * That whole ninja & clang thing? We could support other configs if someone
# were willing to write the correct commands and a parser.
#
# * This has only been tested on gTrusty.
import os
import os.path
import subprocess
import sys
# Flags from YCM's default config.
flags = [
'-DUSE_CLANG_COMPLETER',
'-std=gnu++0x',
'-x',
'c++',
]
def PathExists(*args):
  """Return True if the path formed by joining *args exists."""
  candidate = os.path.join(*args)
  return os.path.exists(candidate)
def FindV8SrcFromFilename(filename):
  """Searches for the root of the V8 checkout.

  Walks parent directories until it finds one containing v8/ with a DEPS
  file, alongside either a .gclient file or a v8/.git checkout.

  Args:
    filename: (String) Path to source file being edited.

  Returns:
    (String) Path of 'v8/', or None if unable to find.
  """
  curdir = os.path.normpath(os.path.dirname(filename))
  while True:
    is_checkout_root = (
        os.path.exists(os.path.join(curdir, 'v8')) and
        os.path.exists(os.path.join(curdir, 'v8', 'DEPS')) and
        (os.path.exists(os.path.join(curdir, '.gclient')) or
         os.path.exists(os.path.join(curdir, 'v8', '.git'))))
    if is_checkout_root:
      return os.path.join(curdir, 'v8')
    parent = os.path.normpath(os.path.join(curdir, '..'))
    if parent == curdir:
      # Reached the filesystem root without finding a checkout.
      return None
    curdir = parent
def GetClangCommandFromNinjaForFilename(v8_root, filename):
  """Returns the command line to build |filename|.

  Asks ninja how it would build the source file. If the specified file is a
  header, tries to find its companion source file first.

  Args:
    v8_root: (String) Path to v8/.
    filename: (String) Path to source file being edited.

  Returns:
    (List of Strings) Command line arguments for clang.
  """
  if not v8_root:
    return []

  # Generally, everyone benefits from including V8's root, because all of
  # V8's includes are relative to that.
  v8_flags = ['-I' + os.path.join(v8_root)]

  # Version of Clang used to compile V8 can be newer then version of
  # libclang that YCM uses for completion. So it's possible that YCM's libclang
  # doesn't know about some used warning options, which causes compilation
  # warnings (and errors, because of '-Werror');
  v8_flags.append('-Wno-unknown-warning-option')

  # Header files can't be built. Instead, try to match a header file to its
  # corresponding source file.
  if filename.endswith('.h'):
    alternates = ['.cc', '.cpp']
    for alt_extension in alternates:
      alt_name = filename[:-2] + alt_extension
      if os.path.exists(alt_name):
        filename = alt_name
        break
    else:
      # for/else: only reached when no .cc/.cpp companion was found above.
      if filename.endswith('-inl.h'):
        for alt_extension in alternates:
          alt_name = filename[:-6] + alt_extension
          if os.path.exists(alt_name):
            filename = alt_name
            break;
        else:
          # If this is a standalone -inl.h file with no source, the best we can
          # do is try to use the default flags.
          return v8_flags
      else:
        # If this is a standalone .h file with no source, the best we can do is
        # try to use the default flags.
        return v8_flags

  # ninja_output lives in the checkout's tools/ninja directory.
  sys.path.append(os.path.join(v8_root, 'tools', 'ninja'))
  from ninja_output import GetNinjaOutputDirectory
  out_dir = os.path.realpath(GetNinjaOutputDirectory(v8_root))

  # Ninja needs the path to the source file relative to the output build
  # directory.
  rel_filename = os.path.relpath(os.path.realpath(filename), out_dir)

  # Ask ninja how it would build our source file.
  p = subprocess.Popen(['ninja', '-v', '-C', out_dir, '-t',
                        'commands', rel_filename + '^'],
                       stdout=subprocess.PIPE)
  stdout, stderr = p.communicate()
  if p.returncode:
    # ninja failed; fall back to the default flags.
    return v8_flags

  # Ninja might execute several commands to build something. We want the last
  # clang command.
  clang_line = None
  for line in reversed(stdout.split('\n')):
    if 'clang' in line:
      clang_line = line
      break
  else:
    # No clang invocation found at all; fall back to the default flags.
    return v8_flags

  # Parse flags that are important for YCM's purposes.
  for flag in clang_line.split(' '):
    if flag.startswith('-I'):
      # Relative paths need to be resolved, because they're relative to the
      # output dir, not the source.
      if flag[2] == '/':
        v8_flags.append(flag)
      else:
        abs_path = os.path.normpath(os.path.join(out_dir, flag[2:]))
        v8_flags.append('-I' + abs_path)
    elif flag.startswith('-std'):
      v8_flags.append(flag)
    elif flag.startswith('-') and flag[1] in 'DWFfmO':
      if flag == '-Wno-deprecated-register' or flag == '-Wno-header-guard':
        # These flags causes libclang (3.3) to crash. Remove it until things
        # are fixed.
        continue
      v8_flags.append(flag)

  return v8_flags
def FlagsForFile(filename):
  """This is the main entry point for YCM. Its interface is fixed.

  Args:
    filename: (String) Path to source file being edited.

  Returns:
    (Dictionary)
      'flags': (List of Strings) Command line flags.
      'do_cache': (Boolean) True if the result should be cached.
  """
  v8_root = FindV8SrcFromFilename(filename)
  ninja_flags = GetClangCommandFromNinjaForFilename(v8_root, filename)
  return {
      'flags': flags + ninja_flags,
      'do_cache': True,
  }
| apache-2.0 |
tcharding/kubernetes | cluster/juju/layers/kubernetes-master/lib/charms/kubernetes/common.py | 359 | 2002 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import subprocess
from time import sleep
def get_version(bin_name):
    """Get the version of an installed Kubernetes binary.

    Runs ``<bin_name> --version`` (the string is whitespace-split into a
    command line) and extracts the first three integers printed.

    :param str bin_name: Name of binary
    :return: 3-tuple version (maj, min, patch)

    Example::

        >>> get_version('kubelet')
        (1, 6, 0)
    """
    version_text = subprocess.check_output(
        '{} --version'.format(bin_name).split()).decode('utf-8')
    return tuple(map(int, re.findall("[0-9]+", version_text)[:3]))
def retry(times, delay_secs):
    """ Decorator for retrying a method call.

    The wrapped function is called once, then re-called (after sleeping
    ``delay_secs``) while it keeps returning a falsy value, up to ``times``
    additional attempts.

    Args:
        times: How many times should we retry before giving up
        delay_secs: Delay in secs

    Returns: A callable that would return the last call outcome
    """
    def retry_decorator(func):
        """ Decorator to wrap the function provided.

        Args:
            func: Provided function should return either True od False

        Returns: A callable that would return the last call outcome
        """
        def _wrapped(*args, **kwargs):
            outcome = func(*args, **kwargs)
            for _ in range(times):
                if outcome:
                    break
                sleep(delay_secs)
                outcome = func(*args, **kwargs)
            return outcome
        return _wrapped
    return retry_decorator
| apache-2.0 |
solintegra/addons | hw_posbox_upgrade/__init__.py | 1894 | 1075 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import controllers
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ehashman/oh-mainline | vendor/packages/scrapy/scrapy/tests/test_utils_sitemap.py | 25 | 4450 | import unittest
from scrapy.utils.sitemap import Sitemap, sitemap_urls_from_robots
class SitemapTest(unittest.TestCase):
def test_sitemap(self):
s = Sitemap("""<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.google.com/schemas/sitemap/0.84">
<url>
<loc>http://www.example.com/</loc>
<lastmod>2009-08-16</lastmod>
<changefreq>daily</changefreq>
<priority>1</priority>
</url>
<url>
<loc>http://www.example.com/Special-Offers.html</loc>
<lastmod>2009-08-16</lastmod>
<changefreq>weekly</changefreq>
<priority>0.8</priority>
</url>
</urlset>""")
assert s.type == 'urlset'
self.assertEqual(list(s),
[{'priority': '1', 'loc': 'http://www.example.com/', 'lastmod': '2009-08-16', 'changefreq': 'daily'}, {'priority': '0.8', 'loc': 'http://www.example.com/Special-Offers.html', 'lastmod': '2009-08-16', 'changefreq': 'weekly'}])
def test_sitemap_index(self):
s = Sitemap("""<?xml version="1.0" encoding="UTF-8"?>
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<sitemap>
<loc>http://www.example.com/sitemap1.xml.gz</loc>
<lastmod>2004-10-01T18:23:17+00:00</lastmod>
</sitemap>
<sitemap>
<loc>http://www.example.com/sitemap2.xml.gz</loc>
<lastmod>2005-01-01</lastmod>
</sitemap>
</sitemapindex>""")
assert s.type == 'sitemapindex'
self.assertEqual(list(s), [{'loc': 'http://www.example.com/sitemap1.xml.gz', 'lastmod': '2004-10-01T18:23:17+00:00'}, {'loc': 'http://www.example.com/sitemap2.xml.gz', 'lastmod': '2005-01-01'}])
def test_sitemap_strip(self):
"""Assert we can deal with trailing spaces inside <loc> tags - we've
seen those
"""
s = Sitemap("""<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.google.com/schemas/sitemap/0.84">
<url>
<loc> http://www.example.com/</loc>
<lastmod>2009-08-16</lastmod>
<changefreq>daily</changefreq>
<priority>1</priority>
</url>
<url>
<loc> http://www.example.com/2</loc>
<lastmod />
</url>
</urlset>
""")
self.assertEqual(list(s),
[{'priority': '1', 'loc': 'http://www.example.com/', 'lastmod': '2009-08-16', 'changefreq': 'daily'},
{'loc': 'http://www.example.com/2', 'lastmod': ''},
])
def test_sitemap_wrong_ns(self):
"""We have seen sitemaps with wrongs ns. Presumably, Google still works
with these, though is not 100% confirmed"""
s = Sitemap("""<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.google.com/schemas/sitemap/0.84">
<url xmlns="">
<loc> http://www.example.com/</loc>
<lastmod>2009-08-16</lastmod>
<changefreq>daily</changefreq>
<priority>1</priority>
</url>
<url xmlns="">
<loc> http://www.example.com/2</loc>
<lastmod />
</url>
</urlset>
""")
self.assertEqual(list(s),
[{'priority': '1', 'loc': 'http://www.example.com/', 'lastmod': '2009-08-16', 'changefreq': 'daily'},
{'loc': 'http://www.example.com/2', 'lastmod': ''},
])
def test_sitemap_wrong_ns2(self):
"""We have seen sitemaps with wrongs ns. Presumably, Google still works
with these, though is not 100% confirmed"""
s = Sitemap("""<?xml version="1.0" encoding="UTF-8"?>
<urlset>
<url xmlns="">
<loc> http://www.example.com/</loc>
<lastmod>2009-08-16</lastmod>
<changefreq>daily</changefreq>
<priority>1</priority>
</url>
<url xmlns="">
<loc> http://www.example.com/2</loc>
<lastmod />
</url>
</urlset>
""")
assert s.type == 'urlset'
self.assertEqual(list(s),
[{'priority': '1', 'loc': 'http://www.example.com/', 'lastmod': '2009-08-16', 'changefreq': 'daily'},
{'loc': 'http://www.example.com/2', 'lastmod': ''},
])
def test_sitemap_urls_from_robots(self):
    """Every `Sitemap:` directive in a robots.txt body should be
    extracted, in order, ignoring all other directives and comments.
    """
    robots_txt = """User-agent: *
Disallow: /aff/
Disallow: /wl/
# Search and shopping refining
Disallow: /s*/*facet
Disallow: /s*/*tags
# Sitemap files
Sitemap: http://example.com/sitemap.xml
Sitemap: http://example.com/sitemap-product-index.xml
# Forums
Disallow: /forum/search/
Disallow: /forum/active/
"""
    extracted = list(sitemap_urls_from_robots(robots_txt))
    self.assertEqual(
        extracted,
        ['http://example.com/sitemap.xml', 'http://example.com/sitemap-product-index.xml'])
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# EUCTW frequency table
# Converted from big5 work
# by Taiwan's Mandarin Promotion Council
# <http:#www.edu.tw:81/mandr/>
# 128 --> 0.42261
# 256 --> 0.57851
# 512 --> 0.74851
# 1024 --> 0.89384
# 2048 --> 0.97583
#
# Idea Distribution Ratio = 0.74851/(1-0.74851) =2.98
# Random Distribution Ration = 512/(5401-512)=0.105
#
# Typical Distribution Ratio about 25% of Ideal one, still much higher than RDR
# Typical-to-ideal distribution ratio threshold used by the EUC-TW
# distribution analyser (see the ratio derivation in the comments above).
EUCTW_TYPICAL_DISTRIBUTION_RATIO = 0.75

# Number of entries in the char-to-frequency-order table below.
EUCTW_TABLE_SIZE = 8102
EUCTWCharToFreqOrder = (
1,1800,1506, 255,1431, 198, 9, 82, 6,7310, 177, 202,3615,1256,2808, 110, # 2742
3735, 33,3241, 261, 76, 44,2113, 16,2931,2184,1176, 659,3868, 26,3404,2643, # 2758
1198,3869,3313,4060, 410,2211, 302, 590, 361,1963, 8, 204, 58,4296,7311,1931, # 2774
63,7312,7313, 317,1614, 75, 222, 159,4061,2412,1480,7314,3500,3068, 224,2809, # 2790
3616, 3, 10,3870,1471, 29,2774,1135,2852,1939, 873, 130,3242,1123, 312,7315, # 2806
4297,2051, 507, 252, 682,7316, 142,1914, 124, 206,2932, 34,3501,3173, 64, 604, # 2822
7317,2494,1976,1977, 155,1990, 645, 641,1606,7318,3405, 337, 72, 406,7319, 80, # 2838
630, 238,3174,1509, 263, 939,1092,2644, 756,1440,1094,3406, 449, 69,2969, 591, # 2854
179,2095, 471, 115,2034,1843, 60, 50,2970, 134, 806,1868, 734,2035,3407, 180, # 2870
995,1607, 156, 537,2893, 688,7320, 319,1305, 779,2144, 514,2374, 298,4298, 359, # 2886
2495, 90,2707,1338, 663, 11, 906,1099,2545, 20,2436, 182, 532,1716,7321, 732, # 2902
1376,4062,1311,1420,3175, 25,2312,1056, 113, 399, 382,1949, 242,3408,2467, 529, # 2918
3243, 475,1447,3617,7322, 117, 21, 656, 810,1297,2295,2329,3502,7323, 126,4063, # 2934
706, 456, 150, 613,4299, 71,1118,2036,4064, 145,3069, 85, 835, 486,2114,1246, # 2950
1426, 428, 727,1285,1015, 800, 106, 623, 303,1281,7324,2127,2354, 347,3736, 221, # 2966
3503,3110,7325,1955,1153,4065, 83, 296,1199,3070, 192, 624, 93,7326, 822,1897, # 2982
2810,3111, 795,2064, 991,1554,1542,1592, 27, 43,2853, 859, 139,1456, 860,4300, # 2998
437, 712,3871, 164,2392,3112, 695, 211,3017,2096, 195,3872,1608,3504,3505,3618, # 3014
3873, 234, 811,2971,2097,3874,2229,1441,3506,1615,2375, 668,2076,1638, 305, 228, # 3030
1664,4301, 467, 415,7327, 262,2098,1593, 239, 108, 300, 200,1033, 512,1247,2077, # 3046
7328,7329,2173,3176,3619,2673, 593, 845,1062,3244, 88,1723,2037,3875,1950, 212, # 3062
266, 152, 149, 468,1898,4066,4302, 77, 187,7330,3018, 37, 5,2972,7331,3876, # 3078
7332,7333, 39,2517,4303,2894,3177,2078, 55, 148, 74,4304, 545, 483,1474,1029, # 3094
1665, 217,1869,1531,3113,1104,2645,4067, 24, 172,3507, 900,3877,3508,3509,4305, # 3110
32,1408,2811,1312, 329, 487,2355,2247,2708, 784,2674, 4,3019,3314,1427,1788, # 3126
188, 109, 499,7334,3620,1717,1789, 888,1217,3020,4306,7335,3510,7336,3315,1520, # 3142
3621,3878, 196,1034, 775,7337,7338, 929,1815, 249, 439, 38,7339,1063,7340, 794, # 3158
3879,1435,2296, 46, 178,3245,2065,7341,2376,7342, 214,1709,4307, 804, 35, 707, # 3174
324,3622,1601,2546, 140, 459,4068,7343,7344,1365, 839, 272, 978,2257,2572,3409, # 3190
2128,1363,3623,1423, 697, 100,3071, 48, 70,1231, 495,3114,2193,7345,1294,7346, # 3206
2079, 462, 586,1042,3246, 853, 256, 988, 185,2377,3410,1698, 434,1084,7347,3411, # 3222
314,2615,2775,4308,2330,2331, 569,2280, 637,1816,2518, 757,1162,1878,1616,3412, # 3238
287,1577,2115, 768,4309,1671,2854,3511,2519,1321,3737, 909,2413,7348,4069, 933, # 3254
3738,7349,2052,2356,1222,4310, 765,2414,1322, 786,4311,7350,1919,1462,1677,2895, # 3270
1699,7351,4312,1424,2437,3115,3624,2590,3316,1774,1940,3413,3880,4070, 309,1369, # 3286
1130,2812, 364,2230,1653,1299,3881,3512,3882,3883,2646, 525,1085,3021, 902,2000, # 3302
1475, 964,4313, 421,1844,1415,1057,2281, 940,1364,3116, 376,4314,4315,1381, 7, # 3318
2520, 983,2378, 336,1710,2675,1845, 321,3414, 559,1131,3022,2742,1808,1132,1313, # 3334
265,1481,1857,7352, 352,1203,2813,3247, 167,1089, 420,2814, 776, 792,1724,3513, # 3350
4071,2438,3248,7353,4072,7354, 446, 229, 333,2743, 901,3739,1200,1557,4316,2647, # 3366
1920, 395,2744,2676,3740,4073,1835, 125, 916,3178,2616,4317,7355,7356,3741,7357, # 3382
7358,7359,4318,3117,3625,1133,2547,1757,3415,1510,2313,1409,3514,7360,2145, 438, # 3398
2591,2896,2379,3317,1068, 958,3023, 461, 311,2855,2677,4074,1915,3179,4075,1978, # 3414
383, 750,2745,2617,4076, 274, 539, 385,1278,1442,7361,1154,1964, 384, 561, 210, # 3430
98,1295,2548,3515,7362,1711,2415,1482,3416,3884,2897,1257, 129,7363,3742, 642, # 3446
523,2776,2777,2648,7364, 141,2231,1333, 68, 176, 441, 876, 907,4077, 603,2592, # 3462
710, 171,3417, 404, 549, 18,3118,2393,1410,3626,1666,7365,3516,4319,2898,4320, # 3478
7366,2973, 368,7367, 146, 366, 99, 871,3627,1543, 748, 807,1586,1185, 22,2258, # 3494
379,3743,3180,7368,3181, 505,1941,2618,1991,1382,2314,7369, 380,2357, 218, 702, # 3510
1817,1248,3418,3024,3517,3318,3249,7370,2974,3628, 930,3250,3744,7371, 59,7372, # 3526
585, 601,4078, 497,3419,1112,1314,4321,1801,7373,1223,1472,2174,7374, 749,1836, # 3542
690,1899,3745,1772,3885,1476, 429,1043,1790,2232,2116, 917,4079, 447,1086,1629, # 3558
7375, 556,7376,7377,2020,1654, 844,1090, 105, 550, 966,1758,2815,1008,1782, 686, # 3574
1095,7378,2282, 793,1602,7379,3518,2593,4322,4080,2933,2297,4323,3746, 980,2496, # 3590
544, 353, 527,4324, 908,2678,2899,7380, 381,2619,1942,1348,7381,1341,1252, 560, # 3606
3072,7382,3420,2856,7383,2053, 973, 886,2080, 143,4325,7384,7385, 157,3886, 496, # 3622
4081, 57, 840, 540,2038,4326,4327,3421,2117,1445, 970,2259,1748,1965,2081,4082, # 3638
3119,1234,1775,3251,2816,3629, 773,1206,2129,1066,2039,1326,3887,1738,1725,4083, # 3654
279,3120, 51,1544,2594, 423,1578,2130,2066, 173,4328,1879,7386,7387,1583, 264, # 3670
610,3630,4329,2439, 280, 154,7388,7389,7390,1739, 338,1282,3073, 693,2857,1411, # 3686
1074,3747,2440,7391,4330,7392,7393,1240, 952,2394,7394,2900,1538,2679, 685,1483, # 3702
4084,2468,1436, 953,4085,2054,4331, 671,2395, 79,4086,2441,3252, 608, 567,2680, # 3718
3422,4087,4088,1691, 393,1261,1791,2396,7395,4332,7396,7397,7398,7399,1383,1672, # 3734
3748,3182,1464, 522,1119, 661,1150, 216, 675,4333,3888,1432,3519, 609,4334,2681, # 3750
2397,7400,7401,7402,4089,3025, 0,7403,2469, 315, 231,2442, 301,3319,4335,2380, # 3766
7404, 233,4090,3631,1818,4336,4337,7405, 96,1776,1315,2082,7406, 257,7407,1809, # 3782
3632,2709,1139,1819,4091,2021,1124,2163,2778,1777,2649,7408,3074, 363,1655,3183, # 3798
7409,2975,7410,7411,7412,3889,1567,3890, 718, 103,3184, 849,1443, 341,3320,2934, # 3814
1484,7413,1712, 127, 67, 339,4092,2398, 679,1412, 821,7414,7415, 834, 738, 351, # 3830
2976,2146, 846, 235,1497,1880, 418,1992,3749,2710, 186,1100,2147,2746,3520,1545, # 3846
1355,2935,2858,1377, 583,3891,4093,2573,2977,7416,1298,3633,1078,2549,3634,2358, # 3862
78,3750,3751, 267,1289,2099,2001,1594,4094, 348, 369,1274,2194,2175,1837,4338, # 3878
1820,2817,3635,2747,2283,2002,4339,2936,2748, 144,3321, 882,4340,3892,2749,3423, # 3894
4341,2901,7417,4095,1726, 320,7418,3893,3026, 788,2978,7419,2818,1773,1327,2859, # 3910
3894,2819,7420,1306,4342,2003,1700,3752,3521,2359,2650, 787,2022, 506, 824,3636, # 3926
534, 323,4343,1044,3322,2023,1900, 946,3424,7421,1778,1500,1678,7422,1881,4344, # 3942
165, 243,4345,3637,2521, 123, 683,4096, 764,4346, 36,3895,1792, 589,2902, 816, # 3958
626,1667,3027,2233,1639,1555,1622,3753,3896,7423,3897,2860,1370,1228,1932, 891, # 3974
2083,2903, 304,4097,7424, 292,2979,2711,3522, 691,2100,4098,1115,4347, 118, 662, # 3990
7425, 611,1156, 854,2381,1316,2861, 2, 386, 515,2904,7426,7427,3253, 868,2234, # 4006
1486, 855,2651, 785,2212,3028,7428,1040,3185,3523,7429,3121, 448,7430,1525,7431, # 4022
2164,4348,7432,3754,7433,4099,2820,3524,3122, 503, 818,3898,3123,1568, 814, 676, # 4038
1444, 306,1749,7434,3755,1416,1030, 197,1428, 805,2821,1501,4349,7435,7436,7437, # 4054
1993,7438,4350,7439,7440,2195, 13,2779,3638,2980,3124,1229,1916,7441,3756,2131, # 4070
7442,4100,4351,2399,3525,7443,2213,1511,1727,1120,7444,7445, 646,3757,2443, 307, # 4086
7446,7447,1595,3186,7448,7449,7450,3639,1113,1356,3899,1465,2522,2523,7451, 519, # 4102
7452, 128,2132, 92,2284,1979,7453,3900,1512, 342,3125,2196,7454,2780,2214,1980, # 4118
3323,7455, 290,1656,1317, 789, 827,2360,7456,3758,4352, 562, 581,3901,7457, 401, # 4134
4353,2248, 94,4354,1399,2781,7458,1463,2024,4355,3187,1943,7459, 828,1105,4101, # 4150
1262,1394,7460,4102, 605,4356,7461,1783,2862,7462,2822, 819,2101, 578,2197,2937, # 4166
7463,1502, 436,3254,4103,3255,2823,3902,2905,3425,3426,7464,2712,2315,7465,7466, # 4182
2332,2067, 23,4357, 193, 826,3759,2102, 699,1630,4104,3075, 390,1793,1064,3526, # 4198
7467,1579,3076,3077,1400,7468,4105,1838,1640,2863,7469,4358,4359, 137,4106, 598, # 4214
3078,1966, 780, 104, 974,2938,7470, 278, 899, 253, 402, 572, 504, 493,1339,7471, # 4230
3903,1275,4360,2574,2550,7472,3640,3029,3079,2249, 565,1334,2713, 863, 41,7473, # 4246
7474,4361,7475,1657,2333, 19, 463,2750,4107, 606,7476,2981,3256,1087,2084,1323, # 4262
2652,2982,7477,1631,1623,1750,4108,2682,7478,2864, 791,2714,2653,2334, 232,2416, # 4278
7479,2983,1498,7480,2654,2620, 755,1366,3641,3257,3126,2025,1609, 119,1917,3427, # 4294
862,1026,4109,7481,3904,3760,4362,3905,4363,2260,1951,2470,7482,1125, 817,4110, # 4310
4111,3906,1513,1766,2040,1487,4112,3030,3258,2824,3761,3127,7483,7484,1507,7485, # 4326
2683, 733, 40,1632,1106,2865, 345,4113, 841,2524, 230,4364,2984,1846,3259,3428, # 4342
7486,1263, 986,3429,7487, 735, 879, 254,1137, 857, 622,1300,1180,1388,1562,3907, # 4358
3908,2939, 967,2751,2655,1349, 592,2133,1692,3324,2985,1994,4114,1679,3909,1901, # 4374
2185,7488, 739,3642,2715,1296,1290,7489,4115,2198,2199,1921,1563,2595,2551,1870, # 4390
2752,2986,7490, 435,7491, 343,1108, 596, 17,1751,4365,2235,3430,3643,7492,4366, # 4406
294,3527,2940,1693, 477, 979, 281,2041,3528, 643,2042,3644,2621,2782,2261,1031, # 4422
2335,2134,2298,3529,4367, 367,1249,2552,7493,3530,7494,4368,1283,3325,2004, 240, # 4438
1762,3326,4369,4370, 836,1069,3128, 474,7495,2148,2525, 268,3531,7496,3188,1521, # 4454
1284,7497,1658,1546,4116,7498,3532,3533,7499,4117,3327,2684,1685,4118, 961,1673, # 4470
2622, 190,2005,2200,3762,4371,4372,7500, 570,2497,3645,1490,7501,4373,2623,3260, # 4486
1956,4374, 584,1514, 396,1045,1944,7502,4375,1967,2444,7503,7504,4376,3910, 619, # 4502
7505,3129,3261, 215,2006,2783,2553,3189,4377,3190,4378, 763,4119,3763,4379,7506, # 4518
7507,1957,1767,2941,3328,3646,1174, 452,1477,4380,3329,3130,7508,2825,1253,2382, # 4534
2186,1091,2285,4120, 492,7509, 638,1169,1824,2135,1752,3911, 648, 926,1021,1324, # 4550
4381, 520,4382, 997, 847,1007, 892,4383,3764,2262,1871,3647,7510,2400,1784,4384, # 4566
1952,2942,3080,3191,1728,4121,2043,3648,4385,2007,1701,3131,1551, 30,2263,4122, # 4582
7511,2026,4386,3534,7512, 501,7513,4123, 594,3431,2165,1821,3535,3432,3536,3192, # 4598
829,2826,4124,7514,1680,3132,1225,4125,7515,3262,4387,4126,3133,2336,7516,4388, # 4614
4127,7517,3912,3913,7518,1847,2383,2596,3330,7519,4389, 374,3914, 652,4128,4129, # 4630
375,1140, 798,7520,7521,7522,2361,4390,2264, 546,1659, 138,3031,2445,4391,7523, # 4646
2250, 612,1848, 910, 796,3765,1740,1371, 825,3766,3767,7524,2906,2554,7525, 692, # 4662
444,3032,2624, 801,4392,4130,7526,1491, 244,1053,3033,4131,4132, 340,7527,3915, # 4678
1041,2987, 293,1168, 87,1357,7528,1539, 959,7529,2236, 721, 694,4133,3768, 219, # 4694
1478, 644,1417,3331,2656,1413,1401,1335,1389,3916,7530,7531,2988,2362,3134,1825, # 4710
730,1515, 184,2827, 66,4393,7532,1660,2943, 246,3332, 378,1457, 226,3433, 975, # 4726
3917,2944,1264,3537, 674, 696,7533, 163,7534,1141,2417,2166, 713,3538,3333,4394, # 4742
3918,7535,7536,1186, 15,7537,1079,1070,7538,1522,3193,3539, 276,1050,2716, 758, # 4758
1126, 653,2945,3263,7539,2337, 889,3540,3919,3081,2989, 903,1250,4395,3920,3434, # 4774
3541,1342,1681,1718, 766,3264, 286, 89,2946,3649,7540,1713,7541,2597,3334,2990, # 4790
7542,2947,2215,3194,2866,7543,4396,2498,2526, 181, 387,1075,3921, 731,2187,3335, # 4806
7544,3265, 310, 313,3435,2299, 770,4134, 54,3034, 189,4397,3082,3769,3922,7545, # 4822
1230,1617,1849, 355,3542,4135,4398,3336, 111,4136,3650,1350,3135,3436,3035,4137, # 4838
2149,3266,3543,7546,2784,3923,3924,2991, 722,2008,7547,1071, 247,1207,2338,2471, # 4854
1378,4399,2009, 864,1437,1214,4400, 373,3770,1142,2216, 667,4401, 442,2753,2555, # 4870
3771,3925,1968,4138,3267,1839, 837, 170,1107, 934,1336,1882,7548,7549,2118,4139, # 4886
2828, 743,1569,7550,4402,4140, 582,2384,1418,3437,7551,1802,7552, 357,1395,1729, # 4902
3651,3268,2418,1564,2237,7553,3083,3772,1633,4403,1114,2085,4141,1532,7554, 482, # 4918
2446,4404,7555,7556,1492, 833,1466,7557,2717,3544,1641,2829,7558,1526,1272,3652, # 4934
4142,1686,1794, 416,2556,1902,1953,1803,7559,3773,2785,3774,1159,2316,7560,2867, # 4950
4405,1610,1584,3036,2419,2754, 443,3269,1163,3136,7561,7562,3926,7563,4143,2499, # 4966
3037,4406,3927,3137,2103,1647,3545,2010,1872,4144,7564,4145, 431,3438,7565, 250, # 4982
97, 81,4146,7566,1648,1850,1558, 160, 848,7567, 866, 740,1694,7568,2201,2830, # 4998
3195,4147,4407,3653,1687, 950,2472, 426, 469,3196,3654,3655,3928,7569,7570,1188, # 5014
424,1995, 861,3546,4148,3775,2202,2685, 168,1235,3547,4149,7571,2086,1674,4408, # 5030
3337,3270, 220,2557,1009,7572,3776, 670,2992, 332,1208, 717,7573,7574,3548,2447, # 5046
3929,3338,7575, 513,7576,1209,2868,3339,3138,4409,1080,7577,7578,7579,7580,2527, # 5062
3656,3549, 815,1587,3930,3931,7581,3550,3439,3777,1254,4410,1328,3038,1390,3932, # 5078
1741,3933,3778,3934,7582, 236,3779,2448,3271,7583,7584,3657,3780,1273,3781,4411, # 5094
7585, 308,7586,4412, 245,4413,1851,2473,1307,2575, 430, 715,2136,2449,7587, 270, # 5110
199,2869,3935,7588,3551,2718,1753, 761,1754, 725,1661,1840,4414,3440,3658,7589, # 5126
7590, 587, 14,3272, 227,2598, 326, 480,2265, 943,2755,3552, 291, 650,1883,7591, # 5142
1702,1226, 102,1547, 62,3441, 904,4415,3442,1164,4150,7592,7593,1224,1548,2756, # 5158
391, 498,1493,7594,1386,1419,7595,2055,1177,4416, 813, 880,1081,2363, 566,1145, # 5174
4417,2286,1001,1035,2558,2599,2238, 394,1286,7596,7597,2068,7598, 86,1494,1730, # 5190
3936, 491,1588, 745, 897,2948, 843,3340,3937,2757,2870,3273,1768, 998,2217,2069, # 5206
397,1826,1195,1969,3659,2993,3341, 284,7599,3782,2500,2137,2119,1903,7600,3938, # 5222
2150,3939,4151,1036,3443,1904, 114,2559,4152, 209,1527,7601,7602,2949,2831,2625, # 5238
2385,2719,3139, 812,2560,7603,3274,7604,1559, 737,1884,3660,1210, 885, 28,2686, # 5254
3553,3783,7605,4153,1004,1779,4418,7606, 346,1981,2218,2687,4419,3784,1742, 797, # 5270
1642,3940,1933,1072,1384,2151, 896,3941,3275,3661,3197,2871,3554,7607,2561,1958, # 5286
4420,2450,1785,7608,7609,7610,3942,4154,1005,1308,3662,4155,2720,4421,4422,1528, # 5302
2600, 161,1178,4156,1982, 987,4423,1101,4157, 631,3943,1157,3198,2420,1343,1241, # 5318
1016,2239,2562, 372, 877,2339,2501,1160, 555,1934, 911,3944,7611, 466,1170, 169, # 5334
1051,2907,2688,3663,2474,2994,1182,2011,2563,1251,2626,7612, 992,2340,3444,1540, # 5350
2721,1201,2070,2401,1996,2475,7613,4424, 528,1922,2188,1503,1873,1570,2364,3342, # 5366
3276,7614, 557,1073,7615,1827,3445,2087,2266,3140,3039,3084, 767,3085,2786,4425, # 5382
1006,4158,4426,2341,1267,2176,3664,3199, 778,3945,3200,2722,1597,2657,7616,4427, # 5398
7617,3446,7618,7619,7620,3277,2689,1433,3278, 131, 95,1504,3946, 723,4159,3141, # 5414
1841,3555,2758,2189,3947,2027,2104,3665,7621,2995,3948,1218,7622,3343,3201,3949, # 5430
4160,2576, 248,1634,3785, 912,7623,2832,3666,3040,3786, 654, 53,7624,2996,7625, # 5446
1688,4428, 777,3447,1032,3950,1425,7626, 191, 820,2120,2833, 971,4429, 931,3202, # 5462
135, 664, 783,3787,1997, 772,2908,1935,3951,3788,4430,2909,3203, 282,2723, 640, # 5478
1372,3448,1127, 922, 325,3344,7627,7628, 711,2044,7629,7630,3952,2219,2787,1936, # 5494
3953,3345,2220,2251,3789,2300,7631,4431,3790,1258,3279,3954,3204,2138,2950,3955, # 5510
3956,7632,2221, 258,3205,4432, 101,1227,7633,3280,1755,7634,1391,3281,7635,2910, # 5526
2056, 893,7636,7637,7638,1402,4161,2342,7639,7640,3206,3556,7641,7642, 878,1325, # 5542
1780,2788,4433, 259,1385,2577, 744,1183,2267,4434,7643,3957,2502,7644, 684,1024, # 5558
4162,7645, 472,3557,3449,1165,3282,3958,3959, 322,2152, 881, 455,1695,1152,1340, # 5574
660, 554,2153,4435,1058,4436,4163, 830,1065,3346,3960,4437,1923,7646,1703,1918, # 5590
7647, 932,2268, 122,7648,4438, 947, 677,7649,3791,2627, 297,1905,1924,2269,4439, # 5606
2317,3283,7650,7651,4164,7652,4165, 84,4166, 112, 989,7653, 547,1059,3961, 701, # 5622
3558,1019,7654,4167,7655,3450, 942, 639, 457,2301,2451, 993,2951, 407, 851, 494, # 5638
4440,3347, 927,7656,1237,7657,2421,3348, 573,4168, 680, 921,2911,1279,1874, 285, # 5654
790,1448,1983, 719,2167,7658,7659,4441,3962,3963,1649,7660,1541, 563,7661,1077, # 5670
7662,3349,3041,3451, 511,2997,3964,3965,3667,3966,1268,2564,3350,3207,4442,4443, # 5686
7663, 535,1048,1276,1189,2912,2028,3142,1438,1373,2834,2952,1134,2012,7664,4169, # 5702
1238,2578,3086,1259,7665, 700,7666,2953,3143,3668,4170,7667,4171,1146,1875,1906, # 5718
4444,2601,3967, 781,2422, 132,1589, 203, 147, 273,2789,2402, 898,1786,2154,3968, # 5734
3969,7668,3792,2790,7669,7670,4445,4446,7671,3208,7672,1635,3793, 965,7673,1804, # 5750
2690,1516,3559,1121,1082,1329,3284,3970,1449,3794, 65,1128,2835,2913,2759,1590, # 5766
3795,7674,7675, 12,2658, 45, 976,2579,3144,4447, 517,2528,1013,1037,3209,7676, # 5782
3796,2836,7677,3797,7678,3452,7679,2602, 614,1998,2318,3798,3087,2724,2628,7680, # 5798
2580,4172, 599,1269,7681,1810,3669,7682,2691,3088, 759,1060, 489,1805,3351,3285, # 5814
1358,7683,7684,2386,1387,1215,2629,2252, 490,7685,7686,4173,1759,2387,2343,7687, # 5830
4448,3799,1907,3971,2630,1806,3210,4449,3453,3286,2760,2344, 874,7688,7689,3454, # 5846
3670,1858, 91,2914,3671,3042,3800,4450,7690,3145,3972,2659,7691,3455,1202,1403, # 5862
3801,2954,2529,1517,2503,4451,3456,2504,7692,4452,7693,2692,1885,1495,1731,3973, # 5878
2365,4453,7694,2029,7695,7696,3974,2693,1216, 237,2581,4174,2319,3975,3802,4454, # 5894
4455,2694,3560,3457, 445,4456,7697,7698,7699,7700,2761, 61,3976,3672,1822,3977, # 5910
7701, 687,2045, 935, 925, 405,2660, 703,1096,1859,2725,4457,3978,1876,1367,2695, # 5926
3352, 918,2105,1781,2476, 334,3287,1611,1093,4458, 564,3146,3458,3673,3353, 945, # 5942
2631,2057,4459,7702,1925, 872,4175,7703,3459,2696,3089, 349,4176,3674,3979,4460, # 5958
3803,4177,3675,2155,3980,4461,4462,4178,4463,2403,2046, 782,3981, 400, 251,4179, # 5974
1624,7704,7705, 277,3676, 299,1265, 476,1191,3804,2121,4180,4181,1109, 205,7706, # 5990
2582,1000,2156,3561,1860,7707,7708,7709,4464,7710,4465,2565, 107,2477,2157,3982, # 6006
3460,3147,7711,1533, 541,1301, 158, 753,4182,2872,3562,7712,1696, 370,1088,4183, # 6022
4466,3563, 579, 327, 440, 162,2240, 269,1937,1374,3461, 968,3043, 56,1396,3090, # 6038
2106,3288,3354,7713,1926,2158,4467,2998,7714,3564,7715,7716,3677,4468,2478,7717, # 6054
2791,7718,1650,4469,7719,2603,7720,7721,3983,2661,3355,1149,3356,3984,3805,3985, # 6070
7722,1076, 49,7723, 951,3211,3289,3290, 450,2837, 920,7724,1811,2792,2366,4184, # 6086
1908,1138,2367,3806,3462,7725,3212,4470,1909,1147,1518,2423,4471,3807,7726,4472, # 6102
2388,2604, 260,1795,3213,7727,7728,3808,3291, 708,7729,3565,1704,7730,3566,1351, # 6118
1618,3357,2999,1886, 944,4185,3358,4186,3044,3359,4187,7731,3678, 422, 413,1714, # 6134
3292, 500,2058,2345,4188,2479,7732,1344,1910, 954,7733,1668,7734,7735,3986,2404, # 6150
4189,3567,3809,4190,7736,2302,1318,2505,3091, 133,3092,2873,4473, 629, 31,2838, # 6166
2697,3810,4474, 850, 949,4475,3987,2955,1732,2088,4191,1496,1852,7737,3988, 620, # 6182
3214, 981,1242,3679,3360,1619,3680,1643,3293,2139,2452,1970,1719,3463,2168,7738, # 6198
3215,7739,7740,3361,1828,7741,1277,4476,1565,2047,7742,1636,3568,3093,7743, 869, # 6214
2839, 655,3811,3812,3094,3989,3000,3813,1310,3569,4477,7744,7745,7746,1733, 558, # 6230
4478,3681, 335,1549,3045,1756,4192,3682,1945,3464,1829,1291,1192, 470,2726,2107, # 6246
2793, 913,1054,3990,7747,1027,7748,3046,3991,4479, 982,2662,3362,3148,3465,3216, # 6262
3217,1946,2794,7749, 571,4480,7750,1830,7751,3570,2583,1523,2424,7752,2089, 984, # 6278
4481,3683,1959,7753,3684, 852, 923,2795,3466,3685, 969,1519, 999,2048,2320,1705, # 6294
7754,3095, 615,1662, 151, 597,3992,2405,2321,1049, 275,4482,3686,4193, 568,3687, # 6310
3571,2480,4194,3688,7755,2425,2270, 409,3218,7756,1566,2874,3467,1002, 769,2840, # 6326
194,2090,3149,3689,2222,3294,4195, 628,1505,7757,7758,1763,2177,3001,3993, 521, # 6342
1161,2584,1787,2203,2406,4483,3994,1625,4196,4197, 412, 42,3096, 464,7759,2632, # 6358
4484,3363,1760,1571,2875,3468,2530,1219,2204,3814,2633,2140,2368,4485,4486,3295, # 6374
1651,3364,3572,7760,7761,3573,2481,3469,7762,3690,7763,7764,2271,2091, 460,7765, # 6390
4487,7766,3002, 962, 588,3574, 289,3219,2634,1116, 52,7767,3047,1796,7768,7769, # 6406
7770,1467,7771,1598,1143,3691,4198,1984,1734,1067,4488,1280,3365, 465,4489,1572, # 6422
510,7772,1927,2241,1812,1644,3575,7773,4490,3692,7774,7775,2663,1573,1534,7776, # 6438
7777,4199, 536,1807,1761,3470,3815,3150,2635,7778,7779,7780,4491,3471,2915,1911, # 6454
2796,7781,3296,1122, 377,3220,7782, 360,7783,7784,4200,1529, 551,7785,2059,3693, # 6470
1769,2426,7786,2916,4201,3297,3097,2322,2108,2030,4492,1404, 136,1468,1479, 672, # 6486
1171,3221,2303, 271,3151,7787,2762,7788,2049, 678,2727, 865,1947,4493,7789,2013, # 6502
3995,2956,7790,2728,2223,1397,3048,3694,4494,4495,1735,2917,3366,3576,7791,3816, # 6518
509,2841,2453,2876,3817,7792,7793,3152,3153,4496,4202,2531,4497,2304,1166,1010, # 6534
552, 681,1887,7794,7795,2957,2958,3996,1287,1596,1861,3154, 358, 453, 736, 175, # 6550
478,1117, 905,1167,1097,7796,1853,1530,7797,1706,7798,2178,3472,2287,3695,3473, # 6566
3577,4203,2092,4204,7799,3367,1193,2482,4205,1458,2190,2205,1862,1888,1421,3298, # 6582
2918,3049,2179,3474, 595,2122,7800,3997,7801,7802,4206,1707,2636, 223,3696,1359, # 6598
751,3098, 183,3475,7803,2797,3003, 419,2369, 633, 704,3818,2389, 241,7804,7805, # 6614
7806, 838,3004,3697,2272,2763,2454,3819,1938,2050,3998,1309,3099,2242,1181,7807, # 6630
1136,2206,3820,2370,1446,4207,2305,4498,7808,7809,4208,1055,2605, 484,3698,7810, # 6646
3999, 625,4209,2273,3368,1499,4210,4000,7811,4001,4211,3222,2274,2275,3476,7812, # 6662
7813,2764, 808,2606,3699,3369,4002,4212,3100,2532, 526,3370,3821,4213, 955,7814, # 6678
1620,4214,2637,2427,7815,1429,3700,1669,1831, 994, 928,7816,3578,1260,7817,7818, # 6694
7819,1948,2288, 741,2919,1626,4215,2729,2455, 867,1184, 362,3371,1392,7820,7821, # 6710
4003,4216,1770,1736,3223,2920,4499,4500,1928,2698,1459,1158,7822,3050,3372,2877, # 6726
1292,1929,2506,2842,3701,1985,1187,2071,2014,2607,4217,7823,2566,2507,2169,3702, # 6742
2483,3299,7824,3703,4501,7825,7826, 666,1003,3005,1022,3579,4218,7827,4502,1813, # 6758
2253, 574,3822,1603, 295,1535, 705,3823,4219, 283, 858, 417,7828,7829,3224,4503, # 6774
4504,3051,1220,1889,1046,2276,2456,4004,1393,1599, 689,2567, 388,4220,7830,2484, # 6790
802,7831,2798,3824,2060,1405,2254,7832,4505,3825,2109,1052,1345,3225,1585,7833, # 6806
809,7834,7835,7836, 575,2730,3477, 956,1552,1469,1144,2323,7837,2324,1560,2457, # 6822
3580,3226,4005, 616,2207,3155,2180,2289,7838,1832,7839,3478,4506,7840,1319,3704, # 6838
3705,1211,3581,1023,3227,1293,2799,7841,7842,7843,3826, 607,2306,3827, 762,2878, # 6854
1439,4221,1360,7844,1485,3052,7845,4507,1038,4222,1450,2061,2638,4223,1379,4508, # 6870
2585,7846,7847,4224,1352,1414,2325,2921,1172,7848,7849,3828,3829,7850,1797,1451, # 6886
7851,7852,7853,7854,2922,4006,4007,2485,2346, 411,4008,4009,3582,3300,3101,4509, # 6902
1561,2664,1452,4010,1375,7855,7856, 47,2959, 316,7857,1406,1591,2923,3156,7858, # 6918
1025,2141,3102,3157, 354,2731, 884,2224,4225,2407, 508,3706, 726,3583, 996,2428, # 6934
3584, 729,7859, 392,2191,1453,4011,4510,3707,7860,7861,2458,3585,2608,1675,2800, # 6950
919,2347,2960,2348,1270,4511,4012, 73,7862,7863, 647,7864,3228,2843,2255,1550, # 6966
1346,3006,7865,1332, 883,3479,7866,7867,7868,7869,3301,2765,7870,1212, 831,1347, # 6982
4226,4512,2326,3830,1863,3053, 720,3831,4513,4514,3832,7871,4227,7872,7873,4515, # 6998
7874,7875,1798,4516,3708,2609,4517,3586,1645,2371,7876,7877,2924, 669,2208,2665, # 7014
2429,7878,2879,7879,7880,1028,3229,7881,4228,2408,7882,2256,1353,7883,7884,4518, # 7030
3158, 518,7885,4013,7886,4229,1960,7887,2142,4230,7888,7889,3007,2349,2350,3833, # 7046
516,1833,1454,4014,2699,4231,4519,2225,2610,1971,1129,3587,7890,2766,7891,2961, # 7062
1422, 577,1470,3008,1524,3373,7892,7893, 432,4232,3054,3480,7894,2586,1455,2508, # 7078
2226,1972,1175,7895,1020,2732,4015,3481,4520,7896,2733,7897,1743,1361,3055,3482, # 7094
2639,4016,4233,4521,2290, 895, 924,4234,2170, 331,2243,3056, 166,1627,3057,1098, # 7110
7898,1232,2880,2227,3374,4522, 657, 403,1196,2372, 542,3709,3375,1600,4235,3483, # 7126
7899,4523,2767,3230, 576, 530,1362,7900,4524,2533,2666,3710,4017,7901, 842,3834, # 7142
7902,2801,2031,1014,4018, 213,2700,3376, 665, 621,4236,7903,3711,2925,2430,7904, # 7158
2431,3302,3588,3377,7905,4237,2534,4238,4525,3589,1682,4239,3484,1380,7906, 724, # 7174
2277, 600,1670,7907,1337,1233,4526,3103,2244,7908,1621,4527,7909, 651,4240,7910, # 7190
1612,4241,2611,7911,2844,7912,2734,2307,3058,7913, 716,2459,3059, 174,1255,2701, # 7206
4019,3590, 548,1320,1398, 728,4020,1574,7914,1890,1197,3060,4021,7915,3061,3062, # 7222
3712,3591,3713, 747,7916, 635,4242,4528,7917,7918,7919,4243,7920,7921,4529,7922, # 7238
3378,4530,2432, 451,7923,3714,2535,2072,4244,2735,4245,4022,7924,1764,4531,7925, # 7254
4246, 350,7926,2278,2390,2486,7927,4247,4023,2245,1434,4024, 488,4532, 458,4248, # 7270
4025,3715, 771,1330,2391,3835,2568,3159,2159,2409,1553,2667,3160,4249,7928,2487, # 7286
2881,2612,1720,2702,4250,3379,4533,7929,2536,4251,7930,3231,4252,2768,7931,2015, # 7302
2736,7932,1155,1017,3716,3836,7933,3303,2308, 201,1864,4253,1430,7934,4026,7935, # 7318
7936,7937,7938,7939,4254,1604,7940, 414,1865, 371,2587,4534,4535,3485,2016,3104, # 7334
4536,1708, 960,4255, 887, 389,2171,1536,1663,1721,7941,2228,4027,2351,2926,1580, # 7350
7942,7943,7944,1744,7945,2537,4537,4538,7946,4539,7947,2073,7948,7949,3592,3380, # 7366
2882,4256,7950,4257,2640,3381,2802, 673,2703,2460, 709,3486,4028,3593,4258,7951, # 7382
1148, 502, 634,7952,7953,1204,4540,3594,1575,4541,2613,3717,7954,3718,3105, 948, # 7398
3232, 121,1745,3837,1110,7955,4259,3063,2509,3009,4029,3719,1151,1771,3838,1488, # 7414
4030,1986,7956,2433,3487,7957,7958,2093,7959,4260,3839,1213,1407,2803, 531,2737, # 7430
2538,3233,1011,1537,7960,2769,4261,3106,1061,7961,3720,3721,1866,2883,7962,2017, # 7446
120,4262,4263,2062,3595,3234,2309,3840,2668,3382,1954,4542,7963,7964,3488,1047, # 7462
2704,1266,7965,1368,4543,2845, 649,3383,3841,2539,2738,1102,2846,2669,7966,7967, # 7478
1999,7968,1111,3596,2962,7969,2488,3842,3597,2804,1854,3384,3722,7970,7971,3385, # 7494
2410,2884,3304,3235,3598,7972,2569,7973,3599,2805,4031,1460, 856,7974,3600,7975, # 7510
2885,2963,7976,2886,3843,7977,4264, 632,2510, 875,3844,1697,3845,2291,7978,7979, # 7526
4544,3010,1239, 580,4545,4265,7980, 914, 936,2074,1190,4032,1039,2123,7981,7982, # 7542
7983,3386,1473,7984,1354,4266,3846,7985,2172,3064,4033, 915,3305,4267,4268,3306, # 7558
1605,1834,7986,2739, 398,3601,4269,3847,4034, 328,1912,2847,4035,3848,1331,4270, # 7574
3011, 937,4271,7987,3602,4036,4037,3387,2160,4546,3388, 524, 742, 538,3065,1012, # 7590
7988,7989,3849,2461,7990, 658,1103, 225,3850,7991,7992,4547,7993,4548,7994,3236, # 7606
1243,7995,4038, 963,2246,4549,7996,2705,3603,3161,7997,7998,2588,2327,7999,4550, # 7622
8000,8001,8002,3489,3307, 957,3389,2540,2032,1930,2927,2462, 870,2018,3604,1746, # 7638
2770,2771,2434,2463,8003,3851,8004,3723,3107,3724,3490,3390,3725,8005,1179,3066, # 7654
8006,3162,2373,4272,3726,2541,3163,3108,2740,4039,8007,3391,1556,2542,2292, 977, # 7670
2887,2033,4040,1205,3392,8008,1765,3393,3164,2124,1271,1689, 714,4551,3491,8009, # 7686
2328,3852, 533,4273,3605,2181, 617,8010,2464,3308,3492,2310,8011,8012,3165,8013, # 7702
8014,3853,1987, 618, 427,2641,3493,3394,8015,8016,1244,1690,8017,2806,4274,4552, # 7718
8018,3494,8019,8020,2279,1576, 473,3606,4275,3395, 972,8021,3607,8022,3067,8023, # 7734
8024,4553,4554,8025,3727,4041,4042,8026, 153,4555, 356,8027,1891,2888,4276,2143, # 7750
408, 803,2352,8028,3854,8029,4277,1646,2570,2511,4556,4557,3855,8030,3856,4278, # 7766
8031,2411,3396, 752,8032,8033,1961,2964,8034, 746,3012,2465,8035,4279,3728, 698, # 7782
4558,1892,4280,3608,2543,4559,3609,3857,8036,3166,3397,8037,1823,1302,4043,2706, # 7798
3858,1973,4281,8038,4282,3167, 823,1303,1288,1236,2848,3495,4044,3398, 774,3859, # 7814
8039,1581,4560,1304,2849,3860,4561,8040,2435,2161,1083,3237,4283,4045,4284, 344, # 7830
1173, 288,2311, 454,1683,8041,8042,1461,4562,4046,2589,8043,8044,4563, 985, 894, # 7846
8045,3399,3168,8046,1913,2928,3729,1988,8047,2110,1974,8048,4047,8049,2571,1194, # 7862
425,8050,4564,3169,1245,3730,4285,8051,8052,2850,8053, 636,4565,1855,3861, 760, # 7878
1799,8054,4286,2209,1508,4566,4048,1893,1684,2293,8055,8056,8057,4287,4288,2210, # 7894
479,8058,8059, 832,8060,4049,2489,8061,2965,2490,3731, 990,3109, 627,1814,2642, # 7910
4289,1582,4290,2125,2111,3496,4567,8062, 799,4291,3170,8063,4568,2112,1737,3013, # 7926
1018, 543, 754,4292,3309,1676,4569,4570,4050,8064,1489,8065,3497,8066,2614,2889, # 7942
4051,8067,8068,2966,8069,8070,8071,8072,3171,4571,4572,2182,1722,8073,3238,3239, # 7958
1842,3610,1715, 481, 365,1975,1856,8074,8075,1962,2491,4573,8076,2126,3611,3240, # 7974
433,1894,2063,2075,8077, 602,2741,8078,8079,8080,8081,8082,3014,1628,3400,8083, # 7990
3172,4574,4052,2890,4575,2512,8084,2544,2772,8085,8086,8087,3310,4576,2891,8088, # 8006
4577,8089,2851,4578,4579,1221,2967,4053,2513,8090,8091,8092,1867,1989,8093,8094, # 8022
8095,1895,8096,8097,4580,1896,4054, 318,8098,2094,4055,4293,8099,8100, 485,8101, # 8038
938,3862, 553,2670, 116,8102,3863,3612,8103,3498,2671,2773,3401,3311,2807,8104, # 8054
3613,2929,4056,1747,2930,2968,8105,8106, 207,8107,8108,2672,4581,2514,8109,3015, # 8070
890,3614,3864,8110,1877,3732,3402,8111,2183,2353,3403,1652,8112,8113,8114, 941, # 8086
2294, 208,3499,4057,2019, 330,4294,3865,2892,2492,3733,4295,8115,8116,8117,8118, # 8102
#Everything below is of no interest for detection purpose
2515,1613,4582,8119,3312,3866,2516,8120,4058,8121,1637,4059,2466,4583,3867,8122, # 8118
2493,3016,3734,8123,8124,2192,8125,8126,2162,8127,8128,8129,8130,8131,8132,8133, # 8134
8134,8135,8136,8137,8138,8139,8140,8141,8142,8143,8144,8145,8146,8147,8148,8149, # 8150
8150,8151,8152,8153,8154,8155,8156,8157,8158,8159,8160,8161,8162,8163,8164,8165, # 8166
8166,8167,8168,8169,8170,8171,8172,8173,8174,8175,8176,8177,8178,8179,8180,8181, # 8182
8182,8183,8184,8185,8186,8187,8188,8189,8190,8191,8192,8193,8194,8195,8196,8197, # 8198
8198,8199,8200,8201,8202,8203,8204,8205,8206,8207,8208,8209,8210,8211,8212,8213, # 8214
8214,8215,8216,8217,8218,8219,8220,8221,8222,8223,8224,8225,8226,8227,8228,8229, # 8230
8230,8231,8232,8233,8234,8235,8236,8237,8238,8239,8240,8241,8242,8243,8244,8245, # 8246
8246,8247,8248,8249,8250,8251,8252,8253,8254,8255,8256,8257,8258,8259,8260,8261, # 8262
8262,8263,8264,8265,8266,8267,8268,8269,8270,8271,8272,8273,8274,8275,8276,8277, # 8278
8278,8279,8280,8281,8282,8283,8284,8285,8286,8287,8288,8289,8290,8291,8292,8293, # 8294
8294,8295,8296,8297,8298,8299,8300,8301,8302,8303,8304,8305,8306,8307,8308,8309, # 8310
8310,8311,8312,8313,8314,8315,8316,8317,8318,8319,8320,8321,8322,8323,8324,8325, # 8326
8326,8327,8328,8329,8330,8331,8332,8333,8334,8335,8336,8337,8338,8339,8340,8341, # 8342
8342,8343,8344,8345,8346,8347,8348,8349,8350,8351,8352,8353,8354,8355,8356,8357, # 8358
8358,8359,8360,8361,8362,8363,8364,8365,8366,8367,8368,8369,8370,8371,8372,8373, # 8374
8374,8375,8376,8377,8378,8379,8380,8381,8382,8383,8384,8385,8386,8387,8388,8389, # 8390
8390,8391,8392,8393,8394,8395,8396,8397,8398,8399,8400,8401,8402,8403,8404,8405, # 8406
8406,8407,8408,8409,8410,8411,8412,8413,8414,8415,8416,8417,8418,8419,8420,8421, # 8422
8422,8423,8424,8425,8426,8427,8428,8429,8430,8431,8432,8433,8434,8435,8436,8437, # 8438
8438,8439,8440,8441,8442,8443,8444,8445,8446,8447,8448,8449,8450,8451,8452,8453, # 8454
8454,8455,8456,8457,8458,8459,8460,8461,8462,8463,8464,8465,8466,8467,8468,8469, # 8470
8470,8471,8472,8473,8474,8475,8476,8477,8478,8479,8480,8481,8482,8483,8484,8485, # 8486
8486,8487,8488,8489,8490,8491,8492,8493,8494,8495,8496,8497,8498,8499,8500,8501, # 8502
8502,8503,8504,8505,8506,8507,8508,8509,8510,8511,8512,8513,8514,8515,8516,8517, # 8518
8518,8519,8520,8521,8522,8523,8524,8525,8526,8527,8528,8529,8530,8531,8532,8533, # 8534
8534,8535,8536,8537,8538,8539,8540,8541,8542,8543,8544,8545,8546,8547,8548,8549, # 8550
8550,8551,8552,8553,8554,8555,8556,8557,8558,8559,8560,8561,8562,8563,8564,8565, # 8566
8566,8567,8568,8569,8570,8571,8572,8573,8574,8575,8576,8577,8578,8579,8580,8581, # 8582
8582,8583,8584,8585,8586,8587,8588,8589,8590,8591,8592,8593,8594,8595,8596,8597, # 8598
8598,8599,8600,8601,8602,8603,8604,8605,8606,8607,8608,8609,8610,8611,8612,8613, # 8614
8614,8615,8616,8617,8618,8619,8620,8621,8622,8623,8624,8625,8626,8627,8628,8629, # 8630
8630,8631,8632,8633,8634,8635,8636,8637,8638,8639,8640,8641,8642,8643,8644,8645, # 8646
8646,8647,8648,8649,8650,8651,8652,8653,8654,8655,8656,8657,8658,8659,8660,8661, # 8662
8662,8663,8664,8665,8666,8667,8668,8669,8670,8671,8672,8673,8674,8675,8676,8677, # 8678
8678,8679,8680,8681,8682,8683,8684,8685,8686,8687,8688,8689,8690,8691,8692,8693, # 8694
8694,8695,8696,8697,8698,8699,8700,8701,8702,8703,8704,8705,8706,8707,8708,8709, # 8710
8710,8711,8712,8713,8714,8715,8716,8717,8718,8719,8720,8721,8722,8723,8724,8725, # 8726
8726,8727,8728,8729,8730,8731,8732,8733,8734,8735,8736,8737,8738,8739,8740,8741) # 8742
# flake8: noqa
| apache-2.0 |
eric-haibin-lin/mxnet | python/mxnet/ndarray/numpy/_op.py | 2 | 252233 | # pylint: disable=C0302
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument
"""Namespace for numpy operators used in Gluon dispatched by F=ndarray."""
import numpy as _np
from ...base import numeric_types, integer_types
from ...util import _sanity_check_params, set_module
from ...util import wrap_np_unary_func, wrap_np_binary_func
from ...context import current_context
from . import _internal as _npi
from ..ndarray import NDArray
__all__ = ['shape', 'zeros', 'zeros_like', 'ones', 'ones_like', 'full', 'full_like', 'empty_like', 'invert', 'delete',
'add', 'broadcast_to', 'subtract', 'multiply', 'divide', 'mod', 'remainder', 'power', 'bitwise_not',
'arctan2', 'sin', 'cos', 'tan', 'sinh', 'cosh', 'tanh', 'log10', 'sqrt', 'cbrt', 'abs', 'insert',
'absolute', 'exp', 'expm1', 'arcsin', 'arccos', 'arctan', 'sign', 'log', 'degrees', 'log2', 'matmul',
'log1p', 'rint', 'radians', 'reciprocal', 'square', 'negative', 'fix', 'ceil', 'floor', 'histogram',
'trunc', 'logical_not', 'arcsinh', 'arccosh', 'arctanh', 'argsort', 'sort',
'tensordot', 'eye', 'linspace',
'logspace', 'expand_dims', 'tile', 'arange', 'array_split', 'split', 'hsplit', 'vsplit', 'dsplit',
'concatenate', 'append', 'stack', 'vstack', 'row_stack', 'column_stack', 'hstack', 'dstack',
'average', 'mean', 'maximum', 'minimum',
'swapaxes', 'clip', 'argmax', 'argmin', 'std', 'var', 'indices', 'copysign', 'ravel', 'unravel_index',
'diag_indices_from', 'hanning', 'hamming', 'blackman', 'flip', 'flipud', 'fliplr', 'around', 'round',
'hypot', 'bitwise_and', 'bitwise_xor', 'bitwise_or', 'rad2deg', 'deg2rad', 'unique', 'lcm',
'tril', 'identity', 'take', 'ldexp', 'vdot', 'inner', 'outer',
'equal', 'not_equal', 'greater', 'less', 'greater_equal', 'less_equal', 'rot90', 'einsum',
'true_divide', 'nonzero', 'quantile', 'percentile', 'shares_memory', 'may_share_memory',
'diff', 'resize', 'polyval', 'nan_to_num', 'isnan', 'isinf', 'isposinf', 'isneginf', 'isfinite',
'where', 'bincount', 'pad']
@set_module('mxnet.ndarray.numpy')
def shape(a):
    """Return the shape of an array.

    Parameters
    ----------
    a : array_like
        Input array.

    Returns
    -------
    shape : tuple of ints
        One entry per dimension of `a`, giving the length of that
        dimension.

    See Also
    --------
    ndarray.shape : Equivalent array method.

    Examples
    --------
    >>> np.shape(np.eye(3))
    (3, 3)
    >>> np.shape([[1, 2]])
    (1, 2)
    >>> np.shape([0])
    (1,)
    >>> np.shape(0)
    ()
    """
    return a.shape
@set_module('mxnet.ndarray.numpy')
def zeros(shape, dtype=_np.float32, order='C', ctx=None):  # pylint: disable=redefined-outer-name
    """Create a new array of the given shape, filled with zeros.

    Only row-major (C-style) multi-dimensional storage is supported.

    Parameters
    ----------
    shape : int or tuple of int
        Dimensions of the new array.
    dtype : str or numpy.dtype, optional
        Element type. Defaults to `numpy.float32` — unlike NumPy's
        `zeros`, which defaults to `float64` — because `float32` is the
        conventional data type in deep learning.
    order : {'C'}, optional, default: 'C'
        Memory layout; only row-major (C-style) is accepted.
    ctx : Context, optional
        Device context; defaults to the current default context.

    Returns
    -------
    out : ndarray
        Zero-filled array with the given shape, dtype, and ctx.
    """
    if order != 'C':
        raise NotImplementedError
    ctx = current_context() if ctx is None else ctx
    if dtype is None:
        dtype = _np.float32
    return _npi.zeros(shape=shape, ctx=ctx, dtype=dtype)
@set_module('mxnet.ndarray.numpy')
def ones(shape, dtype=_np.float32, order='C', ctx=None):  # pylint: disable=redefined-outer-name
    """Create a new array of the given shape, filled with ones.

    Only row-major (C-style) multi-dimensional storage is supported.

    Parameters
    ----------
    shape : int or tuple of int
        Dimensions of the new array.
    dtype : str or numpy.dtype, optional
        Element type. Defaults to `numpy.float32` — unlike NumPy's
        `ones`, which defaults to `float64` — because `float32` is the
        conventional data type in deep learning.
    order : {'C'}, optional, default: 'C'
        Memory layout; only row-major (C-style) is accepted.
    ctx : Context, optional
        Device context; defaults to the current default context.

    Returns
    -------
    out : ndarray
        One-filled array with the given shape, dtype, and ctx.
    """
    if order != 'C':
        raise NotImplementedError
    ctx = current_context() if ctx is None else ctx
    if dtype is None:
        dtype = _np.float32
    return _npi.ones(shape=shape, ctx=ctx, dtype=dtype)
# pylint: disable=too-many-arguments, redefined-outer-name
@set_module('mxnet.ndarray.numpy')
def zeros_like(a, dtype=None, order='C', ctx=None, out=None):
    """Create an array of zeros matching the shape and type of `a`.

    Parameters
    ----------
    a : ndarray
        Template array; the result copies its shape and data type.
    dtype : data-type, optional
        Overrides the data type of the result. Boolean type is
        temporarily not supported.
    order : {'C'}, optional
        Memory layout; only row-major (C-style) is accepted.
    ctx : Context, optional
        Device on which to place the result, e.g. the i-th GPU.
    out : ndarray or None, optional
        Pre-allocated destination with the same shape and dtype as the
        input ndarray; a fresh array is allocated when omitted.

    Returns
    -------
    out : ndarray
        Array of zeros with the same shape and type as `a`.

    See Also
    --------
    empty_like : Return an empty array with shape and type of input.
    ones_like : Return an array of ones with shape and type of input.
    full : Return a new array of given shape filled with value.

    Examples
    --------
    >>> x = np.arange(6).reshape((2, 3))
    >>> np.zeros_like(x)
    array([[0., 0., 0.],
           [0., 0., 0.]])
    >>> np.zeros_like(x, int)
    array([[0, 0, 0],
           [0, 0, 0]], dtype=int64)
    >>> y = np.arange(3, dtype=float)
    >>> np.zeros_like(y)
    array([0., 0., 0.], dtype=float64)
    """
    if order != 'C':
        raise NotImplementedError
    ctx = current_context() if ctx is None else ctx
    # Delegates to the generic fill-like operator with a fill value of 0.
    return _npi.full_like(a, fill_value=0, dtype=dtype, ctx=ctx, out=out)
@set_module('mxnet.ndarray.numpy')
def ones_like(a, dtype=None, order='C', ctx=None, out=None):
    """Create an array of ones matching the shape and type of `a`.

    Parameters
    ----------
    a : ndarray
        Template array; the result copies its shape and data type.
    dtype : data-type, optional
        Overrides the data type of the result. Boolean type is
        temporarily not supported.
    order : {'C'}, optional
        Memory layout; only row-major (C-style) is accepted.
    ctx : Context, optional
        Device on which to place the result, e.g. the i-th GPU.
    out : ndarray or None, optional
        Pre-allocated destination with the same shape and dtype as the
        input ndarray; a fresh array is allocated when omitted.

    Returns
    -------
    out : ndarray
        Array of ones with the same shape and type as `a`.

    See Also
    --------
    empty_like : Return an empty array with shape and type of input.
    zeros_like : Return an array of zeros with shape and type of input.
    full_like : Return a new array with shape of input filled with value.
    ones : Return a new array setting values to one.

    Examples
    --------
    >>> x = np.arange(6).reshape((2, 3))
    >>> np.ones_like(x)
    array([[1., 1., 1.],
           [1., 1., 1.]])
    >>> np.ones_like(x, int)
    array([[1, 1, 1],
           [1, 1, 1]], dtype=int64)
    >>> y = np.arange(3, dtype=float)
    >>> np.ones_like(y)
    array([1., 1., 1.], dtype=float64)
    """
    if order != 'C':
        raise NotImplementedError
    ctx = current_context() if ctx is None else ctx
    # Delegates to the generic fill-like operator with a fill value of 1.
    return _npi.full_like(a, fill_value=1, dtype=dtype, ctx=ctx, out=out)
@set_module('mxnet.ndarray.numpy')
def broadcast_to(array, shape):
    """Broadcast an array to a new shape.

    Parameters
    ----------
    array : ndarray or scalar
        The array (or scalar) to broadcast.
    shape : tuple
        Shape of the desired array.

    Returns
    -------
    broadcast : array
        A readonly view on the original array with the given shape. It is
        typically not contiguous, and more than one element of the result
        may refer to a single memory location.

    Raises
    ------
    MXNetError
        If `array` cannot be broadcast to `shape` under NumPy's
        broadcasting rules.
    """
    # Scalars are materialized via `full`; real arrays go straight to the
    # backend broadcast operator.
    if not _np.isscalar(array):
        return _npi.broadcast_to(array, shape)
    return full(shape, array)
@set_module('mxnet.ndarray.numpy')
def full(shape, fill_value, dtype=None, order='C', ctx=None, out=None):  # pylint: disable=too-many-arguments
    """Create a new array of the given shape, filled with `fill_value`.

    Parameters
    ----------
    shape : int or sequence of ints
        Shape of the new array, e.g., ``(2, 3)`` or ``2``.
    fill_value : scalar or ndarray
        Fill value.
    dtype : data-type, optional
        Desired data-type for the array. The default, `None`, means
        `np.array(fill_value).dtype`.
    order : {'C'}, optional
        Memory layout; only row-major (C-style) is accepted.
    ctx : Context, optional
        Device on which to place the result, e.g. the i-th GPU.
    out : ndarray or None, optional
        Pre-allocated destination with the same shape and dtype as the
        input ndarray; a fresh array is allocated when omitted.

    Returns
    -------
    out : ndarray
        Array of `fill_value` with the given shape, dtype, and order.
        If `fill_value` is an ndarray, out will have the same context as
        `fill_value` regardless of the provided `ctx`.

    Notes
    -----
    This function differs from the original `numpy.full
    https://docs.scipy.org/doc/numpy/reference/generated/numpy.full.html`_ in
    the following way(s):
    - Have an additional `ctx` argument to specify the device
    - Have an additional `out` argument
    - Currently does not support `order` selection

    See Also
    --------
    empty : Return a new uninitialized array.
    ones : Return a new array setting values to one.
    zeros : Return a new array setting values to zero.

    Examples
    --------
    >>> np.full((2, 2), 10)
    array([[10., 10.],
           [10., 10.]])
    >>> np.full((2, 2), 2, dtype=np.int32, ctx=mx.cpu(0))
    array([[2, 2],
           [2, 2]], dtype=int32)
    """
    if order != 'C':
        raise NotImplementedError
    if ctx is None:
        ctx = current_context()
    # An ndarray fill value is handled by broadcasting it to the target
    # shape (keeping its own context), with an optional dtype cast.
    if isinstance(fill_value, NDArray):
        ret = broadcast_to(fill_value, shape)
        if dtype is not None:
            ret = ret.astype(dtype)
        return ret
    if dtype is None:
        dtype = _np.float32
    return _npi.full(shape=shape, value=fill_value, ctx=ctx, dtype=dtype, out=out)
# pylint: enable=too-many-arguments, redefined-outer-name
@set_module('mxnet.ndarray.numpy')
def full_like(a, fill_value, dtype=None, order='C', ctx=None, out=None):  # pylint: disable=too-many-arguments
    """Create a full array with the same shape and type as a given array.

    Parameters
    ----------
    a : ndarray
        Template array; the result copies its shape and data type.
    fill_value : scalar
        Fill value.
    dtype : data-type, optional
        Overrides the data type of the result. Boolean type is
        temporarily not supported.
    order : {'C'}, optional
        Memory layout; only row-major (C-style) is accepted.
    ctx : Context, optional
        Device on which to place the result, e.g. the i-th GPU.
    out : ndarray or None, optional
        Pre-allocated destination with the same shape and dtype as the
        input ndarray; a fresh array is allocated when omitted.

    Returns
    -------
    out : ndarray
        Array of `fill_value` with the same shape and type as `a`.

    See Also
    --------
    empty_like : Return an empty array with shape and type of input.
    ones_like : Return an array of ones with shape and type of input.
    zeros_like : Return an array of zeros with shape and type of input.
    full : Return a new array of given shape filled with value.

    Examples
    --------
    >>> x = np.arange(6, dtype=int)
    >>> np.full_like(x, 1)
    array([1, 1, 1, 1, 1, 1], dtype=int64)
    >>> np.full_like(x, 0.1)
    array([0, 0, 0, 0, 0, 0], dtype=int64)
    >>> np.full_like(x, 0.1, dtype=np.float64)
    array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1], dtype=float64)
    >>> np.full_like(x, np.nan, dtype=np.double)
    array([nan, nan, nan, nan, nan, nan], dtype=float64)
    """
    if order != 'C':
        raise NotImplementedError
    ctx = current_context() if ctx is None else ctx
    return _npi.full_like(a, fill_value=fill_value, dtype=dtype, ctx=ctx, out=out)
@set_module('mxnet.ndarray.numpy')
def empty_like(prototype, dtype=None, order='C', subok=False, shape=None):  # pylint: disable=W0621
    """
    Return a new array with the same shape and type as a given array.

    Parameters
    ----------
    prototype : ndarray
        The shape and data-type of `prototype` define these same attributes
        of the returned array.
    dtype : data-type, optional
        Overrides the data type of the result.
    order : {'C'}, optional
        Whether to store multidimensional data in C- or Fortran-contiguous
        (row- or column-wise) order in memory. Currently only supports C order.
    subok : {False}, optional
        If True, then the newly created array will use the sub-class
        type of 'a', otherwise it will be a base-class array. Defaults
        to False.
        (Only support False at this moment)
    shape : int or sequence of ints, optional.
        Overrides the shape of the result. If order='K' and the number of
        dimensions is unchanged, will try to keep order, otherwise,
        order='C' is implied.
        (Not supported at this moment)

    Returns
    -------
    out : ndarray
        Array of uninitialized (arbitrary) data with the same
        shape and type as `prototype`.

    Raises
    ------
    NotImplementedError
        If `order` is not 'C', `subok` is True, `shape` is given, or
        `dtype` is not one of the supported types listed below.

    See Also
    --------
    ones_like : Return an array of ones with shape and type of input.
    zeros_like : Return an array of zeros with shape and type of input.
    full_like : Return a new array with shape of input filled with value.
    empty : Return a new uninitialized array.

    Notes
    -----
    This function does *not* initialize the returned array; to do that use
    `zeros_like` or `ones_like` instead. It may be marginally faster than
    the functions that do set the array values.

    Examples
    --------
    >>> a = np.array([[1,2,3], [4,5,6]])
    >>> np.empty_like(a)
    array([[-5764607523034234880, -2305834244544065442, 4563075075], # uninitialized
           [ 4567052944, -5764607523034234880, 844424930131968]])
    >>> a = np.array([[1., 2., 3.],[4.,5.,6.]])
    >>> np.empty_like(a)
    array([[4.9e-324, 9.9e-324, 1.5e-323], # uninitialized
           [2.0e-323, 2.5e-323, 3.0e-323]])
    """
    # Map of supported dtype objects to the string names understood by the
    # backend fallback; `None` deliberately maps to the literal 'None'.
    dtype_list = {None: 'None', _np.int8: 'int8', _np.uint8: 'uint8', _np.int32: 'int32',
                  _np.int64: 'int64', _np.float16: 'float16', _np.float32: 'float32',
                  _np.float64: 'float64', _np.bool_: 'bool_', bool: 'bool', int: 'int64',
                  float: 'float64'}
    if order != 'C':
        raise NotImplementedError("Only support C-order at this moment")
    if subok:
        raise NotImplementedError("Creating array by using sub-class is not supported at this moment")
    if shape is not None:
        raise NotImplementedError("Assigning new shape is not supported at this moment")
    if isinstance(dtype, str):
        dtype_str = dtype
    else:
        try:
            dtype_str = dtype_list[dtype]
        # Previously a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt. KeyError covers an unsupported dtype; TypeError
        # covers an unhashable one. `from None` keeps the traceback clean.
        except (KeyError, TypeError):
            raise NotImplementedError("Do not support this dtype at this moment") from None
    return _npi.empty_like_fallback(prototype, dtype=dtype_str, order=order, subok=subok, shape=shape)
@set_module('mxnet.ndarray.numpy')
def arange(start, stop=None, step=1, dtype=None, ctx=None):
    """Return evenly spaced values within a given interval.

    Values are generated within the half-open interval ``[start, stop)``
    (in other words, the interval including `start` but excluding `stop`).
    For integer arguments the function is equivalent to the Python built-in
    `range` function, but returns an ndarray rather than a list.

    Parameters
    ----------
    start : number, optional
        Start of interval. The interval includes this value. The default
        start value is 0.
    stop : number
        End of interval. The interval does not include this value, except
        in some cases where `step` is not an integer and floating point
        round-off affects the length of `out`.
    step : number, optional
        Spacing between values. For any output `out`, this is the distance
        between two adjacent values, ``out[i+1] - out[i]``. The default
        step size is 1. If `step` is specified as a position argument,
        `start` must also be given.
    dtype : dtype
        The type of the output array. The default is `float32`.
    ctx : Context, optional
        Device context; defaults to the current default context.

    Returns
    -------
    arange : ndarray
        Array of evenly spaced values.
        For floating point arguments, the length of the result is
        ``ceil((stop - start)/step)``. Because of floating point overflow,
        this rule may result in the last element of `out` being greater
        than `stop`.

    Raises
    ------
    ValueError
        If both `start` and `stop` are None.
    ZeroDivisionError
        If `step` is 0.
    """
    # Validate before the (start, stop) swap below. In the original code this
    # check ran after `start` had been rewritten to 0, so it could never fire.
    if start is None and stop is None:
        raise ValueError('start and stop cannot be both None')
    if step == 0:
        raise ZeroDivisionError('step cannot be 0')
    if dtype is None:
        dtype = 'float32'
    if ctx is None:
        ctx = current_context()
    if stop is None:
        # Single-argument form: arange(stop) counts from 0.
        stop = start
        start = 0
    if step is None:
        step = 1
    return _npi.arange(start=start, stop=stop, step=step, dtype=dtype, ctx=ctx)
@set_module('mxnet.ndarray.numpy')
def identity(n, dtype=None, ctx=None):
    """Create the identity array: a square array with ones on the main
    diagonal and zeros elsewhere.

    Parameters
    ----------
    n : int
        Number of rows (and columns) in the `n` x `n` output.
    dtype : data-type, optional
        Data-type of the output. Defaults to ``numpy.float32``.
    ctx : Context, optional
        Device context; defaults to the current default context.

    Returns
    -------
    out : ndarray
        `n` x `n` array whose main diagonal is one and all other
        elements are zero.

    Raises
    ------
    TypeError
        If `n` is not an integer.
    ValueError
        If `n` is negative.

    Examples
    --------
    >>> np.identity(3)
    array([[1., 0., 0.],
           [0., 1., 0.],
           [0., 0., 1.]])
    """
    if not isinstance(n, int):
        raise TypeError("Input 'n' should be an integer")
    if n < 0:
        raise ValueError("Input 'n' cannot be negative")
    ctx = current_context() if ctx is None else ctx
    if dtype is None:
        dtype = _np.float32
    return _npi.identity(shape=(n, n), ctx=ctx, dtype=dtype)
# pylint: disable=redefined-outer-name
@set_module('mxnet.ndarray.numpy')
def take(a, indices, axis=None, mode='raise', out=None):
    r"""Take elements from an array along an axis.

    When `axis` is not None, this behaves like "fancy" indexing along the
    chosen axis: ``np.take(arr, indices, axis=3)`` is equivalent to
    ``arr[:,:,:,indices,...]``. Without fancy indexing, this is equivalent
    to the following use of `ndindex`, which sets each of ``ii``, ``jj``,
    and ``kk`` to a tuple of indices::

        Ni, Nk = a.shape[:axis], a.shape[axis+1:]
        Nj = indices.shape
        for ii in ndindex(Ni):
            for jj in ndindex(Nj):
                for kk in ndindex(Nk):
                    out[ii + jj + kk] = a[ii + (indices[jj],) + kk]

    Parameters
    ----------
    a : ndarray
        The source array.
    indices : ndarray
        The indices of the values to extract. Also allow scalars for indices.
    axis : int, optional
        The axis over which to select values. By default, the flattened
        input array is used.
    mode : {'clip', 'wrap'}, optional
        Specifies how out-of-bounds indices will behave.
        * 'clip' -- clip to the range (default)
        * 'wrap' -- wrap around
        'clip' mode means that all indices that are too large are replaced
        by the index that addresses the last element along that axis. Note
        that this disables indexing with negative numbers.
    out : ndarray, optional
        If provided, the result will be placed in this array. It should
        be of the appropriate shape and dtype.

    Returns
    -------
    out : ndarray
        The returned array has the same type as `a`.

    Notes
    -----
    This function differs from the original `numpy.take
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.take.html>`_ in
    the following way(s):
    - Only ndarray or scalar ndarray is accepted as valid input.

    Examples
    --------
    >>> a = np.array([4, 3, 5, 7, 6, 8])
    >>> indices = np.array([0, 1, 4])
    >>> np.take(a, indices)
    array([4., 3., 6.])
    >>> np.take(a, np.array([[0, 1], [2, 3]]))
    array([[4., 3.],
           [5., 7.]])
    """
    if mode not in {'wrap', 'clip', 'raise'}:
        raise NotImplementedError(
            "function take does not support mode '{}'".format(mode))
    if axis is None:
        # No axis: operate over the flattened view of `a`.
        return _npi.take(_npi.reshape(a, -1), indices, 0, mode, out)
    return _npi.take(a, indices, axis, mode, out)
# pylint: enable=redefined-outer-name
@set_module('mxnet.ndarray.numpy')
def insert(arr, obj, values, axis=None):
    """
    Insert values along the given axis before the given indices.
    Parameters
    ----------
    arr : ndarray
        Input array.
    obj : int, slice or ndarray of int64
        Object that defines the index or indices before which `values` is
        inserted.
        Support for multiple insertions when `obj` is a single scalar or a
        sequence with one element (only support int32 and int64 element).
    values : ndarray
        Values to insert into `arr`.
        If the type of values is different from that of arr, values is converted
        to the type of arr.
    axis : int, optional
        Axis along which to insert `values`. If `axis` is None then `arr`
        is flattened first.
    Returns
    -------
    out : ndarray
        A copy of `arr` with `values` inserted. Note that `insert`
        does not occur in-place: a new array is returned. If
        `axis` is None, `out` is a flattened array.
    Notes
    -----
    - Note that for higher dimensional inserts `obj=0` behaves very different
    from `obj=[0]` just like `arr[:,0,:] = values` is different from
    `arr[:,[0],:] = values`.
    - If obj is a ndarray, it's dtype only supports int64
    Examples
    --------
    >>> a = np.array([[1, 1], [2, 2], [3, 3]])
    >>> a
    array([[1., 1.],
           [2., 2.],
           [3., 3.]])
    >>> np.insert(a, 1, np.array(5))
    array([1., 5., 1., 2., 2., 3., 3.])
    >>> np.insert(a, 1, np.array(5), axis=1)
    array([[1., 5., 1.],
           [2., 5., 2.],
           [3., 5., 3.]])
    Difference between sequence and scalars:
    >>> np.insert(a, np.array([1], dtype=np.int64), np.array([[1],[2],[3]]), axis=1)
    array([[1., 1., 1.],
           [2., 2., 2.],
           [3., 3., 3.]])
    >>> np.insert(a, 1, np.array([1, 2, 3]), axis=1)
    array([[1., 1., 1.],
           [2., 2., 2.],
           [3., 3., 3.]])
    >>> b = a.flatten()
    >>> b
    array([1., 1., 2., 2., 3., 3.])
    >>> np.insert(b, np.array([2, 2], dtype=np.int64), np.array([5, 6]))
    array([1., 1., 5., 6., 2., 2., 3., 3.])
    >>> np.insert(b, slice(2, 4), np.array([5, 6]))
    array([1., 1., 5., 2., 6., 2., 3., 3.])
    # type casting
    >>> np.insert(b.astype(np.int32), np.array([2, 2],dtype='int64'), np.array([7.13, False]))
    array([1, 1, 7, 0, 2, 2, 3, 3], dtype=int32)
    >>> x = np.arange(8).reshape(2, 4)
    >>> idx = np.array([1, 3], dtype=np.int64)
    >>> np.insert(x, idx, np.array([999]), axis=1)
    array([[ 0., 999., 1., 2., 999., 3.],
           [ 4., 999., 5., 6., 999., 7.]])
    """
    # Fast path: a plain Python scalar `values` is forwarded to the scalar
    # variants of the backend operators (passed as the `val` attribute)
    # without being converted to an ndarray first.
    if isinstance(values, numeric_types):
        if isinstance(obj, slice):
            start = obj.start
            stop = obj.stop
            # slice.step of None means a unit step.
            step = 1 if obj.step is None else obj.step
            return _npi.insert_slice(arr, val=values, start=start, stop=stop, step=step, axis=axis)
        elif isinstance(obj, integer_types):
            return _npi.insert_scalar(arr, val=values, int_ind=obj, axis=axis)
        elif isinstance(obj, NDArray):
            return _npi.insert_tensor(arr, obj, val=values, axis=axis)
        # NOTE(review): when `values` is numeric but `obj` is an unsupported
        # type, control deliberately falls through to the checks below; the
        # numeric `values` then fails the NDArray check and raises the
        # "'values' can not support type" TypeError rather than an error
        # about `obj`. Preserved as-is.
    # General path: both `arr` and `values` must be NDArrays.
    if not isinstance(arr, NDArray):
        raise TypeError("'arr' can not support type {}".format(str(type(arr))))
    if not isinstance(values, NDArray):
        raise TypeError("'values' can not support type {}".format(str(type(values))))
    if isinstance(obj, slice):
        start = obj.start
        stop = obj.stop
        # slice.step of None means a unit step.
        step = 1 if obj.step is None else obj.step
        return _npi.insert_slice(arr, values, start=start, stop=stop, step=step, axis=axis)
    elif isinstance(obj, integer_types):
        return _npi.insert_scalar(arr, values, int_ind=obj, axis=axis)
    elif isinstance(obj, NDArray):
        return _npi.insert_tensor(arr, values, obj, axis=axis)
    else:
        raise TypeError("'obj' can not support type {}".format(str(type(obj))))
#pylint: disable= too-many-arguments, no-member, protected-access
def _ufunc_helper(lhs, rhs, fn_array, fn_scalar, lfn_scalar, rfn_scalar=None, out=None):
    """Dispatch an element-wise binary operation to the right implementation.

    Performs numpy-like broadcasting if needed and selects among the
    supplied callables based on the operand types.

    Parameters
    --------
    lhs : ndarray or numeric value
        Left-hand side operand.
    rhs : ndarray or numeric value
        Right-hand operand,
    fn_array : function
        Called when both `lhs` and `rhs` are of ``ndarray`` type.
    fn_scalar : function
        Called when both `lhs` and `rhs` are numeric values.
    lfn_scalar : function
        Called when `lhs` is an ``ndarray`` and `rhs` is a numeric value.
    rfn_scalar : function
        Called when `lhs` is a numeric value and `rhs` is an ``ndarray``;
        when omitted the operation is assumed commutative and
        `lfn_scalar` is used in its place.

    Returns
    --------
    mxnet.numpy.ndarray or scalar
        result array or scalar
    """
    from ...numpy import ndarray
    from ..ndarray import from_numpy  # pylint: disable=unused-import
    if isinstance(lhs, numeric_types):
        if isinstance(rhs, numeric_types):
            return fn_scalar(lhs, rhs, out=out)
        # Scalar op array: for a commutative op (rfn_scalar omitted) the
        # operands can simply be swapped into lfn_scalar.
        scalar_fn = lfn_scalar if rfn_scalar is None else rfn_scalar
        return scalar_fn(rhs, float(lhs), out=out)
    if isinstance(rhs, numeric_types):
        return lfn_scalar(lhs, float(rhs), out=out)
    if isinstance(lhs, ndarray) and isinstance(rhs, ndarray):
        return fn_array(lhs, rhs, out=out)
    raise TypeError('type {} not supported'.format(str(type(rhs))))
#pylint: enable= too-many-arguments, no-member, protected-access
@set_module('mxnet.ndarray.numpy')
def unique(ar, return_index=False, return_inverse=False, return_counts=False, axis=None):
    """
    Find the unique elements of an array.

    Returns the sorted unique elements of an array. There are three optional
    outputs in addition to the unique elements:

    * the indices of the input array that give the unique values
    * the indices of the unique array that reconstruct the input array
    * the number of times each unique value comes up in the input array

    Parameters
    ----------
    ar : ndarray
        Input array. Unless `axis` is specified, this will be flattened if it
        is not already 1-D.
    return_index : bool, optional
        If True, also return the indices of `ar` (along the specified axis,
        if provided, or in the flattened array) that result in the unique array.
    return_inverse : bool, optional
        If True, also return the indices of the unique array (for the specified
        axis, if provided) that can be used to reconstruct `ar`.
    return_counts : bool, optional
        If True, also return the number of times each unique item appears
        in `ar`.
    axis : int or None, optional
        The axis to operate on. If None, `ar` will be flattened. If an integer,
        the subarrays indexed by the given axis will be flattened and treated
        as the elements of a 1-D array with the dimension of the given axis.
        The default is None.

    Returns
    -------
    unique : ndarray
        The sorted unique values.
    unique_indices : ndarray, optional
        The indices of the first occurrences of the unique values in the
        original array. Only provided if `return_index` is True.
    unique_inverse : ndarray, optional
        The indices to reconstruct the original array from the
        unique array. Only provided if `return_inverse` is True.
    unique_counts : ndarray, optional
        The number of times each of the unique values comes up in the
        original array. Only provided if `return_counts` is True.

    Notes
    -----
    When an axis is specified the subarrays indexed by the axis are sorted
    lexicographically starting with the first element (the axis is moved to
    the front and the subarrays are flattened in C order before comparison).

    This function differs from the original `numpy.unique
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.unique.html>`_ in
    the following aspects:

    - Only support ndarray as input.
    - Object arrays or structured arrays are not supported.

    Examples
    --------
    >>> np.unique(np.array([1, 1, 2, 2, 3, 3]))
    array([1., 2., 3.])
    >>> a = np.array([[1, 0, 0], [1, 0, 0], [2, 3, 4]])
    >>> np.unique(a, axis=0)
    array([[1., 0., 0.],
           [2., 3., 4.]])
    >>> a = np.array([1, 2, 6, 4, 2, 3, 2])
    >>> u, indices = np.unique(a, return_index=True)
    >>> u
    array([1., 2., 3., 4., 6.])
    >>> indices
    array([0, 1, 5, 3, 2], dtype=int64)
    >>> u, indices = np.unique(a, return_inverse=True)
    >>> u[indices]
    array([1., 2., 6., 4., 2., 3., 2.])
    """
    # The backend op yields a list when any optional output is requested;
    # numpy's convention is a tuple, so convert in that case.
    outputs = _npi.unique(ar, return_index, return_inverse, return_counts, axis)
    return tuple(outputs) if isinstance(outputs, list) else outputs
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def add(x1, x2, out=None, **kwargs):
    """
    Add arguments element-wise.

    Parameters
    ----------
    x1, x2 : ndarrays or scalar values
        The arrays to be added. If ``x1.shape != x2.shape``, they must be
        broadcastable to a common shape (which may be the shape of one or
        the other).
    out : ndarray
        A location into which the result is stored. If provided, it must have
        a shape that the inputs broadcast to. If not provided or None, a
        freshly-allocated array is returned.

    Returns
    -------
    add : ndarray or scalar
        The sum of x1 and x2, element-wise. This is a scalar if both x1 and
        x2 are scalars.

    Notes
    -----
    This operator now supports automatic type promotion. The resulting type
    will be determined according to the following rules:

    * If both inputs are of floating number types, the output is the more
      precise type.
    * If only one of the inputs is floating number type, the result is that
      type.
    * If both inputs are of integer types (including boolean), not supported
      yet.
    """
    # Addition is commutative, hence no reflected scalar kernel is required.
    return _ufunc_helper(x1, x2, _npi.add, _np.add, _npi.add_scalar, None, out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def subtract(x1, x2, out=None, **kwargs):
    """
    Subtract arguments element-wise.

    Parameters
    ----------
    x1, x2 : ndarrays or scalar values
        The arrays to be subtracted from each other. If
        ``x1.shape != x2.shape``, they must be broadcastable to a common
        shape (which may be the shape of one or the other).
    out : ndarray
        A location into which the result is stored. If provided, it must have
        a shape that the inputs broadcast to. If not provided or None, a
        freshly-allocated array is returned.

    Returns
    -------
    subtract : ndarray or scalar
        The difference of x1 and x2, element-wise. This is a scalar if both
        x1 and x2 are scalars.

    Notes
    -----
    This operator now supports automatic type promotion. The resulting type
    will be determined according to the following rules:

    * If both inputs are of floating number types, the output is the more
      precise type.
    * If only one of the inputs is floating number type, the result is that
      type.
    * If both inputs are of integer types (including boolean), not supported
      yet.
    """
    # Subtraction is not commutative, so a reflected scalar kernel
    # (rsubtract_scalar) is supplied for the scalar-minus-array case.
    return _ufunc_helper(x1, x2, _npi.subtract, _np.subtract, _npi.subtract_scalar,
                         _npi.rsubtract_scalar, out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def multiply(x1, x2, out=None, **kwargs):
    """
    Multiply arguments element-wise.

    Parameters
    ----------
    x1, x2 : ndarrays or scalar values
        The arrays to be multiplied. If ``x1.shape != x2.shape``, they must
        be broadcastable to a common shape (which may be the shape of one or
        the other).
    out : ndarray
        A location into which the result is stored. If provided, it must have
        a shape that the inputs broadcast to. If not provided or None, a
        freshly-allocated array is returned.

    Returns
    -------
    out : ndarray or scalar
        The multiplication of x1 and x2, element-wise. This is a scalar if
        both x1 and x2 are scalars.

    Notes
    -----
    This operator now supports automatic type promotion. The resulting type
    will be determined according to the following rules:

    * If both inputs are of floating number types, the output is the more
      precise type.
    * If only one of the inputs is floating number type, the result is that
      type.
    * If both inputs are of integer types (including boolean), not supported
      yet.
    """
    # Multiplication is commutative, hence no reflected scalar kernel.
    return _ufunc_helper(x1, x2, _npi.multiply, _np.multiply, _npi.multiply_scalar, None, out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def divide(x1, x2, out=None, **kwargs):
    """
    Returns a true division of the inputs, element-wise.

    Parameters
    ----------
    x1 : ndarray or scalar
        Dividend array.
    x2 : ndarray or scalar
        Divisor array.
    out : ndarray
        A location into which the result is stored. If provided, it must have
        a shape that the inputs broadcast to. If not provided or None, a
        freshly-allocated array is returned.

    Returns
    -------
    out : ndarray or scalar
        This is a scalar if both x1 and x2 are scalars.

    Notes
    -----
    This operator now supports automatic type promotion. The resulting type
    will be determined according to the following rules:

    * If both inputs are of floating number types, the output is the more
      precise type.
    * If only one of the inputs is floating number type, the result is that
      type.
    * If both inputs are of integer types (including boolean), the output is
      of float32 type.
    """
    # Division is not commutative; rtrue_divide_scalar handles scalar / array.
    return _ufunc_helper(x1, x2, _npi.true_divide, _np.divide, _npi.true_divide_scalar,
                         _npi.rtrue_divide_scalar, out)
@set_module('mxnet.ndarray.numpy')
def true_divide(x1, x2, out=None):
    """Returns a true division of the inputs, element-wise.

    Instead of the Python traditional 'floor division', this returns a true
    division. True division adjusts the output type to present the best
    answer, regardless of input types.

    Parameters
    ----------
    x1 : ndarray or scalar
        Dividend array.
    x2 : ndarray or scalar
        Divisor array.
    out : ndarray
        A location into which the result is stored. If provided, it must have
        a shape that the inputs broadcast to. If not provided or None, a
        freshly-allocated array is returned.

    Returns
    -------
    out : ndarray or scalar
        This is a scalar if both x1 and x2 are scalars.

    Notes
    -----
    This operator now supports automatic type promotion. The resulting type
    will be determined according to the following rules:

    * If both inputs are of floating number types, the output is the more
      precise type.
    * If only one of the inputs is floating number type, the result is that
      type.
    * If both inputs are of integer types (including boolean), the output is
      of float32 type.
    """
    # Same backend dispatch as `divide`: they are aliases of each other.
    return _ufunc_helper(x1, x2, _npi.true_divide, _np.divide, _npi.true_divide_scalar,
                         _npi.rtrue_divide_scalar, out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def mod(x1, x2, out=None, **kwargs):
    """
    Return element-wise remainder of division.

    Parameters
    ----------
    x1 : ndarray or scalar
        Dividend array.
    x2 : ndarray or scalar
        Divisor array.
    out : ndarray
        A location into which the result is stored. If provided, it must have
        a shape that the inputs broadcast to. If not provided or None, a
        freshly-allocated array is returned.

    Returns
    -------
    out : ndarray or scalar
        This is a scalar if both x1 and x2 are scalars.
    """
    # Not commutative: rmod_scalar covers the scalar % array case.
    return _ufunc_helper(x1, x2, _npi.mod, _np.mod, _npi.mod_scalar, _npi.rmod_scalar, out)
@set_module('mxnet.ndarray.numpy')
def delete(arr, obj, axis=None):
    """
    Return a new array with sub-arrays along an axis deleted. For a one
    dimensional array, this returns those entries not returned by
    ``arr[obj]``.

    Parameters
    ----------
    arr : ndarray
        Input array.
    obj : slice, int or ndarray of ints
        Indicate indices of sub-arrays to remove along the specified axis.
    axis : int, optional
        The axis along which to delete the subarray defined by `obj`.
        If `axis` is None, `obj` is applied to the flattened array.

    Returns
    -------
    out : ndarray
        A copy of `arr` with the elements specified by `obj` removed. Note
        that `delete` does not occur in-place. If `axis` is None, `out` is
        a flattened array.

    Examples
    --------
    >>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
    >>> np.delete(arr, 1, 0)
    array([[ 1.,  2.,  3.,  4.],
           [ 9., 10., 11., 12.]])
    >>> np.delete(arr, slice(None, None, 2), 1)
    array([[ 2.,  4.],
           [ 6.,  8.],
           [10., 12.]])
    >>> np.delete(arr, np.array([1,3,5]), None)
    array([ 1.,  3.,  5.,  7.,  8.,  9., 10., 11., 12.])
    """
    if not isinstance(arr, NDArray):
        raise TypeError("'arr' can not support type {}".format(str(type(arr))))
    # Dispatch on the index type; the three accepted forms are mutually
    # exclusive, so the check order does not affect behavior.
    if isinstance(obj, integer_types):
        return _npi.delete(arr, int_ind=obj, axis=axis)
    if isinstance(obj, NDArray):
        return _npi.delete(arr, obj, axis=axis)
    if isinstance(obj, slice):
        # A slice with no explicit step means a step of one.
        stride = 1 if obj.step is None else obj.step
        return _npi.delete(arr, start=obj.start, stop=obj.stop, step=stride, axis=axis)
    raise TypeError("'obj' can not support type {}".format(str(type(obj))))
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def matmul(a, b, out=None, **kwargs):
    """
    Matrix product of two arrays.

    Parameters
    ----------
    a, b : ndarray
        Input arrays, scalars not allowed.
    out : ndarray, optional
        A location into which the result is stored.
        If provided, it must have a shape that matches the signature
        (n,k),(k,m)->(n,m). If not provided or None, a freshly-allocated
        array is returned.

    Returns
    -------
    y : ndarray
        The matrix product of the inputs.
        This is a scalar only when both x1, x2 are 1-d vectors.

    Raises
    ------
    MXNetError
        If the last dimension of a is not the same size as the
        second-to-last dimension of b.
        If a scalar value is passed in.

    See Also
    --------
    tensordot :
        Sum products over arbitrary axes.
    dot :
        alternative matrix product with different broadcasting rules.
    einsum :
        Einstein summation convention.

    Notes
    -----
    The behavior depends on the arguments in the following way.

    - If both arguments are 2-D they are multiplied like conventional matrices.
    - If either argument is N-D, N > 2, it is treated as a stack of matrices
      residing in the last two indexes and broadcast accordingly.
    - If the first argument is 1-D, it is promoted to a matrix by prepending
      a 1 to its dimensions. After matrix multiplication the prepended 1 is removed.
    - If the second argument is 1-D, it is promoted to a matrix by appending a 1
      to its dimensions. After matrix multiplication the appended 1 is removed.

    matmul differs from dot in two important ways:

    - Multiplication by scalars is not allowed, use multiply instead.
    - Stacks of matrices are broadcast together as if the matrices were elements,
      respecting the signature (n,k),(k,m)->(n,m):

    >>> a = np.ones([9, 5, 7, 4])
    >>> c = np.ones([9, 5, 4, 3])
    >>> np.dot(a, c).shape
    (9, 5, 7, 9, 5, 3)
    >>> np.matmul(a, c).shape
    (9, 5, 7, 3)
    >>> # n is 7, k is 4, m is 3

    Examples
    --------
    For 2-D arrays it is the matrix product:

    >>> a = np.array([[1, 0],
    ...               [0, 1]])
    >>> b = np.array([[4, 1],
    ...               [2, 2]])
    >>> np.matmul(a, b)
    array([[4., 1.],
           [2., 2.]])

    For 2-D mixed with 1-D, the result is the usual.

    >>> b = np.array([1, 2])
    >>> np.matmul(a, b)
    array([1., 2.])
    >>> np.matmul(b, a)
    array([1., 2.])

    Broadcasting is conventional for stacks of arrays

    >>> a = np.arange(2 * 2 * 4).reshape((2, 2, 4))
    >>> b = np.arange(2 * 2 * 4).reshape((2, 4, 2))
    >>> np.matmul(a, b).shape
    (2, 2, 2)
    >>> np.matmul(a, b)[0, 1, 1]
    array(98.)

    Scalar multiplication raises an error.

    >>> np.matmul([1, 2], 3)
    Traceback (most recent call last):
    ...
    mxnet.base.MXNetError: ... : Multiplication by scalars is not allowed.
    """
    # ``**kwargs`` added for consistency with the other @wrap_np_binary_func
    # operators (e.g. add/subtract/mod); the decorator may forward extra
    # numpy-compat keyword arguments which would previously raise TypeError.
    return _npi.matmul(a, b, out=out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def remainder(x1, x2, out=None, **kwargs):
    """
    Return element-wise remainder of division.

    Parameters
    ----------
    x1 : ndarray or scalar
        Dividend array.
    x2 : ndarray or scalar
        Divisor array.
    out : ndarray
        A location into which the result is stored. If provided, it must have a shape
        that the inputs broadcast to. If not provided or None, a freshly-allocated array
        is returned.

    Returns
    -------
    out : ndarray or scalar
        This is a scalar if both x1 and x2 are scalars.
    """
    # ``**kwargs`` added for consistency with the sibling `mod` (this function
    # is its alias) and the other @wrap_np_binary_func operators, which all
    # accept and ignore extra numpy-compat keyword arguments.
    return _ufunc_helper(x1, x2, _npi.mod, _np.mod, _npi.mod_scalar, _npi.rmod_scalar, out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def power(x1, x2, out=None, **kwargs):
    """
    First array elements raised to powers from second array, element-wise.

    Parameters
    ----------
    x1 : ndarray or scalar
        The bases.
    x2 : ndarray or scalar
        The exponent.
    out : ndarray
        A location into which the result is stored. If provided, it must have
        a shape that the inputs broadcast to. If not provided or None, a
        freshly-allocated array is returned.

    Returns
    -------
    out : ndarray or scalar
        The bases in x1 raised to the exponents in x2.
        This is a scalar if both x1 and x2 are scalars.
    """
    # Not commutative: rpower_scalar handles scalar ** array.
    return _ufunc_helper(x1, x2, _npi.power, _np.power, _npi.power_scalar, _npi.rpower_scalar, out)
@set_module('mxnet.ndarray.numpy')
def argsort(a, axis=-1, kind=None, order=None):
    """
    Returns the indices that would sort an array.

    It returns an array of indices of the same shape as `a` that index data
    along the given axis in sorted (ascending) order.

    Parameters
    ----------
    a : ndarray
        Array to sort.
    axis : int or None, optional
        Axis along which to sort. The default is -1 (the last axis). If None,
        the flattened array is used.
    kind : string, optional
        This argument can take any string, but it does not have any effect on
        the final result.
    order : str or list of str, optional
        Not supported yet, will raise NotImplementedError if not None.

    Returns
    -------
    index_array : ndarray, int
        Array of indices that sort `a` along the specified `axis`.
        If `a` is one-dimensional, ``a[index_array]`` yields a sorted `a`.
        More generally, ``np.take_along_axis(a, index_array, axis=axis)``
        always yields the sorted `a`, irrespective of dimensionality.

    Notes
    -----
    This operator does not support different sorting algorithms.

    Examples
    --------
    >>> x = np.array([3, 1, 2])
    >>> np.argsort(x)
    array([1, 2, 0])

    Two-dimensional array:

    >>> x = np.array([[0, 3], [2, 2]])
    >>> np.argsort(x, axis=0)    # sorts along first axis (down)
    array([[0, 1],
           [1, 0]])
    >>> np.argsort(x, axis=1)    # sorts along last axis (across)
    array([[0, 1],
           [0, 1]])
    """
    if order is not None:
        raise NotImplementedError("order not supported here")
    # `kind` is deliberately ignored; the backend provides a single
    # ascending sort and always returns int64 indices.
    return _npi.argsort(data=a, axis=axis, is_ascend=True, dtype='int64')
@set_module('mxnet.ndarray.numpy')
def sort(a, axis=-1, kind=None, order=None):
    """
    Return a sorted copy of an array.

    Parameters
    ----------
    a : ndarray
        Array to be sorted.
    axis : int or None, optional
        Axis along which to sort. The default is -1 (the last axis). If None,
        the flattened array is used.
    kind : string, optional
        This argument can take any string, but it does not have any effect on
        the final result.
    order : str or list of str, optional
        Not supported yet, will raise NotImplementedError if not None.

    Returns
    -------
    sorted_array : ndarray
        Array of the same type and shape as `a`.

    Notes
    -----
    This operator does not support different sorting algorithms.

    Examples
    --------
    >>> a = np.array([[1,4],[3,1]])
    >>> np.sort(a)                # sort along the last axis
    array([[1, 4],
           [1, 3]])
    >>> np.sort(a, axis=None)     # sort the flattened array
    array([1, 1, 3, 4])
    >>> np.sort(a, axis=0)        # sort along the first axis
    array([[1, 1],
           [3, 4]])
    """
    if order is not None:
        raise NotImplementedError("order not supported here")
    # `kind` is deliberately ignored; the backend always sorts ascending.
    return _npi.sort(data=a, axis=axis, is_ascend=True)
@set_module('mxnet.ndarray.numpy')
def tensordot(a, b, axes=2):
    r"""
    tensordot(a, b, axes=2)

    Compute tensor dot product along specified axes for arrays >= 1-D.

    Given two tensors (arrays of dimension greater than or equal to one),
    `a` and `b`, and an ndarray object containing two ndarray
    objects, ``(a_axes, b_axes)``, sum the products of `a`'s and `b`'s
    elements (components) over the axes specified by ``a_axes`` and
    ``b_axes``. The third argument can be a single non-negative
    integer_like scalar, ``N``; if it is such, then the last ``N``
    dimensions of `a` and the first ``N`` dimensions of `b` are summed over.

    Parameters
    ----------
    a, b : ndarray, len(shape) >= 1
        Tensors to "dot".
    axes : int or (2,) ndarray
        * integer_like
          If an int N, sum over the last N axes of `a` and the first N axes
          of `b` in order. The sizes of the corresponding axes must match.
        * (2,) ndarray
          Or, a list of axes to be summed over, first sequence applying to `a`,
          second to `b`. Both elements ndarray must be of the same length.

    See Also
    --------
    dot, einsum

    Notes
    -----
    Three common use cases are:

    * ``axes = 0`` : tensor product :math:`a\otimes b`
    * ``axes = 1`` : tensor dot product :math:`a\cdot b`
    * ``axes = 2`` : (default) tensor double contraction :math:`a:b`

    When there is more than one axis to sum over - and they are not the last
    (first) axes of `a` (`b`) - the argument `axes` should consist of
    two sequences of the same length, with the first axis to sum over given
    first in both sequences, the second axis second, and so forth.

    Examples
    --------
    >>> a = np.arange(60.).reshape(3,4,5)
    >>> b = np.arange(24.).reshape(4,3,2)
    >>> c = np.tensordot(a,b, axes=([1,0],[0,1]))
    >>> c.shape
    (5, 2)
    """
    # Scalar `axes` means "contract the last/first N dimensions".
    if _np.isscalar(axes):
        return _npi.tensordot_int_axes(a, b, axes)

    if len(axes) != 2:
        raise ValueError('Axes must consist of two arrays.')
    axes_a, axes_b = axes
    # Promote a bare scalar axis to a one-element tuple on either side.
    if _np.isscalar(axes_a):
        axes_a = (axes_a,)
    if _np.isscalar(axes_b):
        axes_b = (axes_b,)
    if len(axes_a) != len(axes_b):
        raise ValueError('Axes length mismatch')
    return _npi.tensordot(a, b, axes_a, axes_b)
@set_module('mxnet.ndarray.numpy')
def histogram(a, bins=10, range=None, normed=None, weights=None, density=None):  # pylint: disable=too-many-arguments
    """
    Compute the histogram of a set of data.

    Parameters
    ----------
    a : ndarray
        Input data. The histogram is computed over the flattened array.
    bins : int or NDArray
        If `bins` is an int, it defines the number of equal-width
        bins in the given range (10, by default). If `bins` is an NDArray,
        it defines a monotonically increasing array of bin edges,
        including the rightmost edge, allowing for non-uniform bin widths.
    range : (float, float)
        The lower and upper range of the bins. Required when `bins` is an integer.
        Values outside the range are ignored. The first element of the range must
        be less than or equal to the second.
    normed : bool, optional
        Not supported yet, coming soon.
    weights : array_like, optional
        Not supported yet, coming soon.
    density : bool, optional
        Not supported yet, coming soon.

    Raises
    ------
    NotImplementedError
        If `normed`, `weights` or `density` is requested, or `bins` is given
        in a not-yet-supported form (sequence, string, or int without range).
    ValueError
        If `bins` is of an unrecognized type.
    """
    # `is True` (rather than truthiness) mirrors numpy's treatment of
    # normed/density as tri-state flags (None / False / True).
    if normed is True:
        raise NotImplementedError("normed is not supported yet...")
    if weights is not None:
        raise NotImplementedError("weights is not supported yet...")
    if density is True:
        raise NotImplementedError("density is not supported yet...")
    if isinstance(bins, numeric_types):
        if range is None:
            raise NotImplementedError("automatic range is not supported yet...")
        return _npi.histogram(a, bin_cnt=bins, range=range)
    if isinstance(bins, (list, tuple)):
        raise NotImplementedError("array_like bins is not supported yet...")
    if isinstance(bins, str):
        raise NotImplementedError("string bins is not supported yet...")
    if isinstance(bins, NDArray):
        return _npi.histogram(a, bins=bins)
    # Fix: the original passed `locals()` as a second positional argument to
    # ValueError, producing a tuple instead of a readable message.
    raise ValueError("np.histogram fails with bins of unsupported type {}".format(str(type(bins))))
@set_module('mxnet.ndarray.numpy')
def eye(N, M=None, k=0, dtype=_np.float32, **kwargs):
    """
    Return a 2-D array with ones on the diagonal and zeros elsewhere.

    Parameters
    ----------
    N : int
        Number of rows in the output.
    M : int, optional
        Number of columns in the output. If None, defaults to N.
    k : int, optional
        Index of the diagonal: 0 (the default) refers to the main diagonal,
        a positive value refers to an upper diagonal,
        and a negative value to a lower diagonal.
    dtype : data-type, optional
        Data-type of the returned array.

    Returns
    -------
    I : ndarray of shape (N,M)
        An array where all elements are equal to zero,
        except for the k-th diagonal, whose values are equal to one.
    """
    _sanity_check_params('eye', ['order'], kwargs)
    # Fix: use None as the pop default so current_context() is only invoked
    # when no ctx was supplied (the original evaluated it eagerly as the
    # default and then redundantly re-checked for None).
    ctx = kwargs.pop('ctx', None)
    if ctx is None:
        ctx = current_context()
    return _npi.eye(N, M, k, ctx, dtype)
@set_module('mxnet.ndarray.numpy')
def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis=0, ctx=None):  # pylint: disable=too-many-arguments
    r"""
    Return evenly spaced numbers over a specified interval.

    Returns num evenly spaced samples, calculated over the interval
    [start, stop]. The endpoint of the interval can optionally be excluded.

    Parameters
    ----------
    start : real number
        The starting value of the sequence.
    stop : real number
        The end value of the sequence, unless endpoint is set to False. In
        that case, the sequence consists of all but the last of num + 1
        evenly spaced samples, so that stop is excluded. Note that the step
        size changes when endpoint is False.
    num : int, optional
        Number of samples to generate. Default is 50. Must be non-negative.
    endpoint : bool, optional
        If True, stop is the last sample. Otherwise, it is not included.
        Default is True.
    retstep : bool, optional
        If True, return (samples, step), where step is the spacing between samples.
    dtype : dtype, optional
        The type of the output array. If dtype is not given, infer the data
        type from the other input arguments.
    axis : int, optional
        The axis in the result to store the samples. Relevant only if start or
        stop are array-like. By default (0), the samples will be along a new
        axis inserted at the beginning. Use -1 to get an axis at the end.
    ctx : Context, optional
        An optional device context (default is the current default context).

    Returns
    -------
    samples : ndarray
        There are num equally spaced samples in the closed interval
        `[start, stop]` or the half-open interval `[start, stop)`
        (depending on whether endpoint is True or False).
    step : float, optional
        Only returned if retstep is True
        Size of spacing between samples.

    See Also
    --------
    arange : Similar to `linspace`, but uses a step size (instead of the
             number of samples).

    Examples
    --------
    >>> np.linspace(2.0, 3.0, num=5)
    array([2.  , 2.25, 2.5 , 2.75, 3.  ])
    >>> np.linspace(2.0, 3.0, num=5, endpoint=False)
    array([2. , 2.2, 2.4, 2.6, 2.8])
    >>> np.linspace(2.0, 3.0, num=5, retstep=True)
    (array([2.  , 2.25, 2.5 , 2.75, 3.  ]), 0.25)

    Notes
    -----
    This function differs from the original `numpy.linspace
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.linspace.html>`_ in
    the following aspects:

    - `start` and `stop` do not support list, numpy ndarray and mxnet ndarray
    - axis could only be 0
    - There could be an additional `ctx` argument to specify the device, e.g. the i-th
      GPU.
    """
    if isinstance(start, (list, _np.ndarray, NDArray)) or \
            isinstance(stop, (list, _np.ndarray, NDArray)):
        raise NotImplementedError('start and stop only support int')
    if axis != 0:
        raise NotImplementedError("the function only support axis 0")
    if ctx is None:
        ctx = current_context()
    samples = _npi.linspace(start=start, stop=stop, num=num, endpoint=endpoint, ctx=ctx, dtype=dtype)
    if not retstep:
        return samples
    # Fix: match numpy's step semantics. The divisor is (num - 1) only when
    # the endpoint is included; with endpoint=False it is num. When the
    # divisor is not positive (num <= 1 with endpoint), numpy reports nan
    # instead of raising ZeroDivisionError.
    div = (num - 1) if endpoint else num
    step = (stop - start) / div if div > 0 else _np.nan
    return samples, step
@set_module('mxnet.ndarray.numpy')
def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None, axis=0, ctx=None):  # pylint: disable=too-many-arguments
    r"""Return numbers spaced evenly on a log scale.

    In linear space, the sequence starts at ``base ** start``
    (`base` to the power of `start`) and ends with ``base ** stop``
    (see `endpoint` below).

    Parameters
    ----------
    start : int or float
        ``base ** start`` is the starting value of the sequence.
    stop : int or float
        ``base ** stop`` is the final value of the sequence, unless `endpoint`
        is False. In that case, ``num + 1`` values are spaced over the
        interval in log-space, of which all but the last (a sequence of
        length `num`) are returned.
    num : integer, optional
        Number of samples to generate. Default is 50.
    endpoint : boolean, optional
        If true, `stop` is the last sample. Otherwise, it is not included.
        Default is True.
    base : float, optional
        The base of the log space. The step size between the elements in
        ``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform.
        Default is 10.0.
    dtype : dtype
        The type of the output array. If `dtype` is not given, infer the data
        type from the other input arguments.
    axis : int, optional
        The axis in the result to store the samples. Only ``axis = 0`` is
        currently supported.
    ctx : Context, optional
        An optional device context (default is the current default context).

    Returns
    -------
    samples : ndarray
        `num` samples, equally spaced on a log scale.

    See Also
    --------
    arange : Similar to linspace, with the step size specified instead of the
             number of samples. Note that, when used with a float endpoint, the
             endpoint may or may not be included.
    linspace : Similar to logspace, but with the samples uniformly distributed
               in linear space, instead of log space.

    Notes
    -----
    Logspace is equivalent to the code (only axis = 0 is supported):

    >>> y = np.linspace(start, stop, num=num, endpoint=endpoint)
    ...
    >>> power(base, y).astype(dtype)
    ...

    Examples
    --------
    >>> np.logspace(2.0, 3.0, num=4)
    array([ 100.     ,  215.44347,  464.15887, 1000.     ])
    >>> np.logspace(2.0, 3.0, num=4, endpoint=False)
    array([100.     , 177.82794, 316.22775, 562.3413 ])
    >>> np.logspace(2.0, 3.0, num=4, base=2.0)
    array([4.       , 5.0396843, 6.349604 , 8.       ])
    """
    # Scalar-only endpoints: tensor-valued start/stop are not supported.
    array_like = (list, tuple, _np.ndarray, NDArray)
    if isinstance(start, array_like) or isinstance(stop, array_like):
        raise NotImplementedError('start and stop only support int and float')
    if axis != 0:
        raise NotImplementedError("the function only support axis 0")
    if ctx is None:
        ctx = current_context()
    return _npi.logspace(start=start, stop=stop, num=num, endpoint=endpoint, base=base, ctx=ctx, dtype=dtype)
@set_module('mxnet.ndarray.numpy')
def expand_dims(a, axis):
    """Expand the shape of an array.

    Insert a new axis of length one that will appear at the `axis` position
    in the expanded array shape.

    Parameters
    ----------
    a : ndarray
        Input array.
    axis : int
        Position in the expanded axes where the new axis is placed.

    Returns
    -------
    res : ndarray
        Output array. The number of dimensions is one greater than that of
        the input array.
    """
    return _npi.expand_dims(a, axis)
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def lcm(x1, x2, out=None, **kwargs):
    """
    Returns the lowest common multiple of ``|x1|`` and ``|x2|``.

    Parameters
    ----------
    x1, x2 : ndarrays or scalar values
        The arrays for computing lowest common multiple. If
        ``x1.shape != x2.shape``, they must be broadcastable to a common
        shape (which may be the shape of one or the other).
    out : ndarray or None, optional
        A location into which the result is stored. If provided, it must have
        a shape that the inputs broadcast to. If not provided or None, a
        freshly-allocated array is returned.

    Returns
    -------
    y : ndarray or scalar
        The lowest common multiple of the absolute value of the inputs.
        This is a scalar if both `x1` and `x2` are scalars.

    See Also
    --------
    gcd : The greatest common divisor

    Examples
    --------
    >>> np.lcm(12, 20)
    60
    >>> np.lcm(np.arange(6, dtype=int), 20)
    array([ 0, 20, 20, 60, 20, 20], dtype=int64)
    """
    # lcm is commutative, so no reflected scalar kernel is needed.
    return _ufunc_helper(x1, x2, _npi.lcm, _np.lcm, _npi.lcm_scalar, None, out)
@set_module('mxnet.ndarray.numpy')
def tril(m, k=0):
    r"""
    Lower triangle of an array.

    Return a copy of an array with elements above the `k`-th diagonal zeroed.

    Parameters
    ----------
    m : ndarray, shape (M, N)
        Input array.
    k : int, optional
        Diagonal above which to zero elements. ``k = 0`` (the default) is the
        main diagonal, ``k < 0`` is below it and ``k > 0`` is above.

    Returns
    -------
    tril : ndarray, shape (M, N)
        Lower triangle of `m`, of same shape and data-type as `m`.

    See Also
    --------
    triu : same thing, only for the upper triangle

    Examples
    --------
    >>> a = np.array([[1,2,3],[4,5,6],[7,8,9],[10,11,12]])
    >>> np.tril(a, -1)
    array([[ 0.,  0.,  0.],
           [ 4.,  0.,  0.],
           [ 7.,  8.,  0.],
           [10., 11., 12.]])
    """
    return _npi.tril(m, k)
def _unary_func_helper(x, fn_array, fn_scalar, out=None, **kwargs):
    """Dispatch a unary operator to its scalar or ndarray implementation.

    Parameters
    ----------
    x : ndarray or scalar
        Operand of the unary operator.
    fn_array : function
        Implementation invoked when ``x`` is an ``NDArray``.
    fn_scalar : function
        Implementation invoked when ``x`` is a Python scalar.
    out : ndarray
        Optional output buffer, forwarded only to ``fn_array`` (scalar
        results are returned directly).

    Returns
    -------
    out : mxnet.numpy.ndarray or scalar
        Result array or scalar.
    """
    # Guard-clause dispatch: scalars first, then ndarrays, else reject.
    if isinstance(x, numeric_types):
        return fn_scalar(x, **kwargs)
    if isinstance(x, NDArray):
        return fn_array(x, out=out, **kwargs)
    raise TypeError('type {} not supported'.format(str(type(x))))
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def sin(x, out=None, **kwargs):
    r"""
    Trigonometric sine, element-wise.

    Parameters
    ----------
    x : ndarray or scalar
        Angle, in radians (:math:`2 \pi` rad equals 360 degrees).
    out : ndarray or None
        Optional buffer receiving the result. If given, it must have a
        shape that the inputs broadcast to; otherwise a freshly-allocated
        array is returned whose dtype matches the input's when the input
        is an ndarray.

    Returns
    -------
    y : ndarray or scalar
        The sine of each element of x. This is a scalar if `x` is a scalar.

    Notes
    -----
    This function only supports input type of float.

    Examples
    --------
    >>> np.sin(np.pi/2.)
    1.0
    >>> np.sin(np.array((0., 30., 45., 60., 90.)) * np.pi / 180.)
    array([0.        , 0.5       , 0.70710677, 0.86602545, 1.        ])
    """
    return _unary_func_helper(x, _npi.sin, _np.sin, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def cos(x, out=None, **kwargs):
    r"""
    Cosine, element-wise.

    Parameters
    ----------
    x : ndarray or scalar
        Angle, in radians (:math:`2 \pi` rad equals 360 degrees).
    out : ndarray or None
        Optional buffer receiving the result. If given, it must have a
        shape that the inputs broadcast to; otherwise a freshly-allocated
        array is returned whose dtype matches the input's when the input
        is an ndarray.

    Returns
    -------
    y : ndarray or scalar
        The corresponding cosine values. This is a scalar if x is a scalar.

    Notes
    -----
    This function only supports input type of float.

    Examples
    --------
    >>> np.cos(np.array([0, np.pi/2, np.pi]))
    array([ 1.000000e+00, -4.371139e-08, -1.000000e+00])
    >>> # Example of providing the optional output parameter
    >>> out1 = np.array([0], dtype='f')
    >>> out2 = np.cos(np.array([0.1]), out1)
    >>> out2 is out1
    True
    """
    return _unary_func_helper(x, _npi.cos, _np.cos, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def sinh(x, out=None, **kwargs):
    """
    Hyperbolic sine, element-wise.

    Equivalent to ``1/2 * (np.exp(x) - np.exp(-x))`` or ``-1j * np.sin(1j*x)``.

    Parameters
    ----------
    x : ndarray or scalar
        Input array or scalar.
    out : ndarray or None
        Optional buffer receiving the result. If given, it must have a
        shape that the inputs broadcast to; otherwise a freshly-allocated
        array is returned whose dtype matches the input's when the input
        is an ndarray.

    Returns
    -------
    y : ndarray or scalar
        The corresponding hyperbolic sine values. This is a scalar if `x`
        is a scalar.

    Notes
    -----
    This function only supports input type of float.

    Examples
    --------
    >>> np.sinh(0)
    0.0
    >>> # Example of providing the optional output parameter
    >>> out1 = np.array([0], dtype='f')
    >>> out2 = np.sinh(np.array([0.1]), out1)
    >>> out2 is out1
    True
    """
    return _unary_func_helper(x, _npi.sinh, _np.sinh, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def cosh(x, out=None, **kwargs):
    """
    Hyperbolic cosine, element-wise.

    Equivalent to ``1/2 * (np.exp(x) + np.exp(-x))`` and ``np.cos(1j*x)``.

    Parameters
    ----------
    x : ndarray or scalar
        Input array or scalar.
    out : ndarray or None
        Optional buffer receiving the result. If given, it must have a
        shape that the inputs broadcast to; otherwise a freshly-allocated
        array is returned whose dtype matches the input's when the input
        is an ndarray.

    Returns
    -------
    y : ndarray or scalar
        The corresponding hyperbolic cosine values. This is a scalar if
        `x` is a scalar.

    Notes
    -----
    This function only supports input type of float.

    Examples
    --------
    >>> np.cosh(0)
    1.0
    """
    return _unary_func_helper(x, _npi.cosh, _np.cosh, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def tanh(x, out=None, **kwargs):
    """
    Compute hyperbolic tangent element-wise.

    Equivalent to ``np.sinh(x)/np.cosh(x)``.

    Parameters
    ----------
    x : ndarray or scalar.
        Input array.
    out : ndarray or None
        Optional buffer receiving the result. If given, it must have a
        shape that the inputs fill into; otherwise a freshly-allocated
        array is returned. The dtype of the output and input must be the
        same.

    Returns
    -------
    y : ndarray or scalar
        The corresponding hyperbolic tangent values.

    Notes
    -----
    If `out` is provided, the function writes the result into it,
    and returns a reference to `out`. (See Examples)

    - input x does not support complex computation (like imaginary number)

    >>> np.tanh(np.pi*1j)
    TypeError: type <type 'complex'> not supported

    Examples
    --------
    >>> np.tanh(np.array([0, np.pi]))
    array([0.       , 0.9962721])
    >>> np.tanh(np.pi)
    0.99627207622075
    >>> # Example of providing the optional output parameter illustrating
    >>> # that what is returned is a reference to said parameter
    >>> out1 = np.array(1)
    >>> out2 = np.tanh(np.array(0.1), out1)
    >>> out2 is out1
    True
    """
    return _unary_func_helper(x, _npi.tanh, _np.tanh, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def log10(x, out=None, **kwargs):
    """
    Return the base 10 logarithm of the input array, element-wise.

    Parameters
    ----------
    x : ndarray or scalar
        Input array or scalar.
    out : ndarray or None
        Optional buffer receiving the result. If given, it must have a
        shape that the inputs broadcast to; otherwise a freshly-allocated
        array is returned whose dtype matches the input's when the input
        is an ndarray.

    Returns
    -------
    y : ndarray or scalar
        The logarithm to the base 10 of `x`, element-wise. NaNs are
        returned where x is negative. This is a scalar if `x` is a scalar.

    Notes
    -----
    This function only supports input type of float.

    Examples
    --------
    >>> np.log10(np.array([1e-15, -3.]))
    array([-15.,  nan])
    """
    return _unary_func_helper(x, _npi.log10, _np.log10, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def sqrt(x, out=None, **kwargs):
    """
    Return the non-negative square-root of an array, element-wise.

    Parameters
    ----------
    x : ndarray or scalar
        The values whose square-roots are required.
    out : ndarray, or None, optional
        Optional buffer receiving the result. If given, it must have a
        shape that the inputs broadcast to; otherwise a freshly-allocated
        array is returned.

    Returns
    -------
    y : ndarray or scalar
        An array of the same shape as `x`, containing the positive
        square-root of each element in `x`. This is a scalar if `x` is a
        scalar.

    Notes
    -----
    This function only supports input type of float.

    Examples
    --------
    >>> np.sqrt(np.array([1,4,9]))
    array([1., 2., 3.])
    >>> np.sqrt(np.array([4, -1, _np.inf]))
    array([ 2., nan, inf])
    """
    return _unary_func_helper(x, _npi.sqrt, _np.sqrt, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def cbrt(x, out=None, **kwargs):
    r"""
    Return the cube-root of an array, element-wise.

    Parameters
    ----------
    x : ndarray
        The values whose cube-roots are required.
    out : ndarray, optional
        Optional buffer receiving the result. If given, it must have a
        shape that the inputs broadcast to; otherwise a freshly-allocated
        array is returned. A tuple (possible only as a keyword argument)
        must have length equal to the number of outputs.

    Returns
    -------
    y : ndarray
        An array of the same shape as x, containing the cube-root of each
        element in x. If out was provided, y is a reference to it. This is
        a scalar if x is a scalar.

    Examples
    --------
    >>> np.cbrt([1,8,27])
    array([ 1.,  2.,  3.])
    """
    return _unary_func_helper(x, _npi.cbrt, _np.cbrt, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def abs(x, out=None, **kwargs):
    r"""
    Calculate the absolute value element-wise.

    Parameters
    ----------
    x : ndarray or scalar
        Input array.
    out : ndarray or None, optional
        Optional buffer receiving the result. If given, it must have a
        shape that the inputs broadcast to; otherwise a freshly-allocated
        array is returned.

    Returns
    -------
    absolute : ndarray
        An ndarray containing the absolute value of
        each element in `x`. This is a scalar if `x` is a scalar.

    Examples
    --------
    >>> x = np.array([-1.2, 1.2])
    >>> np.abs(x)
    array([1.2, 1.2])
    """
    return _unary_func_helper(x, _npi.abs, _np.abs, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def absolute(x, out=None, **kwargs):
    r"""
    Calculate the absolute value element-wise.

    ``np.abs`` is a shorthand for this function.

    Parameters
    ----------
    x : ndarray
        Input array.
    out : ndarray, optional
        Optional buffer receiving the result. If given, it must have a
        shape that the inputs broadcast to; otherwise a freshly-allocated
        array is returned. A tuple (possible only as a keyword argument)
        must have length equal to the number of outputs.

    Returns
    -------
    absolute : ndarray
        An ndarray containing the absolute value of each element in x.

    Examples
    --------
    >>> x = np.array([-1.2, 1.2])
    >>> np.absolute(x)
    array([ 1.2,  1.2])
    """
    return _unary_func_helper(x, _npi.absolute, _np.absolute, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def sign(x, out=None, **kwargs):
    r"""
    Returns an element-wise indication of the sign of a number.

    The `sign` function returns ``-1 if x < 0, 0 if x==0, 1 if x > 0``.
    Only supports real number.

    Parameters
    ----------
    x : ndarray or a scalar
        Input values.
    out : ndarray or None, optional
        Optional buffer receiving the result. If given, it must have the
        same shape and dtype as the input ndarray; otherwise a
        freshly-allocated array is returned.

    Returns
    -------
    y : ndarray
        The sign of `x`.
        This is a scalar if `x` is a scalar.

    Note
    ----
    - Only supports real number as input elements.
    - Input type does not support Python native iterables(list, tuple, ...).
    - ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.
    - ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output.
    - ``out`` param does not support scalar input case.

    Examples
    --------
    >>> a = np.array([-5., 4.5])
    >>> np.sign(a)
    array([-1.,  1.])
    >>> # Use scalars as inputs:
    >>> np.sign(4.0)
    1.0
    >>> np.sign(0)
    0
    >>> # Use ``out`` parameter:
    >>> b = np.zeros((2, ))
    >>> np.sign(a, out=b)
    array([-1.,  1.])
    >>> b
    array([-1.,  1.])
    """
    return _unary_func_helper(x, _npi.sign, _np.sign, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def exp(x, out=None, **kwargs):
    r"""
    Calculate the exponential of all elements in the input array.

    Parameters
    ----------
    x : ndarray or scalar
        Input values.
    out : ndarray or None, optional
        Optional buffer receiving the result. If given, it must have a
        shape that the inputs broadcast to; otherwise a freshly-allocated
        array is returned.

    Returns
    -------
    out : ndarray or scalar
        Output array, element-wise exponential of `x`.
        This is a scalar if `x` is a scalar.

    Examples
    --------
    >>> np.exp(1)
    2.718281828459045
    >>> x = np.array([-1, 1, -2, 2])
    >>> np.exp(x)
    array([0.36787945, 2.7182817 , 0.13533528, 7.389056  ])
    """
    return _unary_func_helper(x, _npi.exp, _np.exp, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def expm1(x, out=None, **kwargs):
    r"""
    Calculate `exp(x) - 1` of all elements in the input array.

    Parameters
    ----------
    x : ndarray or scalar
        Input values.
    out : ndarray or None, optional
        Optional buffer receiving the result. If given, it must have a
        shape that the inputs broadcast to; otherwise a freshly-allocated
        array is returned.

    Returns
    -------
    out : ndarray or scalar
        Output array, element-wise exponential minus one: `out = exp(x) - 1`.
        This is a scalar if `x` is a scalar.

    Examples
    --------
    >>> np.expm1(1)
    1.718281828459045
    >>> x = np.array([-1, 1, -2, 2])
    >>> np.expm1(x)
    array([-0.63212056,  1.71828183, -0.86466472,  6.3890561])
    """
    return _unary_func_helper(x, _npi.expm1, _np.expm1, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def arcsin(x, out=None, **kwargs):
    r"""
    Inverse sine, element-wise.

    Parameters
    ----------
    x : ndarray or scalar
        `y`-coordinate on the unit circle.
    out : ndarray or None, optional
        Optional buffer receiving the result. If given, it must have the
        same shape as the input; otherwise a freshly-allocated array is
        returned.

    Returns
    -------
    angle : ndarray or scalar
        Output array is same shape and type as x. This is a scalar if x
        is a scalar. The inverse sine of each element in `x`, in radians
        and in the closed interval ``[-pi/2, pi/2]``.

    Examples
    --------
    >>> np.arcsin(1)     # pi/2
    1.5707963267948966
    >>> np.arcsin(-1)    # -pi/2
    -1.5707963267948966
    >>> np.arcsin(0)
    0.0

    Notes
    -----
    `arcsin` is a multivalued function: for each `x` there are infinitely
    many numbers `z` such that :math:`sin(z) = x`. The convention is to
    return the angle `z` whose real part lies in [-pi/2, pi/2].
    For real-valued input data types, *arcsin* always returns real output.
    For each value that cannot be expressed as a real number or infinity,
    it yields ``nan`` and sets the `invalid` floating point error flag.
    The inverse sine is also known as `asin` or sin^{-1}.
    The output `ndarray` has the same `ctx` as the input `ndarray`.
    This function differs from the original `numpy.arcsin
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.arcsin.html>`_ in
    the following aspects:

    - Only support ndarray or scalar now.
    - `where` argument is not supported.
    - Complex input is not supported.

    References
    ----------
    Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*,
    10th printing, New York: Dover, 1964, pp. 79ff.
    http://www.math.sfu.ca/~cbm/aands/
    """
    return _unary_func_helper(x, _npi.arcsin, _np.arcsin, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def arccos(x, out=None, **kwargs):
    r"""
    Trigonometric inverse cosine, element-wise.

    The inverse of cos so that, if y = cos(x), then x = arccos(y).

    Parameters
    ----------
    x : ndarray
        x-coordinate on the unit circle. For real arguments, the domain
        is [-1, 1].
    out : ndarray, optional
        Optional buffer receiving the result. If given, it must have a
        shape that the inputs broadcast to; otherwise a freshly-allocated
        array is returned. A tuple (possible only as a keyword argument)
        must have length equal to the number of outputs.

    Returns
    -------
    angle : ndarray
        The angle of the ray intersecting the unit circle at the given
        x-coordinate in radians [0, pi]. This is a scalar if x is a scalar.

    See also
    --------
    cos, arctan, arcsin

    Notes
    -----
    arccos is a multivalued function: for each x there are infinitely many
    numbers z such that cos(z) = x. The convention is to return the angle z
    whose real part lies in [0, pi].
    For real-valued input data types, arccos always returns real output.
    For each value that cannot be expressed as a real number or infinity,
    it yields nan and sets the invalid floating point error flag.
    The inverse cos is also known as acos or cos^-1.

    Examples
    --------
    >>> np.arccos([1, -1])
    array([ 0.        ,  3.14159265])
    """
    return _unary_func_helper(x, _npi.arccos, _np.arccos, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def arctan(x, out=None, **kwargs):
    r"""
    Trigonometric inverse tangent, element-wise.

    The inverse of tan, so that if ``y = tan(x)`` then ``x = arctan(y)``.

    Parameters
    ----------
    x : ndarray or scalar
        Input values.
    out : ndarray or None, optional
        Optional buffer receiving the result. If given, it must have a
        shape that the inputs broadcast to; otherwise a freshly-allocated
        array is returned.

    Returns
    -------
    out : ndarray or scalar
        Out has the same shape as `x`. It lies is in
        ``[-pi/2, pi/2]`` (``arctan(+/-inf)`` returns ``+/-pi/2``).
        This is a scalar if `x` is a scalar.

    Notes
    -----
    `arctan` is a multi-valued function: for each `x` there are infinitely
    many numbers `z` such that tan(`z`) = `x`. The convention is to return
    the angle `z` whose real part lies in [-pi/2, pi/2].
    For real-valued input data types, `arctan` always returns real output.
    For each value that cannot be expressed as a real number or infinity,
    it yields ``nan`` and sets the `invalid` floating point error flag.
    For complex-valued input, we do not have support for them yet.
    The inverse tangent is also known as `atan` or tan^{-1}.

    Examples
    --------
    >>> x = np.array([0, 1])
    >>> np.arctan(x)
    array([0.       , 0.7853982])
    >>> np.pi/4
    0.7853981633974483
    """
    return _unary_func_helper(x, _npi.arctan, _np.arctan, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def log(x, out=None, **kwargs):
    """
    Natural logarithm, element-wise.

    The natural logarithm `log` is the inverse of the exponential function,
    so that `log(exp(x)) = x`. The natural logarithm is logarithm in base
    `e`.

    Parameters
    ----------
    x : ndarray
        Input value. Elements must be of real value.
    out : ndarray or None, optional
        Optional buffer receiving the result. If given, it must have the
        same shape and dtype as the input ndarray; otherwise a
        freshly-allocated array is returned.

    Returns
    -------
    y : ndarray
        The natural logarithm of `x`, element-wise.
        This is a scalar if `x` is a scalar.

    Notes
    -----
    Currently only supports data of real values and ``inf`` as input.
    Returns data of real value, ``inf``, ``-inf`` and ``nan`` according to
    the input.
    This function differs from the original `numpy.log
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.log.html>`_ in
    the following aspects:

    - Does not support complex number for now
    - Input type does not support Python native iterables(list, tuple, ...).
    - ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.
    - ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output.
    - ``out`` param does not support scalar input case.

    Examples
    --------
    >>> a = np.array([1, np.exp(1), np.exp(2), 0], dtype=np.float64)
    >>> np.log(a)
    array([  0.,   1.,   2., -inf], dtype=float64)
    >>> # Using default float32 dtype may lead to slightly different behavior:
    >>> a = np.array([1, np.exp(1), np.exp(2), 0], dtype=np.float32)
    >>> np.log(a)
    array([  0.,  0.99999994,   2., -inf])
    >>> np.log(1)
    0.0
    """
    return _unary_func_helper(x, _npi.log, _np.log, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def degrees(x, out=None, **kwargs):
    """
    Convert angles from radians to degrees.

    Parameters
    ----------
    x : ndarray
        Input value. Elements must be of real value.
    out : ndarray or None, optional
        Optional buffer receiving the result. If given, it must have the
        same shape and dtype as the input ndarray; otherwise a
        freshly-allocated array is returned.

    Returns
    -------
    y : ndarray
        The corresponding degree values; if `out` was supplied this is a
        reference to it.
        This is a scalar if `x` is a scalar.

    Notes
    -----
    This function differs from the original `numpy.degrees
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.degrees.html>`_ in
    the following aspects:

    - Input type does not support Python native iterables(list, tuple, ...). Only ndarray is supported.
    - ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.
    - ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output.
    - ``out`` param does not support scalar input case.

    Examples
    --------
    >>> rad = np.arange(12.) * np.pi / 6
    >>> np.degrees(rad)
    array([  0.,  30.,  60.,  90., 120., 150., 180., 210., 240., 270., 300., 330.])
    >>> # Use specified ``out`` ndarray:
    >>> out = np.zeros((rad.shape))
    >>> np.degrees(rad, out)
    array([  0.,  30.,  60.,  90., 120., 150., 180., 210., 240., 270., 300., 330.])
    >>> out
    array([  0.,  30.,  60.,  90., 120., 150., 180., 210., 240., 270., 300., 330.])
    """
    return _unary_func_helper(x, _npi.degrees, _np.degrees, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def rad2deg(x, out=None, **kwargs):
    r"""
    Convert angles from radians to degrees.

    Parameters
    ----------
    x : ndarray or scalar
        Angles in radians.
    out : ndarray or None, optional
        A location into which the result is stored. If not provided or `None`,
        a freshly-allocated array is returned.

    Returns
    -------
    y : ndarray or scalar
        The corresponding angle in degrees.
        This is a scalar if `x` is a scalar.

    Notes
    -----
    "rad2deg(x)" is "x * 180 / pi".

    This function differs from the original numpy.rad2deg in the following aspects:

    - Only support float32 and float64.
    - `out` must be in the same size of input.

    Examples
    --------
    >>> np.rad2deg(np.pi/2)
    90.0
    """
    # Forward **kwargs like the sibling unary wrappers (sin, cos, degrees, ...)
    # so that decorator-validated keyword arguments are not silently dropped.
    return _unary_func_helper(x, _npi.rad2deg, _np.rad2deg, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def rint(x, out=None, **kwargs):
    """
    Round elements of the array to the nearest integer.

    Parameters
    ----------
    x : ndarray or scalar
        Input array.
    out : ndarray or None
        Optional buffer receiving the result. If given, it must have the
        same shape and type as the input; otherwise a freshly-allocated
        array is returned.

    Returns
    -------
    out : ndarray or scalar
        Output array is same shape and type as x. This is a scalar if x
        is a scalar.

    Notes
    -----
    This function differs from the original `numpy.rint
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.rint.html>`_ in
    the following way(s):

    - only ndarray or scalar is accpted as valid input, tuple of ndarray is not supported
    - broadcasting to `out` of different shape is currently not supported
    - when input is plain python numerics, the result will not be stored in the `out` param

    Examples
    --------
    >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
    >>> np.rint(a)
    array([-2., -2., -0.,  0.,  1.,  2.,  2.])
    """
    return _unary_func_helper(x, _npi.rint, _np.rint, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def log2(x, out=None, **kwargs):
    """
    Base-2 logarithm of x.

    Parameters
    ----------
    x : ndarray or scalar
        Input values.
    out : ndarray or None
        Optional buffer receiving the result. If given, it must have the
        same shape and type as the input; otherwise a freshly-allocated
        array is returned.

    Returns
    -------
    y : ndarray
        The logarithm base two of `x`, element-wise.
        This is a scalar if `x` is a scalar.

    Notes
    -----
    This function differs from the original `numpy.log2
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.log2.html>`_ in
    the following way(s):

    - only ndarray or scalar is accpted as valid input, tuple of ndarray is not supported
    - broadcasting to `out` of different shape is currently not supported
    - when input is plain python numerics, the result will not be stored in the `out` param

    Examples
    --------
    >>> x = np.array([0, 1, 2, 2**4])
    >>> np.log2(x)
    array([-inf,   0.,   1.,   4.])
    """
    return _unary_func_helper(x, _npi.log2, _np.log2, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def log1p(x, out=None, **kwargs):
    """
    Return the natural logarithm of one plus the input array, element-wise.

    Calculates ``log(1 + x)``.

    Parameters
    ----------
    x : ndarray or scalar
        Input array.
    out : ndarray or None
        Optional buffer receiving the result. If given, it must have a
        shape that the inputs fill into; otherwise a freshly-allocated
        array is returned. The dtype of the output and input must be the
        same.

    Returns
    -------
    y : ndarray or scalar
        Natural logarithm of 1 + x, element-wise. This is a scalar
        if x is a scalar.

    Notes
    -----
    For real-valued input, `log1p` is accurate also for `x` so small
    that `1 + x == 1` in floating-point accuracy.
    Logarithm is a multivalued function: for each `x` there is an infinite
    number of `z` such that `exp(z) = 1 + x`. The convention is to return
    the `z` whose imaginary part lies in `[-pi, pi]`.
    For real-valued input data types, `log1p` always returns real output.
    For each value that cannot be expressed as a real number or infinity,
    it yields ``nan`` and sets the `invalid` floating point error flag.
    cannot support complex-valued input.

    Examples
    --------
    >>> np.log1p(1e-99)
    1e-99
    >>> a = np.array([3, 4, 5])
    >>> np.log1p(a)
    array([1.3862944, 1.609438 , 1.7917595])
    """
    return _unary_func_helper(x, _npi.log1p, _np.log1p, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def radians(x, out=None, **kwargs):
    """
    Convert angles from degrees to radians.

    Parameters
    ----------
    x : ndarray or scalar
        Input array in degrees.
    out : ndarray or None
        Optional buffer receiving the result. If given, it must have the
        same shape and type as the input; otherwise a freshly-allocated
        array is returned.

    Returns
    -------
    y : ndarray
        The corresponding radian values. This is a scalar if x is a scalar.

    Notes
    -----
    This function differs from the original `numpy.radians
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.radians.html>`_ in
    the following way(s):

    - only ndarray or scalar is accpted as valid input, tuple of ndarray is not supported
    - broadcasting to `out` of different shape is currently not supported
    - when input is plain python numerics, the result will not be stored in the `out` param

    Examples
    --------
    >>> deg = np.arange(12.) * 30.
    >>> np.radians(deg)
    array([0.       , 0.5235988, 1.0471976, 1.5707964, 2.0943952, 2.6179938,
           3.1415927, 3.6651914, 4.1887903, 4.712389 , 5.2359877, 5.7595863],
           dtype=float32)
    """
    return _unary_func_helper(x, _npi.radians, _np.radians, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def deg2rad(x, out=None, **kwargs):
    r"""
    Convert angles from degrees to radians.

    Parameters
    ----------
    x : ndarray or scalar
        Angles in degrees.
    out : ndarray or None, optional
        A location into which the result is stored. If not provided or `None`,
        a freshly-allocated array is returned.

    Returns
    -------
    y : ndarray or scalar
        The corresponding angle in radians.
        This is a scalar if `x` is a scalar.

    Notes
    -----
    "deg2rad(x)" is "x * pi / 180".

    This function differs from the original numpy.deg2rad in the following aspects:

    - Only support float32 and float64.
    - `out` must be in the same size of input.

    Examples
    --------
    >>> np.deg2rad(180)
    3.1415927
    """
    # Forward **kwargs like the sibling unary wrappers (sin, cos, radians, ...)
    # so that decorator-validated keyword arguments are not silently dropped.
    return _unary_func_helper(x, _npi.deg2rad, _np.deg2rad, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def reciprocal(x, out=None, **kwargs):
    r"""
    Return the reciprocal of the argument, element-wise.

    Calculates ``1/x``.

    Parameters
    ----------
    x : ndarray or scalar
        The values whose reciprocals are required.
    out : ndarray or None, optional
        Optional buffer receiving the result. If given, it must have the
        same shape as the input; otherwise a freshly-allocated array is
        returned.

    Returns
    -------
    y : ndarray or scalar
        Output array is same shape and type as x. This is a scalar if x
        is a scalar.

    Examples
    --------
    >>> np.reciprocal(2.)
    0.5
    >>> x = np.array([1, 2., 3.33])
    >>> np.reciprocal(x)
    array([1.       , 0.5      , 0.3003003])

    Notes
    -----
    .. note::
        This function is not designed to work with integers.
    For integer arguments with absolute value larger than 1 the result is
    always zero because of the way Python handles integer division. For
    integer zero the result is an overflow.
    The output `ndarray` has the same `ctx` as the input `ndarray`.
    This function differs from the original `numpy.reciprocal
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.reciprocal.html>`_ in
    the following aspects:

    - Only support ndarray and scalar now.
    - `where` argument is not supported.
    """
    return _unary_func_helper(x, _npi.reciprocal, _np.reciprocal, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def square(x, out=None, **kwargs):
    r"""
    Return the element-wise square of the input.

    Parameters
    ----------
    x : ndarray or scalar
        The values whose squares are required.
    out : ndarray or None, optional
        Optional buffer receiving the result. If given, it must have the
        same shape as the input; otherwise a freshly-allocated array is
        returned.

    Returns
    -------
    y : ndarray or scalar
        Output array is same shape and type as x. This is a scalar if x
        is a scalar.

    Examples
    --------
    >>> np.square(2.)
    4.0
    >>> x = np.array([1, 2., -1])
    >>> np.square(x)
    array([1., 4., 1.])

    Notes
    -----
    The output `ndarray` has the same `ctx` as the input `ndarray`.
    This function differs from the original `numpy.square
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.square.html>`_ in
    the following aspects:

    - Only support ndarray and scalar now.
    - `where` argument is not supported.
    - Complex input is not supported.
    """
    return _unary_func_helper(x, _npi.square, _np.square, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def negative(x, out=None, **kwargs):
    r"""
    Numerical negative, element-wise.

    Parameters
    ----------
    x : ndarray or scalar
        Input array.
    out : ndarray, None, or tuple of ndarray and None, optional
        A location into which the result is stored.

    Returns
    -------
    y : ndarray or scalar
        Returned array or scalar: y = -x. This is a scalar if x is a scalar.

    Examples
    --------
    >>> np.negative(1)
    -1
    """
    # Forward **kwargs like the sibling unary wrappers (sin, cos, square, ...)
    # so that decorator-validated keyword arguments are not silently dropped.
    return _unary_func_helper(x, _npi.negative, _np.negative, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def fix(x, out=None, **kwargs):
    r"""
    Round an array of floats element-wise to nearest integer towards zero.

    The rounded values are returned as floats.

    Parameters
    ----------
    x : ndarray
        An array of floats to be rounded
    out : ndarray, optional
        Output array

    Returns
    -------
    y : ndarray of floats

    Examples
    --------
    >>> np.fix(3.14)
    3
    """
    # Forward **kwargs like the sibling unary wrappers (rint, ceil, floor, ...)
    # so that decorator-validated keyword arguments are not silently dropped.
    return _unary_func_helper(x, _npi.fix, _np.fix, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def tan(x, out=None, **kwargs):
    r"""
    Compute tangent element-wise.

    Equivalent to np.sin(x)/np.cos(x) element-wise.

    Parameters
    ----------
    x : ndarray
        Input array.
    out : ndarray, None, or tuple of ndarray and None, optional
        Optional buffer receiving the result. If given, it must have a
        shape that the inputs broadcast to; otherwise a freshly-allocated
        array is returned. A tuple (possible only as a keyword argument)
        must have length equal to the number of outputs.
    where : ndarray, optional
        Values of True indicate to calculate the ufunc at that position,
        values of False indicate to leave the value in the output alone.

    Returns
    -------
    y : ndarray
        The corresponding tangent values. This is a scalar if x is a scalar.

    Examples
    --------
    >>> np.tan(0.5)
    0.5463024898437905
    """
    return _unary_func_helper(x, _npi.tan, _np.tan, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def ceil(x, out=None, **kwargs):
    r"""
    Return the ceiling of the input, element-wise.

    The ceil of the ndarray `x` is the smallest integer `i`, such that
    `i >= x`. It is often denoted as :math:`\lceil x \rceil`.

    Parameters
    ----------
    x : ndarray or scalar
        Input array.
    out : ndarray or None
        Optional buffer receiving the result. If given, it must have the
        same shape that the inputs fill into; otherwise a freshly-allocated
        array is returned. The dtype of the output and input must be the
        same.

    Returns
    -------
    y : ndarray or scalar
        The ceiling of each element in `x`, with `float` dtype.
        This is a scalar if `x` is a scalar.

    Examples
    --------
    >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
    >>> np.ceil(a)
    array([-1., -1., -0.,  1.,  2.,  2.,  2.])
    >>> #if you use parameter out, x and out must be ndarray.
    >>> a = np.array(1)
    >>> np.ceil(np.array(3.5), a)
    array(4.)
    >>> a
    array(4.)
    """
    return _unary_func_helper(x, _npi.ceil, _np.ceil, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def floor(x, out=None, **kwargs):
    r"""
    Return the floor of the input, element-wise.

    The floor of ``x`` is the largest integer ``i`` such that
    ``i <= x``, often denoted :math:`\lfloor x \rfloor`.

    Parameters
    ----------
    x : ndarray or scalar
        Input array.
    out : ndarray or None
        A location into which the result is stored. If provided, it must
        have the same shape and dtype as the expected output; if not
        provided or None, a freshly-allocated array is returned.

    Returns
    -------
    y : ndarray or scalar
        The floor of each element in `x`, with `float` dtype.
        This is a scalar if `x` is a scalar.

    Examples
    --------
    >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
    >>> np.floor(a)
    array([-2., -2., -1.,  0.,  1.,  1.,  2.])
    >>> # when `out` is used, both x and out must be ndarray
    >>> a = np.array(1)
    >>> np.floor(np.array(3.5), a)
    array(3.)
    >>> a
    array(3.)
    """
    # Delegate to the MXNet operator for ndarrays, numpy for Python scalars.
    return _unary_func_helper(x, _npi.floor, _np.floor, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def bitwise_not(x, out=None, **kwargs):
    r"""
    Compute bit-wise inversion (bit-wise NOT), element-wise.

    Flips every bit of the underlying binary representation of the
    integers in the input array — the behaviour of the C/Python ``~``
    operator.

    Parameters
    ----------
    x : array_like
        Only integer and boolean types are handled.
    out : ndarray, None, or tuple of ndarray and None, optional
        A location into which the result is stored. If provided, it must
        have a shape that the inputs broadcast to. If not provided or
        `None`, a freshly-allocated array is returned.

    Returns
    -------
    out : ndarray or scalar
        Result. This is a scalar if `x` is a scalar.

    See Also
    --------
    bitwise_and, bitwise_or, bitwise_xor
    logical_not
    binary_repr :
        Return the binary representation of the input number as a string.

    Notes
    -----
    `bitwise_not` is an alias for `invert`:

    >>> np.bitwise_not is np.invert
    True

    Examples
    --------
    13 is represented by ``00001101``; its bit-wise NOT is:

    >>> x = np.invert(np.array(13, dtype=np.uint8))
    >>> x
    242
    >>> np.binary_repr(x, width=8)
    '11110010'
    """
    # Delegate to the MXNet operator for ndarrays, numpy for Python scalars.
    return _unary_func_helper(x, _npi.bitwise_not, _np.bitwise_not, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def invert(x, out=None, **kwargs):
    r"""
    Compute bit-wise inversion (bit-wise NOT), element-wise.

    Flips every bit of the underlying binary representation of the
    integers in the input array — the behaviour of the C/Python ``~``
    operator.

    Parameters
    ----------
    x : array_like
        Only integer and boolean types are handled.
    out : ndarray, None, or tuple of ndarray and None, optional
        A location into which the result is stored. If provided, it must
        have a shape that the inputs broadcast to. If not provided or
        `None`, a freshly-allocated array is returned.

    Returns
    -------
    out : ndarray or scalar
        Result. This is a scalar if `x` is a scalar.

    See Also
    --------
    bitwise_and, bitwise_or, bitwise_xor
    logical_not
    binary_repr :
        Return the binary representation of the input number as a string.

    Notes
    -----
    `bitwise_not` is an alias for `invert`:

    >>> np.bitwise_not is np.invert
    True

    Examples
    --------
    13 is represented by ``00001101``; its bit-wise NOT is:

    >>> x = np.invert(np.array(13, dtype=np.uint8))
    >>> x
    242
    >>> np.binary_repr(x, width=8)
    '11110010'
    """
    # Same backend operator as bitwise_not — invert is its numpy alias.
    return _unary_func_helper(x, _npi.bitwise_not, _np.bitwise_not, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def trunc(x, out=None, **kwargs):
    r"""
    Return the truncated value of the input, element-wise.

    Each element is rounded towards zero: the fractional part of the
    signed value is simply discarded.

    Parameters
    ----------
    x : ndarray or scalar
        Input data.
    out : ndarray or None, optional
        A location into which the result is stored.

    Returns
    -------
    y : ndarray or scalar
        The truncated value of each element in `x`.
        This is a scalar if `x` is a scalar.

    Notes
    -----
    Differences from the original ``numpy.trunc``:

    - The `where` parameter is not supported.
    - No automatic type casting: the dtype of `out` must match the expected one.
    - No automatic broadcasting: the shape of `out` must match the expected one.
    - If `x` is a plain Python number, the result is not stored in `out`.

    Examples
    --------
    >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
    >>> np.trunc(a)
    array([-1., -1., -0.,  0.,  1.,  1.,  2.])
    """
    # Delegate to the MXNet operator for ndarrays, numpy for Python scalars.
    return _unary_func_helper(x, _npi.trunc, _np.trunc, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def logical_not(x, out=None, **kwargs):
    r"""
    Compute the truth value of NOT x, element-wise.

    Parameters
    ----------
    x : ndarray or scalar
        Logical NOT is applied to the elements of `x`.
    out : ndarray or None, optional
        A location into which the result is stored.

    Returns
    -------
    y : bool or ndarray of bool
        Boolean result with the same shape as `x` of the NOT operation
        on elements of `x`.
        This is a scalar if `x` is a scalar.

    Notes
    -----
    Differences from the original ``numpy.logical_not``:

    - The `where` parameter is not supported.
    - No automatic type casting: the dtype of `out` must match the expected one.
    - No automatic broadcasting: the shape of `out` must match the expected one.
    - If `x` is a plain Python number, the result is not stored in `out`.

    Examples
    --------
    >>> x = np.array([True, False, 0, 1])
    >>> np.logical_not(x)
    array([False,  True,  True, False])
    >>> x = np.arange(5)
    >>> np.logical_not(x < 3)
    array([False, False, False,  True,  True])
    """
    # Delegate to the MXNet operator for ndarrays, numpy for Python scalars.
    return _unary_func_helper(x, _npi.logical_not, _np.logical_not, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def arcsinh(x, out=None, **kwargs):
    r"""
    Inverse hyperbolic sine, element-wise.
    Parameters
    ----------
    x : ndarray or scalar
        Input array.
    out : ndarray or None, optional
        A location into which the result is stored.
    Returns
    -------
    arcsinh : ndarray
        Array of the same shape as `x`.
        This is a scalar if `x` is a scalar.
    Notes
    -----
    `arcsinh` is a multivalued function: for each `x` there are infinitely
    many numbers `z` such that `sinh(z) = x`.
    For real-valued input data types, `arcsinh` always returns real output.
    For each value that cannot be expressed as a real number or infinity, it
    yields ``nan`` and sets the `invalid` floating point error flag.
    This function differs from the original numpy.arcsinh in the following aspects:
    - Do not support `where`, a parameter in numpy which indicates where to calculate.
    - Do not support complex-valued input.
    - Cannot cast type automatically. DType of `out` must be same as the expected one.
    - Cannot broadcast automatically. Shape of `out` must be same as the expected one.
    - If `x` is plain python numeric, the result won't be stored in out.
    Examples
    --------
    >>> a = np.array([3.2, 5.0])
    >>> np.arcsinh(a)
    array([1.8798671, 2.3124382])
    >>> np.arcsinh(1)
    0.8813736
    """
    return _unary_func_helper(x, _npi.arcsinh, _np.arcsinh, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def arccosh(x, out=None, **kwargs):
    r"""
    Inverse hyperbolic cosine, element-wise.

    Parameters
    ----------
    x : ndarray or scalar
        Input array.
    out : ndarray or None, optional
        A location into which the result is stored.

    Returns
    -------
    arccosh : ndarray
        Array of the same shape as `x`.
        This is a scalar if `x` is a scalar.

    Notes
    -----
    `arccosh` is a multivalued function: for each `x` there are infinitely
    many numbers `z` such that ``cosh(z) = x``. For real-valued input,
    `arccosh` always returns real output; values that cannot be expressed
    as a real number or infinity yield ``nan`` and set the `invalid`
    floating point error flag.

    Differences from the original ``numpy.arccosh``:

    - The `where` parameter is not supported.
    - Complex-valued input is not supported.
    - No automatic type casting: the dtype of `out` must match the expected one.
    - No automatic broadcasting: the shape of `out` must match the expected one.
    - If `x` is a plain Python number, the result is not stored in `out`.

    Examples
    --------
    >>> a = np.array([3.2, 5.0])
    >>> np.arccosh(a)
    array([1.8309381, 2.2924316])
    >>> np.arccosh(1)
    0.0
    """
    # Delegate to the MXNet operator for ndarrays, numpy for Python scalars.
    return _unary_func_helper(x, _npi.arccosh, _np.arccosh, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def arctanh(x, out=None, **kwargs):
    r"""
    Inverse hyperbolic tangent, element-wise.

    Parameters
    ----------
    x : ndarray or scalar
        Input array.
    out : ndarray or None, optional
        A location into which the result is stored.

    Returns
    -------
    arctanh : ndarray
        Array of the same shape as `x`.
        This is a scalar if `x` is a scalar.

    Notes
    -----
    `arctanh` is a multivalued function: for each `x` there are infinitely
    many numbers `z` such that ``tanh(z) = x``. For real-valued input,
    `arctanh` always returns real output; values that cannot be expressed
    as a real number or infinity yield ``nan`` and set the `invalid`
    floating point error flag.

    Differences from the original ``numpy.arctanh``:

    - The `where` parameter is not supported.
    - Complex-valued input is not supported.
    - No automatic type casting: the dtype of `out` must match the expected one.
    - No automatic broadcasting: the shape of `out` must match the expected one.
    - If `x` is a plain Python number, the result is not stored in `out`.

    Examples
    --------
    >>> a = np.array([0.0, -0.5])
    >>> np.arctanh(a)
    array([0., -0.54930615])
    >>> np.arctanh(0.0)
    0.0
    """
    # Delegate to the MXNet operator for ndarrays, numpy for Python scalars.
    return _unary_func_helper(x, _npi.arctanh, _np.arctanh, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
def tile(A, reps):
    r"""
    Construct an array by repeating A the number of times given by reps.
    If `reps` has length ``d``, the result will have dimension of
    ``max(d, A.ndim)``.
    If ``A.ndim < d``, `A` is promoted to be d-dimensional by prepending new
    axes. So a shape (3,) array is promoted to (1, 3) for 2-D replication,
    or shape (1, 1, 3) for 3-D replication. If this is not the desired
    behavior, promote `A` to d-dimensions manually before calling this
    function.
    If ``A.ndim > d``, `reps` is promoted to `A`.ndim by pre-pending 1's to it.
    Thus for an `A` of shape (2, 3, 4, 5), a `reps` of (2, 2) is treated as
    (1, 1, 2, 2).
    Parameters
    ----------
    A : ndarray or scalar
        An input array or a scalar to repeat.
    reps : a single integer or tuple of integers
        The number of repetitions of `A` along each axis.
    Returns
    -------
    c : ndarray
        The tiled output array.
    Examples
    --------
    >>> a = np.array([0, 1, 2])
    >>> np.tile(a, 2)
    array([0., 1., 2., 0., 1., 2.])
    >>> np.tile(a, (2, 2))
    array([[0., 1., 2., 0., 1., 2.],
    [0., 1., 2., 0., 1., 2.]])
    >>> np.tile(a, (2, 1, 2))
    array([[[0., 1., 2., 0., 1., 2.]],
    [[0., 1., 2., 0., 1., 2.]]])
    >>> b = np.array([[1, 2], [3, 4]])
    >>> np.tile(b, 2)
    array([[1., 2., 1., 2.],
    [3., 4., 3., 4.]])
    >>> np.tile(b, (2, 1))
    array([[1., 2.],
    [3., 4.],
    [1., 2.],
    [3., 4.]])
    >>> c = np.array([1,2,3,4])
    >>> np.tile(c,(4,1))
    array([[1., 2., 3., 4.],
    [1., 2., 3., 4.],
    [1., 2., 3., 4.],
    [1., 2., 3., 4.]])
    Scalar as input:
    >>> np.tile(2, 3)
    array([2, 2, 2]) # repeating integer `2`
    """
    return _unary_func_helper(A, _npi.tile, _np.tile, reps=reps)
# pylint: disable=redefined-outer-name
@set_module('mxnet.ndarray.numpy')
def split(ary, indices_or_sections, axis=0):
    """
    Split an array into multiple sub-arrays along `axis`.

    Parameters
    ----------
    ary : ndarray
        Array to be divided into sub-arrays.
    indices_or_sections : int or 1-D python tuple, list or set.
        An integer N divides the array into N equal parts along `axis`;
        if such a split is not possible an error is raised. A 1-D sequence
        of sorted integers gives the cut points along `axis`; for example
        ``[2, 3]`` with ``axis=0`` results in

        - ary[:2]
        - ary[2:3]
        - ary[3:]

        An index exceeding the array dimension along `axis` yields an
        empty sub-array in the corresponding position.
    axis : int, optional
        The axis along which to split, default is 0.

    Returns
    -------
    sub-arrays : list of ndarrays
        A list of sub-arrays.

    Raises
    ------
    ValueError
        If `indices_or_sections` is given as an integer, but
        a split does not result in equal division.
    """
    length = ary.shape[axis]
    if isinstance(indices_or_sections, integer_types):
        num_sections = indices_or_sections
        if length % num_sections:
            raise ValueError('array split does not result in an equal division')
        step = length // num_sections
        # Cut points are the multiples of the (equal) section size.
        indices = [step * i for i in range(num_sections)]
    elif isinstance(indices_or_sections, (list, set, tuple)):
        indices = [0] + list(indices_or_sections)
    else:
        raise ValueError('indices_or_sections must be either int, or tuple / list / set of ints')
    ret = _npi.split(ary, indices, axis, False)
    assert isinstance(ret, list), 'Output of split should be list,' \
        ' got a return type {}'.format(type(ret))
    return ret
# pylint: enable=redefined-outer-name
# pylint: disable=redefined-outer-name
@set_module('mxnet.ndarray.numpy')
def array_split(ary, indices_or_sections, axis=0):
    """Split an array into multiple sub-arrays.

    Unlike `split`, an integer `indices_or_sections` N does not have to
    divide the axis evenly: an array of length l split into n sections
    yields ``l % n`` sub-arrays of size ``l//n + 1`` and the rest of size
    ``l//n``. A 1-D sequence of sorted integers gives the cut points along
    `axis`; for example ``[2, 3]`` with ``axis=0`` results in

    - ary[:2]
    - ary[2:3]
    - ary[3:]

    An index exceeding the array dimension along `axis` yields an empty
    sub-array in the corresponding position.

    Parameters
    ----------
    ary : ndarray
        Array to be divided into sub-arrays.
    indices_or_sections : int or 1-D Python tuple, list or set.
        Param used to determine the number and size of the subarray.
    axis : int, optional
        The axis along which to split, default is 0.

    Returns
    -------
    sub-arrays : list of ndarrays
        A list of sub-arrays.

    Examples
    --------
    >>> x = np.arange(9.0)
    >>> np.array_split(x, 3)
    [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7., 8.])]
    >>> np.array_split(x, [3, 5, 6, 8])
    [array([0., 1., 2.]), array([3., 4.]), array([5.]), array([6., 7.]), array([])]
    >>> x = np.arange(8.0)
    >>> np.array_split(x, 3)
    [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7.])]
    >>> x = np.arange(7.0)
    >>> np.array_split(x, 3)
    [array([0., 1., 2.]), array([3., 4.]), array([5., 6.])]
    """
    sections = 0
    indices = []
    if isinstance(indices_or_sections, integer_types):
        sections = indices_or_sections
    elif isinstance(indices_or_sections, (list, set, tuple)):
        indices = [0] + list(indices_or_sections)
    else:
        raise ValueError('indices_or_sections must be either int, or tuple / list / set of ints')
    # The backend may return a bare ndarray for a single section; normalize to a list.
    ret = _npi.split(ary, indices, axis, False, sections)
    return ret if isinstance(ret, list) else [ret]
# pylint: enable=redefined-outer-name
# pylint: disable=redefined-outer-name
@set_module('mxnet.ndarray.numpy')
def hsplit(ary, indices_or_sections):
    """Split an array into multiple sub-arrays horizontally (column-wise).

    This is equivalent to ``split`` with ``axis=0`` if ``ary`` has one
    dimension, and otherwise that with ``axis=1``.

    Parameters
    ----------
    ary : ndarray
        Array to be divided into sub-arrays.
    indices_or_sections : int, list of ints or tuple of ints.
        An integer N divides the array into N equal parts along the axis;
        if such a split is not possible an error is raised. A list of
        sorted integers gives the cut points along the axis. Each index
        must be less than or equal to the array dimension along the axis,
        otherwise an error is raised.

    Returns
    -------
    sub-arrays : list of ndarrays
        A list of sub-arrays.

    Notes
    -----
    - A ValueError is raised when an integer `indices_or_sections` does
      not result in an equal division.
    - An integer `indices_or_sections` of 1 raises an error, because a
      single output from split is not supported yet.

    See Also
    --------
    split : Split an array into multiple sub-arrays of equal size.

    Examples
    --------
    >>> x = np.arange(16.0).reshape(4, 4)
    >>> np.hsplit(x, 2)
    [array([[ 0.,  1.],
           [ 4.,  5.],
           [ 8.,  9.],
           [12., 13.]]),
    array([[ 2.,  3.],
           [ 6.,  7.],
           [10., 11.],
           [14., 15.]])]
    >>> np.hsplit(x, [3, 6])
    [array([[ 0.,  1.,  2.],
           [ 4.,  5.,  6.],
           [ 8.,  9., 10.],
           [12., 13., 14.]]),
    array([[ 3.],
           [ 7.],
           [11.],
           [15.]]),
    array([], shape=(4, 0), dtype=float32)]

    With a higher dimensional array the split is still along the second axis.

    >>> x = np.arange(8.0).reshape(2, 2, 2)
    >>> np.hsplit(x, 2)
    [array([[[ 0.,  1.]],
           [[ 4.,  5.]]]),
    array([[[ 2.,  3.]],
           [[ 6.,  7.]]])]

    For a one-dimensional input the split happens along axis 0.

    >>> x = np.arange(4)
    >>> np.hsplit(x, 2)
    [array([0., 1.]), array([2., 3.])]

    Repeated cut points produce empty sub-arrays.

    >>> np.hsplit(x, [2, 2])
    [array([0., 1.]), array([], dtype=float32), array([2., 3.])]
    """
    if len(ary.shape) < 1:
        raise ValueError('hsplit only works on arrays of 1 or more dimensions')
    sections = 0
    indices = []
    if isinstance(indices_or_sections, integer_types):
        sections = indices_or_sections
    elif isinstance(indices_or_sections, (list, set, tuple)):
        indices = [0] + list(indices_or_sections)
    else:
        raise ValueError('indices_or_sections must be either int, or tuple / list / set of ints')
    # The backend may return a bare ndarray for a single section; normalize to a list.
    ret = _npi.hsplit(ary, indices, 1, False, sections)
    return ret if isinstance(ret, list) else [ret]
# pylint: enable=redefined-outer-name
@set_module('mxnet.ndarray.numpy')
def vsplit(ary, indices_or_sections):
    r"""
    vsplit(ary, indices_or_sections)

    Split an array into multiple sub-arrays vertically (row-wise).

    ``vsplit`` is equivalent to ``split`` with `axis=0` (default): the
    array is always split along the first axis regardless of the array
    dimension.

    Parameters
    ----------
    ary : ndarray
        Array to be divided into sub-arrays.
    indices_or_sections : int or 1 - D Python tuple, list or set.
        An integer N divides the array into N equal parts along axis 0;
        if such a split is not possible an error is raised. A 1-D sequence
        of sorted integers gives the cut points along axis 0; for example
        ``[2, 3]`` results in

        - ary[:2]
        - ary[2:3]
        - ary[3:]

        An index exceeding the array dimension along axis 0 raises an error.

    Returns
    -------
    sub-arrays : list of ndarrays
        A list of sub-arrays.

    See Also
    --------
    split : Split an array into multiple sub-arrays of equal size.

    Notes
    -----
    Differences from the original ``numpy.vsplit``:

    - ``indices_or_sections`` does not accept an ndarray, only a scalar,
      tuple or list.
    - An index in ``indices_or_sections`` exceeding the dimension of the
      array along axis 0 raises an error instead of yielding an empty
      sub-array.

    Examples
    --------
    >>> x = np.arange(16.0).reshape(4, 4)
    >>> np.vsplit(x, 2)
    [array([[0., 1., 2., 3.],
           [4., 5., 6., 7.]]), array([[ 8.,  9., 10., 11.],
           [12., 13., 14., 15.]])]

    With a higher dimensional array the split is still along the first axis.

    >>> x = np.arange(8.0).reshape(2, 2, 2)
    >>> np.vsplit(x, 2)
    [array([[[0., 1.],
           [2., 3.]]]), array([[[4., 5.],
           [6., 7.]]])]
    """
    if len(ary.shape) < 2:
        raise ValueError("vsplit only works on arrays of 2 or more dimensions")
    # Row-wise split is just a split along the first axis.
    return split(ary, indices_or_sections, 0)
# pylint: disable=redefined-outer-name
@set_module('mxnet.ndarray.numpy')
def dsplit(ary, indices_or_sections):
    """
    Split array into multiple sub-arrays along the 3rd axis (depth).

    ``dsplit`` is equivalent to ``split`` with ``axis=2``: the array is
    always split along the third axis, provided the array has at least
    three dimensions. See the `split` documentation for details.

    Parameters
    ----------
    ary : ndarray
        Array to be divided into sub-arrays.
    indices_or_sections : int or 1 - D Python tuple, list or set.
        An integer N divides the array into N equal parts along axis 2;
        if such a split is not possible an error is raised. A 1-D sequence
        of sorted integers gives the cut points along axis 2; for example
        ``[2, 3]`` results in

        - ary[:, :, :2]
        - ary[:, :, 2:3]
        - ary[:, :, 3:]

        An index exceeding the array dimension along axis 2 raises an error.

    Examples
    --------
    >>> x = np.arange(16.0).reshape(2, 2, 4)
    >>> np.dsplit(x, 2)
    [array([[[ 0.,  1.],
            [ 4.,  5.]],
           [[ 8.,  9.],
            [12., 13.]]]), array([[[ 2.,  3.],
            [ 6.,  7.]],
           [[10., 11.],
            [14., 15.]]])]
    >>> np.dsplit(x, np.array([3, 6]))
    [array([[[ 0.,  1.,  2.],
            [ 4.,  5.,  6.]],
           [[ 8.,  9., 10.],
            [12., 13., 14.]]]),
    array([[[ 3.],
            [ 7.]],
           [[11.],
            [15.]]]),
    array([], shape=(2, 2, 0), dtype=float64)]
    """
    if len(ary.shape) < 3:
        raise ValueError('dsplit only works on arrays of 3 or more dimensions')
    # Depth-wise split is just a split along the third axis.
    return split(ary, indices_or_sections, 2)
# pylint: enable=redefined-outer-name
@set_module('mxnet.ndarray.numpy')
def concatenate(seq, axis=0, out=None):
    """
    Join a sequence of arrays along an existing axis.

    Parameters
    ----------
    a1, a2, ... : sequence of ndarray
        The arrays must have the same shape, except in the dimension
        corresponding to `axis` (the first, by default).
    axis : int, optional
        The axis along which the arrays will be joined. If axis is None,
        arrays are flattened before use. Default is 0.
    out : ndarray, optional
        If provided, the destination to place the result. The shape must be
        correct, matching that of what concatenate would have returned if no
        out argument were specified.

    Returns
    -------
    res : ndarray
        The concatenated array.

    Examples
    --------
    >>> a = np.array([[1, 2], [3, 4]])
    >>> b = np.array([[5, 6]])
    >>> np.concatenate((a, b), axis=0)
    array([[1., 2.],
           [3., 4.],
           [5., 6.]])
    >>> np.concatenate((a, b), axis=None)
    array([1., 2., 3., 4., 5., 6.])
    >>> np.concatenate((a, b.T), axis=1)
    array([[1., 2., 5.],
           [3., 4., 6.]])
    """
    # The backend operator is variadic; unpack the sequence of arrays.
    return _npi.concatenate(*seq, axis=axis, out=out)
@set_module('mxnet.ndarray.numpy')
def append(arr, values, axis=None):  # pylint: disable=redefined-outer-name
    """
    Append values to the end of an array.

    Parameters
    ----------
    arr : ndarray
        Values are appended to a copy of this array.
    values : ndarray
        These values are appended to a copy of `arr`. It must be of the
        correct shape (the same shape as `arr`, excluding `axis`). If
        `axis` is not specified, `values` can be any shape and will be
        flattened before use.
    axis : int, optional
        The axis along which `values` are appended. If `axis` is not
        given, both `arr` and `values` are flattened before use.

    Returns
    -------
    append : ndarray
        A copy of `arr` with `values` appended to `axis`. Note that
        `append` does not occur in-place: a new array is allocated and
        filled. If `axis` is None, `out` is a flattened array.

    Examples
    --------
    >>> np.append(np.array([1, 2, 3]), np.array([[4, 5, 6],[7, 8, 9]]))
    array([1., 2., 3., 4., 5., 6., 7., 8., 9.])

    When `axis` is specified, `values` must have the correct shape.

    >>> np.append(np.array([[1, 2, 3], [4, 5, 6]]), np.array([[7, 8, 9]]), axis=0)
    array([[1., 2., 3.],
           [4., 5., 6.],
           [7., 8., 9.]])
    """
    # Appending is concatenation of the two arrays along `axis`
    # (the backend flattens both when axis is None).
    return _npi.concatenate(arr, values, axis=axis, out=None)
@set_module('mxnet.ndarray.numpy')
def stack(arrays, axis=0, out=None):
    """Join a sequence of arrays along a new axis.

    The `axis` parameter specifies the index of the new axis in the
    dimensions of the result: ``axis=0`` makes it the first dimension,
    ``axis=-1`` the last.

    Parameters
    ----------
    arrays : sequence of ndarray
        Each array must have the same shape.
    axis : int, optional
        The axis in the result array along which the input arrays are stacked.
    out : ndarray, optional
        If provided, the destination to place the result. The shape must be
        correct, matching that of what stack would have returned if no out
        argument were specified.

    Returns
    -------
    stacked : ndarray
        The stacked array has one more dimension than the input arrays.
    """
    def _as_list(seq):
        # Reject lazy iterables (e.g. generators) that cannot be indexed.
        if not hasattr(seq, '__getitem__') and hasattr(seq, '__iter__'):
            raise ValueError("expected iterable for arrays but got {}".format(type(seq)))
        return list(seq)
    return _npi.stack(*_as_list(arrays), axis=axis, out=out)
@set_module('mxnet.ndarray.numpy')
def vstack(arrays, out=None):
    r"""Stack arrays in sequence vertically (row wise).

    This is equivalent to concatenation along the first axis after 1-D
    arrays of shape `(N,)` have been reshaped to `(1,N)`. Rebuilds arrays
    divided by `vsplit`.

    This function makes most sense for arrays with up to 3 dimensions —
    for instance, pixel data with a height (first axis), width (second
    axis), and r/g/b channels (third axis). The functions `concatenate`
    and `stack` provide more general stacking and concatenation
    operations.

    Parameters
    ----------
    tup : sequence of ndarrays
        The arrays must have the same shape along all but the first axis.
        1-D arrays must have the same length.

    Returns
    -------
    stacked : ndarray
        The array formed by stacking the given arrays, will be at least 2-D.

    Examples
    --------
    >>> a = np.array([1, 2, 3])
    >>> b = np.array([2, 3, 4])
    >>> np.vstack((a, b))
    array([[1., 2., 3.],
           [2., 3., 4.]])
    >>> a = np.array([[1], [2], [3]])
    >>> b = np.array([[2], [3], [4]])
    >>> np.vstack((a, b))
    array([[1.],
           [2.],
           [3.],
           [2.],
           [3.],
           [4.]])
    """
    def _as_list(seq):
        # Reject lazy iterables (e.g. generators) that cannot be indexed.
        if not hasattr(seq, '__getitem__') and hasattr(seq, '__iter__'):
            raise ValueError("expected iterable for arrays but got {}".format(type(seq)))
        return list(seq)
    return _npi.vstack(*_as_list(arrays))
@set_module('mxnet.ndarray.numpy')
def row_stack(arrays):
    r"""Stack arrays in sequence vertically (row wise); alias of `vstack`.

    This is equivalent to concatenation along the first axis after 1-D
    arrays of shape `(N,)` have been reshaped to `(1,N)`. Rebuilds arrays
    divided by `vsplit`.

    This function makes most sense for arrays with up to 3 dimensions —
    for instance, pixel data with a height (first axis), width (second
    axis), and r/g/b channels (third axis). The functions `concatenate`
    and `stack` provide more general stacking and concatenation
    operations.

    Parameters
    ----------
    tup : sequence of ndarrays
        The arrays must have the same shape along all but the first axis.
        1-D arrays must have the same length.

    Returns
    -------
    stacked : ndarray
        The array formed by stacking the given arrays, will be at least 2-D.

    Examples
    --------
    >>> a = np.array([1, 2, 3])
    >>> b = np.array([2, 3, 4])
    >>> np.vstack((a, b))
    array([[1., 2., 3.],
           [2., 3., 4.]])
    >>> a = np.array([[1], [2], [3]])
    >>> b = np.array([[2], [3], [4]])
    >>> np.vstack((a, b))
    array([[1.],
           [2.],
           [3.],
           [2.],
           [3.],
           [4.]])
    """
    def _as_list(seq):
        # Reject lazy iterables (e.g. generators) that cannot be indexed.
        if not hasattr(seq, '__getitem__') and hasattr(seq, '__iter__'):
            raise ValueError("expected iterable for arrays but got {}".format(type(seq)))
        return list(seq)
    return _npi.vstack(*_as_list(arrays))
@set_module('mxnet.ndarray.numpy')
def column_stack(tup):
    """
    Stack 1-D arrays as columns into a 2-D array.

    Takes a sequence of 1-D arrays and stacks them as columns to make a
    single 2-D array. 2-D arrays are stacked as-is, just like with
    `hstack`; 1-D arrays are turned into 2-D columns first.

    Returns
    -------
    stacked : 2-D array
        The array formed by stacking the given arrays.

    See Also
    --------
    stack, hstack, vstack, concatenate

    Examples
    --------
    >>> a = np.array((1,2,3))
    >>> b = np.array((2,3,4))
    >>> np.column_stack((a,b))
    array([[1., 2.],
           [2., 3.],
           [3., 4.]])
    """
    # The backend operator is variadic; unpack the sequence of arrays.
    return _npi.column_stack(*tup)
@set_module('mxnet.ndarray.numpy')
def hstack(arrays):
    """
    Stack arrays in sequence horizontally (column wise).

    This is equivalent to concatenation along the second axis, except for
    1-D arrays where it concatenates along the first axis. Rebuilds arrays
    divided by `hsplit`.

    This function makes most sense for arrays with up to 3 dimensions —
    for instance, pixel data with a height (first axis), width (second
    axis), and r/g/b channels (third axis). The functions `concatenate`,
    `stack` and `block` provide more general stacking and concatenation
    operations.

    Parameters
    ----------
    tup : sequence of ndarrays
        The arrays must have the same shape along all but the second axis,
        except 1-D arrays which can be any length.

    Returns
    -------
    stacked : ndarray
        The array formed by stacking the given arrays.

    Examples
    --------
    >>> from mxnet import np,npx
    >>> a = np.array((1,2,3))
    >>> b = np.array((2,3,4))
    >>> np.hstack((a,b))
    array([1., 2., 3., 2., 3., 4.])
    >>> a = np.array([[1],[2],[3]])
    >>> b = np.array([[2],[3],[4]])
    >>> np.hstack((a,b))
    array([[1., 2.],
           [2., 3.],
           [3., 4.]])
    """
    # The backend operator is variadic; unpack the sequence of arrays.
    return _npi.hstack(*arrays)
@set_module('mxnet.ndarray.numpy')
def dstack(arrays):
    """
    Stack arrays in sequence depth wise (along third axis).

    This is equivalent to concatenation along the third axis after 2-D
    arrays of shape `(M,N)` have been reshaped to `(M,N,1)` and 1-D arrays
    of shape `(N,)` have been reshaped to `(1,N,1)`. Rebuilds arrays
    divided by `dsplit`.

    This function makes most sense for arrays with up to 3 dimensions —
    for instance, pixel data with a height (first axis), width (second
    axis), and r/g/b channels (third axis). The functions `concatenate`,
    `stack` and `block` provide more general stacking and concatenation
    operations.

    Parameters
    ----------
    tup : sequence of arrays
        The arrays must have the same shape along all but the third axis.
        1-D or 2-D arrays must have the same shape.

    Returns
    -------
    stacked : ndarray
        The array formed by stacking the given arrays, will be at least 3-D.

    Examples
    --------
    >>> a = np.array((1,2,3))
    >>> b = np.array((2,3,4))
    >>> np.dstack((a,b))
    array([[[1, 2],
            [2, 3],
            [3, 4]]])
    >>> a = np.array([[1],[2],[3]])
    >>> b = np.array([[2],[3],[4]])
    >>> np.dstack((a,b))
    array([[[1, 2]],
           [[2, 3]],
           [[3, 4]]])
    """
    # The backend operator is variadic; unpack the sequence of arrays.
    return _npi.dstack(*arrays)
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def maximum(x1, x2, out=None, **kwargs):
    """
    Element-wise maximum of the input arrays, with broadcasting.

    Parameters
    ----------
    x1, x2 : scalar or mxnet.numpy.ndarray
        The arrays holding the elements to be compared. They must have the
        same shape, or shapes that can be broadcast to a single shape.

    Returns
    -------
    out : mxnet.numpy.ndarray or scalar
        The maximum of x1 and x2, element-wise. This is a scalar if both
        x1 and x2 are scalars.
    """
    # Dispatch on operand types: ndarray/ndarray, scalar fallback, or scalar op.
    return _ufunc_helper(x1, x2, _npi.maximum, _np.maximum, _npi.maximum_scalar, None, out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def minimum(x1, x2, out=None, **kwargs):
    """
    Element-wise minimum of the input arrays, with broadcasting.

    Parameters
    ----------
    x1, x2 : scalar or mxnet.numpy.ndarray
        The arrays holding the elements to be compared. They must have the
        same shape, or shapes that can be broadcast to a single shape.

    Returns
    -------
    out : mxnet.numpy.ndarray or scalar
        The minimum of x1 and x2, element-wise. This is a scalar if both
        x1 and x2 are scalars.
    """
    # Dispatch on operand types: ndarray/ndarray, scalar fallback, or scalar op.
    return _ufunc_helper(x1, x2, _npi.minimum, _np.minimum, _npi.minimum_scalar, None, out)
@set_module('mxnet.ndarray.numpy')
def swapaxes(a, axis1, axis2):
    """Interchange two axes of an array.

    Parameters
    ----------
    a : ndarray
        Input array.
    axis1 : int
        First axis.
    axis2 : int
        Second axis.

    Returns
    -------
    a_swapped : ndarray
        Swapped array. This is always a copy of the input array.
    """
    # The backend operator names the axes dim1/dim2.
    return _npi.swapaxes(a, dim1=axis1, dim2=axis2)
@set_module('mxnet.ndarray.numpy')
def clip(a, a_min, a_max, out=None):
    """clip(a, a_min, a_max, out=None)
    Limit the values in an array to a given interval.

    Values smaller than ``a_min`` become ``a_min`` and values larger
    than ``a_max`` become ``a_max``; everything in between is left
    unchanged. For example, with the interval ``[0, 1]``, values below
    0 map to 0 and values above 1 map to 1.

    Parameters
    ----------
    a : ndarray
        Array containing elements to clip.
    a_min : scalar or `None`
        Lower bound. If `None`, no clipping is performed on the lower
        interval edge. At most one of `a_min` and `a_max` may be `None`.
    a_max : scalar or `None`
        Upper bound. If `None`, no clipping is performed on the upper
        interval edge. At most one of `a_min` and `a_max` may be `None`.
    out : ndarray, optional
        Destination array; may be the input array itself for in-place
        clipping. Must have the right shape, and its type is preserved.

    Returns
    -------
    clipped_array : ndarray
        An array with the elements of `a`, but where values
        < `a_min` are replaced with `a_min`, and those > `a_max`
        with `a_max`.

    Notes
    -----
    ndarray-valued `a_min` and `a_max` are not supported.

    Examples
    --------
    >>> a = np.arange(10)
    >>> np.clip(a, 1, 8)
    array([1., 1., 2., 3., 4., 5., 6., 7., 8., 8.], dtype=float32)
    >>> np.clip(a, 3, 6, out=a)
    array([3., 3., 3., 3., 4., 5., 6., 6., 6., 6.], dtype=float32)
    """
    if a_min is None and a_max is None:
        raise ValueError('array_clip: must set either max or min')
    # Open-ended bounds are expressed as infinities for the backend kernel.
    lower = float('-inf') if a_min is None else a_min
    upper = float('inf') if a_max is None else a_max
    return _npi.clip(a, lower, upper, out=out)
@set_module('mxnet.ndarray.numpy')
def argmax(a, axis=None, out=None):
    r"""
    Return the indices of the maximum values along an axis.

    Parameters
    ----------
    a : ndarray
        Input array. Only ndarrays of dtype `float16`, `float32` and
        `float64` are supported.
    axis : int, optional
        Axis along which to search. By default the index is computed
        over the flattened array.
    out : ndarray or None, optional
        Location into which the result is stored. Must match the shape
        and dtype of the expected output; if omitted or `None`, a new
        array is allocated.

    Returns
    -------
    index_array : ndarray
        Indices into the array, with the same dtype as the input. Has
        the shape of `a` with the `axis` dimension removed.

    Notes
    -----
    When the maximum occurs more than once, the index of the first
    occurrence is returned.
    This function differs from the original `numpy.argmax
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.argmax.html>`_ in
    the following aspects:
    - Python native iterables (list, tuple, ...) are not accepted as input.
    - ``out`` cannot be broadcast or type-cast automatically; its shape and
      dtype must match the expected output exactly.
    - ``out`` does not support the scalar-input case.

    Examples
    --------
    >>> a = np.arange(6).reshape(2,3) + 10
    >>> np.argmax(a)
    array(5.)
    >>> np.argmax(a, axis=0)
    array([1., 1., 1.])
    >>> np.argmax(a, axis=1)
    array([2., 2.])
    """
    # keepdims is pinned to False to match NumPy's argmax result shape.
    return _npi.argmax(a, axis=axis, keepdims=False, out=out)
@set_module('mxnet.ndarray.numpy')
def argmin(a, axis=None, out=None):
    r"""
    Returns the indices of the minimum values along an axis.
    Parameters
    ----------
    a : ndarray
        Input array. Only support ndarrays of dtype `float16`, `float32`, and `float64`.
    axis : int, optional
        By default, the index is into the flattened array, otherwise
        along the specified axis.
    out : ndarray or None, optional
        If provided, the result will be inserted into this array. It should
        be of the appropriate shape and dtype.
    Returns
    -------
    index_array : ndarray of indices whose dtype is same as the input ndarray.
        Array of indices into the array. It has the same shape as `a.shape`
        with the dimension along `axis` removed.
    Notes
    -----
    In case of multiple occurrences of the minimum values, the indices
    corresponding to the first occurrence are returned.
    This function differs from the original `numpy.argmin
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.argmin.html>`_ in
    the following aspects:
    - Input type does not support Python native iterables(list, tuple, ...).
    - ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.
    - ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output.
    - ``out`` param does not support scalar input case.
    Examples
    --------
    >>> a = np.arange(6).reshape(2,3) + 10
    >>> a
    array([[10., 11., 12.],
           [13., 14., 15.]])
    >>> np.argmin(a)
    array(0.)
    >>> np.argmin(a, axis=0)
    array([0., 0., 0.])
    >>> np.argmin(a, axis=1)
    array([0., 0.])
    >>> b = np.arange(6)
    >>> b[2] = 0
    >>> b
    array([0., 1., 0., 3., 4., 5.])
    >>> np.argmin(b)  # Only the first occurrence is returned.
    array(0.)
    Specify ``out`` ndarray:
    >>> a = np.arange(6).reshape(2,3) + 10
    >>> b = np.zeros((2,))
    >>> np.argmin(a, axis=1, out=b)
    array([0., 0.])
    >>> b
    array([0., 0.])
    """
    return _npi.argmin(a, axis=axis, keepdims=False, out=out)
@set_module('mxnet.ndarray.numpy')
def average(a, axis=None, weights=None, returned=False, out=None):
    """
    Compute the (optionally weighted) average along the specified axis.

    Parameters
    ----------
    a : ndarray
        Array containing data to be averaged.
    axis : None or int or tuple of ints, optional
        Axis or axes along which to average `a`. The default, ``None``,
        averages over all elements. Negative axes count from the last
        axis; a tuple of ints averages over all listed axes at once.
    weights : ndarray, optional
        Weights associated with the values in `a`; must have the same
        dtype as `a`. Either 1-D with length equal to the size of `a`
        along the given axis, or the same shape as `a`. When ``None``,
        every element gets weight one. The 1-D computation is
        ``avg = sum(a * weights) / sum(weights)``, and ``sum(weights)``
        must not be 0.
    returned : bool, optional
        When True, return the tuple ``(average, sum_of_weights)``
        instead of the average alone. With ``weights=None``,
        ``sum_of_weights`` equals the number of averaged elements.
        Default is False.
    out : ndarray, optional
        If provided, the calculation is done into this array.

    Returns
    -------
    retval, [sum_of_weights] : ndarray
        The average along the specified axis (plus the weight sum when
        `returned` is True; it has the same type as `retval`). For
        integral `a` the result dtype is float32, otherwise the dtype
        of `a`.

    Raises
    ------
    MXNetError
        - When all weights along axis sum to zero.
        - When the length of 1D weights differs from the shape of `a` along axis.
        - When 1D weights are given but the axis is unspecified or not an int.
        - When weights and `a` differ in shape, but weights are not 1D.

    See also
    --------
    mean

    Notes
    -----
    This function differs from the original `numpy.average`
    <https://numpy.org/devdocs/reference/generated/numpy.average.html>`_ in
    the following way(s):
    - float16 overflow behaviour is not guaranteed to match numpy
    - complex dtypes are not supported
    - `a` and `weights` must share one dtype
    - integral `a` yields a float32 result, not float64

    Examples
    --------
    >>> np.average(np.arange(1, 5))
    array(2.5)
    >>> np.average(np.arange(1, 11), weights=np.arange(10, 0, -1))
    array(4.)
    >>> data = np.arange(6).reshape((3,2))
    >>> np.average(data, axis=1, weights=np.array([0.25, 0.75]))
    array([0.75, 2.75, 4.75])
    """
    # The weighted and unweighted paths call different kernel signatures:
    # the unweighted one must explicitly pass weighted=False.
    if weights is not None:
        return _npi.average(a, axis=axis, weights=weights, returned=returned, out=out)
    return _npi.average(a, axis=axis, weights=None, returned=returned, weighted=False, out=out)
@set_module('mxnet.ndarray.numpy')
def mean(a, axis=None, dtype=None, out=None, keepdims=False):  # pylint: disable=arguments-differ
    """
    mean(a, axis=None, dtype=None, out=None, keepdims=None)
    Compute the arithmetic mean along the specified axis.

    By default the mean is taken over the flattened array, otherwise
    over the given axis or axes.

    Parameters
    ----------
    a : ndarray
        ndarray containing the numbers whose mean is desired.
    axis : None or int or tuple of ints, optional
        Axis or axes along which the means are computed. The default
        computes the mean of the flattened array. A tuple of ints
        averages over several axes at once.
    dtype : data-type, optional
        Type used in the computation. Defaults to float32 for integer
        inputs and to the input dtype for floating-point inputs.
    out : ndarray, optional
        Alternate output array for the result; must have the same shape
        and type as the expected output. Default is None.
    keepdims : bool, optional
        When True, the reduced axes are kept with size one so the
        result broadcasts correctly against the input. Non-default
        values are forwarded to sub-class `mean` methods; sub-classes
        that do not implement `keepdims` will raise.

    Returns
    -------
    m : ndarray, see dtype parameter above
        A new array of mean values, or a reference to `out` when it was
        supplied.

    Notes
    -----
    This function differs from the original `numpy.mean
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.mean.html>`_ in
    the following way(s):
    - only ndarray input is accepted; Python iterables and scalars are not
    - the default dtype for integer input is float32

    Examples
    --------
    >>> np.mean(np.array([[1, 2], [3, 4]]))
    array(2.5)
    """
    return _npi.mean(a, axis=axis, dtype=dtype, keepdims=keepdims, out=out)
@set_module('mxnet.ndarray.numpy')
def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):  # pylint: disable=too-many-arguments
    """
    Compute the standard deviation along the specified axis.

    The standard deviation measures the spread of a distribution. It is
    computed for the flattened array by default, otherwise over the
    specified axis.

    Parameters
    ----------
    a : ndarray
        Values whose standard deviation is calculated.
    axis : None or int or tuple of ints, optional
        Axis or axes along which to compute. The default uses the
        flattened array; a tuple of ints reduces over several axes.
    dtype : dtype, optional
        Type used in the computation. For integer arrays the default is
        float64; for float arrays it is the array's own type.
    out : ndarray, optional
        Alternative output array; must have the expected output shape.
        The computed values are cast to its type if necessary.
    ddof : int, optional
        "Delta Degrees of Freedom": the divisor used is ``N - ddof``,
        where ``N`` is the number of elements. Default 0.
    keepdims : bool, optional
        When True, the reduced axes are kept with size one so the
        result broadcasts correctly against the input. Non-default
        values are forwarded to sub-class `std` methods; sub-classes
        that do not implement `keepdims` will raise.

    Returns
    -------
    standard_deviation : ndarray, see dtype parameter above.
        A new array with the standard deviation, or a reference to
        `out` when it was supplied.

    Examples
    --------
    >>> a = np.array([[1, 2], [3, 4]])
    >>> np.std(a)
    1.1180339887498949 # may vary
    >>> np.std(a, axis=0)
    array([1., 1.])
    >>> np.std(a, axis=1)
    array([0.5, 0.5])
    """
    return _npi.std(a, axis=axis, dtype=dtype, ddof=ddof, keepdims=keepdims, out=out)
@set_module('mxnet.ndarray.numpy')
def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):  # pylint: disable=too-many-arguments
    """
    Compute the variance along the specified axis.

    The variance measures the spread of a distribution. It is computed
    for the flattened array by default, otherwise over the specified
    axis.

    Parameters
    ----------
    a : ndarray
        Values whose variance is desired. Non-array inputs trigger a
        conversion attempt.
    axis : None or int or tuple of ints, optional
        Axis or axes along which to compute. The default uses the
        flattened array; a tuple of ints reduces over several axes.
    dtype : data-type, optional
        Type used in the computation. For integer arrays the default is
        `float32`; for float arrays it is the array's own type.
    out : ndarray, optional
        Alternate output array; must have the expected output shape.
        The computed values are cast to its type if necessary.
    ddof : int, optional
        "Delta Degrees of Freedom": the divisor used is ``N - ddof``,
        where ``N`` is the number of elements. Default 0.
    keepdims : bool, optional
        When True, the reduced axes are kept with size one so the
        result broadcasts correctly against the input. Non-default
        values are forwarded to sub-class `var` methods; sub-classes
        that do not implement `keepdims` will raise.

    Returns
    -------
    variance : ndarray, see dtype parameter above
        A new array with the variance, or a reference to `out` when it
        was supplied.

    Examples
    --------
    >>> a = np.array([[1, 2], [3, 4]])
    >>> np.var(a)
    array(1.25)
    >>> np.var(a, axis=0)
    array([1., 1.])
    >>> np.var(a, axis=1)
    array([0.25, 0.25])
    """
    return _npi.var(a, axis=axis, dtype=dtype, ddof=ddof, keepdims=keepdims, out=out)
# pylint: disable=redefined-outer-name
@set_module('mxnet.ndarray.numpy')
def indices(dimensions, dtype=_np.int32, ctx=None):
    """Return an array representing the indices of a grid.

    Compute an array where the subarrays contain index values 0,1,...
    varying only along the corresponding axis.

    Parameters
    ----------
    dimensions : sequence of ints
        The shape of the grid.
    dtype : data-type, optional
        The desired data-type for the array. Default is `int32`.
    ctx : device context, optional
        Device context on which the memory is allocated. Default is
        `mxnet.context.current_context()`.

    Returns
    -------
    grid : ndarray
        The array of grid indices,
        ``grid.shape = (len(dimensions),) + tuple(dimensions)``.

    Raises
    ------
    ValueError
        If `dimensions` is not a tuple or list.

    Notes
    -----
    The output shape is obtained by prepending the number of dimensions
    in front of the tuple of dimensions, i.e. if `dimensions` is a tuple
    ``(r0, ..., rN-1)`` of length ``N``, the output shape is
    ``(N,r0,...,rN-1)``.
    The subarrays ``grid[k]`` contains the N-D array of indices along the
    ``k-th`` axis. Explicitly::
        grid[k,i0,i1,...,iN-1] = ik

    Examples
    --------
    >>> grid = np.indices((2, 3))
    >>> grid.shape
    (2, 2, 3)
    >>> grid[0]        # row indices
    array([[0, 0, 0],
           [1, 1, 1]], dtype=int32)
    >>> grid[1]        # column indices
    array([[0, 1, 2],
           [0, 1, 2]], dtype=int32)

    The indices can be used as an index into an array.
    >>> x = np.arange(20).reshape(5, 4)
    >>> row, col = np.indices((2, 3))
    >>> x[row, col]
    array([[0., 1., 2.],
           [4., 5., 6.]])
    Note that it would be more straightforward in the above example to
    extract the required elements directly with ``x[:2, :3]``.
    """
    # Guard clause: reject anything that is not a plain sequence of ints.
    if not isinstance(dimensions, (tuple, list)):
        raise ValueError("The dimensions must be sequence of ints")
    if ctx is None:
        ctx = current_context()
    return _npi.indices(dimensions=dimensions, dtype=dtype, ctx=ctx)
# pylint: enable=redefined-outer-name
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def copysign(x1, x2, out=None, **kwargs):
    r"""
    Change the sign of `x1` to that of `x2`, element-wise.

    A scalar `x2` has its sign copied to every element of `x1`.

    Parameters
    ----------
    x1 : ndarray or scalar
        Values whose sign is changed.
    x2 : ndarray or scalar
        Source of the signs copied onto `x1`.
    out : ndarray or None, optional
        Location into which the result is stored; must be of the right
        shape and type. If omitted or `None`, a new array is allocated.

    Returns
    -------
    out : ndarray or scalar
        The values of `x1` carrying the sign of `x2`; a scalar when
        both inputs are scalars.

    Notes
    -----
    This function differs from the original `numpy.copysign
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.copysign.html>`_ in
    the following aspects:
    - the ``where`` parameter is not supported.

    Examples
    --------
    >>> np.copysign(1.3, -1)
    -1.3
    >>> a = np.array([-1, 0, 1])
    >>> np.copysign(a, -1.1)
    array([-1., -0., -1.])
    >>> np.copysign(a, np.arange(3)-1)
    array([-1.,  0.,  1.])
    """
    # Dispatch to the tensor/tensor, tensor/scalar, scalar/tensor or
    # pure-scalar implementation as appropriate.
    return _ufunc_helper(x1, x2, _npi.copysign, _np.copysign, _npi.copysign_scalar, _npi.rcopysign_scalar, out)
@set_module('mxnet.ndarray.numpy')
def ravel(x, order='C'):
    r"""
    ravel(x)
    Return a contiguous flattened array.

    A 1-D array containing the elements of the input is returned; a
    copy is made only when needed.

    Parameters
    ----------
    x : ndarray
        Input array. Elements are read in row-major, C-style order and
        packed into a 1-D array.
    order : `C`, optional
        Only row-major, C-style order is supported.

    Returns
    -------
    y : ndarray
        An array of the same subtype as `x` with shape ``(x.size,)``.
        Matrices are special-cased for backward compatibility: a matrix
        input yields a 1-D ndarray.

    Notes
    -----
    This function differs from the original numpy.arange in the following aspects:
    - Only support row-major, C-style order.

    Examples
    --------
    Equivalent to ``reshape(x, -1)``.
    >>> x = np.array([[1, 2, 3], [4, 5, 6]])
    >>> print(np.ravel(x))
    [1. 2. 3. 4. 5. 6.]
    >>> print(np.ravel(x.T))
    [1. 4. 2. 5. 3. 6.]
    """
    # Guard clauses with early returns instead of an if/elif chain.
    if order == 'F':
        raise NotImplementedError('order {} is not supported'.format(order))
    if isinstance(x, numeric_types):
        return _np.reshape(x, -1)
    if isinstance(x, NDArray):
        return _npi.reshape(x, -1)
    raise TypeError('type {} not supported'.format(str(type(x))))
def unravel_index(indices, shape, order='C'):  # pylint: disable=redefined-outer-name
    """
    Converts a flat index or array of flat indices into a tuple of coordinate arrays.

    Parameters
    ----------
    indices : array_like
        An integer array whose elements are indices into the flattened
        version of an array of dimensions `shape`.
    shape : tuple of ints
        The shape of the array to use for unraveling `indices`.
    order : str, optional
        Only 'C' (row-major) order is supported.

    Returns
    -------
    unraveled_coords : tuple of ndarray
        One coordinate array per dimension of `shape`; each has the
        same shape as `indices`.

    Raises
    ------
    NotImplementedError
        If `order` is anything other than 'C'.

    Examples
    --------
    >>> np.unravel_index([22, 41, 37], (7,6))
    ([3. 6. 6.]
     [4. 5. 1.])
    >>> np.unravel_index(1621, (6,7,8,9))
    (3, 1, 4, 1)
    """
    # Guard clause: only row-major unraveling is implemented.
    if order != 'C':
        raise NotImplementedError('Do not support column-major (Fortran-style) order at this moment')
    # Python scalars are delegated straight to NumPy.
    if isinstance(indices, numeric_types):
        return _np.unravel_index(indices, shape)
    ret = _npi.unravel_index_fallback(indices, shape=shape)
    # The fallback op returns a list of per-dimension coordinate arrays;
    # convert it directly instead of accumulating element by element.
    return tuple(ret)
def diag_indices_from(arr):
    """
    Return a tuple of indices addressing the main diagonal of `arr`.

    For ``arr.ndim = 2`` this is the usual diagonal; for ``arr.ndim > 2``
    it is the set of indices accessing ``arr[i, i, ..., i]`` for
    ``i = 0..n-1``. The array must have ``arr.ndim >= 2`` and shape
    ``(n, n, ..., n)``.

    Parameters
    ----------
    arr : ndarray
        Input array whose main diagonal is addressed. All dimensions
        should have equal length.

    Returns
    -------
    diag : tuple of ndarray
        Indices of the main diagonal, one index array per dimension.

    Examples
    --------
    >>> a = np.arange(16).reshape(4, 4)
    >>> idx = np.diag_indices_from(a)
    >>> idx
    (array([0, 1, 2, 3]), array([0, 1, 2, 3]))
    >>> a[idx] = 100
    >>> a
    array([[100,   1,   2,   3],
           [  4, 100,   6,   7],
           [  8,   9, 100,  11],
           [ 12,  13,  14, 100]])
    """
    # The backend returns one coordinate array per dimension; expose them
    # as a tuple to mirror NumPy's advanced-indexing convention.
    per_axis_indices = _npi.diag_indices_from(arr)
    return tuple(per_axis_indices)
@set_module('mxnet.ndarray.numpy')
def hanning(M, dtype=_np.float32, ctx=None):
    r"""Return the Hanning window.

    The Hanning window is a taper formed by using a weighted cosine.

    Parameters
    ----------
    M : int
        Number of points in the output window. An empty array is
        returned when `M` is zero or less.
    dtype : str or numpy.dtype, optional
        Value type; default is `float32`. Note that you need to select
        numpy.float32 or float64 in this operator.
    ctx : Context, optional
        Device context (default is the current default context).

    Returns
    -------
    out : ndarray, shape(M,)
        The window, with the maximum value normalized to one (the value
        one appears only if `M` is odd).

    See Also
    --------
    blackman, hamming

    Notes
    -----
    The Hanning window is defined as
    .. math:: w(n) = 0.5 - 0.5cos\left(\frac{2\pi{n}}{M-1}\right)
              \qquad 0 \leq n \leq M-1
    Named for Julius von Hann, it is also known as the Cosine Bell or
    (to avoid confusion with the similar Hamming window) the Hann
    window. It is widely used in signal processing as a windowing /
    apodization / tapering function for smoothing values.

    References
    ----------
    .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
           spectra, Dover Publications, New York.
    .. [2] Wikipedia, "Window function",
           http://en.wikipedia.org/wiki/Window_function

    Examples
    --------
    >>> np.hanning(12)
    array([0.        , 0.07937324, 0.29229254, 0.5711574 , 0.8274304 ,
           0.9797465 , 0.97974646, 0.82743025, 0.5711573 , 0.29229245,
           0.07937312, 0.        ])
    """
    # Fall back to the ambient device context when none is given.
    ctx = current_context() if ctx is None else ctx
    return _npi.hanning(M, dtype=dtype, ctx=ctx)
@set_module('mxnet.ndarray.numpy')
def hamming(M, dtype=_np.float32, ctx=None):
    r"""Return the Hamming window.

    The Hamming window is a taper formed by using a weighted cosine.

    Parameters
    ----------
    M : int
        Number of points in the output window. An empty array is
        returned when `M` is zero or less.
    dtype : str or numpy.dtype, optional
        Value type; default is `float32`. Note that you need to select
        numpy.float32 or float64 in this operator.
    ctx : Context, optional
        Device context (default is the current default context).

    Returns
    -------
    out : ndarray, shape(M,)
        The window, with the maximum value normalized to one (the value
        one appears only if `M` is odd).

    See Also
    --------
    blackman, hanning

    Notes
    -----
    The Hamming window is defined as
    .. math:: w(n) = 0.54 - 0.46cos\left(\frac{2\pi{n}}{M-1}\right)
              \qquad 0 \leq n \leq M-1
    Named for R. W. Hamming, an associate of J. W. Tukey, and described
    in Blackman and Tukey, it was recommended for smoothing the
    truncated autocovariance function in the time domain. It is widely
    used in signal processing as a windowing / apodization / tapering
    function for smoothing values.

    References
    ----------
    .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
           spectra, Dover Publications, New York.
    .. [2] Wikipedia, "Window function",
           https://en.wikipedia.org/wiki/Window_function

    Examples
    --------
    >>> np.hamming(12)
    array([0.08000001, 0.15302339, 0.34890914, 0.6054648 , 0.841236  ,
           0.9813669 , 0.9813668 , 0.8412359 , 0.6054647 , 0.34890908,
           0.15302327, 0.08000001])
    """
    # Fall back to the ambient device context when none is given.
    ctx = current_context() if ctx is None else ctx
    return _npi.hamming(M, dtype=dtype, ctx=ctx)
@set_module('mxnet.ndarray.numpy')
def blackman(M, dtype=_np.float32, ctx=None):
    r"""Return the Blackman window.

    The Blackman window is a taper formed from the first three terms of
    a summation of cosines, designed for close-to-minimal leakage. It
    is near optimal, only slightly worse than a Kaiser window.

    Parameters
    ----------
    M : int
        Number of points in the output window. An empty array is
        returned when `M` is zero or less.
    dtype : str or numpy.dtype, optional
        Value type; default is `float32`. Note that you need to select
        numpy.float32 or float64 in this operator.
    ctx : Context, optional
        Device context (default is the current default context).

    Returns
    -------
    out : ndarray
        The window, with the maximum value normalized to one (the value
        one appears only if the number of samples is odd).

    See Also
    --------
    hamming, hanning

    Notes
    -----
    The Blackman window is defined as
    .. math:: w(n) = 0.42 - 0.5 \cos(2\pi n/{M-1}) + 0.08 \cos(4\pi n/{M-1})
    It is widely used in signal processing as a windowing / apodization
    / tapering function, and is known as a "near optimal" taper, almost
    as good (by some measures) as the Kaiser window.

    References
    ----------
    Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra,
    Dover Publications, New York.
    Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing.
    Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471.

    Examples
    --------
    >>> np.blackman(12)
    array([-1.4901161e-08,  3.2606423e-02,  1.5990365e-01,  4.1439798e-01,
            7.3604530e-01,  9.6704686e-01,  9.6704674e-01,  7.3604506e-01,
            4.1439781e-01,  1.5990359e-01,  3.2606363e-02, -1.4901161e-08])
    """
    # Fall back to the ambient device context when none is given.
    ctx = current_context() if ctx is None else ctx
    return _npi.blackman(M, dtype=dtype, ctx=ctx)
@set_module('mxnet.ndarray.numpy')
def flip(m, axis=None, out=None):
    r"""
    flip(m, axis=None, out=None)
    Reverse the order of elements in an array along the given axis.

    The shape of the array is preserved; only the element order changes.

    Parameters
    ----------
    m : ndarray or scalar
        Input array.
    axis : None or int or tuple of ints, optional
        Axis or axes along which to flip. The default, ``None``, flips
        over every axis. Negative axes count from the last axis; a
        tuple of ints flips over all listed axes.
    out : ndarray or scalar, optional
        Alternative output array; must have the same shape and type as
        the expected output.

    Returns
    -------
    out : ndarray or scalar
        A view of `m` with the entries along `axis` reversed. Since a
        view is returned, this operation is done in constant time.

    Examples
    --------
    >>> A = np.arange(8).reshape((2,2,2))
    >>> np.flip(A, 0)
    array([[[4, 5],
            [6, 7]],
           [[0, 1],
            [2, 3]]])
    >>> np.flip(A)
    array([[[7, 6],
            [5, 4]],
           [[3, 2],
            [1, 0]]])
    """
    from ...numpy import ndarray
    # Guard clauses with early returns instead of an if/elif chain.
    if isinstance(m, numeric_types):
        return _np.flip(m, axis)
    if isinstance(m, ndarray):
        return _npi.flip(m, axis, out=out)
    raise TypeError('type {} not supported'.format(str(type(m))))
@set_module('mxnet.ndarray.numpy')
def flipud(m):
    r"""
    flipud(*args, **kwargs)
    Flip an array in the up/down direction.

    The entries of each column are reversed, so rows are preserved but
    appear in reversed order.

    Parameters
    ----------
    m : array_like
        Input array.

    Returns
    -------
    out : array_like
        A view of `m` with the rows reversed. Since a view is returned,
        this operation is :math:`\mathcal O(1)`.

    See Also
    --------
    fliplr : Flip array in the left/right direction.
    rot90 : Rotate array counterclockwise.

    Notes
    -----
    Equivalent to ``m[::-1,...]``; the array need not be
    two-dimensional.

    Examples
    --------
    >>> A = np.diag(np.array([1.0, 2, 3]))
    >>> np.flipud(A)
    array([[0., 0., 3.],
           [0., 2., 0.],
           [1., 0., 0.]])
    >>> np.flipud(np.array([1,2]))
    array([2., 1.])
    """
    # Up/down flipping is simply a flip along the first axis.
    return flip(m, axis=0)
@set_module('mxnet.ndarray.numpy')
def fliplr(m):
    r"""
    fliplr(*args, **kwargs)
    Reverse the elements of an array along its second axis (left/right).
    Each row has its entries reversed; the columns themselves are kept
    intact but show up in the opposite order.
    Parameters
    ----------
    m : array_like
        The array to flip; must have at least two dimensions.
    Returns
    -------
    f : ndarray
        A view of `m` with the columns in reversed order.  Because only
        a view is produced, the operation costs :math:`\mathcal O(1)`.
    See Also
    --------
    flipud : Flip array in the up/down direction.
    rot90 : Rotate array counterclockwise.
    Notes
    -----
    Behaves like ``m[:,::-1]``; the input must be at least 2-D.
    Examples
    --------
    >>> A = np.diag(np.array([1.,2.,3.]))
    >>> A
    array([[1., 0., 0.],
           [0., 2., 0.],
           [0., 0., 3.]])
    >>> np.fliplr(A)
    array([[0., 0., 1.],
           [0., 2., 0.],
           [3., 0., 0.]])
    >>> A = np.random.randn(2,3,5)
    >>> np.all(np.fliplr(A) == A[:,::-1,...])
    array(True)
    """
    # Delegate to ``flip``: reversing along axis 1 flips the column order.
    return flip(m, 1)
@set_module('mxnet.ndarray.numpy')
def around(x, decimals=0, out=None, **kwargs):
    r"""
    around(x, decimals=0, out=None)
    Evenly round to the given number of decimals.
    Parameters
    ----------
    x : ndarray or scalar
        Input data.
    decimals : int, optional
        Number of decimal places to round to (default: 0). If
        decimals is negative, it specifies the number of positions to
        the left of the decimal point.
    out : ndarray, optional
        Alternative output array in which to place the result. It must have
        the same shape and type as the expected output.
    Returns
    -------
    rounded_array : ndarray or scalar
        An array of the same type as `x`, containing the rounded values.
        A reference to the result is returned.
    Notes
    -----
    For values exactly halfway between rounded decimal values, NumPy
    rounds to the nearest even value. Thus 1.5 and 2.5 round to 2.0,
    -0.5 and 0.5 round to 0.0, etc.
    This function differs from the original numpy.around in the following aspects:
    - Cannot cast type automatically. Dtype of `out` must be same as the expected one.
    - Cannot support complex-valued number.
    Examples
    --------
    >>> np.around([0.37, 1.64])
    array([ 0.,  2.])
    >>> np.around([0.37, 1.64], decimals=1)
    array([ 0.4,  1.6])
    >>> np.around([.5, 1.5, 2.5, 3.5, 4.5]) # rounds to nearest even value
    array([ 0.,  2.,  2.,  4.,  4.])
    >>> np.around([1, 2, 3, 11], decimals=1) # ndarray of ints is returned
    array([ 1,  2,  3, 11])
    >>> np.around([1, 2, 3, 11], decimals=-1)
    array([ 0,  0,  0, 10])
    """
    from ...numpy import ndarray
    # Python/NumPy scalars are rounded by plain NumPy; mxnet tensors go
    # through the backend operator, which is the only path honouring ``out``.
    if isinstance(x, numeric_types):
        return _np.around(x, decimals, **kwargs)
    elif isinstance(x, ndarray):
        return _npi.around(x, decimals, out=out, **kwargs)
    else:
        raise TypeError('type {} not supported'.format(str(type(x))))
@set_module('mxnet.ndarray.numpy')
def round(x, decimals=0, out=None, **kwargs):
    r"""
    round_(a, decimals=0, out=None)
    Round an array to the given number of decimals.
    See Also
    --------
    around : equivalent function; see for details.
    """
    from ...numpy import ndarray
    # Same dispatch as ``around``: scalars via plain NumPy, tensors via
    # the backend operator (only the tensor path supports ``out``).
    if isinstance(x, numeric_types):
        return _np.around(x, decimals, **kwargs)
    if isinstance(x, ndarray):
        return _npi.around(x, decimals, out=out, **kwargs)
    raise TypeError('type {} not supported'.format(str(type(x))))
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def arctan2(x1, x2, out=None, **kwargs):
    r"""
    Element-wise arc tangent of ``x1/x2`` choosing the quadrant correctly.
    The quadrant (i.e., branch) is chosen so that ``arctan2(x1, x2)`` is
    the signed angle in radians between the ray ending at the origin and
    passing through the point (1,0), and the ray ending at the origin and
    passing through the point (`x2`, `x1`). (Note the role reversal: the
    "`y`-coordinate" is the first function parameter, the "`x`-coordinate"
    is the second.) By IEEE convention, this function is defined for
    `x2` = +/-0 and for either or both of `x1` and `x2` = +/-inf (see
    Notes for specific values).
    This function is not defined for complex-valued arguments; for the
    so-called argument of complex values, use `angle`.
    Parameters
    ----------
    x1 : ndarray or scalar
        `y`-coordinates.
    x2 : ndarray or scalar
        `x`-coordinates. `x2` must be broadcastable to match the shape of
        `x1` or vice versa.
    out : ndarray or None, optional
        A location into which the result is stored. If provided, it must have
        a shape that the inputs broadcast to. If not provided or `None`,
        a freshly-allocated array is returned.
    Returns
    -------
    out : ndarray or scalar
        Array of angles in radians, in the range ``[-pi, pi]``. This is a scalar if
        `x1` and `x2` are scalars.
    Notes
    -----
    *arctan2* is identical to the `atan2` function of the underlying
    C library. The following special values are defined in the C
    standard: [1]_
    ====== ====== ================
    `x1`   `x2`   `arctan2(x1,x2)`
    ====== ====== ================
    +/- 0  +0     +/- 0
    +/- 0  -0     +/- pi
     > 0   +/-inf +0 / +pi
     < 0   +/-inf -0 / -pi
    +/-inf +inf   +/- (pi/4)
    +/-inf -inf   +/- (3*pi/4)
    ====== ====== ================
    Note that +0 and -0 are distinct floating point numbers, as are +inf
    and -inf.
    This function differs from the original numpy.arctan2 in the following aspects:
    - Only support float16, float32 and float64.
    References
    ----------
    .. [1] ISO/IEC standard 9899:1999, "Programming language C."
    Examples
    --------
    Consider four points in different quadrants:
    >>> x = np.array([-1, +1, +1, -1])
    >>> y = np.array([-1, -1, +1, +1])
    >>> np.arctan2(y, x) * 180 / np.pi
    array([-135.,  -45.,   45.,  135.])
    Note the order of the parameters. `arctan2` is defined also when `x2` = 0
    and at several other special points, obtaining values in
    the range ``[-pi, pi]``:
    >>> x = np.array([1, -1])
    >>> y = np.array([0, 0])
    >>> np.arctan2(x, y)
    array([ 1.5707964, -1.5707964])
    """
    # Dispatch tensor-tensor / scalar-scalar / tensor-scalar forms; the
    # reflected scalar op handles ``arctan2(scalar, tensor)``.
    return _ufunc_helper(x1, x2, _npi.arctan2, _np.arctan2,
                         _npi.arctan2_scalar, _npi.rarctan2_scalar, out=out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def hypot(x1, x2, out=None, **kwargs):
    r"""
    Given the "legs" of a right triangle, return its hypotenuse.
    Equivalent to ``sqrt(x1**2 + x2**2)``, element-wise.  If `x1` or
    `x2` is scalar_like (i.e., unambiguously cast-able to a scalar type),
    it is broadcast for use with each element of the other argument.
    Parameters
    ----------
    x1, x2 : ndarray
        Leg of the triangle(s).
    out : ndarray, None, or tuple of ndarray and None, optional
        A location into which the result is stored. If provided, it must have
        a shape that the inputs broadcast to. If not provided or `None`,
        a freshly-allocated array is returned. A tuple (possible only as a
        keyword argument) must have length equal to the number of outputs.
    Returns
    -------
    z : ndarray
        The hypotenuse of the triangle(s).
        This is a scalar if both `x1` and `x2` are scalars.
    Notes
    -----
    This function differs from the original numpy.hypot in the following aspects:
    - Only support float16, float32 and float64.
    Examples
    --------
    >>> np.hypot(3*np.ones((3, 3)), 4*np.ones((3, 3)))
    array([[ 5.,  5.,  5.],
           [ 5.,  5.,  5.],
           [ 5.,  5.,  5.]])
    Example showing broadcast of scalar_like argument:
    >>> np.hypot(3*np.ones((3, 3)), [4])
    array([[ 5.,  5.,  5.],
           [ 5.,  5.,  5.],
           [ 5.,  5.,  5.]])
    """
    # hypot is symmetric, so a single scalar kernel covers both operand
    # orders (no reflected scalar op is needed, hence ``None``).
    return _ufunc_helper(x1, x2, _npi.hypot, _np.hypot, _npi.hypot_scalar, None, out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def bitwise_and(x1, x2, out=None, **kwargs):
    r"""
    Compute the bit-wise AND of two arrays element-wise.
    Parameters
    ----------
    x1, x2 : ndarray or scalar
        Only integer and boolean types are handled. If x1.shape != x2.shape,
        they must be broadcastable to a common shape (which becomes the shape of the output).
    out : ndarray, optional
        A location into which the result is stored. If provided, it must have a shape that the
        inputs broadcast to. If not provided or None, a freshly-allocated array is returned.
    Returns
    -------
    out : ndarray
        Result.
    Examples
    --------
    >>> np.bitwise_and(13, 17)
    1
    >>> np.bitwise_and(14, 13)
    12
    >>> np.bitwise_and(np.array([14,3], dtype='int32'), 13)
    array([12,  1], dtype=int32)
    >>> np.bitwise_and(np.array([11,7], dtype='int32'), np.array([4,25], dtype='int32'))
    array([0, 1], dtype=int32)
    >>> np.bitwise_and(np.array([2,5,255], dtype='int32'), np.array([3,14,16], dtype='int32'))
    array([ 2,  4, 16], dtype=int32)
    >>> np.bitwise_and(np.array([True, True], dtype='bool'), np.array([False, True], dtype='bool'))
    array([False,  True])
    """
    # AND is commutative, so one scalar kernel covers both operand orders
    # (no reflected scalar op, hence ``None``).
    return _ufunc_helper(x1, x2, _npi.bitwise_and, _np.bitwise_and, _npi.bitwise_and_scalar, None, out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def bitwise_xor(x1, x2, out=None, **kwargs):
    r"""
    Compute the bit-wise XOR of two arrays element-wise.
    Parameters
    ----------
    x1, x2 : ndarray or scalar
        Inputs; only integer and boolean dtypes are supported.  When the
        shapes differ, the operands must be broadcastable to a common
        shape, which becomes the shape of the result.
    out : ndarray, optional
        Array into which the result is placed.  It must have a shape the
        inputs broadcast to; when omitted or ``None``, a new array is
        allocated.
    Returns
    -------
    out : ndarray
        Result.
    Examples
    --------
    >>> np.bitwise_xor(13, 17)
    28
    >>> np.bitwise_xor(31, 5)
    26
    >>> np.bitwise_xor(np.array([31,3], dtype='int32'), 5)
    array([26,  6])
    >>> np.bitwise_xor(np.array([31,3], dtype='int32'), np.array([5,6], dtype='int32'))
    array([26,  5])
    >>> np.bitwise_xor(np.array([True, True], dtype='bool'), np.array([False, True], dtype='bool'))
    array([ True, False])
    """
    # XOR is commutative, so one scalar kernel covers both operand orders
    # (no reflected scalar op, hence ``None``).
    return _ufunc_helper(x1, x2, _npi.bitwise_xor, _np.bitwise_xor,
                         _npi.bitwise_xor_scalar, None, out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def bitwise_or(x1, x2, out=None, **kwargs):
    r"""
    Compute the bit-wise OR of two arrays element-wise.
    Parameters
    ----------
    x1, x2 : ndarray or scalar
        Inputs; only integer and boolean dtypes are supported.  When the
        shapes differ, the operands must be broadcastable to a common
        shape, which becomes the shape of the result.
    out : ndarray, optional
        Array into which the result is placed.  It must have a shape the
        inputs broadcast to; when omitted or ``None``, a new array is
        allocated.
    Returns
    -------
    out : ndarray
        Result.
    Examples
    --------
    >>> np.bitwise_or(13, 17)
    29
    >>> np.bitwise_or(31, 5)
    31
    >>> np.bitwise_or(np.array([31,3], dtype='int32'), 5)
    array([31,  7])
    >>> np.bitwise_or(np.array([31,3], dtype='int32'), np.array([5,6], dtype='int32'))
    array([31,  7])
    >>> np.bitwise_or(np.array([True, True], dtype='bool'), np.array([False, True], dtype='bool'))
    array([ True,  True])
    """
    # OR is commutative, so one scalar kernel covers both operand orders
    # (no reflected scalar op, hence ``None``).
    return _ufunc_helper(x1, x2, _npi.bitwise_or, _np.bitwise_or,
                         _npi.bitwise_or_scalar, None, out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def ldexp(x1, x2, out=None, **kwargs):
    """
    Returns x1 * 2**x2, element-wise.
    Floating point numbers ``x1 * 2**x2`` are built from the mantissas
    `x1` and the twos exponents `x2`.
    Parameters
    ----------
    x1 : ndarray or scalar
        Array of multipliers.
    x2 : ndarray or scalar, int
        Array of twos exponents.
    out : ndarray, optional
        Array into which the result is placed.  It must have a shape the
        inputs broadcast to; otherwise a new array is allocated.
    Returns
    -------
    y : ndarray or scalar
        The result of ``x1 * 2**x2``; a scalar when both `x1` and `x2`
        are scalars.
    Notes
    -----
    Complex dtypes are not supported, they will raise a TypeError.
    Different from numpy, we allow x2 to be float besides int.
    `ldexp` is the inverse of `frexp`; used standalone it is clearer to
    simply write ``x1 * 2**x2``.
    Examples
    --------
    >>> np.ldexp(5, np.arange(4))
    array([  5.,  10.,  20.,  40.])
    """
    # ldexp is NOT commutative, so a reflected scalar kernel handles the
    # ``ldexp(scalar, tensor)`` case.
    return _ufunc_helper(x1, x2, _npi.ldexp, _np.ldexp,
                         _npi.ldexp_scalar, _npi.rldexp_scalar, out)
@set_module('mxnet.ndarray.numpy')
def inner(a, b):
    r"""
    Inner product of two arrays.
    For 1-D arrays this is the ordinary vector inner product (without
    complex conjugation); for higher dimensions it is a sum product over
    the last axes.
    Parameters
    ----------
    a, b : ndarray
        If `a` and `b` are nonscalar, their last dimensions must match.
    Returns
    -------
    out : ndarray
        `out.shape = a.shape[:-1] + b.shape[:-1]`
    Raises
    ------
    ValueError
        If the last dimension of `a` and `b` has different size.
    See Also
    --------
    tensordot : Sum products over arbitrary axes.
    dot : Generalised matrix product, using second last dimension of `b`.
    einsum : Einstein summation convention.
    Notes
    -----
    For vectors (1-D arrays) it computes the ordinary inner-product::
        np.inner(a, b) = sum(a[:]*b[:])
    More generally, if `ndim(a) = r > 0` and `ndim(b) = s > 0`::
        np.inner(a, b) = np.tensordot(a, b, axes=(-1,-1))
    or explicitly::
        np.inner(a, b)[i0,...,ir-1,j0,...,js-1]
             = sum(a[i0,...,ir-1,:]*b[j0,...,js-1,:])
    In addition `a` or `b` may be scalars, in which case::
       np.inner(a,b) = a*b
    Examples
    --------
    Ordinary inner product for vectors:
    >>> a = np.array([1,2,3])
    >>> b = np.array([0,1,0])
    >>> np.inner(a, b)
    2
    A multidimensional example:
    >>> a = np.arange(24).reshape((2,3,4))
    >>> b = np.arange(4)
    >>> np.inner(a, b)
    array([[ 14,  38,  62],
           [ 86, 110, 134]])
    """
    # An inner product is exactly a tensordot contracting the last axis
    # of each operand.
    contracted_axes = [-1, -1]
    return tensordot(a, b, contracted_axes)
@set_module('mxnet.ndarray.numpy')
def outer(a, b):
    r"""
    Compute the outer product of two vectors.
    For vectors ``a = [a0, a1, ..., aM]`` and ``b = [b0, b1, ..., bN]``,
    the outer product [1]_ is::
      [[a0*b0  a0*b1 ... a0*bN ]
       [a1*b0    .
       [ ...          .
       [aM*b0            aM*bN ]]
    Parameters
    ----------
    a : (M,) ndarray
        First input vector.  Flattened first when it is not already
        1-dimensional.
    b : (N,) ndarray
        Second input vector.  Flattened first when it is not already
        1-dimensional.
    Returns
    -------
    out : (M, N) ndarray
        ``out[i, j] = a[i] * b[j]``
    See also
    --------
    inner
    einsum : ``einsum('i,j->ij', a.ravel(), b.ravel())`` is the equivalent.
    ufunc.outer : A generalization to N dimensions and other operations.
                  ``np.multiply.outer(a.ravel(), b.ravel())`` is the equivalent.
    References
    ----------
    .. [1] : G. H. Golub and C. F. Van Loan, *Matrix Computations*, 3rd
             ed., Baltimore, MD, Johns Hopkins University Press, 1996,
             pg. 8.
    Examples
    --------
    Make a (*very* coarse) grid for computing a Mandelbrot set:
    >>> rl = np.outer(np.ones((5,)), np.linspace(-2, 2, 5))
    >>> rl
    array([[-2., -1.,  0.,  1.,  2.],
           [-2., -1.,  0.,  1.,  2.],
           [-2., -1.,  0.,  1.,  2.],
           [-2., -1.,  0.,  1.,  2.],
           [-2., -1.,  0.,  1.,  2.]])
    """
    # Flatten both operands to vectors; a tensordot with zero contracted
    # axes then yields the full outer product.
    a_vec = a.flatten()
    b_vec = b.flatten()
    return tensordot(a_vec, b_vec, 0)
@set_module('mxnet.ndarray.numpy')
def vdot(a, b):
    r"""
    Return the dot product of two vectors.
    Unlike `dot`, `vdot` does *not* perform a matrix product on
    multidimensional inputs: both arguments are flattened to 1-D vectors
    first.  It should therefore only be used for vectors.
    Parameters
    ----------
    a : ndarray
        First argument to the dot product.
    b : ndarray
        Second argument to the dot product.
    Returns
    -------
    output : ndarray
        Dot product of `a` and `b`.
    See Also
    --------
    dot : Return the dot product without using the complex conjugate of the
          first argument.
    Examples
    --------
    Note that higher-dimensional arrays are flattened!
    >>> a = np.array([[1, 4], [5, 6]])
    >>> b = np.array([[4, 1], [2, 2]])
    >>> np.vdot(a, b)
    30
    >>> np.vdot(b, a)
    30
    >>> 1*4 + 4*1 + 5*2 + 6*2
    30
    """
    # Flatten both operands to 1-D and contract one axis -- the classic
    # vector dot product (no complex conjugation is performed).
    a_vec = a.flatten()
    b_vec = b.flatten()
    return tensordot(a_vec, b_vec, 1)
@set_module('mxnet.ndarray.numpy')
def equal(x1, x2, out=None):
    """
    Return (x1 == x2) element-wise.
    Parameters
    ----------
    x1, x2 : ndarrays or scalars
        Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to
        a common shape (which becomes the shape of the output).
    out : ndarray, None, or tuple of ndarray and None, optional
        A location into which the result is stored. If provided, it must have
        a shape that the inputs broadcast to. If not provided or `None`,
        a freshly-allocated array is returned.
    Returns
    -------
    out : ndarray or scalar
        Output array of type bool, element-wise comparison of `x1` and `x2`.
        This is a scalar if both `x1` and `x2` are scalars.
    See Also
    --------
    not_equal, greater_equal, less_equal, greater, less
    Examples
    --------
    >>> np.equal(np.ones((2, 1)), np.zeros((1, 3)))
    array([[False, False, False],
           [False, False, False]])
    >>> np.equal(1, np.ones(1))
    array([ True])
    """
    # ``==`` is symmetric, so one scalar kernel covers both operand orders
    # (no reflected scalar op, hence ``None``).
    return _ufunc_helper(x1, x2, _npi.equal, _np.equal, _npi.equal_scalar, None, out)
@set_module('mxnet.ndarray.numpy')
def not_equal(x1, x2, out=None):
    """
    Return (x1 != x2) element-wise.
    Parameters
    ----------
    x1, x2 : ndarrays or scalars
        Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to
        a common shape (which becomes the shape of the output).
    out : ndarray, None, or tuple of ndarray and None, optional
        A location into which the result is stored. If provided, it must have
        a shape that the inputs broadcast to. If not provided or `None`,
        a freshly-allocated array is returned.
    Returns
    -------
    out : ndarray or scalar
        Output array of type bool, element-wise comparison of `x1` and `x2`.
        This is a scalar if both `x1` and `x2` are scalars.
    See Also
    --------
    equal, greater, greater_equal, less, less_equal
    Examples
    --------
    >>> np.not_equal(np.ones((2, 1)), np.zeros((1, 3)))
    array([[ True,  True,  True],
           [ True,  True,  True]])
    >>> np.not_equal(1, np.ones(1))
    array([False])
    """
    # ``!=`` is symmetric, so one scalar kernel covers both operand orders
    # (no reflected scalar op, hence ``None``).
    return _ufunc_helper(x1, x2, _npi.not_equal, _np.not_equal, _npi.not_equal_scalar, None, out)
@set_module('mxnet.ndarray.numpy')
def greater(x1, x2, out=None):
    """
    Return the truth value of (x1 > x2) element-wise.
    Parameters
    ----------
    x1, x2 : ndarrays or scalars
        Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to
        a common shape (which becomes the shape of the output).
    out : ndarray, None, or tuple of ndarray and None, optional
        A location into which the result is stored. If provided, it must have
        a shape that the inputs broadcast to. If not provided or `None`,
        a freshly-allocated array is returned.
    Returns
    -------
    out : ndarray or scalar
        Output array of type bool, element-wise comparison of `x1` and `x2`.
        This is a scalar if both `x1` and `x2` are scalars.
    See Also
    --------
    equal, not_equal, greater_equal, less, less_equal
    Examples
    --------
    >>> np.greater(np.ones((2, 1)), np.zeros((1, 3)))
    array([[ True,  True,  True],
           [ True,  True,  True]])
    >>> np.greater(1, np.ones(1))
    array([False])
    """
    # ``tensor > scalar`` uses greater_scalar; ``scalar > tensor`` is
    # rewritten as ``tensor < scalar`` via the reflected less_scalar.
    return _ufunc_helper(x1, x2, _npi.greater, _np.greater, _npi.greater_scalar,
                         _npi.less_scalar, out)
@set_module('mxnet.ndarray.numpy')
def less(x1, x2, out=None):
    """
    Return the truth value of (x1 < x2) element-wise.
    Parameters
    ----------
    x1, x2 : ndarrays or scalars
        Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to
        a common shape (which becomes the shape of the output).
    out : ndarray, None, or tuple of ndarray and None, optional
        A location into which the result is stored. If provided, it must have
        a shape that the inputs broadcast to. If not provided or `None`,
        a freshly-allocated array is returned.
    Returns
    -------
    out : ndarray or scalar
        Output array of type bool, element-wise comparison of `x1` and `x2`.
        This is a scalar if both `x1` and `x2` are scalars.
    See Also
    --------
    equal, not_equal, greater, greater_equal, less_equal
    Examples
    --------
    >>> np.less(np.ones((2, 1)), np.zeros((1, 3)))
    array([[False, False, False],
           [False, False, False]])
    >>> np.less(1, np.ones(1))
    array([False])
    """
    # ``tensor < scalar`` uses less_scalar; ``scalar < tensor`` is
    # rewritten as ``tensor > scalar`` via the reflected greater_scalar.
    return _ufunc_helper(x1, x2, _npi.less, _np.less, _npi.less_scalar, _npi.greater_scalar, out)
@set_module('mxnet.ndarray.numpy')
def greater_equal(x1, x2, out=None):
    """
    Return the truth value of (x1 >= x2) element-wise.
    Parameters
    ----------
    x1, x2 : ndarrays or scalars
        Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to
        a common shape (which becomes the shape of the output).
    out : ndarray, None, or tuple of ndarray and None, optional
        A location into which the result is stored. If provided, it must have
        a shape that the inputs broadcast to. If not provided or `None`,
        a freshly-allocated array is returned.
    Returns
    -------
    out : ndarray or scalar
        Output array of type bool, element-wise comparison of `x1` and `x2`.
        This is a scalar if both `x1` and `x2` are scalars.
    See Also
    --------
    equal, not_equal, greater, less, less_equal
    Examples
    --------
    >>> np.greater_equal(np.ones((2, 1)), np.zeros((1, 3)))
    array([[ True,  True,  True],
           [ True,  True,  True]])
    >>> np.greater_equal(1, np.ones(1))
    array([True])
    """
    # ``tensor >= scalar`` uses greater_equal_scalar; ``scalar >= tensor``
    # is rewritten as ``tensor <= scalar`` via the reflected kernel.
    return _ufunc_helper(x1, x2, _npi.greater_equal, _np.greater_equal, _npi.greater_equal_scalar,
                         _npi.less_equal_scalar, out)
@set_module('mxnet.ndarray.numpy')
def less_equal(x1, x2, out=None):
    """
    Return the truth value of (x1 <= x2) element-wise.
    Parameters
    ----------
    x1, x2 : ndarrays or scalars
        Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to
        a common shape (which becomes the shape of the output).
    out : ndarray, None, or tuple of ndarray and None, optional
        A location into which the result is stored. If provided, it must have
        a shape that the inputs broadcast to. If not provided or `None`,
        a freshly-allocated array is returned.
    Returns
    -------
    out : ndarray or scalar
        Output array of type bool, element-wise comparison of `x1` and `x2`.
        This is a scalar if both `x1` and `x2` are scalars.
    See Also
    --------
    equal, not_equal, greater, greater_equal, less
    Examples
    --------
    >>> np.less_equal(np.ones((2, 1)), np.zeros((1, 3)))
    array([[False, False, False],
           [False, False, False]])
    >>> np.less_equal(1, np.ones(1))
    array([True])
    """
    # ``tensor <= scalar`` uses less_equal_scalar; ``scalar <= tensor``
    # is rewritten as ``tensor >= scalar`` via the reflected kernel.
    return _ufunc_helper(x1, x2, _npi.less_equal, _np.less_equal, _npi.less_equal_scalar,
                         _npi.greater_equal_scalar, out)
@set_module('mxnet.ndarray.numpy')
def rot90(m, k=1, axes=(0, 1)):
    """
    Rotate an array by 90 degrees in the plane specified by axes.
    Rotation direction is from the first towards the second axis.
    Parameters
    ----------
    m : ndarray
        Array of two or more dimensions.
    k : integer
        Number of times the array is rotated by 90 degrees.
    axes : (2,) array_like
        The array is rotated in the plane defined by the axes.
        Axes must be different.
    Returns
    -------
    y : ndarray
        A rotated view of `m`.
    Notes
    -----
    rot90(m, k=1, axes=(1,0)) is the reverse of rot90(m, k=1, axes=(0,1))
    rot90(m, k=1, axes=(1,0)) is equivalent to rot90(m, k=-1, axes=(0,1))
    Examples
    --------
    >>> m = np.array([[1,2],[3,4]], 'int')
    >>> m
    array([[1, 2],
           [3, 4]], dtype=int64)
    >>> np.rot90(m)
    array([[2, 4],
           [1, 3]], dtype=int64)
    >>> np.rot90(m, 2)
    array([[4, 3],
           [2, 1]], dtype=int64)
    >>> m = np.arange(8).reshape((2,2,2))
    >>> np.rot90(m, 1, (1,2))
    array([[[1., 3.],
            [0., 2.]],
           [[5., 7.],
            [4., 6.]]])
    """
    # The backend operator implements the rotation directly.
    return _npi.rot90(m, k=k, axes=axes)
@set_module('mxnet.ndarray.numpy')
def einsum(*operands, **kwargs):
    r"""
    einsum(subscripts, *operands, out=None, optimize=False)
    Evaluates the Einstein summation convention on the operands.
    Using the Einstein summation convention, many common multi-dimensional,
    linear algebraic array operations can be represented in a simple fashion.
    In *implicit* mode `einsum` computes these values.
    In *explicit* mode, `einsum` provides further flexibility to compute
    other array operations that might not be considered classical Einstein
    summation operations, by disabling, or forcing summation over specified
    subscript labels.
    See the notes and examples for clarification.
    Parameters
    ----------
    subscripts : str
        Specifies the subscripts for summation as comma separated list of
        subscript labels. An implicit (classical Einstein summation)
        calculation is performed unless the explicit indicator '->' is
        included as well as subscript labels of the precise output form.
    operands : list of ndarray
        These are the arrays for the operation.
    out : ndarray, optional
        If provided, the calculation is done into this array.
    optimize : {False, True}, optional
        Controls if intermediate optimization should occur. No optimization
        will occur if False. Defaults to False.
    Returns
    -------
    output : ndarray
        The calculation based on the Einstein summation convention.
    Notes
    -----
    The Einstein summation convention can be used to compute
    many multi-dimensional, linear algebraic array operations. `einsum`
    provides a succinct way of representing these.
    A non-exhaustive list of these operations,
    which can be computed by `einsum`, is shown below along with examples:
    * Trace of an array, :py:func:`np.trace`.
    * Return a diagonal, :py:func:`np.diag`.
    * Array axis summations, :py:func:`np.sum`.
    * Transpositions and permutations, :py:func:`np.transpose`.
    * Matrix multiplication and dot product, :py:func:`np.matmul` :py:func:`np.dot`.
    * Vector inner and outer products, :py:func:`np.inner` :py:func:`np.outer`.
    * Broadcasting, element-wise and scalar multiplication, :py:func:`np.multiply`.
    * Tensor contractions, :py:func:`np.tensordot`.
    The subscripts string is a comma-separated list of subscript labels,
    where each label refers to a dimension of the corresponding operand.
    Whenever a label is repeated it is summed, so ``np.einsum('i,i', a, b)``
    is equivalent to :py:func:`np.inner(a,b) <np.inner>`. If a label
    appears only once, it is not summed, so ``np.einsum('i', a)`` produces a
    view of ``a`` with no changes. A further example ``np.einsum('ij,jk', a, b)``
    describes traditional matrix multiplication and is equivalent to
    :py:func:`np.matmul(a,b) <np.matmul>`. Repeated subscript labels in one
    operand take the diagonal. For example, ``np.einsum('ii', a)`` is equivalent
    to :py:func:`np.trace(a) <np.trace>`.
    In *implicit mode*, the chosen subscripts are important
    since the axes of the output are reordered alphabetically.  This
    means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while
    ``np.einsum('ji', a)`` takes its transpose. Additionally,
    ``np.einsum('ij,jk', a, b)`` returns a matrix multiplication, while,
    ``np.einsum('ij,jh', a, b)`` returns the transpose of the
    multiplication since subscript 'h' precedes subscript 'i'.
    In *explicit mode* the output can be directly controlled by
    specifying output subscript labels.  This requires the
    identifier '->' as well as the list of output subscript labels.
    This feature increases the flexibility of the function since
    summing can be disabled or forced when required. The call
    ``np.einsum('i->', a)`` is like :py:func:`np.sum(a, axis=-1) <np.sum>`,
    and ``np.einsum('ii->i', a)`` is like :py:func:`np.diag(a) <np.diag>`.
    The difference is that `einsum` does not allow broadcasting by default.
    Additionally ``np.einsum('ij,jh->ih', a, b)`` directly specifies the
    order of the output subscript labels and therefore returns matrix
    multiplication, unlike the example above in implicit mode.
    To enable and control broadcasting, use an ellipsis.  Default
    NumPy-style broadcasting is done by adding an ellipsis
    to the left of each term, like ``np.einsum('...ii->...i', a)``.
    To take the trace along the first and last axes,
    you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix
    product with the left-most indices instead of rightmost, one can do
    ``np.einsum('ij...,jk...->ik...', a, b)``.
    When there is only one operand, no axes are summed, and no output
    parameter is provided, a view into the operand is returned instead
    of a new array.  Thus, taking the diagonal as ``np.einsum('ii->i', a)``
    produces a view.
    The ``optimize`` argument which will optimize the contraction order
    of an einsum expression. For a contraction with three or more operands this
    can greatly increase the computational efficiency at the cost of a larger
    memory footprint during computation.
    Typically a 'greedy' algorithm is applied which empirical tests have shown
    returns the optimal path in the majority of cases. 'optimal' is not supported
    for now.
    This function differs from the original `numpy.einsum
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.einsum.html>`_ in
    the following way(s):
    - Does not support 'optimal' strategy
    - Does not support the alternative subscript like
        `einsum(op0, sublist0, op1, sublist1, ..., [sublistout])`
    - Does not produce view in any cases
    Examples
    --------
    >>> a = np.arange(25).reshape(5,5)
    >>> b = np.arange(5)
    >>> c = np.arange(6).reshape(2,3)
    Trace of a matrix:
    >>> np.einsum('ii', a)
    array(60.)
    Extract the diagonal (requires explicit form):
    >>> np.einsum('ii->i', a)
    array([ 0.,  6., 12., 18., 24.])
    Sum over an axis (requires explicit form):
    >>> np.einsum('ij->i', a)
    array([ 10.,  35.,  60.,  85., 110.])
    >>> np.sum(a, axis=1)
    array([ 10.,  35.,  60.,  85., 110.])
    For higher dimensional arrays summing a single axis can be done with ellipsis:
    >>> np.einsum('...j->...', a)
    array([ 10.,  35.,  60.,  85., 110.])
    Compute a matrix transpose, or reorder any number of axes:
    >>> np.einsum('ji', c)
    array([[0., 3.],
           [1., 4.],
           [2., 5.]])
    >>> np.einsum('ij->ji', c)
    array([[0., 3.],
           [1., 4.],
           [2., 5.]])
    >>> np.transpose(c)
    array([[0., 3.],
           [1., 4.],
           [2., 5.]])
    Vector inner products:
    >>> np.einsum('i,i', b, b)
    array(30.)
    Matrix vector multiplication:
    >>> np.einsum('ij,j', a, b)
    array([ 30.,  80., 130., 180., 230.])
    >>> np.dot(a, b)
    array([ 30.,  80., 130., 180., 230.])
    >>> np.einsum('...j,j', a, b)
    array([ 30.,  80., 130., 180., 230.])
    Broadcasting and scalar multiplication:
    >>> np.einsum('..., ...', np.array(3), c)
    array([[ 0.,  3.,  6.],
           [ 9., 12., 15.]])
    >>> np.einsum(',ij', np.array(3), c)
    array([[ 0.,  3.,  6.],
           [ 9., 12., 15.]])
    >>> np.multiply(3, c)
    array([[ 0.,  3.,  6.],
           [ 9., 12., 15.]])
    Vector outer product:
    >>> np.einsum('i,j', np.arange(2)+1, b)
    array([[0., 1., 2., 3., 4.],
           [0., 2., 4., 6., 8.]])
    Tensor contraction:
    >>> a = np.arange(60.).reshape(3,4,5)
    >>> b = np.arange(24.).reshape(4,3,2)
    >>> np.einsum('ijk,jil->kl', a, b)
    array([[4400., 4730.],
           [4532., 4874.],
           [4664., 5018.],
           [4796., 5162.],
           [4928., 5306.]])
    Example of ellipsis use:
    >>> a = np.arange(6).reshape((3,2))
    >>> b = np.arange(12).reshape((4,3))
    >>> np.einsum('ki,jk->ij', a, b)
    array([[10., 28., 46., 64.],
           [13., 40., 67., 94.]])
    >>> np.einsum('ki,...k->i...', a, b)
    array([[10., 28., 46., 64.],
           [13., 40., 67., 94.]])
    >>> np.einsum('k...,jk', a, b)
    array([[10., 28., 46., 64.],
           [13., 40., 67., 94.]])
    Chained array operations. For more complicated contractions, speed ups
    might be achieved by repeatedly computing a 'greedy' path. Performance
    improvements can be particularly significant with larger arrays:
    >>> a = np.ones(64).reshape(2,4,8)
    # Basic `einsum`: ~42.22ms  (benchmarked on 3.4GHz Intel Xeon.)
    >>> for iteration in range(500):
    ...     np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a)
    # Greedy `einsum` (faster optimal path approximation): ~0.117ms
    >>> for iteration in range(500):
    ...     np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize=True)
    """
    # Grab non-einsum kwargs; do not optimize by default.
    optimize_arg = kwargs.pop('optimize', False)
    out = kwargs.pop('out', None)
    # The first positional argument is the subscripts string; everything
    # after it is an input array.  Raises IndexError when called with no
    # arguments at all.
    subscripts = operands[0]
    operands = operands[1:]
    # ``optimize`` is forwarded to the backend as an int flag (0 or 1).
    return _npi.einsum(*operands, subscripts=subscripts, out=out, optimize=int(optimize_arg))
@set_module('mxnet.ndarray.numpy')
def nonzero(a):
    """
    Return the indices of the elements that are non-zero.
    The result is a tuple holding one index array per dimension of `a`;
    together they address every non-zero entry.  The values in `a` are
    always returned in row-major, C-style order.  Use `argwhere` instead
    when you want one row per non-zero element rather than one array per
    dimension.
    Parameters
    ----------
    a : ndarray
        Input array.
    Returns
    -------
    tuple_of_arrays : tuple
        Indices of elements that are non-zero.
    See Also
    --------
    ndarray.nonzero :
        Equivalent ndarray method.
    Notes
    -----
    Although ``a[nonzero(a)]`` yields the non-zero values, prefer
    ``x[x.astype(bool)]`` or ``x[x != 0]``, which also handle 0-d arrays
    correctly.
    Examples
    --------
    >>> x = np.array([[3, 0, 0], [0, 4, 0], [5, 6, 0]])
    >>> x
    array([[3, 0, 0],
           [0, 4, 0],
           [5, 6, 0]], dtype=int32)
    >>> np.nonzero(x)
    (array([0, 1, 2, 2], dtype=int64), array([0, 1, 0, 1], dtype=int64))
    >>> x[np.nonzero(x)]
    array([3, 4, 5, 6])
    >>> np.transpose(np.stack(np.nonzero(x)))
    array([[0, 0],
           [1, 1],
           [2, 0],
           [2, 1]], dtype=int64)
    A common use for ``nonzero`` is to find the indices of an array, where
    a condition is True.  Given an array `a`, the condition `a` > 3 is a
    boolean array and since False is interpreted as 0, np.nonzero(a > 3)
    yields the indices of the `a` where the condition is true.
    >>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int32)
    >>> a > 3
    array([[False, False, False],
           [ True,  True,  True],
           [ True,  True,  True]])
    >>> np.nonzero(a > 3)
    (array([1, 1, 1, 2, 2, 2], dtype=int64), array([0, 1, 2, 0, 1, 2], dtype=int64))
    Using this result to index `a` is equivalent to using the mask directly:
    >>> a[np.nonzero(a > 3)]
    array([4, 5, 6, 7, 8, 9], dtype=int32)
    >>> a[a > 3]
    array([4, 5, 6, 7, 8, 9], dtype=int32)
    ``nonzero`` can also be called as a method of the array.
    >>> (a > 3).nonzero()
    (array([1, 1, 1, 2, 2, 2], dtype=int64), array([0, 1, 2, 0, 1, 2], dtype=int64))
    """
    # The backend op yields one row per non-zero element; transposing
    # gives one row per dimension, which is then split into a tuple.
    indices = _npi.nonzero(a).transpose()
    return tuple(indices[i] for i in range(len(indices)))
@set_module('mxnet.ndarray.numpy')
def percentile(a, q, axis=None, out=None, overwrite_input=None, interpolation='linear', keepdims=False):  # pylint: disable=too-many-arguments
    """
    Compute the q-th percentile of the data along the specified axis.

    Parameters
    ----------
    a : ndarray
        Input array.
    q : ndarray or scalar
        Percentile or sequence of percentiles to compute, in the range [0, 100].
    axis : {int, tuple of int, None}, optional
        Axis or axes along which the percentiles are computed. By default the
        percentile is computed over the flattened array.
    out : ndarray, optional
        Alternative output array in which to place the result. It must have
        the same shape and buffer length as the expected output, but the type
        (of the output) will be cast if necessary.
    overwrite_input : bool, optional
        Not supported yet; passing anything other than None raises
        NotImplementedError.
    interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
        How to interpolate when the desired percentile lies between two data
        points ``i < j``.
    keepdims : bool, optional
        If True, the reduced axes are kept in the result as dimensions of
        size one so the result broadcasts correctly against `a`.

    Returns
    -------
    percentile : scalar or ndarray
        The computed percentile(s).

    Examples
    --------
    >>> a = np.array([[10, 7, 4], [3, 2, 1]])
    >>> np.percentile(a, np.array(50))
    array(3.5)
    >>> np.percentile(a, np.array(50), axis=0)
    array([6.5, 4.5, 2.5])
    >>> np.percentile(a, np.array(50), axis=1, keepdims=True)
    array([[7.],
           [2.]])
    """
    if overwrite_input is not None:
        raise NotImplementedError('overwrite_input is not supported yet')
    # The two backend calls differ only in how `q` is passed
    # (scalar keyword vs. positional ndarray).
    common_kwargs = dict(axis=axis, interpolation=interpolation,
                         keepdims=keepdims, out=out)
    if isinstance(q, numeric_types):
        return _npi.percentile(a, q_scalar=q, **common_kwargs)
    return _npi.percentile(a, q, q_scalar=None, **common_kwargs)
@set_module('mxnet.ndarray.numpy')
def quantile(a, q, axis=None, out=None, overwrite_input=None, interpolation='linear', keepdims=False):  # pylint: disable=too-many-arguments
    """
    Compute the q-th quantile of the data along the specified axis.

    Parameters
    ----------
    a : ndarray
        Input array.
    q : ndarray or scalar
        Quantile or sequence of quantiles to compute, each in [0, 1].
    axis : {int, tuple of int, None}, optional
        Axis or axes along which the quantiles are computed. By default the
        quantile is computed over the flattened array.
    out : ndarray, optional
        Alternative output array in which to place the result. It must have
        the same shape and buffer length as the expected output, but the type
        (of the output) will be cast if necessary.
    overwrite_input : bool, optional
        Not supported yet; passing anything other than None raises
        NotImplementedError.
    interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
        How to interpolate when the desired quantile lies between two data
        points ``i < j``.
    keepdims : bool, optional
        If True, the reduced axes are kept in the result as dimensions of
        size one so the result broadcasts correctly against `a`.

    Returns
    -------
    quantile : scalar or ndarray
        If `q` is a single quantile and ``axis=None`` the result is a scalar.
        If multiple quantiles are given, the first axis of the result
        corresponds to the quantiles; the other axes are the axes remaining
        after the reduction of `a`. If `out` is specified, that array is
        returned instead.

    See also
    --------
    mean

    Notes
    -----
    The q-th quantile of a vector V of length N is the value q of the way
    from the minimum to the maximum in a sorted copy of V; it equals the
    median for ``q=0.5``, the minimum for ``q=0.0`` and the maximum for
    ``q=1.0``. Unlike `numpy.quantile`, `overwrite_input` is not supported.

    Examples
    --------
    >>> a = np.array([[10, 7, 4], [3, 2, 1]])
    >>> np.quantile(a, np.array(0.5))
    array(3.5)
    >>> np.quantile(a, np.array(0.5), axis=0)
    array([6.5, 4.5, 2.5])
    >>> np.quantile(a, np.array(0.5), axis=1, keepdims=True)
    array([[7.],
           [2.]])
    """
    if overwrite_input is not None:
        raise NotImplementedError('overwrite_input is not supported yet')
    # The backend implements quantiles via percentiles, so q is rescaled
    # from [0, 1] to [0, 100].
    common_kwargs = dict(axis=axis, interpolation=interpolation,
                         keepdims=keepdims, out=out)
    if isinstance(q, numeric_types):
        return _npi.percentile(a, q_scalar=q * 100, **common_kwargs)
    return _npi.percentile(a, q * 100, q_scalar=None, **common_kwargs)
@set_module('mxnet.ndarray.numpy')
def shares_memory(a, b, max_work=None):
    """
    Determine whether two arrays share memory.

    Parameters
    ----------
    a, b : ndarray
        Input arrays.
    max_work : optional
        Ignored; kept only for NumPy signature compatibility.

    Returns
    -------
    out : bool
        True if the two arrays share memory.

    See Also
    --------
    may_share_memory

    Notes
    -----
    This function differs from the original `numpy.shares_memory
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.shares_memory.html>`_ in
    the following way(s):

    - `max_work` is a dummy argument and is not supported.
    - It behaves identically to `may_share_memory` in MXNet DeepNumPy.

    Examples
    --------
    >>> np.shares_memory(np.array([1,2]), np.array([5,8,9]))
    False
    """
    # The backend returns a boolean ndarray; convert it to a Python bool.
    overlap = _npi.share_memory(a, b)
    return overlap.item()
@set_module('mxnet.ndarray.numpy')
def may_share_memory(a, b, max_work=None):
    """
    Determine whether two arrays might share memory.

    A return of True does not necessarily mean the two arrays share any
    element — only that they *might*. Only the memory bounds of `a` and `b`
    are checked by default.

    Parameters
    ----------
    a, b : ndarray
        Input arrays.
    max_work : optional
        Ignored; kept only for NumPy signature compatibility.

    Returns
    -------
    out : bool
        True if the two arrays might share memory.

    See Also
    --------
    shares_memory

    Notes
    -----
    This function differs from the original `numpy.may_share_memory
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.may_share_memory.html>`_ in
    the following way(s):

    - `max_work` is a dummy argument and is not supported.
    - It behaves identically to `shares_memory` in MXNet DeepNumPy.

    Examples
    --------
    >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))
    False
    >>> x = np.zeros([3, 4])
    >>> np.may_share_memory(x[:,0], x[:,1])
    True
    """
    # Same backend op as shares_memory(); convert the result to a Python bool.
    overlap = _npi.share_memory(a, b)
    return overlap.item()
@set_module('mxnet.ndarray.numpy')
def diff(a, n=1, axis=-1, prepend=None, append=None):  # pylint: disable=redefined-outer-name
    r"""
    Calculate the n-th discrete difference along the given axis.

    Parameters
    ----------
    a : ndarray
        Input array.
    n : int, optional
        The number of times values are differenced. If zero, the input is
        returned as-is.
    axis : int, optional
        The axis along which the difference is taken; default is the last axis.
    prepend, append : ndarray, optional
        Not supported yet; passing any value other than None raises
        NotImplementedError.

    Returns
    -------
    diff : ndarray
        The n-th differences. The shape of the output is the same as `a`
        except along `axis` where the dimension is smaller by `n`. The type
        of the output is the same as the type of the difference between any
        two elements of `a`.

    Examples
    --------
    >>> x = np.array([1, 2, 4, 7, 0])
    >>> np.diff(x)
    array([ 1, 2, 3, -7])
    >>> np.diff(x, n=2)
    array([ 1, 1, -10])
    >>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]])
    >>> np.diff(x)
    array([[2, 3, 4],
           [5, 1, 2]])
    >>> np.diff(x, axis=0)
    array([[-1, 2, 0, -2]])

    Notes
    -----
    Optional inputs `prepend` and `append` are not supported yet.
    """
    # Compare against None explicitly: truth-testing an ndarray argument is
    # ambiguous (and raises for multi-element arrays), and a falsy value such
    # as `prepend=0` or an empty array was previously ignored silently
    # instead of being rejected.
    if prepend is not None or append is not None:
        raise NotImplementedError('prepend and append options are not supported yet')
    return _npi.diff(a, n=n, axis=axis)
@set_module('mxnet.ndarray.numpy')
def resize(a, new_shape):
    """
    Return a new array with the specified shape.

    If the new array is larger than the original array, the new array is
    filled with repeated copies of `a`. Note that this differs from
    ``a.resize(new_shape)``, which fills with zeros instead of repeated
    copies of `a`.

    Parameters
    ----------
    a : ndarray
        Array to be resized.
    new_shape : int or tuple of int
        Shape of the resized array.

    Returns
    -------
    reshaped_array : ndarray
        The new array, formed from the data in the old array and repeated if
        necessary to fill out the required number of elements. The data are
        repeated in the order they are stored in memory.

    See Also
    --------
    ndarray.resize : resize an array in-place.

    Notes
    -----
    Warning: this does **not** consider axes separately — no
    interpolation/extrapolation is applied. Elements are taken from `a` as
    laid out in memory, disregarding strides and axes, so it is unsuitable
    for resizing images or data where each axis is a distinct entity.

    Examples
    --------
    >>> a = np.array([[0, 1], [2, 3]])
    >>> np.resize(a, (2, 3))
    array([[0., 1., 2.],
           [3., 0., 1.]])
    >>> np.resize(a, (1, 4))
    array([[0., 1., 2., 3.]])
    >>> np.resize(a, (2, 4))
    array([[0., 1., 2., 3.],
           [0., 1., 2., 3.]])
    """
    # Delegates entirely to the backend fallback implementation.
    reshaped_array = _npi.resize_fallback(a, new_shape=new_shape)
    return reshaped_array
@set_module('mxnet.ndarray.numpy')
def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None, **kwargs):
    """
    Replace NaN with zero and infinity with large finite numbers (default
    behaviour) or with the numbers defined by the user using the `nan`,
    `posinf` and/or `neginf` keywords.

    If `x` is inexact, NaN is replaced by zero or by the user defined value in
    `nan` keyword, infinity is replaced by the largest finite floating point
    values representable by ``x.dtype`` or by the user defined value in
    `posinf` keyword and -infinity is replaced by the most negative finite
    floating point values representable by ``x.dtype`` or by the user defined
    value in `neginf` keyword.

    For complex dtypes, the above is applied to each of the real and
    imaginary components of `x` separately.
    If `x` is not inexact, then no replacements are made.

    Parameters
    ----------
    x : ndarray
        Input data.
    copy : bool, optional
        Whether to create a copy of `x` (True) or to replace values
        in-place (False). The in-place operation only occurs if
        casting to an array does not require a copy.
        Default is True.
    nan : int, float, optional
        Value to be used to fill NaN values. If no value is passed
        then NaN values will be replaced with 0.0.
    posinf : int, float, optional
        Value to be used to fill positive infinity values. If no value is
        passed then positive infinity values will be replaced with a very
        large number.
    neginf : int, float, optional
        Value to be used to fill negative infinity values. If no value is
        passed then negative infinity values will be replaced with a very
        small (or negative) number.

        .. versionadded:: 1.13

    Returns
    -------
    out : ndarray
        `x`, with the non-finite values replaced. If `copy` is False, this may
        be `x` itself.

    Notes
    -----
    NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
    (IEEE 754). This means that Not a Number is not equivalent to infinity.

    Examples
    --------
    >>> np.nan_to_num(np.inf)
    1.7976931348623157e+308
    >>> np.nan_to_num(-np.inf)
    -1.7976931348623157e+308
    >>> np.nan_to_num(np.nan)
    0.0
    >>> x = np.array([np.inf, -np.inf, np.nan, -128, 128])
    >>> np.nan_to_num(x)
    array([ 3.4028235e+38, -3.4028235e+38,  0.0000000e+00, -1.2800000e+02,
            1.2800000e+02])
    >>> np.nan_to_num(x, nan=-9999, posinf=33333333, neginf=33333333)
    array([ 3.3333332e+07,  3.3333332e+07, -9.9990000e+03, -1.2800000e+02,
            1.2800000e+02])
    >>> y = np.array([[-1, 0, 1],[9999,234,-14222]],dtype="float64")/0
    array([[-inf,  nan,  inf],
           [ inf,  inf, -inf]], dtype=float64)
    >>> np.nan_to_num(y)
    array([[-1.79769313e+308,  0.00000000e+000,  1.79769313e+308],
           [ 1.79769313e+308,  1.79769313e+308, -1.79769313e+308]], dtype=float64)
    >>> np.nan_to_num(y, nan=111111, posinf=222222)
    array([[-1.79769313e+308,  1.11111000e+005,  2.22222000e+005],
           [ 2.22222000e+005,  2.22222000e+005, -1.79769313e+308]], dtype=float64)
    >>> y
    array([[-inf,  nan,  inf],
           [ inf,  inf, -inf]], dtype=float64)
    >>> np.nan_to_num(y, copy=False, nan=111111, posinf=222222)
    array([[-1.79769313e+308,  1.11111000e+005,  2.22222000e+005],
           [ 2.22222000e+005,  2.22222000e+005, -1.79769313e+308]], dtype=float64)
    >>> y
    array([[-1.79769313e+308,  1.11111000e+005,  2.22222000e+005],
           [ 2.22222000e+005,  2.22222000e+005, -1.79769313e+308]], dtype=float64)
    """
    if isinstance(x, numeric_types):
        # Python scalar input: defer to official NumPy, which returns a scalar.
        return _np.nan_to_num(x, copy, nan, posinf, neginf)
    elif isinstance(x, NDArray):
        if x.dtype in ['int8', 'uint8', 'int32', 'int64']:
            # Integer arrays cannot hold NaN/inf, so there is nothing to
            # replace; return the input unchanged (no copy is made even if
            # copy=True, matching NumPy's "no replacements" behaviour).
            return x
        if not copy:
            # In-place: alias the output to the input via `out=x`.
            return _npi.nan_to_num(x, copy=copy, nan=nan, posinf=posinf, neginf=neginf, out=x)
        return _npi.nan_to_num(x, copy=copy, nan=nan, posinf=posinf, neginf=neginf, out=None)
    else:
        raise TypeError('type {} not supported'.format(str(type(x))))
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def isnan(x, out=None, **kwargs):
    """
    Test element-wise for NaN and return result as a boolean array.

    Parameters
    ----------
    x : ndarray
        Input array.
    out : ndarray or None, optional
        A location into which the result is stored.
        If provided, it must have the same shape and dtype as input ndarray.
        If not provided or `None`, a freshly-allocated array is returned.

    Returns
    -------
    y : ndarray or bool
        True where x is NaN, false otherwise.
        This is a scalar if x is a scalar.

    Notes
    -----
    NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754).

    This function differs from the original `numpy.isnan
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.isnan.html>`_ in
    the following aspects:

    - Does not support complex number for now
    - Input type does not support Python native iterables(list, tuple, ...).
    - ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.
    - ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output.
    - ``out`` param does not support scalar input case.

    Examples
    --------
    >>> np.isnan(np.nan)
    True
    >>> np.isnan(np.inf)
    False
    >>> np.isnan(np.array([np.log(-1.),1.,np.log(0)]))
    array([ True, False, False])
    """
    return _unary_func_helper(x, _npi.isnan, _np.isnan, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def isinf(x, out=None, **kwargs):
    """
    Test element-wise for positive or negative infinity.

    Parameters
    ----------
    x : ndarray
        Input array.
    out : ndarray or None, optional
        A location into which the result is stored.
        If provided, it must have the same shape and dtype as input ndarray.
        If not provided or `None`, a freshly-allocated array is returned.

    Returns
    -------
    y : ndarray or bool
        True where x is positive or negative infinity, false otherwise.
        This is a scalar if x is a scalar.

    Notes
    -----
    NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754).
    This means that Not a Number is not equivalent to infinity.

    This function differs from the original `numpy.isinf
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.isinf.html>`_ in
    the following aspects:

    - Does not support complex number for now
    - Input type does not support Python native iterables(list, tuple, ...).
    - ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.
    - ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output.
    - ``out`` param does not support scalar input case.

    Examples
    --------
    >>> np.isinf(np.inf)
    True
    >>> np.isinf(np.nan)
    False
    >>> np.isinf(np.array([np.inf, -np.inf, 1.0, np.nan]))
    array([ True,  True, False, False])
    >>> x = np.array([-np.inf, 0., np.inf])
    >>> y = np.array([True, True, True], dtype=np.bool_)
    >>> np.isinf(x, y)
    array([ True, False,  True])
    >>> y
    array([ True, False,  True])
    """
    return _unary_func_helper(x, _npi.isinf, _np.isinf, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def isposinf(x, out=None, **kwargs):
    """
    Test element-wise for positive infinity, return result as bool array.

    Parameters
    ----------
    x : ndarray
        Input array.
    out : ndarray or None, optional
        A location into which the result is stored.
        If provided, it must have the same shape and dtype as input ndarray.
        If not provided or `None`, a freshly-allocated array is returned.

    Returns
    -------
    y : ndarray or bool
        True where x is positive infinity, false otherwise.
        This is a scalar if x is a scalar.

    Notes
    -----
    NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754).
    This means that Not a Number is not equivalent to infinity.

    Examples
    --------
    >>> np.isposinf(np.inf)
    True
    >>> np.isposinf(-np.inf)
    False
    >>> np.isposinf(np.nan)
    False
    >>> np.isposinf(np.array([-np.inf, 0., np.inf]))
    array([False, False,  True])
    >>> x = np.array([-np.inf, 0., np.inf])
    >>> y = np.array([True, True, True], dtype=np.bool_)
    >>> np.isposinf(x, y)
    array([False, False,  True])
    >>> y
    array([False, False,  True])
    """
    # Consistency fix: the @set_module decorator was missing here although
    # every sibling (isnan/isinf/isneginf/isfinite) carries it.
    return _unary_func_helper(x, _npi.isposinf, _np.isposinf, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def isneginf(x, out=None, **kwargs):
    """
    Test element-wise for negative infinity, return result as bool array.

    Parameters
    ----------
    x : ndarray
        Input array.
    out : ndarray or None, optional
        A location into which the result is stored.
        If provided, it must have the same shape and dtype as input ndarray.
        If not provided or `None`, a freshly-allocated array is returned.

    Returns
    -------
    y : ndarray or bool
        True where x is negative infinity, false otherwise.
        This is a scalar if x is a scalar.

    Notes
    -----
    NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754).
    This means that Not a Number is not equivalent to infinity.

    Examples
    --------
    >>> np.isneginf(-np.inf)
    True
    >>> np.isneginf(np.inf)
    False
    >>> np.isneginf(float('-inf'))
    True
    >>> np.isneginf(np.array([-np.inf, 0., np.inf]))
    array([ True, False, False])
    >>> x = np.array([-np.inf, 0., np.inf])
    >>> y = np.array([True, True, True], dtype=np.bool_)
    >>> np.isneginf(x, y)
    array([ True, False, False])
    >>> y
    array([ True, False, False])
    """
    return _unary_func_helper(x, _npi.isneginf, _np.isneginf, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def isfinite(x, out=None, **kwargs):
    """
    Test element-wise for finiteness (not infinity or not Not a Number).

    Parameters
    ----------
    x : ndarray
        Input array.
    out : ndarray or None, optional
        A location into which the result is stored.
        If provided, it must have the same shape and dtype as input ndarray.
        If not provided or `None`, a freshly-allocated array is returned.

    Returns
    -------
    y : ndarray or bool
        True where x is finite (neither NaN nor infinity), false otherwise.
        This is a scalar if x is a scalar.

    Notes
    -----
    Not a Number, positive infinity and negative infinity are considered to be non-finite.

    NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754).
    This means that Not a Number is not equivalent to infinity.
    Also that positive infinity is not equivalent to negative infinity.
    But infinity is equivalent to positive infinity. Errors result if the second argument
    is also supplied when x is a scalar input, or if first and second arguments have different shapes.

    Examples
    --------
    >>> np.isfinite(1)
    True
    >>> np.isfinite(0)
    True
    >>> np.isfinite(np.nan)
    False
    >>> np.isfinite(np.inf)
    False
    >>> np.isfinite(-np.inf)
    False
    >>> np.isfinite(np.array([np.log(-1.),1.,np.log(0)]))
    array([False,  True, False])
    >>> x = np.array([-np.inf, 0., np.inf])
    >>> y = np.array([True, True, True], dtype=np.bool_)
    >>> np.isfinite(x, y)
    array([False,  True, False])
    >>> y
    array([False,  True, False])
    """
    return _unary_func_helper(x, _npi.isfinite, _np.isfinite, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
def where(condition, x=None, y=None):  # pylint: disable=too-many-return-statements
    """where(condition, [x, y])
    Return elements chosen from `x` or `y` depending on `condition`.

    .. note::
        When only `condition` is provided, this function is a shorthand for
        ``np.asarray(condition).nonzero()``.

    Parameters
    ----------
    condition : ndarray
        Where True, yield `x`, otherwise yield `y`.
    x, y : ndarray
        Values from which to choose. `x`, `y` and `condition` need to be
        broadcastable to some shape. `x` and `y` must have the same dtype.

    Returns
    -------
    out : ndarray
        An array with elements from `x` where `condition` is True, and
        elements from `y` elsewhere.

    Notes
    -----
    This function differs from the original `numpy.where
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.where.html>`_ in
    the following way(s):

    - If `condition` is a scalar, `x` or `y` is returned directly without
      broadcasting.
    - If `condition` is an ndarray while both `x` and `y` are scalars, the
      output dtype is `float32`.

    Examples
    --------
    >>> a = np.arange(10)
    >>> np.where(a < 5, a, 10*a)
    array([ 0.,  1.,  2.,  3.,  4., 50., 60., 70., 80., 90.])
    >>> cond = np.array([[True, False], [True, True]])
    >>> x = np.array([[1, 2], [3, 4]])
    >>> y = np.array([[9, 8], [7, 6]])
    >>> np.where(cond, x, y)
    array([[1., 8.],
           [3., 4.]])
    >>> a = np.array([[0, 1, 2], [0, 2, 4], [0, 3, 6]])
    >>> np.where(a < 4, a, -1)  # -1 is broadcast
    array([[ 0.,  1.,  2.],
           [ 0.,  2., -1.],
           [ 0.,  3., -1.]])
    """
    # Single-argument form behaves like nonzero().
    if x is None and y is None:
        return nonzero(condition)
    # Scalar condition: pick one operand directly, no broadcasting.
    if isinstance(condition, numeric_types):
        return x if condition != 0 else y
    # Dispatch on scalar/ndarray combinations of x and y.
    if isinstance(x, numeric_types) and isinstance(y, numeric_types):
        return _npi.where_scalar2(condition, float(x), float(y), out=None)
    if isinstance(x, NDArray) and isinstance(y, NDArray):
        return _npi.where(condition, x, y, out=None)
    if isinstance(y, NDArray):
        return _npi.where_lscalar(condition, y, float(x), out=None)
    if isinstance(x, NDArray):
        return _npi.where_rscalar(condition, x, float(y), out=None)
    raise TypeError('type {0} and {1} not supported'.format(str(type(x)), str(type(y))))
@set_module('mxnet.ndarray.numpy')
def polyval(p, x):
    """
    Evaluate a polynomial at specific values.

    If `p` is of length N, this function returns the value
    ``p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]``.
    If `x` is a sequence, ``p(x)`` is returned for each element of `x`.

    Parameters
    ----------
    p : ndarray
        1D array of polynomial coefficients (including coefficients equal
        to zero), from highest degree to the constant term.
    x : ndarray
        An array of numbers at which to evaluate `p`.

    Returns
    -------
    values : ndarray
        Result array of polynomials.

    Notes
    -----
    This function differs from the original `numpy.polyval
    <https://numpy.org/devdocs/reference/generated/numpy.polyval.html>`_ in
    the following way(s):

    - Does not support poly1d.
    - `x` should be ndarray type even if it contains only one element.

    Examples
    --------
    >>> p = np.array([3, 0, 1])
    >>> x = np.array([5])
    >>> np.polyval(p, x)  # 3 * 5**2 + 0 * 5**1 + 1
    array([76.])
    >>> np.polyval(p, np.array([5, 4]))
    array([76., 49.])
    """
    from ...numpy import ndarray
    p_is_nd = isinstance(p, ndarray)
    x_is_nd = isinstance(x, ndarray)
    if p_is_nd and x_is_nd:
        return _npi.polyval(p, x)
    if not p_is_nd and not x_is_nd:
        # Both inputs are plain Python/NumPy values: fall back to NumPy.
        return _np.polyval(p, x)
    # Mixed ndarray/non-ndarray inputs are rejected.
    raise TypeError('type not supported')
@set_module('mxnet.ndarray.numpy')
def bincount(x, weights=None, minlength=0):
    """
    Count number of occurrences of each value in array of non-negative ints.

    Parameters
    ----------
    x : ndarray
        Input array, 1 dimension, nonnegative ints.
    weights : ndarray, optional
        Input weights, same shape as x.
    minlength : int, optional
        A minimum number of bins for the output; must be non-negative.

    Returns
    -------
    out : ndarray
        The result of binning the input array.
        The length of out is equal to amax(x)+1.

    Raises
    ------
    ValueError
        If the input is not 1-dimensional, or contains elements with negative
        values, or if minlength is negative.
    TypeError
        If the input is not an NDArray, or its type is float or complex.

    Examples
    --------
    >>> np.bincount(np.arange(5))
    array([1, 1, 1, 1, 1])
    >>> np.bincount(np.array([0, 1, 1, 3, 2, 1, 7]))
    array([1, 3, 1, 1, 0, 0, 0, 1])
    >>> x = np.array([0, 1, 1, 3, 2, 1, 7, 23])
    >>> np.bincount(x).size == np.amax(x)+1
    True
    >>> w = np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6]) # weights
    >>> x = np.array([0, 1, 1, 2, 2, 2])
    >>> np.bincount(x, weights=w)
    array([ 0.3,  0.7,  1.1])
    """
    if not isinstance(x, NDArray):
        # Only NDArray inputs are accepted; Python iterables are not converted.
        raise TypeError("Input data should be NDArray")
    if minlength < 0:
        # minlength == 0 is valid (it is the default); only reject negatives.
        # The old message wrongly claimed the value had to exceed 0.
        raise ValueError("Minlength value should be greater than or equal to 0")
    if weights is None:
        return _npi.bincount(x, minlength=minlength, has_weights=False)
    return _npi.bincount(x, weights=weights, minlength=minlength, has_weights=True)
@set_module('mxnet.ndarray.numpy')
def pad(x, pad_width, mode='constant', **kwargs):  # pylint: disable=too-many-arguments
    """
    Pad an array.

    Parameters
    ----------
    x : array_like of rank N
        The array to pad.
    pad_width : {sequence, array_like, int}
        Number of values padded to the edges of each axis.
        ((before_1, after_1), ... (before_N, after_N)) unique pad widths
        for each axis.
        ((before, after),) yields same before and after pad for each axis.
        (pad,) or int is a shortcut for before = after = pad width for all
        axes.
    mode : str or function, optional
        One of the following string values or a user supplied function.
        'constant' (default)
            Pads with a constant value.
        'edge'
            Pads with the edge values of array.
        'linear_ramp'
            not supported yet
        'maximum'
            Pads with the maximum value of all of the
            vector along each axis.
        'mean'
            not supported yet
        'median'
            not supported yet
        'minimum'
            Pads with the minimum value of all of the
            vector along each axis.
        'reflect'
            Pads with the reflection of the vector mirrored on
            the first and last values of the vector along each
            axis.
        'symmetric'
            Pads with the reflection of the vector mirrored
            along the edge of the array.
        'wrap'
            not supported yet.
        'empty'
            not supported yet.
        <function>
            not supported yet.
    stat_length : not supported yet
    constant_values : scalar, optional
        Used in 'constant'. The values to set the padded values for each
        axis.
        Default is 0.
    end_values : not supported yet
    reflect_type : {'even', 'odd'}, optional
        only support even now

    Returns
    -------
    pad : ndarray
        Padded array of rank equal to `x` with shape increased according to
        `pad_width`.
    """
    # pylint: disable = too-many-return-statements, inconsistent-return-statements
    if not _np.asarray(pad_width).dtype.kind == 'i':
        raise TypeError('`pad_width` must be of integral type.')
    # NOTE(review): plain ints/sequences are documented as valid pad_width
    # but only a tuple is accepted by the backend; kept to preserve behavior.
    if not isinstance(pad_width, tuple):
        raise TypeError("`pad_width` must be tuple.")
    # Explicitly reject modes that are documented but not implemented yet.
    if mode == "linear_ramp":
        raise ValueError("mode {'linear_ramp'} is not supported.")
    if mode == "wrap":
        raise ValueError("mode {'wrap'} is not supported.")
    if mode == "median":
        raise ValueError("mode {'median'} is not supported.")
    if mode == "mean":
        raise ValueError("mode {'mean'} is not supported.")
    if mode == "empty":
        raise ValueError("mode {'empty'} is not supported.")
    if callable(mode):
        raise ValueError("mode {'<function>'} is not supported.")
    # Keyword arguments accepted by each supported mode.
    allowedkwargs = {
        'constant': ['constant_values'],
        'edge': [],
        'maximum': ['stat_length'],
        'minimum': ['stat_length'],
        'reflect': ['reflect_type'],
        'symmetric': ['reflect_type'],
    }
    if mode not in allowedkwargs:
        # Previously an unknown mode surfaced as a confusing KeyError.
        raise ValueError("mode '{}' is not supported.".format(mode))
    # Single keyword validation (the old code performed this check twice,
    # once via a loop guarded by the removed `_np.compat.basestring`).
    unsupported_kwargs = set(kwargs) - set(allowedkwargs[mode])
    if unsupported_kwargs:
        raise ValueError("unsupported keyword arguments for mode '{}': {}"
                         .format(mode, unsupported_kwargs))
    if mode == "constant":
        values = kwargs.get("constant_values", 0)
        if isinstance(values, tuple):
            raise TypeError("unsupported constant_values type: {'tuple'}.")
        # Bug fix: this result was previously computed but not returned, so
        # user-supplied constant_values were silently discarded in favor of
        # the fallthrough default of 0.
        return _npi.pad(x, pad_width, mode='constant', constant_value=values)
    if mode == "symmetric":
        values = kwargs.get("reflect_type", "even")
        if values != "even" and values is not None:
            raise ValueError("unsupported reflect_type '{}'".format(values))
        return _npi.pad(x, pad_width, mode='symmetric', reflect_type="even")
    if mode == "edge":
        return _npi.pad(x, pad_width, mode='edge')
    if mode == "reflect":
        values = kwargs.get("reflect_type", "even")
        if values != "even" and values is not None:
            raise ValueError("unsupported reflect_type '{}'".format(values))
        return _npi.pad(x, pad_width, mode='reflect', reflect_type="even")
    if mode == "maximum":
        values = kwargs.get("stat_length", None)
        if values is not None:
            raise ValueError("unsupported stat_length '{}'".format(values))
        return _npi.pad(x, pad_width, mode='maximum')
    # Only 'minimum' remains at this point.
    values = kwargs.get("stat_length", None)
    if values is not None:
        raise ValueError("unsupported stat_length '{}'".format(values))
    return _npi.pad(x, pad_width, mode='minimum')
| apache-2.0 |
deepmind/lab2d | dmlab2d/lib/game_scripts/levels/clean_up/play.py | 1 | 3449 | # Copyright 2020 The DMLab2D Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple human player for testing the `clean_up` level.
Use `WASD` keys to move the character around.
Use `Q and E` to turn the character.
Use `SPACE` to fire clean.
Use `LEFT_CTRL` to fire fine.
Use `TAB` to switch between players.
Use `[]` to switch between levels.
Use `R` to restart a level.
Use `ESCAPE` to quit.
"""
import argparse
import collections
import json
from typing import Mapping
from dmlab2d import ui_renderer
# Maps each action name expected by the level to the UI-renderer callback
# that polls the corresponding key state every frame.
_ACTION_MAP = {
    'move': ui_renderer.get_direction_pressed,
    'turn': ui_renderer.get_turn_pressed,
    'fireClean': ui_renderer.get_space_key_pressed,
    'fireFine': ui_renderer.get_left_control_pressed
}

# Rate (frames per second) at which the UI advances the environment.
_FRAMES_PER_SECOND = 8
def _run(rgb_observation: str, config: Mapping[str, str]):
  """Run multiplayer environment, with per player rendering and actions.

  Args:
    rgb_observation: Name of the observation to render (e.g. 'RGB').
    config: Environment settings; 'numPlayers' selects the player count.
  """
  player_count = int(config.get('numPlayers', '1'))
  # Per-player accumulators for episode reward and cleaning contribution.
  score = collections.defaultdict(float)
  total_contrib = collections.defaultdict(float)
  # Observations are namespaced per player as '<player_number>.<NAME>'.
  prefixes = [str(i + 1) + '.' for i in range(player_count)]
  ui = ui_renderer.Renderer(
      config=config,
      action_map=_ACTION_MAP,
      rgb_observation=rgb_observation,
      # Reuse the prefix list instead of rebuilding the same comprehension.
      player_prefixes=prefixes,
      frames_per_second=_FRAMES_PER_SECOND)

  def player_printer(idx: int):
    # One-line summary of a player's running totals.
    print(f'Player({idx}) contrib({total_contrib[idx]}) score({score[idx]})')

  for step in ui.run():
    if step.type == ui_renderer.StepType.FIRST:
      print(f'=== Start episode {step.episode} ===')
    print_player = False
    for idx, prefix in enumerate(prefixes):
      reward = step.env.observation(prefix + 'REWARD')
      score[idx] += reward
      contrib = step.env.observation(prefix + 'CONTRIB')
      total_contrib[idx] += contrib
      # Only echo the currently controlled player, and only when something
      # actually changed this step.
      if step.player == idx and (reward != 0 or contrib != 0):
        print_player = True
    if print_player:
      player_printer(step.player)
    if step.type == ui_renderer.StepType.LAST:
      print(f'=== End episode {step.episode} ===')
      for idx in range(player_count):
        player_printer(idx)
      print('======')
  print('=== Exiting ===')
  for idx in range(player_count):
    player_printer(idx)
def main():
  """Parse command-line flags and launch the interactive clean_up viewer."""
  parser = argparse.ArgumentParser(description=__doc__)
  parser.add_argument(
      '--observation', type=str, default='RGB', help='Observation to render')
  parser.add_argument(
      '--settings', type=json.loads, default={}, help='Settings as JSON string')
  parser.add_argument(
      '--players', type=int, default=4, help='Number of players.')
  args = parser.parse_args()
  settings = args.settings
  # Fill in defaults only when the JSON settings did not provide them.
  settings.setdefault('levelName', 'clean_up')
  settings.setdefault('numPlayers', args.players)
  # The environment expects every setting value as a string.
  for key in settings:
    settings[key] = str(settings[key])
  _run(args.observation, settings)
# Entry point when executed as a script (no effect on import).
if __name__ == '__main__':
  main()
| apache-2.0 |
ganeshkoilada/libforensics | unittests/tests/dec/raw.py | 13 | 4098 | # Copyright 2010 Michael Murr
#
# This file is part of LibForensics.
#
# LibForensics is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# LibForensics is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with LibForensics. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for the lf.dec.raw module."""
# stdlib imports
import os.path
from unittest import TestCase
# local imports
from lf.dec.consts import SEEK_SET, SEEK_CUR, SEEK_END
from lf.dec.base import StreamInfo
from lf.dec.raw import Raw, RawIStream
# Docstrings in this module use reStructuredText markup.
__docformat__ = "restructuredtext en"
# Names exported by `from lf.dec.raw tests import *`.
__all__ = [
    "RawTestCase", "RawIStreamTestCase"
]
class RawTestCase(TestCase):
    """Tests for the Raw single-stream container."""

    def setUp(self):
        self.raw = Raw(os.path.join("data", "txt", "alpha.txt"))

    def test_list(self):
        # A raw file exposes exactly one stream, with id 0.
        self.assertEqual(self.raw.list(), [StreamInfo(0)])

    def test_open(self):
        # open() returns the container's single backing stream.
        self.assertEqual(self.raw.open(), self.raw.stream)
class RawIStreamTestCase(TestCase):
    """Tests for the RawIStream raw-file input stream."""

    def setUp(self):
        self.ris = RawIStream(os.path.join("data", "txt", "alpha.txt"))

    def test__init__(self):
        # alpha.txt holds the 26 lowercase ASCII letters.
        self.assertEqual(self.ris.size, 26)

    def test_seek(self):
        ris = self.ris
        self.assertEqual(ris.seek(10, SEEK_SET), 10)
        self.assertEqual(ris._stream.tell(), 10)
        # Absolute seeks before the start are rejected.
        self.assertRaises(IOError, ris.seek, -10, SEEK_SET)
        ris.seek(3, SEEK_SET)
        self.assertEqual(ris.seek(5, SEEK_CUR), 8)
        self.assertEqual(ris._stream.tell(), 8)
        self.assertEqual(ris.seek(-2, SEEK_CUR), 6)
        self.assertEqual(ris._stream.tell(), 6)
        self.assertEqual(ris.seek(-3, SEEK_END), 23)
        self.assertEqual(ris._stream.tell(), 23)
        # Seeking past end-of-file is permitted.
        self.assertEqual(ris.seek(3, SEEK_END), 29)
        self.assertEqual(ris._stream.tell(), 29)

    def test_tell(self):
        backing = self.ris._stream
        backing.seek(0, SEEK_SET)
        self.assertEqual(self.ris.tell(), 0)
        backing.seek(2, SEEK_SET)
        self.assertEqual(self.ris.tell(), 2)

    def test_read(self):
        ris = self.ris
        ris.seek(0, SEEK_SET)
        self.assertEqual(ris.read(0), b"")
        self.assertEqual(ris.read(1), b"a")
        self.assertEqual(ris.read(2), b"bc")
        self.assertEqual(ris.read(), b"defghijklmnopqrstuvwxyz")
        # Reading near EOF returns only the bytes that remain.
        ris.seek(-3, SEEK_END)
        self.assertEqual(ris.read(5), b"xyz")
        ris.seek(30, SEEK_SET)
        self.assertEqual(ris.read(), b"")

    def test_readall(self):
        self.ris.seek(0, SEEK_SET)
        self.assertEqual(self.ris.readall(), b"abcdefghijklmnopqrstuvwxyz")
        self.ris.seek(3, SEEK_SET)
        self.assertEqual(self.ris.readall(), b"defghijklmnopqrstuvwxyz")

    def test_readinto(self):
        ris = self.ris
        buffers = [bytearray(5), bytearray(10), bytearray(26), bytearray(1)]
        # Two consecutive reads starting 12 bytes before EOF.
        ris.seek(-12, SEEK_END)
        counts = [ris.readinto(buffers[0]), ris.readinto(buffers[1])]
        # Full read from the start.
        ris.seek(0, SEEK_SET)
        counts.append(ris.readinto(buffers[2]))
        # Read past EOF transfers nothing.
        ris.seek(30, SEEK_SET)
        counts.append(ris.readinto(buffers[3]))
        self.assertEqual(counts, [5, 7, 26, 0])
        self.assertEqual(buffers[0], b"opqrs")
        self.assertEqual(buffers[1], b"tuvwxyz\x00\x00\x00")
        self.assertEqual(buffers[2], b"abcdefghijklmnopqrstuvwxyz")
        self.assertEqual(buffers[3], b"\x00")
| gpl-3.0 |
chetaldrich/MLOCR | naiveBayesProbs.py | 1 | 243497 | def getSavedProbs():
return {0: 0.09871452420701168, 1: 0.11242070116861436, 2: 0.09931552587646077, 3: 0.10223706176961603, 4: 0.09736227045075126, 5: 0.09021702838063439, 6: 0.09863105175292154, 7: 0.10445742904841403, 8: 0.0974457429048414, 9: 0.09919866444073455}, {0: {(7, 3): 0.00016926556668285482, (20, 25): 0.0008456514475334237, (16, 9): 0.6496688076534416, (19, 4): 0.1641928416729458, (17, 20): 0.7873133344065324, (7, 25): 0.002029326739021919, (22, 19): 0.2629451802771288, (20, 7): 0.7959372543873771, (18, 19): 0.8070976214214115, (23, 26): 1.690964702126422e-07, (21, 6): 0.5137152456024773, (8, 5): 0.017924394939010286, (9, 0): 1.690964702126422e-07, (10, 7): 0.36829228121960494, (11, 22): 0.6899137675640504, (0, 17): 1.690964702126422e-07, (24, 14): 0.18397712868782493, (14, 1): 1.690964702126422e-07, (12, 17): 0.18127158516442266, (25, 15): 0.0018602302688092767, (15, 4): 0.10602365591979687, (13, 20): 0.7282986663023202, (2, 27): 1.690964702126422e-07, (26, 12): 1.690964702126422e-07, (3, 2): 1.690964702126422e-07, (27, 1): 1.690964702126422e-07, (4, 5): 0.0010147479177460659, (5, 24): 0.007609510256039112, (16, 0): 1.690964702126422e-07, (6, 23): 0.10483998062830838, (19, 13): 0.39670048821532883, (17, 13): 0.0708515901155673, (7, 22): 0.3600065541791855, (20, 14): 0.7205202286725386, (18, 10): 0.4225722481578631, (23, 19): 0.10027437593256704, (21, 15): 0.8803163930234855, (8, 12): 0.845144327219256, (22, 12): 0.9187012917617553, (9, 9): 0.6217678900683556, (23, 9): 0.6183859606641028, (10, 14): 0.5094878338471612, (8, 18): 0.8476807742724456, (11, 15): 0.24248450738139912, (9, 19): 0.7977973155597161, (24, 21): 0.0016911337985966346, (14, 8): 0.8400714331128767, (12, 8): 0.7712491697363313, (1, 21): 1.690964702126422e-07, (25, 16): 0.0010147479177460659, (15, 13): 0.04464163723260775, (13, 13): 0.17484591929634224, (2, 18): 0.00016926556668285482, (26, 23): 1.690964702126422e-07, (0, 14): 1.690964702126422e-07, (3, 11): 0.009638667898590818, (27, 6): 
1.690964702126422e-07, (1, 15): 0.00016926556668285482, (4, 12): 0.201901354530365, (2, 12): 0.000338362036895497, (5, 1): 1.690964702126422e-07, (3, 17): 0.026379218449642396, (16, 7): 0.8333075743043711, (6, 14): 0.8449752307490433, (19, 18): 0.8253600402043768, (17, 6): 0.6993831698959584, (7, 15): 0.8953659788724108, (20, 21): 0.30099188607497335, (18, 5): 0.49021083624292, (21, 8): 0.8497099319149973, (17, 24): 0.109743778264475, (22, 7): 0.5558202666854252, (23, 6): 0.18245526045591115, (10, 9): 0.7318496921767857, (8, 25): 0.002029326739021919, (11, 4): 0.017078912587947075, (9, 20): 0.8307711272511814, (14, 19): 0.5536220125726609, (12, 7): 0.5992780595300742, (10, 19): 0.6954939510810676, (25, 25): 1.690964702126422e-07, (15, 10): 0.4988347562237647, (13, 6): 0.47431576804293163, (11, 26): 0.00016926556668285482, (0, 5): 1.690964702126422e-07, (24, 2): 1.690964702126422e-07, (1, 0): 1.690964702126422e-07, (25, 3): 1.690964702126422e-07, (4, 11): 0.12293330294106108, (2, 7): 0.000338362036895497, (5, 10): 0.19513749572185932, (3, 22): 0.0027057126198724883, (6, 1): 1.690964702126422e-07, (4, 17): 0.40228067173234605, (19, 27): 1.690964702126422e-07, (7, 4): 0.0025366161496598455, (5, 20): 0.39196578704937485, (16, 20): 0.7932317108639748, (6, 27): 0.00016926556668285482, (21, 17): 0.7707418803256935, (19, 1): 0.00016926556668285482, (17, 17): 0.5079659656152474, (18, 22): 0.38976753293661054, (8, 0): 0.00016926556668285482, (14, 26): 1.690964702126422e-07, (10, 26): 1.690964702126422e-07, (15, 19): 0.6199078288960166, (11, 19): 0.6087474618619821, (26, 25): 1.690964702126422e-07, (24, 9): 0.15759807933465275, (12, 20): 0.7426718662703948, (27, 20): 1.690964702126422e-07, (1, 25): 1.690964702126422e-07, (25, 4): 1.690964702126422e-07, (13, 25): 0.009976860839016102, (26, 3): 1.690964702126422e-07, (27, 10): 1.690964702126422e-07, (4, 24): 0.0030439055602977725, (20, 27): 1.690964702126422e-07, (16, 11): 0.20156316158993973, (6, 18): 0.8456516166298939, (21, 
26): 0.0005074585071081392, (19, 6): 0.6708058664300218, (17, 10): 0.3777616835515129, (7, 27): 0.00016926556668285482, (22, 17): 0.5619077396130803, (20, 1): 0.00016926556668285482, (18, 17): 0.6669166476151311, (23, 20): 0.04193609370920548, (21, 4): 0.09824521829001533, (8, 7): 0.1618254910899688, (22, 11): 0.9220832211660082, (9, 6): 0.11650763707298069, (10, 5): 0.07440261599003278, (15, 24): 0.18245526045591115, (11, 8): 0.6780770146491655, (0, 19): 1.690964702126422e-07, (24, 16): 0.1053472700389463, (14, 7): 0.7673599509214406, (12, 19): 0.5492255043471321, (1, 18): 1.690964702126422e-07, (25, 13): 0.0027057126198724883, (15, 6): 0.6374938617981314, (13, 18): 0.30792484135369164, (2, 25): 1.690964702126422e-07, (26, 10): 1.690964702126422e-07, (0, 9): 1.690964702126422e-07, (3, 4): 0.000338362036895497, (27, 3): 1.690964702126422e-07, (4, 7): 0.006425834964550616, (5, 6): 0.007778606726251754, (16, 2): 1.690964702126422e-07, (6, 21): 0.40549350466638623, (19, 15): 0.588963174847103, (17, 3): 0.0023675196794472036, (7, 16): 0.9007770659192152, (20, 8): 0.8727070518639166, (18, 8): 0.8116632261171528, (16, 24): 0.15117241346657234, (21, 13): 0.873045244804342, (8, 14): 0.8160597343426815, (22, 2): 1.690964702126422e-07, (9, 15): 0.6143276453789994, (23, 11): 0.7641471179874003, (10, 12): 0.741657287449119, (8, 20): 0.8138614802299172, (11, 1): 0.00016926556668285482, (9, 17): 0.6229515653598441, (24, 23): 0.000338362036895497, (14, 14): 0.03906145371559056, (12, 10): 0.7768293532533486, (10, 22): 0.6496688076534416, (25, 22): 1.690964702126422e-07, (15, 15): 0.042950672530481335, (13, 11): 0.5206482008811956, (2, 16): 0.0006765549773207815, (26, 21): 1.690964702126422e-07, (0, 0): 1.690964702126422e-07, (3, 13): 0.02282819257517691, (27, 24): 1.690964702126422e-07, (1, 13): 0.00016926556668285482, (4, 14): 0.35645552830472, (2, 10): 1.690964702126422e-07, (5, 15): 0.7617797674044233, (3, 19): 0.016402526707096507, (6, 12): 0.6566017629321599, (4, 20): 
0.16842025342826183, (19, 20): 0.6202460218364418, (17, 4): 0.1597963334474171, (7, 9): 0.33954588128345575, (20, 23): 0.05901483720068234, (18, 3): 0.002029326739021919, (21, 22): 0.0708515901155673, (22, 5): 0.18296254986654908, (23, 0): 1.690964702126422e-07, (8, 27): 1.690964702126422e-07, (11, 6): 0.2820530814111574, (9, 26): 1.690964702126422e-07, (14, 17): 0.17569140164740546, (12, 1): 0.00016926556668285482, (10, 17): 0.4180066434621218, (15, 20): 0.7621179603448487, (13, 4): 0.04700898781558474, (0, 7): 1.690964702126422e-07, (24, 4): 0.002029326739021919, (27, 17): 1.690964702126422e-07, (1, 6): 1.690964702126422e-07, (25, 1): 1.690964702126422e-07, (2, 5): 1.690964702126422e-07, (26, 6): 1.690964702126422e-07, (5, 8): 0.04954543486877437, (3, 24): 0.00016926556668285482, (6, 7): 0.048869048987923804, (4, 19): 0.2577031897005369, (7, 6): 0.033988559609211294, (5, 18): 0.6716513487810851, (16, 22): 0.586595824264126, (6, 25): 0.0011838443879587082, (19, 3): 0.003382098500723057, (17, 23): 0.2915224837430654, (20, 4): 0.1357846346772219, (18, 20): 0.7352316215810385, (23, 25): 0.000338362036895497, (8, 2): 0.000338362036895497, (9, 3): 0.0005074585071081392, (14, 24): 0.20037948629845123, (10, 24): 0.14170301113466438, (11, 21): 0.8189343743362965, (24, 11): 0.21881100155162922, (14, 2): 1.690964702126422e-07, (12, 22): 0.7117272122214813, (27, 22): 1.690964702126422e-07, (25, 10): 0.003213002030510415, (13, 23): 0.49579101975993717, (26, 1): 1.690964702126422e-07, (3, 1): 1.690964702126422e-07, (27, 12): 1.690964702126422e-07, (4, 26): 1.690964702126422e-07, (5, 27): 1.690964702126422e-07, (16, 13): 0.03483404196027451, (6, 16): 0.9016225482702784, (21, 24): 0.00439667732199891, (19, 8): 0.8387186613511756, (17, 8): 0.8086194896533253, (7, 21): 0.5570039419769137, (22, 23): 0.005242159673062121, (20, 3): 0.003382098500723057, (18, 15): 0.3740415612068348, (23, 22): 0.004565773792211552, (21, 2): 0.00016926556668285482, (8, 9): 0.4749921539237822, (22, 9): 
0.8434533625171295, (9, 4): 0.005242159673062121, (23, 12): 0.7660071791597395, (10, 3): 0.0005074585071081392, (15, 26): 1.690964702126422e-07, (11, 10): 0.8300947413703308, (0, 21): 1.690964702126422e-07, (24, 18): 0.038723260775165275, (14, 5): 0.3315983471834616, (12, 13): 0.3104612884068813, (1, 16): 0.00016926556668285482, (25, 19): 1.690964702126422e-07, (15, 0): 1.690964702126422e-07, (13, 16): 0.06916062541344087, (2, 23): 1.690964702126422e-07, (26, 8): 1.690964702126422e-07, (0, 11): 1.690964702126422e-07, (3, 6): 0.000338362036895497, (27, 5): 1.690964702126422e-07, (1, 10): 1.690964702126422e-07, (4, 1): 1.690964702126422e-07, (5, 4): 0.0011838443879587082, (16, 4): 0.1386592746708368, (6, 11): 0.5074586762046095, (19, 17): 0.7950917720363139, (17, 1): 1.690964702126422e-07, (7, 18): 0.9055117670851692, (20, 10): 0.7857914661746186, (18, 6): 0.6993831698959584, (16, 26): 1.690964702126422e-07, (21, 11): 0.8997624870979394, (17, 27): 0.00016926556668285482, (22, 0): 1.690964702126422e-07, (9, 13): 0.7583978380001706, (23, 5): 0.07474080893045806, (10, 10): 0.8334766707745837, (8, 22): 0.476514022155696, (11, 3): 0.00016926556668285482, (9, 23): 0.3500298624366396, (24, 25): 0.00016926556668285482, (14, 12): 0.1922628557282444, (12, 4): 0.030606630204958452, (10, 20): 0.8092958755341758, (25, 20): 1.690964702126422e-07, (15, 9): 0.7056397392938262, (13, 9): 0.814876059051193, (11, 25): 0.005749449083700047, (26, 19): 1.690964702126422e-07, (0, 2): 1.690964702126422e-07, (3, 15): 0.030606630204958452, (27, 26): 1.690964702126422e-07, (1, 3): 1.690964702126422e-07, (4, 8): 0.015557044356033296, (2, 8): 0.00016926556668285482, (5, 13): 0.5904850430790168, (3, 21): 0.007271317315613827, (6, 2): 1.690964702126422e-07, (4, 22): 0.04734718075601003, (19, 22): 0.26818717085372074, (7, 11): 0.6740186993640621, (5, 23): 0.04667079487515945, (20, 17): 0.847511677802233, (18, 1): 1.690964702126422e-07, (16, 17): 0.35645552830472, (21, 20): 0.2972717637302952, (22, 
27): 0.00016926556668285482, (18, 27): 1.690964702126422e-07, (23, 2): 1.690964702126422e-07, (9, 24): 0.10314901592618195, (14, 23): 0.47042654922804084, (12, 3): 0.00016926556668285482, (15, 22): 0.6496688076534416, (13, 2): 0.000338362036895497, (26, 26): 1.690964702126422e-07, (0, 25): 1.690964702126422e-07, (24, 6): 0.02874656903261939, (12, 25): 0.008454992607102323, (27, 19): 1.690964702126422e-07, (1, 4): 1.690964702126422e-07, (25, 7): 0.0005074585071081392, (2, 3): 0.00016926556668285482, (26, 4): 1.690964702126422e-07, (3, 26): 1.690964702126422e-07, (27, 9): 1.690964702126422e-07, (6, 5): 0.007271317315613827, (7, 0): 1.690964702126422e-07, (5, 16): 0.7783512214852623, (20, 24): 0.013189693773056305, (16, 8): 0.8192725672767217, (19, 5): 0.4526714198557134, (17, 21): 0.6823044264044815, (22, 18): 0.4092136270110644, (20, 6): 0.6122984877364477, (18, 18): 0.77564567796186, (23, 27): 1.690964702126422e-07, (21, 7): 0.7120654051619065, (8, 4): 0.0030439055602977725, (9, 1): 1.690964702126422e-07, (10, 6): 0.19243195219845705, (11, 23): 0.4810796268514373, (0, 16): 1.690964702126422e-07, (24, 13): 0.21001798510057182, (14, 0): 1.690964702126422e-07, (12, 16): 0.11329480413894048, (25, 8): 0.0016911337985966346, (15, 5): 0.4012660929110702, (13, 21): 0.8060830426001356, (2, 26): 1.690964702126422e-07, (26, 15): 1.690964702126422e-07, (3, 3): 0.00016926556668285482, (27, 14): 1.690964702126422e-07, (4, 4): 0.0006765549773207815, (5, 25): 0.000338362036895497, (16, 15): 0.09097407007087172, (6, 22): 0.23622793798353137, (19, 10): 0.5717153348854135, (17, 14): 0.1053472700389463, (7, 23): 0.17653688399846867, (22, 21): 0.06307315248578575, (20, 13): 0.6755405675959758, (18, 13): 0.17755146281974452, (23, 16): 0.43237984343019636, (21, 0): 1.690964702126422e-07, (8, 11): 0.776152967372498, (22, 15): 0.8091267790639632, (9, 10): 0.7638089250469751, (23, 14): 0.6596454993959875, (10, 1): 1.690964702126422e-07, (8, 17): 0.8133541908192793, (11, 12): 
0.6236279512406947, (0, 23): 1.690964702126422e-07, (24, 20): 0.006256738494337974, (14, 11): 0.3857092176515071, (12, 15): 0.11566215472191747, (1, 22): 1.690964702126422e-07, (25, 17): 0.000338362036895497, (15, 2): 1.690964702126422e-07, (13, 14): 0.08387201832194074, (2, 21): 1.690964702126422e-07, (26, 22): 1.690964702126422e-07, (0, 13): 1.690964702126422e-07, (3, 8): 0.0011838443879587082, (27, 7): 1.690964702126422e-07, (1, 8): 1.690964702126422e-07, (4, 3): 0.000338362036895497, (2, 15): 0.000338362036895497, (5, 2): 1.690964702126422e-07, (16, 6): 0.6801061722917172, (6, 9): 0.20088677570908914, (19, 19): 0.7719255556171819, (17, 7): 0.840409626053302, (7, 12): 0.7934008073341875, (20, 20): 0.4579134104323053, (18, 4): 0.16791296401762393, (21, 9): 0.9159957482383531, (17, 25): 0.005242159673062121, (22, 6): 0.3593301682983349, (23, 7): 0.32703274248772024, (10, 8): 0.5646132831364825, (8, 24): 0.07000610776450408, (11, 5): 0.12107324176872203, (9, 21): 0.7604269956427222, (24, 27): 1.690964702126422e-07, (14, 18): 0.33836220599196726, (12, 6): 0.3780998764919382, (10, 18): 0.5411088737769253, (25, 26): 1.690964702126422e-07, (15, 11): 0.27309096848988734, (13, 7): 0.6932956969683033, (11, 27): 1.690964702126422e-07, (26, 17): 1.690964702126422e-07, (0, 4): 1.690964702126422e-07, (24, 1): 1.690964702126422e-07, (1, 1): 1.690964702126422e-07, (4, 10): 0.06797695012195237, (2, 6): 0.00016926556668285482, (5, 11): 0.3153650860430479, (3, 23): 0.0015220373283839925, (6, 0): 1.690964702126422e-07, (4, 16): 0.4273069493238171, (19, 24): 0.03483404196027451, (7, 5): 0.011667825541142525, (5, 21): 0.24738830501756576, (20, 19): 0.6359719935662176, (16, 19): 0.6983685910746825, (6, 26): 0.000338362036895497, (21, 18): 0.6285317488768614, (17, 18): 0.6630274288002403, (22, 25): 0.0005074585071081392, (18, 25): 0.00287480909008513, (14, 21): 0.7945844826256759, (15, 16): 0.1033181123963946, (13, 0): 1.690964702126422e-07, (11, 16): 0.21559816861758901, (26, 24): 
1.690964702126422e-07, (0, 27): 1.690964702126422e-07, (24, 8): 0.10872919944319914, (12, 27): 1.690964702126422e-07, (27, 21): 1.690964702126422e-07, (1, 26): 1.690964702126422e-07, (25, 5): 1.690964702126422e-07, (13, 26): 0.00016926556668285482, (2, 1): 1.690964702126422e-07, (26, 2): 1.690964702126422e-07, (27, 11): 1.690964702126422e-07, (7, 2): 1.690964702126422e-07, (20, 26): 0.00016926556668285482, (16, 10): 0.4102282058323402, (21, 27): 0.00016926556668285482, (19, 7): 0.82620552255544, (17, 11): 0.17721326987931924, (7, 24): 0.04210519017941812, (22, 16): 0.70716160752574, (20, 0): 1.690964702126422e-07, (18, 16): 0.5176044644173681, (23, 21): 0.015218851415608012, (21, 5): 0.29304435197497913, (8, 6): 0.0661168889496133, (22, 10): 0.904666284734106, (9, 7): 0.2588868649920254, (10, 4): 0.008624089077314965, (15, 25): 0.010145957309228744, (11, 9): 0.8118323225873655, (0, 18): 1.690964702126422e-07, (24, 15): 0.1462686158304057, (14, 6): 0.5750972642896663, (12, 18): 0.3353184695281397, (1, 19): 1.690964702126422e-07, (25, 14): 0.0023675196794472036, (15, 7): 0.807774007302262, (13, 19): 0.5213245867620462, (2, 24): 1.690964702126422e-07, (26, 13): 1.690964702126422e-07, (0, 8): 1.690964702126422e-07, (3, 5): 0.000338362036895497, (27, 0): 1.690964702126422e-07, (4, 6): 0.003213002030510415, (5, 7): 0.020799034932625204, (16, 1): 1.690964702126422e-07, (6, 20): 0.5811847372173214, (19, 12): 0.3789453588430014, (17, 12): 0.09029768419002114, (7, 17): 0.9112610470723991, (20, 15): 0.7888352026384461, (18, 11): 0.24620462972607726, (23, 18): 0.19209375925803174, (21, 14): 0.8764271742085948, (8, 13): 0.8454825201596813, (22, 13): 0.90043887297879, (9, 8): 0.4289979140259435, (23, 8): 0.47972685508973617, (10, 15): 0.41631567875999537, (8, 19): 0.8644213248234972, (11, 14): 0.33836220599196726, (9, 18): 0.7088525722278664, (24, 22): 0.0006765549773207815, (14, 9): 0.7605960921129349, (12, 9): 0.8373658895894744, (1, 20): 1.690964702126422e-07, (25, 23): 
1.690964702126422e-07, (15, 12): 0.11600034766234275, (13, 12): 0.3229744272026168, (2, 19): 0.00016926556668285482, (26, 20): 1.690964702126422e-07, (0, 15): 1.690964702126422e-07, (3, 10): 0.005411256143274762, (27, 25): 1.690964702126422e-07, (1, 14): 1.690964702126422e-07, (4, 13): 0.2800239237686057, (2, 13): 0.0006765549773207815, (5, 0): 1.690964702126422e-07, (3, 16): 0.030944823145383736, (6, 15): 0.8901239882958188, (19, 21): 0.44218743870252963, (17, 5): 0.49021083624292, (7, 14): 0.8850510941894395, (20, 22): 0.15337066757933668, (18, 2): 1.690964702126422e-07, (21, 23): 0.0219827102241137, (22, 4): 0.052927364273027216, (23, 1): 1.690964702126422e-07, (8, 26): 1.690964702126422e-07, (11, 7): 0.4913945115344085, (9, 27): 1.690964702126422e-07, (14, 16): 0.06949881835386615, (12, 0): 1.690964702126422e-07, (10, 16): 0.3780998764919382, (25, 24): 1.690964702126422e-07, (15, 21): 0.7776748356044118, (13, 5): 0.250093848540968, (0, 6): 1.690964702126422e-07, (24, 3): 1.690964702126422e-07, (1, 7): 1.690964702126422e-07, (25, 2): 1.690964702126422e-07, (2, 4): 0.00016926556668285482, (5, 9): 0.10196534063469345, (3, 25): 0.00016926556668285482, (6, 6): 0.01674071964752179, (4, 18): 0.3402222671643063, (19, 26): 1.690964702126422e-07, (7, 7): 0.09114316654108436, (5, 19): 0.5390797161343736, (16, 21): 0.7425027698001821, (6, 24): 0.018769877290073497, (21, 16): 0.8537682472001007, (19, 0): 1.690964702126422e-07, (17, 16): 0.33599485540899027, (18, 23): 0.2054523804048305, (8, 1): 1.690964702126422e-07, (14, 27): 1.690964702126422e-07, (10, 27): 1.690964702126422e-07, (15, 18): 0.4240941163897769, (11, 18): 0.4129337493557425, (24, 10): 0.19327743454952026, (12, 21): 0.8121705155277907, (27, 23): 1.690964702126422e-07, (1, 24): 1.690964702126422e-07, (25, 11): 0.003382098500723057, (13, 24): 0.2098488886303592, (26, 0): 1.690964702126422e-07, (27, 13): 1.690964702126422e-07, (4, 25): 0.000338362036895497, (16, 12): 0.0793064136261994, (6, 19): 
0.7431791556810328, (21, 25): 0.000338362036895497, (19, 9): 0.7320187886469983, (17, 9): 0.6188932500747407, (7, 26): 0.00016926556668285482, (22, 22): 0.024857350217728617, (20, 2): 0.00016926556668285482, (18, 14): 0.25364487441543354, (23, 23): 0.0018602302688092767, (21, 3): 0.0025366161496598455, (8, 8): 0.3026828507770998, (22, 8): 0.7242403510172168, (9, 5): 0.03956874312622849, (23, 13): 0.7343861392299753, (10, 2): 0.00016926556668285482, (15, 27): 1.690964702126422e-07, (11, 11): 0.7541704262448544, (0, 20): 1.690964702126422e-07, (24, 17): 0.06831514306237765, (14, 4): 0.0760935806921592, (12, 12): 0.47211751393016727, (1, 17): 0.00016926556668285482, (25, 12): 0.003213002030510415, (15, 1): 1.690964702126422e-07, (13, 17): 0.15438524640061255, (2, 22): 1.690964702126422e-07, (26, 11): 1.690964702126422e-07, (0, 10): 1.690964702126422e-07, (3, 7): 0.0005074585071081392, (27, 2): 1.690964702126422e-07, (1, 11): 0.00016926556668285482, (4, 0): 1.690964702126422e-07, (5, 5): 0.0030439055602977725, (16, 3): 0.0015220373283839925, (6, 10): 0.34732431891323734, (19, 14): 0.474146671572719, (17, 2): 1.690964702126422e-07, (7, 19): 0.8537682472001007, (20, 9): 0.856981080134141, (18, 9): 0.6398612123811084, (16, 25): 0.00828589613688968, (21, 12): 0.879640007142635, (8, 15): 0.792217132042699, (22, 3): 0.0016911337985966346, (9, 14): 0.6794297864108666, (23, 10): 0.7196747463214754, (10, 13): 0.6288699418172866, (8, 21): 0.6860245487491596, (11, 0): 1.690964702126422e-07, (9, 16): 0.6016454101130512, (24, 24): 0.000338362036895497, (14, 15): 0.03703229607303886, (12, 11): 0.6417212735534474, (10, 23): 0.4273069493238171, (25, 21): 1.690964702126422e-07, (15, 14): 0.02891566550283203, (13, 10): 0.6983685910746825, (2, 17): 0.0005074585071081392, (26, 18): 1.690964702126422e-07, (0, 1): 1.690964702126422e-07, (3, 12): 0.01589523729645858, (27, 27): 1.690964702126422e-07, (1, 12): 0.00016926556668285482, (4, 15): 0.4088754340706391, (2, 11): 
0.00016926556668285482, (5, 14): 0.692957504027878, (3, 18): 0.022151806694326343, (6, 13): 0.7665144685703773, (4, 21): 0.09739973593895211, (19, 23): 0.12174962764957259, (7, 8): 0.19293924160909495, (20, 16): 0.8343221531256468, (18, 0): 1.690964702126422e-07, (16, 16): 0.19141737337718118, (21, 21): 0.16791296401762393, (22, 26): 0.0005074585071081392, (18, 26): 1.690964702126422e-07, (23, 3): 0.00016926556668285482, (9, 25): 0.0025366161496598455, (14, 22): 0.6828117158151195, (12, 2): 0.00016926556668285482, (15, 23): 0.42950520343658144, (13, 3): 0.0005074585071081392, (0, 24): 1.690964702126422e-07, (24, 5): 0.011160536130504599, (12, 24): 0.20342322276227878, (27, 16): 1.690964702126422e-07, (1, 5): 1.690964702126422e-07, (25, 0): 1.690964702126422e-07, (2, 2): 1.690964702126422e-07, (26, 7): 1.690964702126422e-07, (3, 27): 1.690964702126422e-07, (6, 4): 0.0025366161496598455, (7, 1): 1.690964702126422e-07, (5, 17): 0.7514648827214522, (16, 23): 0.3691377635706682, (19, 2): 0.00016926556668285482, (17, 22): 0.49325457270674755, (20, 5): 0.3904439188174611, (18, 21): 0.5771264219322181, (23, 24): 0.0008456514475334237, (8, 3): 0.0005074585071081392, (9, 2): 0.000338362036895497, (14, 25): 0.00980776436880346, (10, 25): 0.004058484381573625, (11, 20): 0.7771675461937738, (24, 12): 0.22574395683034754, (14, 3): 0.000338362036895497, (12, 23): 0.5008639138663165, (25, 9): 0.0025366161496598455, (13, 22): 0.7081761863470158, (26, 14): 1.690964702126422e-07, (3, 0): 1.690964702126422e-07, (27, 15): 1.690964702126422e-07, (4, 27): 1.690964702126422e-07, (5, 26): 1.690964702126422e-07, (16, 14): 0.04159790076878019, (6, 17): 0.8908003741766694, (19, 11): 0.4388055092982768, (17, 15): 0.1986885215963248, (7, 20): 0.7337097533491248, (22, 20): 0.14237939701551494, (20, 12): 0.6748641817151253, (18, 12): 0.1711257969516641, (23, 17): 0.30302104371752503, (21, 1): 1.690964702126422e-07, (8, 10): 0.651359772355568, (22, 14): 0.8705087977511523, (9, 11): 
0.8365204072384113, (23, 15): 0.5504091796386206, (10, 0): 1.690964702126422e-07, (8, 16): 0.7854532732341932, (11, 13): 0.4739775751025063, (0, 22): 1.690964702126422e-07, (24, 19): 0.017078912587947075, (14, 10): 0.5920069113109306, (12, 14): 0.18008790987293416, (1, 23): 1.690964702126422e-07, (25, 18): 0.00016926556668285482, (15, 3): 0.0008456514475334237, (13, 15): 0.05630929367728006, (2, 20): 1.690964702126422e-07, (26, 9): 1.690964702126422e-07, (0, 12): 1.690964702126422e-07, (3, 9): 0.00287480909008513, (27, 4): 1.690964702126422e-07, (1, 9): 1.690964702126422e-07, (4, 2): 1.690964702126422e-07, (2, 14): 0.000338362036895497, (5, 3): 0.0005074585071081392, (16, 5): 0.46281720806847193, (6, 8): 0.10128895475384289, (19, 16): 0.700059555776809, (17, 0): 1.690964702126422e-07, (7, 13): 0.856473790723503, (20, 11): 0.7107126334002054, (18, 7): 0.8395641437022388, (16, 27): 0.00016926556668285482, (21, 10): 0.9208995458745197, (17, 26): 1.690964702126422e-07, (22, 1): 1.690964702126422e-07, (9, 12): 0.8255291366745895, (23, 4): 0.016233430236883863, (10, 11): 0.82620552255544, (8, 23): 0.26091602263457714, (11, 2): 1.690964702126422e-07, (9, 22): 0.5796628689854078, (24, 26): 1.690964702126422e-07, (14, 13): 0.0845484042027913, (12, 5): 0.18651357574101457, (10, 21): 0.7969518332086529, (25, 27): 1.690964702126422e-07, (15, 8): 0.8365204072384113, (13, 8): 0.8233308825618252, (11, 24): 0.17856604164102038, (26, 16): 1.690964702126422e-07, (0, 3): 1.690964702126422e-07, (24, 0): 1.690964702126422e-07, (3, 14): 0.027901086681556178, (1, 2): 1.690964702126422e-07, (4, 9): 0.033312173728360726, (2, 9): 1.690964702126422e-07, (5, 12): 0.4516568410344376, (3, 20): 0.011836922011355168, (6, 3): 1.690964702126422e-07, (4, 23): 0.016909816117734434, (19, 25): 0.0013529408581713501, (7, 10): 0.5106715091386497, (5, 22): 0.125807942934676, (20, 18): 0.782747729710791, (16, 18): 0.5368814620216092, (21, 19): 0.4618026292471961, (17, 19): 0.7766602567831359, (22, 24): 
0.0023675196794472036, (18, 24): 0.07068249364535464, (14, 20): 0.736415296872527, (15, 17): 0.238764385036721, (13, 1): 0.00016926556668285482, (11, 17): 0.26260698733670357, (26, 27): 1.690964702126422e-07, (0, 26): 1.690964702126422e-07, (24, 7): 0.06493321365812481, (12, 26): 0.00016926556668285482, (27, 18): 1.690964702126422e-07, (1, 27): 1.690964702126422e-07, (25, 6): 0.00016926556668285482, (13, 27): 1.690964702126422e-07, (2, 0): 1.690964702126422e-07, (26, 5): 1.690964702126422e-07, (27, 8): 1.690964702126422e-07}, 1: {(7, 3): 0.00044559706740409196, (20, 25): 0.0013364942364892474, (16, 9): 0.029251272201157457, (19, 4): 0.0005940799289182846, (17, 20): 0.003860702882230522, (7, 25): 0.00014863134437570678, (22, 19): 0.020639266233334286, (20, 7): 0.06815378191787591, (18, 19): 0.010542431650369189, (23, 26): 0.0005940799289182846, (21, 6): 0.04350562690651994, (8, 5): 0.0010395285134608624, (9, 0): 1.484828615141926e-07, (10, 7): 0.004009185743744714, (11, 22): 0.0025243571286027887, (0, 17): 1.484828615141926e-07, (24, 14): 0.14506790418222767, (14, 1): 1.484828615141926e-07, (12, 17): 0.352646944579069, (25, 15): 0.003415254297687944, (15, 4): 0.0007425627904324772, (13, 20): 0.0026728399901169812, (2, 27): 1.484828615141926e-07, (26, 12): 1.484828615141926e-07, (3, 2): 0.00014863134437570678, (27, 1): 1.484828615141926e-07, (4, 5): 0.0002971142058898994, (5, 24): 0.005939462943429218, (16, 0): 1.484828615141926e-07, (6, 23): 0.019302920479706553, (19, 13): 0.864615851080005, (17, 13): 0.9578630881109179, (7, 22): 0.036526932415352896, (20, 14): 0.6384764529938897, (18, 10): 0.29770828581881764, (23, 19): 0.01291815743459627, (21, 15): 0.40921891481597633, (8, 12): 0.20178835728064926, (22, 12): 0.6089283635525654, (9, 9): 0.011878777403996923, (23, 9): 0.3066172575096692, (10, 14): 0.8490251506210148, (8, 18): 0.40461594610903634, (11, 15): 0.9606842624796876, (9, 19): 0.18916731405194287, (24, 21): 0.0007425627904324772, (14, 8): 
0.005197048635858254, (12, 8): 0.006533394389485988, (1, 21): 1.484828615141926e-07, (25, 16): 0.0013364942364892474, (15, 13): 0.9704841313396243, (13, 13): 0.9197029927017705, (2, 18): 0.00044559706740409196, (26, 23): 1.484828615141926e-07, (0, 14): 1.484828615141926e-07, (3, 11): 0.001930425682546018, (27, 6): 1.484828615141926e-07, (1, 15): 0.0002971142058898994, (4, 12): 0.10720477449610856, (2, 12): 0.0007425627904324772, (5, 1): 1.484828615141926e-07, (3, 17): 0.009057603035227261, (16, 7): 0.002821322851631174, (6, 14): 0.5066236719692867, (19, 18): 0.0283603750320723, (17, 6): 0.001930425682546018, (7, 15): 0.6992059433531944, (20, 21): 0.008018223004627914, (18, 5): 0.0008910456519466697, (21, 8): 0.19777932001976606, (17, 24): 0.0010395285134608624, (22, 7): 0.11715312621755947, (23, 6): 0.04899949278254507, (10, 9): 0.010245465927340804, (8, 25): 1.484828615141926e-07, (11, 4): 0.0008910456519466697, (9, 20): 0.0764688221626707, (14, 19): 0.0026728399901169812, (12, 7): 0.0020789085440602105, (10, 19): 0.12576513218538263, (25, 25): 1.484828615141926e-07, (15, 10): 0.05493880724311277, (13, 6): 0.0008910456519466697, (11, 26): 0.0002971142058898994, (0, 5): 1.484828615141926e-07, (24, 2): 1.484828615141926e-07, (1, 0): 1.484828615141926e-07, (25, 3): 1.484828615141926e-07, (4, 11): 0.0463268012752896, (2, 7): 1.484828615141926e-07, (5, 10): 0.02895430647812907, (3, 22): 0.0008910456519466697, (6, 1): 1.484828615141926e-07, (4, 17): 0.28152365391377066, (19, 27): 0.00044559706740409196, (7, 4): 0.0008910456519466697, (5, 20): 0.19763083715825186, (16, 20): 0.0017819428210318253, (6, 27): 1.484828615141926e-07, (21, 17): 0.12413182070872653, (19, 1): 0.00014863134437570678, (17, 17): 0.05211763287434311, (18, 22): 0.0037122200207163293, (8, 0): 1.484828615141926e-07, (14, 26): 0.00014863134437570678, (10, 26): 0.00044559706740409196, (15, 19): 0.001930425682546018, (11, 19): 0.06533260754910625, (26, 25): 1.484828615141926e-07, (24, 9): 
0.08908986539137706, (12, 20): 0.004900082912829869, (27, 20): 1.484828615141926e-07, (1, 25): 1.484828615141926e-07, (25, 4): 1.484828615141926e-07, (13, 25): 0.0002971142058898994, (26, 3): 1.484828615141926e-07, (27, 10): 1.484828615141926e-07, (4, 24): 0.0029698057131453664, (20, 27): 0.00014863134437570678, (16, 11): 0.36883157648411596, (6, 18): 0.44351845582575483, (21, 26): 0.00044559706740409196, (19, 6): 0.013215123157624655, (17, 10): 0.21723057487812528, (7, 27): 1.484828615141926e-07, (22, 17): 0.1288832722771807, (20, 1): 1.484828615141926e-07, (18, 17): 0.06310536462639336, (23, 20): 0.005197048635858254, (21, 4): 0.003118288574659559, (8, 7): 0.004454634328287292, (22, 11): 0.5450807331014625, (9, 6): 0.0026728399901169812, (10, 5): 0.00148497709800344, (15, 24): 0.00014863134437570678, (11, 8): 0.004900082912829869, (0, 19): 1.484828615141926e-07, (24, 16): 0.06355081321093595, (14, 7): 0.002227391405574403, (12, 19): 0.026430097832387797, (1, 18): 1.484828615141926e-07, (25, 13): 0.006681877251000181, (15, 6): 0.0005940799289182846, (13, 18): 0.04766314702891733, (2, 25): 1.484828615141926e-07, (26, 10): 0.00014863134437570678, (0, 9): 1.484828615141926e-07, (3, 4): 1.484828615141926e-07, (27, 3): 1.484828615141926e-07, (4, 7): 0.0013364942364892474, (5, 6): 0.001930425682546018, (16, 2): 0.00044559706740409196, (6, 21): 0.11269864037213369, (19, 15): 0.46089095062291535, (17, 3): 1.484828615141926e-07, (7, 16): 0.6983150461841093, (20, 8): 0.1526405301194515, (18, 8): 0.05078128712071538, (16, 24): 0.0005940799289182846, (21, 13): 0.6980180804610809, (8, 14): 0.6310523099181801, (22, 2): 0.00014863134437570678, (9, 15): 0.8742672370784276, (23, 11): 0.46965143945225274, (10, 12): 0.2204971978314375, (8, 20): 0.12710147793901036, (11, 1): 0.00014863134437570678, (9, 17): 0.5566623962995696, (24, 23): 0.0002971142058898994, (14, 14): 0.9951322863509803, (12, 10): 0.019896851925763324, (10, 22): 0.003860702882230522, (25, 22): 1.484828615141926e-07, 
(15, 15): 0.9225241670705401, (13, 11): 0.07142040487118814, (2, 16): 0.001930425682546018, (26, 21): 1.484828615141926e-07, (0, 0): 1.484828615141926e-07, (3, 13): 0.00890912017371307, (27, 24): 1.484828615141926e-07, (1, 13): 1.484828615141926e-07, (4, 14): 0.2628148133629824, (2, 10): 0.00014863134437570678, (5, 15): 0.5251840296585607, (3, 19): 0.005048565774344062, (6, 12): 0.19570055995856736, (4, 20): 0.12131064633995686, (19, 20): 0.010393948788854997, (17, 4): 0.0002971142058898994, (7, 9): 0.010690914511883382, (20, 23): 0.001930425682546018, (18, 3): 1.484828615141926e-07, (21, 22): 0.0037122200207163293, (22, 5): 0.021975611986962018, (23, 0): 1.484828615141926e-07, (8, 27): 1.484828615141926e-07, (11, 6): 0.0016334599595176325, (9, 26): 0.0002971142058898994, (14, 17): 0.12576513218538263, (12, 1): 1.484828615141926e-07, (10, 17): 0.5118205721222834, (15, 20): 0.0011880113749750548, (13, 4): 0.00044559706740409196, (0, 7): 1.484828615141926e-07, (24, 4): 0.0016334599595176325, (27, 17): 1.484828615141926e-07, (1, 6): 1.484828615141926e-07, (25, 1): 1.484828615141926e-07, (2, 5): 1.484828615141926e-07, (26, 6): 1.484828615141926e-07, (5, 8): 0.003860702882230522, (3, 24): 1.484828615141926e-07, (6, 7): 0.0035637371592021367, (4, 19): 0.18575220823711644, (7, 6): 0.002375874267088596, (5, 18): 0.408179534785377, (16, 22): 0.0011880113749750548, (6, 25): 0.00014863134437570678, (19, 3): 0.00044559706740409196, (17, 23): 0.0016334599595176325, (20, 4): 0.001930425682546018, (18, 20): 0.007424291558571144, (23, 25): 0.0002971142058898994, (8, 2): 0.00014863134437570678, (9, 3): 0.00014863134437570678, (14, 24): 0.00014863134437570678, (10, 24): 0.00014863134437570678, (11, 21): 0.003860702882230522, (24, 11): 0.13883162399863158, (14, 2): 0.00014863134437570678, (12, 22): 0.0017819428210318253, (27, 22): 1.484828615141926e-07, (25, 10): 0.002375874267088596, (13, 23): 0.00044559706740409196, (26, 1): 1.484828615141926e-07, (3, 1): 1.484828615141926e-07, 
(27, 12): 1.484828615141926e-07, (4, 26): 1.484828615141926e-07, (5, 27): 1.484828615141926e-07, (16, 13): 0.9684053712784256, (6, 16): 0.6298644470260665, (21, 24): 0.0013364942364892474, (19, 8): 0.0984442856667712, (17, 8): 0.021381680540905248, (7, 21): 0.08715958819169256, (22, 23): 0.0005940799289182846, (20, 3): 0.00044559706740409196, (18, 15): 0.53542934710304, (23, 22): 0.0007425627904324772, (21, 2): 0.00014863134437570678, (8, 9): 0.009800017342798227, (22, 9): 0.3317108611055678, (9, 4): 0.00044559706740409196, (23, 12): 0.5100387777841131, (10, 3): 1.484828615141926e-07, (15, 26): 0.00014863134437570678, (11, 10): 0.02331195774058975, (0, 21): 1.484828615141926e-07, (24, 18): 0.007721257281599529, (14, 5): 0.0007425627904324772, (12, 13): 0.8098256751812679, (1, 16): 0.0002971142058898994, (25, 19): 1.484828615141926e-07, (15, 0): 1.484828615141926e-07, (13, 16): 0.6940090432001977, (2, 23): 1.484828615141926e-07, (26, 8): 1.484828615141926e-07, (0, 11): 1.484828615141926e-07, (3, 6): 1.484828615141926e-07, (27, 5): 1.484828615141926e-07, (1, 10): 1.484828615141926e-07, (4, 1): 1.484828615141926e-07, (5, 4): 0.0007425627904324772, (16, 4): 0.00044559706740409196, (6, 11): 0.08508082813049386, (19, 17): 0.08285358520778098, (17, 1): 0.0002971142058898994, (7, 18): 0.4360943127500452, (20, 10): 0.42629444389010845, (18, 6): 0.0043061514667731, (16, 26): 0.00014863134437570678, (21, 11): 0.5784893769421559, (17, 27): 0.00014863134437570678, (22, 0): 0.00014863134437570678, (9, 13): 0.45495163616234763, (23, 5): 0.021233197679391056, (10, 10): 0.02256954343301879, (8, 22): 0.020639266233334286, (11, 3): 0.00044559706740409196, (9, 23): 0.0025243571286027887, (24, 25): 0.00014863134437570678, (14, 12): 0.5623047450371089, (12, 4): 0.0008910456519466697, (10, 20): 0.039199623922608354, (25, 20): 1.484828615141926e-07, (15, 9): 0.010987880234911767, (13, 9): 0.010542431650369189, (11, 25): 0.00044559706740409196, (26, 19): 1.484828615141926e-07, (0, 2): 
1.484828615141926e-07, (3, 15): 0.01351208888065304, (27, 26): 1.484828615141926e-07, (1, 3): 1.484828615141926e-07, (4, 8): 0.0025243571286027887, (2, 8): 1.484828615141926e-07, (5, 13): 0.3190898178768614, (3, 21): 0.002227391405574403, (6, 2): 0.00014863134437570678, (4, 22): 0.032814860877498075, (19, 22): 0.0043061514667731, (7, 11): 0.08329903379232356, (5, 23): 0.021084714817876864, (20, 17): 0.10364118581976794, (18, 1): 0.0002971142058898994, (16, 17): 0.05107825284374376, (21, 20): 0.011581811680968538, (22, 27): 1.484828615141926e-07, (18, 27): 0.0002971142058898994, (23, 2): 1.484828615141926e-07, (9, 24): 0.0007425627904324772, (14, 23): 0.00014863134437570678, (12, 3): 0.00044559706740409196, (15, 22): 0.0010395285134608624, (13, 2): 0.00014863134437570678, (26, 26): 1.484828615141926e-07, (0, 25): 1.484828615141926e-07, (24, 6): 0.015887814664880123, (12, 25): 0.0002971142058898994, (27, 19): 1.484828615141926e-07, (1, 4): 1.484828615141926e-07, (25, 7): 0.0008910456519466697, (2, 3): 1.484828615141926e-07, (26, 4): 1.484828615141926e-07, (3, 26): 1.484828615141926e-07, (27, 9): 1.484828615141926e-07, (6, 5): 0.0010395285134608624, (7, 0): 1.484828615141926e-07, (5, 16): 0.5434474216248064, (20, 24): 0.0017819428210318253, (16, 8): 0.008463671589170491, (19, 5): 0.0026728399901169812, (17, 21): 0.002821322851631174, (22, 18): 0.05107825284374376, (20, 6): 0.029251272201157457, (18, 18): 0.019005954756678167, (23, 27): 1.484828615141926e-07, (21, 7): 0.09978063142039893, (8, 4): 0.00044559706740409196, (9, 1): 0.00014863134437570678, (10, 6): 0.002227391405574403, (11, 23): 0.0007425627904324772, (0, 16): 1.484828615141926e-07, (24, 13): 0.15917377602607596, (14, 0): 1.484828615141926e-07, (12, 16): 0.7486507362374206, (25, 8): 0.00148497709800344, (15, 5): 0.0007425627904324772, (13, 21): 0.0016334599595176325, (2, 26): 1.484828615141926e-07, (26, 15): 0.00014863134437570678, (3, 3): 0.00014863134437570678, (27, 14): 1.484828615141926e-07, (4, 4): 
0.0002971142058898994, (5, 25): 0.0002971142058898994, (16, 15): 0.819180095456662, (6, 22): 0.050632804259201185, (19, 10): 0.37224668229894237, (17, 14): 0.9421239047904135, (7, 23): 0.013809054603681425, (22, 21): 0.005048565774344062, (20, 13): 0.780574551462972, (18, 13): 0.9254938243008239, (23, 16): 0.22302140647717877, (21, 0): 0.00014863134437570678, (8, 11): 0.08003241083901132, (22, 15): 0.3918464200188158, (9, 10): 0.022866509156047175, (23, 14): 0.45628798191597536, (10, 1): 0.00014863134437570678, (8, 17): 0.5826468970645533, (11, 12): 0.24054238413585352, (0, 23): 1.484828615141926e-07, (24, 20): 0.0011880113749750548, (14, 11): 0.13007113516929422, (12, 15): 0.9701871656165959, (1, 22): 1.484828615141926e-07, (25, 17): 0.0002971142058898994, (15, 2): 0.0002971142058898994, (13, 14): 0.9937959405973525, (2, 21): 1.484828615141926e-07, (26, 22): 1.484828615141926e-07, (0, 13): 1.484828615141926e-07, (3, 8): 0.0002971142058898994, (27, 7): 1.484828615141926e-07, (1, 8): 1.484828615141926e-07, (4, 3): 0.00014863134437570678, (2, 15): 0.0020789085440602105, (5, 2): 0.0002971142058898994, (16, 6): 0.0013364942364892474, (6, 9): 0.0123242259885395, (19, 19): 0.01514540035730916, (17, 7): 0.00549401435888664, (7, 12): 0.20149139155762086, (20, 20): 0.012621191711567885, (18, 4): 1.484828615141926e-07, (21, 9): 0.3186443692923189, (17, 25): 0.00044559706740409196, (22, 6): 0.05360246148948504, (23, 7): 0.10868960311125049, (10, 8): 0.005048565774344062, (8, 24): 0.0016334599595176325, (11, 5): 0.0011880113749750548, (9, 21): 0.027469477862987146, (24, 27): 1.484828615141926e-07, (14, 18): 0.018115057587593013, (12, 6): 0.0011880113749750548, (10, 18): 0.28983869415856545, (25, 26): 1.484828615141926e-07, (15, 11): 0.24366052422765155, (13, 7): 0.002375874267088596, (11, 27): 0.0002971142058898994, (26, 17): 1.484828615141926e-07, (0, 4): 1.484828615141926e-07, (24, 1): 1.484828615141926e-07, (1, 1): 1.484828615141926e-07, (4, 10): 0.017372643280022047, (2, 
6): 1.484828615141926e-07, (5, 11): 0.07899303080841197, (3, 23): 0.0002971142058898994, (6, 0): 1.484828615141926e-07, (4, 16): 0.30854753470935375, (19, 24): 0.00148497709800344, (7, 5): 0.0013364942364892474, (5, 21): 0.11611374618696012, (20, 19): 0.02093623195636267, (16, 19): 0.003118288574659559, (6, 26): 1.484828615141926e-07, (21, 18): 0.05078128712071538, (17, 18): 0.012769674573082078, (22, 25): 0.0005940799289182846, (18, 25): 0.0007425627904324772, (14, 21): 0.0016334599595176325, (15, 16): 0.44025183287244257, (13, 0): 0.00014863134437570678, (11, 16): 0.7743382712793759, (26, 24): 1.484828615141926e-07, (0, 27): 1.484828615141926e-07, (24, 8): 0.05553273868916954, (12, 27): 0.00014863134437570678, (27, 21): 1.484828615141926e-07, (1, 26): 1.484828615141926e-07, (25, 5): 0.00014863134437570678, (13, 26): 0.00014863134437570678, (2, 1): 1.484828615141926e-07, (26, 2): 1.484828615141926e-07, (27, 11): 1.484828615141926e-07, (7, 2): 0.00014863134437570678, (20, 26): 0.0002971142058898994, (16, 10): 0.12828934083112392, (21, 27): 1.484828615141926e-07, (19, 7): 0.03682389813838128, (17, 11): 0.45495163616234763, (7, 24): 0.003415254297687944, (22, 16): 0.2528664616415315, (20, 0): 1.484828615141926e-07, (18, 16): 0.23445458681377163, (23, 21): 0.002375874267088596, (21, 5): 0.018115057587593013, (8, 6): 0.003118288574659559, (22, 10): 0.44812142453269477, (9, 7): 0.003860702882230522, (10, 4): 0.0007425627904324772, (15, 25): 0.0002971142058898994, (11, 9): 0.010690914511883382, (0, 18): 1.484828615141926e-07, (24, 15): 0.10779870594216534, (14, 6): 0.0008910456519466697, (12, 18): 0.11418346898727562, (1, 19): 1.484828615141926e-07, (25, 14): 0.006533394389485988, (15, 7): 0.002821322851631174, (13, 19): 0.006681877251000181, (2, 24): 1.484828615141926e-07, (26, 13): 1.484828615141926e-07, (0, 8): 1.484828615141926e-07, (3, 5): 1.484828615141926e-07, (27, 0): 1.484828615141926e-07, (4, 6): 0.0005940799289182846, (5, 7): 0.003118288574659559, (16, 1): 
0.0002971142058898994, (6, 20): 0.198521734327337, (19, 12): 0.7510264620216477, (17, 12): 0.7447901818380516, (7, 17): 0.5827953799260674, (20, 15): 0.42718534105919365, (18, 11): 0.5155326436601382, (23, 18): 0.03905114106109416, (21, 14): 0.5781924112191275, (8, 13): 0.41055526056960406, (22, 13): 0.6118980207828493, (9, 8): 0.005345531497372448, (23, 8): 0.20149139155762086, (10, 15): 0.9360361074683317, (8, 19): 0.2487089415191341, (11, 14): 0.9427178362364703, (9, 18): 0.35428025605572505, (24, 22): 0.00044559706740409196, (14, 9): 0.008760637312198876, (12, 9): 0.012621191711567885, (1, 20): 1.484828615141926e-07, (25, 23): 1.484828615141926e-07, (15, 12): 0.672627511142154, (13, 12): 0.41307946921534533, (2, 19): 0.00014863134437570678, (26, 20): 1.484828615141926e-07, (0, 15): 1.484828615141926e-07, (3, 10): 0.0013364942364892474, (27, 25): 1.484828615141926e-07, (1, 14): 0.0002971142058898994, (4, 13): 0.19377028275888283, (2, 13): 0.0010395285134608624, (5, 0): 1.484828615141926e-07, (3, 16): 0.0123242259885395, (6, 15): 0.6162040237667608, (19, 21): 0.006978842974028566, (17, 5): 0.00148497709800344, (7, 14): 0.5609683992834812, (20, 22): 0.004454634328287292, (18, 2): 0.00014863134437570678, (21, 23): 0.00148497709800344, (22, 4): 0.005345531497372448, (23, 1): 1.484828615141926e-07, (8, 26): 1.484828615141926e-07, (11, 7): 0.0026728399901169812, (9, 27): 1.484828615141926e-07, (14, 16): 0.5811620684494113, (12, 0): 1.484828615141926e-07, (10, 16): 0.7820593800781139, (25, 24): 1.484828615141926e-07, (15, 21): 0.0017819428210318253, (13, 5): 0.0010395285134608624, (0, 6): 1.484828615141926e-07, (24, 3): 0.00014863134437570678, (1, 7): 1.484828615141926e-07, (25, 2): 1.484828615141926e-07, (2, 4): 1.484828615141926e-07, (5, 9): 0.009503051619769842, (3, 25): 1.484828615141926e-07, (6, 6): 0.001930425682546018, (4, 18): 0.24039390127433932, (19, 26): 0.0002971142058898994, (7, 7): 0.0037122200207163293, (5, 19): 0.30260822024878603, (16, 21): 
0.0017819428210318253, (6, 24): 0.00549401435888664, (21, 16): 0.2589542589636134, (19, 0): 0.00014863134437570678, (17, 16): 0.25761791320998567, (18, 23): 0.0020789085440602105, (8, 1): 1.484828615141926e-07, (14, 27): 1.484828615141926e-07, (10, 27): 0.00014863134437570678, (15, 18): 0.008463671589170491, (11, 18): 0.20579739454153245, (24, 10): 0.11730160907907367, (12, 21): 0.002375874267088596, (27, 23): 1.484828615141926e-07, (1, 24): 1.484828615141926e-07, (25, 11): 0.0037122200207163293, (13, 24): 0.00014863134437570678, (26, 0): 1.484828615141926e-07, (27, 13): 1.484828615141926e-07, (4, 25): 1.484828615141926e-07, (16, 12): 0.724299546949093, (6, 19): 0.3192383007383756, (21, 25): 0.0008910456519466697, (19, 9): 0.20965794894090145, (17, 9): 0.07587489071661392, (7, 26): 0.00014863134437570678, (22, 22): 0.0016334599595176325, (20, 2): 0.00014863134437570678, (18, 14): 0.8470948734213303, (23, 23): 0.0005940799289182846, (21, 3): 0.0002971142058898994, (8, 8): 0.006384911527971796, (22, 8): 0.21604271198601174, (9, 5): 0.0011880113749750548, (23, 13): 0.5085539491689712, (10, 2): 1.484828615141926e-07, (15, 27): 1.484828615141926e-07, (11, 11): 0.05894784450399597, (0, 20): 1.484828615141926e-07, (24, 17): 0.02553920066330264, (14, 4): 0.0007425627904324772, (12, 12): 0.30364760027938537, (1, 17): 1.484828615141926e-07, (25, 12): 0.00549401435888664, (15, 1): 1.484828615141926e-07, (13, 17): 0.23623638115194193, (2, 22): 1.484828615141926e-07, (26, 11): 0.00014863134437570678, (0, 10): 1.484828615141926e-07, (3, 7): 0.00014863134437570678, (27, 2): 1.484828615141926e-07, (1, 11): 1.484828615141926e-07, (4, 0): 1.484828615141926e-07, (5, 5): 0.0008910456519466697, (16, 3): 0.00014863134437570678, (6, 10): 0.032666378015983884, (19, 14): 0.7280116184869478, (17, 2): 0.00014863134437570678, (7, 19): 0.29369924855793444, (20, 9): 0.2749904080071462, (18, 9): 0.13808920969106062, (16, 25): 0.00014863134437570678, (21, 12): 0.677972894156665, (8, 15): 
0.7826533115241707, (22, 3): 0.0002971142058898994, (9, 14): 0.729644929963604, (23, 10): 0.40298263463238027, (10, 13): 0.5312718269806427, (8, 21): 0.055087290104626964, (11, 0): 1.484828615141926e-07, (9, 16): 0.7762685484790605, (24, 24): 1.484828615141926e-07, (14, 15): 0.9611297110642302, (12, 11): 0.05657211871976889, (10, 23): 0.0013364942364892474, (25, 21): 1.484828615141926e-07, (15, 14): 0.9949838034894661, (13, 10): 0.018412023310621396, (2, 17): 0.0008910456519466697, (26, 18): 1.484828615141926e-07, (0, 1): 1.484828615141926e-07, (3, 12): 0.00549401435888664, (27, 27): 1.484828615141926e-07, (1, 12): 1.484828615141926e-07, (4, 15): 0.30394456600241376, (2, 11): 0.0002971142058898994, (5, 14): 0.44485480157938256, (3, 18): 0.007721257281599529, (6, 13): 0.3529439103020973, (4, 21): 0.06904467908696106, (19, 23): 0.0025243571286027887, (7, 8): 0.006384911527971796, (20, 16): 0.24752107862702055, (18, 0): 0.00014863134437570678, (16, 16): 0.3190898178768614, (21, 21): 0.00786974014311372, (22, 26): 0.0002971142058898994, (18, 26): 0.0002971142058898994, (23, 3): 0.00014863134437570678, (9, 25): 0.00014863134437570678, (14, 22): 0.0010395285134608624, (12, 2): 0.00014863134437570678, (15, 23): 0.00014863134437570678, (13, 3): 0.0002971142058898994, (0, 24): 1.484828615141926e-07, (24, 5): 0.006681877251000181, (12, 24): 0.0002971142058898994, (27, 16): 1.484828615141926e-07, (1, 5): 1.484828615141926e-07, (25, 0): 1.484828615141926e-07, (2, 2): 1.484828615141926e-07, (26, 7): 1.484828615141926e-07, (3, 27): 1.484828615141926e-07, (6, 4): 0.0013364942364892474, (7, 1): 1.484828615141926e-07, (5, 17): 0.48687545138789906, (16, 23): 0.0005940799289182846, (19, 2): 0.00014863134437570678, (17, 22): 0.0026728399901169812, (20, 5): 0.009651534481284033, (18, 21): 0.006236428666457603, (23, 24): 0.00044559706740409196, (8, 3): 0.00014863134437570678, (9, 2): 1.484828615141926e-07, (14, 25): 0.0002971142058898994, (10, 25): 0.00044559706740409196, (11, 20): 
0.015590848941851736, (24, 12): 0.15293749584247987, (14, 3): 0.00044559706740409196, (12, 23): 0.0007425627904324772, (25, 9): 0.001930425682546018, (13, 22): 0.0010395285134608624, (26, 14): 0.00014863134437570678, (3, 0): 1.484828615141926e-07, (27, 15): 1.484828615141926e-07, (4, 27): 1.484828615141926e-07, (5, 26): 0.00014863134437570678, (16, 14): 0.9865202803831571, (6, 17): 0.5571078448841121, (19, 11): 0.5572563277456264, (17, 15): 0.651097496222596, (7, 20): 0.17372509645446685, (22, 20): 0.009206085896741455, (20, 12): 0.7223692697494085, (18, 12): 0.7569657764822154, (23, 17): 0.10868960311125049, (21, 1): 0.00014863134437570678, (8, 10): 0.02553920066330264, (22, 14): 0.5259264439661318, (9, 11): 0.06949012767150364, (23, 15): 0.34641066439547286, (10, 0): 1.484828615141926e-07, (8, 16): 0.7498385991295341, (11, 13): 0.648721770438369, (0, 22): 1.484828615141926e-07, (24, 19): 0.0025243571286027887, (14, 10): 0.025093752078760065, (12, 14): 0.9862233146601288, (1, 23): 1.484828615141926e-07, (25, 18): 1.484828615141926e-07, (15, 3): 0.0007425627904324772, (13, 15): 0.9741962028774791, (2, 20): 0.00014863134437570678, (26, 9): 1.484828615141926e-07, (0, 12): 1.484828615141926e-07, (3, 9): 0.0007425627904324772, (27, 4): 1.484828615141926e-07, (1, 9): 1.484828615141926e-07, (4, 2): 0.00014863134437570678, (2, 14): 0.002375874267088596, (5, 3): 0.0005940799289182846, (16, 5): 0.0010395285134608624, (6, 8): 0.005642497220400833, (19, 16): 0.23757272690556966, (17, 0): 1.484828615141926e-07, (7, 13): 0.38085868826676555, (20, 11): 0.5802711712803262, (18, 7): 0.016333263249422702, (16, 27): 1.484828615141926e-07, (21, 10): 0.45346680754720575, (17, 26): 0.0002971142058898994, (22, 1): 0.00014863134437570678, (9, 12): 0.20891553463333049, (23, 4): 0.0043061514667731, (10, 11): 0.06592653899516303, (8, 23): 0.005790980081915025, (11, 2): 0.0002971142058898994, (9, 22): 0.009206085896741455, (24, 26): 0.00014863134437570678, (14, 13): 0.9629115054024006, (12, 
5): 0.0010395285134608624, (10, 21): 0.010542431650369189, (25, 27): 1.484828615141926e-07, (15, 8): 0.0047516000513156765, (13, 8): 0.006236428666457603, (11, 24): 0.00014863134437570678, (26, 16): 0.00014863134437570678, (0, 3): 1.484828615141926e-07, (24, 0): 1.484828615141926e-07, (3, 14): 0.013215123157624655, (1, 2): 1.484828615141926e-07, (4, 9): 0.0060879458049434105, (2, 9): 1.484828615141926e-07, (5, 12): 0.17877351374594938, (3, 20): 0.003415254297687944, (6, 3): 0.0005940799289182846, (4, 23): 0.011284845957940152, (19, 25): 0.0013364942364892474, (7, 10): 0.02939975506267165, (5, 22): 0.054344875797056, (20, 18): 0.040832935399264476, (16, 18): 0.006681877251000181, (21, 19): 0.023608923463618137, (17, 19): 0.006830360112514373, (22, 24): 0.00044559706740409196, (18, 24): 0.0010395285134608624, (14, 20): 0.0016334599595176325, (15, 17): 0.0665204704412198, (13, 1): 1.484828615141926e-07, (11, 17): 0.44916080456329416, (26, 27): 1.484828615141926e-07, (0, 26): 1.484828615141926e-07, (24, 7): 0.03058761795478519, (12, 26): 0.00014863134437570678, (27, 18): 1.484828615141926e-07, (1, 27): 1.484828615141926e-07, (25, 6): 0.00014863134437570678, (13, 27): 1.484828615141926e-07, (2, 0): 1.484828615141926e-07, (26, 5): 1.484828615141926e-07, (27, 8): 1.484828615141926e-07}, 2: {(7, 3): 0.0023531946705964454, (20, 25): 0.060506566288793, (16, 9): 0.6193503831399594, (19, 4): 0.1826958760183563, (17, 20): 0.40858642935609096, (7, 25): 0.00033631473008095754, (22, 19): 0.33497031152727563, (20, 7): 0.6776718280865323, (18, 19): 0.5805254442850363, (23, 26): 0.00016824140170466692, (21, 6): 0.4605210878243648, (8, 5): 0.0803392190371953, (9, 0): 1.6807332837629065e-07, (10, 7): 0.14605589043232495, (11, 22): 0.11496232468271117, (0, 17): 1.6807332837629065e-07, (24, 14): 0.028068413912168914, (14, 1): 1.6807332837629065e-07, (12, 17): 0.715152180314445, (25, 15): 0.0030254879841016082, (15, 4): 0.03142988047969473, (13, 20): 0.33093655164624464, (2, 27): 
1.6807332837629065e-07, (26, 12): 1.6807332837629065e-07, (3, 2): 0.00016824140170466692, (27, 1): 1.6807332837629065e-07, (4, 5): 0.017984014209591476, (5, 24): 0.0010086080435861202, (16, 0): 1.6807332837629065e-07, (6, 23): 0.030421440509436983, (19, 13): 0.8297781902670753, (17, 13): 0.8598633160464314, (7, 22): 0.14034139726753106, (20, 14): 0.7230516267481307, (18, 10): 0.7822134383365851, (23, 19): 0.14067754392428364, (21, 15): 0.5250612459208603, (8, 12): 0.48371520714029287, (22, 12): 0.5055647398292107, (9, 9): 0.3122804121964764, (23, 9): 0.2623626336687181, (10, 14): 0.25412704057827984, (8, 18): 0.6995213607754501, (11, 15): 0.4003508362656527, (9, 19): 0.6312835894546761, (24, 21): 0.01378218100018421, (14, 8): 0.202192382110006, (12, 8): 0.06941445269273641, (1, 21): 1.6807332837629065e-07, (25, 16): 0.0030254879841016082, (15, 13): 0.7712886719921261, (13, 13): 0.37009763715792043, (2, 18): 0.009916494447529524, (26, 23): 1.6807332837629065e-07, (0, 14): 0.00016824140170466692, (3, 11): 0.17664523619680983, (27, 6): 1.6807332837629065e-07, (1, 15): 0.002017048013843864, (4, 12): 0.5470788519381544, (2, 12): 0.023698507374385357, (5, 1): 1.6807332837629065e-07, (3, 17): 0.1937887156911915, (16, 7): 0.4176623890884107, (6, 14): 0.8141473707280803, (19, 18): 0.6801929280121767, (17, 6): 0.3995104696237713, (7, 15): 0.6996894341038263, (20, 21): 0.4985056600374065, (18, 5): 0.30942316561407945, (21, 8): 0.7255727266737751, (17, 24): 0.15412341019438688, (22, 7): 0.45850420788384927, (23, 6): 0.12824011762443813, (10, 9): 0.20488155536402666, (8, 25): 0.0005043880584572482, (11, 4): 0.010420714432658396, (9, 20): 0.4670759476310401, (14, 19): 0.44875595483802444, (12, 7): 0.0515986798848496, (10, 19): 0.6282582695439028, (25, 25): 0.00016824140170466692, (15, 10): 0.5435493120422523, (13, 6): 0.03798474028637006, (11, 26): 0.0005043880584572482, (0, 5): 1.6807332837629065e-07, (24, 2): 1.6807332837629065e-07, (1, 0): 1.6807332837629065e-07, (25, 3): 
1.6807332837629065e-07, (4, 11): 0.4561511812865812, (2, 7): 0.0021851213422201545, (5, 10): 0.5282546391600099, (3, 22): 0.009916494447529524, (6, 1): 1.6807332837629065e-07, (4, 17): 0.5168256528304221, (19, 27): 0.00033631473008095754, (7, 4): 0.020337040806859545, (5, 20): 0.2800103331482286, (16, 20): 0.31648224540588366, (6, 27): 1.6807332837629065e-07, (21, 17): 0.5042201532022004, (19, 1): 1.6807332837629065e-07, (17, 17): 0.6702766016379755, (18, 22): 0.37093800379980185, (8, 0): 1.6807332837629065e-07, (14, 26): 0.006555027880003711, (10, 26): 0.0005043880584572482, (15, 19): 0.41951119570054984, (11, 19): 0.6070810301684902, (26, 25): 1.6807332837629065e-07, (24, 9): 0.02689190061353488, (12, 20): 0.3827031367861422, (27, 20): 1.6807332837629065e-07, (1, 25): 1.6807332837629065e-07, (25, 4): 1.6807332837629065e-07, (13, 25): 0.010588787761034687, (26, 3): 1.6807332837629065e-07, (27, 10): 1.6807332837629065e-07, (4, 24): 0.0010086080435861202, (20, 27): 1.6807332837629065e-07, (16, 11): 0.7711205986637499, (6, 18): 0.67868026805679, (21, 26): 0.005378514581369676, (19, 6): 0.5250612459208603, (17, 10): 0.7581789523787755, (7, 27): 1.6807332837629065e-07, (22, 17): 0.3342980182137705, (20, 1): 0.00016824140170466692, (18, 17): 0.6963279675363006, (23, 20): 0.12471057772853603, (21, 4): 0.16017405001593335, (8, 7): 0.26303492698222325, (22, 11): 0.5652307714027939, (9, 6): 0.14924928367147447, (10, 5): 0.055464366437504284, (15, 24): 0.060002346303664134, (11, 8): 0.10773517156253068, (0, 19): 1.6807332837629065e-07, (24, 16): 0.027396120598663753, (14, 7): 0.13832451732701556, (12, 19): 0.5652307714027939, (1, 18): 0.00033631473008095754, (25, 13): 0.002689341327349027, (15, 6): 0.17798982282382017, (13, 18): 0.6573349553530011, (2, 25): 0.00016824140170466692, (26, 10): 1.6807332837629065e-07, (0, 9): 1.6807332837629065e-07, (3, 4): 0.0005043880584572482, (27, 3): 1.6807332837629065e-07, (4, 7): 0.09160013203840678, (5, 6): 0.0931127919937934, (16, 2): 
0.0008405347152098296, (6, 21): 0.22420998812730009, (19, 15): 0.7608681256327962, (17, 3): 0.03227024712157618, (7, 16): 0.7317914398236979, (20, 8): 0.7859110515608635, (18, 8): 0.6938068676106562, (16, 24): 0.10269297171124196, (21, 13): 0.6391830358883618, (8, 14): 0.48758089369294755, (22, 2): 0.002521267998972736, (9, 15): 0.4233768822532045, (23, 11): 0.24958906071211998, (10, 12): 0.19126761576554713, (8, 20): 0.4526216413906791, (11, 1): 1.6807332837629065e-07, (9, 17): 0.6654024751150631, (24, 23): 0.007395394521885164, (14, 14): 0.6665789884136971, (12, 10): 0.09781884518832953, (10, 22): 0.1398371772824022, (25, 22): 0.0005043880584572482, (15, 15): 0.8235594771171526, (13, 11): 0.21967200826114025, (2, 16): 0.023026214060880196, (26, 21): 1.6807332837629065e-07, (0, 0): 1.6807332837629065e-07, (3, 13): 0.24958906071211998, (27, 24): 1.6807332837629065e-07, (1, 13): 0.0008405347152098296, (4, 14): 0.6428806491126401, (2, 10): 0.012773741029926466, (5, 15): 0.8148196640415855, (3, 19): 0.08874288545600983, (6, 12): 0.753809045840992, (4, 20): 0.17160303634552113, (19, 20): 0.5865760841065828, (17, 4): 0.11462617802595859, (7, 9): 0.4961526334401384, (20, 23): 0.2867332662832802, (18, 3): 0.04857335997407637, (21, 22): 0.3102635322559609, (22, 5): 0.20168816212487714, (23, 0): 1.6807332837629065e-07, (8, 27): 1.6807332837629065e-07, (11, 6): 0.06454032616982398, (9, 26): 0.00033631473008095754, (14, 17): 0.726076946658904, (12, 1): 1.6807332837629065e-07, (10, 17): 0.6712850416082332, (15, 20): 0.27698501323745534, (13, 4): 0.005714661238122258, (0, 7): 1.6807332837629065e-07, (24, 4): 0.0023531946705964454, (27, 17): 1.6807332837629065e-07, (1, 6): 1.6807332837629065e-07, (25, 1): 1.6807332837629065e-07, (2, 5): 0.00016824140170466692, (26, 6): 1.6807332837629065e-07, (5, 8): 0.2875736329251617, (3, 24): 0.00033631473008095754, (6, 7): 0.2474041074432282, (4, 19): 0.27345547334155323, (7, 6): 0.17462835625629436, (5, 18): 0.5811977375985414, (16, 22): 
0.18857844251152647, (6, 25): 0.00033631473008095754, (19, 3): 0.05781739303477235, (17, 23): 0.2189997149476351, (20, 4): 0.18252780268998, (18, 20): 0.5126238196210149, (23, 25): 0.0043700746111119335, (8, 2): 0.0005043880584572482, (9, 3): 0.002689341327349027, (14, 24): 0.03596786034585458, (10, 24): 0.0042020012827356425, (11, 21): 0.24807640075673337, (24, 11): 0.03210217379319989, (14, 2): 1.6807332837629065e-07, (12, 22): 0.09529774526268517, (27, 22): 1.6807332837629065e-07, (25, 10): 0.0008405347152098296, (13, 23): 0.03344676042021021, (26, 1): 1.6807332837629065e-07, (3, 1): 1.6807332837629065e-07, (27, 12): 1.6807332837629065e-07, (4, 26): 1.6807332837629065e-07, (5, 27): 1.6807332837629065e-07, (16, 13): 0.8571741427924107, (6, 16): 0.81280278410107, (21, 24): 0.11395388471245343, (19, 8): 0.7591873923490332, (17, 8): 0.6299390028276658, (7, 21): 0.26790905350513566, (22, 23): 0.12202140447451539, (20, 3): 0.059834272975287844, (18, 15): 0.786247198217616, (23, 22): 0.07546509251428288, (21, 2): 0.0030254879841016082, (8, 9): 0.42152807564106537, (22, 9): 0.604055710257717, (9, 4): 0.020337040806859545, (23, 12): 0.22017622824626912, (10, 3): 0.002689341327349027, (15, 26): 0.009748421119153233, (11, 10): 0.1252147977136649, (0, 21): 1.6807332837629065e-07, (24, 18): 0.025211167329771973, (14, 5): 0.03832088694312264, (12, 13): 0.22118466821652685, (1, 16): 0.002017048013843864, (25, 19): 0.0018489746854675733, (15, 0): 1.6807332837629065e-07, (13, 16): 0.6976725541633109, (2, 23): 0.00033631473008095754, (26, 8): 1.6807332837629065e-07, (0, 11): 1.6807332837629065e-07, (3, 6): 0.011765301059668722, (27, 5): 1.6807332837629065e-07, (1, 10): 0.0008405347152098296, (4, 1): 1.6807332837629065e-07, (5, 4): 0.008739981148895489, (16, 4): 0.06924637936436012, (6, 11): 0.6964960408646769, (19, 17): 0.7037231939848574, (17, 1): 1.6807332837629065e-07, (7, 18): 0.6980087008200635, (20, 10): 0.8586868027477973, (18, 6): 0.4811941072146485, (16, 26): 
0.014622547642065662, (21, 11): 0.7684314254097292, (17, 27): 0.0013447547003387014, (22, 0): 1.6807332837629065e-07, (9, 13): 0.31244848552485266, (23, 5): 0.07361628590214367, (10, 10): 0.20740265528967103, (8, 22): 0.15059387029848478, (11, 3): 0.001512828028714992, (9, 23): 0.04554804006330314, (24, 25): 0.0008405347152098296, (14, 12): 0.5025394199184374, (12, 4): 0.006050807894874839, (10, 20): 0.45631925461495754, (25, 20): 0.0013447547003387014, (15, 9): 0.4516132014204214, (13, 9): 0.12958470425144847, (11, 25): 0.0031935613124778987, (26, 19): 1.6807332837629065e-07, (0, 2): 1.6807332837629065e-07, (3, 15): 0.26303492698222325, (27, 26): 1.6807332837629065e-07, (1, 3): 1.6807332837629065e-07, (4, 8): 0.16538432319559837, (2, 8): 0.005042367924617095, (5, 13): 0.7788519717690593, (3, 21): 0.02453887401626681, (6, 2): 0.0005043880584572482, (4, 22): 0.03798474028637006, (19, 22): 0.42304073559645194, (7, 11): 0.6371661559478462, (5, 23): 0.0193286008366018, (20, 17): 0.6432167957693927, (18, 1): 1.6807332837629065e-07, (16, 17): 0.664730181801558, (21, 20): 0.46774824094454526, (22, 27): 1.6807332837629065e-07, (18, 27): 0.0008405347152098296, (23, 2): 0.0005043880584572482, (9, 24): 0.0033616346408541892, (14, 23): 0.04857335997407637, (12, 3): 0.0006724613868335389, (15, 22): 0.11781957126510811, (13, 2): 0.00016824140170466692, (26, 26): 1.6807332837629065e-07, (0, 25): 1.6807332837629065e-07, (24, 6): 0.012437594373173886, (12, 25): 0.00638695455162742, (27, 19): 1.6807332837629065e-07, (1, 4): 1.6807332837629065e-07, (25, 7): 0.00033631473008095754, (2, 3): 1.6807332837629065e-07, (26, 4): 1.6807332837629065e-07, (3, 26): 1.6807332837629065e-07, (27, 9): 1.6807332837629065e-07, (6, 5): 0.05916197966178268, (7, 0): 1.6807332837629065e-07, (5, 16): 0.7837260982919717, (20, 24): 0.16521624986722208, (16, 8): 0.526573905876247, (19, 5): 0.34202939131907983, (17, 21): 0.34034865803531694, (22, 18): 0.3408528780204458, (20, 6): 0.5257335392343655, (18, 18): 
0.6442252357396504, (23, 27): 1.6807332837629065e-07, (21, 7): 0.610442496736016, (8, 4): 0.020505114135235834, (9, 1): 1.6807332837629065e-07, (10, 6): 0.10437370499500487, (11, 23): 0.03563171368910199, (0, 16): 1.6807332837629065e-07, (24, 13): 0.02941300053917924, (14, 0): 1.6807332837629065e-07, (12, 16): 0.6282582695439028, (25, 8): 0.00033631473008095754, (15, 5): 0.0931127919937934, (13, 21): 0.1736199162860366, (2, 26): 1.6807332837629065e-07, (26, 15): 1.6807332837629065e-07, (3, 3): 0.00016824140170466692, (27, 14): 1.6807332837629065e-07, (4, 4): 0.0031935613124778987, (5, 25): 0.00033631473008095754, (16, 15): 0.8297781902670753, (6, 22): 0.1089116848611647, (19, 10): 0.8317950702075908, (17, 14): 0.8477620364033385, (7, 23): 0.041178133525519585, (22, 21): 0.25177401398101174, (20, 13): 0.7795242650825644, (18, 13): 0.840534883283158, (23, 16): 0.14101369058103622, (21, 0): 1.6807332837629065e-07, (8, 11): 0.48976584696183934, (22, 15): 0.3336257249002653, (9, 10): 0.3337937982286416, (23, 14): 0.1593336833740519, (10, 1): 1.6807332837629065e-07, (8, 17): 0.6948153075809139, (11, 12): 0.128744337609567, (0, 23): 1.6807332837629065e-07, (24, 20): 0.017143647567710023, (14, 11): 0.42001541568567874, (12, 15): 0.47480732073634946, (1, 22): 1.6807332837629065e-07, (25, 17): 0.002689341327349027, (15, 2): 0.00033631473008095754, (13, 14): 0.46774824094454526, (2, 21): 0.0006724613868335389, (26, 22): 1.6807332837629065e-07, (0, 13): 0.00016824140170466692, (3, 8): 0.05445592646724654, (27, 7): 1.6807332837629065e-07, (1, 8): 0.00033631473008095754, (4, 3): 0.0006724613868335389, (2, 15): 0.02722804727028746, (5, 2): 0.00033631473008095754, (16, 6): 0.2937923460750844, (6, 9): 0.4869086003794424, (19, 19): 0.644561382396403, (17, 7): 0.5405239921314792, (7, 12): 0.6590156886367641, (20, 20): 0.5716175578810929, (18, 4): 0.15866139006054675, (21, 9): 0.7793561917541881, (17, 25): 0.0766416058129169, (22, 6): 0.32942389169085806, (23, 7): 0.18790614919802132, 
(10, 8): 0.18235972936160372, (8, 24): 0.0021851213422201545, (11, 5): 0.03563171368910199, (9, 21): 0.2911031728210638, (24, 27): 1.6807332837629065e-07, (14, 18): 0.6050641502279748, (12, 6): 0.0391612535850041, (10, 18): 0.6996894341038263, (25, 26): 0.00016824140170466692, (15, 11): 0.6311155161262998, (13, 7): 0.06689335276709205, (11, 27): 1.6807332837629065e-07, (26, 17): 1.6807332837629065e-07, (0, 4): 1.6807332837629065e-07, (24, 1): 1.6807332837629065e-07, (1, 1): 1.6807332837629065e-07, (4, 10): 0.3556433309175594, (2, 6): 0.0010086080435861202, (5, 11): 0.6401914758586195, (3, 23): 0.002017048013843864, (6, 0): 1.6807332837629065e-07, (4, 16): 0.5904417706592374, (19, 24): 0.1971501822587173, (7, 5): 0.07680967914129319, (5, 21): 0.16353551658345916, (20, 19): 0.6134678166467893, (16, 19): 0.4364866018665552, (6, 26): 0.00016824140170466692, (21, 18): 0.5171617994871748, (17, 18): 0.5882568173903456, (22, 25): 0.0193286008366018, (18, 25): 0.08487719890335514, (14, 21): 0.16000597668755706, (15, 16): 0.7855749049041109, (13, 0): 1.6807332837629065e-07, (11, 16): 0.5680880179851908, (26, 24): 1.6807332837629065e-07, (0, 27): 1.6807332837629065e-07, (24, 8): 0.022690067404127615, (12, 27): 0.00033631473008095754, (27, 21): 1.6807332837629065e-07, (1, 26): 1.6807332837629065e-07, (25, 5): 1.6807332837629065e-07, (13, 26): 0.0043700746111119335, (2, 1): 1.6807332837629065e-07, (26, 2): 1.6807332837629065e-07, (27, 11): 1.6807332837629065e-07, (7, 2): 0.00033631473008095754, (20, 26): 0.00907612780564807, (16, 10): 0.6996894341038263, (21, 27): 1.6807332837629065e-07, (19, 7): 0.6596879819502692, (17, 11): 0.8033906777119977, (7, 24): 0.0018489746854675733, (22, 16): 0.32522205848145075, (20, 0): 1.6807332837629065e-07, (18, 16): 0.7423800595114042, (23, 21): 0.10118031175585535, (21, 5): 0.2946327127169659, (8, 6): 0.17143496301714484, (22, 10): 0.6045599302428458, (9, 7): 0.20941953523018653, (10, 4): 0.01630328092582857, (15, 25): 0.03142988047969473, 
(11, 9): 0.11966837787724731, (0, 18): 1.6807332837629065e-07, (24, 15): 0.027396120598663753, (14, 6): 0.0838687589330974, (12, 18): 0.6970002608498057, (1, 19): 1.6807332837629065e-07, (25, 14): 0.0028574146557253173, (15, 7): 0.26992593344565113, (13, 19): 0.5033797865603189, (2, 24): 0.00033631473008095754, (26, 13): 1.6807332837629065e-07, (0, 8): 1.6807332837629065e-07, (3, 5): 0.0033616346408541892, (27, 0): 1.6807332837629065e-07, (4, 6): 0.044707673421421686, (5, 7): 0.17967055610758306, (16, 1): 1.6807332837629065e-07, (6, 20): 0.3627024107093636, (19, 12): 0.8438963498506837, (17, 12): 0.838854149999395, (7, 17): 0.7462457460640589, (20, 15): 0.6808652213256818, (18, 11): 0.8131389307578225, (23, 18): 0.14706433040258268, (21, 14): 0.5749790244486187, (8, 13): 0.4773284206619938, (22, 13): 0.4304359620450088, (9, 8): 0.27093437341590887, (23, 8): 0.23076484793397542, (10, 15): 0.38169469681588447, (8, 19): 0.6139720366319181, (11, 14): 0.2527824539512695, (9, 18): 0.6991852141186975, (24, 22): 0.010084567775905814, (14, 9): 0.26958978678889856, (12, 9): 0.08538141888848402, (1, 20): 1.6807332837629065e-07, (25, 23): 0.00016824140170466692, (15, 12): 0.7126310803888007, (13, 12): 0.28370794637250696, (2, 19): 0.0042020012827356425, (26, 20): 1.6807332837629065e-07, (0, 15): 0.00016824140170466692, (3, 10): 0.12739975098255668, (27, 25): 1.6807332837629065e-07, (1, 14): 0.001176681371962411, (4, 13): 0.6111147900495212, (2, 13): 0.02689190061353488, (5, 0): 1.6807332837629065e-07, (3, 16): 0.24152154095005804, (6, 15): 0.8222148904901423, (19, 21): 0.5141364795764015, (17, 5): 0.2475721807716045, (7, 14): 0.6699404549812229, (20, 22): 0.3986701029818898, (18, 2): 0.001512828028714992, (21, 23): 0.2099237552153154, (22, 4): 0.10202067839773679, (23, 1): 1.6807332837629065e-07, (8, 26): 1.6807332837629065e-07, (11, 7): 0.08487719890335514, (9, 27): 1.6807332837629065e-07, (14, 16): 0.7596916123341622, (12, 0): 1.6807332837629065e-07, (10, 16): 
0.5353137189518141, (25, 24): 0.00016824140170466692, (15, 21): 0.17731752951031501, (13, 5): 0.01865630752309664, (0, 6): 1.6807332837629065e-07, (24, 3): 0.00016824140170466692, (1, 7): 0.00033631473008095754, (25, 2): 1.6807332837629065e-07, (2, 4): 1.6807332837629065e-07, (5, 9): 0.41043523596823017, (3, 25): 0.00016824140170466692, (6, 6): 0.14387093716343316, (4, 18): 0.39446826977248256, (19, 26): 0.012269521044797595, (7, 7): 0.28202721308874407, (5, 19): 0.4274106421342355, (16, 21): 0.23816007438253223, (6, 24): 0.001512828028714992, (21, 16): 0.5030436399035663, (19, 0): 1.6807332837629065e-07, (17, 16): 0.7465818927208114, (18, 23): 0.2853886796562699, (8, 1): 0.00016824140170466692, (14, 27): 0.0006724613868335389, (10, 27): 1.6807332837629065e-07, (15, 18): 0.5677518713284382, (11, 18): 0.7106142004482853, (24, 10): 0.028908780554050367, (12, 21): 0.21110026851394942, (27, 23): 1.6807332837629065e-07, (1, 24): 1.6807332837629065e-07, (25, 11): 0.0010086080435861202, (13, 24): 0.01882438085147293, (26, 0): 1.6807332837629065e-07, (27, 13): 1.6807332837629065e-07, (4, 25): 0.00033631473008095754, (16, 12): 0.8242317704306578, (6, 19): 0.5336329856680512, (21, 25): 0.0373124469728649, (19, 9): 0.8086009508916627, (17, 9): 0.6985129208051923, (7, 26): 0.00016824140170466692, (22, 22): 0.18891458916827905, (20, 2): 0.0031935613124778987, (18, 14): 0.8212064505198845, (23, 23): 0.044875746749797975, (21, 3): 0.05143060655647331, (8, 8): 0.3474077378271212, (22, 8): 0.5467427052814019, (9, 5): 0.07445665254402513, (23, 13): 0.18656156257101097, (10, 2): 0.00033631473008095754, (15, 27): 0.0010086080435861202, (11, 11): 0.12555094437041747, (0, 20): 1.6807332837629065e-07, (24, 17): 0.02705997394191117, (14, 4): 0.012269521044797595, (12, 12): 0.14992157698497963, (1, 17): 0.001512828028714992, (25, 12): 0.002017048013843864, (15, 1): 1.6807332837629065e-07, (13, 17): 0.7354890530479763, (2, 22): 0.00016824140170466692, (26, 11): 1.6807332837629065e-07, (0, 
10): 1.6807332837629065e-07, (3, 7): 0.0265557539567823, (27, 2): 1.6807332837629065e-07, (1, 11): 0.0006724613868335389, (4, 0): 1.6807332837629065e-07, (5, 5): 0.03748052030124119, (16, 3): 0.01647135425420486, (6, 10): 0.6069129568401139, (19, 14): 0.801878017756611, (17, 2): 0.0010086080435861202, (7, 19): 0.588424890718722, (20, 9): 0.8420475432385446, (18, 9): 0.7504475792734662, (16, 25): 0.05479207312399912, (21, 12): 0.7159925469563265, (8, 15): 0.5393474788328451, (22, 3): 0.031093733822942147, (9, 14): 0.33278535825838385, (23, 10): 0.26606024689299645, (10, 13): 0.19328449570606263, (8, 21): 0.2874055595967854, (11, 0): 1.6807332837629065e-07, (9, 16): 0.5492638052070462, (24, 24): 0.0031935613124778987, (14, 15): 0.732463733137203, (12, 11): 0.11714727795160296, (10, 23): 0.04168235351064846, (25, 21): 0.0010086080435861202, (15, 14): 0.8149877373699618, (13, 10): 0.17227532965902628, (2, 17): 0.017647867552838894, (26, 18): 1.6807332837629065e-07, (0, 1): 1.6807332837629065e-07, (3, 12): 0.21950393493276396, (27, 27): 1.6807332837629065e-07, (1, 12): 0.0010086080435861202, (4, 15): 0.6383426692464803, (2, 11): 0.01916052750822551, (5, 14): 0.8126347107726937, (3, 18): 0.1396691039540259, (6, 13): 0.7934743513377965, (4, 21): 0.09059169206814903, (19, 23): 0.3114400455545949, (7, 8): 0.3911068032049567, (20, 16): 0.6531331221435939, (18, 0): 1.6807332837629065e-07, (16, 16): 0.765406105498956, (21, 21): 0.3990062496386424, (22, 26): 0.0016809013570912826, (18, 26): 0.014118327656936791, (23, 3): 0.00890805447727178, (9, 25): 0.0005043880584572482, (14, 22): 0.08286031896283966, (12, 2): 1.6807332837629065e-07, (15, 23): 0.08437297891822627, (13, 3): 0.0008405347152098296, (0, 24): 1.6807332837629065e-07, (24, 5): 0.006050807894874839, (12, 24): 0.01142915440291614, (27, 16): 1.6807332837629065e-07, (1, 5): 1.6807332837629065e-07, (25, 0): 1.6807332837629065e-07, (2, 2): 1.6807332837629065e-07, (26, 7): 1.6807332837629065e-07, (3, 27): 
1.6807332837629065e-07, (6, 4): 0.014454474313689373, (7, 1): 1.6807332837629065e-07, (5, 17): 0.7022105340294708, (16, 23): 0.14303057052155171, (19, 2): 0.0023531946705964454, (17, 22): 0.2783295998644657, (20, 5): 0.34152517133395094, (18, 21): 0.4474113682110141, (23, 24): 0.01865630752309664, (8, 3): 0.0023531946705964454, (9, 2): 0.0005043880584572482, (14, 25): 0.019832820821730674, (10, 25): 0.0013447547003387014, (11, 20): 0.4257299088504726, (24, 12): 0.03159795380807102, (14, 3): 0.0023531946705964454, (12, 23): 0.02907685388242666, (25, 9): 0.0005043880584572482, (13, 22): 0.07899463241018498, (26, 14): 1.6807332837629065e-07, (3, 0): 1.6807332837629065e-07, (27, 15): 1.6807332837629065e-07, (4, 27): 1.6807332837629065e-07, (5, 26): 1.6807332837629065e-07, (16, 14): 0.866586249181483, (6, 17): 0.7736416985893942, (19, 11): 0.8427198365520497, (17, 15): 0.8049033376673843, (7, 20): 0.4185027557302921, (22, 20): 0.30185986583714636, (20, 12): 0.8274251636698072, (18, 12): 0.832467363521096, (23, 17): 0.14370286383505687, (21, 1): 0.00016824140170466692, (8, 10): 0.4741350274228443, (22, 14): 0.3739633237105751, (9, 11): 0.33597875149753337, (23, 15): 0.14622396376070124, (10, 0): 1.6807332837629065e-07, (8, 16): 0.6215353364088512, (11, 13): 0.16202285662807256, (0, 22): 1.6807332837629065e-07, (24, 19): 0.02168162743386987, (14, 10): 0.34169324466232726, (12, 14): 0.3354745315124045, (1, 23): 1.6807332837629065e-07, (25, 18): 0.0021851213422201545, (15, 3): 0.007227321193508874, (13, 15): 0.595988190495655, (2, 20): 0.0016809013570912826, (26, 9): 1.6807332837629065e-07, (0, 12): 0.00016824140170466692, (3, 9): 0.08655793218711806, (27, 4): 1.6807332837629065e-07, (1, 9): 0.0006724613868335389, (4, 2): 0.00033631473008095754, (2, 14): 0.028404560568921496, (5, 3): 0.0010086080435861202, (16, 5): 0.16740120313611384, (6, 8): 0.371610297113307, (19, 16): 0.7339763930925897, (17, 0): 1.6807332837629065e-07, (7, 13): 0.664225961816429, (20, 11): 
0.8595271693896788, (18, 7): 0.6077533234819954, (16, 27): 0.001176681371962411, (21, 10): 0.7897767381135181, (17, 26): 0.015967134269075988, (22, 1): 1.6807332837629065e-07, (9, 12): 0.3184991253463991, (23, 4): 0.03613593367423087, (10, 11): 0.19782247557222246, (8, 23): 0.045043820078174264, (11, 2): 1.6807332837629065e-07, (9, 22): 0.15462763017951578, (24, 26): 0.00016824140170466692, (14, 13): 0.5848953508228198, (12, 5): 0.01916052750822551, (10, 21): 0.27547235328206876, (25, 27): 1.6807332837629065e-07, (15, 8): 0.3618620440674822, (13, 8): 0.09344893865054597, (11, 24): 0.006891174536756292, (26, 16): 1.6807332837629065e-07, (0, 3): 1.6807332837629065e-07, (24, 0): 1.6807332837629065e-07, (3, 14): 0.26656446687812535, (1, 2): 1.6807332837629065e-07, (4, 9): 0.2490848407269911, (2, 9): 0.008235761163766618, (5, 12): 0.7213708934643679, (3, 20): 0.05143060655647331, (6, 3): 0.0021851213422201545, (4, 23): 0.008739981148895489, (19, 25): 0.07529701918590657, (7, 10): 0.5852314974795725, (5, 22): 0.0731120659170148, (20, 18): 0.635317349335707, (16, 18): 0.5526252717745721, (21, 19): 0.5065731797994684, (17, 19): 0.49245502021586, (22, 24): 0.0623553729009322, (18, 24): 0.1936206423628152, (14, 20): 0.28236335974549664, (15, 17): 0.6966641141930532, (13, 1): 1.6807332837629065e-07, (11, 17): 0.6860754945053468, (26, 27): 1.6807332837629065e-07, (0, 26): 1.6807332837629065e-07, (24, 7): 0.017815940881215186, (12, 26): 0.0018489746854675733, (27, 18): 1.6807332837629065e-07, (1, 27): 1.6807332837629065e-07, (25, 6): 0.00033631473008095754, (13, 27): 0.0005043880584572482, (2, 0): 1.6807332837629065e-07, (26, 5): 1.6807332837629065e-07, (27, 8): 1.6807332837629065e-07}, 3: {(7, 3): 0.024001009668259323, (20, 25): 0.0001634343349904258, (16, 9): 0.19396618721574507, (19, 4): 0.15331169229804675, (17, 20): 0.5459786010412776, (7, 25): 1.632710639264993e-07, (22, 19): 0.337154910279285, (20, 7): 0.44017895161690607, (18, 19): 0.7125150862463069, (23, 26): 
0.0001634343349904258, (21, 6): 0.42254567671284415, (8, 5): 0.13812748335288233, (9, 0): 1.632710639264993e-07, (10, 7): 0.11184084206071594, (11, 22): 0.03216456286458429, (0, 17): 1.632710639264993e-07, (24, 14): 0.37176837583170286, (14, 1): 0.0003267053989169251, (12, 17): 0.7856605228853786, (25, 15): 0.039511760741276755, (15, 4): 0.015184372216228361, (13, 20): 0.2560091915078148, (2, 27): 1.632710639264993e-07, (26, 12): 0.002449229229961416, (3, 2): 1.632710639264993e-07, (27, 1): 1.632710639264993e-07, (4, 5): 0.017633438175125853, (5, 24): 0.0009797896546229223, (16, 0): 1.632710639264993e-07, (6, 23): 0.01959269094224384, (19, 13): 0.22172226808324996, (17, 13): 0.13943365186429432, (7, 22): 0.10579981269543547, (20, 14): 0.5482643959362485, (18, 10): 0.12767813526158636, (23, 19): 0.1678428169875052, (21, 15): 0.8419890399400208, (8, 12): 0.4948747580322833, (22, 12): 0.8746432527253207, (9, 9): 0.2867041515259967, (23, 9): 0.6819833972920515, (10, 14): 0.6302264700273512, (8, 18): 0.7092496649677769, (11, 15): 0.8767657765563652, (9, 19): 0.5371619635892466, (24, 21): 0.006204463700270899, (14, 8): 0.21143619105588052, (12, 8): 0.15184225272270827, (1, 21): 1.632710639264993e-07, (25, 16): 0.031184936481025292, (15, 13): 0.5623057074339275, (13, 13): 0.9115425131727095, (2, 18): 0.0003267053989169251, (26, 23): 1.632710639264993e-07, (0, 14): 1.632710639264993e-07, (3, 11): 0.027919515202495308, (27, 6): 1.632710639264993e-07, (1, 15): 1.632710639264993e-07, (4, 12): 0.336175283895726, (2, 12): 0.0003267053989169251, (5, 1): 1.632710639264993e-07, (3, 17): 0.030531852225319296, (16, 7): 0.08441130332106406, (6, 14): 0.910726157853077, (19, 18): 0.7774969696890536, (17, 6): 0.10302420460868497, (7, 15): 0.8414992267482413, (20, 21): 0.2514376017178728, (18, 5): 0.1479237471884723, (21, 8): 0.6212465615113938, (17, 24): 0.008980071787021386, (22, 7): 0.5466316852969836, (23, 6): 0.29111247025201215, (10, 9): 0.1670264616678727, (8, 25): 
1.632710639264993e-07, (11, 4): 0.01779670923905235, (9, 20): 0.3645844490189369, (14, 19): 0.5587137440275446, (12, 7): 0.06808419692841412, (10, 19): 0.4813232597263839, (25, 25): 1.632710639264993e-07, (15, 10): 0.4300561456534631, (13, 6): 0.03428708669562878, (11, 26): 1.632710639264993e-07, (0, 5): 1.632710639264993e-07, (24, 2): 0.002122687102108417, (1, 0): 1.632710639264993e-07, (25, 3): 0.0006532475267699237, (4, 11): 0.29454116259446866, (2, 7): 1.632710639264993e-07, (5, 10): 0.5812451508494014, (3, 22): 0.002122687102108417, (6, 1): 0.0003267053989169251, (4, 17): 0.2586215285306388, (19, 27): 1.632710639264993e-07, (7, 4): 0.07428849735762111, (5, 20): 0.21813030467686698, (16, 20): 0.5275289708175832, (6, 27): 1.632710639264993e-07, (21, 17): 0.812273706305398, (19, 1): 0.0006532475267699237, (17, 17): 0.6525946057852816, (18, 22): 0.18074123103769865, (8, 0): 1.632710639264993e-07, (14, 26): 1.632710639264993e-07, (10, 26): 1.632710639264993e-07, (15, 19): 0.6532476900409876, (11, 19): 0.41160651542976867, (26, 25): 1.632710639264993e-07, (24, 9): 0.3278484596354745, (12, 20): 0.20327263785955554, (27, 20): 1.632710639264993e-07, (1, 25): 1.632710639264993e-07, (25, 4): 0.003102313485667413, (13, 25): 0.0001634343349904258, (26, 3): 1.632710639264993e-07, (27, 10): 1.632710639264993e-07, (4, 24): 0.0001634343349904258, (20, 27): 1.632710639264993e-07, (16, 11): 0.2938880783387626, (6, 18): 0.6514517083377962, (21, 26): 0.0001634343349904258, (19, 6): 0.26939741874978773, (17, 10): 0.13061701441226337, (7, 27): 1.632710639264993e-07, (22, 17): 0.678554704949595, (20, 1): 0.0009797896546229223, (18, 17): 0.6710442360089761, (23, 20): 0.08457457438499055, (21, 4): 0.18253721274089013, (8, 7): 0.31054172685926557, (22, 11): 0.8545609118623613, (9, 6): 0.16049561911081273, (10, 5): 0.06106354117957466, (15, 24): 0.005224837316711904, (11, 8): 0.10710598120684746, (0, 19): 1.632710639264993e-07, (24, 16): 0.23478395319736992, (14, 7): 0.0986158858826695, 
(12, 19): 0.38727912690472027, (1, 18): 1.632710639264993e-07, (25, 13): 0.05894101734853017, (15, 6): 0.045226247978704226, (13, 18): 0.6444310525889567, (2, 25): 1.632710639264993e-07, (26, 10): 0.0016328739103289194, (0, 9): 1.632710639264993e-07, (3, 4): 0.0003267053989169251, (27, 3): 1.632710639264993e-07, (4, 7): 0.07004344969553213, (5, 6): 0.12408617185520339, (16, 2): 0.0044084819970794074, (6, 21): 0.1931498318961126, (19, 15): 0.4596082082241595, (17, 3): 0.03363400243992278, (7, 16): 0.8509689484559783, (20, 8): 0.4733229775939854, (18, 8): 0.17698599656738917, (16, 24): 0.007347361147756394, (21, 13): 0.7358628483877963, (8, 14): 0.5851636563836374, (22, 2): 0.014204745832669365, (9, 15): 0.6442677815250302, (23, 11): 0.8142329590725159, (10, 12): 0.35446164305549394, (8, 20): 0.3931568852060743, (11, 1): 0.0001634343349904258, (9, 17): 0.7817420173511426, (24, 23): 0.0003267053989169251, (14, 14): 0.8023141714058815, (12, 10): 0.47152699589079394, (10, 22): 0.06220643862706016, (25, 22): 0.0003267053989169251, (15, 15): 0.6037765576712584, (13, 11): 0.7534961232918582, (2, 16): 0.0003267053989169251, (26, 21): 0.0003267053989169251, (0, 0): 1.632710639264993e-07, (3, 13): 0.04098120031661525, (27, 24): 1.632710639264993e-07, (1, 13): 1.632710639264993e-07, (4, 14): 0.37487052604630633, (2, 10): 0.0001634343349904258, (5, 15): 0.7819052884150691, (3, 19): 0.013714932640889868, (6, 12): 0.8668062416568487, (4, 20): 0.07755391863615109, (19, 20): 0.5005892452697108, (17, 4): 0.059594101604236165, (7, 9): 0.6117768398036568, (20, 23): 0.036246339462746774, (18, 3): 0.05698176458141218, (21, 22): 0.0751048526772536, (22, 5): 0.26188694980916877, (23, 0): 1.632710639264993e-07, (8, 27): 1.632710639264993e-07, (11, 6): 0.04457316372299823, (9, 26): 1.632710639264993e-07, (14, 17): 0.799865105446984, (12, 1): 0.0001634343349904258, (10, 17): 0.795293515657042, (15, 20): 0.46238381631090997, (13, 4): 0.005877921572417901, (0, 7): 1.632710639264993e-07, (24, 
4): 0.03363400243992278, (27, 17): 1.632710639264993e-07, (1, 6): 1.632710639264993e-07, (25, 1): 1.632710639264993e-07, (2, 5): 1.632710639264993e-07, (26, 6): 0.0003267053989169251, (5, 8): 0.32621574899620953, (3, 24): 1.632710639264993e-07, (6, 7): 0.3373181813432115, (4, 19): 0.12963738802870436, (7, 6): 0.25339685448499083, (5, 18): 0.4834457835574284, (16, 22): 0.15445458974553225, (6, 25): 1.632710639264993e-07, (19, 3): 0.07771718970007759, (17, 23): 0.06041045692386866, (20, 4): 0.18041468890984563, (18, 20): 0.5358557950778347, (23, 25): 0.0001634343349904258, (8, 2): 0.0014696028464024201, (9, 3): 0.021551943709361834, (14, 24): 0.0026125002938879155, (10, 24): 0.0004899764628434243, (11, 21): 0.10449364418402347, (24, 11): 0.4218925924571381, (14, 2): 0.0004899764628434243, (12, 22): 0.024001009668259323, (27, 22): 1.632710639264993e-07, (25, 10): 0.050124379896499205, (13, 23): 0.008653529659168388, (26, 1): 1.632710639264993e-07, (3, 1): 1.632710639264993e-07, (27, 12): 1.632710639264993e-07, (4, 26): 1.632710639264993e-07, (5, 27): 1.632710639264993e-07, (16, 13): 0.3072763055807356, (6, 16): 0.8749697948531737, (21, 24): 0.000816518590696423, (19, 8): 0.3025414447268671, (17, 8): 0.1204942084488204, (7, 21): 0.2349472242612964, (22, 23): 0.007184090083829895, (20, 3): 0.09061560375027103, (18, 15): 0.3224605145259, (23, 22): 0.008816800723094887, (21, 2): 0.020082504134023342, (8, 9): 0.47152699589079394, (22, 9): 0.7758642590497886, (9, 4): 0.059267559476383166, (23, 12): 0.8334989446158428, (10, 3): 0.01338839051303687, (15, 26): 0.0003267053989169251, (11, 10): 0.2942146204666157, (0, 21): 1.632710639264993e-07, (24, 18): 0.0973097173712575, (14, 5): 0.01551091434408136, (12, 13): 0.8775821318759977, (1, 16): 1.632710639264993e-07, (25, 19): 0.006694276892050397, (15, 0): 1.632710639264993e-07, (13, 16): 0.8733370842139087, (2, 23): 1.632710639264993e-07, (26, 8): 0.0006532475267699237, (0, 11): 1.632710639264993e-07, (3, 6): 
0.0014696028464024201, (27, 5): 1.632710639264993e-07, (1, 10): 1.632710639264993e-07, (4, 1): 0.0001634343349904258, (5, 4): 0.0290624126499808, (16, 4): 0.03200129180065779, (6, 11): 0.8181514646067519, (19, 17): 0.7490878045658427, (17, 1): 0.0006532475267699237, (7, 18): 0.7143110679494984, (20, 10): 0.4375666145940821, (18, 6): 0.17714926763131567, (16, 26): 0.0001634343349904258, (21, 11): 0.682146668355978, (17, 27): 1.632710639264993e-07, (22, 0): 1.632710639264993e-07, (9, 13): 0.3791155737083953, (23, 5): 0.1666999195400197, (10, 10): 0.20833404084127702, (8, 22): 0.10563654163150897, (11, 3): 0.007510632211682894, (9, 23): 0.020082504134023342, (24, 25): 1.632710639264993e-07, (14, 12): 0.7786398671365391, (12, 4): 0.009959698170580383, (10, 20): 0.2986229391926311, (25, 20): 0.0034288556135204117, (15, 9): 0.3097253715396331, (13, 9): 0.3843402477540433, (11, 25): 0.0003267053989169251, (26, 19): 0.000816518590696423, (0, 2): 1.632710639264993e-07, (3, 15): 0.04179755563624774, (27, 26): 1.632710639264993e-07, (1, 3): 1.632710639264993e-07, (4, 8): 0.11624916078673142, (2, 8): 1.632710639264993e-07, (5, 13): 0.7931709918259975, (3, 21): 0.0048982951888589055, (6, 2): 0.0014696028464024201, (4, 22): 0.01534764328015486, (19, 22): 0.16180178762222472, (7, 11): 0.7337403245567518, (5, 23): 0.009469884978800885, (20, 17): 0.8202739884377964, (18, 1): 0.0006532475267699237, (16, 17): 0.695861437725804, (21, 20): 0.3232768698455325, (22, 27): 1.632710639264993e-07, (18, 27): 1.632710639264993e-07, (23, 2): 0.007510632211682894, (9, 24): 0.0009797896546229223, (14, 23): 0.020082504134023342, (12, 3): 0.004081939869226409, (15, 22): 0.11673897397851092, (13, 2): 1.632710639264993e-07, (26, 26): 1.632710639264993e-07, (0, 25): 1.632710639264993e-07, (24, 6): 0.11673897397851092, (12, 25): 0.0003267053989169251, (27, 19): 1.632710639264993e-07, (1, 4): 1.632710639264993e-07, (25, 7): 0.021225401581508836, (2, 3): 1.632710639264993e-07, (26, 4): 
0.0001634343349904258, (3, 26): 1.632710639264993e-07, (27, 9): 1.632710639264993e-07, (6, 5): 0.11739205823421692, (7, 0): 1.632710639264993e-07, (5, 16): 0.7237807896572354, (20, 24): 0.0035921266774469112, (16, 8): 0.13518860420220533, (19, 5): 0.21813030467686698, (17, 21): 0.338134536662844, (22, 18): 0.5072833588906973, (20, 6): 0.3686662256170994, (18, 18): 0.7485979913740632, (23, 27): 1.632710639264993e-07, (21, 7): 0.5350394397582021, (8, 4): 0.0734721420379886, (9, 1): 0.0001634343349904258, (10, 6): 0.08800326672744704, (11, 23): 0.006204463700270899, (0, 16): 1.632710639264993e-07, (24, 13): 0.41650464734756365, (14, 0): 1.632710639264993e-07, (12, 16): 0.8818271795380866, (25, 8): 0.030368581161392797, (15, 5): 0.025796991371450816, (13, 21): 0.11559607653102542, (2, 26): 1.632710639264993e-07, (26, 15): 0.001959416038181918, (3, 3): 1.632710639264993e-07, (27, 14): 1.632710639264993e-07, (4, 4): 0.0060411926363444, (5, 25): 1.632710639264993e-07, (16, 15): 0.4037695043612967, (6, 22): 0.08326840587357856, (19, 10): 0.22874292383208944, (17, 14): 0.1887415131700971, (7, 23): 0.02498063605181832, (22, 21): 0.09257485651738902, (20, 13): 0.4524242814113935, (18, 13): 0.11722878717029042, (23, 16): 0.5818982351051074, (21, 0): 1.632710639264993e-07, (8, 11): 0.4999361610140048, (22, 15): 0.8702349339993052, (9, 10): 0.29486770472232166, (23, 14): 0.7794562224561716, (10, 1): 0.0001634343349904258, (8, 17): 0.7784765960726125, (11, 12): 0.5779797295708715, (0, 23): 1.632710639264993e-07, (24, 20): 0.01959269094224384, (14, 11): 0.7048413462417614, (12, 15): 0.9247674693507559, (1, 22): 1.632710639264993e-07, (25, 17): 0.02204175690114133, (15, 2): 0.002449229229961416, (13, 14): 0.9244409272229029, (2, 21): 1.632710639264993e-07, (26, 22): 0.0001634343349904258, (0, 13): 1.632710639264993e-07, (3, 8): 0.007347361147756394, (27, 7): 1.632710639264993e-07, (1, 8): 1.632710639264993e-07, (4, 3): 0.0009797896546229223, (2, 15): 0.0004899764628434243, (5, 2): 
0.0003267053989169251, (16, 6): 0.059104288412456664, (6, 9): 0.6204302061917613, (19, 19): 0.687044800273773, (17, 7): 0.11314701057212793, (7, 12): 0.7463121964790922, (20, 20): 0.42058642394572615, (18, 4): 0.10433037312009698, (21, 9): 0.6718605913286085, (17, 25): 1.632710639264993e-07, (22, 6): 0.4065451124480472, (23, 7): 0.4264641822470801, (10, 8): 0.1425358020788978, (8, 24): 0.0017961449742554187, (11, 5): 0.029715496905686797, (9, 21): 0.20131338509243754, (24, 27): 1.632710639264993e-07, (14, 18): 0.7205153683787053, (12, 6): 0.03232783392851079, (10, 18): 0.6671257304747401, (25, 26): 1.632710639264993e-07, (15, 11): 0.5188756044294787, (13, 7): 0.08979924843063854, (11, 27): 1.632710639264993e-07, (26, 17): 0.0016328739103289194, (0, 4): 1.632710639264993e-07, (24, 1): 1.632710639264993e-07, (1, 1): 1.632710639264993e-07, (4, 10): 0.2373962902201939, (2, 6): 1.632710639264993e-07, (5, 11): 0.6787179760135216, (3, 23): 1.632710639264993e-07, (6, 0): 1.632710639264993e-07, (4, 16): 0.3203379906948555, (19, 24): 0.006367734764197399, (7, 5): 0.15053608421129627, (5, 21): 0.1222901901520119, (20, 19): 0.6152055321461133, (16, 19): 0.6988003168764809, (6, 26): 1.632710639264993e-07, (21, 18): 0.677901620693889, (17, 18): 0.7436998594562683, (22, 25): 1.632710639264993e-07, (18, 25): 1.632710639264993e-07, (14, 21): 0.18253721274089013, (15, 16): 0.6810037709084925, (13, 0): 1.632710639264993e-07, (11, 16): 0.8703982050632317, (26, 24): 1.632710639264993e-07, (0, 27): 1.632710639264993e-07, (24, 8): 0.25731536001922684, (12, 27): 1.632710639264993e-07, (27, 21): 1.632710639264993e-07, (1, 26): 1.632710639264993e-07, (25, 5): 0.007020819019903396, (13, 26): 1.632710639264993e-07, (2, 1): 1.632710639264993e-07, (26, 2): 1.632710639264993e-07, (27, 11): 1.632710639264993e-07, (7, 2): 0.0016328739103289194, (20, 26): 0.0001634343349904258, (16, 10): 0.2597644259781243, (21, 27): 1.632710639264993e-07, (19, 7): 0.3033578000464996, (17, 11): 0.13355589356294034, 
(7, 24): 0.002122687102108417, (22, 16): 0.8042734241729995, (20, 0): 1.632710639264993e-07, (18, 16): 0.5066302746349912, (23, 21): 0.032980918184216786, (21, 5): 0.29225536769949767, (8, 6): 0.22253862340288247, (22, 10): 0.8272946441866359, (9, 7): 0.20490534849882053, (10, 4): 0.035429984143114275, (15, 25): 0.0001634343349904258, (11, 9): 0.18106777316555164, (0, 18): 1.632710639264993e-07, (24, 15): 0.3142969613295751, (14, 6): 0.03918521861342376, (12, 18): 0.6049194551187439, (1, 19): 1.632710639264993e-07, (25, 14): 0.050124379896499205, (15, 7): 0.08979924843063854, (13, 19): 0.4511181128999815, (2, 24): 1.632710639264993e-07, (26, 13): 0.001959416038181918, (0, 8): 1.632710639264993e-07, (3, 5): 0.0011430607185494216, (27, 0): 1.632710639264993e-07, (4, 6): 0.036246339462746774, (5, 7): 0.214538341270484, (16, 1): 0.0003267053989169251, (6, 20): 0.3268688332519155, (19, 12): 0.17943506252628666, (17, 12): 0.13143336973189584, (7, 17): 0.816518753967487, (20, 15): 0.6702278806893436, (18, 11): 0.10890196291003895, (23, 18): 0.2857245251424377, (21, 14): 0.7931709918259975, (8, 13): 0.5159367252788017, (22, 13): 0.8895009195426321, (9, 8): 0.2520906859735788, (23, 8): 0.562142436370001, (10, 15): 0.7567615445703882, (8, 19): 0.5636118759453396, (11, 14): 0.8191310909903109, (9, 18): 0.697167606237216, (24, 22): 0.002122687102108417, (14, 9): 0.3856464162654553, (12, 9): 0.2850714408867317, (1, 20): 1.632710639264993e-07, (25, 23): 1.632710639264993e-07, (15, 12): 0.5518563593426316, (13, 12): 0.8592957727162297, (2, 19): 0.0001634343349904258, (26, 20): 0.0004899764628434243, (0, 15): 1.632710639264993e-07, (3, 10): 0.01959269094224384, (27, 25): 1.632710639264993e-07, (1, 14): 1.632710639264993e-07, (4, 13): 0.3683396834892464, (2, 13): 0.0003267053989169251, (5, 0): 1.632710639264993e-07, (3, 16): 0.03591979733489377, (6, 15): 0.9108894289170035, (19, 21): 0.3076028477085886, (17, 5): 0.08457457438499055, (7, 14): 0.797579310552013, (20, 22): 
0.12490252717483588, (18, 2): 0.014041474768742866, (21, 23): 0.018613064558684847, (22, 4): 0.1425358020788978, (23, 1): 1.632710639264993e-07, (8, 26): 1.632710639264993e-07, (11, 7): 0.06759438373663464, (9, 27): 1.632710639264993e-07, (14, 16): 0.812600248433251, (12, 0): 1.632710639264993e-07, (10, 16): 0.8266415599309299, (25, 24): 1.632710639264993e-07, (15, 21): 0.2609073234256098, (13, 5): 0.013551661576963369, (0, 6): 1.632710639264993e-07, (24, 3): 0.013714932640889868, (1, 7): 1.632710639264993e-07, (25, 2): 0.0003267053989169251, (2, 4): 1.632710639264993e-07, (5, 9): 0.45830203971274747, (3, 25): 1.632710639264993e-07, (6, 6): 0.213885257014778, (4, 18): 0.1907007659372151, (19, 26): 1.632710639264993e-07, (7, 7): 0.37062547838421733, (5, 19): 0.3415632290053005, (16, 21): 0.31282752175423656, (6, 24): 0.002122687102108417, (21, 16): 0.8606019412276418, (19, 0): 1.632710639264993e-07, (17, 16): 0.46695540610085196, (18, 23): 0.06367587820239864, (8, 1): 0.0003267053989169251, (14, 27): 1.632710639264993e-07, (10, 27): 1.632710639264993e-07, (15, 18): 0.7588840684014326, (11, 18): 0.6243487117259973, (24, 10): 0.3848300609458228, (12, 21): 0.08506438757677005, (27, 23): 1.632710639264993e-07, (1, 24): 1.632710639264993e-07, (25, 11): 0.057961390964971174, (13, 24): 0.0013063317824759208, (26, 0): 1.632710639264993e-07, (27, 13): 1.632710639264993e-07, (4, 25): 1.632710639264993e-07, (16, 12): 0.3051537817496911, (6, 19): 0.4819763439820899, (21, 25): 1.632710639264993e-07, (19, 9): 0.27282611109224425, (17, 9): 0.1255556114305419, (7, 26): 1.632710639264993e-07, (22, 22): 0.03167474967280479, (20, 2): 0.02220502796506783, (18, 14): 0.18563936295549363, (23, 23): 0.001959416038181918, (21, 3): 0.08424803225713756, (8, 8): 0.4000142698909872, (22, 8): 0.6790445181413746, (9, 5): 0.10661616801506796, (23, 13): 0.8235394097163264, (10, 2): 0.0013063317824759208, (15, 27): 1.632710639264993e-07, (11, 11): 0.4333215669319931, (0, 20): 1.632710639264993e-07, 
(24, 17): 0.15739346889620925, (14, 4): 0.007837174339535893, (12, 12): 0.784517625437893, (1, 17): 1.632710639264993e-07, (25, 12): 0.06041045692386866, (15, 1): 0.0001634343349904258, (13, 17): 0.7997018343830575, (2, 22): 1.632710639264993e-07, (26, 11): 0.0017961449742554187, (0, 10): 1.632710639264993e-07, (3, 7): 0.002939042421740914, (27, 2): 1.632710639264993e-07, (1, 11): 1.632710639264993e-07, (4, 0): 1.632710639264993e-07, (5, 5): 0.06645148628914914, (16, 3): 0.01795998030297885, (6, 10): 0.7348832220042373, (19, 14): 0.3100519136674861, (17, 2): 0.007184090083829895, (7, 19): 0.5520196304065581, (20, 9): 0.466302321845146, (18, 9): 0.15167898165878177, (16, 25): 0.0001634343349904258, (21, 12): 0.6943919981504655, (8, 15): 0.6839426500591695, (22, 3): 0.06155335437135415, (9, 14): 0.5054873771875058, (23, 10): 0.7694966875566551, (10, 13): 0.4751189592971769, (8, 21): 0.23576357958092892, (11, 0): 1.632710639264993e-07, (9, 16): 0.7575778998900207, (24, 24): 1.632710639264993e-07, (14, 15): 0.801497816086249, (12, 11): 0.6476964738674866, (10, 23): 0.012245493065551375, (25, 21): 0.000816518590696423, (15, 14): 0.568999821054914, (13, 10): 0.5889188908539469, (2, 17): 0.0003267053989169251, (26, 18): 0.0011430607185494216, (0, 1): 1.632710639264993e-07, (3, 12): 0.034940170951334774, (27, 27): 1.632710639264993e-07, (1, 12): 1.632710639264993e-07, (4, 15): 0.3605026724207744, (2, 11): 0.0003267053989169251, (5, 14): 0.8023141714058815, (3, 18): 0.023347925412553327, (6, 13): 0.8955419489079126, (4, 21): 0.04049138712483575, (19, 23): 0.05404288543073519, (7, 8): 0.4965074686715483, (20, 16): 0.7745580905383767, (18, 0): 1.632710639264993e-07, (16, 16): 0.5448357035937921, (21, 21): 0.17780235188702165, (22, 26): 0.0001634343349904258, (18, 26): 1.632710639264993e-07, (23, 3): 0.03657288159059977, (9, 25): 0.0003267053989169251, (14, 22): 0.0769008343804451, (12, 2): 0.0004899764628434243, (15, 23): 0.04081792925268875, (13, 3): 0.002122687102108417, 
(0, 24): 1.632710639264993e-07, (24, 5): 0.06530858884166364, (12, 24): 0.000816518590696423, (27, 16): 1.632710639264993e-07, (1, 5): 1.632710639264993e-07, (25, 0): 1.632710639264993e-07, (2, 2): 1.632710639264993e-07, (26, 7): 0.0004899764628434243, (3, 27): 1.632710639264993e-07, (6, 4): 0.05289998798324969, (7, 1): 0.000816518590696423, (5, 17): 0.6222261878949528, (16, 23): 0.05355307223895569, (19, 2): 0.019266148814390843, (17, 22): 0.17763908082309515, (20, 5): 0.2759282613068477, (18, 21): 0.33650182602357903, (23, 24): 0.0001634343349904258, (8, 3): 0.02514390711574482, (9, 2): 0.0013063317824759208, (14, 25): 0.0001634343349904258, (10, 25): 0.0003267053989169251, (11, 20): 0.23364105574988442, (24, 12): 0.4300561456534631, (14, 3): 0.0017961449742554187, (12, 23): 0.005551379444564902, (25, 9): 0.04098120031661525, (13, 22): 0.03836886329379126, (26, 14): 0.002122687102108417, (3, 0): 1.632710639264993e-07, (27, 15): 1.632710639264993e-07, (4, 27): 1.632710639264993e-07, (5, 26): 1.632710639264993e-07, (16, 14): 0.3281750017633275, (6, 17): 0.7938240760817036, (19, 11): 0.1911905791289946, (17, 15): 0.30205163153508763, (7, 20): 0.3827075371147783, (22, 20): 0.19151712125684758, (20, 12): 0.4062185703201942, (18, 12): 0.10155476503334648, (23, 17): 0.43675025927444955, (21, 1): 0.000816518590696423, (8, 10): 0.4996096188861518, (22, 14): 0.8939092382686475, (9, 11): 0.29699022855336615, (23, 15): 0.7054944304974674, (10, 0): 1.632710639264993e-07, (8, 16): 0.7572513577621677, (11, 13): 0.7125150862463069, (0, 22): 1.632710639264993e-07, (24, 19): 0.04898148244901371, (14, 10): 0.5685100078631345, (12, 14): 0.918889711049402, (1, 23): 1.632710639264993e-07, (25, 18): 0.012735306257330872, (15, 3): 0.007347361147756394, (13, 15): 0.912032326364489, (2, 20): 0.0001634343349904258, (26, 9): 0.000816518590696423, (0, 12): 1.632710639264993e-07, (3, 9): 0.012898577321257371, (27, 4): 1.632710639264993e-07, (1, 9): 1.632710639264993e-07, (4, 2): 
0.0001634343349904258, (2, 14): 0.0004899764628434243, (5, 3): 0.006367734764197399, (16, 5): 0.04620587436226322, (6, 8): 0.48262942823779587, (19, 16): 0.6227160010867323, (17, 0): 1.632710639264993e-07, (7, 13): 0.7634556581913746, (20, 11): 0.40327969116951723, (18, 7): 0.18498627869978762, (16, 27): 1.632710639264993e-07, (21, 10): 0.6800241445249335, (17, 26): 1.632710639264993e-07, (22, 1): 0.0004899764628434243, (9, 12): 0.3222972434619735, (23, 4): 0.08620728502425555, (10, 11): 0.26433601576806626, (8, 23): 0.026123533499303814, (11, 2): 0.0009797896546229223, (9, 22): 0.08653382715210856, (24, 26): 1.632710639264993e-07, (14, 13): 0.800844731830543, (12, 5): 0.01812325136690535, (10, 21): 0.15184225272270827, (25, 27): 1.632710639264993e-07, (15, 8): 0.18155758635733113, (13, 8): 0.20229301147599654, (11, 24): 0.0004899764628434243, (26, 16): 0.001959416038181918, (0, 3): 1.632710639264993e-07, (24, 0): 1.632710639264993e-07, (3, 14): 0.04245063989195374, (1, 2): 1.632710639264993e-07, (4, 9): 0.17290421996922667, (2, 9): 1.632710639264993e-07, (5, 12): 0.7502307020133282, (3, 20): 0.009306613914874386, (6, 3): 0.014368016896595866, (4, 23): 0.0026125002938879155, (19, 25): 0.0001634343349904258, (7, 10): 0.6891673241048175, (5, 22): 0.04865494032116071, (20, 18): 0.7703130428762877, (16, 18): 0.7601902369128447, (21, 19): 0.49324204739301836, (17, 19): 0.7125150862463069, (22, 24): 0.0003267053989169251, (18, 24): 0.008653529659168388, (14, 20): 0.35854341965365644, (15, 17): 0.7583942552096532, (13, 1): 1.632710639264993e-07, (11, 17): 0.785497251821452, (26, 27): 1.632710639264993e-07, (0, 26): 1.632710639264993e-07, (24, 7): 0.18106777316555164, (12, 26): 1.632710639264993e-07, (27, 18): 1.632710639264993e-07, (1, 27): 1.632710639264993e-07, (25, 6): 0.01306184838518387, (13, 27): 1.632710639264993e-07, (2, 0): 1.632710639264993e-07, (26, 5): 0.0003267053989169251, (27, 8): 1.632710639264993e-07}, 4: {(7, 3): 0.0037719552104106724, (20, 25): 
0.00034306087796153606, (16, 9): 0.7932748752568243, (19, 4): 0.01251563575815597, (17, 20): 0.25853880411138147, (7, 25): 0.021773650455768637, (22, 19): 0.19236114349511316, (20, 7): 0.029317217987156734, (18, 19): 0.2897417425366686, (23, 26): 0.00017161616133907923, (21, 6): 0.007200849542859808, (8, 5): 0.046461689649402414, (9, 0): 1.714447166224568e-07, (10, 7): 0.3046574328828224, (11, 22): 0.11349657384878302, (0, 17): 1.714447166224568e-07, (24, 14): 0.28614140348759703, (14, 1): 0.00017161616133907923, (12, 17): 0.8644244326551438, (25, 15): 0.0889799793717717, (15, 4): 0.06617783206098495, (13, 20): 0.425354513385032, (2, 27): 1.714447166224568e-07, (26, 12): 0.003086176343920845, (3, 2): 1.714447166224568e-07, (27, 1): 1.714447166224568e-07, (4, 5): 0.0032576210605433017, (5, 24): 0.061720269428801065, (16, 0): 1.714447166224568e-07, (6, 23): 0.16544432298538744, (19, 13): 0.4603292355760132, (17, 13): 0.6994946152643404, (7, 22): 0.2683111529588615, (20, 14): 0.5534237167020072, (18, 10): 0.30808632721527146, (23, 19): 0.18276023936425556, (21, 15): 0.5314787929743328, (8, 12): 0.4867317219358715, (22, 12): 0.3530048429703552, (9, 9): 0.5410796971051903, (23, 9): 0.10509578273428263, (10, 14): 0.19990471102650126, (8, 18): 0.6247447188169493, (11, 15): 0.2904275214031584, (9, 19): 0.6818358094522273, (24, 21): 0.04303279531695328, (14, 8): 0.8450511796768062, (12, 8): 0.7162961974933412, (1, 21): 1.714447166224568e-07, (25, 16): 0.07492151260873024, (15, 13): 0.8284210421644279, (13, 13): 0.36637753086690683, (2, 18): 0.00017161616133907923, (26, 23): 0.00017161616133907923, (0, 14): 1.714447166224568e-07, (3, 11): 0.003086176343920845, (27, 6): 1.714447166224568e-07, (1, 15): 1.714447166224568e-07, (4, 12): 0.032574667602983415, (2, 12): 0.00017161616133907923, (5, 1): 1.714447166224568e-07, (3, 17): 0.00651507067636998, (16, 7): 0.5890842177594782, (6, 14): 0.19956182159325633, (19, 18): 0.371692317082203, (17, 6): 0.30002842553401604, (7, 15): 
0.23436509906761507, (20, 21): 0.05606259378025999, (18, 5): 0.09360898672057803, (21, 8): 0.038060898534902035, (17, 24): 0.019716313856299156, (22, 7): 0.02040209272278898, (23, 6): 0.007372294259482264, (10, 9): 0.6463467531113788, (8, 25): 0.015087306507492822, (11, 4): 0.03017444157026902, (9, 20): 0.526678340908904, (14, 19): 0.6746351313540841, (12, 7): 0.5028475252983825, (10, 19): 0.697094389231626, (25, 25): 1.714447166224568e-07, (15, 10): 0.8868836905326857, (13, 6): 0.3614056340848556, (11, 26): 0.00034306087796153606, (0, 5): 1.714447166224568e-07, (24, 2): 1.714447166224568e-07, (1, 0): 1.714447166224568e-07, (25, 3): 1.714447166224568e-07, (4, 11): 0.02811710497079954, (2, 7): 1.714447166224568e-07, (5, 10): 0.1373273894593045, (3, 22): 0.007200849542859808, (6, 1): 1.714447166224568e-07, (4, 17): 0.062406048295290895, (19, 27): 1.714447166224568e-07, (7, 4): 0.01611597480722756, (5, 20): 0.2647108139097899, (16, 20): 0.3783786610304788, (6, 27): 1.714447166224568e-07, (21, 17): 0.41575360925417437, (19, 1): 1.714447166224568e-07, (17, 17): 0.7975609931723857, (18, 22): 0.045090131916422756, (8, 0): 1.714447166224568e-07, (14, 26): 0.0008573950278289066, (10, 26): 0.00034306087796153606, (15, 19): 0.67154912645488, (11, 19): 0.6761781338036863, (26, 25): 1.714447166224568e-07, (24, 9): 0.08057918825727131, (12, 20): 0.41746805642039897, (27, 20): 1.714447166224568e-07, (1, 25): 1.714447166224568e-07, (25, 4): 0.00017161616133907923, (13, 25): 0.00497206822676787, (26, 3): 1.714447166224568e-07, (27, 10): 1.714447166224568e-07, (4, 24): 0.01611597480722756, (20, 27): 1.714447166224568e-07, (16, 11): 0.819848806333305, (6, 18): 0.43289808091642007, (21, 26): 1.714447166224568e-07, (19, 6): 0.06480627432800529, (17, 10): 0.5861696575768964, (7, 27): 1.714447166224568e-07, (22, 17): 0.38146466592968303, (20, 1): 1.714447166224568e-07, (18, 17): 0.6496042027272054, (23, 20): 0.11606824459811987, (21, 4): 0.0013717291776962767, (8, 7): 0.187046357279817, 
(22, 11): 0.26505370334303485, (9, 6): 0.12412614627937534, (10, 5): 0.06943528167681162, (15, 24): 0.04354712946682065, (11, 8): 0.6034855739557646, (0, 19): 1.714447166224568e-07, (24, 16): 0.278254946522964, (14, 7): 0.6689774557055431, (12, 19): 0.6581764385583283, (1, 18): 1.714447166224568e-07, (25, 13): 0.07783607279131201, (15, 6): 0.45724323067680894, (13, 18): 0.8527661919248167, (2, 25): 1.714447166224568e-07, (26, 10): 0.002228952760808561, (0, 9): 1.714447166224568e-07, (3, 4): 1.714447166224568e-07, (27, 3): 1.714447166224568e-07, (4, 7): 0.006686515392992437, (5, 6): 0.027431326104309713, (16, 2): 0.0010288397444513633, (6, 21): 0.33963215507380357, (19, 15): 0.6729206841878596, (17, 3): 0.007715183692727178, (7, 16): 0.30894355079838376, (20, 8): 0.042689905883708364, (18, 8): 0.2631678114601878, (16, 24): 0.031545999303248674, (21, 13): 0.44387054278025734, (8, 14): 0.27053993427495343, (22, 2): 1.714447166224568e-07, (9, 15): 0.24242300074887052, (23, 11): 0.25545279921217723, (10, 12): 0.494789623617127, (8, 20): 0.5222207782767201, (11, 1): 0.00017161616133907923, (9, 17): 0.6027997950892747, (24, 23): 0.007715183692727178, (14, 14): 0.6739493524875944, (12, 10): 0.7739016222784867, (10, 22): 0.1572149765875095, (25, 22): 0.003943399927033129, (15, 15): 0.9165436265083707, (13, 11): 0.6343456229478068, (2, 16): 0.0006859503112064497, (26, 21): 1.714447166224568e-07, (0, 0): 1.714447166224568e-07, (3, 13): 0.0036005104937882155, (27, 24): 1.714447166224568e-07, (1, 13): 0.00017161616133907923, (4, 14): 0.027602770820932168, (2, 10): 1.714447166224568e-07, (5, 15): 0.12344036741288551, (3, 19): 0.009772520292196659, (6, 12): 0.2941993051688525, (4, 20): 0.0877798663554145, (19, 20): 0.12069725194692621, (17, 4): 0.04903336039873926, (7, 9): 0.3314028086759256, (20, 23): 0.010458299158686487, (18, 3): 0.004286289360278043, (21, 22): 0.031545999303248674, (22, 5): 0.002057508044186104, (23, 0): 1.714447166224568e-07, (8, 27): 1.714447166224568e-07, 
(11, 6): 0.20933417044073638, (9, 26): 0.0006859503112064497, (14, 17): 0.942946112868229, (12, 1): 0.00017161616133907923, (10, 17): 0.6948656079155341, (15, 20): 0.45570022822720685, (13, 4): 0.04766180266575961, (0, 7): 1.714447166224568e-07, (24, 4): 1.714447166224568e-07, (27, 17): 1.714447166224568e-07, (1, 6): 1.714447166224568e-07, (25, 1): 1.714447166224568e-07, (2, 5): 1.714447166224568e-07, (26, 6): 0.00017161616133907923, (5, 8): 0.07337851015912814, (3, 24): 0.0017146186109411904, (6, 7): 0.09549487860342505, (4, 19): 0.08315085900660817, (7, 6): 0.0762930703417099, (5, 18): 0.27105426842482083, (16, 22): 0.13681305530943713, (6, 25): 0.021945095172391092, (19, 3): 0.0027432869106759315, (17, 23): 0.04629024493277995, (20, 4): 0.00497206822676787, (18, 20): 0.15790075545399934, (23, 25): 0.0008573950278289066, (8, 2): 0.00017161616133907923, (9, 3): 0.0053149576600127835, (14, 24): 0.04166123758397362, (10, 24): 0.015944530090605105, (11, 21): 0.24208011131562562, (24, 11): 0.18464613124710258, (14, 2): 0.0005145055945839929, (12, 22): 0.10543867216752754, (27, 22): 1.714447166224568e-07, (25, 10): 0.03960390098450414, (13, 23): 0.0651491637612502, (26, 1): 1.714447166224568e-07, (3, 1): 1.714447166224568e-07, (27, 12): 1.714447166224568e-07, (4, 26): 0.00017161616133907923, (5, 27): 1.714447166224568e-07, (16, 13): 0.8417937300609796, (6, 16): 0.25510990977893233, (21, 24): 0.004114844643655586, (19, 8): 0.10149544368521105, (17, 8): 0.5097053139632807, (7, 21): 0.3821504447961729, (22, 23): 0.013372859341268254, (20, 3): 0.0010288397444513633, (18, 15): 0.7677296124800782, (23, 22): 0.03068877572013639, (21, 2): 0.00017161616133907923, (8, 9): 0.4323837467665527, (22, 9): 0.09498054445355769, (9, 4): 0.022287984605636007, (23, 12): 0.3223162386949354, (10, 3): 0.00651507067636998, (15, 26): 0.0006859503112064497, (11, 10): 0.7598431555154452, (0, 21): 1.714447166224568e-07, (24, 18): 0.18207446049776574, (14, 5): 0.21276306477318552, (12, 13): 
0.2317934283182782, (1, 16): 1.714447166224568e-07, (25, 19): 0.026231213087952513, (15, 0): 1.714447166224568e-07, (13, 16): 0.8282495974478055, (2, 23): 1.714447166224568e-07, (26, 8): 0.0005145055945839929, (0, 11): 1.714447166224568e-07, (3, 6): 0.00017161616133907923, (27, 5): 1.714447166224568e-07, (1, 10): 1.714447166224568e-07, (4, 1): 1.714447166224568e-07, (5, 4): 0.0053149576600127835, (16, 4): 0.06737794507734214, (6, 11): 0.31665856304639434, (19, 17): 0.532336016557445, (17, 1): 1.714447166224568e-07, (7, 18): 0.5390223605057208, (20, 10): 0.11966858364719146, (18, 6): 0.16458709940227514, (16, 26): 0.0006859503112064497, (21, 11): 0.24070855358264595, (17, 27): 0.00017161616133907923, (22, 0): 1.714447166224568e-07, (9, 13): 0.3756355455645195, (23, 5): 0.0013717291776962767, (10, 10): 0.7166390869265861, (8, 22): 0.24362311376522772, (11, 3): 0.008229517842594548, (9, 23): 0.1004667753854763, (24, 25): 0.00034306087796153606, (14, 12): 0.647546866127736, (12, 4): 0.03823234325152449, (10, 20): 0.4935895106007698, (25, 20): 0.014915861790870365, (15, 9): 0.8882552482656654, (13, 9): 0.8712822213200421, (11, 25): 0.0024003974774310178, (26, 19): 0.0010288397444513633, (0, 2): 1.714447166224568e-07, (3, 15): 0.006343625959747524, (27, 26): 1.714447166224568e-07, (1, 3): 1.714447166224568e-07, (4, 8): 0.012172746324911056, (2, 8): 1.714447166224568e-07, (5, 13): 0.13389849512685537, (3, 21): 0.009258186142329289, (6, 2): 1.714447166224568e-07, (4, 22): 0.05657692793012736, (19, 22): 0.02914577327053428, (7, 11): 0.4467851029628391, (5, 23): 0.11881136006407918, (20, 17): 0.4635866851918398, (18, 1): 1.714447166224568e-07, (16, 17): 0.9103716167099623, (21, 20): 0.11761124704772198, (22, 27): 1.714447166224568e-07, (18, 27): 0.00017161616133907923, (23, 2): 1.714447166224568e-07, (9, 24): 0.033946225335963066, (14, 23): 0.08863708993852679, (12, 3): 0.008400962559217006, (15, 22): 0.1687017726012141, (13, 2): 0.00034306087796153606, (26, 26): 
1.714447166224568e-07, (0, 25): 1.714447166224568e-07, (24, 6): 0.006343625959747524, (12, 25): 0.004286289360278043, (27, 19): 1.714447166224568e-07, (1, 4): 1.714447166224568e-07, (25, 7): 0.00651507067636998, (2, 3): 1.714447166224568e-07, (26, 4): 0.00017161616133907923, (3, 26): 1.714447166224568e-07, (27, 9): 1.714447166224568e-07, (6, 5): 0.026059768371330057, (7, 0): 1.714447166224568e-07, (5, 16): 0.16252976280280568, (20, 24): 0.0037719552104106724, (16, 8): 0.7250398780410865, (19, 5): 0.03548922778556518, (17, 21): 0.15412897168830528, (22, 18): 0.2854556246211072, (20, 6): 0.02108787158927881, (18, 18): 0.4692443608403809, (23, 27): 1.714447166224568e-07, (21, 7): 0.017487532540207216, (8, 4): 0.021259316305901266, (9, 1): 1.714447166224568e-07, (10, 6): 0.15858653432048916, (11, 23): 0.03891812211801432, (0, 16): 1.714447166224568e-07, (24, 13): 0.2647108139097899, (14, 0): 1.714447166224568e-07, (12, 16): 0.7060095144959938, (25, 8): 0.015944530090605105, (15, 5): 0.2417372218823807, (13, 21): 0.2508237918633709, (2, 26): 1.714447166224568e-07, (26, 15): 0.0027432869106759315, (3, 3): 1.714447166224568e-07, (27, 14): 1.714447166224568e-07, (4, 4): 0.0015431738943187335, (5, 25): 0.013372859341268254, (16, 15): 0.9261445306392283, (6, 22): 0.25390979676257514, (19, 10): 0.15018574320598876, (17, 14): 0.7895030914911302, (7, 23): 0.16492998883552007, (22, 21): 0.0687495028103218, (20, 13): 0.4493567737121759, (18, 13): 0.5388509157890984, (23, 16): 0.4001521400415308, (21, 0): 1.714447166224568e-07, (8, 11): 0.5508520459526703, (22, 15): 0.47113025272322795, (9, 10): 0.6326311757815822, (23, 14): 0.40323814494073507, (10, 1): 1.714447166224568e-07, (8, 17): 0.514677210745332, (11, 12): 0.43649841996549166, (0, 23): 1.714447166224568e-07, (24, 20): 0.08006485410740394, (14, 11): 0.7363552293381687, (12, 15): 0.4236400662188074, (1, 22): 1.714447166224568e-07, (25, 17): 0.05949148811270913, (15, 2): 0.0012002844610738202, (13, 14): 0.40992448888901084, 
(2, 21): 0.00034306087796153606, (26, 22): 0.00017161616133907923, (0, 13): 1.714447166224568e-07, (3, 8): 0.0006859503112064497, (27, 7): 1.714447166224568e-07, (1, 8): 1.714447166224568e-07, (4, 3): 1.714447166224568e-07, (2, 15): 0.0008573950278289066, (5, 2): 1.714447166224568e-07, (16, 6): 0.41369627265470493, (6, 9): 0.2239069713536452, (19, 19): 0.22133530060430834, (17, 7): 0.4188396141533786, (7, 12): 0.4090672653058986, (20, 20): 0.11229646083242582, (18, 4): 0.029488662703779193, (21, 9): 0.07543584675859762, (17, 25): 0.003943399927033129, (22, 6): 0.007543738976104721, (23, 7): 0.022802318755503377, (10, 8): 0.48896050325196344, (8, 24): 0.058977153962841755, (11, 5): 0.08880853465514923, (9, 21): 0.34477549657247725, (24, 27): 1.714447166224568e-07, (14, 18): 0.8635672090720315, (12, 6): 0.27722627822322926, (10, 18): 0.7843597499924565, (25, 26): 1.714447166224568e-07, (15, 11): 0.849851631742235, (13, 7): 0.5981707877404684, (11, 27): 1.714447166224568e-07, (26, 17): 0.0015431738943187335, (0, 4): 1.714447166224568e-07, (24, 1): 1.714447166224568e-07, (1, 1): 1.714447166224568e-07, (4, 10): 0.02263087403888092, (2, 6): 1.714447166224568e-07, (5, 11): 0.15224307980545826, (3, 23): 0.0037719552104106724, (6, 0): 1.714447166224568e-07, (4, 16): 0.04354712946682065, (19, 24): 0.003086176343920845, (7, 5): 0.03686078551854483, (5, 21): 0.230593315301921, (20, 19): 0.1957900378275623, (16, 19): 0.5834265421109371, (6, 26): 0.002057508044186104, (21, 18): 0.3032858751498427, (17, 18): 0.6264591659831737, (22, 25): 0.0006859503112064497, (18, 25): 0.001886063327563647, (14, 21): 0.2857985140543521, (15, 16): 0.9602620292470971, (13, 0): 1.714447166224568e-07, (11, 16): 0.5763973087294164, (26, 24): 0.00017161616133907923, (0, 27): 1.714447166224568e-07, (24, 8): 0.04200412701721854, (12, 27): 1.714447166224568e-07, (27, 21): 1.714447166224568e-07, (1, 26): 1.714447166224568e-07, (25, 5): 0.0005145055945839929, (13, 26): 0.0006859503112064497, (2, 1): 
1.714447166224568e-07, (26, 2): 1.714447166224568e-07, (27, 11): 1.714447166224568e-07, (7, 2): 1.714447166224568e-07, (20, 26): 1.714447166224568e-07, (16, 10): 0.8112765705021823, (21, 27): 1.714447166224568e-07, (19, 7): 0.09086587125461872, (17, 11): 0.6065715788549688, (7, 24): 0.08315085900660817, (22, 16): 0.45124266559502296, (20, 0): 1.714447166224568e-07, (18, 16): 0.7661866100304761, (23, 21): 0.0651491637612502, (21, 5): 0.004114844643655586, (8, 6): 0.09738077048627208, (22, 10): 0.17144488806717342, (9, 7): 0.2350508779341049, (10, 4): 0.02640265780457497, (15, 25): 0.008743851992461919, (11, 9): 0.7541854798669041, (0, 18): 1.714447166224568e-07, (24, 15): 0.29591375233507705, (14, 6): 0.4342696386493997, (12, 18): 0.8412793959111122, (1, 19): 1.714447166224568e-07, (25, 14): 0.08743697692216959, (15, 7): 0.674978020787329, (13, 19): 0.6616053328907775, (2, 24): 1.714447166224568e-07, (26, 13): 0.0034290657771657586, (0, 8): 1.714447166224568e-07, (3, 5): 1.714447166224568e-07, (27, 0): 1.714447166224568e-07, (4, 6): 0.00497206822676787, (5, 7): 0.04594735549953504, (16, 1): 1.714447166224568e-07, (6, 20): 0.41403916208794983, (19, 12): 0.3303741403761909, (17, 12): 0.6369172936971437, (7, 17): 0.4387272012815836, (20, 15): 0.593541780391662, (18, 11): 0.3500902827877734, (23, 18): 0.2619676984438306, (21, 14): 0.508676645663546, (8, 13): 0.3809503317798157, (22, 13): 0.4217541743359604, (9, 8): 0.3871223415782241, (23, 8): 0.051605031148076116, (10, 15): 0.23779399340006419, (8, 19): 0.627659278999531, (11, 14): 0.1656157677020099, (9, 18): 0.7138959714606268, (24, 22): 0.02005920328954407, (14, 9): 0.8959702605136759, (12, 9): 0.8273923738646932, (1, 20): 1.714447166224568e-07, (25, 23): 0.002228952760808561, (15, 12): 0.8268780397148258, (13, 12): 0.4534714469111149, (2, 19): 0.00034306087796153606, (26, 20): 0.00034306087796153606, (0, 15): 1.714447166224568e-07, (3, 10): 0.002057508044186104, (27, 25): 1.714447166224568e-07, (1, 14): 
0.00017161616133907923, (4, 13): 0.02966010742040165, (2, 13): 0.0006859503112064497, (5, 0): 1.714447166224568e-07, (3, 16): 0.005829291809880153, (6, 15): 0.1925325882117356, (19, 21): 0.062063158862045976, (17, 5): 0.1659586571352548, (7, 14): 0.2549384650623099, (20, 22): 0.027774215537554624, (18, 2): 0.0006859503112064497, (21, 23): 0.011829856891666142, (22, 4): 1.714447166224568e-07, (23, 1): 1.714447166224568e-07, (8, 26): 0.002228952760808561, (11, 7): 0.39620891155921434, (9, 27): 1.714447166224568e-07, (14, 16): 0.9144862899089012, (12, 0): 1.714447166224568e-07, (10, 16): 0.46907291612375845, (25, 24): 0.00034306087796153606, (15, 21): 0.2875129612205767, (13, 5): 0.16372987581916287, (0, 6): 1.714447166224568e-07, (24, 3): 1.714447166224568e-07, (1, 7): 1.714447166224568e-07, (25, 2): 1.714447166224568e-07, (2, 4): 1.714447166224568e-07, (5, 9): 0.1068102299005072, (3, 25): 0.00017161616133907923, (6, 6): 0.05263369944781086, (4, 18): 0.07526440204197515, (19, 26): 0.00017161616133907923, (7, 7): 0.14229928624135577, (5, 19): 0.28288395387177034, (16, 21): 0.23710821453357436, (6, 24): 0.08606541918918993, (21, 16): 0.5074765326471888, (19, 0): 1.714447166224568e-07, (17, 16): 0.8865408010994408, (18, 23): 0.020230648006166526, (8, 1): 1.714447166224568e-07, (14, 27): 1.714447166224568e-07, (10, 27): 1.714447166224568e-07, (15, 18): 0.8579095334234905, (11, 18): 0.8272209291480707, (24, 10): 0.13046960079440625, (12, 21): 0.2230497477705329, (27, 23): 1.714447166224568e-07, (1, 24): 1.714447166224568e-07, (25, 11): 0.05297658888105577, (13, 24): 0.027945660254177083, (26, 0): 1.714447166224568e-07, (27, 13): 1.714447166224568e-07, (4, 25): 0.0034290657771657586, (16, 12): 0.8179629144504581, (6, 19): 0.4472994371127064, (21, 25): 0.0006859503112064497, (19, 9): 0.1182970259142118, (17, 9): 0.5602815053669055, (7, 26): 0.0032576210605433017, (22, 22): 0.033603335902718155, (20, 2): 0.00017161616133907923, (18, 14): 0.6706919028717676, (23, 23): 
0.012001301608288599, (21, 3): 0.0005145055945839929, (8, 8): 0.3039716540163325, (22, 8): 0.046804579082647325, (9, 5): 0.055891149063637534, (23, 13): 0.37580699028114195, (10, 2): 0.0005145055945839929, (15, 27): 0.00017161616133907923, (11, 11): 0.6331455099314496, (0, 20): 1.714447166224568e-07, (24, 17): 0.236593880383707, (14, 4): 0.06052015641244387, (12, 12): 0.40066647419139817, (1, 17): 1.714447166224568e-07, (25, 12): 0.06309182716178072, (15, 1): 0.00017161616133907923, (13, 17): 0.9167150712249932, (2, 22): 0.00017161616133907923, (26, 11): 0.0032576210605433017, (0, 10): 1.714447166224568e-07, (3, 7): 0.00034306087796153606, (27, 2): 1.714447166224568e-07, (1, 11): 1.714447166224568e-07, (4, 0): 1.714447166224568e-07, (5, 5): 0.012687080474778427, (16, 3): 0.012687080474778427, (6, 10): 0.28168384085541315, (19, 14): 0.5925131120919274, (17, 2): 0.0005145055945839929, (7, 19): 0.5510234906692928, (20, 9): 0.06652072149422986, (18, 9): 0.2875129612205767, (16, 25): 0.006686515392992437, (21, 12): 0.34717572260519164, (8, 15): 0.2456804503646972, (22, 3): 1.714447166224568e-07, (9, 14): 0.2480806763974116, (23, 10): 0.17487378239962256, (10, 13): 0.3192302337957312, (8, 21): 0.37940732933021354, (11, 0): 1.714447166224568e-07, (9, 16): 0.393465796093255, (24, 24): 0.001886063327563647, (14, 15): 0.8064761184367535, (12, 11): 0.6112005862037752, (10, 23): 0.05932004339608667, (25, 21): 0.00651507067636998, (15, 14): 0.8702535530203074, (13, 10): 0.7973895484557633, (2, 17): 0.0006859503112064497, (26, 18): 0.0012002844610738202, (0, 1): 1.714447166224568e-07, (3, 12): 0.0027432869106759315, (27, 27): 1.714447166224568e-07, (1, 12): 1.714447166224568e-07, (4, 15): 0.033603335902718155, (2, 11): 1.714447166224568e-07, (5, 14): 0.11778269176434444, (3, 18): 0.007715183692727178, (6, 13): 0.24910934469714635, (4, 21): 0.07732173864144463, (19, 23): 0.011315522741798771, (7, 8): 0.230593315301921, (20, 16): 0.5686822964814059, (18, 0): 1.714447166224568e-07, 
(16, 16): 0.9518612381325968, (21, 21): 0.06309182716178072, (22, 26): 0.00017161616133907923, (18, 26): 0.0006859503112064497, (23, 3): 1.714447166224568e-07, (9, 25): 0.009429630858951746, (14, 22): 0.1644156546856527, (12, 2): 0.0006859503112064497, (15, 23): 0.09583776803666996, (13, 3): 0.010458299158686487, (0, 24): 1.714447166224568e-07, (24, 5): 0.0017146186109411904, (12, 24): 0.016630308957094934, (27, 16): 1.714447166224568e-07, (1, 5): 1.714447166224568e-07, (25, 0): 1.714447166224568e-07, (2, 2): 1.714447166224568e-07, (26, 7): 0.00034306087796153606, (3, 27): 1.714447166224568e-07, (6, 4): 0.010629743875308944, (7, 1): 1.714447166224568e-07, (5, 17): 0.21979229815470624, (16, 23): 0.07320706544250567, (19, 2): 0.00017161616133907923, (17, 22): 0.08760842163879204, (20, 5): 0.013715748774513167, (18, 21): 0.08760842163879204, (23, 24): 0.004629178793522956, (8, 3): 0.005143512943390327, (9, 2): 0.00017161616133907923, (14, 25): 0.007886628409349635, (10, 25): 0.004286289360278043, (11, 20): 0.45209988917813526, (24, 12): 0.22973609171880874, (14, 3): 0.01354430405789071, (12, 23): 0.041832682300596086, (25, 9): 0.027774215537554624, (13, 22): 0.13201260324400835, (26, 14): 0.0034290657771657586, (3, 0): 1.714447166224568e-07, (27, 15): 1.714447166224568e-07, (4, 27): 1.714447166224568e-07, (5, 26): 0.0010288397444513633, (16, 14): 0.8812260148841446, (6, 17): 0.3519761746706205, (19, 11): 0.2211638558876859, (17, 15): 0.8659674351047459, (7, 20): 0.4903320609849431, (22, 20): 0.12241169911315078, (20, 12): 0.32608802246062946, (18, 12): 0.42621173696814424, (23, 17): 0.34117515752340566, (21, 1): 1.714447166224568e-07, (8, 10): 0.5287356775083735, (22, 14): 0.46084356972588053, (9, 11): 0.6300595050322454, (23, 15): 0.41489638567106213, (10, 0): 1.714447166224568e-07, (8, 16): 0.34803294618830394, (11, 13): 0.2521953495963506, (0, 22): 1.714447166224568e-07, (24, 19): 0.1275550406118245, (14, 10): 0.8459084032599185, (12, 14): 0.22013518758795114, (1, 
23): 1.714447166224568e-07, (25, 18): 0.03943245626788169, (15, 3): 0.014230082924380537, (13, 15): 0.6213158244845001, (2, 20): 0.0005145055945839929, (26, 9): 0.0015431738943187335, (0, 12): 1.714447166224568e-07, (3, 9): 0.0013717291776962767, (27, 4): 1.714447166224568e-07, (1, 9): 1.714447166224568e-07, (4, 2): 1.714447166224568e-07, (2, 14): 0.0006859503112064497, (5, 3): 0.0008573950278289066, (16, 5): 0.2242498607868901, (6, 8): 0.1505286326392337, (19, 16): 0.6550904336591241, (17, 0): 1.714447166224568e-07, (7, 13): 0.3392892656405586, (20, 11): 0.21139150704020584, (18, 7): 0.22373552663702273, (16, 27): 0.00017161616133907923, (21, 10): 0.1429850651078456, (17, 26): 0.0006859503112064497, (22, 1): 1.714447166224568e-07, (9, 12): 0.5189633286608933, (23, 4): 1.714447166224568e-07, (10, 11): 0.6559476572422364, (8, 23): 0.13835605775903925, (11, 2): 0.0006859503112064497, (9, 22): 0.20299071592570547, (24, 26): 1.714447166224568e-07, (14, 13): 0.6149723699694692, (12, 5): 0.11932569421394655, (10, 21): 0.29385641573560756, (25, 27): 1.714447166224568e-07, (15, 8): 0.828078152731183, (13, 8): 0.8023614452378145, (11, 24): 0.010801188591931401, (26, 16): 0.001886063327563647, (0, 3): 1.714447166224568e-07, (24, 0): 1.714447166224568e-07, (3, 14): 0.004629178793522956, (1, 2): 1.714447166224568e-07, (4, 9): 0.01783042197345213, (2, 9): 1.714447166224568e-07, (5, 12): 0.1496714090561214, (3, 20): 0.010972633308553859, (6, 3): 0.001886063327563647, (4, 23): 0.03548922778556518, (19, 25): 0.0008573950278289066, (7, 10): 0.4126676043549702, (5, 22): 0.17607389541597976, (20, 18): 0.32368779642791506, (16, 18): 0.7811023003766299, (21, 19): 0.19458992481120507, (17, 19): 0.4248401792351646, (22, 24): 0.00497206822676787, (18, 24): 0.007543738976104721, (14, 20): 0.4677013583907788, (15, 17): 0.9552901324650459, (13, 1): 0.00017161616133907923, (11, 17): 0.788302978474773, (26, 27): 1.714447166224568e-07, (0, 26): 1.714447166224568e-07, (24, 7): 
0.02108787158927881, (12, 26): 0.0008573950278289066, (27, 18): 1.714447166224568e-07, (1, 27): 1.714447166224568e-07, (25, 6): 0.002571842194053474, (13, 27): 1.714447166224568e-07, (2, 0): 1.714447166224568e-07, (26, 5): 0.00017161616133907923, (27, 8): 1.714447166224568e-07}, 5: {(7, 3): 1.850212700452044e-07, (20, 25): 0.0011103126415412717, (16, 9): 0.30195489773504364, (19, 4): 0.05180614063392727, (17, 20): 0.3557960873181981, (7, 25): 0.1611537112306431, (22, 19): 0.2793823027895287, (20, 7): 0.5084386351054918, (18, 19): 0.49456203985210145, (23, 26): 0.00018520629131524958, (21, 6): 0.359311491449057, (8, 5): 0.021832694886604165, (9, 0): 1.850212700452044e-07, (10, 7): 0.2164750709741592, (11, 22): 0.05384137460442452, (0, 17): 1.850212700452044e-07, (24, 14): 0.34858025778643514, (14, 1): 1.850212700452044e-07, (12, 17): 0.3680074911411816, (25, 15): 0.046995587612751966, (15, 4): 0.010916439953937105, (13, 20): 0.16503915790159238, (2, 27): 1.850212700452044e-07, (26, 12): 0.005365801852580974, (3, 2): 1.850212700452044e-07, (27, 1): 1.850212700452044e-07, (4, 5): 0.0011103126415412717, (5, 24): 0.14838724359752398, (16, 0): 1.850212700452044e-07, (6, 23): 0.35690621493846936, (19, 13): 0.3424745558749434, (17, 13): 0.26902111166699727, (7, 22): 0.48549599761988643, (20, 14): 0.5820771005834832, (18, 10): 0.19168222078810182, (23, 19): 0.15967354107028145, (21, 15): 0.7376799886915001, (8, 12): 0.716772585176392, (22, 12): 0.799107050346508, (9, 9): 0.46717889188541123, (23, 9): 0.5365618681523628, (10, 14): 0.5508085059458436, (8, 18): 0.591883227895879, (11, 15): 0.45237719028179485, (9, 19): 0.38502944798534045, (24, 21): 0.01313669519447956, (14, 8): 0.4627383814043263, (12, 8): 0.4973373589027795, (1, 21): 1.850212700452044e-07, (25, 16): 0.03293397108931643, (15, 13): 0.5596895269080134, (13, 13): 0.7547019455356588, (2, 18): 0.0014803551816316804, (26, 23): 0.00018520629131524958, (0, 14): 1.850212700452044e-07, (3, 11): 0.004625716772400155, 
(27, 6): 1.850212700452044e-07, (1, 15): 1.850212700452044e-07, (4, 12): 0.10157686227608727, (2, 12): 1.850212700452044e-07, (5, 1): 1.850212700452044e-07, (3, 17): 0.01979746091610692, (16, 7): 0.174660263943943, (6, 14): 0.6392486730274514, (19, 18): 0.5878127599548845, (17, 6): 0.17780562553471146, (7, 15): 0.734904669640822, (20, 21): 0.1913121782480114, (18, 5): 0.1504224775680212, (21, 8): 0.6279623755546938, (17, 24): 0.020907588536378145, (22, 7): 0.4285094464459635, (23, 6): 0.14209652041598703, (10, 9): 0.560244590718149, (8, 25): 0.1544929455090157, (11, 4): 0.009806312333665879, (9, 20): 0.3474701301661639, (14, 19): 0.3717079165420857, (12, 7): 0.2890034088318793, (10, 19): 0.22239575161560574, (25, 25): 1.850212700452044e-07, (15, 10): 0.5526587186462957, (13, 6): 0.13284545691372682, (11, 26): 0.005735844392671382, (0, 5): 1.850212700452044e-07, (24, 2): 1.850212700452044e-07, (1, 0): 1.850212700452044e-07, (25, 3): 0.00018520629131524958, (4, 11): 0.08122452257111477, (2, 7): 1.850212700452044e-07, (5, 10): 0.18243115728584158, (3, 22): 0.010916439953937105, (6, 1): 1.850212700452044e-07, (4, 17): 0.18539149760656487, (19, 27): 1.850212700452044e-07, (7, 4): 0.0018503977217220891, (5, 20): 0.40223642609954446, (16, 20): 0.3408093644445366, (6, 27): 0.0025904828019029068, (21, 17): 0.6412839069979486, (19, 1): 1.850212700452044e-07, (17, 17): 0.5911431428156981, (18, 22): 0.13173532929345558, (8, 0): 1.850212700452044e-07, (14, 26): 1.850212700452044e-07, (10, 26): 0.012026567574208332, (15, 19): 0.4379455312182689, (11, 19): 0.15634315820946776, (26, 25): 1.850212700452044e-07, (24, 9): 0.25921498435460144, (12, 20): 0.10879269180785023, (27, 20): 1.850212700452044e-07, (1, 25): 1.850212700452044e-07, (25, 4): 0.000370227561360454, (13, 25): 0.001295333911586476, (26, 3): 1.850212700452044e-07, (27, 10): 0.00018520629131524958, (4, 24): 0.042740098401712265, (20, 27): 0.00018520629131524958, (16, 11): 0.35283574699747483, (6, 18): 0.65164509812048, 
(21, 26): 0.000370227561360454, (19, 6): 0.3437697047652598, (17, 10): 0.20426366715117572, (7, 27): 0.0027755040719481113, (22, 17): 0.5526587186462957, (20, 1): 1.850212700452044e-07, (18, 17): 0.5998391425078228, (23, 20): 0.08529499051210927, (21, 4): 0.03940971554089859, (8, 7): 0.13488069088422405, (22, 11): 0.7872656890636148, (9, 6): 0.07696903336007507, (10, 5): 0.03422911997963286, (15, 24): 0.013506737734569969, (11, 8): 0.4494168499610716, (0, 19): 1.850212700452044e-07, (24, 16): 0.22369090050592216, (14, 7): 0.27013123928726845, (12, 19): 0.19001702935769496, (1, 18): 1.850212700452044e-07, (25, 13): 0.07030826763844772, (15, 6): 0.1069424791073982, (13, 18): 0.39021004354660616, (2, 25): 1.850212700452044e-07, (26, 10): 0.00481073804244536, (0, 9): 1.850212700452044e-07, (3, 4): 0.00018520629131524958, (27, 3): 1.850212700452044e-07, (4, 7): 0.007586057093123426, (5, 6): 0.01702214186542885, (16, 2): 1.850212700452044e-07, (6, 21): 0.507698550025311, (19, 15): 0.5278658684602383, (17, 3): 0.003515589152128929, (7, 16): 0.7282439039191947, (20, 8): 0.5578393142075614, (18, 8): 0.2682810265868164, (16, 24): 0.017392184405519262, (21, 13): 0.711406968345081, (8, 14): 0.713257181045533, (22, 2): 0.00018520629131524958, (9, 15): 0.5469230592748943, (23, 11): 0.684208841648436, (10, 12): 0.7478561585439863, (8, 20): 0.5006677417635932, (11, 1): 1.850212700452044e-07, (9, 17): 0.46014808362369347, (24, 23): 0.0018503977217220891, (14, 14): 0.6718124165554072, (12, 10): 0.8560936015204309, (10, 22): 0.13987626517544458, (25, 22): 0.0018503977217220891, (15, 15): 0.5770815262922626, (13, 11): 0.8655296862927363, (2, 16): 0.0009252913714960673, (26, 21): 0.0005552488314056585, (0, 0): 1.850212700452044e-07, (3, 13): 0.010731418683891901, (27, 24): 1.850212700452044e-07, (1, 13): 1.850212700452044e-07, (4, 14): 0.15153260518829245, (2, 10): 1.850212700452044e-07, (5, 15): 0.4618132750541003, (3, 19): 0.02035252472624253, (6, 12): 0.5287909748104643, (4, 20): 
0.1456119245468459, (19, 20): 0.343954726035305, (17, 4): 0.02904852441836714, (7, 9): 0.30584034440599295, (20, 23): 0.046995587612751966, (18, 3): 0.005735844392671382, (21, 22): 0.07437873557944222, (22, 5): 0.1036120962465845, (23, 0): 1.850212700452044e-07, (8, 27): 0.0016653764516768848, (11, 6): 0.11452835117925157, (9, 26): 0.024793035207327438, (14, 17): 0.561169697068375, (12, 1): 1.850212700452044e-07, (10, 17): 0.3252675777607394, (15, 20): 0.30139983392490804, (13, 4): 0.009991333603711083, (0, 7): 1.850212700452044e-07, (24, 4): 0.003885631692219338, (27, 17): 1.850212700452044e-07, (1, 6): 1.850212700452044e-07, (25, 1): 1.850212700452044e-07, (2, 5): 1.850212700452044e-07, (26, 6): 0.00018520629131524958, (5, 8): 0.06697788477763404, (3, 24): 0.0031455466120385205, (6, 7): 0.06623779969745322, (4, 19): 0.16355898774123073, (7, 6): 0.048105715233023184, (5, 18): 0.47328459379690296, (16, 22): 0.11267813847879952, (6, 25): 0.1198939680105625, (19, 3): 0.006290908202806995, (17, 23): 0.0612422254062327, (20, 4): 0.05125107682379166, (18, 20): 0.35450093842788166, (23, 25): 0.000370227561360454, (8, 2): 1.850212700452044e-07, (9, 3): 1.850212700452044e-07, (14, 24): 0.0077710783631686296, (10, 24): 0.098616521955364, (11, 21): 0.07086333144858332, (24, 11): 0.37355812924253773, (14, 2): 1.850212700452044e-07, (12, 22): 0.030158652038638364, (27, 22): 1.850212700452044e-07, (25, 10): 0.06660784223754362, (13, 23): 0.016282056785248033, (26, 1): 1.850212700452044e-07, (3, 1): 1.850212700452044e-07, (27, 12): 0.00018520629131524958, (4, 26): 0.0020354189917672935, (5, 27): 0.0005552488314056585, (16, 13): 0.38373429909502405, (6, 16): 0.6671868848042772, (21, 24): 0.00906622725348506, (19, 8): 0.4151879150027088, (17, 8): 0.2085191563622154, (7, 21): 0.550253442135708, (22, 23): 0.015171929164976807, (20, 3): 0.006660950742897404, (18, 15): 0.4534873179020661, (23, 22): 0.018317290755745282, (21, 2): 0.000370227561360454, (8, 9): 0.38891489465628976, (22, 
9): 0.6971603305516003, (9, 4): 0.004255674232309747, (23, 12): 0.6991955645220975, (10, 3): 1.850212700452044e-07, (15, 26): 1.850212700452044e-07, (11, 10): 0.7896709655742025, (0, 21): 1.850212700452044e-07, (24, 18): 0.09639626671482154, (14, 5): 0.0444052898321191, (12, 13): 0.7127021172353974, (1, 16): 1.850212700452044e-07, (25, 19): 0.008141120903259039, (15, 0): 0.00018520629131524958, (13, 16): 0.5539538675366121, (2, 23): 0.00018520629131524958, (26, 8): 0.0014803551816316804, (0, 11): 1.850212700452044e-07, (3, 6): 1.850212700452044e-07, (27, 5): 1.850212700452044e-07, (1, 10): 1.850212700452044e-07, (4, 1): 1.850212700452044e-07, (5, 4): 0.0009252913714960673, (16, 4): 0.016652099325338444, (6, 11): 0.43554025470768126, (19, 17): 0.6288874819049198, (17, 1): 1.850212700452044e-07, (7, 18): 0.694199990230877, (20, 10): 0.5197249325782493, (18, 6): 0.2616202608651891, (16, 26): 0.00018520629131524958, (21, 11): 0.7040061175432728, (17, 27): 1.850212700452044e-07, (22, 0): 1.850212700452044e-07, (9, 13): 0.6899445010198373, (23, 5): 0.04921584285329442, (10, 10): 0.7102968407248098, (8, 22): 0.40889719182117185, (11, 3): 0.0011103126415412717, (9, 23): 0.24256307005053301, (24, 25): 1.850212700452044e-07, (14, 12): 0.750261435054574, (12, 4): 0.010361376143801492, (10, 20): 0.18890690173742375, (25, 20): 0.003885631692219338, (15, 9): 0.4753198277674002, (13, 9): 0.7302791378896919, (11, 25): 0.02072256726633294, (26, 19): 0.001295333911586476, (0, 2): 1.850212700452044e-07, (3, 15): 0.01868733329583569, (27, 26): 1.850212700452044e-07, (1, 3): 1.850212700452044e-07, (4, 8): 0.015541971705067216, (2, 8): 1.850212700452044e-07, (5, 13): 0.3916902137069678, (3, 21): 0.013506737734569969, (6, 2): 0.00018520629131524958, (4, 22): 0.10601737275717217, (19, 22): 0.12192920198105975, (7, 11): 0.5687555691402284, (5, 23): 0.23035166622754952, (20, 17): 0.6505349705002088, (18, 1): 1.850212700452044e-07, (16, 17): 0.5887378663051105, (21, 20): 0.24237804878048783, 
(22, 27): 1.850212700452044e-07, (18, 27): 1.850212700452044e-07, (23, 2): 0.00018520629131524958, (9, 24): 0.1959377099991415, (14, 23): 0.02497805647737264, (12, 3): 0.001295333911586476, (15, 22): 0.09362094766414347, (13, 2): 1.850212700452044e-07, (26, 26): 1.850212700452044e-07, (0, 25): 1.850212700452044e-07, (24, 6): 0.05143609809383687, (12, 25): 0.0040706529622645425, (27, 19): 1.850212700452044e-07, (1, 4): 1.850212700452044e-07, (25, 7): 0.02072256726633294, (2, 3): 1.850212700452044e-07, (26, 4): 1.850212700452044e-07, (3, 26): 0.00018520629131524958, (27, 9): 1.850212700452044e-07, (6, 5): 0.011656525034117923, (7, 0): 1.850212700452044e-07, (5, 16): 0.47698501919780706, (20, 24): 0.014616865354841194, (16, 8): 0.24441328275098506, (19, 5): 0.1877967741171525, (17, 21): 0.2255411132063742, (22, 18): 0.4168531064331156, (20, 6): 0.3789237460738487, (18, 18): 0.580041866612986, (23, 27): 1.850212700452044e-07, (21, 7): 0.5226852728989725, (8, 4): 0.002960525341993316, (9, 1): 1.850212700452044e-07, (10, 6): 0.09417601147427908, (11, 23): 0.045145374912299925, (0, 16): 1.850212700452044e-07, (24, 13): 0.37799863972362263, (14, 0): 0.00018520629131524958, (12, 16): 0.44442127566985107, (25, 8): 0.03496920505981368, (15, 5): 0.044590311102164305, (13, 21): 0.08529499051210927, (2, 26): 1.850212700452044e-07, (26, 15): 0.003885631692219338, (3, 3): 0.00018520629131524958, (27, 14): 1.850212700452044e-07, (4, 4): 1.850212700452044e-07, (5, 25): 0.06383252318686557, (16, 15): 0.48586604015997686, (6, 22): 0.4379455312182689, (19, 10): 0.31102093996725866, (17, 14): 0.3454348961956667, (7, 23): 0.40871217055112663, (22, 21): 0.09158571369364622, (20, 13): 0.5202799963883848, (18, 13): 0.2551445164136069, (23, 16): 0.49530212493228226, (21, 0): 1.850212700452044e-07, (8, 11): 0.6623763317831018, (22, 15): 0.7234333508980193, (9, 10): 0.6207465460229309, (23, 14): 0.65164509812048, (10, 1): 1.850212700452044e-07, (8, 17): 0.626297184124287, (11, 12): 
0.7802348808018971, (0, 23): 1.850212700452044e-07, (24, 20): 0.02960358822850275, (14, 11): 0.7702437322194561, (12, 15): 0.5223152303588822, (1, 22): 1.850212700452044e-07, (25, 17): 0.02238775869673978, (15, 2): 1.850212700452044e-07, (13, 14): 0.6812485013277128, (2, 21): 0.000370227561360454, (26, 22): 0.000370227561360454, (0, 13): 1.850212700452044e-07, (3, 8): 0.000370227561360454, (27, 7): 1.850212700452044e-07, (1, 8): 1.850212700452044e-07, (4, 3): 0.00018520629131524958, (2, 15): 0.0005552488314056585, (5, 2): 1.850212700452044e-07, (16, 6): 0.11822877658015565, (6, 9): 0.21110945414284826, (19, 19): 0.48420084872957003, (17, 7): 0.20648392239171817, (7, 12): 0.6575657787619266, (20, 20): 0.307690557106445, (18, 4): 0.04533039618234513, (21, 9): 0.6888343733995661, (17, 25): 0.001295333911586476, (22, 6): 0.25699472911405896, (23, 7): 0.27031626055731367, (10, 8): 0.37226298035222133, (8, 24): 0.2834527707305232, (11, 5): 0.0401498006210794, (9, 21): 0.31176102504743947, (24, 27): 1.850212700452044e-07, (14, 18): 0.4829056998392536, (12, 6): 0.12877498897273232, (10, 18): 0.2708713243674493, (25, 26): 1.850212700452044e-07, (15, 11): 0.5759713986719914, (13, 7): 0.29695932344382314, (11, 27): 0.00018520629131524958, (26, 17): 0.003515589152128929, (0, 4): 1.850212700452044e-07, (24, 1): 1.850212700452044e-07, (1, 1): 1.850212700452044e-07, (4, 10): 0.05402639587446973, (2, 6): 1.850212700452044e-07, (5, 11): 0.257919835464285, (3, 23): 0.006660950742897404, (6, 0): 1.850212700452044e-07, (4, 16): 0.18520647633651965, (19, 24): 0.018132269485700077, (7, 5): 0.017577205675564464, (5, 21): 0.3550560022380173, (20, 19): 0.44867676488089075, (16, 19): 0.4790202531683043, (6, 26): 0.028678481878276728, (21, 18): 0.5286059535404191, (17, 18): 0.5789317389927147, (22, 25): 0.0007402701014508629, (18, 25): 0.0014803551816316804, (14, 21): 0.13654588231463088, (15, 16): 0.6005792275880036, (13, 0): 1.850212700452044e-07, (11, 16): 0.3711528527319501, (26, 24): 
1.850212700452044e-07, (0, 27): 1.850212700452044e-07, (24, 8): 0.1877967741171525, (12, 27): 1.850212700452044e-07, (27, 21): 1.850212700452044e-07, (1, 26): 1.850212700452044e-07, (25, 5): 0.0016653764516768848, (13, 26): 0.0005552488314056585, (2, 1): 1.850212700452044e-07, (26, 2): 1.850212700452044e-07, (27, 11): 0.00018520629131524958, (7, 2): 0.00018520629131524958, (20, 26): 0.000370227561360454, (16, 10): 0.3387741304740393, (21, 27): 1.850212700452044e-07, (19, 7): 0.41703812770316084, (17, 11): 0.200008177940136, (7, 24): 0.31157600377739425, (22, 16): 0.6644115657535992, (20, 0): 1.850212700452044e-07, (18, 16): 0.5511785484859341, (23, 21): 0.043110140941802676, (21, 5): 0.16170877504077869, (8, 6): 0.06161226794632311, (22, 10): 0.7600675623669698, (9, 7): 0.17318009378358137, (10, 4): 0.007586057093123426, (15, 25): 0.0011103126415412717, (11, 9): 0.6409138644578581, (0, 18): 1.850212700452044e-07, (24, 15): 0.29381396185305464, (14, 6): 0.1241494572216022, (12, 18): 0.283267749460478, (1, 19): 1.850212700452044e-07, (25, 14): 0.060872182866142294, (15, 7): 0.21443983700366195, (13, 19): 0.2766069837388506, (2, 24): 1.850212700452044e-07, (26, 13): 0.005180780582535769, (0, 8): 1.850212700452044e-07, (3, 5): 1.850212700452044e-07, (27, 0): 1.850212700452044e-07, (4, 6): 0.004255674232309747, (5, 7): 0.0357092901399945, (16, 1): 1.850212700452044e-07, (6, 20): 0.5709758243807709, (19, 12): 0.29529413201341626, (17, 12): 0.21943541129488248, (7, 17): 0.7147373512058948, (20, 15): 0.6429490984283553, (18, 11): 0.17947081696511832, (23, 18): 0.2603251119748726, (21, 14): 0.7337945420205507, (8, 13): 0.7304641591597371, (22, 13): 0.7885608379539313, (9, 8): 0.30343506789540525, (23, 8): 0.4114874896018047, (10, 15): 0.4549674880624277, (8, 19): 0.5541388888066573, (11, 14): 0.5480331868951656, (9, 18): 0.4196284254837937, (24, 22): 0.006290908202806995, (14, 9): 0.6518301193905253, (12, 9): 0.7128871385054427, (1, 20): 1.850212700452044e-07, (25, 23): 
0.001295333911586476, (15, 12): 0.5720859520010421, (13, 12): 0.8240849218026105, (2, 19): 0.001295333911586476, (26, 20): 0.0009252913714960673, (0, 15): 1.850212700452044e-07, (3, 10): 0.0025904828019029068, (27, 25): 1.850212700452044e-07, (1, 14): 1.850212700452044e-07, (4, 13): 0.13062520167318437, (2, 13): 1.850212700452044e-07, (5, 0): 1.850212700452044e-07, (3, 16): 0.020537545996287737, (6, 15): 0.6612662041628307, (19, 21): 0.21925039002483726, (17, 5): 0.1032420537064941, (7, 14): 0.7313892655099631, (20, 22): 0.10009669211572562, (18, 2): 0.00018520629131524958, (21, 23): 0.032563928549226025, (22, 4): 0.023127843776920596, (23, 1): 1.850212700452044e-07, (8, 26): 0.0318238434690452, (11, 7): 0.2542194100633809, (9, 27): 0.0014803551816316804, (14, 16): 0.6063148869594049, (12, 0): 1.850212700452044e-07, (10, 16): 0.38428936290515964, (25, 24): 0.0005552488314056585, (15, 21): 0.18132102966557037, (13, 5): 0.046810566342706764, (0, 6): 1.850212700452044e-07, (24, 3): 0.0005552488314056585, (1, 7): 1.850212700452044e-07, (25, 2): 1.850212700452044e-07, (2, 4): 1.850212700452044e-07, (5, 9): 0.1152684362594324, (3, 25): 0.000370227561360454, (6, 6): 0.03330401362940684, (4, 18): 0.1789157531549827, (19, 26): 0.00018520629131524958, (7, 7): 0.10157686227608727, (5, 19): 0.44664153091039355, (16, 21): 0.21369975192348115, (6, 24): 0.2529242611730645, (21, 16): 0.7097417769146742, (19, 0): 1.850212700452044e-07, (17, 16): 0.5310112300510067, (18, 23): 0.06309243810668475, (8, 1): 1.850212700452044e-07, (14, 27): 1.850212700452044e-07, (10, 27): 0.0007402701014508629, (15, 18): 0.5419274849836738, (11, 18): 0.2209155814552441, (24, 10): 0.3254525990307846, (12, 21): 0.05754180000532861, (27, 23): 1.850212700452044e-07, (1, 24): 1.850212700452044e-07, (25, 11): 0.07585890573980385, (13, 24): 0.005365801852580974, (26, 0): 1.850212700452044e-07, (27, 13): 0.00018520629131524958, (4, 25): 0.01572699297511242, (16, 12): 0.36523217209050357, (6, 19): 
0.618156248242298, (21, 25): 0.0011103126415412717, (19, 9): 0.36264187430987066, (17, 9): 0.21129447541289348, (7, 26): 0.03829958792062736, (22, 22): 0.042740098401712265, (20, 2): 0.0007402701014508629, (18, 14): 0.34950536413666117, (23, 23): 0.007216014553033017, (21, 3): 0.005735844392671382, (8, 8): 0.2457084316413015, (22, 8): 0.5867026323346133, (9, 5): 0.02627320536768907, (23, 13): 0.6856890118087976, (10, 2): 1.850212700452044e-07, (15, 27): 1.850212700452044e-07, (11, 11): 0.8268602408532886, (0, 20): 1.850212700452044e-07, (24, 17): 0.15060749883806643, (14, 4): 0.010546397413846696, (12, 12): 0.8133536881399887, (1, 17): 1.850212700452044e-07, (25, 12): 0.07678401209002987, (15, 1): 1.850212700452044e-07, (13, 17): 0.4784651893581687, (2, 22): 0.000370227561360454, (26, 11): 0.005920865662716586, (0, 10): 1.850212700452044e-07, (3, 7): 1.850212700452044e-07, (27, 2): 1.850212700452044e-07, (1, 11): 1.850212700452044e-07, (4, 0): 1.850212700452044e-07, (5, 5): 0.005550823122626177, (16, 3): 0.0025904828019029068, (6, 10): 0.3208270672796545, (19, 14): 0.4275843400957375, (17, 2): 1.850212700452044e-07, (7, 19): 0.6594159914623786, (20, 9): 0.5607996545282846, (18, 9): 0.23035166622754952, (16, 25): 0.0014803551816316804, (21, 12): 0.7003056921423688, (8, 15): 0.686984160699114, (22, 3): 0.00481073804244536, (9, 14): 0.6148258653814843, (23, 10): 0.6307376946053719, (10, 13): 0.6544204171711581, (8, 21): 0.4616282537840551, (11, 0): 1.850212700452044e-07, (9, 16): 0.4978924227129152, (24, 24): 0.0007402701014508629, (14, 15): 0.6388786304873609, (12, 11): 0.8701552180438664, (10, 23): 0.11767371277002005, (25, 21): 0.0016653764516768848, (15, 14): 0.5607996545282846, (13, 10): 0.8472125805582611, (2, 17): 0.001295333911586476, (26, 18): 0.0016653764516768848, (0, 1): 1.850212700452044e-07, (3, 12): 0.006660950742897404, (27, 27): 1.850212700452044e-07, (1, 12): 1.850212700452044e-07, (4, 15): 0.16873958330249647, (2, 11): 1.850212700452044e-07, (5, 
14): 0.43091472295655114, (3, 18): 0.020167503456197326, (6, 13): 0.5979889298073707, (4, 21): 0.12562962738196384, (19, 23): 0.05791184254541902, (7, 8): 0.1874267315770621, (20, 16): 0.6755128419563113, (18, 0): 1.850212700452044e-07, (16, 16): 0.5539538675366121, (21, 21): 0.1448718394666651, (22, 26): 0.000370227561360454, (18, 26): 0.00018520629131524958, (23, 3): 0.002405461531857702, (9, 25): 0.10953277688803105, (14, 22): 0.06642282096749842, (12, 2): 1.850212700452044e-07, (15, 23): 0.04200001332153145, (13, 3): 0.001295333911586476, (0, 24): 1.850212700452044e-07, (24, 5): 0.01572699297511242, (12, 24): 0.009621291063620674, (27, 16): 1.850212700452044e-07, (1, 5): 1.850212700452044e-07, (25, 0): 1.850212700452044e-07, (2, 2): 1.850212700452044e-07, (26, 7): 0.000370227561360454, (3, 27): 1.850212700452044e-07, (6, 4): 0.0016653764516768848, (7, 1): 1.850212700452044e-07, (5, 17): 0.4830907211092988, (16, 23): 0.05310128952424371, (19, 2): 0.0007402701014508629, (17, 22): 0.12951507405291313, (20, 5): 0.19260732713832784, (18, 21): 0.23072170876763995, (23, 24): 0.001295333911586476, (8, 3): 1.850212700452044e-07, (9, 2): 1.850212700452044e-07, (14, 25): 0.0011103126415412717, (10, 25): 0.05402639587446973, (11, 20): 0.10398213878667492, (24, 12): 0.3916902137069678, (14, 3): 0.0011103126415412717, (12, 23): 0.01646707805529324, (25, 9): 0.050881034283701256, (13, 22): 0.04088988570126022, (26, 14): 0.004255674232309747, (3, 0): 1.850212700452044e-07, (27, 15): 1.850212700452044e-07, (4, 27): 1.850212700452044e-07, (5, 26): 0.011841546304163128, (16, 14): 0.4255491061252402, (6, 17): 0.66977718258491, (19, 11): 0.2797523453296191, (17, 15): 0.4366503823279525, (7, 20): 0.6044646742589529, (22, 20): 0.16577924298177318, (20, 12): 0.48494093380975084, (18, 12): 0.19889805031986477, (23, 17): 0.37929378861393914, (21, 1): 1.850212700452044e-07, (8, 10): 0.5336015278316396, (22, 14): 0.7663582855485067, (9, 11): 0.7147373512058948, (23, 15): 
0.5878127599548845, (10, 0): 1.850212700452044e-07, (8, 16): 0.6575657787619266, (11, 13): 0.6633014381333279, (0, 22): 1.850212700452044e-07, (24, 19): 0.05328631079428891, (14, 10): 0.7471160734638055, (12, 14): 0.6137157377612131, (1, 23): 1.850212700452044e-07, (25, 18): 0.014801886624886398, (15, 3): 0.0018503977217220891, (13, 15): 0.6224117374533377, (2, 20): 0.0009252913714960673, (26, 9): 0.0025904828019029068, (0, 12): 1.850212700452044e-07, (3, 9): 0.0009252913714960673, (27, 4): 1.850212700452044e-07, (1, 9): 1.850212700452044e-07, (4, 2): 1.850212700452044e-07, (2, 14): 0.000370227561360454, (5, 3): 0.00018520629131524958, (16, 5): 0.061057204136187496, (6, 8): 0.12673975500223505, (19, 16): 0.5990990574276419, (17, 0): 1.850212700452044e-07, (7, 13): 0.7091867131045386, (20, 11): 0.49252680588160425, (18, 7): 0.2958491958235519, (16, 27): 1.850212700452044e-07, (21, 10): 0.7075215216741317, (17, 26): 0.00018520629131524958, (22, 1): 1.850212700452044e-07, (9, 12): 0.7293540315394659, (23, 4): 0.01276665265438915, (10, 11): 0.7667283280885971, (8, 23): 0.35302076826752005, (11, 2): 1.850212700452044e-07, (9, 22): 0.2766069837388506, (24, 26): 1.850212700452044e-07, (14, 13): 0.7073365004040866, (12, 5): 0.04570043872243554, (10, 21): 0.16392903028132114, (25, 27): 1.850212700452044e-07, (15, 8): 0.3452498749256215, (13, 8): 0.516764592257526, (11, 24): 0.03718946030035613, (26, 16): 0.0033305678820837246, (0, 3): 1.850212700452044e-07, (24, 0): 1.850212700452044e-07, (3, 14): 0.014246822814750785, (1, 2): 1.850212700452044e-07, (4, 9): 0.031638822199, (2, 9): 1.850212700452044e-07, (5, 12): 0.3260076628409202, (3, 20): 0.016282056785248033, (6, 3): 1.850212700452044e-07, (4, 23): 0.07493379938957782, (19, 25): 0.0016653764516768848, (7, 10): 0.4410908928090374, (5, 22): 0.3002897063046368, (20, 18): 0.5783766751825791, (16, 18): 0.5687555691402284, (21, 19): 0.381884086394572, (17, 19): 0.49770740144286996, (22, 24): 0.003885631692219338, (18, 24): 
0.020907588536378145, (14, 20): 0.23960272972980975, (15, 17): 0.5842973558240256, (13, 1): 1.850212700452044e-07, (11, 17): 0.2973293659839135, (26, 27): 1.850212700452044e-07, (0, 26): 1.850212700452044e-07, (24, 7): 0.10916273434794065, (12, 26): 0.001295333911586476, (27, 18): 1.850212700452044e-07, (1, 27): 1.850212700452044e-07, (25, 6): 0.008881205983439857, (13, 27): 1.850212700452044e-07, (2, 0): 1.850212700452044e-07, (26, 5): 1.850212700452044e-07, (27, 8): 1.850212700452044e-07}, 6: {(7, 3): 0.00016940879883238242, (20, 25): 0.0022002835101096944, (16, 9): 0.8201350734770472, (19, 4): 0.005415835136298772, (17, 20): 0.7170681818797235, (7, 25): 0.002031043950836585, (22, 19): 0.07446557531972738, (20, 7): 0.21950387761678208, (18, 19): 0.8135347306653958, (23, 26): 1.6923955927310935e-07, (21, 6): 0.039940705228013074, (8, 5): 0.009985303236672724, (9, 0): 1.6923955927310935e-07, (10, 7): 0.14317683638460976, (11, 22): 0.1423306385882442, (0, 17): 1.6923955927310935e-07, (24, 14): 0.0018618043915634758, (14, 1): 1.6923955927310935e-07, (12, 17): 0.5254890007825638, (25, 15): 1.6923955927310935e-07, (15, 4): 0.0027080021879290228, (13, 20): 0.5715221609048495, (2, 27): 1.6923955927310935e-07, (26, 12): 1.6923955927310935e-07, (3, 2): 0.00016940879883238242, (27, 1): 1.6923955927310935e-07, (4, 5): 0.005585074695571881, (5, 24): 0.012016177947950037, (16, 0): 1.6923955927310935e-07, (6, 23): 0.015231729574139114, (19, 13): 0.8517828710611186, (17, 13): 0.7377154081110429, (7, 22): 0.01878576031887441, (20, 14): 0.948418659406064, (18, 10): 0.9106782376881606, (23, 19): 0.007446709847576083, (21, 15): 0.8522905897389379, (8, 12): 0.6837279887029211, (22, 12): 0.3845124479080637, (9, 9): 0.3528646503239923, (23, 9): 0.009477584558853395, (10, 14): 0.4442540123314713, (8, 18): 0.10086694656633244, (11, 15): 0.2756914112954544, (9, 19): 0.07023458633789964, (24, 21): 1.6923955927310935e-07, (14, 8): 0.575414670768131, (12, 8): 0.41869883888123177, (1, 21): 
0.0022002835101096944, (25, 16): 1.6923955927310935e-07, (15, 13): 0.7170681818797235, (13, 13): 0.5295507502051184, (2, 18): 0.18379433061015601, (26, 23): 1.6923955927310935e-07, (0, 14): 1.6923955927310935e-07, (3, 11): 0.15806991760064337, (27, 6): 1.6923955927310935e-07, (1, 15): 0.0113392197108576, (4, 12): 0.32240152965483254, (2, 12): 0.11592926734163916, (5, 1): 1.6923955927310935e-07, (3, 17): 0.4860561834719293, (16, 7): 0.40871370488411835, (6, 14): 0.6734043755872613, (19, 18): 0.8731070555295304, (17, 6): 0.19344098548872324, (7, 15): 0.5620447455855554, (20, 21): 0.18142497678033248, (18, 5): 0.052295193054950055, (21, 8): 0.24962851916739554, (17, 24): 0.08106591813137864, (22, 7): 0.029786331671626518, (23, 6): 1.6923955927310935e-07, (10, 9): 0.4457771683649293, (8, 25): 0.001523325273017257, (11, 4): 0.0010156065951979292, (9, 20): 0.06363424352624839, (14, 19): 0.7155450258462656, (12, 7): 0.22999673029171486, (10, 19): 0.15451588685590809, (25, 25): 1.6923955927310935e-07, (15, 10): 0.833843477778169, (13, 6): 0.13014539032058034, (11, 26): 0.0006771274766517105, (0, 5): 1.6923955927310935e-07, (24, 2): 1.6923955927310935e-07, (1, 0): 1.6923955927310935e-07, (25, 3): 1.6923955927310935e-07, (4, 11): 0.22238095012442494, (2, 7): 0.009646824118126505, (5, 10): 0.19022543386253415, (3, 22): 0.05534150512186602, (6, 1): 1.6923955927310935e-07, (4, 17): 0.5481671017251605, (19, 27): 1.6923955927310935e-07, (7, 4): 0.002031043950836585, (5, 20): 0.1617931879046518, (16, 20): 0.7084369643567949, (6, 27): 0.00016940879883238242, (21, 17): 0.6366793912249966, (19, 1): 1.6923955927310935e-07, (17, 17): 0.6534341075930344, (18, 22): 0.27281433878781153, (8, 0): 1.6923955927310935e-07, (14, 26): 0.0008463670359248199, (10, 26): 0.00033864835810549175, (15, 19): 0.7206222126244588, (11, 19): 0.30852388579443757, (26, 25): 1.6923955927310935e-07, (24, 9): 1.6923955927310935e-07, (12, 20): 0.4315610453859881, (27, 20): 1.6923955927310935e-07, (1, 25): 
1.6923955927310935e-07, (25, 4): 1.6923955927310935e-07, (13, 25): 0.02200131194506349, (26, 3): 1.6923955927310935e-07, (27, 10): 1.6923955927310935e-07, (4, 24): 0.011846938388676927, (20, 27): 1.6923955927310935e-07, (16, 11): 0.7676708101023832, (6, 18): 0.290584492511488, (21, 26): 0.00016940879883238242, (19, 6): 0.13133006723549212, (17, 10): 0.8705684621404337, (7, 27): 0.00016940879883238242, (22, 17): 0.2200115962946014, (20, 1): 1.6923955927310935e-07, (18, 17): 0.8057497109388329, (23, 20): 0.004061918662113897, (21, 4): 0.0016925648322903663, (8, 7): 0.0856353862317526, (22, 11): 0.32578632084029474, (9, 6): 0.046710287598937444, (10, 5): 0.017939562522508862, (15, 24): 0.11169827835981143, (11, 8): 0.3403409229377821, (0, 19): 1.6923955927310935e-07, (24, 16): 0.0016925648322903663, (14, 7): 0.3471105053087066, (12, 19): 0.5007800251286898, (1, 18): 0.007785188966122302, (25, 13): 0.00016940879883238242, (15, 6): 0.18447128884724845, (13, 18): 0.7031905380193286, (2, 25): 0.0006771274766517105, (26, 10): 1.6923955927310935e-07, (0, 9): 1.6923955927310935e-07, (3, 4): 0.0016925648322903663, (27, 3): 1.6923955927310935e-07, (4, 7): 0.025555342689798782, (5, 6): 0.015062490014866004, (16, 2): 1.6923955927310935e-07, (6, 21): 0.05398758864768115, (19, 15): 0.8487365589942026, (17, 3): 0.00033864835810549175, (7, 16): 0.43088408714889564, (20, 8): 0.4215759113888747, (18, 8): 0.6241556638387865, (16, 24): 0.1003592278885131, (21, 13): 0.9089858420954295, (8, 14): 0.6185707583827739, (22, 2): 1.6923955927310935e-07, (9, 15): 0.35421856679817715, (23, 11): 0.02910937343453408, (10, 12): 0.7365307311961311, (8, 20): 0.04112538214292484, (11, 1): 1.6923955927310935e-07, (9, 17): 0.12760679693148372, (24, 23): 1.6923955927310935e-07, (14, 14): 0.7079292456789756, (12, 10): 0.7710556012878454, (10, 22): 0.07988124121646688, (25, 22): 1.6923955927310935e-07, (15, 15): 0.8148886471395808, (13, 11): 0.7544701244790807, (2, 16): 0.2323660841215384, (26, 21): 
1.6923955927310935e-07, (0, 0): 1.6923955927310935e-07, (3, 13): 0.3180013011137317, (27, 24): 1.6923955927310935e-07, (1, 13): 0.012862375744315584, (4, 14): 0.5280275941716605, (2, 10): 0.05381834908840804, (5, 15): 0.6534341075930344, (3, 19): 0.3112317187428073, (6, 12): 0.5136422316334461, (4, 20): 0.21121113921239973, (19, 20): 0.5280275941716605, (17, 4): 0.004908116458479444, (7, 9): 0.20799558758621065, (20, 23): 0.032155685501450045, (18, 3): 0.00033864835810549175, (21, 22): 0.027247738282529875, (22, 5): 0.0010156065951979292, (23, 0): 1.6923955927310935e-07, (8, 27): 1.6923955927310935e-07, (11, 6): 0.08292755328338285, (9, 26): 0.00033864835810549175, (14, 17): 0.8037188362275556, (12, 1): 1.6923955927310935e-07, (10, 17): 0.15502360553372743, (15, 20): 0.6862665820920176, (13, 4): 0.0013540857137441476, (0, 7): 1.6923955927310935e-07, (24, 4): 1.6923955927310935e-07, (27, 17): 1.6923955927310935e-07, (1, 6): 0.0010156065951979292, (25, 1): 1.6923955927310935e-07, (2, 5): 0.0018618043915634758, (26, 6): 1.6923955927310935e-07, (5, 8): 0.06972686766008031, (3, 24): 0.008800626321760959, (6, 7): 0.04924888098803409, (4, 19): 0.33475601748176953, (7, 6): 0.02877089431598786, (5, 18): 0.3931436654309923, (16, 22): 0.39466682146445026, (6, 25): 0.002877241747202132, (19, 3): 0.00016940879883238242, (17, 23): 0.18142497678033248, (20, 4): 0.003892679102840788, (18, 20): 0.6647731580643328, (23, 25): 1.6923955927310935e-07, (8, 2): 1.6923955927310935e-07, (9, 3): 0.00033864835810549175, (14, 24): 0.10763652893725681, (10, 24): 0.01658564604832399, (11, 21): 0.2093495040603955, (24, 11): 0.0008463670359248199, (14, 2): 1.6923955927310935e-07, (12, 22): 0.23727403134045855, (27, 22): 1.6923955927310935e-07, (25, 10): 1.6923955927310935e-07, (13, 23): 0.18464052840652156, (26, 1): 1.6923955927310935e-07, (3, 1): 1.6923955927310935e-07, (27, 12): 1.6923955927310935e-07, (4, 26): 0.00033864835810549175, (5, 27): 0.00016940879883238242, (16, 13): 
0.7512545728528917, (6, 16): 0.5397051237615049, (21, 24): 0.002538762628655913, (19, 8): 0.5457977478953369, (17, 8): 0.6552957427450387, (7, 21): 0.0313094877050845, (22, 23): 0.0016925648322903663, (20, 3): 1.6923955927310935e-07, (18, 15): 0.6678194701312488, (23, 22): 0.0010156065951979292, (21, 2): 0.00016940879883238242, (8, 9): 0.27552217173618127, (22, 9): 0.13708421225077783, (9, 4): 0.001523325273017257, (23, 12): 0.037740590957462655, (10, 3): 0.00033864835810549175, (15, 26): 0.0013540857137441476, (11, 10): 0.7130064324571689, (0, 21): 1.6923955927310935e-07, (24, 18): 0.0008463670359248199, (14, 5): 0.0458640898025719, (12, 13): 0.49976458777305116, (1, 16): 0.01100074059231138, (25, 19): 1.6923955927310935e-07, (15, 0): 1.6923955927310935e-07, (13, 16): 0.7047136940527866, (2, 23): 0.009308344999580286, (26, 8): 1.6923955927310935e-07, (0, 11): 1.6923955927310935e-07, (3, 6): 0.008123668084668521, (27, 5): 1.6923955927310935e-07, (1, 10): 0.007615949406849193, (4, 1): 1.6923955927310935e-07, (5, 4): 0.0022002835101096944, (16, 4): 0.003892679102840788, (6, 11): 0.38908191600843767, (19, 17): 0.9078011651805178, (17, 1): 1.6923955927310935e-07, (7, 18): 0.17380919661304256, (20, 10): 0.8587216929913161, (18, 6): 0.17160908234249214, (16, 26): 0.0013540857137441476, (21, 11): 0.8155656053766732, (17, 27): 1.6923955927310935e-07, (22, 0): 1.6923955927310935e-07, (9, 13): 0.6796662392803664, (23, 5): 1.6923955927310935e-07, (10, 10): 0.6189092375013201, (8, 22): 0.020139676793059284, (11, 3): 0.00016940879883238242, (9, 23): 0.018108802081781974, (24, 25): 1.6923955927310935e-07, (14, 12): 0.6341407978358999, (12, 4): 0.0013540857137441476, (10, 20): 0.13911508696205513, (25, 20): 1.6923955927310935e-07, (15, 9): 0.7999955659235471, (13, 9): 0.7114832764237109, (11, 25): 0.0064312724919374275, (26, 19): 1.6923955927310935e-07, (0, 2): 1.6923955927310935e-07, (3, 15): 0.477763445067547, (27, 26): 1.6923955927310935e-07, (1, 3): 1.6923955927310935e-07, 
(4, 8): 0.054495307325500474, (2, 8): 0.017431843844689536, (5, 13): 0.536658811694589, (3, 21): 0.10797500805580303, (6, 2): 0.0005078879173786011, (4, 22): 0.06312652484842905, (19, 22): 0.17533235264650054, (7, 11): 0.47810192418609315, (5, 23): 0.022509030622882814, (20, 17): 0.8700607434626144, (18, 1): 1.6923955927310935e-07, (16, 17): 0.6261865385500638, (21, 20): 0.15062337699262657, (22, 27): 1.6923955927310935e-07, (18, 27): 1.6923955927310935e-07, (23, 2): 1.6923955927310935e-07, (9, 24): 0.007108230729029864, (14, 23): 0.21933463805750897, (12, 3): 0.00016940879883238242, (15, 22): 0.40126716427610154, (13, 2): 1.6923955927310935e-07, (26, 26): 1.6923955927310935e-07, (0, 25): 1.6923955927310935e-07, (24, 6): 1.6923955927310935e-07, (12, 25): 0.012354657066496254, (27, 19): 1.6923955927310935e-07, (1, 4): 0.00016940879883238242, (25, 7): 1.6923955927310935e-07, (2, 3): 0.00016940879883238242, (26, 4): 1.6923955927310935e-07, (3, 26): 0.00016940879883238242, (27, 9): 1.6923955927310935e-07, (6, 5): 0.00846214720321474, (7, 0): 1.6923955927310935e-07, (5, 16): 0.6168783627900428, (20, 24): 0.008969865881034067, (16, 8): 0.6578343361341353, (19, 5): 0.038586788753828205, (17, 21): 0.5498594973178915, (22, 18): 0.13623801445441228, (20, 6): 0.08546614667247948, (18, 18): 0.8548291831280346, (23, 27): 1.6923955927310935e-07, (21, 7): 0.11423687174890806, (8, 4): 0.0013540857137441476, (9, 1): 1.6923955927310935e-07, (10, 6): 0.0660035973560719, (11, 23): 0.08292755328338285, (0, 16): 1.6923955927310935e-07, (24, 13): 0.0016925648322903663, (14, 0): 1.6923955927310935e-07, (12, 16): 0.477763445067547, (25, 8): 1.6923955927310935e-07, (15, 5): 0.054326067766227365, (13, 21): 0.443746293653652, (2, 26): 1.6923955927310935e-07, (26, 15): 1.6923955927310935e-07, (3, 3): 0.0006771274766517105, (27, 14): 1.6923955927310935e-07, (4, 4): 0.0016925648322903663, (5, 25): 0.0042311582213870064, (16, 15): 0.7140218698128076, (6, 22): 0.02961709211235341, (19, 10): 
0.9208326112445472, (17, 14): 0.6855896238549252, (7, 23): 0.010493021914492051, (22, 21): 0.013539333981408021, (20, 13): 0.9540035648620766, (18, 13): 0.7417771575335975, (23, 16): 0.031986445942176936, (21, 0): 1.6923955927310935e-07, (8, 11): 0.5765993476830428, (22, 15): 0.35658792062800065, (9, 10): 0.5219349700378285, (23, 14): 0.04112538214292484, (10, 1): 1.6923955927310935e-07, (8, 17): 0.1856559657621602, (11, 12): 0.703867496256421, (0, 23): 1.6923955927310935e-07, (24, 20): 0.00016940879883238242, (14, 11): 0.7385616059074085, (12, 15): 0.4148063290179503, (1, 22): 0.0008463670359248199, (25, 17): 1.6923955927310935e-07, (15, 2): 1.6923955927310935e-07, (13, 14): 0.5347971765425849, (2, 21): 0.04603332936184501, (26, 22): 1.6923955927310935e-07, (0, 13): 0.00016940879883238242, (3, 8): 0.035878955805458454, (27, 7): 1.6923955927310935e-07, (1, 8): 0.002877241747202132, (4, 3): 0.0005078879173786011, (2, 15): 0.22813509513971064, (5, 2): 0.0006771274766517105, (16, 6): 0.19665653711491232, (6, 9): 0.15790067804137028, (19, 19): 0.7444849904819673, (17, 7): 0.4107445795953957, (7, 12): 0.6055393123187445, (20, 20): 0.34220255808978634, (18, 4): 0.005923553814118099, (21, 9): 0.44120770026455536, (17, 25): 0.022509030622882814, (22, 6): 0.00829290764394163, (23, 7): 0.001523325273017257, (10, 8): 0.27112194319508043, (8, 24): 0.004908116458479444, (11, 5): 0.023524467978521472, (9, 21): 0.04992583922512652, (24, 27): 1.6923955927310935e-07, (14, 18): 0.7561625200718118, (12, 6): 0.10492869598888706, (10, 18): 0.15688524068573162, (25, 26): 1.6923955927310935e-07, (15, 11): 0.7405924806186858, (13, 7): 0.29126145074858045, (11, 27): 0.00016940879883238242, (26, 17): 1.6923955927310935e-07, (0, 4): 1.6923955927310935e-07, (24, 1): 1.6923955927310935e-07, (1, 1): 1.6923955927310935e-07, (4, 10): 0.14486923197734086, (2, 6): 0.0042311582213870064, (5, 11): 0.30040038694932836, (3, 23): 0.022339791063609706, (6, 0): 1.6923955927310935e-07, (4, 16): 
0.6075701870300219, (19, 24): 0.02640154048616433, (7, 5): 0.00829290764394163, (5, 21): 0.0947743224325005, (20, 19): 0.5442745918618789, (16, 19): 0.7465158651932445, (6, 26): 0.00033864835810549175, (21, 18): 0.4676090715111604, (17, 18): 0.7504083750565261, (22, 25): 0.00033864835810549175, (18, 25): 0.014385531777773568, (14, 21): 0.5310739062385764, (15, 16): 0.7786713814551354, (13, 0): 1.6923955927310935e-07, (11, 16): 0.26452160038342915, (26, 24): 1.6923955927310935e-07, (0, 27): 1.6923955927310935e-07, (24, 8): 1.6923955927310935e-07, (12, 27): 0.00016940879883238242, (27, 21): 1.6923955927310935e-07, (1, 26): 1.6923955927310935e-07, (25, 5): 1.6923955927310935e-07, (13, 26): 0.0006771274766517105, (2, 1): 1.6923955927310935e-07, (26, 2): 1.6923955927310935e-07, (27, 11): 1.6923955927310935e-07, (7, 2): 1.6923955927310935e-07, (20, 26): 0.00016940879883238242, (16, 10): 0.84315165353819, (21, 27): 1.6923955927310935e-07, (19, 7): 0.31512422860608885, (17, 11): 0.818442677884316, (7, 24): 0.005246595577025662, (22, 16): 0.29853875179732414, (20, 0): 1.6923955927310935e-07, (18, 16): 0.7201144939466395, (23, 21): 0.0016925648322903663, (21, 5): 0.010493021914492051, (8, 6): 0.03655591404255089, (22, 10): 0.2308429280880804, (9, 7): 0.11508306954527361, (10, 4): 0.0011848461544710384, (15, 25): 0.03266340417926938, (11, 9): 0.5293815106458453, (0, 18): 1.6923955927310935e-07, (24, 15): 0.002031043950836585, (14, 6): 0.15891611539700892, (12, 18): 0.5322585831534882, (1, 19): 0.005077356017752554, (25, 14): 1.6923955927310935e-07, (15, 7): 0.3878972390935259, (13, 19): 0.6534341075930344, (2, 24): 0.002538762628655913, (26, 13): 1.6923955927310935e-07, (0, 8): 1.6923955927310935e-07, (3, 5): 0.004061918662113897, (27, 0): 1.6923955927310935e-07, (4, 6): 0.012693136185042473, (5, 7): 0.036725153601824, (16, 1): 1.6923955927310935e-07, (6, 20): 0.10255934215906352, (19, 12): 0.9039086553172363, (17, 12): 0.7575164365459967, (7, 17): 0.2856765452925678, (20, 
15): 0.9452031077798749, (18, 11): 0.8822459917302783, (23, 18): 0.013539333981408021, (21, 14): 0.8954466773535807, (8, 13): 0.7075907665604294, (22, 13): 0.40719054885066036, (9, 8): 0.21493440951640813, (23, 8): 0.004400397780660116, (10, 15): 0.2733220574656308, (8, 19): 0.05804933807023577, (11, 14): 0.3789275424520511, (9, 18): 0.0837737510797484, (24, 22): 1.6923955927310935e-07, (14, 9): 0.7664861331874715, (12, 9): 0.6239864242795135, (1, 20): 0.0037234395435676785, (25, 23): 1.6923955927310935e-07, (15, 12): 0.6732351360279882, (13, 12): 0.6231402264831478, (2, 19): 0.1328532232689501, (26, 20): 1.6923955927310935e-07, (0, 15): 1.6923955927310935e-07, (3, 10): 0.10475945642961394, (27, 25): 1.6923955927310935e-07, (1, 14): 0.011846938388676927, (4, 13): 0.4339303992158116, (2, 13): 0.162131667023198, (5, 0): 1.6923955927310935e-07, (3, 16): 0.5063649305847024, (6, 15): 0.6366793912249966, (19, 21): 0.32764795599229896, (17, 5): 0.06024945234078619, (7, 14): 0.6701888239610723, (20, 22): 0.089358656535761, (18, 2): 1.6923955927310935e-07, (21, 23): 0.009646824118126505, (22, 4): 1.6923955927310935e-07, (23, 1): 1.6923955927310935e-07, (8, 26): 0.0005078879173786011, (11, 7): 0.18836379871052997, (9, 27): 1.6923955927310935e-07, (14, 16): 0.8233506251032362, (12, 0): 1.6923955927310935e-07, (10, 16): 0.18802531959198374, (25, 24): 1.6923955927310935e-07, (15, 21): 0.5689835675157529, (13, 5): 0.03537123712763912, (0, 6): 1.6923955927310935e-07, (24, 3): 1.6923955927310935e-07, (1, 7): 0.002031043950836585, (25, 2): 1.6923955927310935e-07, (2, 4): 0.0011848461544710384, (5, 9): 0.1221911310347442, (3, 25): 0.0027080021879290228, (6, 6): 0.02081663503015172, (4, 18): 0.4488234804318453, (19, 26): 0.00033864835810549175, (7, 7): 0.06515739955970637, (5, 19): 0.27535293217690815, (16, 21): 0.5771070663608622, (6, 24): 0.008631386762487848, (21, 16): 0.7774867045402236, (19, 0): 1.6923955927310935e-07, (17, 16): 0.5852305652059714, (18, 23): 0.1289607134056686, 
(8, 1): 1.6923955927310935e-07, (14, 27): 1.6923955927310935e-07, (10, 27): 1.6923955927310935e-07, (15, 18): 0.713175672016442, (11, 18): 0.3163089055210006, (24, 10): 1.6923955927310935e-07, (12, 21): 0.34084864161560147, (27, 23): 1.6923955927310935e-07, (1, 24): 1.6923955927310935e-07, (25, 11): 0.00016940879883238242, (13, 24): 0.0895278960950341, (26, 0): 1.6923955927310935e-07, (27, 13): 1.6923955927310935e-07, (4, 25): 0.0037234395435676785, (16, 12): 0.7226530873357362, (6, 19): 0.1800710603061476, (21, 25): 0.0010156065951979292, (19, 9): 0.7803637770478664, (17, 9): 0.8243660624588749, (7, 26): 0.0005078879173786011, (22, 22): 0.005415835136298772, (20, 2): 0.00016940879883238242, (18, 14): 0.6838972282621941, (23, 23): 1.6923955927310935e-07, (21, 3): 1.6923955927310935e-07, (8, 8): 0.16467026041229466, (22, 8): 0.06515739955970637, (9, 5): 0.012693136185042473, (23, 13): 0.04180234038001728, (10, 2): 1.6923955927310935e-07, (15, 27): 1.6923955927310935e-07, (11, 11): 0.7695324452543875, (0, 20): 1.6923955927310935e-07, (24, 17): 0.0011848461544710384, (14, 4): 0.0018618043915634758, (12, 12): 0.651233993322484, (1, 17): 0.009816063677399614, (25, 12): 0.00016940879883238242, (15, 1): 1.6923955927310935e-07, (13, 17): 0.7297611488252067, (2, 22): 0.022678270182155926, (26, 11): 1.6923955927310935e-07, (0, 10): 1.6923955927310935e-07, (3, 7): 0.018278041641055082, (27, 2): 1.6923955927310935e-07, (1, 11): 0.010323782355218943, (4, 0): 1.6923955927310935e-07, (5, 5): 0.007785188966122302, (16, 3): 0.00016940879883238242, (6, 10): 0.25487494550486195, (19, 14): 0.832320321744711, (17, 2): 1.6923955927310935e-07, (7, 19): 0.0966359575845047, (20, 9): 0.6607114086417781, (18, 9): 0.8218274690697782, (16, 25): 0.027924696519622313, (21, 12): 0.8878308971862908, (8, 15): 0.4637165616478789, (22, 3): 1.6923955927310935e-07, (9, 14): 0.5359818534574966, (23, 10): 0.018278041641055082, (10, 13): 0.6173860814678621, (8, 21): 0.03147872726435761, (11, 0): 
1.6923955927310935e-07, (9, 16): 0.21307277436440392, (24, 24): 1.6923955927310935e-07, (14, 15): 0.795087618704627, (12, 11): 0.7691939661358412, (10, 23): 0.04535637112475257, (25, 21): 1.6923955927310935e-07, (15, 14): 0.7928875044340765, (13, 10): 0.8055804713795597, (2, 17): 0.21933463805750897, (26, 18): 1.6923955927310935e-07, (0, 1): 1.6923955927310935e-07, (3, 12): 0.22847357425825687, (27, 27): 1.6923955927310935e-07, (1, 12): 0.012862375744315584, (4, 15): 0.5977542925921815, (2, 11): 0.08106591813137864, (5, 14): 0.6197554352976857, (3, 18): 0.4115907773917612, (6, 13): 0.621786310008963, (4, 21): 0.11694470469727782, (19, 23): 0.07649645003100469, (7, 8): 0.12591440133875262, (20, 16): 0.9262482771412867, (18, 0): 1.6923955927310935e-07, (16, 16): 0.6478492021370218, (21, 21): 0.06871143030444166, (22, 26): 0.00016940879883238242, (18, 26): 0.0006771274766517105, (23, 3): 1.6923955927310935e-07, (9, 25): 0.002031043950836585, (14, 22): 0.3718194809625805, (12, 2): 1.6923955927310935e-07, (15, 23): 0.2308429280880804, (13, 3): 1.6923955927310935e-07, (0, 24): 1.6923955927310935e-07, (24, 5): 1.6923955927310935e-07, (12, 24): 0.06278804572988284, (27, 16): 1.6923955927310935e-07, (1, 5): 0.00033864835810549175, (25, 0): 1.6923955927310935e-07, (2, 2): 0.00016940879883238242, (26, 7): 1.6923955927310935e-07, (3, 27): 1.6923955927310935e-07, (6, 4): 0.0022002835101096944, (7, 1): 1.6923955927310935e-07, (5, 17): 0.5153346272261772, (16, 23): 0.22119627320951316, (19, 2): 1.6923955927310935e-07, (17, 22): 0.3503260569348956, (20, 5): 0.023524467978521472, (18, 21): 0.4567777397176814, (23, 24): 1.6923955927310935e-07, (8, 3): 1.6923955927310935e-07, (9, 2): 1.6923955927310935e-07, (14, 25): 0.029786331671626518, (10, 25): 0.003554199984294569, (11, 20): 0.27281433878781153, (24, 12): 0.0013540857137441476, (14, 3): 1.6923955927310935e-07, (12, 23): 0.1343763793024081, (25, 9): 1.6923955927310935e-07, (13, 22): 0.3178320615544586, (26, 14): 
1.6923955927310935e-07, (3, 0): 1.6923955927310935e-07, (27, 15): 1.6923955927310935e-07, (4, 27): 0.00016940879883238242, (5, 26): 0.0005078879173786011, (16, 14): 0.764116779357648, (6, 17): 0.4124369751881268, (19, 11): 0.9418183165944127, (17, 15): 0.604693114522379, (7, 20): 0.051618234817857614, (22, 20): 0.03164796682363072, (20, 12): 0.9658503340111942, (18, 12): 0.813703970224669, (23, 17): 0.02098587458942483, (21, 1): 1.6923955927310935e-07, (8, 10): 0.4295301706747108, (22, 14): 0.3966976961757276, (9, 11): 0.6715427404352572, (23, 15): 0.03807907007600887, (10, 0): 1.6923955927310935e-07, (8, 16): 0.3103855209464418, (11, 13): 0.5471516643695218, (0, 22): 1.6923955927310935e-07, (24, 19): 0.00033864835810549175, (14, 10): 0.8280893327628833, (12, 14): 0.4017748829539209, (1, 23): 0.00033864835810549175, (25, 18): 1.6923955927310935e-07, (15, 3): 1.6923955927310935e-07, (13, 15): 0.6299098088540722, (2, 20): 0.0820813554870173, (26, 9): 1.6923955927310935e-07, (0, 12): 0.00016940879883238242, (3, 9): 0.06312652484842905, (27, 4): 1.6923955927310935e-07, (1, 9): 0.005246595577025662, (4, 2): 0.00016940879883238242, (2, 14): 0.19530262064072745, (5, 3): 0.00016940879883238242, (16, 5): 0.06329576440770215, (6, 8): 0.09528204111031982, (19, 16): 0.8859692620342867, (17, 0): 1.6923955927310935e-07, (7, 13): 0.6820355931101899, (20, 11): 0.948418659406064, (18, 7): 0.37740438641859314, (16, 27): 1.6923955927310935e-07, (21, 10): 0.6603729295232319, (17, 26): 0.0013540857137441476, (22, 1): 1.6923955927310935e-07, (9, 12): 0.7324689817735766, (23, 4): 1.6923955927310935e-07, (10, 11): 0.7346690960441269, (8, 23): 0.0113392197108576, (11, 2): 1.6923955927310935e-07, (9, 22): 0.03604819536473156, (24, 26): 1.6923955927310935e-07, (14, 13): 0.6258480594315177, (12, 5): 0.027755456960349204, (10, 21): 0.11457535086745428, (25, 27): 1.6923955927310935e-07, (15, 8): 0.6324484022431689, (13, 8): 0.4999338273323243, (11, 24): 0.03740211183891644, (26, 16): 
1.6923955927310935e-07, (0, 3): 1.6923955927310935e-07, (24, 0): 1.6923955927310935e-07, (3, 14): 0.39974400824264356, (1, 2): 1.6923955927310935e-07, (4, 9): 0.09155877080631142, (2, 9): 0.03147872726435761, (5, 12): 0.4200527553554167, (3, 20): 0.19614881843709298, (6, 3): 0.00016940879883238242, (4, 23): 0.02623230092689122, (19, 25): 0.006769751610483646, (7, 10): 0.33746385043013927, (5, 22): 0.047387245836029886, (20, 18): 0.7429618344485093, (16, 18): 0.6855896238549252, (21, 19): 0.29329232545985773, (17, 19): 0.7957645769417194, (22, 24): 0.00033864835810549175, (18, 24): 0.05381834908840804, (14, 20): 0.6492031186112067, (15, 17): 0.7267148367582907, (13, 1): 1.6923955927310935e-07, (11, 17): 0.2966771166453199, (26, 27): 1.6923955927310935e-07, (0, 26): 1.6923955927310935e-07, (24, 7): 1.6923955927310935e-07, (12, 26): 0.0006771274766517105, (27, 18): 1.6923955927310935e-07, (1, 27): 1.6923955927310935e-07, (25, 6): 1.6923955927310935e-07, (13, 27): 0.00016940879883238242, (2, 0): 1.6923955927310935e-07, (26, 5): 1.6923955927310935e-07, (27, 8): 1.6923955927310935e-07}, 7: {(7, 3): 0.026207520106158986, (20, 25): 0.0006393637108599466, (16, 9): 0.1037109941794092, (19, 4): 0.0023971744630367554, (17, 20): 0.12017049485888295, (7, 25): 0.0019177715306248986, (22, 19): 0.04778065206469255, (20, 7): 0.014382247773333181, (18, 19): 0.16906959396489238, (23, 26): 1.59800977470619e-07, (21, 6): 0.009108815516802753, (8, 5): 0.23378898984049307, (9, 0): 0.0004795627333893276, (10, 7): 0.5362922401923749, (11, 22): 0.15053268057830058, (0, 17): 1.59800977470619e-07, (24, 14): 0.4833981166496, (14, 1): 0.0006393637108599466, (12, 17): 0.8506207628770824, (25, 15): 0.3138492795532732, (15, 4): 0.026207520106158986, (13, 20): 0.40062121031981934, (2, 27): 1.59800977470619e-07, (26, 12): 0.2997867935358587, (3, 2): 1.59800977470619e-07, (27, 1): 1.59800977470619e-07, (4, 5): 0.0001599607784480896, (5, 24): 1.59800977470619e-07, (16, 0): 0.0001599607784480896, (6, 
23): 0.010547024314038325, (19, 13): 0.6551841674305154, (17, 13): 0.4718924462717154, (7, 22): 0.12672233493517834, (20, 14): 0.7822259445196575, (18, 10): 0.09907676583276125, (23, 19): 0.045223836425162645, (21, 15): 0.6093212868964477, (8, 12): 0.8725134967905572, (22, 12): 0.6190691465221555, (9, 9): 0.7959688285821307, (23, 9): 0.2729402293207947, (10, 14): 0.6630144153265758, (8, 18): 0.8213771839999592, (11, 15): 0.45958777100647774, (9, 19): 0.7814269396323045, (24, 21): 0.007830407697037802, (14, 8): 0.19927197870683935, (12, 8): 0.37713046663163835, (1, 21): 1.59800977470619e-07, (25, 16): 0.23203117908831625, (15, 13): 0.19751416795466256, (13, 13): 0.0776634348516983, (2, 18): 1.59800977470619e-07, (26, 23): 0.0007991646883305657, (0, 14): 1.59800977470619e-07, (3, 11): 1.59800977470619e-07, (27, 6): 0.00335598032786047, (1, 15): 1.59800977470619e-07, (4, 12): 0.0004795627333893276, (2, 12): 1.59800977470619e-07, (5, 1): 1.59800977470619e-07, (3, 17): 1.59800977470619e-07, (16, 7): 0.0843750759054643, (6, 14): 0.1911221288558378, (19, 18): 0.24018102893931784, (17, 6): 0.03531617582198427, (7, 15): 0.6572615801376335, (20, 21): 0.012464636043685754, (18, 5): 0.01134602920139142, (21, 8): 0.062482341991989494, (17, 24): 0.006711800854743468, (22, 7): 0.05369328823110545, (23, 6): 0.03963080221369098, (10, 9): 0.7280534131571177, (8, 25): 0.002716776417977994, (11, 4): 0.13503198576365052, (9, 20): 0.5981352184735044, (14, 19): 0.580237508996795, (12, 7): 0.3266333577509227, (10, 19): 0.7930924109876596, (25, 25): 1.59800977470619e-07, (15, 10): 0.11585586846717624, (13, 6): 0.17130680764948103, (11, 26): 0.0001599607784480896, (0, 5): 1.59800977470619e-07, (24, 2): 0.0001599607784480896, (1, 0): 1.59800977470619e-07, (25, 3): 0.0009589656658011847, (4, 11): 0.0004795627333893276, (2, 7): 1.59800977470619e-07, (5, 10): 0.01629985950298061, (3, 22): 1.59800977470619e-07, (6, 1): 1.59800977470619e-07, (4, 17): 0.0001599607784480896, (19, 27): 
1.59800977470619e-07, (7, 4): 0.06471955567657817, (5, 20): 0.0017579705531542795, (16, 20): 0.1828124780273656, (6, 27): 1.59800977470619e-07, (21, 17): 0.26287276774014573, (19, 1): 0.0004795627333893276, (17, 17): 0.7223005779681754, (18, 22): 0.01917627709745175, (8, 0): 0.0001599607784480896, (14, 26): 1.59800977470619e-07, (10, 26): 0.0001599607784480896, (15, 19): 0.4760472716859515, (11, 19): 0.766086045795125, (26, 25): 0.0001599607784480896, (24, 9): 0.30841604631927216, (12, 20): 0.48403732055948245, (27, 20): 0.0030363783729192317, (1, 25): 1.59800977470619e-07, (25, 4): 0.005752994989919755, (13, 25): 0.0015981695756836604, (26, 3): 1.59800977470619e-07, (27, 10): 0.023650704466629084, (4, 24): 1.59800977470619e-07, (20, 27): 1.59800977470619e-07, (16, 11): 0.11825288312923553, (6, 18): 0.14557885027671139, (21, 26): 1.59800977470619e-07, (19, 6): 0.0073510047646259446, (17, 10): 0.10003557169758497, (7, 27): 1.59800977470619e-07, (22, 17): 0.21940690186813736, (20, 1): 0.0003197617559187086, (18, 17): 0.5871089510280317, (23, 20): 0.021573291759511035, (21, 4): 0.0012785676207424225, (8, 7): 0.4819599078523644, (22, 11): 0.4947439860500139, (9, 6): 0.41915812370641115, (10, 5): 0.2778940596223839, (15, 24): 0.012145034088744516, (11, 8): 0.5254257737243728, (0, 19): 1.59800977470619e-07, (24, 16): 0.26926480683897047, (14, 7): 0.17130680764948103, (12, 19): 0.716547742779233, (1, 18): 1.59800977470619e-07, (25, 13): 0.4659798101053025, (15, 6): 0.08709169252246482, (13, 18): 0.8469453403952583, (2, 25): 1.59800977470619e-07, (26, 10): 0.24209864066896525, (0, 9): 1.59800977470619e-07, (3, 4): 1.59800977470619e-07, (27, 3): 1.59800977470619e-07, (4, 7): 0.0001599607784480896, (5, 6): 0.004794189125096041, (16, 2): 0.0006393637108599466, (6, 21): 0.045703239357574504, (19, 15): 0.8082735038473684, (17, 3): 0.0022373734855661367, (7, 16): 0.6539057596107505, (20, 8): 0.0346769719121018, (18, 8): 0.0431464237180446, (16, 24): 0.008789213561861514, (21, 
13): 0.7099959027029378, (8, 14): 0.8846583710783243, (22, 2): 1.59800977470619e-07, (9, 15): 0.8896122013799135, (23, 11): 0.5322972157556094, (10, 12): 0.7010470479645831, (8, 20): 0.5374108470346692, (11, 1): 0.005912795967390373, (9, 17): 0.9262066252206852, (24, 23): 0.0007991646883305657, (14, 14): 0.2842860987212087, (12, 10): 0.345809475047397, (10, 22): 0.1912819298333084, (25, 22): 0.0030363783729192317, (15, 15): 0.7176663496215274, (13, 11): 0.15101208351071244, (2, 16): 1.59800977470619e-07, (26, 21): 0.004474587170154803, (0, 0): 1.59800977470619e-07, (3, 13): 1.59800977470619e-07, (27, 24): 1.59800977470619e-07, (1, 13): 1.59800977470619e-07, (4, 14): 0.0004795627333893276, (2, 10): 1.59800977470619e-07, (5, 15): 0.014382247773333181, (3, 19): 1.59800977470619e-07, (6, 12): 0.18393108486965995, (4, 20): 1.59800977470619e-07, (19, 20): 0.04586304033504512, (17, 4): 0.009748019426685229, (7, 9): 0.46118578078118394, (20, 23): 0.0019177715306248986, (18, 3): 0.0019177715306248986, (21, 22): 0.004474587170154803, (22, 5): 0.009268616494273373, (23, 0): 1.59800977470619e-07, (8, 27): 1.59800977470619e-07, (11, 6): 0.32535494993115777, (9, 26): 0.0001599607784480896, (14, 17): 0.8974424492759738, (12, 1): 0.003675582282801708, (10, 17): 0.8838593661909712, (15, 20): 0.2459338641282601, (13, 4): 0.059126521465106496, (0, 7): 1.59800977470619e-07, (24, 4): 0.007670606719567183, (27, 17): 0.024769311308923417, (1, 6): 1.59800977470619e-07, (25, 1): 1.59800977470619e-07, (2, 5): 1.59800977470619e-07, (26, 6): 0.033558365069807465, (5, 8): 0.011665631156332658, (3, 24): 1.59800977470619e-07, (6, 7): 0.07270960455010911, (4, 19): 1.59800977470619e-07, (7, 6): 0.1987925757744275, (5, 18): 0.006232397922331611, (16, 22): 0.04810025401963379, (6, 25): 0.0001599607784480896, (19, 3): 0.0011187666432718037, (17, 23): 0.015980257548039372, (20, 4): 0.0011187666432718037, (18, 20): 0.07702423094181582, (23, 25): 0.0003197617559187086, (8, 2): 0.027166325970982702, (9, 
3): 0.08805049838728854, (14, 24): 0.010387223336567707, (10, 24): 0.013743043863450705, (11, 21): 0.32807156654815833, (24, 11): 0.5275031864314909, (14, 2): 0.003995184237742946, (12, 22): 0.10674721275135096, (27, 22): 0.0007991646883305657, (25, 10): 0.40557504062140853, (13, 23): 0.02253209762433475, (26, 1): 1.59800977470619e-07, (3, 1): 1.59800977470619e-07, (27, 12): 0.03595537973186674, (4, 26): 1.59800977470619e-07, (5, 27): 1.59800977470619e-07, (16, 13): 0.32919017339045265, (6, 16): 0.18425068682460116, (21, 24): 0.0012785676207424225, (19, 8): 0.02860453476821827, (17, 8): 0.06871458011334364, (7, 21): 0.23586640254761113, (22, 23): 0.0009589656658011847, (20, 3): 0.0001599607784480896, (18, 15): 0.8566932000209659, (23, 22): 0.003515781305331089, (21, 2): 1.59800977470619e-07, (8, 9): 0.7138311261622325, (22, 9): 0.2114168529946064, (9, 4): 0.17226561351430475, (23, 12): 0.6107594956936834, (10, 3): 0.08868970229717102, (15, 26): 1.59800977470619e-07, (11, 10): 0.5359726382374337, (0, 21): 1.59800977470619e-07, (24, 18): 0.09172592086911277, (14, 5): 0.07542622116710963, (12, 13): 0.15117188448818303, (1, 16): 1.59800977470619e-07, (25, 19): 0.03963080221369098, (15, 0): 0.0001599607784480896, (13, 16): 0.7440335109041796, (2, 23): 1.59800977470619e-07, (26, 8): 0.12144890267864791, (0, 11): 1.59800977470619e-07, (3, 6): 1.59800977470619e-07, (27, 5): 0.0009589656658011847, (1, 10): 1.59800977470619e-07, (4, 1): 1.59800977470619e-07, (5, 4): 0.0006393637108599466, (16, 4): 0.016779262435392465, (6, 11): 0.17594103599612898, (19, 17): 0.4450458820566514, (17, 1): 0.0001599607784480896, (7, 18): 0.5912637764422678, (20, 10): 0.1853692936668955, (18, 6): 0.01773806830021618, (16, 26): 1.59800977470619e-07, (21, 11): 0.42730797355741273, (17, 27): 1.59800977470619e-07, (22, 0): 1.59800977470619e-07, (9, 13): 0.8769879241597346, (23, 5): 0.015341053638156896, (10, 10): 0.7496265451156512, (8, 22): 0.18664770148666046, (11, 3): 0.0683949781584024, (9, 23): 
0.08629268763511173, (24, 25): 0.0001599607784480896, (14, 12): 0.07191059966275602, (12, 4): 0.09092691598175968, (10, 20): 0.5997332282482106, (25, 20): 0.018696874165039894, (15, 9): 0.13311437403400309, (13, 9): 0.27166182150102974, (11, 25): 0.0014383685982130416, (26, 19): 0.027166325970982702, (0, 2): 1.59800977470619e-07, (3, 15): 1.59800977470619e-07, (27, 26): 1.59800977470619e-07, (1, 3): 1.59800977470619e-07, (4, 8): 0.0001599607784480896, (2, 8): 1.59800977470619e-07, (5, 13): 0.016779262435392465, (3, 21): 1.59800977470619e-07, (6, 2): 0.0012785676207424225, (4, 22): 0.0001599607784480896, (19, 22): 0.00942841747174399, (7, 11): 0.5866295480956198, (5, 23): 0.0001599607784480896, (20, 17): 0.3359018144442186, (18, 1): 0.0001599607784480896, (16, 17): 0.8348004661074911, (21, 20): 0.02413010739904094, (22, 27): 1.59800977470619e-07, (18, 27): 1.59800977470619e-07, (23, 2): 1.59800977470619e-07, (9, 24): 0.021573291759511035, (14, 23): 0.025088913263864653, (12, 3): 0.044584632515280165, (15, 22): 0.06056473026234207, (13, 2): 0.008469611606920278, (26, 26): 1.59800977470619e-07, (0, 25): 1.59800977470619e-07, (24, 6): 0.058007914622812164, (12, 25): 0.0009589656658011847, (27, 19): 0.007031402809684706, (1, 4): 1.59800977470619e-07, (25, 7): 0.11985089290394171, (2, 3): 1.59800977470619e-07, (26, 4): 0.002556975440507375, (3, 26): 1.59800977470619e-07, (27, 9): 0.013423441908509467, (6, 5): 0.02764572890339456, (7, 0): 0.0001599607784480896, (5, 16): 0.013743043863450705, (20, 24): 0.0012785676207424225, (16, 8): 0.10003557169758497, (19, 5): 0.004474587170154803, (17, 21): 0.05992552635245959, (22, 18): 0.1147372616248819, (20, 6): 0.00639219889980223, (18, 18): 0.345809475047397, (23, 27): 1.59800977470619e-07, (21, 7): 0.024929112286394037, (8, 4): 0.13726919944823918, (9, 1): 0.006711800854743468, (10, 6): 0.4014202152071724, (11, 23): 0.04857965695204564, (0, 16): 1.59800977470619e-07, (24, 13): 0.5541899496690842, (14, 0): 1.59800977470619e-07, 
(12, 16): 0.6543851625431623, (25, 8): 0.20230819727878113, (15, 5): 0.055131497028341026, (13, 21): 0.19176133276572027, (2, 26): 1.59800977470619e-07, (26, 15): 0.2019885953238399, (3, 3): 1.59800977470619e-07, (27, 14): 0.04410522958286831, (4, 4): 0.0001599607784480896, (5, 25): 1.59800977470619e-07, (16, 15): 0.8015618627936024, (6, 22): 0.023810505444099704, (19, 10): 0.12320671343082472, (17, 14): 0.7050420724013485, (7, 23): 0.05385308920857607, (22, 21): 0.008949014539332134, (20, 13): 0.6952942127756407, (18, 13): 0.5752836786952059, (23, 16): 0.3095346531615665, (21, 0): 1.59800977470619e-07, (8, 11): 0.8475845443051406, (22, 15): 0.5132808994366057, (9, 10): 0.8630852391197907, (23, 14): 0.5601025858354971, (10, 1): 0.0071912037871553255, (8, 17): 0.8707556860383805, (11, 12): 0.41803951686411683, (0, 23): 1.59800977470619e-07, (24, 20): 0.020454684917216703, (14, 11): 0.0952415423734664, (12, 15): 0.3790480783612858, (1, 22): 1.59800977470619e-07, (25, 17): 0.13934661215535724, (15, 2): 0.0011187666432718037, (13, 14): 0.18840551223883728, (2, 21): 1.59800977470619e-07, (26, 22): 0.002556975440507375, (0, 13): 1.59800977470619e-07, (3, 8): 1.59800977470619e-07, (27, 7): 0.00639219889980223, (1, 8): 1.59800977470619e-07, (4, 3): 0.0001599607784480896, (2, 15): 1.59800977470619e-07, (5, 2): 1.59800977470619e-07, (16, 6): 0.06104413319475393, (6, 9): 0.1305575583944732, (19, 19): 0.10594820786399786, (17, 7): 0.05209527845639926, (7, 12): 0.614275117198037, (20, 20): 0.02908393770063013, (18, 4): 0.005752994989919755, (21, 9): 0.14014561704271034, (17, 25): 0.0017579705531542795, (22, 6): 0.020454684917216703, (23, 7): 0.09076711500428906, (10, 8): 0.6475137205119257, (8, 24): 0.023650704466629084, (11, 5): 0.22196371750766725, (9, 21): 0.3849607145276987, (24, 27): 1.59800977470619e-07, (14, 18): 0.8106705185094277, (12, 6): 0.23954182502943536, (10, 18): 0.8913700121320903, (25, 26): 1.59800977470619e-07, (15, 11): 0.09636014921576072, (13, 7): 
0.23874282014208226, (11, 27): 0.0001599607784480896, (26, 17): 0.09348373162128958, (0, 4): 1.59800977470619e-07, (24, 1): 1.59800977470619e-07, (1, 1): 1.59800977470619e-07, (4, 10): 1.59800977470619e-07, (2, 6): 1.59800977470619e-07, (5, 11): 0.01741846634527494, (3, 23): 1.59800977470619e-07, (6, 0): 1.59800977470619e-07, (4, 16): 0.0003197617559187086, (19, 24): 0.0014383685982130416, (7, 5): 0.12496452418300152, (5, 21): 0.0014383685982130416, (20, 19): 0.07414781334734469, (16, 19): 0.3608307669296352, (6, 26): 1.59800977470619e-07, (21, 18): 0.13455258283123867, (17, 18): 0.4706140384519505, (22, 25): 0.0003197617559187086, (18, 25): 0.0014383685982130416, (14, 21): 0.14589845223165263, (15, 16): 0.8777869290470877, (13, 0): 1.59800977470619e-07, (11, 16): 0.6614164055518695, (26, 24): 1.59800977470619e-07, (0, 27): 1.59800977470619e-07, (24, 8): 0.1997513816392512, (12, 27): 0.0001599607784480896, (27, 21): 0.0012785676207424225, (1, 26): 1.59800977470619e-07, (25, 5): 0.024449709353982177, (13, 26): 0.0001599607784480896, (2, 1): 1.59800977470619e-07, (26, 2): 1.59800977470619e-07, (27, 11): 0.028284932813277035, (7, 2): 0.009108815516802753, (20, 26): 1.59800977470619e-07, (16, 10): 0.10898442643593963, (21, 27): 1.59800977470619e-07, (19, 7): 0.01661946145792185, (17, 11): 0.14094462193006344, (7, 24): 0.01645966048045123, (22, 16): 0.3657845972312244, (20, 0): 1.59800977470619e-07, (18, 16): 0.7954894256497189, (23, 21): 0.007830407697037802, (21, 5): 0.004154985215213565, (8, 6): 0.3530005190335749, (22, 10): 0.34517027113751453, (9, 7): 0.5612211926777915, (10, 4): 0.169229394942363, (15, 25): 0.002716776417977994, (11, 9): 0.5570663672635553, (0, 18): 1.59800977470619e-07, (24, 15): 0.3828833018205806, (14, 6): 0.1248047232055309, (12, 18): 0.8646832488944969, (1, 19): 1.59800977470619e-07, (25, 14): 0.4018996181395843, (15, 7): 0.11873228606164739, (13, 19): 0.6617360075068108, (2, 24): 1.59800977470619e-07, (26, 13): 0.2878017202255623, (0, 8): 
1.59800977470619e-07, (3, 5): 1.59800977470619e-07, (27, 0): 1.59800977470619e-07, (4, 6): 0.0001599607784480896, (5, 7): 0.008629412584390896, (16, 1): 1.59800977470619e-07, (6, 20): 0.07750363387422768, (19, 12): 0.45287612995271176, (17, 12): 0.256480728641321, (7, 17): 0.6393638706609241, (20, 15): 0.716547742779233, (18, 11): 0.18425068682460116, (23, 18): 0.10067477560746743, (21, 14): 0.719104558418763, (8, 13): 0.8806633466415588, (22, 13): 0.6804327218708732, (9, 8): 0.6892217756317572, (23, 8): 0.16299715682100885, (10, 15): 0.7071194851084666, (8, 19): 0.7007274460096419, (11, 14): 0.35683574249286976, (9, 18): 0.8886533955150898, (24, 22): 0.0031961793503898508, (14, 9): 0.1834516819372481, (12, 9): 0.384001908662875, (1, 20): 1.59800977470619e-07, (25, 23): 0.0006393637108599466, (15, 12): 0.10498940199917416, (13, 12): 0.08421527492799367, (2, 19): 1.59800977470619e-07, (26, 20): 0.01134602920139142, (0, 15): 1.59800977470619e-07, (3, 10): 1.59800977470619e-07, (27, 25): 1.59800977470619e-07, (1, 14): 1.59800977470619e-07, (4, 13): 0.0004795627333893276, (2, 13): 1.59800977470619e-07, (5, 0): 1.59800977470619e-07, (3, 16): 1.59800977470619e-07, (6, 15): 0.1903231239684847, (19, 21): 0.018696874165039894, (17, 5): 0.020135082962275466, (7, 14): 0.6510293420162793, (20, 22): 0.0054333930349785165, (18, 2): 0.0007991646883305657, (21, 23): 0.0012785676207424225, (22, 4): 0.003515781305331089, (23, 1): 1.59800977470619e-07, (8, 26): 1.59800977470619e-07, (11, 7): 0.43881364393529726, (9, 27): 0.0001599607784480896, (14, 16): 0.8236143976845478, (12, 0): 0.0003197617559187086, (10, 16): 0.7945306197848951, (25, 24): 0.0004795627333893276, (15, 21): 0.11569606748970562, (13, 5): 0.10786581959364529, (0, 6): 1.59800977470619e-07, (24, 3): 0.0011187666432718037, (1, 7): 1.59800977470619e-07, (25, 2): 1.59800977470619e-07, (2, 4): 1.59800977470619e-07, (5, 9): 0.014062645818391943, (3, 25): 1.59800977470619e-07, (6, 6): 0.045383637402633265, (4, 18): 
1.59800977470619e-07, (19, 26): 1.59800977470619e-07, (7, 7): 0.2863635114283267, (5, 19): 0.0038353832602723266, (16, 21): 0.09012791109440658, (6, 24): 0.0020775725080955177, (21, 16): 0.4329010077688844, (19, 0): 1.59800977470619e-07, (17, 16): 0.8728330987454985, (18, 23): 0.00942841747174399, (8, 1): 0.004154985215213565, (14, 27): 1.59800977470619e-07, (10, 27): 0.0001599607784480896, (15, 18): 0.7411570933097085, (11, 18): 0.876828123182264, (24, 10): 0.43338041070129624, (12, 21): 0.2548827188666148, (27, 23): 0.0003197617559187086, (1, 24): 1.59800977470619e-07, (25, 11): 0.46965523258712677, (13, 24): 0.006551999877272849, (26, 0): 1.59800977470619e-07, (27, 13): 0.0423474188306915, (4, 25): 1.59800977470619e-07, (16, 12): 0.16651277832536246, (6, 19): 0.11281964989523448, (21, 25): 0.0006393637108599466, (19, 9): 0.057528511690400304, (17, 9): 0.0794212456038751, (7, 26): 1.59800977470619e-07, (22, 22): 0.003675582282801708, (20, 2): 0.0001599607784480896, (18, 14): 0.7731172888038322, (23, 23): 0.0009589656658011847, (21, 3): 1.59800977470619e-07, (8, 8): 0.59957342727074, (22, 8): 0.11138144109799891, (9, 5): 0.2844458996986793, (23, 13): 0.6233837729138623, (10, 2): 0.038192593416455416, (15, 27): 1.59800977470619e-07, (11, 11): 0.48243931078477625, (0, 20): 1.59800977470619e-07, (24, 17): 0.16475496757318567, (14, 4): 0.040749409055985314, (12, 12): 0.19176133276572027, (1, 17): 1.59800977470619e-07, (25, 12): 0.4891509518385423, (15, 1): 1.59800977470619e-07, (13, 17): 0.8800241427316764, (2, 22): 1.59800977470619e-07, (26, 11): 0.2828478899239731, (0, 10): 1.59800977470619e-07, (3, 7): 1.59800977470619e-07, (27, 2): 1.59800977470619e-07, (1, 11): 1.59800977470619e-07, (4, 0): 1.59800977470619e-07, (5, 5): 0.002716776417977994, (16, 3): 0.004634388147625422, (6, 10): 0.1559659138123016, (19, 14): 0.7943708188074246, (17, 2): 0.0004795627333893276, (7, 19): 0.4928263743203665, (20, 9): 0.08277706613075811, (18, 9): 0.062322541014518874, (16, 25): 
0.0007991646883305657, (21, 12): 0.5915833783972091, (8, 15): 0.8892925994249723, (22, 3): 0.0003197617559187086, (9, 14): 0.8752301134075577, (23, 10): 0.4073328513735853, (10, 13): 0.6694064544254005, (8, 21): 0.34165464963316095, (11, 0): 0.0006393637108599466, (9, 16): 0.9178969743922131, (24, 24): 0.0006393637108599466, (14, 15): 0.5958980047889157, (12, 11): 0.2718216224785004, (10, 23): 0.07079199282046168, (25, 21): 0.006551999877272849, (15, 14): 0.4538349358175355, (13, 10): 0.22372152825984407, (2, 17): 1.59800977470619e-07, (26, 18): 0.05129627356904617, (0, 1): 1.59800977470619e-07, (3, 12): 1.59800977470619e-07, (27, 27): 1.59800977470619e-07, (1, 12): 1.59800977470619e-07, (4, 15): 0.0006393637108599466, (2, 11): 1.59800977470619e-07, (5, 14): 0.01486165070574504, (3, 18): 1.59800977470619e-07, (6, 13): 0.19160153178824965, (4, 21): 0.0001599607784480896, (19, 23): 0.00335598032786047, (7, 8): 0.37601185978934404, (20, 16): 0.5444420900433764, (18, 0): 0.0001599607784480896, (16, 16): 0.9006384688253862, (21, 21): 0.010866626268979563, (22, 26): 1.59800977470619e-07, (18, 26): 0.0003197617559187086, (23, 3): 0.0004795627333893276, (9, 25): 0.002716776417977994, (14, 22): 0.06072453123981269, (12, 2): 0.01773806830021618, (15, 23): 0.02780552988086518, (13, 3): 0.025088913263864653, (0, 24): 1.59800977470619e-07, (24, 5): 0.023810505444099704, (12, 24): 0.004314786192684184, (27, 16): 0.03435736995716056, (1, 5): 1.59800977470619e-07, (25, 0): 1.59800977470619e-07, (2, 2): 1.59800977470619e-07, (26, 7): 0.07254980357263849, (3, 27): 1.59800977470619e-07, (6, 4): 0.011825432133803278, (7, 1): 0.0020775725080955177, (5, 17): 0.010706825291508945, (16, 23): 0.023650704466629084, (19, 2): 0.0006393637108599466, (17, 22): 0.0338779670247487, (20, 5): 0.0031961793503898508, (18, 21): 0.03515637484451365, (23, 24): 0.0007991646883305657, (8, 3): 0.06743617229357869, (9, 2): 0.037553389506572936, (14, 25): 0.0022373734855661367, (10, 25): 
0.0020775725080955177, (11, 20): 0.5519527359844956, (24, 12): 0.5701700474161461, (14, 3): 0.015820456570568752, (12, 23): 0.03036234552039508, (25, 9): 0.303462216017683, (13, 22): 0.07350860943746221, (26, 14): 0.25520232082155603, (3, 0): 1.59800977470619e-07, (27, 15): 0.04011020514610284, (4, 27): 1.59800977470619e-07, (5, 26): 1.59800977470619e-07, (16, 14): 0.5973362135861513, (6, 17): 0.17002839982971607, (19, 11): 0.2577591364610859, (17, 15): 0.8526981755842005, (7, 20): 0.3699394226454605, (22, 20): 0.02221249566939351, (20, 12): 0.5276629874089614, (18, 12): 0.3608307669296352, (23, 17): 0.18632809953171922, (21, 1): 1.59800977470619e-07, (8, 10): 0.7945306197848951, (22, 14): 0.6395236716383947, (9, 11): 0.881462351528912, (23, 15): 0.4400920517550622, (10, 0): 0.0004795627333893276, (8, 16): 0.8873749876953249, (11, 13): 0.3627483786592826, (0, 22): 1.59800977470619e-07, (24, 19): 0.04282682176310336, (14, 10): 0.14158382583994591, (12, 14): 0.19096232787836717, (1, 23): 1.59800977470619e-07, (25, 18): 0.07606542507699211, (15, 3): 0.00815000965197904, (13, 15): 0.459907372961419, (2, 20): 1.59800977470619e-07, (26, 9): 0.18329188095977747, (0, 12): 1.59800977470619e-07, (3, 9): 1.59800977470619e-07, (27, 4): 0.0004795627333893276, (1, 9): 1.59800977470619e-07, (4, 2): 1.59800977470619e-07, (2, 14): 1.59800977470619e-07, (5, 3): 0.0001599607784480896, (16, 5): 0.0364347826642786, (6, 8): 0.10195318342723239, (19, 16): 0.6694064544254005, (17, 0): 1.59800977470619e-07, (7, 13): 0.6384050647961004, (20, 11): 0.34660847993475014, (18, 7): 0.03068194747533632, (16, 27): 1.59800977470619e-07, (21, 10): 0.2627129667626751, (17, 26): 0.0003197617559187086, (22, 1): 1.59800977470619e-07, (9, 12): 0.88353976423603, (23, 4): 0.005912795967390373, (10, 11): 0.7323680395488243, (8, 23): 0.07958104658134572, (11, 2): 0.02860453476821827, (9, 22): 0.20933944028748835, (24, 26): 1.59800977470619e-07, (14, 13): 0.1045099990667623, (12, 5): 0.15628551576724284, (10, 
21): 0.3731354421948729, (25, 27): 1.59800977470619e-07, (15, 8): 0.13583099065100362, (13, 8): 0.27805386059985454, (11, 24): 0.00799020867450842, (26, 16): 0.1516512874205949, (0, 3): 1.59800977470619e-07, (24, 0): 1.59800977470619e-07, (3, 14): 1.59800977470619e-07, (1, 2): 1.59800977470619e-07, (4, 9): 0.0001599607784480896, (2, 9): 1.59800977470619e-07, (5, 12): 0.01741846634527494, (3, 20): 1.59800977470619e-07, (6, 3): 0.0038353832602723266, (4, 23): 1.59800977470619e-07, (19, 25): 0.0004795627333893276, (7, 10): 0.5350138323726099, (5, 22): 0.0006393637108599466, (20, 18): 0.16970879787477486, (16, 18): 0.6179505396798611, (21, 19): 0.05625010387063536, (17, 19): 0.2537641120243204, (22, 24): 0.0006393637108599466, (18, 24): 0.003995184237742946, (14, 20): 0.32104032353945106, (15, 17): 0.8937670267941495, (13, 1): 0.0020775725080955177, (11, 17): 0.8384758885893154, (26, 27): 1.59800977470619e-07, (0, 26): 1.59800977470619e-07, (24, 7): 0.11729407726441181, (12, 26): 0.0003197617559187086, (27, 18): 0.014062645818391943, (1, 27): 1.59800977470619e-07, (25, 6): 0.058966720487635876, (13, 27): 1.59800977470619e-07, (2, 0): 1.59800977470619e-07, (26, 5): 0.011985233111273896, (27, 8): 0.00958821844921461}, 8: {(7, 3): 0.0005140649260061695, (20, 25): 0.0005140649260061695, (16, 9): 0.39244360531324907, (19, 4): 0.00976415023234844, (17, 20): 0.16975636645686104, (7, 25): 0.04111166154828613, (22, 19): 0.2579747726191994, (20, 7): 0.35629975346809684, (18, 19): 0.2996001564977396, (23, 26): 1.7129787604337537e-07, (21, 6): 0.20161777140092885, (8, 5): 0.03768570402741862, (9, 0): 1.7129787604337537e-07, (10, 7): 0.23382177209708344, (11, 22): 0.23656253811377742, (0, 17): 1.7129787604337537e-07, (24, 14): 0.5185188420811734, (14, 1): 1.7129787604337537e-07, (12, 17): 0.8520358067376251, (25, 15): 0.0496765553504549, (15, 4): 0.0013705543062230462, (13, 20): 0.2643127940328042, (2, 27): 1.7129787604337537e-07, (26, 12): 0.001199256430179671, (3, 2): 
1.7129787604337537e-07, (27, 1): 1.7129787604337537e-07, (4, 5): 0.00017146917391941874, (5, 24): 0.04128295942432951, (16, 0): 1.7129787604337537e-07, (6, 23): 0.15519604699317413, (19, 13): 0.5118382249154817, (17, 13): 0.7884842947255329, (7, 22): 0.33505881683871824, (20, 14): 0.565797055869145, (18, 10): 0.7800906987994075, (23, 19): 0.1671868983162104, (21, 15): 0.737437527664607, (8, 12): 0.8035585078173499, (22, 12): 0.8881796585827774, (9, 9): 0.55945903445554, (23, 9): 0.5709359921504462, (10, 14): 0.4114576695540637, (8, 18): 0.7379514212927372, (11, 15): 0.5599729280836702, (9, 19): 0.7444607405823854, (24, 21): 0.01678736315012683, (14, 8): 0.18346019654033108, (12, 8): 0.3208410931271181, (1, 21): 1.7129787604337537e-07, (25, 16): 0.035972725266984876, (15, 13): 0.9529302557271733, (13, 13): 0.8741332327472207, (2, 18): 0.00017146917391941874, (26, 23): 0.00017146917391941874, (0, 14): 1.7129787604337537e-07, (3, 11): 0.0010279585541362957, (27, 6): 1.7129787604337537e-07, (1, 15): 1.7129787604337537e-07, (4, 12): 0.10843172683333265, (2, 12): 1.7129787604337537e-07, (5, 1): 1.7129787604337537e-07, (3, 17): 0.005995596959394181, (16, 7): 0.10106591816346751, (6, 14): 0.8840685095577364, (19, 18): 0.45308305343260397, (17, 6): 0.06766283233500932, (7, 15): 0.840558849042719, (20, 21): 0.11802440789176168, (18, 5): 0.039912576415982504, (21, 8): 0.5411301617188988, (17, 24): 0.012504916249042446, (22, 7): 0.33420232745850137, (23, 6): 0.14132091903366073, (10, 9): 0.5669961410014486, (8, 25): 0.0503617468546284, (11, 4): 0.014731788637606326, (9, 20): 0.6692609729993436, (14, 19): 0.2942899223403949, (12, 7): 0.18260370716011418, (10, 19): 0.7562802940293784, (25, 25): 1.7129787604337537e-07, (15, 10): 0.5460978001241568, (13, 6): 0.061667406673491174, (11, 26): 0.005824299083350806, (0, 5): 1.7129787604337537e-07, (24, 2): 1.7129787604337537e-07, (1, 0): 1.7129787604337537e-07, (25, 3): 1.7129787604337537e-07, (4, 11): 0.06903321534335631, (2, 7): 
1.7129787604337537e-07, (5, 10): 0.20144647352488548, (3, 22): 0.0018844479343531723, (6, 1): 1.7129787604337537e-07, (4, 17): 0.19802051600401796, (19, 27): 1.7129787604337537e-07, (7, 4): 0.007365979967741184, (5, 20): 0.3634942642619186, (16, 20): 0.1442329829263981, (6, 27): 0.0005140649260061695, (21, 17): 0.6002279289538633, (19, 1): 1.7129787604337537e-07, (17, 17): 0.5454126086199833, (18, 22): 0.06475076844227193, (8, 0): 1.7129787604337537e-07, (14, 26): 0.0003427670499627941, (10, 26): 0.009078958728174939, (15, 19): 0.22816894218765205, (11, 19): 0.7149975059029249, (26, 25): 1.7129787604337537e-07, (24, 9): 0.3391699658637593, (12, 20): 0.4219068399927096, (27, 20): 1.7129787604337537e-07, (1, 25): 1.7129787604337537e-07, (25, 4): 1.7129787604337537e-07, (13, 25): 0.006852086339611058, (26, 3): 1.7129787604337537e-07, (27, 10): 1.7129787604337537e-07, (4, 24): 0.006509490587524308, (20, 27): 1.7129787604337537e-07, (16, 11): 0.8302809764801166, (6, 18): 0.8025307205610896, (21, 26): 1.7129787604337537e-07, (19, 6): 0.15314047248065363, (17, 10): 0.727330952978048, (7, 27): 0.0010279585541362957, (22, 17): 0.553977502422152, (20, 1): 1.7129787604337537e-07, (18, 17): 0.549352459768981, (23, 20): 0.08719079020395411, (21, 4): 0.013875299257389449, (8, 7): 0.1839740901684612, (22, 11): 0.870707275226353, (9, 6): 0.11134379072607004, (10, 5): 0.050190448978585024, (15, 24): 0.009078958728174939, (11, 8): 0.37308694532034764, (0, 19): 1.7129787604337537e-07, (24, 16): 0.3187855186145976, (14, 7): 0.09147323710503849, (12, 19): 0.608278929127902, (1, 18): 1.7129787604337537e-07, (25, 13): 0.0635516833099683, (15, 6): 0.02980600172942336, (13, 18): 0.6540154620314833, (2, 25): 1.7129787604337537e-07, (26, 10): 0.001199256430179671, (0, 9): 1.7129787604337537e-07, (3, 4): 1.7129787604337537e-07, (27, 3): 1.7129787604337537e-07, (4, 7): 0.003426128818743551, (5, 6): 0.009592852356305064, (16, 2): 1.7129787604337537e-07, (6, 21): 0.3912445201809454, (19, 15): 
0.5830981413495259, (17, 3): 0.0003427670499627941, (7, 16): 0.8292531892238563, (20, 8): 0.5356486296855109, (18, 8): 0.428930052910488, (16, 24): 0.011819724744868945, (21, 13): 0.7182521655477491, (8, 14): 0.6829648030828137, (22, 2): 1.7129787604337537e-07, (9, 15): 0.4304717337948784, (23, 11): 0.8340495297530708, (10, 12): 0.677654568925469, (8, 20): 0.6610386749492616, (11, 1): 1.7129787604337537e-07, (9, 17): 0.6051955673591213, (24, 23): 0.0022270436864399233, (14, 14): 0.9556710217438673, (12, 10): 0.655728440791917, (10, 22): 0.324438348524029, (25, 22): 0.0005140649260061695, (15, 15): 0.9173002975101512, (13, 11): 0.7735813795097592, (2, 16): 1.7129787604337537e-07, (26, 21): 0.00017146917391941874, (0, 0): 1.7129787604337537e-07, (3, 13): 0.0030835330666568, (27, 24): 1.7129787604337537e-07, (1, 13): 1.7129787604337537e-07, (4, 14): 0.18517317530076483, (2, 10): 1.7129787604337537e-07, (5, 15): 0.7514839535001638, (3, 19): 0.0035974266947869263, (6, 12): 0.7233911018290503, (4, 20): 0.08136666241847934, (19, 20): 0.2062428140541, (17, 4): 0.0029122351906134248, (7, 9): 0.39038803080072854, (20, 23): 0.030833788985683612, (18, 3): 0.0005140649260061695, (21, 22): 0.0510469383588019, (22, 5): 0.07228787498818044, (23, 0): 1.7129787604337537e-07, (8, 27): 0.0008566606780929203, (11, 6): 0.11151508860211341, (9, 26): 0.010963235364652067, (14, 17): 0.729215229614525, (12, 1): 1.7129787604337537e-07, (10, 17): 0.6774832710494256, (15, 20): 0.13464030186796908, (13, 4): 0.005995596959394181, (0, 7): 1.7129787604337537e-07, (24, 4): 0.0008566606780929203, (27, 17): 1.7129787604337537e-07, (1, 6): 1.7129787604337537e-07, (25, 1): 1.7129787604337537e-07, (2, 5): 1.7129787604337537e-07, (26, 6): 0.00017146917391941874, (5, 8): 0.058241449152623664, (3, 24): 0.0005140649260061695, (6, 7): 0.07365825799652745, (4, 19): 0.11802440789176168, (7, 6): 0.06252389605370805, (5, 18): 0.6266078018645432, (16, 22): 0.047792278713977766, (6, 25): 0.02929210810129323, (19, 
3): 0.0005140649260061695, (17, 23): 0.031518980489857115, (20, 4): 0.013018809877172573, (18, 20): 0.19048340945810946, (23, 25): 0.0005140649260061695, (8, 2): 1.7129787604337537e-07, (9, 3): 0.001713150058309797, (14, 24): 0.01010674598443519, (10, 24): 0.0976399606426, (11, 21): 0.3782258816016489, (24, 11): 0.5358199275615543, (14, 2): 1.7129787604337537e-07, (12, 22): 0.14234870628992097, (27, 22): 1.7129787604337537e-07, (25, 10): 0.04522281057332714, (13, 23): 0.0371718103992885, (26, 1): 1.7129787604337537e-07, (3, 1): 1.7129787604337537e-07, (27, 12): 1.7129787604337537e-07, (4, 26): 0.0008566606780929203, (5, 27): 0.00017146917391941874, (16, 13): 0.9044529568068981, (6, 16): 0.8996566162776836, (21, 24): 0.005139107579177305, (19, 8): 0.5041298204935298, (17, 8): 0.3186142207385542, (7, 21): 0.4774073518307632, (22, 23): 0.012847512001129198, (20, 3): 0.000685362802049545, (18, 15): 0.6260939082364131, (23, 22): 0.017643852530343708, (21, 2): 1.7129787604337537e-07, (8, 9): 0.4988195863361852, (22, 9): 0.6632655473378255, (9, 4): 0.015245682265736452, (23, 12): 0.8880083607067341, (10, 3): 0.0022270436864399233, (15, 26): 1.7129787604337537e-07, (11, 10): 0.6944417607777198, (0, 21): 1.7129787604337537e-07, (24, 18): 0.12915876983458108, (14, 5): 0.012847512001129198, (12, 13): 0.7350393573999998, (1, 16): 1.7129787604337537e-07, (25, 19): 0.008222469347958061, (15, 0): 1.7129787604337537e-07, (13, 16): 0.9087354037079824, (2, 23): 1.7129787604337537e-07, (26, 8): 0.0008566606780929203, (0, 11): 1.7129787604337537e-07, (3, 6): 1.7129787604337537e-07, (27, 5): 1.7129787604337537e-07, (1, 10): 1.7129787604337537e-07, (4, 1): 1.7129787604337537e-07, (5, 4): 0.0003427670499627941, (16, 4): 0.0010279585541362957, (6, 11): 0.5765888220598776, (19, 17): 0.566310949497275, (17, 1): 1.7129787604337537e-07, (7, 18): 0.8093826356028248, (20, 10): 0.747030208723036, (18, 6): 0.10569096081663865, (16, 26): 1.7129787604337537e-07, (21, 11): 0.7619331239388097, (17, 
27): 1.7129787604337537e-07, (22, 0): 1.7129787604337537e-07, (9, 13): 0.6091354185081189, (23, 5): 0.04882006597023802, (10, 10): 0.7028353567038452, (8, 22): 0.3754851155849549, (11, 3): 0.001713150058309797, (9, 23): 0.23073841032830267, (24, 25): 1.7129787604337537e-07, (14, 12): 0.9104483824684162, (12, 4): 0.010449341736521942, (10, 20): 0.6416820149563602, (25, 20): 0.004111320322917052, (15, 9): 0.3112484120686891, (13, 9): 0.40083720123937444, (11, 25): 0.022440193059558217, (26, 19): 0.00017146917391941874, (0, 2): 1.7129787604337537e-07, (3, 15): 0.006338192711480932, (27, 26): 1.7129787604337537e-07, (1, 3): 1.7129787604337537e-07, (4, 8): 0.009078958728174939, (2, 8): 1.7129787604337537e-07, (5, 13): 0.5976584608132127, (3, 21): 0.002055745810396548, (6, 2): 1.7129787604337537e-07, (4, 22): 0.029977299605466735, (19, 22): 0.06714893870687919, (7, 11): 0.7324698892593492, (5, 23): 0.08256574755078297, (20, 17): 0.5873805882506102, (18, 1): 1.7129787604337537e-07, (16, 17): 0.5531210130419352, (21, 20): 0.18722874981328533, (22, 27): 1.7129787604337537e-07, (18, 27): 1.7129787604337537e-07, (23, 2): 1.7129787604337537e-07, (9, 24): 0.12847357833040757, (14, 23): 0.02346798031581847, (12, 3): 0.000685362802049545, (15, 22): 0.04008387429202588, (13, 2): 1.7129787604337537e-07, (26, 26): 1.7129787604337537e-07, (0, 25): 1.7129787604337537e-07, (24, 6): 0.0656072578224888, (12, 25): 0.010963235364652067, (27, 19): 1.7129787604337537e-07, (1, 4): 1.7129787604337537e-07, (25, 7): 0.0142178950094762, (2, 3): 1.7129787604337537e-07, (26, 4): 1.7129787604337537e-07, (3, 26): 1.7129787604337537e-07, (27, 9): 1.7129787604337537e-07, (6, 5): 0.01010674598443519, (7, 0): 1.7129787604337537e-07, (5, 16): 0.7571367834095952, (20, 24): 0.00753727784378456, (16, 8): 0.21086785670727112, (19, 5): 0.05669976826823329, (17, 21): 0.10055202453533739, (22, 18): 0.4018649884956347, (20, 6): 0.1901408137060227, (18, 18): 0.4301291380427916, (23, 27): 1.7129787604337537e-07, 
(21, 7): 0.36554983877443914, (8, 4): 0.010963235364652067, (9, 1): 1.7129787604337537e-07, (10, 6): 0.11545493975111104, (11, 23): 0.12881617408249432, (0, 16): 1.7129787604337537e-07, (24, 13): 0.5707646942744028, (14, 0): 1.7129787604337537e-07, (12, 16): 0.8472394662084107, (25, 8): 0.023296682439775094, (15, 5): 0.00976415023234844, (13, 21): 0.14594596168683185, (2, 26): 1.7129787604337537e-07, (26, 15): 0.0003427670499627941, (3, 3): 1.7129787604337537e-07, (27, 14): 1.7129787604337537e-07, (4, 4): 1.7129787604337537e-07, (5, 25): 0.015588278017823204, (16, 15): 0.828225401967596, (6, 22): 0.2567756874868957, (19, 10): 0.7691276347326315, (17, 14): 0.7280161444822214, (7, 23): 0.2139512184760519, (22, 21): 0.07297306649235395, (20, 13): 0.5294819061479493, (18, 13): 0.6397977383198831, (23, 16): 0.5937186096642151, (21, 0): 1.7129787604337537e-07, (8, 11): 0.7809471881796244, (22, 15): 0.8054427844538271, (9, 10): 0.6978677182985874, (23, 14): 0.8347347212572442, (10, 1): 1.7129787604337537e-07, (8, 17): 0.68125182432238, (11, 12): 0.7054048248444958, (0, 23): 1.7129787604337537e-07, (24, 20): 0.03203287411798724, (14, 11): 0.7655303793357207, (12, 15): 0.7679285496003279, (1, 22): 1.7129787604337537e-07, (25, 17): 0.022782788811644968, (15, 2): 1.7129787604337537e-07, (13, 14): 0.8777304881441315, (2, 21): 1.7129787604337537e-07, (26, 22): 0.00017146917391941874, (0, 13): 1.7129787604337537e-07, (3, 8): 0.00017146917391941874, (27, 7): 1.7129787604337537e-07, (1, 8): 1.7129787604337537e-07, (4, 3): 1.7129787604337537e-07, (2, 15): 1.7129787604337537e-07, (5, 2): 1.7129787604337537e-07, (16, 6): 0.038542193407635504, (6, 9): 0.2554053044785487, (19, 19): 0.3208410931271181, (17, 7): 0.15673772787756451, (7, 12): 0.8290818913478128, (20, 20): 0.20675670768223012, (18, 4): 0.006338192711480932, (21, 9): 0.6851916754713776, (17, 25): 0.002055745810396548, (22, 6): 0.18363149441637444, (23, 7): 0.2665396664213681, (10, 8): 0.38901764779238157, (8, 24): 
0.13344121673566545, (11, 5): 0.04727838508584764, (9, 21): 0.5195466293374336, (24, 27): 1.7129787604337537e-07, (14, 18): 0.49744920332783815, (12, 6): 0.09113064135295174, (10, 18): 0.768442443228458, (25, 26): 1.7129787604337537e-07, (15, 11): 0.7869426138411425, (13, 7): 0.13138564222314494, (11, 27): 0.000685362802049545, (26, 17): 0.0003427670499627941, (0, 4): 1.7129787604337537e-07, (24, 1): 1.7129787604337537e-07, (1, 1): 1.7129787604337537e-07, (4, 10): 0.037343108275331875, (2, 6): 1.7129787604337537e-07, (5, 11): 0.3335171359543279, (3, 23): 0.000685362802049545, (6, 0): 1.7129787604337537e-07, (4, 16): 0.21223823971561812, (19, 24): 0.011477128992782195, (7, 5): 0.023981873943948597, (5, 21): 0.24290055952738232, (20, 19): 0.3276930081688531, (16, 19): 0.23639124023773406, (6, 26): 0.008565065100044811, (21, 18): 0.45359694706073406, (17, 18): 0.4061474353967191, (22, 25): 0.000685362802049545, (18, 25): 0.001713150058309797, (14, 21): 0.08804727958417098, (15, 16): 0.8184614230331236, (13, 0): 1.7129787604337537e-07, (11, 16): 0.6886176329922451, (26, 24): 1.7129787604337537e-07, (0, 27): 1.7129787604337537e-07, (24, 8): 0.23159489970851954, (12, 27): 0.0003427670499627941, (27, 21): 1.7129787604337537e-07, (1, 26): 1.7129787604337537e-07, (25, 5): 0.0015418521822664216, (13, 26): 0.0013705543062230462, (2, 1): 1.7129787604337537e-07, (26, 2): 1.7129787604337537e-07, (27, 11): 1.7129787604337537e-07, (7, 2): 1.7129787604337537e-07, (20, 26): 1.7129787604337537e-07, (16, 10): 0.6327745254021047, (21, 27): 1.7129787604337537e-07, (19, 7): 0.3139891780853831, (17, 11): 0.853748785498059, (7, 24): 0.11614013125528454, (22, 16): 0.7002658885631946, (20, 0): 1.7129787604337537e-07, (18, 16): 0.620612376203025, (23, 21): 0.04059776792015601, (21, 5): 0.07811200277365521, (8, 6): 0.09198713073316861, (22, 10): 0.7982482736600053, (9, 7): 0.22028923988965676, (10, 4): 0.015930873769909953, (15, 25): 0.0022270436864399233, (11, 9): 0.5445561192397664, (0, 18): 
1.7129787604337537e-07, (24, 15): 0.424818903885447, (14, 6): 0.04025517216806926, (12, 18): 0.7725535922534991, (1, 19): 1.7129787604337537e-07, (25, 14): 0.05961183216097067, (15, 7): 0.07365825799652745, (13, 19): 0.444004266002305, (2, 24): 1.7129787604337537e-07, (26, 13): 0.0010279585541362957, (0, 8): 1.7129787604337537e-07, (3, 5): 1.7129787604337537e-07, (27, 0): 1.7129787604337537e-07, (4, 6): 0.001199256430179671, (5, 7): 0.024667065448122097, (16, 1): 1.7129787604337537e-07, (6, 20): 0.5387319914542916, (19, 12): 0.6161586314258973, (17, 12): 0.852378402489712, (7, 17): 0.8273689125873791, (20, 15): 0.6231818443436757, (18, 11): 0.8133224867518224, (23, 18): 0.2905213690674407, (21, 14): 0.7256179742176142, (8, 13): 0.7557664004012482, (22, 13): 0.8892074458390377, (9, 8): 0.3777119879735188, (23, 8): 0.4129993504384541, (10, 15): 0.41042988229780347, (8, 19): 0.7430903575740384, (11, 14): 0.5082409695185708, (9, 18): 0.7153401016550116, (24, 22): 0.006852086339611058, (14, 9): 0.3314615614418074, (12, 9): 0.4919676712944501, (1, 20): 1.7129787604337537e-07, (25, 23): 0.00017146917391941874, (15, 12): 0.928605957329014, (13, 12): 0.8727628497388736, (2, 19): 0.00017146917391941874, (26, 20): 1.7129787604337537e-07, (0, 15): 1.7129787604337537e-07, (3, 10): 0.000685362802049545, (27, 25): 1.7129787604337537e-07, (1, 14): 1.7129787604337537e-07, (4, 13): 0.15142749372021988, (2, 13): 1.7129787604337537e-07, (5, 0): 1.7129787604337537e-07, (3, 16): 0.006852086339611058, (6, 15): 0.9046242546829414, (19, 21): 0.12093647178449905, (17, 5): 0.02124110792725459, (7, 14): 0.8554617642584927, (20, 22): 0.06389427906205505, (18, 2): 0.00017146917391941874, (21, 23): 0.022611490935601594, (22, 4): 0.010449341736521942, (23, 1): 1.7129787604337537e-07, (8, 26): 0.010449341736521942, (11, 7): 0.22337260165843753, (9, 27): 0.001199256430179671, (14, 16): 0.8914343182276016, (12, 0): 1.7129787604337537e-07, (10, 16): 0.5246855656187348, (25, 24): 
1.7129787604337537e-07, (15, 21): 0.0726304707402672, (13, 5): 0.024495767572078723, (0, 6): 1.7129787604337537e-07, (24, 3): 1.7129787604337537e-07, (1, 7): 1.7129787604337537e-07, (25, 2): 1.7129787604337537e-07, (2, 4): 1.7129787604337537e-07, (5, 9): 0.11425585461880741, (3, 25): 0.00017146917391941874, (6, 6): 0.029977299605466735, (4, 18): 0.16153406840677903, (19, 26): 1.7129787604337537e-07, (7, 7): 0.13344121673566545, (5, 19): 0.4972779054517948, (16, 21): 0.08633430082373723, (6, 24): 0.08119536454243596, (21, 16): 0.7014649736954982, (19, 0): 1.7129787604337537e-07, (17, 16): 0.6610386749492616, (18, 23): 0.035972725266984876, (8, 1): 1.7129787604337537e-07, (14, 27): 1.7129787604337537e-07, (10, 27): 0.0008566606780929203, (15, 18): 0.38987413717259845, (11, 18): 0.8006464439246126, (24, 10): 0.4457172447627388, (12, 21): 0.25865996412337283, (27, 23): 1.7129787604337537e-07, (1, 24): 1.7129787604337537e-07, (25, 11): 0.05395900225153928, (13, 24): 0.018157746158473834, (26, 0): 1.7129787604337537e-07, (27, 13): 1.7129787604337537e-07, (4, 25): 0.0030835330666568, (16, 12): 0.9123326591048934, (6, 19): 0.6901593138766354, (21, 25): 0.000685362802049545, (19, 9): 0.6711452496358208, (17, 9): 0.521602203849954, (7, 26): 0.009421554480261688, (22, 22): 0.03443104438259449, (20, 2): 1.7129787604337537e-07, (18, 14): 0.5882370776308271, (23, 23): 0.006166894835437557, (21, 3): 0.000685362802049545, (8, 8): 0.3210123910031615, (22, 8): 0.5022455438570527, (9, 5): 0.04830617234210789, (23, 13): 0.8849249989379533, (10, 2): 1.7129787604337537e-07, (15, 27): 1.7129787604337537e-07, (11, 11): 0.7513126556241204, (0, 20): 1.7129787604337537e-07, (24, 17): 0.2137799206000085, (14, 4): 0.0027409373145700494, (12, 12): 0.7955075076433114, (1, 17): 1.7129787604337537e-07, (25, 12): 0.06252389605370805, (15, 1): 1.7129787604337537e-07, (13, 17): 0.8217160826779477, (2, 22): 1.7129787604337537e-07, (26, 11): 0.001199256430179671, (0, 10): 1.7129787604337537e-07, (3, 
7): 1.7129787604337537e-07, (27, 2): 1.7129787604337537e-07, (1, 11): 1.7129787604337537e-07, (4, 0): 1.7129787604337537e-07, (5, 5): 0.0018844479343531723, (16, 3): 0.00017146917391941874, (6, 10): 0.4087169035373697, (19, 14): 0.5190327357093034, (17, 2): 0.00017146917391941874, (7, 19): 0.7490857832355566, (20, 9): 0.6867333563557679, (18, 9): 0.6207836740790684, (16, 25): 0.002055745810396548, (21, 12): 0.736923634036477, (8, 15): 0.6284920785010203, (22, 3): 0.000685362802049545, (9, 14): 0.4770647560786765, (23, 10): 0.7247614848373973, (10, 13): 0.5315374806604699, (8, 21): 0.5185188420811734, (11, 0): 1.7129787604337537e-07, (9, 16): 0.48237499023602115, (24, 24): 0.0003427670499627941, (14, 15): 0.9515598727188262, (12, 11): 0.7701554219888918, (10, 23): 0.18962692007789259, (25, 21): 0.001199256430179671, (15, 14): 0.9435088725447877, (13, 10): 0.5920056309037813, (2, 17): 0.00017146917391941874, (26, 18): 0.00017146917391941874, (0, 1): 1.7129787604337537e-07, (3, 12): 0.0023983415624832987, (27, 27): 1.7129787604337537e-07, (1, 12): 1.7129787604337537e-07, (4, 15): 0.20658540980618673, (2, 11): 1.7129787604337537e-07, (5, 14): 0.6899880160005921, (3, 18): 0.00531040545522068, (6, 13): 0.8292531892238563, (4, 21): 0.051218236234845275, (19, 23): 0.03528753376281137, (7, 8): 0.23981719775860155, (20, 16): 0.6481913342460085, (18, 0): 1.7129787604337537e-07, (16, 16): 0.7280161444822214, (21, 21): 0.10363538630411814, (22, 26): 1.7129787604337537e-07, (18, 26): 0.00017146917391941874, (23, 3): 0.000685362802049545, (9, 25): 0.047620980837934396, (14, 22): 0.047963576590021144, (12, 2): 1.7129787604337537e-07, (15, 23): 0.02089851217516784, (13, 3): 0.000685362802049545, (0, 24): 1.7129787604337537e-07, (24, 5): 0.01678736315012683, (12, 24): 0.03425974650655112, (27, 16): 1.7129787604337537e-07, (1, 5): 1.7129787604337537e-07, (25, 0): 1.7129787604337537e-07, (2, 2): 1.7129787604337537e-07, (26, 7): 0.00017146917391941874, (3, 27): 1.7129787604337537e-07, 
(6, 4): 0.002569639438526674, (7, 1): 1.7129787604337537e-07, (5, 17): 0.7182521655477491, (16, 23): 0.026208746332512477, (19, 2): 1.7129787604337537e-07, (17, 22): 0.05772755552449354, (20, 5): 0.07245917286422382, (18, 21): 0.11596883337924117, (23, 24): 0.001713150058309797, (8, 3): 0.0015418521822664216, (9, 2): 1.7129787604337537e-07, (14, 25): 0.0039400224468736774, (10, 25): 0.036144023143028246, (11, 20): 0.5567182684388461, (24, 12): 0.5800147795807451, (14, 3): 0.0003427670499627941, (12, 23): 0.0705748962277467, (25, 9): 0.03563012951489812, (13, 22): 0.0740008537486142, (26, 14): 0.000685362802049545, (3, 0): 1.7129787604337537e-07, (27, 15): 1.7129787604337537e-07, (4, 27): 0.00017146917391941874, (5, 26): 0.0039400224468736774, (16, 14): 0.8677952113336157, (6, 17): 0.8643692538127482, (19, 11): 0.7326411871353925, (17, 15): 0.7115715483820574, (7, 20): 0.6260939082364131, (22, 20): 0.14594596168683185, (20, 12): 0.5863528009943499, (18, 12): 0.7461737193428192, (23, 17): 0.4366384573324399, (21, 1): 1.7129787604337537e-07, (8, 10): 0.6696035687514305, (22, 14): 0.8662535304492254, (9, 11): 0.7614192303106796, (23, 15): 0.729215229614525, (10, 0): 1.7129787604337537e-07, (8, 16): 0.6320893338979312, (11, 13): 0.5978297586892561, (0, 22): 1.7129787604337537e-07, (24, 19): 0.06834802383918281, (14, 10): 0.5404449702147254, (12, 14): 0.6992381013069343, (1, 23): 1.7129787604337537e-07, (25, 18): 0.0142178950094762, (15, 3): 0.00017146917391941874, (13, 15): 0.9106196803444596, (2, 20): 1.7129787604337537e-07, (26, 9): 0.0008566606780929203, (0, 12): 1.7129787604337537e-07, (3, 9): 0.0003427670499627941, (27, 4): 1.7129787604337537e-07, (1, 9): 1.7129787604337537e-07, (4, 2): 0.00017146917391941874, (2, 14): 1.7129787604337537e-07, (5, 3): 1.7129787604337537e-07, (16, 5): 0.011819724744868945, (6, 8): 0.14269130204200772, (19, 16): 0.6165012271779841, (17, 0): 1.7129787604337537e-07, (7, 13): 0.8677952113336157, (20, 11): 0.6920435905131126, (18, 7): 
0.2417014743950787, (16, 27): 1.7129787604337537e-07, (21, 10): 0.7660442729638507, (17, 26): 0.00017146917391941874, (22, 1): 1.7129787604337537e-07, (9, 12): 0.7235623997050936, (23, 4): 0.006338192711480932, (10, 11): 0.7412060809375614, (8, 23): 0.24204407014716545, (11, 2): 1.7129787604337537e-07, (9, 22): 0.3679480090390464, (24, 26): 1.7129787604337537e-07, (14, 13): 0.946420936437525, (12, 5): 0.03837089553159213, (10, 21): 0.4780925433349367, (25, 27): 1.7129787604337537e-07, (15, 8): 0.16393223867138626, (13, 8): 0.24821079368472695, (11, 24): 0.06183870454953455, (26, 16): 0.0003427670499627941, (0, 3): 1.7129787604337537e-07, (24, 0): 1.7129787604337537e-07, (3, 14): 0.004453916075003803, (1, 2): 1.7129787604337537e-07, (4, 9): 0.01867163978660396, (2, 9): 1.7129787604337537e-07, (5, 12): 0.47158322404528846, (3, 20): 0.0022270436864399233, (6, 3): 1.7129787604337537e-07, (4, 23): 0.01199102262091232, (19, 25): 0.001199256430179671, (7, 10): 0.5752184390515306, (5, 22): 0.15142749372021988, (20, 18): 0.46404611749937996, (16, 18): 0.3770267964693453, (21, 19): 0.31176230569681923, (17, 19): 0.2703082196943224, (22, 24): 0.0023983415624832987, (18, 24): 0.013018809877172573, (14, 20): 0.15982108964634525, (15, 17): 0.6063946524914249, (13, 1): 1.7129787604337537e-07, (11, 17): 0.7896833798578365, (26, 27): 1.7129787604337537e-07, (0, 26): 1.7129787604337537e-07, (24, 7): 0.13412640823983896, (12, 26): 0.0027409373145700494, (27, 18): 1.7129787604337537e-07, (1, 27): 1.7129787604337537e-07, (25, 6): 0.006166894835437557, (13, 27): 0.00017146917391941874, (2, 0): 1.7129787604337537e-07, (26, 5): 1.7129787604337537e-07, (27, 8): 1.7129787604337537e-07}, 9: {(7, 3): 0.00016843957310243816, (20, 25): 0.0011780673839062636, (16, 9): 0.5813775159925046, (19, 4): 0.0035338656091151895, (17, 20): 0.11358329698673214, (7, 25): 0.001009796082105626, (22, 19): 0.17264652391875593, (20, 7): 0.01581767064056173, (18, 19): 0.21219027984190575, (23, 26): 
1.6827130180063756e-07, (21, 6): 0.00689929164512794, (8, 5): 0.020192724487378307, (9, 0): 1.6827130180063756e-07, (10, 7): 0.33048500500775396, (11, 22): 0.10903997183811494, (0, 17): 1.6827130180063756e-07, (24, 14): 0.44070270768717157, (14, 1): 1.6827130180063756e-07, (12, 17): 0.9051315006569313, (25, 15): 0.3217348973141208, (15, 4): 0.0371881259692427, (13, 20): 0.36043729672826746, (2, 27): 1.6827130180063756e-07, (26, 12): 0.10971305704531749, (3, 2): 1.6827130180063756e-07, (27, 1): 1.6827130180063756e-07, (4, 5): 1.6827130180063756e-07, (5, 24): 1.6827130180063756e-07, (16, 0): 1.6827130180063756e-07, (6, 23): 0.008413733361333678, (19, 13): 0.45365959792582067, (17, 13): 0.5381317914297408, (7, 22): 0.06983275851856638, (20, 14): 0.6114980790148187, (18, 10): 0.22783951090936505, (23, 19): 0.17012245439174636, (21, 15): 0.6037575991319893, (8, 12): 0.9226317160441976, (22, 12): 0.43094297218273464, (9, 9): 0.6544072609739813, (23, 9): 0.1474058286486603, (10, 14): 0.3459659647734126, (8, 18): 0.7750577843650385, (11, 15): 0.42067842277289574, (9, 19): 0.6582775009153959, (24, 21): 0.05738068218531921, (14, 8): 0.6799844988476782, (12, 8): 0.7127974026988025, (1, 21): 1.6827130180063756e-07, (25, 16): 0.2904364351792022, (15, 13): 0.7669807618786079, (13, 13): 0.4985880355065909, (2, 18): 1.6827130180063756e-07, (26, 23): 0.0015146099875075385, (0, 14): 1.6827130180063756e-07, (3, 11): 1.6827130180063756e-07, (27, 6): 0.00016843957310243816, (1, 15): 1.6827130180063756e-07, (4, 12): 0.0028607804019126393, (2, 12): 1.6827130180063756e-07, (5, 1): 1.6827130180063756e-07, (3, 17): 0.00016843957310243816, (16, 7): 0.3020471550034462, (6, 14): 0.5869304689519256, (19, 18): 0.3533699020526407, (17, 6): 0.09675616680666839, (7, 15): 0.9473675974088913, (20, 21): 0.048294031888084774, (18, 5): 0.021875437505384683, (21, 8): 0.0331496147260274, (17, 24): 0.001346338685706901, (22, 7): 0.02574567744679935, (23, 6): 0.014976314131558545, (10, 9): 
0.779937652117257, (8, 25): 0.001346338685706901, (11, 4): 0.0331496147260274, (9, 20): 0.45803465177263725, (14, 19): 0.5430116591819593, (12, 7): 0.49202545473636605, (10, 19): 0.6979895281403464, (25, 25): 0.00016843957310243816, (15, 10): 0.7580623828831741, (13, 6): 0.2806766996747652, (11, 26): 0.00016843957310243816, (0, 5): 1.6827130180063756e-07, (24, 2): 1.6827130180063756e-07, (1, 0): 1.6827130180063756e-07, (25, 3): 1.6827130180063756e-07, (4, 11): 0.0018511525911088136, (2, 7): 1.6827130180063756e-07, (5, 10): 0.019351367978375122, (3, 22): 1.6827130180063756e-07, (6, 1): 1.6827130180063756e-07, (4, 17): 0.0026925091001120016, (19, 27): 1.6827130180063756e-07, (7, 4): 0.001346338685706901, (5, 20): 0.014976314131558545, (16, 20): 0.13377585320280866, (6, 27): 1.6827130180063756e-07, (21, 17): 0.4132744854936677, (19, 1): 1.6827130180063756e-07, (17, 17): 0.7858271476802792, (18, 22): 0.019856181883777035, (8, 0): 1.6827130180063756e-07, (14, 26): 0.00016843957310243816, (10, 26): 0.00016843957310243816, (15, 19): 0.42017360886749383, (11, 19): 0.7223888669014389, (26, 25): 1.6827130180063756e-07, (24, 9): 0.17214171001335402, (12, 20): 0.44154406419617476, (27, 20): 0.0011780673839062636, (1, 25): 1.6827130180063756e-07, (25, 4): 0.0021876951947100887, (13, 25): 0.00016843957310243816, (26, 3): 0.00016843957310243816, (27, 10): 0.001009796082105626, (4, 24): 1.6827130180063756e-07, (20, 27): 1.6827130180063756e-07, (16, 11): 0.6670276086090291, (6, 18): 0.39257711537218926, (21, 26): 1.6827130180063756e-07, (19, 6): 0.020360995789178944, (17, 10): 0.43582283993495313, (7, 27): 1.6827130180063756e-07, (22, 17): 0.374572086079521, (20, 1): 1.6827130180063756e-07, (18, 17): 0.671739205059447, (23, 20): 0.10045813544628242, (21, 4): 0.0018511525911088136, (8, 7): 0.12384784639657104, (22, 11): 0.3074318366610666, (9, 6): 0.09759752331567158, (10, 5): 0.07067411502756957, (15, 24): 0.0015146099875075385, (11, 8): 0.6641669964784183, (0, 19): 
1.6827130180063756e-07, (24, 16): 0.36750469140389425, (14, 7): 0.4859676878715431, (12, 19): 0.700513597667356, (1, 18): 1.6827130180063756e-07, (25, 13): 0.3575766845976566, (15, 6): 0.2263250691931593, (13, 18): 0.871813782900405, (2, 25): 1.6827130180063756e-07, (26, 10): 0.06646733248255363, (0, 9): 1.6827130180063756e-07, (3, 4): 1.6827130180063756e-07, (27, 3): 1.6827130180063756e-07, (4, 7): 1.6827130180063756e-07, (5, 6): 0.0003367108749030757, (16, 2): 0.00016843957310243816, (6, 21): 0.07723669579779444, (19, 15): 0.7358505710454899, (17, 3): 0.0026925091001120016, (7, 16): 0.9207807317243906, (20, 8): 0.027764933068406997, (18, 8): 0.13613165142801759, (16, 24): 0.0011780673839062636, (21, 13): 0.5032996319570088, (8, 14): 0.9068142136749376, (22, 2): 1.6827130180063756e-07, (9, 15): 0.6397676577173258, (23, 11): 0.3520237316382356, (10, 12): 0.6288300231002844, (8, 20): 0.39695216921900583, (11, 1): 1.6827130180063756e-07, (9, 17): 0.7785914817028519, (24, 23): 0.01463977152795727, (14, 14): 0.7994571231261308, (12, 10): 0.7225571382032395, (10, 22): 0.11947279254975446, (25, 22): 0.02355815052339106, (15, 15): 0.8920063391164815, (13, 11): 0.5396462331459465, (2, 16): 1.6827130180063756e-07, (26, 21): 0.01531285673515982, (0, 0): 1.6827130180063756e-07, (3, 13): 1.6827130180063756e-07, (27, 24): 1.6827130180063756e-07, (1, 13): 1.6827130180063756e-07, (4, 14): 0.00420695081631774, (2, 10): 1.6827130180063756e-07, (5, 15): 0.06882313070776255, (3, 19): 0.00016843957310243816, (6, 12): 0.39123094495778415, (4, 20): 0.0003367108749030757, (19, 20): 0.09776579461747222, (17, 4): 0.014976314131558545, (7, 9): 0.232046293454381, (20, 23): 0.010432988982941328, (18, 3): 0.0011780673839062636, (21, 22): 0.02423123573059361, (22, 5): 0.003702136910915827, (23, 0): 1.6827130180063756e-07, (8, 27): 1.6827130180063756e-07, (11, 6): 0.21925767451753253, (9, 26): 1.6827130180063756e-07, (14, 17): 0.9571273329133283, (12, 1): 1.6827130180063756e-07, (10, 17): 
0.7520046160183511, (15, 20): 0.18829575498621523, (13, 4): 0.046779590171879035, (0, 7): 1.6827130180063756e-07, (24, 4): 0.0023559664965107263, (27, 17): 0.001346338685706901, (1, 6): 1.6827130180063756e-07, (25, 1): 1.6827130180063756e-07, (2, 5): 1.6827130180063756e-07, (26, 6): 0.00622620643792539, (5, 8): 0.002524237798311364, (3, 24): 1.6827130180063756e-07, (6, 7): 0.012283973302748344, (4, 19): 0.0006732534785043509, (7, 6): 0.020360995789178944, (5, 18): 0.03819775378004653, (16, 22): 0.020865809694580857, (6, 25): 0.0005049821767037133, (19, 3): 0.0006732534785043509, (17, 23): 0.007740648154131128, (20, 4): 0.002019423892909451, (18, 20): 0.10281393367149134, (23, 25): 0.0005049821767037133, (8, 2): 0.00016843957310243816, (9, 3): 0.0018511525911088136, (14, 24): 0.0021876951947100887, (10, 24): 0.008077190757732403, (11, 21): 0.2549311904992677, (24, 11): 0.35774495589945726, (14, 2): 0.0003367108749030757, (12, 22): 0.08295792005901612, (27, 22): 0.0003367108749030757, (25, 10): 0.2347386342831912, (13, 23): 0.01649075584776428, (26, 1): 1.6827130180063756e-07, (3, 1): 1.6827130180063756e-07, (27, 12): 0.0011780673839062636, (4, 26): 1.6827130180063756e-07, (5, 27): 1.6827130180063756e-07, (16, 13): 0.6786383284332731, (6, 16): 0.5951757627401568, (21, 24): 0.004375222118118378, (19, 8): 0.05452007005470837, (17, 8): 0.2805084283729646, (7, 21): 0.15851173456750237, (22, 23): 0.014303228924355994, (20, 3): 0.0008415247803049884, (18, 15): 0.7814520938334626, (23, 22): 0.030793816500818475, (21, 2): 0.00016843957310243816, (8, 9): 0.4528182414168175, (22, 9): 0.10920824313991558, (9, 4): 0.012283973302748344, (23, 12): 0.4484431875700009, (10, 3): 0.00420695081631774, (15, 26): 0.0003367108749030757, (11, 10): 0.792894542355906, (0, 21): 1.6827130180063756e-07, (24, 18): 0.24298392807142244, (14, 5): 0.12536228811277678, (12, 13): 0.2823594126927716, (1, 16): 1.6827130180063756e-07, (25, 19): 0.13209314018480228, (15, 0): 1.6827130180063756e-07, (13, 
16): 0.9052997719587319, (2, 23): 1.6827130180063756e-07, (26, 8): 0.02423123573059361, (0, 11): 1.6827130180063756e-07, (3, 6): 1.6827130180063756e-07, (27, 5): 1.6827130180063756e-07, (1, 10): 1.6827130180063756e-07, (4, 1): 1.6827130180063756e-07, (5, 4): 1.6827130180063756e-07, (16, 4): 0.027091847861204447, (6, 11): 0.26351302689110023, (19, 17): 0.5552954642134058, (17, 1): 1.6827130180063756e-07, (7, 18): 0.6895759630503145, (20, 10): 0.09305419816705436, (18, 6): 0.047452675379081585, (16, 26): 0.00016843957310243816, (21, 11): 0.24618108280563455, (17, 27): 1.6827130180063756e-07, (22, 0): 1.6827130180063756e-07, (9, 13): 0.748807461284139, (23, 5): 0.0050483073253209275, (10, 10): 0.858352078756354, (8, 22): 0.1021408484642888, (11, 3): 0.006562749041526665, (9, 23): 0.04105836591065737, (24, 25): 0.0003367108749030757, (14, 12): 0.6626525547622125, (12, 4): 0.04240453632506246, (10, 20): 0.4945495242633756, (25, 20): 0.08211656355001293, (15, 9): 0.7222205955996382, (13, 9): 0.7891925737162919, (11, 25): 0.0011780673839062636, (26, 19): 0.04105836591065737, (0, 2): 1.6827130180063756e-07, (3, 15): 1.6827130180063756e-07, (27, 26): 1.6827130180063756e-07, (1, 3): 1.6827130180063756e-07, (4, 8): 1.6827130180063756e-07, (2, 8): 1.6827130180063756e-07, (5, 13): 0.05855858129792366, (3, 21): 1.6827130180063756e-07, (6, 2): 1.6827130180063756e-07, (4, 22): 1.6827130180063756e-07, (19, 22): 0.021202352298182132, (7, 11): 0.6152000476544327, (5, 23): 0.00016843957310243816, (20, 17): 0.4713280846148876, (18, 1): 1.6827130180063756e-07, (16, 17): 0.8768619219544241, (21, 20): 0.09103494254544671, (22, 27): 1.6827130180063756e-07, (18, 27): 1.6827130180063756e-07, (23, 2): 1.6827130180063756e-07, (9, 24): 0.008077190757732403, (14, 23): 0.011779159397346431, (12, 3): 0.008413733361333678, (15, 22): 0.026082220050400624, (13, 2): 0.0005049821767037133, (26, 26): 1.6827130180063756e-07, (0, 25): 1.6827130180063756e-07, (24, 6): 0.021202352298182132, (12, 25): 
0.0003367108749030757, (27, 19): 0.0015146099875075385, (1, 4): 1.6827130180063756e-07, (25, 7): 0.04627477626647712, (2, 3): 1.6827130180063756e-07, (26, 4): 0.001346338685706901, (3, 26): 1.6827130180063756e-07, (27, 9): 0.0005049821767037133, (6, 5): 0.0011780673839062636, (7, 0): 1.6827130180063756e-07, (5, 16): 0.06427980555914534, (20, 24): 0.0038704082127164648, (16, 8): 0.4597173647906436, (19, 5): 0.009928175077539417, (17, 21): 0.04711613277548031, (22, 18): 0.2678880807379168, (20, 6): 0.008750275964934953, (18, 18): 0.4221928644891015, (23, 27): 1.6827130180063756e-07, (21, 7): 0.01531285673515982, (8, 4): 0.00488003602352029, (9, 1): 1.6827130180063756e-07, (10, 6): 0.15985790498190747, (11, 23): 0.0331496147260274, (0, 16): 1.6827130180063756e-07, (24, 13): 0.4444046763267856, (14, 0): 1.6827130180063756e-07, (12, 16): 0.7986157666171276, (25, 8): 0.08952050082924097, (15, 5): 0.10399183278409581, (13, 21): 0.15531457983329025, (2, 26): 1.6827130180063756e-07, (26, 15): 0.10096294935168433, (3, 3): 1.6827130180063756e-07, (27, 14): 0.0026925091001120016, (4, 4): 1.6827130180063756e-07, (5, 25): 1.6827130180063756e-07, (16, 15): 0.8571741796437495, (6, 22): 0.028774560879210823, (19, 10): 0.1134150256849315, (17, 14): 0.6789748710368744, (7, 23): 0.021707166203584045, (22, 21): 0.05283735703670199, (20, 13): 0.47284252633109336, (18, 13): 0.4624097056194538, (23, 16): 0.4147889272098734, (21, 0): 1.6827130180063756e-07, (8, 11): 0.8442172894051004, (22, 15): 0.5372904349207376, (9, 10): 0.8352989104096666, (23, 14): 0.5076746858038254, (10, 1): 1.6827130180063756e-07, (8, 17): 0.8696262559769967, (11, 12): 0.4062070908180409, (0, 23): 1.6827130180063756e-07, (24, 20): 0.10163603455888688, (14, 11): 0.6742632745864565, (12, 15): 0.5945026775329543, (1, 22): 1.6827130180063756e-07, (25, 17): 0.26216685647669513, (15, 2): 0.00016843957310243816, (13, 14): 0.6328685343434997, (2, 21): 1.6827130180063756e-07, (26, 22): 0.0055531212307228395, (0, 13): 
1.6827130180063756e-07, (3, 8): 1.6827130180063756e-07, (27, 7): 0.00016843957310243816, (1, 8): 1.6827130180063756e-07, (4, 3): 1.6827130180063756e-07, (2, 15): 1.6827130180063756e-07, (5, 2): 1.6827130180063756e-07, (16, 6): 0.1595213623783062, (6, 9): 0.0770684244959938, (19, 19): 0.1854351428556044, (17, 7): 0.181901445517791, (7, 12): 0.7912118293378996, (20, 20): 0.09490518248686138, (18, 4): 0.006562749041526665, (21, 9): 0.0694962159149651, (17, 25): 0.0003367108749030757, (22, 6): 0.00891854726673559, (23, 7): 0.04021700940165418, (10, 8): 0.5611849597764281, (8, 24): 0.006394477739726028, (11, 5): 0.09625135290126648, (9, 21): 0.2579600739316792, (24, 27): 1.6827130180063756e-07, (14, 18): 0.8299142287520462, (12, 6): 0.26401784079650215, (10, 18): 0.8041687195765487, (25, 26): 1.6827130180063756e-07, (15, 11): 0.7511632595093479, (13, 7): 0.5095256701236324, (11, 27): 1.6827130180063756e-07, (26, 17): 0.08178002094641165, (0, 4): 1.6827130180063756e-07, (24, 1): 1.6827130180063756e-07, (1, 1): 1.6827130180063756e-07, (4, 10): 0.001009796082105626, (2, 6): 1.6827130180063756e-07, (5, 11): 0.031971715613422935, (3, 23): 1.6827130180063756e-07, (6, 0): 1.6827130180063756e-07, (4, 16): 0.0031973230055139146, (19, 24): 0.0031973230055139146, (7, 5): 0.007235834248729215, (5, 21): 0.007404105550529853, (20, 19): 0.17533886474756613, (16, 19): 0.3185377425799087, (6, 26): 1.6827130180063756e-07, (21, 18): 0.28034015707116394, (17, 18): 0.5132276387632464, (22, 25): 0.0006732534785043509, (18, 25): 0.0006732534785043509, (14, 21): 0.10769380142370984, (15, 16): 0.9483772252196951, (13, 0): 1.6827130180063756e-07, (11, 16): 0.6537341757667787, (26, 24): 0.00016843957310243816, (0, 27): 1.6827130180063756e-07, (24, 8): 0.09557826769406393, (12, 27): 1.6827130180063756e-07, (27, 21): 0.001009796082105626, (1, 26): 1.6827130180063756e-07, (25, 5): 0.0070675629469285775, (13, 26): 0.00016843957310243816, (2, 1): 1.6827130180063756e-07, (26, 2): 
1.6827130180063756e-07, (27, 11): 0.001346338685706901, (7, 2): 1.6827130180063756e-07, (20, 26): 1.6827130180063756e-07, (16, 10): 0.6468350523929526, (21, 27): 1.6827130180063756e-07, (19, 7): 0.035673684253036964, (17, 11): 0.4528182414168175, (7, 24): 0.0050483073253209275, (22, 16): 0.46644821686266913, (20, 0): 1.6827130180063756e-07, (18, 16): 0.8140967263827863, (23, 21): 0.05569796916731283, (21, 5): 0.00488003602352029, (8, 6): 0.052500814433100715, (22, 10): 0.19771894788705094, (9, 7): 0.21976248842293444, (10, 4): 0.02221198010898596, (15, 25): 0.00016843957310243816, (11, 9): 0.8188083228332041, (0, 18): 1.6827130180063756e-07, (24, 15): 0.40990905945765493, (14, 6): 0.27159004937753084, (12, 18): 0.8810687044994401, (1, 19): 1.6827130180063756e-07, (25, 14): 0.34512460826440944, (15, 7): 0.415462012417076, (13, 19): 0.6422917272443354, (2, 24): 1.6827130180063756e-07, (26, 13): 0.11408811089213405, (0, 8): 1.6827130180063756e-07, (3, 5): 1.6827130180063756e-07, (27, 0): 1.6827130180063756e-07, (4, 6): 1.6827130180063756e-07, (5, 7): 0.001009796082105626, (16, 1): 1.6827130180063756e-07, (6, 20): 0.1511077972882743, (19, 12): 0.2882489082557939, (17, 12): 0.4740204254436978, (7, 17): 0.8374864373330749, (20, 15): 0.6725805615684501, (18, 11): 0.2655322825127079, (23, 18): 0.25779180262987855, (21, 14): 0.5982046461725683, (8, 13): 0.9241461577604033, (22, 13): 0.5137324526686483, (9, 8): 0.4164716402278798, (23, 8): 0.08077039313560783, (10, 15): 0.4080580751378479, (8, 19): 0.6022431574157836, (11, 14): 0.25190230706685623, (9, 18): 0.7745529704596366, (24, 22): 0.030289002595416562, (14, 9): 0.7752260556668391, (12, 9): 0.8075341456125614, (1, 20): 1.6827130180063756e-07, (25, 23): 0.009759903775738779, (15, 12): 0.7489757325859396, (13, 12): 0.4556788535474283, (2, 19): 1.6827130180063756e-07, (26, 20): 0.02574567744679935, (0, 15): 1.6827130180063756e-07, (3, 10): 1.6827130180063756e-07, (27, 25): 1.6827130180063756e-07, (1, 14): 
1.6827130180063756e-07, (4, 13): 0.0038704082127164648, (2, 13): 1.6827130180063756e-07, (5, 0): 1.6827130180063756e-07, (3, 16): 1.6827130180063756e-07, (6, 15): 0.6207530006138537, (19, 21): 0.04543341975747393, (17, 5): 0.04408724934306884, (7, 14): 0.9409732879404671, (20, 22): 0.02204370880718532, (18, 2): 0.00016843957310243816, (21, 23): 0.011442616793745156, (22, 4): 0.0011780673839062636, (23, 1): 1.6827130180063756e-07, (8, 26): 1.6827130180063756e-07, (11, 7): 0.42875544525932635, (9, 27): 1.6827130180063756e-07, (14, 16): 0.9471993261070907, (12, 0): 1.6827130180063756e-07, (10, 16): 0.5781803612582925, (25, 24): 0.0023559664965107263, (15, 21): 0.07117892893297148, (13, 5): 0.12923252805419144, (0, 6): 1.6827130180063756e-07, (24, 3): 1.6827130180063756e-07, (1, 7): 1.6827130180063756e-07, (25, 2): 0.00016843957310243816, (2, 4): 1.6827130180063756e-07, (5, 9): 0.007908919455931765, (3, 25): 1.6827130180063756e-07, (6, 6): 0.005216578627121565, (4, 18): 0.0018511525911088136, (19, 26): 1.6827130180063756e-07, (7, 7): 0.051827729225898164, (5, 19): 0.024736049635995523, (16, 21): 0.056707596978116656, (6, 24): 0.001346338685706901, (21, 16): 0.5371221636189369, (19, 0): 1.6827130180063756e-07, (17, 16): 0.8847706731390541, (18, 23): 0.007908919455931765, (8, 1): 1.6827130180063756e-07, (14, 27): 1.6827130180063756e-07, (10, 27): 1.6827130180063756e-07, (15, 18): 0.748807461284139, (11, 18): 0.858352078756354, (24, 10): 0.2641861120983028, (12, 21): 0.21269509374730766, (27, 23): 1.6827130180063756e-07, (1, 24): 1.6827130180063756e-07, (25, 11): 0.3065904801520634, (13, 24): 0.0026925091001120016, (26, 0): 1.6827130180063756e-07, (27, 13): 0.0018511525911088136, (4, 25): 1.6827130180063756e-07, (16, 12): 0.6682055077216336, (6, 19): 0.26351302689110023, (21, 25): 0.0011780673839062636, (19, 9): 0.07925595141940209, (17, 9): 0.3749086286831223, (7, 26): 1.6827130180063756e-07, (22, 22): 0.028101475672008273, (20, 2): 0.0003367108749030757, (18, 14): 
0.6390945725101232, (23, 23): 0.01581767064056173, (21, 3): 0.0008415247803049884, (8, 8): 0.2487051523326441, (22, 8): 0.05502488396011028, (9, 5): 0.04072182330705609, (23, 13): 0.4896696565111571, (10, 2): 0.00016843957310243816, (15, 27): 1.6827130180063756e-07, (11, 11): 0.636907045586715, (0, 20): 1.6827130180063756e-07, (24, 17): 0.3222397112195227, (14, 4): 0.04576996236107521, (12, 12): 0.33082154761135524, (1, 17): 1.6827130180063756e-07, (25, 12): 0.35454780116524515, (15, 1): 1.6827130180063756e-07, (13, 17): 0.9446752565800811, (2, 22): 1.6827130180063756e-07, (26, 11): 0.09271765556345309, (0, 10): 1.6827130180063756e-07, (3, 7): 1.6827130180063756e-07, (27, 2): 1.6827130180063756e-07, (1, 11): 1.6827130180063756e-07, (4, 0): 1.6827130180063756e-07, (5, 5): 1.6827130180063756e-07, (16, 3): 0.0055531212307228395, (6, 10): 0.15009816947747048, (19, 14): 0.6143586911454295, (17, 2): 0.00016843957310243816, (7, 19): 0.4863042304751444, (20, 9): 0.04711613277548031, (18, 9): 0.1859399567610063, (16, 25): 0.0003367108749030757, (21, 12): 0.38180775205694845, (8, 15): 0.895035222548893, (22, 3): 0.0003367108749030757, (9, 14): 0.6587823148207979, (23, 10): 0.24820033842724218, (10, 13): 0.45214515620961493, (8, 21): 0.22060384493193763, (11, 0): 1.6827130180063756e-07, (9, 16): 0.7037107524015681, (24, 24): 0.00488003602352029, (14, 15): 0.8798908053868356, (12, 11): 0.5174344213082623, (10, 23): 0.04004873809985354, (25, 21): 0.0466113188700784, (15, 14): 0.8157794394007927, (13, 10): 0.6989991559511503, (2, 17): 1.6827130180063756e-07, (26, 18): 0.06108265082493323, (0, 1): 1.6827130180063756e-07, (3, 12): 1.6827130180063756e-07, (27, 27): 1.6827130180063756e-07, (1, 12): 1.6827130180063756e-07, (4, 15): 0.004375222118118378, (2, 11): 1.6827130180063756e-07, (5, 14): 0.06730868899155681, (3, 18): 0.00016843957310243816, (6, 13): 0.5095256701236324, (4, 21): 0.0003367108749030757, (19, 23): 0.007908919455931765, (7, 8): 0.11879970734255191, (20, 16): 
0.6227722562354614, (18, 0): 0.00016843957310243816, (16, 16): 0.9270067698910142, (21, 21): 0.04913538839708796, (22, 26): 1.6827130180063756e-07, (18, 26): 1.6827130180063756e-07, (23, 3): 0.00016843957310243816, (9, 25): 0.001346338685706901, (14, 22): 0.04038528070345482, (12, 2): 0.0006732534785043509, (15, 23): 0.009423361172137504, (13, 3): 0.009591632473938141, (0, 24): 1.6827130180063756e-07, (24, 5): 0.008077190757732403, (12, 24): 0.0045434934199190145, (27, 16): 0.0011780673839062636, (1, 5): 1.6827130180063756e-07, (25, 0): 1.6827130180063756e-07, (2, 2): 1.6827130180063756e-07, (26, 7): 0.011947430699147069, (3, 27): 1.6827130180063756e-07, (6, 4): 0.00016843957310243816, (7, 1): 1.6827130180063756e-07, (5, 17): 0.052500814433100715, (16, 23): 0.008413733361333678, (19, 2): 1.6827130180063756e-07, (17, 22): 0.019687910581976397, (20, 5): 0.005216578627121565, (18, 21): 0.044423791946670114, (23, 24): 0.005721392532523477, (8, 3): 0.001009796082105626, (9, 2): 0.00016843957310243816, (14, 25): 0.00016843957310243816, (10, 25): 0.001346338685706901, (11, 20): 0.48714558698414756, (24, 12): 0.43094297218273464, (14, 3): 0.008413733361333678, (12, 23): 0.02625049135220126, (25, 9): 0.15430495202248642, (13, 22): 0.05939993780692685, (26, 14): 0.10870342923451366, (3, 0): 1.6827130180063756e-07, (27, 15): 0.0023559664965107263, (4, 27): 1.6827130180063756e-07, (5, 26): 1.6827130180063756e-07, (16, 14): 0.7520046160183511, (6, 17): 0.5140689952722496, (19, 11): 0.17416096563496167, (17, 15): 0.8193131367386061, (7, 20): 0.30002789938183855, (22, 20): 0.09288592686525372, (20, 12): 0.31651848695830104, (18, 12): 0.3333456171383648, (23, 17): 0.34882657690402347, (21, 1): 1.6827130180063756e-07, (8, 10): 0.6803210414512795, (22, 14): 0.5568099059296115, (9, 11): 0.8908284400038771, (23, 15): 0.4751983245563023, (10, 0): 1.6827130180063756e-07, (8, 16): 0.8943621373416905, (11, 13): 0.24399355588222627, (0, 22): 1.6827130180063756e-07, (24, 19): 
0.16456950143232532, (14, 10): 0.7429179657211167, (12, 14): 0.39880315353881285, (1, 23): 1.6827130180063756e-07, (25, 18): 0.20091610262126303, (15, 3): 0.0070675629469285775, (13, 15): 0.7912118293378996, (2, 20): 1.6827130180063756e-07, (26, 9): 0.043582435437666926, (0, 12): 1.6827130180063756e-07, (3, 9): 1.6827130180063756e-07, (27, 4): 1.6827130180063756e-07, (1, 9): 1.6827130180063756e-07, (4, 2): 1.6827130180063756e-07, (2, 14): 1.6827130180063756e-07, (5, 3): 1.6827130180063756e-07, (16, 5): 0.07286164195097786, (6, 8): 0.03516887034763505, (19, 16): 0.7277735485590593, (17, 0): 1.6827130180063756e-07, (7, 13): 0.8955400364542949, (20, 11): 0.1824062594231929, (18, 7): 0.08464063307702249, (16, 27): 1.6827130180063756e-07, (21, 10): 0.13646819403161886, (17, 26): 1.6827130180063756e-07, (22, 1): 1.6827130180063756e-07, (9, 12): 0.8435442041978979, (23, 4): 0.0016828812893081762, (10, 11): 0.7922214571487034, (8, 23): 0.03247652951882485, (11, 2): 0.0005049821767037133, (9, 22): 0.11913624994615318, (24, 26): 1.6827130180063756e-07, (14, 13): 0.7143118444150083, (12, 5): 0.11779007953174808, (10, 21): 0.2651957399091066, (25, 27): 1.6827130180063756e-07, (15, 8): 0.6083009242806066, (13, 8): 0.7164993713384166, (11, 24): 0.007404105550529853, (26, 16): 0.0883426017166365, (0, 3): 1.6827130180063756e-07, (24, 0): 1.6827130180063756e-07, (3, 14): 1.6827130180063756e-07, (1, 2): 1.6827130180063756e-07, (4, 9): 0.00016843957310243816, (2, 9): 1.6827130180063756e-07, (5, 12): 0.04762094668088222, (3, 20): 1.6827130180063756e-07, (6, 3): 1.6827130180063756e-07, (4, 23): 1.6827130180063756e-07, (19, 25): 0.0008415247803049884, (7, 10): 0.41394757070087024, (5, 22): 0.0031973230055139146, (20, 18): 0.3104607200934781, (16, 18): 0.6258011396678729, (21, 19): 0.1681031987701387, (17, 19): 0.2530802061794607, (22, 24): 0.005384849928922202, (18, 24): 0.0016828812893081762, (14, 20): 0.2643543834001034, (15, 17): 0.9364299627918499, (13, 1): 1.6827130180063756e-07, 
(11, 17): 0.8287363296394418, (26, 27): 1.6827130180063756e-07, (0, 26): 1.6827130180063756e-07, (24, 7): 0.04913538839708796, (12, 26): 1.6827130180063756e-07, (27, 18): 0.001346338685706901, (1, 27): 1.6827130180063756e-07, (25, 6): 0.020360995789178944, (13, 27): 1.6827130180063756e-07, (2, 0): 1.6827130180063756e-07, (26, 5): 0.0031973230055139146, (27, 8): 0.0005049821767037133}}
| mit |
kracwarlock/neon | neon/models/autoencoder.py | 9 | 2835 | # ----------------------------------------------------------------------------
# Copyright 2014 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Contains code to train stacked autoencoder models and run inference.
"""
import logging
from neon.backends.backend import Block
from neon.models.mlp import MLP
from neon.util.compat import range
logger = logging.getLogger(__name__)
class Autoencoder(MLP):
    """
    Adaptation of multi-layer perceptron.

    An autoencoder is trained to reconstruct its own input, so the
    targets fed to bprop/cost are the inputs themselves.
    """

    def fit(self, datasets):
        """
        Learn model weights on the given datasets.

        Only the first dataset in ``datasets`` is used.  Runs
        ``self.num_epochs`` epochs of minibatch fprop/bprop/update,
        logging the accumulated training error after each epoch.
        """
        for layer in self.layers:
            logger.info("%s", str(layer))
        ds = datasets[0]
        inputs = ds.get_inputs(train=True)['train']
        # Autoencoder: the reconstruction targets are the inputs themselves.
        targets = ds.get_inputs(train=True)['train']
        num_batches = len(inputs)
        logger.info('commencing model fitting')
        # Scalar accumulator living on the backend (CPU/GPU) device.
        error = self.backend.empty((1, 1))
        while self.epochs_complete < self.num_epochs:
            self.backend.begin(Block.epoch, self.epochs_complete)
            error.fill(0.0)
            for batch in range(num_batches):
                self.backend.begin(Block.minibatch, batch)
                inputs_batch = ds.get_batch(inputs, batch)
                targets_batch = ds.get_batch(targets, batch)
                # Forward pass.
                self.backend.begin(Block.fprop, batch)
                self.fprop(inputs_batch)
                self.backend.end(Block.fprop, batch)
                # Backward pass against the reconstruction targets.
                self.backend.begin(Block.bprop, batch)
                self.bprop(targets_batch, inputs_batch)
                self.backend.end(Block.bprop, batch)
                # Accumulate the cost for this minibatch into `error`.
                self.backend.add(error,
                                 self.cost.apply_function(targets_batch),
                                 error)
                # Parameter update.
                self.backend.begin(Block.update, batch)
                self.update(self.epochs_complete)
                self.backend.end(Block.update, batch)
                self.backend.end(Block.minibatch, batch)
            self.epochs_complete += 1
            logger.info('epoch: %d, total training error: %0.5f',
                        self.epochs_complete,
                        error.asnumpyarray() / num_batches)
            self.backend.end(Block.epoch, self.epochs_complete - 1)
| apache-2.0 |
fractal-mind/portfolio | node_modules/node-gyp/gyp/pylib/gyp/simple_copy.py | 1869 | 1247 | # Copyright 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A clone of the default copy.deepcopy that doesn't handle cyclic
structures or complex types except for dicts and lists. This is
because gyp copies so large structure that small copy overhead ends up
taking seconds in a project the size of Chromium."""
class Error(Exception):
  """Raised when deepcopy encounters a type it does not support."""
  pass

__all__ = ["Error", "deepcopy"]
def deepcopy(x):
  """Deep copy operation on gyp objects such as strings, ints, dicts
  and lists. More than twice as fast as copy.deepcopy but much less
  generic.

  Raises:
    Error: if type(x) has no registered copier in _deepcopy_dispatch.
  """
  try:
    return _deepcopy_dispatch[type(x)](x)
  except KeyError:
    # Bug fix: previously the '%' operator bound only to the second string
    # fragment ('...' + '...' % type(x)), so this path raised a TypeError
    # ("not all arguments converted") instead of the intended Error message.
    raise Error('Unsupported type %s for deepcopy. Use copy.deepcopy '
                'or expand simple_copy support.' % type(x))
# Dispatch table mapping a type to its copier; `d` is a short local alias
# that is deleted at the bottom of the module.
_deepcopy_dispatch = d = {}

def _deepcopy_atomic(x):
  # Atomic/immutable values need no copying; return them unchanged.
  return x

# NOTE: 'long' and 'unicode' are Python 2 builtins; this module targets py2.
for x in (type(None), int, long, float,
          bool, str, unicode, type):
  d[x] = _deepcopy_atomic
def _deepcopy_list(x):
  # Recursively copy every element into a brand-new list.
  return list(map(deepcopy, x))

d[list] = _deepcopy_list
def _deepcopy_dict(x):
  # Build a fresh dict, recursively copying both keys and values.
  # (iteritems() because this module targets Python 2.)
  return dict((deepcopy(key), deepcopy(value))
              for key, value in x.iteritems())

d[dict] = _deepcopy_dict

# Drop the short-lived alias; only _deepcopy_dispatch remains public.
del d
| mit |
nvoron23/hue | apps/search/src/search/decorators.py | 4 | 2234 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import json
from django.utils.functional import wraps
from django.utils.translation import ugettext as _
from desktop.lib.exceptions_renderable import PopupException
from search.models import Collection
from search.search_controller import SearchController
LOG = logging.getLogger(__name__)
def allow_viewer_only(view_func):
  """Decorator for dashboard views.

  If the POSTed collection JSON carries an 'id', verify the requesting user
  can see that search collection; otherwise (new/unsaved dashboard) let the
  view run.  Raises PopupException when access is denied.
  """
  def decorate(request, *args, **kwargs):
    collection_json = json.loads(request.POST.get('collection', '{}'))
    # Bug fix: use .get() so a payload without an 'id' key (the default
    # '{}' fallback, or a new dashboard) no longer raises KeyError.
    if collection_json.get('id'):
      try:
        SearchController(request.user).get_search_collections().get(id=collection_json['id'])
      except Collection.DoesNotExist:
        message = _("Dashboard does not exist or you don't have the permission to access it.")
        raise PopupException(message)
    return view_func(request, *args, **kwargs)
  return wraps(view_func)(decorate)
def allow_owner_only(view_func):
  """Decorator for dashboard views.

  If the POSTed collection JSON carries an 'id', only the collection owner
  or a superuser may proceed; a missing collection is silently ignored.
  Raises PopupException when access is denied.
  """
  def decorate(request, *args, **kwargs):
    collection_json = json.loads(request.POST.get('collection', '{}'))
    # Bug fix: use .get() so a payload without an 'id' key (the default
    # '{}' fallback, or a new dashboard) no longer raises KeyError.
    if collection_json.get('id'):
      try:
        collection = Collection.objects.get(id=collection_json['id'])
        if collection.owner != request.user and not request.user.is_superuser:
          message = _("Permission denied. You are not an Administrator.")
          raise PopupException(message)
      except Collection.DoesNotExist:
        # Collection was deleted or never saved; nothing to authorize.
        pass
    return view_func(request, *args, **kwargs)
  return wraps(view_func)(decorate)
| apache-2.0 |
beiko-lab/gengis | bin/Lib/site-packages/ndg/httpsclient/https.py | 66 | 4598 | """ndg_httpsclient HTTPS module containing PyOpenSSL implementation of
httplib.HTTPSConnection
PyOpenSSL utility to make a httplib-like interface suitable for use with
urllib2
"""
__author__ = "P J Kershaw (STFC)"
__date__ = "09/12/11"
__copyright__ = "(C) 2012 Science and Technology Facilities Council"
__license__ = "BSD - see LICENSE file in top-level directory"
__contact__ = "Philip.Kershaw@stfc.ac.uk"
__revision__ = '$Id$'
import logging
import socket
from httplib import HTTPS_PORT
from httplib import HTTPConnection
from urllib2 import AbstractHTTPHandler
from OpenSSL import SSL
from ndg.httpsclient.ssl_socket import SSLSocket
log = logging.getLogger(__name__)
class HTTPSConnection(HTTPConnection):
    """This class allows communication via SSL using PyOpenSSL.
    It is based on httplib.HTTPSConnection, modified to use PyOpenSSL.

    Note: This uses the constructor inherited from HTTPConnection to allow it to
    be used with httplib and HTTPSContextHandler. To use the class directly with
    an SSL context set ssl_context after construction.

    @cvar default_port: default port for this class (443)
    @type default_port: int
    @cvar default_ssl_method: default SSL method used if no SSL context is
    explicitly set - defaults to version 2/3.
    @type default_ssl_method: int
    """
    default_port = HTTPS_PORT
    default_ssl_method = SSL.SSLv23_METHOD

    def __init__(self, host, port=None, strict=None,
                 timeout=socket._GLOBAL_DEFAULT_TIMEOUT, ssl_context=None):
        HTTPConnection.__init__(self, host, port, strict, timeout)
        # ssl_context may already be present as a class attribute (see
        # HTTPSContextHandler.https_open); only default it when absent.
        if not hasattr(self, 'ssl_context'):
            self.ssl_context = None

        if ssl_context is not None:
            if not isinstance(ssl_context, SSL.Context):
                raise TypeError('Expecting OpenSSL.SSL.Context type for "'
                                'ssl_context" keyword; got %r instead' %
                                ssl_context)

            self.ssl_context = ssl_context

    def connect(self):
        """Create SSL socket and connect to peer
        """
        if getattr(self, 'ssl_context', None):
            if not isinstance(self.ssl_context, SSL.Context):
                raise TypeError('Expecting OpenSSL.SSL.Context type for "'
                                'ssl_context" attribute; got %r instead' %
                                self.ssl_context)
            ssl_context = self.ssl_context
        else:
            # No explicit context: fall back to the class default method.
            ssl_context = SSL.Context(self.__class__.default_ssl_method)

        sock = socket.create_connection((self.host, self.port), self.timeout)

        # Tunnel if using a proxy - ONLY available for Python 2.6.2 and above
        if getattr(self, '_tunnel_host', None):
            self.sock = sock
            self._tunnel()

        # Wrap the (possibly tunnelled) plain socket in the PyOpenSSL layer.
        self.sock = SSLSocket(ssl_context, sock)

        # Go to client mode.
        self.sock.set_connect_state()

    def close(self):
        """Close socket and shut down SSL connection"""
        self.sock.close()
class HTTPSContextHandler(AbstractHTTPHandler):
    '''HTTPS handler that allows a SSL context to be set for the SSL
    connections.
    '''
    https_request = AbstractHTTPHandler.do_request_

    def __init__(self, ssl_context, debuglevel=0):
        """
        @param ssl_context:SSL context
        @type ssl_context: OpenSSL.SSL.Context
        @param debuglevel: debug level for HTTPSHandler
        @type debuglevel: int
        """
        AbstractHTTPHandler.__init__(self, debuglevel)

        if ssl_context is not None:
            if not isinstance(ssl_context, SSL.Context):
                raise TypeError('Expecting OpenSSL.SSL.Context type for "'
                                'ssl_context" keyword; got %r instead' %
                                ssl_context)
            self.ssl_context = ssl_context
        else:
            # Default: permissive SSLv23 (version 2/3) context.
            self.ssl_context = SSL.Context(SSL.SSLv23_METHOD)

    def https_open(self, req):
        """Opens HTTPS request
        @param req: HTTP request
        @return: HTTP Response object
        """
        # Make a custom class extending HTTPSConnection, with the SSL context
        # set as a class variable so that it is available to the connect method.
        customHTTPSContextConnection = type('CustomHTTPSContextConnection',
                                            (HTTPSConnection, object),
                                            {'ssl_context': self.ssl_context})
        return self.do_open(customHTTPSContextConnection, req)
| gpl-3.0 |
jk1/intellij-community | python/lib/Lib/site-packages/django/core/serializers/xml_serializer.py | 293 | 11885 | """
XML serializer.
"""
from django.conf import settings
from django.core.serializers import base
from django.db import models, DEFAULT_DB_ALIAS
from django.utils.xmlutils import SimplerXMLGenerator
from django.utils.encoding import smart_unicode
from xml.dom import pulldom
class Serializer(base.Serializer):
    """
    Serializes a QuerySet to XML.
    """

    def indent(self, level):
        # Pretty-printing whitespace is only emitted when the 'indent'
        # serializer option is set.
        if self.options.get('indent', None) is not None:
            self.xml.ignorableWhitespace('\n' + ' ' * self.options.get('indent', None) * level)

    def start_serialization(self):
        """
        Start serialization -- open the XML document and the root element.
        """
        self.xml = SimplerXMLGenerator(self.stream, self.options.get("encoding", settings.DEFAULT_CHARSET))
        self.xml.startDocument()
        self.xml.startElement("django-objects", {"version" : "1.0"})

    def end_serialization(self):
        """
        End serialization -- end the document.
        """
        self.indent(0)
        self.xml.endElement("django-objects")
        self.xml.endDocument()

    def start_object(self, obj):
        """
        Called as each object is handled.
        """
        if not hasattr(obj, "_meta"):
            raise base.SerializationError("Non-model object (%s) encountered during serialization" % type(obj))

        self.indent(1)
        obj_pk = obj._get_pk_val()
        # An unset pk (e.g. when serializing with natural keys) is simply
        # omitted from the <object> attributes.
        if obj_pk is None:
            attrs = {"model": smart_unicode(obj._meta),}
        else:
            attrs = {
                "pk": smart_unicode(obj._get_pk_val()),
                "model": smart_unicode(obj._meta),
            }

        self.xml.startElement("object", attrs)

    def end_object(self, obj):
        """
        Called after handling all fields for an object.
        """
        self.indent(1)
        self.xml.endElement("object")

    def handle_field(self, obj, field):
        """
        Called to handle each field on an object (except for ForeignKeys and
        ManyToManyFields)
        """
        self.indent(2)
        self.xml.startElement("field", {
            "name" : field.name,
            "type" : field.get_internal_type()
        })

        # Get a "string version" of the object's data.
        if getattr(obj, field.name) is not None:
            self.xml.characters(field.value_to_string(obj))
        else:
            # NULL values are represented by an empty <None/> subelement.
            self.xml.addQuickElement("None")

        self.xml.endElement("field")

    def handle_fk_field(self, obj, field):
        """
        Called to handle a ForeignKey (we need to treat them slightly
        differently from regular fields).
        """
        self._start_relational_field(field)
        related = getattr(obj, field.name)
        if related is not None:
            if self.use_natural_keys and hasattr(related, 'natural_key'):
                # If related object has a natural key, use it
                related = related.natural_key()
                # Iterable natural keys are rolled out as subelements
                for key_value in related:
                    self.xml.startElement("natural", {})
                    self.xml.characters(smart_unicode(key_value))
                    self.xml.endElement("natural")
            else:
                if field.rel.field_name == related._meta.pk.name:
                    # Related to remote object via primary key
                    related = related._get_pk_val()
                else:
                    # Related to remote object via other field
                    related = getattr(related, field.rel.field_name)
                self.xml.characters(smart_unicode(related))
        else:
            self.xml.addQuickElement("None")
        self.xml.endElement("field")

    def handle_m2m_field(self, obj, field):
        """
        Called to handle a ManyToManyField. Related objects are only
        serialized as references to the object's PK (i.e. the related *data*
        is not dumped, just the relation).
        """
        # Explicit (non auto-created) through models are serialized as their
        # own objects, so the relation itself is skipped here.
        if field.rel.through._meta.auto_created:
            self._start_relational_field(field)
            if self.use_natural_keys and hasattr(field.rel.to, 'natural_key'):
                # If the objects in the m2m have a natural key, use it
                def handle_m2m(value):
                    natural = value.natural_key()
                    # Iterable natural keys are rolled out as subelements
                    self.xml.startElement("object", {})
                    for key_value in natural:
                        self.xml.startElement("natural", {})
                        self.xml.characters(smart_unicode(key_value))
                        self.xml.endElement("natural")
                    self.xml.endElement("object")
            else:
                def handle_m2m(value):
                    self.xml.addQuickElement("object", attrs={
                        'pk' : smart_unicode(value._get_pk_val())
                    })
            for relobj in getattr(obj, field.name).iterator():
                handle_m2m(relobj)

            self.xml.endElement("field")

    def _start_relational_field(self, field):
        """
        Helper to output the <field> element for relational fields
        """
        self.indent(2)
        self.xml.startElement("field", {
            "name" : field.name,
            "rel"  : field.rel.__class__.__name__,
            "to"   : smart_unicode(field.rel.to._meta),
        })
class Deserializer(base.Deserializer):
    """
    Deserialize XML.
    """

    def __init__(self, stream_or_string, **options):
        super(Deserializer, self).__init__(stream_or_string, **options)
        # Stream the XML with pulldom so large fixtures are not fully
        # loaded into memory; only <object> subtrees are expanded.
        self.event_stream = pulldom.parse(self.stream)
        self.db = options.pop('using', DEFAULT_DB_ALIAS)

    def next(self):
        # Iterator protocol (Python 2): yield one DeserializedObject per
        # <object> element, StopIteration at end of stream.
        for event, node in self.event_stream:
            if event == "START_ELEMENT" and node.nodeName == "object":
                self.event_stream.expandNode(node)
                return self._handle_object(node)
        raise StopIteration

    def _handle_object(self, node):
        """
        Convert an <object> node to a DeserializedObject.
        """
        # Look up the model using the model loading mechanism. If this fails,
        # bail.
        Model = self._get_model_from_node(node, "model")

        # Start building a data dictionary from the object.
        # If the node is missing the pk set it to None
        if node.hasAttribute("pk"):
            pk = node.getAttribute("pk")
        else:
            pk = None

        data = {Model._meta.pk.attname : Model._meta.pk.to_python(pk)}

        # Also start building a dict of m2m data (this is saved as
        # {m2m_accessor_attribute : [list_of_related_objects]})
        m2m_data = {}

        # Deseralize each field.
        for field_node in node.getElementsByTagName("field"):
            # If the field is missing the name attribute, bail (are you
            # sensing a pattern here?)
            field_name = field_node.getAttribute("name")
            if not field_name:
                raise base.DeserializationError("<field> node is missing the 'name' attribute")

            # Get the field from the Model. This will raise a
            # FieldDoesNotExist if, well, the field doesn't exist, which will
            # be propagated correctly.
            field = Model._meta.get_field(field_name)

            # As is usually the case, relation fields get the special treatment.
            if field.rel and isinstance(field.rel, models.ManyToManyRel):
                m2m_data[field.name] = self._handle_m2m_field_node(field_node, field)
            elif field.rel and isinstance(field.rel, models.ManyToOneRel):
                data[field.attname] = self._handle_fk_field_node(field_node, field)
            else:
                # A <None/> subelement marks a NULL value (matches the
                # serializer's handle_field output).
                if field_node.getElementsByTagName('None'):
                    value = None
                else:
                    value = field.to_python(getInnerText(field_node).strip())
                data[field.name] = value

        # Return a DeserializedObject so that the m2m data has a place to live.
        return base.DeserializedObject(Model(**data), m2m_data)

    def _handle_fk_field_node(self, node, field):
        """
        Handle a <field> node for a ForeignKey
        """
        # Check if there is a child node named 'None', returning None if so.
        if node.getElementsByTagName('None'):
            return None
        else:
            if hasattr(field.rel.to._default_manager, 'get_by_natural_key'):
                keys = node.getElementsByTagName('natural')
                if keys:
                    # If there are 'natural' subelements, it must be a natural key
                    field_value = [getInnerText(k).strip() for k in keys]
                    obj = field.rel.to._default_manager.db_manager(self.db).get_by_natural_key(*field_value)
                    obj_pk = getattr(obj, field.rel.field_name)
                    # If this is a natural foreign key to an object that
                    # has a FK/O2O as the foreign key, use the FK value
                    if field.rel.to._meta.pk.rel:
                        obj_pk = obj_pk.pk
                else:
                    # Otherwise, treat like a normal PK
                    field_value = getInnerText(node).strip()
                    obj_pk = field.rel.to._meta.get_field(field.rel.field_name).to_python(field_value)
                return obj_pk
            else:
                field_value = getInnerText(node).strip()
                return field.rel.to._meta.get_field(field.rel.field_name).to_python(field_value)

    def _handle_m2m_field_node(self, node, field):
        """
        Handle a <field> node for a ManyToManyField.
        """
        if hasattr(field.rel.to._default_manager, 'get_by_natural_key'):
            def m2m_convert(n):
                keys = n.getElementsByTagName('natural')
                if keys:
                    # If there are 'natural' subelements, it must be a natural key
                    field_value = [getInnerText(k).strip() for k in keys]
                    obj_pk = field.rel.to._default_manager.db_manager(self.db).get_by_natural_key(*field_value).pk
                else:
                    # Otherwise, treat like a normal PK value.
                    obj_pk = field.rel.to._meta.pk.to_python(n.getAttribute('pk'))
                return obj_pk
        else:
            m2m_convert = lambda n: field.rel.to._meta.pk.to_python(n.getAttribute('pk'))
        return [m2m_convert(c) for c in node.getElementsByTagName("object")]

    def _get_model_from_node(self, node, attr):
        """
        Helper to look up a model from a <object model=...> or a <field
        rel=... to=...> node.
        """
        model_identifier = node.getAttribute(attr)
        if not model_identifier:
            raise base.DeserializationError(
                "<%s> node is missing the required '%s' attribute" \
                % (node.nodeName, attr))
        try:
            Model = models.get_model(*model_identifier.split("."))
        except TypeError:
            Model = None
        if Model is None:
            raise base.DeserializationError(
                "<%s> node has invalid model identifier: '%s'" % \
                (node.nodeName, model_identifier))
        return Model
def getInnerText(node):
    """
    Get all the inner text of a DOM node (recursively).
    """
    # Collect text/CDATA data, descending into element children;
    # everything else (comments, PIs, ...) is ignored.
    chunks = []
    for child in node.childNodes:
        node_type = child.nodeType
        if node_type in (child.TEXT_NODE, child.CDATA_SECTION_NODE):
            chunks.append(child.data)
        elif node_type == child.ELEMENT_NODE:
            chunks.append(getInnerText(child))
    return u"".join(chunks)
| apache-2.0 |
brownharryb/erpnext | erpnext/utilities/user_progress_utils.py | 7 | 6851 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe, erpnext
import json
from frappe import _
from frappe.utils import flt
from erpnext.setup.doctype.setup_progress.setup_progress import update_domain_actions, get_domain_actions_state
@frappe.whitelist()
def set_sales_target(args_data):
	"""Store the wizard's monthly sales target on the default Company."""
	params = json.loads(args_data)
	company = frappe.defaults.get_defaults().get("company")
	frappe.db.set_value("Company", company,
		"monthly_sales_target", params.get('monthly_sales_target'))
@frappe.whitelist()
def create_customers(args_data):
	"""Create up to three Customer records (customer_1..customer_3) from the
	setup-wizard args, plus an optional Contact for each
	(customer_contact_1..3).  Duplicate names are silently skipped."""
	args = json.loads(args_data)
	defaults = frappe.defaults.get_defaults()
	for i in range(1,4):
		customer = args.get("customer_" + str(i))
		if customer:
			try:
				doc = frappe.get_doc({
					"doctype":"Customer",
					"customer_name": customer,
					"customer_type": "Company",
					"customer_group": _("Commercial"),
					"territory": defaults.get("country"),
					"company": defaults.get("company")
				}).insert()

				if args.get("customer_contact_" + str(i)):
					create_contact(args.get("customer_contact_" + str(i)),
						"Customer", doc.name)
			except frappe.NameError:
				# Customer already exists (e.g. wizard re-run); skip it.
				pass
@frappe.whitelist()
def create_letterhead(args_data):
	"""Create the default "Standard" Letter Head wrapping the uploaded
	letterhead image URL in an <img> tag.  Duplicates are skipped."""
	args = json.loads(args_data)
	letterhead = args.get("letterhead")
	if letterhead:
		try:
			frappe.get_doc({
				"doctype":"Letter Head",
				# NOTE(review): under Python 3, .encode('utf-8') yields bytes
				# which str.format renders as "b'...'" -- verify on py3.  Also
				# '%%' is a %-formatting escape; with .format it stays literal
				# in the emitted CSS -- confirm intended.
				"content":"""<div><img src="{0}" style='max-width: 100%%;'><br></div>""".format(letterhead.encode('utf-8')),
				"letter_head_name": _("Standard"),
				"is_default": 1
			}).insert()
		except frappe.NameError:
			# Letter Head already exists; skip it.
			pass
@frappe.whitelist()
def create_suppliers(args_data):
	"""Create up to three Supplier records (supplier_1..supplier_3) from the
	setup-wizard args, plus an optional Contact for each
	(supplier_contact_1..3).  Duplicate names are silently skipped."""
	args = json.loads(args_data)
	defaults = frappe.defaults.get_defaults()
	for i in range(1,4):
		supplier = args.get("supplier_" + str(i))
		if supplier:
			try:
				doc = frappe.get_doc({
					"doctype":"Supplier",
					"supplier_name": supplier,
					"supplier_group": _("Local"),
					"company": defaults.get("company")
				}).insert()

				if args.get("supplier_contact_" + str(i)):
					create_contact(args.get("supplier_contact_" + str(i)),
						"Supplier", doc.name)
			except frappe.NameError:
				# Supplier already exists (e.g. wizard re-run); skip it.
				pass
def create_contact(contact, party_type, party):
	"""Create contact based on given contact name"""
	tokens = contact.split(" ")
	doc = frappe.get_doc({
		"doctype": "Contact",
		"first_name": tokens[0],
		# Original behaviour preserved: only the second token becomes
		# the last name; any further tokens are dropped.
		"last_name": tokens[1] if len(tokens) > 1 else ""
	})
	doc.append('links', dict(link_doctype=party_type, link_name=party))
	doc.insert()
@frappe.whitelist()
def create_items(args_data):
	"""Create up to three stock Items (item_1..item_3) from the setup-wizard
	args; for newly created items, also create selling and buying Item
	Prices when item_price_N is given.  Duplicate item codes are skipped."""
	args = json.loads(args_data)
	defaults = frappe.defaults.get_defaults()
	for i in range(1,4):
		item = args.get("item_" + str(i))
		if item:
			default_warehouse = ""
			default_warehouse = frappe.db.get_value("Warehouse", filters={
				"warehouse_name": _("Finished Goods"),
				"company": defaults.get("company_name")
			})
			try:
				frappe.get_doc({
					"doctype":"Item",
					"item_code": item,
					"item_name": item,
					"description": item,
					"show_in_website": 1,
					"is_sales_item": 1,
					"is_purchase_item": 1,
					"is_stock_item": 1,
					"item_group": _("Products"),
					"stock_uom": _(args.get("item_uom_" + str(i))),
					"item_defaults": [{
						"default_warehouse": default_warehouse,
						"company": defaults.get("company_name")
					}]
				}).insert()
			except frappe.NameError:
				# Item already exists; skip it (and its prices).
				pass
			else:
				# Only price newly created items.
				if args.get("item_price_" + str(i)):
					item_price = flt(args.get("item_price_" + str(i)))

					price_list_name = frappe.db.get_value("Price List", {"selling": 1})
					make_item_price(item, price_list_name, item_price)
					price_list_name = frappe.db.get_value("Price List", {"buying": 1})
					make_item_price(item, price_list_name, item_price)
def make_item_price(item, price_list_name, item_price):
	"""Insert an Item Price row for *item* on the given price list."""
	price_doc = frappe.get_doc({
		"doctype": "Item Price",
		"price_list": price_list_name,
		"item_code": item,
		"price_list_rate": item_price
	})
	price_doc.insert()
# Education
@frappe.whitelist()
def create_program(args_data):
	"""Create up to three Program records (program_1..program_3)."""
	args = json.loads(args_data)
	for i in range(1,4):
		if args.get("program_" + str(i)):
			program = frappe.new_doc("Program")
			program.program_code = args.get("program_" + str(i))
			program.program_name = args.get("program_" + str(i))
			try:
				program.save()
			except frappe.DuplicateEntryError:
				# Already created (e.g. wizard re-run); ignore.
				pass

@frappe.whitelist()
def create_course(args_data):
	"""Create up to three Course records (course_1..course_3)."""
	args = json.loads(args_data)
	for i in range(1,4):
		if args.get("course_" + str(i)):
			course = frappe.new_doc("Course")
			course.course_code = args.get("course_" + str(i))
			course.course_name = args.get("course_" + str(i))
			try:
				course.save()
			except frappe.DuplicateEntryError:
				# Already created (e.g. wizard re-run); ignore.
				pass

@frappe.whitelist()
def create_instructor(args_data):
	"""Create up to three Instructor records (instructor_1..instructor_3)."""
	args = json.loads(args_data)
	for i in range(1,4):
		if args.get("instructor_" + str(i)):
			instructor = frappe.new_doc("Instructor")
			instructor.instructor_name = args.get("instructor_" + str(i))
			try:
				instructor.save()
			except frappe.DuplicateEntryError:
				# Already created (e.g. wizard re-run); ignore.
				pass

@frappe.whitelist()
def create_room(args_data):
	"""Create up to three Room records (room_1..room_3) with capacity."""
	args = json.loads(args_data)
	for i in range(1,4):
		if args.get("room_" + str(i)):
			room = frappe.new_doc("Room")
			room.room_name = args.get("room_" + str(i))
			room.seating_capacity = args.get("room_capacity_" + str(i))
			try:
				room.save()
			except frappe.DuplicateEntryError:
				# Already created (e.g. wizard re-run); ignore.
				pass
@frappe.whitelist()
def create_users(args_data):
    """Create up to three System Users, each with a matching Employee record,
    from the setup-wizard form data (JSON string).

    Does nothing when the current session user is 'Administrator'.
    """
    if frappe.session.user == 'Administrator':
        return
    args = json.loads(args_data)
    defaults = frappe.defaults.get_defaults()
    for i in range(1,4):
        email = args.get("user_email_" + str(i))
        fullname = args.get("user_fullname_" + str(i))
        if email:
            if not fullname:
                # fall back to the mailbox part of the address
                fullname = email.split("@")[0]
            # split into first name + remainder-as-last-name
            parts = fullname.split(" ", 1)
            user = frappe.get_doc({
                "doctype": "User",
                "email": email,
                "first_name": parts[0],
                "last_name": parts[1] if len(parts) > 1 else "",
                "enabled": 1,
                "user_type": "System User"
            })
            # default roles
            user.append_roles("Projects User", "Stock User", "Support Team")
            user.flags.delay_emails = True
            # only insert (and create an Employee) when the user is new
            if not frappe.db.get_value("User", email):
                user.insert(ignore_permissions=True)
                # create employee
                # NOTE(review): uses defaults.get("company") while item creation
                # elsewhere uses "company_name" — confirm which key is correct
                emp = frappe.get_doc({
                    "doctype": "Employee",
                    "employee_name": fullname,
                    "user_id": email,
                    "status": "Active",
                    "company": defaults.get("company")
                })
                emp.flags.ignore_mandatory = True
                emp.insert(ignore_permissions = True)
# Enumerate the setup hooks you're going to need, apart from the slides
@frappe.whitelist()
def update_default_domain_actions_and_get_state():
    """Run the domain-specific onboarding actions for the default company's
    domain, then return their current completion state."""
    domain = frappe.get_cached_value('Company', erpnext.get_default_company(), 'domain')
    update_domain_actions(domain)
    return get_domain_actions_state(domain)
| gpl-3.0 |
lizardsystem/flooding | flooding_lib/tasks/calculaterisespeed_132.py | 3 | 9870 | #!c:/python25/python.exe
# -*- coding: utf-8 -*-
#***********************************************************************
# This file is part of the nens library.
#
# the nens library is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# the nens library is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty
# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with the nens libraray. If not, see
# <http://www.gnu.org/licenses/>.
#
# Copyright 2008, 2009 Nelen & Schuurmans
#*
#***********************************************************************
#* Library : <if this is a module, what is its name>
#* Purpose :
#* Function : main
#* Usage : calculaterisespeed.py --help
#*
#* Project : K0115
#*
#* $Id: calculaterisespeed_132.py 9992 2010-03-15 10:13:14Z Mario $
#*
#* initial programmer : Mario Frasca
#* initial date : 20081210
#* changed by : Alexandr Seleznev
#* changed at : 20120601
#* changes : integration with django, pylint, pep8
#**********************************************************************
__revision__ = "$Rev: 9992 $"[6:-2]
"""this script computes the water level rise speed needed by the
module HISSSM. please refer to Ticket:1092.
"""
import os
import logging
log = logging.getLogger('nens')
import nens.asc
from django import db
from zipfile import ZipFile, ZIP_DEFLATED
from flooding_lib.models import Scenario, Result, ResultType
from flooding_base.models import Setting
def set_broker_logging_handler(broker_handler=None):
    """Attach *broker_handler* to the module logger.

    When no handler is given, a warning is emitted so the caller notices
    that broker-side log forwarding is disabled.
    """
    if broker_handler is not None:
        log.addHandler(broker_handler)
    else:
        # fixed grammar of the original message ("does not set")
        log.warning("Broker logging handler is not set.")
def perform_calculation(scenario_id, tmp_location, timeout=0):
    """Compute water-level rise-speed grids for the given scenario.

    Restores the scenario's stored result archives (fls_h.inc, dm1maxd0.asc)
    from DESTINATION_DIR into *tmp_location*, derives the arrival-time
    (grid_ta), deadly-arrival (grid_td), first-meters rise-speed (grid_dh)
    and maximum rise-speed (grid_ss) grids, then zips them back as results
    on the scenario.  Returns True on completion.

    NOTE(review): the *timeout* parameter is accepted but never used.
    """
    log.debug("step 0b: get the settings for scenario '%s'." % scenario_id)
    log.debug("0b1: scenario_id, region_id, breach_id")
    scenario = Scenario.objects.get(pk=scenario_id)
    log.debug("0b2: destination_dir")
    destination_dir = Setting.objects.get(key='DESTINATION_DIR').value
    log.debug("0c: resetting to forward-slash")
    location = tmp_location.replace("\\", "/")
    if not location.endswith("/"):
        location += "/"
    log.debug("0f: restore the files from the database.")
    # unpack the needed members from each result archive into the work dir
    for resulttype, names in [
            (15, ['fls_h.inc']),  # "fls_import.zip"
            (18, ['fls_h.inc']),  # "fls_import.zip"
            (1, ['dm1maxd0.asc']),
            ]:
        try:
            resultloc = scenario.result_set.get(
                resulttype=ResultType.objects.get(pk=resulttype)).resultloc
            input_file = ZipFile(os.path.join(destination_dir, resultloc), "r")
            for name in names:
                try:
                    content = input_file.read(name)
                    temp = file(os.path.join(location, name.lower()), "wb")
                    temp.write(content)
                    temp.close()
                except KeyError:
                    log.debug('file %s not found in archive' % name)
        except Result.DoesNotExist as e:
            log.info('inputfile of resulttype %s not found' % resulttype)
            log.debug(','.join(map(str, e.args)))
    log.debug(
        "0g:retrieve dm1maxd0 from gridmaxwaterdepth as to get default shape.")
    def_grid = None
    def_name = 'dm1maxd0.asc'
    import stat
    try:
        ref_result = scenario.result_set.filter(resulttype__id=1)[0].resultloc
        if os.stat(
                os.path.join(destination_dir, ref_result))[stat.ST_SIZE] == 0:
            log.warning("input file '%s' is empty" % ref_result)
        else:
            input_file = ZipFile(os.path.join(destination_dir, ref_result))
            def_grid = nens.asc.AscGrid(data=input_file, name=def_name)
    except Scenario.DoesNotExist:
        log.warning("Reference grid does not exist")
    log.debug("step 3: use the fls_h.inc (sequence of water levels) " \
        "into grid_dh.asc (maximum water raise speed)")
    input_name = "fls_h.inc"
    # delta_t: interval between the first two timestamps in the .inc stream
    first_timestamps_generator = nens.asc.AscGrid.xlistFromStream(
        os.path.join(location, input_name), just_count=True,
        default_grid=def_grid)
    first_timestamp, _ = first_timestamps_generator.next()
    second_timestamp, _ = first_timestamps_generator.next()
    delta_t = second_timestamp - first_timestamp
    # arrival: per-pixel first timestamp with any water present
    arrival, arrival_value = nens.asc.AscGrid.firstTimestampWithValue(
        os.path.join(location, input_name), default_grid=def_grid)
    temp = file(os.path.join(location, 'grid_ta.asc'), 'wb')
    arrival.writeToStream(temp)
    temp.close()
    # deadly: per-pixel first timestamp with water level >= 1.5
    deadly, deadly_value = nens.asc.AscGrid.firstTimestampWithValue(
        os.path.join(location, input_name),
        threshold=1.5, default_grid=def_grid)
    temp = file(location + 'grid_td.asc', 'wb')
    deadly.writeToStream(temp)
    temp.close()
    time_difference = nens.asc.AscGrid.apply(
        lambda x, y: x - y, deadly, arrival)
    value_difference = nens.asc.AscGrid.apply(
        lambda x: x - 0.02, deadly_value)

    def speedFirstMetersFunction(x_value, y_value):
        # rise speed for one pixel; when deadly level is reached in the very
        # first interval (y == 0), spread the rise over delta_t instead
        if y_value == 0:
            return (x_value + 0.3) / delta_t
        else:
            return x_value / y_value

    def speedFirstMetersFunctionLoop(x, y):
        # element-wise speedFirstMetersFunction over two grids; TypeError
        # (None cells) leaves the original value untouched
        result = x.copy()
        for col in range(len(x)):
            for row in range(len(x[0])):
                x_value = x[col][row]
                y_value = y[col][row]
                try:
                    if y_value == 0:
                        result[col][row] = (x_value + 0.3) / delta_t
                    else:
                        result[col][row] = x_value / y_value
                except TypeError:
                    pass
        return result

    def fillInTheSpeedBlanks(speed, wet):
        "if water arrives but does not reach deadly level, return 0"
        result = speed.copy()
        for col in range(len(speed)):
            for row in range(len(speed[0])):
                speed_value = speed[col][row]
                wet_value = wet[col][row]
                try:
                    if wet_value > 0 and not speed_value > 0:
                        result[col][row] = 0.0
                    else:
                        result[col][row] = speed_value
                except TypeError:
                    pass
        return result

    speedFirstMeters = nens.asc.AscGrid.apply(
        speedFirstMetersFunctionLoop, value_difference, time_difference)
    speedFirstMeters = nens.asc.AscGrid.apply(
        fillInTheSpeedBlanks, speedFirstMeters, arrival_value)
    temp = file(location + 'grid_dh.asc', 'wb')
    speedFirstMeters.writeToStream(temp)
    temp.close()

    def computeMaxSpeed(value_tsgrid):
        """compute maximum speed of water raise as of Ticket:1532

        'value_tsgrid' is an ordered list of pairs, associating the
        values from the .inc file to the grids holding the timestamps
        for which the value is first reached for the pixel.

        return value is a grid containing the maximum raise speed.
        """
        result = value_tsgrid[0][1].copy()
        for col in range(1, result.ncols + 1):
            for row in range(1, result.nrows + 1):
                if arrival[col, row] is not None:
                    for value, ts in value_tsgrid:
                        if value < 1.5:
                            continue  # below deadly
                        if ts[col, row] is None:
                            continue  # value not present for timestamp
                        # includes deadly at arrival case
                        speed = speedFirstMetersFunction(
                            value, ts[col, row] - arrival[col, row])
                        result[col, row] = max(speed, result[col, row])
        return result

    value_tsgrid = nens.asc.AscGrid.firstTimestamp(
        location + input_name, threshold=True, default_grid=def_grid)
    maxWaterRaiseSpeed = computeMaxSpeed(value_tsgrid)
    temp = file(location + 'grid_ss.asc', 'wb')
    maxWaterRaiseSpeed.writeToStream(temp)
    temp.close()
    log.debug("step 5: store the output files and the fact that they exist")
    for dirname, filename, zipfilename, resulttype, unit, value in [
            ('.', 'grid_dh.asc', 'griddh.zip', 19, None, None),
            ('.', 'grid_ss.asc', 'gridss.zip', 23, None, None),
            ('.', 'grid_ta.asc', 'gridta.zip', 21, None, None),
            ('.', 'grid_td.asc', 'gridtd.zip', 22, None, None), ]:
        resultloc = os.path.join(scenario.get_rel_destdir(), zipfilename)
        content = file(os.path.join(location, dirname, filename), 'rb').read()
        output_file = ZipFile(os.path.join(destination_dir, resultloc),
                              mode="w",
                              compression=ZIP_DEFLATED)
        output_file.writestr(filename, content)
        output_file.close()
        # register (or refresh) the result row pointing at the new archive
        result, new = scenario.result_set.get_or_create(
            resulttype=ResultType.objects.get(pk=resulttype))
        result.resultloc = resultloc
        result.unit = unit
        result.value = value
        result.save()
    log.debug("Finish task.")
    log.debug("close db connection to avoid an idle process.")
    db.close_connection()
    return True
| gpl-3.0 |
prantlf/node-gyp | gyp/pylib/gyp/MSVSToolFile.py | 2736 | 1804 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio project reader/writer."""
import gyp.common
import gyp.easy_xml as easy_xml
class Writer(object):
    """Writes a Visual Studio tool file (custom build rules) as XML."""

    def __init__(self, tool_file_path, name):
        """Initializes the tool file.

        Args:
          tool_file_path: Path to the tool file.
          name: Name of the tool file.
        """
        self.tool_file_path = tool_file_path
        self.name = name
        self.rules_section = ['Rules']

    def AddCustomBuildRule(self, name, cmd, description,
                           additional_dependencies,
                           outputs, extensions):
        """Adds a rule to the tool file.

        Args:
          name: Name of the rule.
          description: Description of the rule.
          cmd: Command line of the rule.
          additional_dependencies: other files which may trigger the rule.
          outputs: outputs of the rule.
          extensions: extensions handled by the rule.
        """
        attributes = {
            'Name': name,
            'ExecutionDescription': description,
            'CommandLine': cmd,
            'Outputs': ';'.join(outputs),
            'FileExtensions': ';'.join(extensions),
            'AdditionalDependencies': ';'.join(additional_dependencies),
        }
        self.rules_section.append(['CustomBuildRule', attributes])

    def WriteIfChanged(self):
        """Writes the tool file, only touching disk if the content changed."""
        document = ['VisualStudioToolFile',
                    {'Version': '8.00',
                     'Name': self.name},
                    self.rules_section]
        easy_xml.WriteXmlIfChanged(document, self.tool_file_path,
                                   encoding="Windows-1252")
| mit |
ray-project/ray | release/tune_tests/scalability_tests/workloads/test_network_overhead.py | 1 | 1297 | """Networking overhead (100 trials on 100 nodes)
In this run, we will start 100 trials and run them on 100 different nodes.
This test will thus measure the overhead that comes with network communication
and specifically log synchronization.
Cluster: cluster_100x2.yaml
Test owner: krfricke
Acceptance criteria: Should run faster than 500 seconds.
Theoretical minimum time: 300 seconds
"""
import argparse
import ray
from ray import tune
from ray.tune.utils.release_test_util import timed_tune_run
def main(smoke_test: bool = False):
    """Run the network-overhead Tune release test.

    In smoke-test mode only 20 trials are started instead of 100.
    """
    ray.init(address="auto")

    if smoke_test:
        num_samples = 20
    else:
        num_samples = 100

    timed_tune_run(
        name="result network overhead",
        num_samples=num_samples,
        results_per_second=0.01,
        trial_length_s=300,
        max_runtime=1000,
        resources_per_trial={"cpu": 2},  # One per node
        sync_config=tune.SyncConfig(sync_to_driver=True))
# CLI entry point: pass --smoke-test for a quick, reduced-size run.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # NOTE(review): help text says "training"; likely meant "testing" — confirm.
    parser.add_argument(
        "--smoke-test",
        action="store_true",
        default=False,
        help="Finish quickly for training.")
    args = parser.parse_args()
    main(args.smoke_test)
| apache-2.0 |
arahuja/scikit-learn | sklearn/feature_extraction/tests/test_dict_vectorizer.py | 276 | 3790 | # Authors: Lars Buitinck <L.J.Buitinck@uva.nl>
# Dan Blanchard <dblanchard@ets.org>
# License: BSD 3 clause
from random import Random
import numpy as np
import scipy.sparse as sp
from numpy.testing import assert_array_equal
from sklearn.utils.testing import (assert_equal, assert_in,
assert_false, assert_true)
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_selection import SelectKBest, chi2
def test_dictvectorizer():
    # Exercise DictVectorizer across the cartesian product of its main
    # constructor options (sparse / dtype / sort) and input container types
    # (list vs. generic iterator), checking round-tripping and feature order.
    D = [{"foo": 1, "bar": 3},
         {"bar": 4, "baz": 2},
         {"bar": 1, "quux": 1, "quuux": 2}]

    for sparse in (True, False):
        for dtype in (int, np.float32, np.int16):
            for sort in (True, False):
                for iterable in (True, False):
                    v = DictVectorizer(sparse=sparse, dtype=dtype, sort=sort)
                    X = v.fit_transform(iter(D) if iterable else D)

                    assert_equal(sp.issparse(X), sparse)
                    assert_equal(X.shape, (3, 5))
                    assert_equal(X.sum(), 14)
                    assert_equal(v.inverse_transform(X), D)

                    if sparse:
                        # CSR matrices can't be compared for equality
                        assert_array_equal(X.A, v.transform(iter(D) if iterable
                                                            else D).A)
                    else:
                        assert_array_equal(X, v.transform(iter(D) if iterable
                                                          else D))

                    if sort:
                        assert_equal(v.feature_names_,
                                     sorted(v.feature_names_))
def test_feature_selection():
    # Two feature dicts sharing 20 uninformative features; only useful1 and
    # useful2 discriminate between them under the chi2 criterion.
    useless = {"useless%d" % i: 10 for i in range(20)}
    d1 = dict(useless, useful1=1, useful2=20)
    d2 = dict(useless, useful1=20, useful2=1)

    for indices in (True, False):
        vectorizer = DictVectorizer().fit([d1, d2])
        X = vectorizer.transform([d1, d2])
        selector = SelectKBest(chi2, k=2).fit(X, [0, 1])

        vectorizer.restrict(selector.get_support(indices=indices),
                            indices=indices)
        assert_equal(vectorizer.get_feature_names(), ["useful1", "useful2"])
def test_one_of_k():
    # String feature values trigger one-of-K ("feature=value") encoding.
    measurements = [{"version": "1", "ham": 2},
                    {"version": "2", "spam": .3},
                    {"version=3": True, "spam": -1}]
    vectorizer = DictVectorizer()
    X = vectorizer.fit_transform(measurements)
    assert_equal(X.shape, (3, 5))

    decoded = vectorizer.inverse_transform(X)
    assert_equal(decoded[0], {"version=1": 1, "ham": 2})

    feature_names = vectorizer.get_feature_names()
    assert_true("version=2" in feature_names)
    assert_false("version" in feature_names)
def test_unseen_or_no_features():
    D = [{"camelot": 0, "spamalot": 1}]
    for sparse in (True, False):
        v = DictVectorizer(sparse=sparse).fit(D)

        # a sample with only unseen features maps to an all-zero row
        X = v.transform({"push the pram a lot": 2})
        if sparse:
            X = X.toarray()
        assert_array_equal(X, np.zeros((1, 2)))

        # an empty sample also maps to an all-zero row
        X = v.transform({})
        if sparse:
            X = X.toarray()
        assert_array_equal(X, np.zeros((1, 2)))

        # an empty *batch* raises a ValueError mentioning "empty"
        try:
            v.transform([])
        except ValueError as e:
            assert_in("empty", str(e))
def test_deterministic_vocabulary():
    # Two dicts with identical content but different memory layout / insertion
    # order must produce the same fitted vocabulary.
    items = [("%03d" % i, i) for i in range(1000)]
    rng = Random(42)
    d_sorted = dict(items)
    rng.shuffle(items)
    d_shuffled = dict(items)

    vocab_1 = DictVectorizer().fit([d_sorted]).vocabulary_
    vocab_2 = DictVectorizer().fit([d_shuffled]).vocabulary_
    assert_equal(vocab_1, vocab_2)
| bsd-3-clause |
lifanov/cobbler | cobbler/item_package.py | 15 | 2384 | """
Copyright 2006-2009, MadHatter
Kelsey Hightower <kelsey.hightower@gmail.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
import resource
from cobbler.cexceptions import CX
from cobbler.utils import _
# this data structure is described in item.py
# each row appears to be: [name, default, subobject-default, display name,
# editable-in-UI, tooltip, valid values, type] — see item.py for the
# authoritative column layout.
FIELDS = [
    # non-editable in UI (internal)
    ["ctime", 0, 0, "", False, "", 0, "float"],
    ["depth", 2, 0, "", False, "", 0, "float"],
    ["mtime", 0, 0, "", False, "", 0, "float"],
    ["uid", "", 0, "", False, "", 0, "str"],

    # editable in UI
    ["action", "create", 0, "Action", True, "Install or remove package resource", 0, "str"],
    ["comment", "", 0, "Comment", True, "Free form text description", 0, "str"],
    ["installer", "yum", 0, "Installer", True, "Package Manager", 0, "str"],
    ["name", "", 0, "Name", True, "Name of file resource", 0, "str"],
    ["owners", "SETTINGS:default_ownership", 0, "Owners", True, "Owners list for authz_ownership (space delimited)", [], "list"],
    ["version", "", 0, "Version", True, "Package Version", 0, "str"],
]
class Package(resource.Resource):
    """A package resource: a software package to be installed or removed
    through a package manager (e.g. yum)."""

    TYPE_NAME = _("package")
    COLLECTION_TYPE = "package"

    # overrides of base class methods (item.Item)

    def make_clone(self):
        """Return a new Package populated from this item's dict form."""
        clone = Package(self.collection_mgr)
        clone.from_dict(self.to_dict())
        return clone

    def get_fields(self):
        """Return the FIELDS metadata describing this item type."""
        return FIELDS

    def check_if_valid(self):
        """Raise CX when mandatory attributes are missing."""
        if self.name is None or self.name == "":
            raise CX("name is required")

    # methods specific to item.Package

    def set_installer(self, installer):
        """Set the package manager handling this package (stored lowercased)."""
        self.installer = installer.lower()

    def set_version(self, version):
        """Set the package version string."""
        self.version = version
# EOF
| gpl-2.0 |
nephila/django-filer | runtests.py | 4 | 2515 | #!/usr/bin/env python
import argparse
import os
import sys
import warnings
from filer.test_utils.cli import configure
from filer.test_utils.tmpdir import temp_dir
from filer.test_utils.cli import configure
from filer.test_utils.tmpdir import temp_dir
def main(verbosity=1, failfast=False, test_labels=None, migrate=False,
         filer_image_model=False):
    """Configure a throw-away Django test environment (temporary static,
    media and upload dirs) and run the filer test suite.

    Exits the process with the number of test failures as status code.
    """
    verbosity = int(verbosity)
    with temp_dir() as STATIC_ROOT:
        with temp_dir() as MEDIA_ROOT:
            with temp_dir() as FILE_UPLOAD_TEMP_DIR:
                from django import VERSION
                # USE_TZ only exists from Django 1.4 on
                use_tz = VERSION[:2] >= (1, 4)
                # from Django 1.6 the default runner discovers tests in
                # the 'filer.tests' package instead of the app label
                test_suffix = ""
                if VERSION[:2] >= (1, 6):
                    test_suffix = ".tests"
                if not test_labels:
                    test_labels = ['filer%s' % test_suffix]
                else:
                    test_labels = ["filer%s.%s" % (test_suffix, label) for label in test_labels]
                # turn naive-datetime warnings into hard errors
                warnings.filterwarnings(
                    'error', r"DateTimeField received a naive datetime",
                    RuntimeWarning, r'django\.db\.models\.fields')
                configure(
                    ROOT_URLCONF='test_urls',
                    STATIC_ROOT=STATIC_ROOT, MEDIA_ROOT=MEDIA_ROOT,
                    FILE_UPLOAD_TEMP_DIR=FILE_UPLOAD_TEMP_DIR,
                    SOUTH_TESTS_MIGRATE=migrate,
                    FILER_IMAGE_MODEL=filer_image_model,
                    USE_TZ=use_tz)
                from django.conf import settings
                from django.test.utils import get_runner
                TestRunner = get_runner(settings)
                test_runner = TestRunner(verbosity=verbosity, interactive=False, failfast=failfast)
                failures = test_runner.run_tests(test_labels)
                sys.exit(failures)
# CLI entry point for running the test suite directly.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--failfast', action='store_true', default=False,
                        dest='failfast')
    parser.add_argument('--verbosity', default=1)
    # NOTE(review): store_true combined with default=True means --migrate can
    # never be turned off from the command line — confirm whether a
    # --no-migrate flag was intended instead.
    parser.add_argument('--migrate', action='store_true', default=True)
    # custom image model can also be supplied via the CUSTOM_IMAGE env var
    parser.add_argument('--custom-image', action='store', default=os.environ.get('CUSTOM_IMAGE', False))
    parser.add_argument('test_labels', nargs='*')
    args = parser.parse_args()
    test_labels = ['%s' % label for label in args.test_labels]
    main(verbosity=args.verbosity, failfast=args.failfast,
         test_labels=test_labels, migrate=args.migrate, filer_image_model=args.custom_image)
| bsd-3-clause |
cdiener/pyart | asciinator.py | 1 | 1723 | #!/usr/bin/env python
# asciinator.py
#
# Copyright 2014 Christian Diener <ch.diener@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
from __future__ import print_function # for python2 compat
import sys;
from PIL import Image;
import numpy as np
# ascii chars sorted by "density"
chars = np.asarray(list(' .,:;irsXA253hMHGS#9B&@'))

# check command line arguments
if len(sys.argv) != 4:
    print('Usage: asciinator.py image scale factor')
    sys.exit()

# basic parameters: file name, output scale, gamma correction factor and a
# width correction factor compensating for non-square terminal characters
f = sys.argv[1]
SC = float(sys.argv[2])
GCF = float(sys.argv[3])
WCF = 7.0 / 4.0

# open, scale and normalize image by pixel intensities
img = Image.open(f)
target_size = (int(img.size[0] * SC * WCF), int(img.size[1] * SC))
img = np.sum(np.asarray(img.resize(target_size), dtype="float"), axis=2)
img -= img.min()
img = (1.0 - img / img.max()) ** GCF * (chars.size - 1)

# Assemble and print ascii art
print("\n".join("".join(r) for r in chars[img.astype(int)]))
print()
| gpl-3.0 |
isaac-s/cloudify-plugins-common | cloudify/decorators.py | 2 | 15492 | ########
# Copyright (c) 2013 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import traceback
import copy
import sys
import Queue
from threading import Thread
from StringIO import StringIO
from functools import wraps
from cloudify import context
from cloudify.workflows.workflow_context import (
CloudifyWorkflowContext,
CloudifySystemWideWorkflowContext)
from cloudify.manager import update_execution_status, get_rest_client
from cloudify.workflows import api
from cloudify_rest_client.executions import Execution
from cloudify import exceptions
from cloudify.state import current_ctx, current_workflow_ctx
def _stub_task(fn):
return fn
# Use the real celery task decorator when cloudify-agent is importable;
# otherwise fall back to the no-op stub so this module still imports.
try:
    from cloudify_agent.app import app as _app
    _task = _app.task
except ImportError as e:
    _app = None
    _task = _stub_task


CLOUDIFY_ID_PROPERTY = '__cloudify_id'
CLOUDIFY_NODE_STATE_PROPERTY = 'node_state'
# kwarg key under which the serialized cloudify context is passed
CLOUDIFY_CONTEXT_PROPERTY_KEY = '__cloudify_context'
# dict key identifying a raw cloudify context dict
CLOUDIFY_CONTEXT_IDENTIFIER = '__cloudify_context'
def _is_cloudify_context(obj):
    """
    Gets whether the provided obj is a CloudifyContext instance.

    For some reason Python's isinstance returned False when it should
    have returned True, so the check is done by class name instead.
    """
    return context.CloudifyContext.__name__ in obj.__class__.__name__
def _find_context_arg(args, kwargs, is_context):
    """
    Find the cloudify context among *args* and *kwargs*.

    A context is either an object satisfying *is_context*, or a dict
    carrying the cloudify context identifier key (as passed from the
    workflow engine).  Falls back to the reserved kwargs entry.
    """
    def _is_raw_context(candidate):
        return (isinstance(candidate, dict) and
                CLOUDIFY_CONTEXT_IDENTIFIER in candidate)

    for candidate in args:
        if is_context(candidate) or _is_raw_context(candidate):
            return candidate
    for candidate in kwargs.values():
        if is_context(candidate):
            return candidate
    return kwargs.get(CLOUDIFY_CONTEXT_PROPERTY_KEY)
def operation(func=None, **arguments):
    """
    Decorate plugin operation function with this decorator.

    Internally, if celery is installed, will also wrap the function
    with a ``@celery.task`` decorator

    The ``ctx`` injected to the function arguments is of type
    ``cloudify.context.CloudifyContext``

    The ``ctx`` object can also be accessed by importing
    ``cloudify.ctx``

    Example::

        from cloudify import ctx

        @operation
        def start(**kwargs):
            pass
    """
    if func is not None:
        @wraps(func)
        def wrapper(*args, **kwargs):
            ctx = _find_context_arg(args, kwargs, _is_cloudify_context)
            if ctx is None:
                ctx = {}
            if not _is_cloudify_context(ctx):
                ctx = context.CloudifyContext(ctx)
                # remove __cloudify_context
                raw_context = kwargs.pop(CLOUDIFY_CONTEXT_PROPERTY_KEY, {})
                if ctx.task_target is None:
                    # task is local (not through celery) so we need to
                    # clone kwarg
                    kwargs = copy.deepcopy(kwargs)
                if raw_context.get('has_intrinsic_functions') is True:
                    kwargs = ctx._endpoint.evaluate_functions(payload=kwargs)
            kwargs['ctx'] = ctx
            try:
                current_ctx.set(ctx, kwargs)
                result = func(*args, **kwargs)
            except BaseException as e:
                ctx.logger.error(
                    'Exception raised on operation [%s] invocation',
                    ctx.task_name, exc_info=True)
                if ctx.task_target is None:
                    # local task execution
                    # no serialization issues
                    raise
                # extract exception details
                # type, value, traceback
                tpe, value, tb = sys.exc_info()
                # we re-create the exception here
                # since it will be sent
                # over the wire. And the original exception
                # may cause de-serialization issues
                # on the other side.
                # preserve original type in the message
                message = '{0}: {1}'.format(tpe.__name__, str(e))
                # if the exception type is directly one of our exception
                # than there is no need for conversion and we can just
                # raise the original exception
                if type(e) in [exceptions.OperationRetry,
                               exceptions.RecoverableError,
                               exceptions.NonRecoverableError,
                               exceptions.HttpException]:
                    raise
                # if the exception inherits from our base exceptions, there
                # still might be a de-serialization problem caused by one of
                # the types in the inheritance tree.
                if isinstance(e, exceptions.NonRecoverableError):
                    value = exceptions.NonRecoverableError(message)
                elif isinstance(e, exceptions.OperationRetry):
                    value = exceptions.OperationRetry(message, e.retry_after)
                elif isinstance(e, exceptions.RecoverableError):
                    value = exceptions.RecoverableError(message, e.retry_after)
                else:
                    # convert pure user exceptions
                    # to a RecoverableError
                    value = exceptions.RecoverableError(message)
                # Python 2 three-argument raise: keep the original traceback
                raise type(value), value, tb
            finally:
                current_ctx.clear()
                if ctx.type == context.NODE_INSTANCE:
                    ctx.instance.update()
                elif ctx.type == context.RELATIONSHIP_INSTANCE:
                    ctx.source.instance.update()
                    ctx.target.instance.update()
            if ctx.operation._operation_retry:
                raise ctx.operation._operation_retry
            return result
        return _process_wrapper(wrapper, arguments)
    else:
        # decorator used with arguments: return a partial application
        def partial_wrapper(fn):
            return operation(fn, **arguments)
        return partial_wrapper
def workflow(func=None, system_wide=False, **arguments):
    """
    Decorate workflow functions with this decorator.

    Internally, if celery is installed, ``@workflow`` will also wrap
    the function with a ``@celery.task`` decorator

    The ``ctx`` injected to the function arguments is of type
    ``cloudify.workflows.workflow_context.CloudifyWorkflowContext`` or
    ``cloudify.workflows.workflow_context.CloudifySystemWideWorkflowContext``
    if ``system_wide`` flag is set to True.

    The ``ctx`` object can also be accessed by importing
    ``cloudify.workflows.ctx``

    ``system_wide`` flag turns this workflow into a system-wide workflow that
    is executed by the management worker and has access to an instance of
    ``cloudify.workflows.workflow_context.CloudifySystemWideWorkflowContext``
    as its context.

    Example::

        from cloudify.workflows import ctx

        @workflow
        def reinstall(**kwargs):
            pass
    """
    if system_wide:
        ctx_class = CloudifySystemWideWorkflowContext
    else:
        ctx_class = CloudifyWorkflowContext

    if func is not None:
        @wraps(func)
        def wrapper(*args, **kwargs):
            def is_ctx_class_instance(obj):
                return isinstance(obj, ctx_class)

            ctx = _find_context_arg(args, kwargs, is_ctx_class_instance)
            if not is_ctx_class_instance(ctx):
                ctx = ctx_class(ctx)
            kwargs['ctx'] = ctx
            # local executions run in-process; remote ones go through the
            # manager (status polling, cancellation support)
            if ctx.local:
                workflow_wrapper = _local_workflow
            else:
                workflow_wrapper = _remote_workflow
            return workflow_wrapper(ctx, func, args, kwargs)
        return _process_wrapper(wrapper, arguments)
    else:
        # decorator used with arguments: return a partial application
        def partial_wrapper(fn):
            return workflow(fn, system_wide, **arguments)
        return partial_wrapper
class RequestSystemExit(SystemExit):
    # Raised after a force-cancelled execution to request that the enclosing
    # worker process exit (see _remote_workflow).
    pass
def _remote_workflow(ctx, func, args, kwargs):
    """Execute a workflow function on behalf of the manager.

    Runs the workflow in a child thread while the calling (parent) thread
    polls the REST API for cancel / force-cancel requests and updates the
    execution status accordingly.  Returns the workflow's result, or
    api.EXECUTION_CANCELLED_RESULT when cancelled.
    """
    def update_execution_cancelled():
        update_execution_status(ctx.execution_id, Execution.CANCELLED)
        _send_workflow_cancelled_event(ctx)

    rest = get_rest_client()
    parent_queue, child_queue = (Queue.Queue(), Queue.Queue())
    try:
        if rest.executions.get(ctx.execution_id).status in \
                (Execution.CANCELLING, Execution.FORCE_CANCELLING):
            # execution has been requested to be cancelled before it
            # was even started
            update_execution_cancelled()
            return api.EXECUTION_CANCELLED_RESULT

        update_execution_status(ctx.execution_id, Execution.STARTED)
        _send_workflow_started_event(ctx)

        # the actual execution of the workflow will run in another
        # thread - this wrapper is the entry point for that
        # thread, and takes care of forwarding the result or error
        # back to the parent thread
        def child_wrapper():
            try:
                ctx.internal.start_event_monitor()
                workflow_result = _execute_workflow_function(
                    ctx, func, args, kwargs)
                child_queue.put({'result': workflow_result})
            except api.ExecutionCancelled:
                child_queue.put({
                    'result': api.EXECUTION_CANCELLED_RESULT})
            except BaseException as workflow_ex:
                tb = StringIO()
                traceback.print_exc(file=tb)
                err = {
                    'type': type(workflow_ex).__name__,
                    'message': str(workflow_ex),
                    'traceback': tb.getvalue()
                }
                child_queue.put({'error': err})
            finally:
                ctx.internal.stop_event_monitor()

        api.queue = parent_queue

        # starting workflow execution on child thread
        t = Thread(target=child_wrapper)
        t.start()

        # while the child thread is executing the workflow,
        # the parent thread is polling for 'cancel' requests while
        # also waiting for messages from the child thread
        has_sent_cancelling_action = False
        result = None
        execution = None
        while True:
            # check if child thread sent a message
            try:
                data = child_queue.get(timeout=5)
                if 'result' in data:
                    # child thread has terminated
                    result = data['result']
                    break
                else:
                    # error occurred in child thread
                    error = data['error']
                    raise exceptions.ProcessExecutionError(error['message'],
                                                           error['type'],
                                                           error['traceback'])
            except Queue.Empty:
                pass
            # check for 'cancel' requests
            execution = rest.executions.get(ctx.execution_id)
            if execution.status == Execution.FORCE_CANCELLING:
                result = api.EXECUTION_CANCELLED_RESULT
                break
            elif not has_sent_cancelling_action and \
                    execution.status == Execution.CANCELLING:
                # send a 'cancel' message to the child thread. It
                # is up to the workflow implementation to check for
                # this message and act accordingly (by stopping and
                # raising an api.ExecutionCancelled error, or by returning
                # the deprecated api.EXECUTION_CANCELLED_RESULT as result).
                # parent thread then goes back to polling for
                # messages from child process or possibly
                # 'force-cancelling' requests
                parent_queue.put({'action': 'cancel'})
                has_sent_cancelling_action = True

        # updating execution status and sending events according to
        # how the execution ended
        if result == api.EXECUTION_CANCELLED_RESULT:
            update_execution_cancelled()
            if execution and execution.status == Execution.FORCE_CANCELLING:
                # TODO: kill worker externally
                raise RequestSystemExit()
        else:
            update_execution_status(ctx.execution_id, Execution.TERMINATED)
            _send_workflow_succeeded_event(ctx)
        return result
    except RequestSystemExit:
        raise
    except BaseException as e:
        if isinstance(e, exceptions.ProcessExecutionError):
            error_traceback = e.traceback
        else:
            error = StringIO()
            traceback.print_exc(file=error)
            error_traceback = error.getvalue()
        update_execution_status(ctx.execution_id, Execution.FAILED,
                                error_traceback)
        _send_workflow_failed_event(ctx, e, error_traceback)
        raise
def _local_workflow(ctx, func, args, kwargs):
try:
_send_workflow_started_event(ctx)
result = _execute_workflow_function(ctx, func, args, kwargs)
_send_workflow_succeeded_event(ctx)
return result
except Exception, e:
error = StringIO()
traceback.print_exc(file=error)
_send_workflow_failed_event(ctx, e, error.getvalue())
raise
def _execute_workflow_function(ctx, func, args, kwargs):
    # Run the workflow function with local task processing enabled and the
    # workflow context installed as the thread's current workflow context.
    try:
        ctx.internal.start_local_tasks_processing()
        current_workflow_ctx.set(ctx, kwargs)
        result = func(*args, **kwargs)
        if not ctx.internal.graph_mode:
            # not in graph mode: wait for every scheduled task to finish
            tasks = list(ctx.internal.task_graph.tasks_iter())
            for workflow_task in tasks:
                workflow_task.async_result.get()
        return result
    finally:
        ctx.internal.stop_local_tasks_processing()
        current_workflow_ctx.clear()
def _send_workflow_started_event(ctx):
ctx.internal.send_workflow_event(
event_type='workflow_started',
message="Starting '{0}' workflow execution".format(ctx.workflow_id))
def _send_workflow_succeeded_event(ctx):
ctx.internal.send_workflow_event(
event_type='workflow_succeeded',
message="'{0}' workflow execution succeeded"
.format(ctx.workflow_id))
def _send_workflow_failed_event(ctx, exception, error_traceback):
ctx.internal.send_workflow_event(
event_type='workflow_failed',
message="'{0}' workflow execution failed: {1}"
.format(ctx.workflow_id, str(exception)),
args={'error': error_traceback})
def _send_workflow_cancelled_event(ctx):
ctx.internal.send_workflow_event(
event_type='workflow_cancelled',
message="'{0}' workflow execution cancelled"
.format(ctx.workflow_id))
def _process_wrapper(wrapper, arguments):
    """Pick the task decorator: the stub one when celery is disabled."""
    if arguments.get('force_not_celery') is True:
        return _stub_task(wrapper)
    return _task(wrapper)
# Public alias: 'task' is the same decorator object as 'operation'.
task = operation
| apache-2.0 |
jstoxrocky/statsmodels | statsmodels/tsa/interp/tests/test_denton.py | 35 | 1245 | import numpy as np
from statsmodels.tsa.interp import dentonm
def test_denton_quarterly():
    """Denton interpolation vs. the IMF paper results (1-decimal match)."""
    # Data and results taken from IMF paper
    indicator = np.array([98.2, 100.8, 102.2, 100.8, 99.0, 101.6,
                          102.7, 101.5, 100.5, 103.0, 103.5, 101.5])
    benchmark = np.array([4000., 4161.4])
    expected = np.array([969.8, 998.4, 1018.3, 1013.4, 1007.2, 1042.9,
                         1060.3, 1051.0, 1040.6, 1066.5, 1071.7, 1051.0])
    actual = dentonm(indicator, benchmark, freq="aq")
    np.testing.assert_almost_equal(expected, actual, 1)
def test_denton_quarterly2():
    """Denton interpolation vs. Stata output (5-decimal match)."""
    # Test denton vs stata. Higher precision than other test.
    quarterly = np.array([50, 100, 150, 100] * 5)
    annual = np.array([500, 400, 300, 400, 500])
    expected = np.array([64.334796, 127.80616, 187.82379, 120.03526,
                         56.563894, 105.97568, 147.50144, 89.958987,
                         40.547201, 74.445963, 108.34473, 76.66211,
                         42.763347, 94.14664, 153.41596, 109.67405,
                         58.290761, 122.62556, 190.41409, 128.66959])
    actual = dentonm(quarterly, annual, freq="aq")
    np.testing.assert_almost_equal(actual, expected, 5)
if __name__ == "__main__":
    # Run this test module directly under nose: verbose, stop on first
    # failure, drop into pdb on errors.
    import nose
    nose.runmodule(argv=[__file__,'-vvs','-x', '--pdb'], exit=False)
| bsd-3-clause |
leoliujie/odoo | addons/account_test/report/account_test_report.py | 194 | 3819 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import datetime
import time
from openerp.osv import osv
from openerp.tools.translate import _
from openerp.report import report_sxw
from openerp.tools.safe_eval import safe_eval as eval
#
# Use period and Journal for selection or resources
#
class report_assert_account(report_sxw.rml_parse):
    """RML report parser that runs accounting sanity-test code snippets."""
    def __init__(self, cr, uid, name, context):
        super(report_assert_account, self).__init__(cr, uid, name, context=context)
        # Expose the time/datetime modules and the snippet runner to the
        # report rendering context.
        self.localcontext.update( {
            'time': time,
            'datetime': datetime,
            'execute_code': self.execute_code,
        })
    def execute_code(self, code_exec):
        """Evaluate a test snippet with safe_eval and return its result
        normalized to a list of display strings.

        The snippet must store its outcome in the local name ``result`` and
        may set ``column_order`` to control how dict results are rendered.
        """
        def reconciled_inv():
            """
            returns the list of invoices that are set as reconciled = True
            """
            return self.pool.get('account.invoice').search(self.cr, self.uid, [('reconciled','=',True)])
        def order_columns(item, cols=None):
            """
            This function is used to display a dictionary as a string, with its columns in the order chosen.

            :param item: dict
            :param cols: list of field names
            :returns: a list of tuples (fieldname: value) in a similar way that would dict.items() do except that the
                returned values are following the order given by cols
            :rtype: [(key, value)]
            """
            if cols is None:
                cols = item.keys()
            return [(col, item.get(col)) for col in cols if col in item.keys()]
        localdict = {
            'cr': self.cr,
            'uid': self.uid,
            'reconciled_inv': reconciled_inv, #specific function used in different tests
            'result': None, #used to store the result of the test
            'column_order': None, #used to choose the display order of columns (in case you are returning a list of dict)
        }
        # safe_eval in exec mode: the snippet mutates localdict in place.
        eval(code_exec, localdict, mode="exec", nocopy=True)
        result = localdict['result']
        column_order = localdict.get('column_order', None)
        if not isinstance(result, (tuple, list, set)):
            result = [result]
        if not result:
            # Empty/falsy result means the test found nothing wrong.
            result = [_('The test was passed successfully')]
        else:
            def _format(item):
                # Render dict rows as "key: value" pairs in the chosen order.
                if isinstance(item, dict):
                    return ', '.join(["%s: %s" % (tup[0], tup[1]) for tup in order_columns(item, column_order)])
                else:
                    return item
            result = [_(_format(rec)) for rec in result]
        return result
class report_accounttest(osv.AbstractModel):
    """QWeb wrapper exposing the accounting-test report through the
    abstract-report engine."""
    _name = 'report.account_test.report_accounttest'
    _inherit = 'report.abstract_report'
    _template = 'account_test.report_accounttest'
    # Parser class that actually runs the test snippets.
    _wrapped_report_class = report_assert_account
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
bmazin/ARCONS-pipeline | astrometry/guide-centroid/manage.py | 1 | 5233 | from FitsAnalysis import convert,StarCalibration
from catalog import queryVizier,queryFitsImage
import os
import warnings
from radec import radec
from functions import *
#ignore the warning caused by astropy
warnings.filterwarnings("ignore")
# --- User configuration ---------------------------------------------------
#This specifies the center of fits images retrieved from the data base
#Though it is possible to specify by name, it is a good idea to use 'RA DEC' in degrees to avoid errors
#pos = '104.9566125,14.2341555'
#pos = 'corot18b'
#RA,DEC = convert([['6:59:49.587','+14:14:02.96']])[0]
#print RA,DEC
#pos = '%s,%s' %(RA,DEC)
#PSR 0656+14
pos = '104.95,14.24'
#source of data:'USNO-B1.0' or '2MASS' are usually enough. For full list: http://cdsarc.u-strasbg.fr/viz-bin/vizHelp?cats/U.htx
source = 'USNO-B1.0'
#source = '2MASS'
#names of the saved catalog table and image files
tfitsTable = 'test.fits'
tfitsImage = 'test_image.fits'
#if manCat=True, manual catalog will be used instead of vizier
#if semiManCat=True, stars will be added on top of the vizier catalog stars
#stars appended in both cases are specified in manCatFile
#notice that manCat and semiManCat can't both be true at the same time
manCat = False
semiManCat = True
manCatFile = 'manCat.cat'
calHeight = 3
#saving directory of all the calibrated files in relative path
caldir = './cal/'
#directory of fits images to be calibrated, put all the files here
fdir = './origin/'
sedir = './config/'
#the distortion parameter file (None disables the distortion step)
paramFile = None
#if manual = False, the program will use sextractor to find source and match the corresponding stars in the images
#also make sure the ./origin/ folder has appropriate sextractor parameters files and parameters
manual = False
#if calibrate is True, all the files that are calibrated will be used as data points to calculate distortion parameters
calibrate = False
#next, if automatic calibration is chosen, it is best to first manually correct the reference pixel coordinate on the header. This greatly increases the chances of calibrating.
refFix = True
#specify the RA,DEC of the object in CRVAL1 AND CRVAL2 and the approximate pixel coordinate in the guider pixel coordinate.
CRVAL1 = 104.950558
CRVAL2 = 14.239306
CRPIX1 = 629
CRPIX2 = 318
'''
-----------------------------------------------------------------------------------------------------------
-----------------------------------------------------------------------------------------------------------
Input Ends Here
-----------------------------------------------------------------------------------------------------------
-----------------------------------------------------------------------------------------------------------
'''
# Fetch the reference catalog table and sky image for the target position.
# Existing files with the same names are overwritten; the intermediate
# 'test_vo.xml' file is not important and can be ignored.
queryVizier(tfitsTable, source=source, pos=pos)
queryFitsImage(tfitsImage, 'test_vo.xml', pos=pos)
# Collapse the two manual-catalog flags into a single option value.
if manCat and semiManCat:
    # Call form of raise: the 'raise E, msg' statement is Python-2 only.
    raise ValueError('Manual catalog and semi-manual catalog cannot be True all at once!')
elif manCat:
    catOption = 'full'
elif semiManCat:
    catOption = 'semi'
else:
    catOption = None
#perform linear and polynomial calibration to each file in dir specified
for fitsImage in os.listdir(fdir):
    # separator lines to make the console output readable per file
    print '--------------------------------------------------------------------------'
    print '--------------------------------------------------------------------------'
    print '> Calibrating %s...' %(fitsImage)
    #fix reference value if refFix is True
    if refFix:
        updateHeader(fdir+fitsImage,'CRVAL1',CRVAL1)
        updateHeader(fdir+fitsImage,'CRVAL2',CRVAL2)
        updateHeader(fdir+fitsImage,'CRPIX1',CRPIX1)
        updateHeader(fdir+fitsImage,'CRPIX2',CRPIX2)
    try:
        # Linear (offset/rotation) calibration; distortion correction is
        # applied afterwards only when a parameter file is supplied.
        cal = StarCalibration(fitsImage,tfitsTable,tfitsImage,manual,paramFile=paramFile,caldir=caldir,fdir=fdir,sedir=sedir,height=3,manCat=catOption,manCatFile=manCatFile)
        cal.linCal()
        if paramFile != None:
            distHeaderUpdate(caldir+fitsImage[:-5]+'_offCal_rotCal.fits',caldir+fitsImage[:-5]+'_allCal.fits',paramFile)
        #cal.distCal()
    except ValueError as err:
        print '> WARNING: %s is NOT calibrated: %s ' %(fitsImage,err)
    #try to remove the intermediate files after calibration; missing
    #intermediates are not an error, so failures are ignored
    try:
        os.remove(caldir + fitsImage[:-5] + '_offCal.fits')
        os.remove(caldir + fitsImage[:-5] + '.check')
        print 'clean up completed'
    except:
        pass
if calibrate:
    #just choose a random file in the original folder in order to call the function;
    #the remaining files are passed in via addFiles so the distortion fit
    #uses every image as a data point
    dummyList = os.listdir(fdir)
    print dummyList
    firstDummy = dummyList[0]
    cal= StarCalibration(firstDummy,tfitsTable,tfitsImage,manual,paramFile=None,caldir=caldir,fdir=fdir,sedir=sedir,manCat=catOption,manCatFile=manCatFile)
    cal.distCal(addFiles=dummyList[1:])
'''
#testing scripts
#convert world coordinate(in degrees) to ARCONS coordinate
worldCoor = [98.172398,-0.0315900]
#worldCoor = [98.169492,-0.03306112]
#guide stars 20121207/112636.fits
worldCoor = [104.95365,14.241674]
worldCoor = [104.9578,14.241021]
photon = [35.9084,32.5359]
test = radec(tolError=1000)
nlist = test.centroid(worldCoor=worldCoor)
mapp = test.photonMapping('090001',15.72,14.65)
''' | gpl-2.0 |
tommo/gii | lib/3rdparty/common/yaml/cyaml.py | 537 | 3290 |
__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader',
'CBaseDumper', 'CSafeDumper', 'CDumper']
from _yaml import CParser, CEmitter
from constructor import *
from serializer import *
from representer import *
from resolver import *
class CBaseLoader(CParser, BaseConstructor, BaseResolver):
    """LibYAML-backed loader using the base constructor/resolver."""
    def __init__(self, stream):
        CParser.__init__(self, stream)
        BaseConstructor.__init__(self)
        BaseResolver.__init__(self)
class CSafeLoader(CParser, SafeConstructor, Resolver):
    """LibYAML-backed loader restricted to safe YAML constructs."""
    def __init__(self, stream):
        CParser.__init__(self, stream)
        SafeConstructor.__init__(self)
        Resolver.__init__(self)
class CLoader(CParser, Constructor, Resolver):
    """LibYAML-backed loader with the full (unsafe) constructor."""
    def __init__(self, stream):
        CParser.__init__(self, stream)
        Constructor.__init__(self)
        Resolver.__init__(self)
class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver):
    """LibYAML-backed dumper using the base representer/resolver."""
    def __init__(self, stream,
            default_style=None, default_flow_style=None,
            canonical=None, indent=None, width=None,
            allow_unicode=None, line_break=None,
            encoding=None, explicit_start=None, explicit_end=None,
            version=None, tags=None):
        CEmitter.__init__(self, stream, canonical=canonical,
                indent=indent, width=width, encoding=encoding,
                allow_unicode=allow_unicode, line_break=line_break,
                explicit_start=explicit_start, explicit_end=explicit_end,
                version=version, tags=tags)
        # Initialize the classes actually declared as bases. The previous
        # code called Representer.__init__/Resolver.__init__ here, which
        # contradicts the BaseRepresenter/BaseResolver bases above (same
        # fix as applied upstream in PyYAML).
        BaseRepresenter.__init__(self, default_style=default_style,
                default_flow_style=default_flow_style)
        BaseResolver.__init__(self)
class CSafeDumper(CEmitter, SafeRepresenter, Resolver):
    """LibYAML-backed dumper restricted to safe YAML representations."""
    def __init__(self, stream,
            default_style=None, default_flow_style=None,
            canonical=None, indent=None, width=None,
            allow_unicode=None, line_break=None,
            encoding=None, explicit_start=None, explicit_end=None,
            version=None, tags=None):
        # CEmitter handles both emitting and serializing.
        CEmitter.__init__(self, stream, canonical=canonical,
                indent=indent, width=width, encoding=encoding,
                allow_unicode=allow_unicode, line_break=line_break,
                explicit_start=explicit_start, explicit_end=explicit_end,
                version=version, tags=tags)
        SafeRepresenter.__init__(self, default_style=default_style,
                default_flow_style=default_flow_style)
        Resolver.__init__(self)
class CDumper(CEmitter, Serializer, Representer, Resolver):
    """LibYAML-backed dumper with the full representer."""
    def __init__(self, stream,
            default_style=None, default_flow_style=None,
            canonical=None, indent=None, width=None,
            allow_unicode=None, line_break=None,
            encoding=None, explicit_start=None, explicit_end=None,
            version=None, tags=None):
        # CEmitter handles both emitting and serializing; Serializer is in
        # the bases only for interface compatibility.
        CEmitter.__init__(self, stream, canonical=canonical,
                indent=indent, width=width, encoding=encoding,
                allow_unicode=allow_unicode, line_break=line_break,
                explicit_start=explicit_start, explicit_end=explicit_end,
                version=version, tags=tags)
        Representer.__init__(self, default_style=default_style,
                default_flow_style=default_flow_style)
        Resolver.__init__(self)
| mit |
brittanystoroz/kitsune | kitsune/landings/views.py | 18 | 1425 | from django.shortcuts import render
from mobility.decorators import mobile_template
from kitsune.products.models import Product
from kitsune.sumo.decorators import ssl_required
from kitsune.sumo.views import redirect_to
from kitsune.wiki.decorators import check_simple_wiki_locale
@check_simple_wiki_locale
def home(request):
    """The home page."""
    if request.MOBILE:
        return redirect_to(request, 'products', permanent=False)
    context = {'products': Product.objects.filter(visible=True)}
    return render(request, 'landings/home.html', context)
@ssl_required
@mobile_template('landings/{mobile/}get-involved.html')
def get_involved(request, template):
    """Render the 'get involved' landing page (mobile-aware template)."""
    return render(request, template)
@ssl_required
@mobile_template('landings/{mobile/}get-involved-aoa.html')
def get_involved_aoa(request, template):
    """Render the Army-of-Awesome contributor landing page."""
    return render(request, template)
@ssl_required
@mobile_template('landings/{mobile/}get-involved-questions.html')
def get_involved_questions(request, template):
    """Render the support-questions contributor landing page."""
    return render(request, template)
@ssl_required
@mobile_template('landings/{mobile/}get-involved-kb.html')
def get_involved_kb(request, template):
    """Render the knowledge-base contributor landing page."""
    return render(request, template)
@ssl_required
@mobile_template('landings/{mobile/}get-involved-l10n.html')
def get_involved_l10n(request, template):
    """Render the localization contributor landing page."""
    return render(request, template)
def integrity_check(request):
    """Render a static page used for content integrity checking."""
    return render(request, 'landings/integrity-check.html')
| bsd-3-clause |
demaranderson/othello-py | othello_gui.py | 3 | 6937 | # othello_gui: a GUI based interface to get the user's move
# Copyright (C) 2006 Nimar S. Arora
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# nimar.arora@gmail.com
import Tkinter
import time
import othello
import game2
import minimax
# Size of one board square, in pixels.
BOXWIDTH=80
BOXHEIGHT=80
class player:
    """Make a user player to play the game via a GUI."""
    def __init__(self):
        """Build the Tk window: board canvas, 'No Move' button, status label."""
        # create the GUI state variables
        self.alive = True
        self.move = None
        self.move_played = False
        # create the GUI windows and handlers
        self.root = Tkinter.Tk()
        self.root.protocol("WM_DELETE_WINDOW", self.quit)
        # create a button to get the No move command from the user
        Tkinter.Button(self.root, text="No Move", command = self.nomove).pack()
        # create a label for displaying the next player's name
        self.movemesg = Tkinter.StringVar()
        Tkinter.Label(self.root, textvariable=self.movemesg).pack()
        self.canvas = Tkinter.Canvas(self.root, bg="lightblue",
                                     height = BOXHEIGHT*othello.size,
                                     width = BOXWIDTH*othello.size)
        self.canvas.bind("<Button-1>", self.click)
        # create a box for highlighting the last move
        self.lastbox = self.canvas.create_rectangle(0, 0, BOXWIDTH*othello.size,
                                                    BOXHEIGHT*othello.size,
                                                    outline="yellow")
        # draw the game canvas grid lines
        for i in xrange(1,othello.size):
            # horizontal lines
            self.canvas.create_line(0, i*BOXHEIGHT,
                                    BOXWIDTH*othello.size, i*BOXHEIGHT)
            # vertical lines
            self.canvas.create_line(i*BOXWIDTH, 0,
                                    i*BOXWIDTH, BOXHEIGHT*othello.size)
        # the board will store the widgets to be displayed in each square
        self.board = [[None for y in range(othello.size)]
                      for x in range(othello.size)]
        # display the window
        self.canvas.pack()
        self.canvas.focus_set()
        self.root.update()
    def draw_board(self, game, last_move):
        """Draw an othello game on the board."""
        if game.player == -1:
            self.movemesg.set("Black to play")
        else:
            self.movemesg.set("White to play")
        for i in range(othello.size):
            for j in range(othello.size):
                color = game.get_color((i,j))
                if color == -1:
                    board_color = "black"
                elif color == 1:
                    board_color = "white"
                else:
                    # empty square: remove any stale disc widget
                    if self.board[i][j] is not None:
                        self.canvas.delete(self.board[i][j])
                        self.board[i][j] = None
                    continue
                # create the disc on first use, recolor it afterwards
                if self.board[i][j] is None:
                    self.board[i][j] = self.canvas.create_oval(
                        j*BOXWIDTH+2, i*BOXHEIGHT+2, (j+1)*BOXWIDTH-2,
                        (i+1)*BOXHEIGHT-2, fill = board_color)
                else:
                    self.canvas.itemconfig(self.board[i][j], fill=board_color)
        # highlight the last move (whole board when there is none)
        if last_move is None:
            self.canvas.coords(self.lastbox,
                               1, 1, BOXWIDTH*othello.size-1,BOXHEIGHT*othello.size-1)
        else:
            self.canvas.coords(
                self.lastbox, last_move[1]*BOXWIDTH+1, last_move[0]*BOXHEIGHT+1,
                (last_move[1]+1)*BOXWIDTH-1, (last_move[0]+1)*BOXHEIGHT-1)
    def nomove(self):
        """'No Move' button handler: record a pass."""
        self.move = None
        self.move_played = True
    def click(self, event):
        """Mouse handler: map the pixel position to a (row, col) square."""
        # NOTE(review): Python-2 integer division maps pixels -> squares.
        self.move = (event.y/BOXHEIGHT, event.x/BOXWIDTH)
        self.move_played = True
    def quit(self):
        """Window-close handler: stop all polling loops and tear down Tk."""
        self.alive = False
        self.root.destroy()
    def play(self, game, last_move):
        """Poll the GUI for a legal user move.

        Returns (0, move) once a legal move has been played on *game*, or
        None when the user closed the window (forces game termination).
        """
        # keep looping for a user move unless the user quits
        while self.alive:
            # wait for a user move
            self.move_played = False
            # grab the focus to ask the user for a move
            self.draw_board(game, last_move)
            self.canvas.focus_force()
            self.root.configure(cursor="target")
            while (not self.move_played) and self.alive:
                self.root.update()
                time.sleep(0.1)
            if not self.move_played:
                continue
            # check the move
            if self.move not in game.generate_moves():
                self.root.bell()
                continue
            # display the new move
            game.play_move(self.move)
            self.draw_board(game, self.move)
            self.root.configure(cursor="watch")
            self.root.update()
            # give a pause so I can see my move
            time.sleep(.1)
            return (0, self.move)
        # if the user has quit the GUI then the game has to terminate,
        # we force a termination by returning an illegal value
        else:
            return None
    def gameover(self, game, last_move):
        """Show the final result and wait for the user to close the window."""
        # score is from the mover's viewpoint; multiply by player to get
        # the white-positive convention used below
        score = game.score() * game.player
        if score > 0:
            win_text = "White Won"
        elif score < 0:
            win_text = "Black Won"
        else:
            win_text = "Draw"
        self.draw_board(game, last_move)
        self.root.configure(cursor="X_cursor")
        self.movemesg.set("Game Over "+win_text)
        # wait for the user to quit the game
        while self.alive:
            self.root.update()
            time.sleep(.1)
        return
if __name__ == "__main__":
    # Print the GPL notice, then run a human GUI player against an
    # alpha-beta minimax computer player (depth 4, edge-weighted eval).
    print """othello_gui, Copyright (C) 2006 Nimar S. Arora
othello_gui comes with ABSOLUTELY NO WARRANTY.
This is free software, and you are welcome to redistribute it
under certain conditions."""
    game2.play(othello.game(),
               game2.player(lambda x: minimax.alphabeta(x, 4, othello.edge_eval)),
               player(), True)
| gpl-2.0 |
MikeAmy/django | django/core/management/commands/makemessages.py | 19 | 24495 | from __future__ import unicode_literals
import fnmatch
import glob
import io
import os
import re
import sys
from functools import total_ordering
from itertools import dropwhile
import django
from django.conf import settings
from django.core.files.temp import NamedTemporaryFile
from django.core.management.base import BaseCommand, CommandError
from django.core.management.utils import (
find_command, handle_extensions, popen_wrapper,
)
from django.utils._os import upath
from django.utils.encoding import DEFAULT_LOCALE_ENCODING, force_str
from django.utils.functional import cached_property
from django.utils.jslex import prepare_js_for_gettext
from django.utils.text import get_text_list
# Matches the Plural-Forms header line inside a PO/POT file.
plural_forms_re = re.compile(r'^(?P<value>"Plural-Forms.+?\\n")\s*$', re.MULTILINE | re.DOTALL)
STATUS_OK = 0
# Sentinel: a translatable file for which no locale directory was found.
NO_LOCALE_DIR = object()
def check_programs(*programs):
    """Raise CommandError unless every named gettext tool is on PATH."""
    for name in programs:
        if find_command(name) is not None:
            continue
        raise CommandError("Can't find %s. Make sure you have GNU "
                           "gettext tools 0.15 or newer installed." % name)
@total_ordering
class TranslatableFile(object):
    """A source file that may contain translatable strings.

    Instances compare, order and hash by their full relative path, so they
    can be sorted and used in sets/dicts consistently.
    """
    def __init__(self, dirpath, file_name, locale_dir):
        self.file = file_name
        self.dirpath = dirpath
        self.locale_dir = locale_dir

    def __repr__(self):
        return "<TranslatableFile: %s>" % os.sep.join([self.dirpath, self.file])

    def __eq__(self, other):
        return self.path == other.path

    def __lt__(self, other):
        return self.path < other.path

    def __hash__(self):
        # Defining __eq__ alone left instances hashed by identity (and
        # unhashable under Python 3); hash by path to stay consistent
        # with equality.
        return hash(self.path)

    @property
    def path(self):
        return os.path.join(self.dirpath, self.file)
class BuildFile(object):
    """
    Represents the state of a translatable file during the build process.
    """
    def __init__(self, command, domain, translatable):
        self.command = command
        self.domain = domain
        self.translatable = translatable

    @cached_property
    def is_templatized(self):
        # JS files need templatizing only for xgettext versions that cannot
        # parse JavaScript natively (< 0.18.3); in the 'django' domain every
        # non-Python file (i.e. a template) is templatized.
        if self.domain == 'djangojs':
            return self.command.gettext_version < (0, 18, 3)
        elif self.domain == 'django':
            file_ext = os.path.splitext(self.translatable.file)[1]
            return file_ext != '.py'
        return False

    @cached_property
    def path(self):
        return self.translatable.path

    @cached_property
    def work_path(self):
        """
        Path to a file which is being fed into GNU gettext pipeline. This may
        be either a translatable or its preprocessed version.
        """
        if not self.is_templatized:
            return self.path
        # Extension chosen so xgettext applies the right lexer to the
        # preprocessed file ('c' for JS output, 'py' for template output).
        extension = {
            'djangojs': 'c',
            'django': 'py',
        }.get(self.domain)
        filename = '%s.%s' % (self.translatable.file, extension)
        return os.path.join(self.translatable.dirpath, filename)

    def preprocess(self):
        """
        Preprocess (if necessary) a translatable file before passing it to
        xgettext GNU gettext utility.
        """
        from django.utils.translation import templatize
        if not self.is_templatized:
            return
        with io.open(self.path, 'r', encoding=settings.FILE_CHARSET) as fp:
            src_data = fp.read()
        if self.domain == 'djangojs':
            content = prepare_js_for_gettext(src_data)
        elif self.domain == 'django':
            content = templatize(src_data, self.path[2:])
        with io.open(self.work_path, 'w', encoding='utf-8') as fp:
            fp.write(content)

    def postprocess_messages(self, msgs):
        """
        Postprocess messages generated by xgettext GNU gettext utility.

        Transform paths as if these messages were generated from original
        translatable files rather than from preprocessed versions.
        """
        if not self.is_templatized:
            return msgs

        # Remove '.py' suffix
        if os.name == 'nt':
            # Preserve '.\' prefix on Windows to respect gettext behavior
            old = '#: ' + self.work_path
            new = '#: ' + self.path
        else:
            old = '#: ' + self.work_path[2:]
            new = '#: ' + self.path[2:]

        return msgs.replace(old, new)

    def cleanup(self):
        """
        Remove a preprocessed copy of a translatable file (if any).
        """
        if self.is_templatized:
            # This check is needed for the case of a symlinked file and its
            # source being processed inside a single group (locale dir);
            # removing either of those two removes both.
            if os.path.exists(self.work_path):
                os.unlink(self.work_path)
def write_pot_file(potfile, msgs):
    """
    Write the :param potfile: POT file with the :param msgs: contents,
    previously making sure its format is valid.
    """
    appending = os.path.exists(potfile)
    if appending:
        # Strip the duplicate metadata header: drop leading lines up to
        # (and including) the first blank one.
        content = '\n'.join(dropwhile(len, msgs.split('\n')))
    else:
        # First write: fix the placeholder charset emitted by xgettext.
        content = msgs.replace('charset=CHARSET', 'charset=UTF-8')
    with io.open(potfile, 'a', encoding='utf-8') as fp:
        fp.write(content)
class Command(BaseCommand):
    """Management command that extracts translatable strings into .po files."""
    help = ("Runs over the entire source tree of the current directory and "
            "pulls out all strings marked for translation. It creates (or updates) a message "
            "file in the conf/locale (in the django tree) or locale (for projects and "
            "applications) directory.\n\nYou must run this command with one of either the "
            "--locale, --exclude or --all options.")

    # Hooks so subclasses can substitute their own file wrapper classes.
    translatable_file_class = TranslatableFile
    build_file_class = BuildFile

    requires_system_checks = False
    leave_locale_alone = True

    # Default options passed to the GNU gettext command-line tools.
    msgmerge_options = ['-q', '--previous']
    msguniq_options = ['--to-code=utf-8']
    msgattrib_options = ['--no-obsolete']
    xgettext_options = ['--from-code=UTF-8', '--add-comments=Translators']
    def add_arguments(self, parser):
        """Register the command-line options for makemessages."""
        parser.add_argument('--locale', '-l', default=[], dest='locale', action='append',
            help='Creates or updates the message files for the given locale(s) (e.g. pt_BR). '
                 'Can be used multiple times.')
        parser.add_argument('--exclude', '-x', default=[], dest='exclude', action='append',
            help='Locales to exclude. Default is none. Can be used multiple times.')
        parser.add_argument('--domain', '-d', default='django', dest='domain',
            help='The domain of the message files (default: "django").')
        parser.add_argument('--all', '-a', action='store_true', dest='all',
            default=False, help='Updates the message files for all existing locales.')
        parser.add_argument('--extension', '-e', dest='extensions',
            help='The file extension(s) to examine (default: "html,txt,py", or "js" '
                 'if the domain is "djangojs"). Separate multiple extensions with '
                 'commas, or use -e multiple times.',
            action='append')
        parser.add_argument('--symlinks', '-s', action='store_true', dest='symlinks',
            default=False, help='Follows symlinks to directories when examining '
                                'source code and templates for translation strings.')
        parser.add_argument('--ignore', '-i', action='append', dest='ignore_patterns',
            default=[], metavar='PATTERN',
            help='Ignore files or directories matching this glob-style pattern. '
                 'Use multiple times to ignore more.')
        parser.add_argument('--no-default-ignore', action='store_false', dest='use_default_ignore_patterns',
            default=True, help="Don't ignore the common glob-style patterns 'CVS', '.*', '*~' and '*.pyc'.")
        parser.add_argument('--no-wrap', action='store_true', dest='no_wrap',
            default=False, help="Don't break long message lines into several lines.")
        parser.add_argument('--no-location', action='store_true', dest='no_location',
            default=False, help="Don't write '#: filename:line' lines.")
        parser.add_argument('--no-obsolete', action='store_true', dest='no_obsolete',
            default=False, help="Remove obsolete message strings.")
        parser.add_argument('--keep-pot', action='store_true', dest='keep_pot',
            default=False, help="Keep .pot file after making messages. Useful when debugging.")
    def handle(self, *args, **options):
        """Validate options, discover locales, then build the message files."""
        locale = options.get('locale')
        exclude = options.get('exclude')
        self.domain = options.get('domain')
        self.verbosity = options.get('verbosity')
        process_all = options.get('all')
        extensions = options.get('extensions')
        self.symlinks = options.get('symlinks')
        # Need to ensure that the i18n framework is enabled
        if settings.configured:
            settings.USE_I18N = True
        else:
            settings.configure(USE_I18N=True)
        ignore_patterns = options.get('ignore_patterns')
        if options.get('use_default_ignore_patterns'):
            ignore_patterns += ['CVS', '.*', '*~', '*.pyc']
        self.ignore_patterns = list(set(ignore_patterns))
        # Avoid messing with mutable class variables
        if options.get('no_wrap'):
            self.msgmerge_options = self.msgmerge_options[:] + ['--no-wrap']
            self.msguniq_options = self.msguniq_options[:] + ['--no-wrap']
            self.msgattrib_options = self.msgattrib_options[:] + ['--no-wrap']
            self.xgettext_options = self.xgettext_options[:] + ['--no-wrap']
        if options.get('no_location'):
            self.msgmerge_options = self.msgmerge_options[:] + ['--no-location']
            self.msguniq_options = self.msguniq_options[:] + ['--no-location']
            self.msgattrib_options = self.msgattrib_options[:] + ['--no-location']
            self.xgettext_options = self.xgettext_options[:] + ['--no-location']
        self.no_obsolete = options.get('no_obsolete')
        self.keep_pot = options.get('keep_pot')
        if self.domain not in ('django', 'djangojs'):
            raise CommandError("currently makemessages only supports domains "
                               "'django' and 'djangojs'")
        # Default extensions depend on the domain when none were given.
        if self.domain == 'djangojs':
            exts = extensions if extensions else ['js']
        else:
            exts = extensions if extensions else ['html', 'txt', 'py']
        self.extensions = handle_extensions(exts)
        if (locale is None and not exclude and not process_all) or self.domain is None:
            raise CommandError("Type '%s help %s' for usage information." % (
                os.path.basename(sys.argv[0]), sys.argv[1]))
        if self.verbosity > 1:
            self.stdout.write('examining files with the extensions: %s\n'
                              % get_text_list(list(self.extensions), 'and'))
        self.invoked_for_django = False
        self.locale_paths = []
        self.default_locale_path = None
        # 'conf/locale' only exists when run from within the Django tree.
        if os.path.isdir(os.path.join('conf', 'locale')):
            self.locale_paths = [os.path.abspath(os.path.join('conf', 'locale'))]
            self.default_locale_path = self.locale_paths[0]
            self.invoked_for_django = True
        else:
            self.locale_paths.extend(settings.LOCALE_PATHS)
            # Allow to run makemessages inside an app dir
            if os.path.isdir('locale'):
                self.locale_paths.append(os.path.abspath('locale'))
            if self.locale_paths:
                self.default_locale_path = self.locale_paths[0]
                if not os.path.exists(self.default_locale_path):
                    os.makedirs(self.default_locale_path)
        # Build locale list
        locale_dirs = filter(os.path.isdir, glob.glob('%s/*' % self.default_locale_path))
        all_locales = map(os.path.basename, locale_dirs)
        # Account for excluded locales
        if process_all:
            locales = all_locales
        else:
            locales = locale or all_locales
        locales = set(locales) - set(exclude)
        # msguniq/msgmerge/msgattrib are only needed when locales are built;
        # xgettext is always required for extraction.
        if locales:
            check_programs('msguniq', 'msgmerge', 'msgattrib')
        check_programs('xgettext')
        try:
            potfiles = self.build_potfiles()
            # Build po files for each selected locale
            for locale in locales:
                if self.verbosity > 0:
                    self.stdout.write("processing locale %s\n" % locale)
                for potfile in potfiles:
                    self.write_po_file(potfile, locale)
        finally:
            if not self.keep_pot:
                self.remove_potfiles()
    @cached_property
    def gettext_version(self):
        """Return the installed xgettext version as a tuple of ints."""
        # Gettext tools will output system-encoded bytestrings instead of UTF-8,
        # when looking up the version. It's especially a problem on Windows.
        out, err, status = popen_wrapper(
            ['xgettext', '--version'],
            stdout_encoding=DEFAULT_LOCALE_ENCODING,
        )
        # Parse e.g. '0.19.8' into (0, 19, 8); the micro part is optional.
        m = re.search(r'(\d+)\.(\d+)\.?(\d+)?', out)
        if m:
            return tuple(int(d) for d in m.groups() if d is not None)
        else:
            raise CommandError("Unable to get gettext version. Is it installed?")
    def build_potfiles(self):
        """
        Build pot files and apply msguniq to them.
        """
        file_list = self.find_files(".")
        self.remove_potfiles()
        self.process_files(file_list)
        potfiles = []
        for path in self.locale_paths:
            potfile = os.path.join(path, '%s.pot' % str(self.domain))
            if not os.path.exists(potfile):
                continue
            # msguniq merges duplicate msgids extracted from different
            # source files into a single entry.
            args = ['msguniq'] + self.msguniq_options + [potfile]
            msgs, errors, status = popen_wrapper(args)
            if errors:
                if status != STATUS_OK:
                    raise CommandError(
                        "errors happened while running msguniq\n%s" % errors)
                elif self.verbosity > 0:
                    # Non-fatal stderr output (warnings): just show it.
                    self.stdout.write(errors)
            with io.open(potfile, 'w', encoding='utf-8') as fp:
                fp.write(msgs)
            potfiles.append(potfile)
        return potfiles
def remove_potfiles(self):
for path in self.locale_paths:
pot_path = os.path.join(path, '%s.pot' % str(self.domain))
if os.path.exists(pot_path):
os.unlink(pot_path)
    def find_files(self, root):
        """
        Helper method to get all files in the given root. Also check that there
        is a matching locale dir for each file.
        """
        def is_ignored(path, ignore_patterns):
            """
            Check if the given path should be ignored or not.
            """
            filename = os.path.basename(path)
            ignore = lambda pattern: (fnmatch.fnmatchcase(filename, pattern) or
                                      fnmatch.fnmatchcase(path, pattern))
            return any(ignore(pattern) for pattern in ignore_patterns)

        ignore_patterns = [os.path.normcase(p) for p in self.ignore_patterns]
        dir_suffixes = {'%s*' % path_sep for path_sep in {'/', os.sep}}
        norm_patterns = []
        for p in ignore_patterns:
            for dir_suffix in dir_suffixes:
                if p.endswith(dir_suffix):
                    # Strip the trailing '/*' so whole directory trees can
                    # be pruned during the walk below.
                    norm_patterns.append(p[:-len(dir_suffix)])
                    break
            else:
                norm_patterns.append(p)

        all_files = []
        ignored_roots = [os.path.normpath(p) for p in (settings.MEDIA_ROOT, settings.STATIC_ROOT) if p]
        for dirpath, dirnames, filenames in os.walk(root, topdown=True, followlinks=self.symlinks):
            # Iterate over a copy since we prune dirnames in place.
            for dirname in dirnames[:]:
                if (is_ignored(os.path.normpath(os.path.join(dirpath, dirname)), norm_patterns) or
                        os.path.join(os.path.abspath(dirpath), dirname) in ignored_roots):
                    dirnames.remove(dirname)
                    if self.verbosity > 1:
                        self.stdout.write('ignoring directory %s\n' % dirname)
                elif dirname == 'locale':
                    # Locale dirs are output locations, never scanned.
                    dirnames.remove(dirname)
                    self.locale_paths.insert(0, os.path.join(os.path.abspath(dirpath), dirname))
            for filename in filenames:
                file_path = os.path.normpath(os.path.join(dirpath, filename))
                file_ext = os.path.splitext(filename)[1]
                if file_ext not in self.extensions or is_ignored(file_path, self.ignore_patterns):
                    if self.verbosity > 1:
                        self.stdout.write('ignoring file %s in %s\n' % (filename, dirpath))
                else:
                    # Find the nearest enclosing locale dir for this file.
                    locale_dir = None
                    for path in self.locale_paths:
                        if os.path.abspath(dirpath).startswith(os.path.dirname(path)):
                            locale_dir = path
                            break
                    if not locale_dir:
                        locale_dir = self.default_locale_path
                    if not locale_dir:
                        locale_dir = NO_LOCALE_DIR
                    all_files.append(self.translatable_file_class(dirpath, filename, locale_dir))
        return sorted(all_files)
    def process_files(self, file_list):
        """
        Group translatable files by locale directory and run pot file build
        process for each group.
        """
        file_groups = {}
        for translatable in file_list:
            # Group preserves discovery order within each locale dir.
            file_group = file_groups.setdefault(translatable.locale_dir, [])
            file_group.append(translatable)
        for locale_dir, files in file_groups.items():
            self.process_locale_dir(locale_dir, files)
    def process_locale_dir(self, locale_dir, files):
        """
        Extract translatable literals from the specified files, creating or
        updating the POT file for a given locale directory.
        Uses the xgettext GNU gettext utility.
        """
        build_files = []
        for translatable in files:
            if self.verbosity > 1:
                self.stdout.write('processing file %s in %s\n' % (
                    translatable.file, translatable.dirpath
                ))
            # Only the 'django' and 'djangojs' domains support extraction.
            if self.domain not in ('djangojs', 'django'):
                continue
            build_file = self.build_file_class(self, self.domain, translatable)
            try:
                # May create a templatized work copy for xgettext to parse.
                build_file.preprocess()
            except UnicodeDecodeError as e:
                self.stdout.write(
                    'UnicodeDecodeError: skipped file %s in %s (reason: %s)' % (
                        translatable.file, translatable.dirpath, e,
                    )
                )
                continue
            build_files.append(build_file)
        # NOTE(review): `build_file` below refers to the last loop variable;
        # assumes `files` is non-empty for these domains — confirm upstream.
        if self.domain == 'djangojs':
            # Templatized JS is parsed as C source; raw JS as JavaScript.
            is_templatized = build_file.is_templatized
            args = [
                'xgettext',
                '-d', self.domain,
                '--language=%s' % ('C' if is_templatized else 'JavaScript',),
                '--keyword=gettext_noop',
                '--keyword=gettext_lazy',
                '--keyword=ngettext_lazy:1,2',
                '--keyword=pgettext:1c,2',
                '--keyword=npgettext:1c,2,3',
                '--output=-',
            ]
        elif self.domain == 'django':
            args = [
                'xgettext',
                '-d', self.domain,
                '--language=Python',
                '--keyword=gettext_noop',
                '--keyword=gettext_lazy',
                '--keyword=ngettext_lazy:1,2',
                '--keyword=ugettext_noop',
                '--keyword=ugettext_lazy',
                '--keyword=ungettext_lazy:1,2',
                '--keyword=pgettext:1c,2',
                '--keyword=npgettext:1c,2,3',
                '--keyword=pgettext_lazy:1c,2',
                '--keyword=npgettext_lazy:1c,2,3',
                '--output=-',
            ]
        else:
            return
        input_files = [bf.work_path for bf in build_files]
        # Pass the (possibly long) file list via --files-from to avoid
        # command-line length limits.
        with NamedTemporaryFile(mode='w+') as input_files_list:
            input_files_list.write('\n'.join(input_files))
            input_files_list.flush()
            args.extend(['--files-from', input_files_list.name])
            args.extend(self.xgettext_options)
            msgs, errors, status = popen_wrapper(args)
        if errors:
            if status != STATUS_OK:
                for build_file in build_files:
                    build_file.cleanup()
                raise CommandError(
                    'errors happened while running xgettext on %s\n%s' %
                    ('\n'.join(input_files), errors)
                )
            elif self.verbosity > 0:
                # Print warnings
                self.stdout.write(errors)
        if msgs:
            if locale_dir is NO_LOCALE_DIR:
                file_path = os.path.normpath(build_files[0].path)
                raise CommandError(
                    'Unable to find a locale path to store translations for '
                    'file %s' % file_path
                )
            for build_file in build_files:
                # Rewrite templatized file paths back to the originals.
                msgs = build_file.postprocess_messages(msgs)
            potfile = os.path.join(locale_dir, '%s.pot' % str(self.domain))
            write_pot_file(potfile, msgs)
        for build_file in build_files:
            build_file.cleanup()
    def write_po_file(self, potfile, locale):
        """
        Creates or updates the PO file for self.domain and :param locale:.
        Uses contents of the existing :param potfile:.
        Uses msgmerge, and msgattrib GNU gettext utilities.
        """
        basedir = os.path.join(os.path.dirname(potfile), locale, 'LC_MESSAGES')
        if not os.path.isdir(basedir):
            os.makedirs(basedir)
        pofile = os.path.join(basedir, '%s.po' % str(self.domain))
        if os.path.exists(pofile):
            # Merge new template entries into the existing catalog.
            args = ['msgmerge'] + self.msgmerge_options + [pofile, potfile]
            msgs, errors, status = popen_wrapper(args)
            if errors:
                if status != STATUS_OK:
                    raise CommandError(
                        "errors happened while running msgmerge\n%s" % errors)
                elif self.verbosity > 0:
                    self.stdout.write(errors)
        else:
            # No existing catalog: seed it directly from the POT contents.
            with io.open(potfile, 'r', encoding='utf-8') as fp:
                msgs = fp.read()
            if not self.invoked_for_django:
                msgs = self.copy_plural_forms(msgs, locale)
        # Strip the msgcat conflict marker comment injected by gettext.
        msgs = msgs.replace(
            "#. #-#-#-#-# %s.pot (PACKAGE VERSION) #-#-#-#-#\n" % self.domain, "")
        with io.open(pofile, 'w', encoding='utf-8') as fp:
            fp.write(msgs)
        if self.no_obsolete:
            # Drop entries no longer present in the sources (msgattrib
            # rewrites the .po file in place via -o).
            args = ['msgattrib'] + self.msgattrib_options + ['-o', pofile, pofile]
            msgs, errors, status = popen_wrapper(args)
            if errors:
                if status != STATUS_OK:
                    raise CommandError(
                        "errors happened while running msgattrib\n%s" % errors)
                elif self.verbosity > 0:
                    self.stdout.write(errors)
    def copy_plural_forms(self, msgs, locale):
        """
        Copies plural forms header contents from a Django catalog of locale to
        the msgs string, inserting it at the right place. msgs should be the
        contents of a newly created .po file.

        Returns msgs unchanged when no Django catalog exists for the locale.
        """
        django_dir = os.path.normpath(os.path.join(os.path.dirname(upath(django.__file__))))
        if self.domain == 'djangojs':
            # Prefer the JS catalog's plural forms, falling back to 'django'.
            domains = ('djangojs', 'django')
        else:
            domains = ('django',)
        for domain in domains:
            django_po = os.path.join(django_dir, 'conf', 'locale', locale, 'LC_MESSAGES', '%s.po' % domain)
            if os.path.exists(django_po):
                with io.open(django_po, 'r', encoding='utf-8') as fp:
                    m = plural_forms_re.search(fp.read())
                if m:
                    plural_form_line = force_str(m.group('value'))
                    if self.verbosity > 1:
                        self.stdout.write("copying plural forms: %s\n" % plural_form_line)
                    lines = []
                    found = False
                    for line in msgs.split('\n'):
                        # Replace the first blank line or existing
                        # Plural-Forms header with Django's version.
                        if not found and (not line or plural_forms_re.search(line)):
                            line = '%s\n' % plural_form_line
                            found = True
                        lines.append(line)
                    msgs = '\n'.join(lines)
                    break
        return msgs
| bsd-3-clause |
ericzundel/pants | src/python/pants/backend/graph_info/tasks/dependees.py | 4 | 3226 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import json
from collections import defaultdict
from pants.backend.graph_info.tasks.target_filter_task_mixin import TargetFilterTaskMixin
from pants.task.console_task import ConsoleTask
class ReverseDepmap(TargetFilterTaskMixin, ConsoleTask):
  """List all targets that depend on any of the input targets."""

  @classmethod
  def register_options(cls, register):
    super(ReverseDepmap, cls).register_options(register)
    register('--transitive', type=bool,
             help='List transitive dependees.')
    register('--closed', type=bool,
             help='Include the input targets in the output along with the dependees.')
    # TODO: consider refactoring out common output format methods into MultiFormatConsoleTask.
    register('--output-format', default='text', choices=['text', 'json'],
             help='Output format of results.')

  def __init__(self, *args, **kwargs):
    super(ReverseDepmap, self).__init__(*args, **kwargs)
    self._transitive = self.get_options().transitive
    self._closed = self.get_options().closed

  def console_output(self, _):
    # Build a reverse dependency index (target -> set of dependees) over
    # every target in the repo, then report dependees of the root targets.
    address_mapper = self.context.address_mapper
    build_graph = self.context.build_graph
    dependees_by_target = defaultdict(set)
    for address in address_mapper.scan_addresses():
      build_graph.inject_address_closure(address)
      target = build_graph.get_target(address)
      # TODO(John Sirois): tighten up the notion of targets written down in a BUILD by a
      # user vs. targets created by pants at runtime.
      target = self.get_concrete_target(target)
      for dependency in target.dependencies:
        dependency = self.get_concrete_target(dependency)
        dependees_by_target[dependency].add(target)
    roots = set(self.context.target_roots)
    if self.get_options().output_format == 'json':
      deps = defaultdict(list)
      for root in roots:
        if self._closed:
          # --closed includes the root itself in its dependee list.
          deps[root.address.spec].append(root.address.spec)
        for dependent in self.get_dependents(dependees_by_target, [root]):
          deps[root.address.spec].append(dependent.address.spec)
      # Sort lists (and keys, via sort_keys) for deterministic output.
      for address in deps.keys():
        deps[address].sort()
      yield json.dumps(deps, indent=4, separators=(',', ': '), sort_keys=True)
    else:
      if self._closed:
        for root in roots:
          yield root.address.spec
      for dependent in self.get_dependents(dependees_by_target, roots):
        yield dependent.address.spec

  def get_dependents(self, dependees_by_target, roots):
    # Expand direct dependees of `roots`; keep expanding until a fixed point
    # when --transitive is set.  Roots themselves are excluded from the result.
    check = set(roots)
    known_dependents = set()
    while True:
      dependents = set(known_dependents)
      for target in check:
        dependents.update(dependees_by_target[target])
      check = dependents - known_dependents
      if not check or not self._transitive:
        return dependents - set(roots)
      known_dependents = dependents

  def get_concrete_target(self, target):
    # Map a possibly synthetic (runtime-generated) target back to the
    # concrete target actually written in a BUILD file.
    return target.concrete_derived_from
| apache-2.0 |
mjrulesamrat/xbmcbackup | resources/lib/relativedelta.py | 11 | 17115 | """
Copyright (c) 2003-2010 Gustavo Niemeyer <gustavo@niemeyer.net>
This module offers extensions to the standard python 2.3+
datetime module.
"""
__author__ = "Gustavo Niemeyer <gustavo@niemeyer.net>"
__license__ = "PSF License"
import datetime
import calendar
__all__ = ["relativedelta", "MO", "TU", "WE", "TH", "FR", "SA", "SU"]
class weekday(object):
    """Immutable (weekday index, optional occurrence) pair used by relativedelta.

    ``weekday`` is 0-6 (0=Monday); ``n`` is an optional signed occurrence
    count: MO(+1) means "first Monday", MO(-2) "second-to-last Monday".
    """
    __slots__ = ["weekday", "n"]

    def __init__(self, weekday, n=None):
        self.weekday = weekday
        self.n = n

    def __call__(self, n):
        # Calling an instance derives a new one with the given occurrence;
        # reuse self when nothing would change (instances are immutable).
        return self if n == self.n else self.__class__(self.weekday, n)

    def __eq__(self, other):
        # Duck-typed comparison: anything exposing .weekday/.n is comparable;
        # everything else is unequal.
        try:
            return self.weekday == other.weekday and self.n == other.n
        except AttributeError:
            return False

    def __repr__(self):
        name = ("MO", "TU", "WE", "TH", "FR", "SA", "SU")[self.weekday]
        return "%s(%+d)" % (name, self.n) if self.n else name
MO, TU, WE, TH, FR, SA, SU = weekdays = tuple([weekday(x) for x in range(7)])
class relativedelta:
    """
    The relativedelta type is based on the specification of the excelent
    work done by M.-A. Lemburg in his mx.DateTime extension. However,
    notice that this type does *NOT* implement the same algorithm as
    his work. Do *NOT* expect it to behave like mx.DateTime's counterpart.

    There's two different ways to build a relativedelta instance. The
    first one is passing it two date/datetime classes:

        relativedelta(datetime1, datetime2)

    And the other way is to use the following keyword arguments:

    year, month, day, hour, minute, second, microsecond:
        Absolute information.

    years, months, weeks, days, hours, minutes, seconds, microseconds:
        Relative information, may be negative.

    weekday:
        One of the weekday instances (MO, TU, etc). These instances may
        receive a parameter N, specifying the Nth weekday, which could
        be positive or negative (like MO(+1) or MO(-2). Not specifying
        it is the same as specifying +1. You can also use an integer,
        where 0=MO.

    leapdays:
        Will add given days to the date found, if year is a leap
        year, and the date found is post 28 of february.

    yearday, nlyearday:
        Set the yearday or the non-leap year day (jump leap days).
        These are converted to day/month/leapdays information.

    Here is the behavior of operations with relativedelta:

    1) Calculate the absolute year, using the 'year' argument, or the
       original datetime year, if the argument is not present.
    2) Add the relative 'years' argument to the absolute year.
    3) Do steps 1 and 2 for month/months.
    4) Calculate the absolute day, using the 'day' argument, or the
       original datetime day, if the argument is not present. Then,
       subtract from the day until it fits in the year and month
       found after their operations.
    5) Add the relative 'days' argument to the absolute day. Notice
       that the 'weeks' argument is multiplied by 7 and added to
       'days'.
    6) Do steps 1 and 2 for hour/hours, minute/minutes, second/seconds,
       microsecond/microseconds.
    7) If the 'weekday' argument is present, calculate the weekday,
       with the given (wday, nth) tuple. wday is the index of the
       weekday (0-6, 0=Mon), and nth is the number of weeks to add
       forward or backward, depending on its signal. Notice that if
       the calculated date is already Monday, for example, using
       (0, 1) or (0, -1) won't change the day.
    """

    def __init__(self, dt1=None, dt2=None,
                 years=0, months=0, days=0, leapdays=0, weeks=0,
                 hours=0, minutes=0, seconds=0, microseconds=0,
                 year=None, month=None, day=None, weekday=None,
                 yearday=None, nlyearday=None,
                 hour=None, minute=None, second=None, microsecond=None):
        if dt1 and dt2:
            # "Difference" mode: compute the relative delta dt1 - dt2.
            if not isinstance(dt1, datetime.date) or \
               not isinstance(dt2, datetime.date):
                raise TypeError("relativedelta only diffs datetime/date")
            if type(dt1) is not type(dt2):
                # Promote a plain date to a datetime so both are comparable.
                if not isinstance(dt1, datetime.datetime):
                    dt1 = datetime.datetime.fromordinal(dt1.toordinal())
                elif not isinstance(dt2, datetime.datetime):
                    dt2 = datetime.datetime.fromordinal(dt2.toordinal())
            self.years = 0
            self.months = 0
            self.days = 0
            self.leapdays = 0
            self.hours = 0
            self.minutes = 0
            self.seconds = 0
            self.microseconds = 0
            self.year = None
            self.month = None
            self.day = None
            self.weekday = None
            self.hour = None
            self.minute = None
            self.second = None
            self.microsecond = None
            self._has_time = 0

            # First guess at the month delta, then adjust until
            # dt2 + delta no longer overshoots dt1.
            months = (dt1.year*12+dt1.month)-(dt2.year*12+dt2.month)
            self._set_months(months)
            dtm = self.__radd__(dt2)
            if dt1 < dt2:
                while dt1 > dtm:
                    months += 1
                    self._set_months(months)
                    dtm = self.__radd__(dt2)
            else:
                while dt1 < dtm:
                    months -= 1
                    self._set_months(months)
                    dtm = self.__radd__(dt2)
            # Remainder below one month is stored as seconds/microseconds;
            # _fix() below normalizes the excess into minutes/hours/days.
            delta = dt1 - dtm
            self.seconds = delta.seconds+delta.days*86400
            self.microseconds = delta.microseconds
        else:
            # "Keyword" mode: store the given relative/absolute information.
            self.years = years
            self.months = months
            self.days = days+weeks*7
            self.leapdays = leapdays
            self.hours = hours
            self.minutes = minutes
            self.seconds = seconds
            self.microseconds = microseconds
            self.year = year
            self.month = month
            self.day = day
            self.hour = hour
            self.minute = minute
            self.second = second
            self.microsecond = microsecond

            if type(weekday) is int:
                self.weekday = weekdays[weekday]
            else:
                self.weekday = weekday

            yday = 0
            if nlyearday:
                yday = nlyearday
            elif yearday:
                yday = yearday
                if yearday > 59:
                    self.leapdays = -1
            if yday:
                # Convert a day-of-year into month/day (non-leap calendar).
                ydayidx = [31,59,90,120,151,181,212,243,273,304,334,366]
                for idx, ydays in enumerate(ydayidx):
                    if yday <= ydays:
                        self.month = idx+1
                        if idx == 0:
                            self.day = yday
                        else:
                            self.day = yday-ydayidx[idx-1]
                        break
                else:
                    raise ValueError("invalid year day (%d)" % yday)

        self._fix()

    def _fix(self):
        # Normalize each relative field into its canonical range, carrying
        # the overflow into the next larger unit.
        if abs(self.microseconds) > 999999:
            s = self.microseconds//abs(self.microseconds)
            div, mod = divmod(self.microseconds*s, 1000000)
            self.microseconds = mod*s
            self.seconds += div*s
        if abs(self.seconds) > 59:
            s = self.seconds//abs(self.seconds)
            div, mod = divmod(self.seconds*s, 60)
            self.seconds = mod*s
            self.minutes += div*s
        if abs(self.minutes) > 59:
            s = self.minutes//abs(self.minutes)
            div, mod = divmod(self.minutes*s, 60)
            self.minutes = mod*s
            self.hours += div*s
        if abs(self.hours) > 23:
            s = self.hours//abs(self.hours)
            div, mod = divmod(self.hours*s, 24)
            self.hours = mod*s
            self.days += div*s
        if abs(self.months) > 11:
            s = self.months//abs(self.months)
            div, mod = divmod(self.months*s, 12)
            self.months = mod*s
            self.years += div*s
        if (self.hours or self.minutes or self.seconds or self.microseconds or
            self.hour is not None or self.minute is not None or
            self.second is not None or self.microsecond is not None):
            self._has_time = 1
        else:
            self._has_time = 0

    def _set_months(self, months):
        # Assign a raw month count, splitting it into years + months.
        self.months = months
        if abs(self.months) > 11:
            s = self.months//abs(self.months)
            div, mod = divmod(self.months*s, 12)
            self.months = mod*s
            self.years = div*s
        else:
            self.years = 0

    def __radd__(self, other):
        # date/datetime + relativedelta: apply absolute fields first, then
        # relative offsets, then resolve the Nth-weekday request (if any).
        if not isinstance(other, datetime.date):
            raise TypeError("unsupported type for add operation")
        elif self._has_time and not isinstance(other, datetime.datetime):
            other = datetime.datetime.fromordinal(other.toordinal())
        year = (self.year or other.year)+self.years
        month = self.month or other.month
        if self.months:
            assert 1 <= abs(self.months) <= 12
            month += self.months
            if month > 12:
                year += 1
                month -= 12
            elif month < 1:
                year -= 1
                month += 12
        # Clamp the day to the target month's length (e.g. Jan 31 + 1 month
        # yields the last day of February).
        day = min(calendar.monthrange(year, month)[1],
                  self.day or other.day)
        repl = {"year": year, "month": month, "day": day}
        for attr in ["hour", "minute", "second", "microsecond"]:
            value = getattr(self, attr)
            if value is not None:
                repl[attr] = value
        days = self.days
        if self.leapdays and month > 2 and calendar.isleap(year):
            days += self.leapdays
        ret = (other.replace(**repl)
               + datetime.timedelta(days=days,
                                    hours=self.hours,
                                    minutes=self.minutes,
                                    seconds=self.seconds,
                                    microseconds=self.microseconds))
        if self.weekday:
            weekday, nth = self.weekday.weekday, self.weekday.n or 1
            jumpdays = (abs(nth)-1)*7
            if nth > 0:
                jumpdays += (7-ret.weekday()+weekday)%7
            else:
                jumpdays += (ret.weekday()-weekday)%7
                jumpdays *= -1
            ret += datetime.timedelta(days=jumpdays)
        return ret

    def __rsub__(self, other):
        return self.__neg__().__radd__(other)

    def __add__(self, other):
        # Combine two relativedeltas; relative fields add, absolute fields
        # prefer the right-hand operand's values.
        if not isinstance(other, relativedelta):
            raise TypeError("unsupported type for add operation")
        return relativedelta(years=other.years+self.years,
                             months=other.months+self.months,
                             days=other.days+self.days,
                             hours=other.hours+self.hours,
                             minutes=other.minutes+self.minutes,
                             seconds=other.seconds+self.seconds,
                             microseconds=other.microseconds+self.microseconds,
                             leapdays=other.leapdays or self.leapdays,
                             year=other.year or self.year,
                             month=other.month or self.month,
                             day=other.day or self.day,
                             weekday=other.weekday or self.weekday,
                             hour=other.hour or self.hour,
                             minute=other.minute or self.minute,
                             second=other.second or self.second,
                             # BUGFIX: was `other.second or self.microsecond`.
                             microsecond=other.microsecond or self.microsecond)

    def __sub__(self, other):
        # BUGFIX: operands were historically reversed (`a - b` computed
        # b - a); relative fields now subtract left-minus-right and the
        # microsecond field no longer reads `other.second` (matches the
        # behavior later adopted by python-dateutil).
        if not isinstance(other, relativedelta):
            raise TypeError("unsupported type for sub operation")
        return relativedelta(years=self.years-other.years,
                             months=self.months-other.months,
                             days=self.days-other.days,
                             hours=self.hours-other.hours,
                             minutes=self.minutes-other.minutes,
                             seconds=self.seconds-other.seconds,
                             microseconds=self.microseconds-other.microseconds,
                             leapdays=self.leapdays or other.leapdays,
                             year=self.year or other.year,
                             month=self.month or other.month,
                             day=self.day or other.day,
                             weekday=self.weekday or other.weekday,
                             hour=self.hour or other.hour,
                             minute=self.minute or other.minute,
                             second=self.second or other.second,
                             microsecond=self.microsecond or other.microsecond)

    def __neg__(self):
        # Negate relative fields; absolute fields are kept as-is.
        return relativedelta(years=-self.years,
                             months=-self.months,
                             days=-self.days,
                             hours=-self.hours,
                             minutes=-self.minutes,
                             seconds=-self.seconds,
                             microseconds=-self.microseconds,
                             leapdays=self.leapdays,
                             year=self.year,
                             month=self.month,
                             day=self.day,
                             weekday=self.weekday,
                             hour=self.hour,
                             minute=self.minute,
                             second=self.second,
                             microsecond=self.microsecond)

    def __nonzero__(self):
        # True when any relative or absolute information is set (Python 2
        # truth protocol; unused under Python 3).
        return not (not self.years and
                    not self.months and
                    not self.days and
                    not self.hours and
                    not self.minutes and
                    not self.seconds and
                    not self.microseconds and
                    not self.leapdays and
                    self.year is None and
                    self.month is None and
                    self.day is None and
                    self.weekday is None and
                    self.hour is None and
                    self.minute is None and
                    self.second is None and
                    self.microsecond is None)

    def __mul__(self, other):
        # Scale relative fields by a number; absolute fields are unaffected.
        f = float(other)
        return relativedelta(years=self.years*f,
                             months=self.months*f,
                             days=self.days*f,
                             hours=self.hours*f,
                             minutes=self.minutes*f,
                             seconds=self.seconds*f,
                             microseconds=self.microseconds*f,
                             leapdays=self.leapdays,
                             year=self.year,
                             month=self.month,
                             day=self.day,
                             weekday=self.weekday,
                             hour=self.hour,
                             minute=self.minute,
                             second=self.second,
                             microsecond=self.microsecond)

    def __eq__(self, other):
        if not isinstance(other, relativedelta):
            return False
        if self.weekday or other.weekday:
            if not self.weekday or not other.weekday:
                return False
            if self.weekday.weekday != other.weekday.weekday:
                return False
            # MO and MO(+1) are considered equivalent occurrences.
            n1, n2 = self.weekday.n, other.weekday.n
            if n1 != n2 and not ((not n1 or n1 == 1) and (not n2 or n2 == 1)):
                return False
        return (self.years == other.years and
                self.months == other.months and
                self.days == other.days and
                self.hours == other.hours and
                self.minutes == other.minutes and
                self.seconds == other.seconds and
                self.leapdays == other.leapdays and
                self.year == other.year and
                self.month == other.month and
                self.day == other.day and
                self.hour == other.hour and
                self.minute == other.minute and
                self.second == other.second and
                self.microsecond == other.microsecond)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __div__(self, other):
        # Python 2 division protocol; delegates to __mul__ with 1/other.
        return self.__mul__(1/float(other))

    def __repr__(self):
        l = []
        for attr in ["years", "months", "days", "leapdays",
                     "hours", "minutes", "seconds", "microseconds"]:
            value = getattr(self, attr)
            if value:
                l.append("%s=%+d" % (attr, value))
        for attr in ["year", "month", "day", "weekday",
                     "hour", "minute", "second", "microsecond"]:
            value = getattr(self, attr)
            if value is not None:
                # repr() replaces the Python-2-only backtick syntax.
                l.append("%s=%s" % (attr, repr(value)))
        return "%s(%s)" % (self.__class__.__name__, ", ".join(l))
| mit |
Stavitsky/nova | nova/tests/unit/virt/xenapi/client/test_objects.py | 80 | 3981 | # Copyright (c) 2014 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova.tests.unit.virt.xenapi import stubs
from nova import utils
from nova.virt.xenapi.client import objects
class XenAPISessionObjectTestCase(stubs.XenAPITestBaseNoDB):
    """Tests for the generic XenAPISessionObject attribute-call proxy."""

    def setUp(self):
        super(XenAPISessionObjectTestCase, self).setUp()
        self.session = mock.Mock()
        self.obj = objects.XenAPISessionObject(self.session, "FAKE")

    def test_call_method_via_attr(self):
        # Attribute access on the wrapper should translate into a
        # "<object>.<method>" XenAPI call on the underlying session.
        self.session.call_xenapi.return_value = "asdf"
        result = self.obj.get_X("ref")
        self.assertEqual(result, "asdf")
        self.session.call_xenapi.assert_called_once_with("FAKE.get_X", "ref")
class ObjectsTestCase(stubs.XenAPITestBaseNoDB):
    """One smoke test per XenAPI object wrapper: each should prefix calls
    with its own XenAPI class name (VM, SR, VDI, ...)."""

    def setUp(self):
        super(ObjectsTestCase, self).setUp()
        self.session = mock.Mock()

    def test_VM(self):
        vm = objects.VM(self.session)
        vm.get_X("ref")
        self.session.call_xenapi.assert_called_once_with("VM.get_X", "ref")

    def test_SR(self):
        sr = objects.SR(self.session)
        sr.get_X("ref")
        self.session.call_xenapi.assert_called_once_with("SR.get_X", "ref")

    def test_VDI(self):
        vdi = objects.VDI(self.session)
        vdi.get_X("ref")
        self.session.call_xenapi.assert_called_once_with("VDI.get_X", "ref")

    def test_VBD(self):
        vbd = objects.VBD(self.session)
        vbd.get_X("ref")
        self.session.call_xenapi.assert_called_once_with("VBD.get_X", "ref")

    def test_PBD(self):
        pbd = objects.PBD(self.session)
        pbd.get_X("ref")
        self.session.call_xenapi.assert_called_once_with("PBD.get_X", "ref")

    def test_PIF(self):
        pif = objects.PIF(self.session)
        pif.get_X("ref")
        self.session.call_xenapi.assert_called_once_with("PIF.get_X", "ref")

    def test_VLAN(self):
        vlan = objects.VLAN(self.session)
        vlan.get_X("ref")
        self.session.call_xenapi.assert_called_once_with("VLAN.get_X", "ref")

    def test_host(self):
        host = objects.Host(self.session)
        host.get_X("ref")
        self.session.call_xenapi.assert_called_once_with("host.get_X", "ref")

    def test_network(self):
        network = objects.Network(self.session)
        network.get_X("ref")
        self.session.call_xenapi.assert_called_once_with("network.get_X",
                                                         "ref")

    def test_pool(self):
        pool = objects.Pool(self.session)
        pool.get_X("ref")
        self.session.call_xenapi.assert_called_once_with("pool.get_X", "ref")
class VBDTestCase(stubs.XenAPITestBaseNoDB):
    """Tests for VBD-specific plug/unplug helpers and their locking."""

    def setUp(self):
        super(VBDTestCase, self).setUp()
        self.session = mock.Mock()
        self.session.VBD = objects.VBD(self.session)

    def test_plug(self):
        self.session.VBD.plug("vbd_ref", "vm_ref")
        self.session.call_xenapi.assert_called_once_with("VBD.plug", "vbd_ref")

    def test_unplug(self):
        self.session.VBD.unplug("vbd_ref", "vm_ref")
        self.session.call_xenapi.assert_called_once_with("VBD.unplug",
                                                         "vbd_ref")

    @mock.patch.object(utils, 'synchronized')
    def test_vbd_plug_check_synchronized(self, mock_synchronized):
        # NOTE(review): despite the "plug" in the name, this exercises
        # unplug(); presumably both share the per-VM lock — confirm upstream.
        self.session.VBD.unplug("vbd_ref", "vm_ref")
        mock_synchronized.assert_called_once_with("xenapi-vbd-vm_ref")
| apache-2.0 |
ricardogsilva/QGIS | tests/src/python/test_qgis_local_server.py | 45 | 6464 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for qgis_local_server.py Python test module
From build dir: ctest -R PyQgsLocalServer -V
Set the following env variables when manually running tests:
QGIS_TEST_SUITE to run specific tests (define in __main__)
QGIS_TEST_VERBOSE to output individual test summary
QGIS_TEST_REPORT to open any failed image check reports in web browser
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Larry Shaffer'
__date__ = '2014/02/16'
__copyright__ = 'Copyright 2014, The QGIS Project'
import os
import sys
import datetime
# Bail out before any POSIX-only server setup is attempted on Windows.
if os.name == 'nt':
    print("TestQgisLocalServer currently doesn't support windows")
    sys.exit(0)
from qgis.core import (
QgsRectangle,
QgsCoordinateReferenceSystem,
QgsRenderChecker
)
from qgis_local_server import getLocalServer
from qgis.testing import (
start_app,
unittest
)
from utilities import openInBrowserTab, getTempfilePath
# Initialize the QGIS application and the shared local test server;
# MAPSERV is reused by every test in this module.
start_app()
MAPSERV = getLocalServer()
# When QGIS_TEST_REPORT is set, failed image checks are collected into
# TESTREPORTS and opened in a browser at the end of the run.
QGIS_TEST_REPORT = 'QGIS_TEST_REPORT' in os.environ
TESTREPORTS = {}
class TestQgisLocalServer(unittest.TestCase):
    """End-to-end checks of the local QGIS server controller (WMS GetMap)."""

    @classmethod
    def setUpClass(cls):
        """Run before all tests"""
        # setup server controller class
        # verify controller can re-initialize processes and temp directory setup
        MAPSERV.startup() # should recreate tempdir
        msg = 'Server processes could not be restarted'
        assert MAPSERV.processes_running(), msg
        msg = 'Temp web directory could not be recreated'
        assert os.path.exists(MAPSERV.temp_dir()), msg
        # install test project components to temporary web directory
        test_proj_dir = os.path.join(MAPSERV.config_dir(), 'test-project')
        MAPSERV.web_dir_install(os.listdir(test_proj_dir), test_proj_dir)
        msg = 'Test project could not be re-copied to temp web directory'
        res = os.path.exists(os.path.join(MAPSERV.web_dir(),
                                          'test-server.qgs'))
        assert res, msg
        # web server should be left running throughout fcgi tests

    @classmethod
    def tearDownClass(cls):
        """Run after all tests"""
        MAPSERV.shutdown()

    def setUp(self):
        """Run before each test."""
        # web server stays up across all tests; only the fcgi process cycles
        MAPSERV.fcgi_server_process().start()

    def tearDown(self):
        """Run after each test."""
        # web server stays up across all tests
        MAPSERV.fcgi_server_process().stop()

    # @unittest.skip('')
    def test_convert_param_instances(self):
        # Rich parameter objects (QgsRectangle, QgsCoordinateReferenceSystem)
        # must be serialized into URL-encoded WMS query parameters.
        params = dict()
        params['LAYERS'] = ['background', 'aoi']
        params['BBOX'] = QgsRectangle(606510, 4823130, 612510, 4827130)
        # creating crs needs QGISAPP instance to access resources/srs.db
        # WGS 84 / UTM zone 13N
        params['CRS'] = QgsCoordinateReferenceSystem(
            32613, QgsCoordinateReferenceSystem.EpsgCrsId)
        params_p = MAPSERV.process_params(params)
        # print repr(params_p)
        param_lyrs = 'LAYERS=background%2Caoi'
        param_crs = 'CRS=EPSG%3A32613'
        param_bbx = 'BBOX=606510%2C4823130%2C612510%2C4827130'
        msg = '\nParameter instances could not be converted'
        assert (param_lyrs in params_p and
                param_crs in params_p and
                param_bbx in params_p), msg

    # @unittest.skip('')
    def test_getmap(self):
        # Render a GetMap request and compare against the reference image.
        test_name = 'qgis_local_server'
        success, img_path, url = MAPSERV.get_map(self.getmap_params())
        msg = '\nLocal server get_map failed'
        assert success, msg
        chk = QgsRenderChecker()
        chk.setControlName('expected_' + test_name)
        # chk.setMapRenderer(None)
        res = chk.compareImages(test_name, 0, img_path)
        if QGIS_TEST_REPORT and not res:  # don't report OK checks
            TESTREPORTS[test_name] = chk.report()
        msg = '\nRender check failed for "{0}"'.format(test_name)
        assert res, msg

    def getmap_params(self):
        # Canonical WMS 1.3.0 GetMap request used by test_getmap.
        return {
            'SERVICE': 'WMS',
            'VERSION': '1.3.0',
            'REQUEST': 'GetMap',
            # 'MAP': abs path, also looks in localserver.web_dir()
            'MAP': 'test-server.qgs',
            # layer stacking order for rendering: bottom, to, top
            'LAYERS': ['background', 'aoi'],  # or 'background,aoi'
            'STYLES': ',',
            'CRS': 'EPSG:32613',  # or QgsCoordinateReferenceSystem obj
            'BBOX': '606510,4823130,612510,4827130',  # or QgsRectangle obj
            'FORMAT': 'image/png',  # or: 'image/png; mode=8bit'
            'WIDTH': '600',
            'HEIGHT': '400',
            'DPI': '72',
            'MAP_RESOLUTION': '72',
            'FORMAT_OPTIONS': 'dpi:72',
            'TRANSPARENT': 'FALSE',
            'IgnoreGetMapUrl': '1'
        }
def run_suite(module, tests):
    """Run the given test names (or the whole module) and return the result.

    This allows for a list of test names to be selectively run via the
    QGIS_TEST_SUITE env var.  Also, ensures unittest verbose output comes at
    end, after debug output.  When QGIS_TEST_REPORT is set and image checks
    failed, the collected reports are written to a temporary HTML page and
    opened in a browser tab.
    """
    loader = unittest.defaultTestLoader
    if 'QGIS_TEST_SUITE' in os.environ and tests:
        suite = loader.loadTestsFromNames(tests, module)
    else:
        suite = loader.loadTestsFromModule(module)
    verb = 2 if 'QGIS_TEST_VERBOSE' in os.environ else 0
    res = unittest.TextTestRunner(verbosity=verb).run(suite)

    if QGIS_TEST_REPORT and len(TESTREPORTS) > 0:
        teststamp = 'Local Server Test Report: ' + \
                    datetime.datetime.now().strftime('%Y-%m-%d %X')
        report = '<html><head><title>{0}</title></head><body>'.format(teststamp)
        report += '\n<h2>Failed Image Tests: {0}</h2>'.format(len(TESTREPORTS))
        for k, v in list(TESTREPORTS.items()):
            report += '\n<h3>{0}</h3>\n{1}'.format(k, v)
        report += '</body></html>'
        tmp_name = getTempfilePath("html")
        # BUG FIX: `report` is a str, but the file was opened in binary mode
        # ('wb'), which raises TypeError on Python 3.  Open in text mode.
        with open(tmp_name, 'w') as temp_file:
            temp_file.write(report)
        openInBrowserTab('file://' + tmp_name)
    return res
if __name__ == '__main__':
    # NOTE: unless QGIS_TEST_SUITE env var is set all tests will be run
    test_suite = [
        'TestQgisLocalServer.test_getmap'
    ]
    test_res = run_suite(sys.modules[__name__], test_suite)
    # Exit non-zero on failure so CI can detect it.
    sys.exit(not test_res.wasSuccessful())
| gpl-2.0 |
asuradaimao/linux | tools/perf/scripts/python/check-perf-trace.py | 1997 | 2539 | # perf script event handlers, generated by perf script -g python
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
unhandled = autodict()
def trace_begin():
	# Called once by perf before any events are processed (Python 2 script).
	print "trace_begin"
	pass
def trace_end():
	# Called once after all events; report any events with no handler.
	print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	common_callchain, vec):
		# Handler for irq:softirq_entry; exercises symbol_str() decoding.
		print_header(event_name, common_cpu, common_secs, common_nsecs,
			common_pid, common_comm)
		print_uncommon(context)
		print "vec=%s\n" % \
		(symbol_str("irq__softirq_entry", "vec", vec)),
def kmem__kmalloc(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	common_callchain, call_site, ptr, bytes_req, bytes_alloc,
	gfp_flags):
	# Per-event handler invoked by perf for each kmem:kmalloc event;
	# exercises flag_str() to render the gfp_flags bitmask symbolically.
	print_header(event_name, common_cpu, common_secs, common_nsecs,
		common_pid, common_comm)
	print_uncommon(context)
	print "call_site=%u, ptr=%u, bytes_req=%u, " \
	"bytes_alloc=%u, gfp_flags=%s\n" % \
	(call_site, ptr, bytes_req, bytes_alloc,
	flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
def trace_unhandled(event_name, context, event_fields_dict):
	# Fallback handler: count events with no dedicated handler function.
	# The first access to a fresh autodict entry raises TypeError on +=,
	# which seeds the counter at 1.
	try:
		unhandled[event_name] += 1
	except TypeError:
		unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
	# One fixed-width line of common fields; trailing comma keeps the
	# cursor on the same line for the handler's own output.
	print "%-20s %5u %05u.%09u %8u %-20s " % \
	(event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
	# Pulls the remaining common_* fields back out of the perf context via
	# the common_pc/common_flags/common_lock_depth helpers.
	print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
		% (common_pc(context), trace_flag_str(common_flags(context)), \
		common_lock_depth(context))
def print_unhandled():
	# Print the table of events that had no dedicated handler; silent when
	# every event was handled.
	keys = unhandled.keys()
	if not keys:
		return
	print "\nunhandled events:\n\n",
	print "%-40s %10s\n" % ("event", "count"),
	print "%-40s %10s\n" % ("----------------------------------------", \
				    "-----------"),
	for event_name in keys:
		print "%-40s %10d\n" % (event_name, unhandled[event_name])
| gpl-2.0 |
ChanChiChoi/scikit-learn | sklearn/linear_model/ransac.py | 191 | 14261 | # coding: utf-8
# Author: Johannes Schönberger
#
# License: BSD 3 clause
import numpy as np
from ..base import BaseEstimator, MetaEstimatorMixin, RegressorMixin, clone
from ..utils import check_random_state, check_array, check_consistent_length
from ..utils.random import sample_without_replacement
from ..utils.validation import check_is_fitted
from .base import LinearRegression
# Smallest representable increment above 1.0 (machine epsilon); used below in
# _dynamic_max_trials to keep the log() arguments strictly positive.
_EPSILON = np.spacing(1)
def _dynamic_max_trials(n_inliers, n_samples, min_samples, probability):
"""Determine number trials such that at least one outlier-free subset is
sampled for the given inlier/outlier ratio.
Parameters
----------
n_inliers : int
Number of inliers in the data.
n_samples : int
Total number of samples in the data.
min_samples : int
Minimum number of samples chosen randomly from original data.
probability : float
Probability (confidence) that one outlier-free sample is generated.
Returns
-------
trials : int
Number of trials.
"""
inlier_ratio = n_inliers / float(n_samples)
nom = max(_EPSILON, 1 - probability)
denom = max(_EPSILON, 1 - inlier_ratio ** min_samples)
if nom == 1:
return 0
if denom == 1:
return float('inf')
return abs(float(np.ceil(np.log(nom) / np.log(denom))))
class RANSACRegressor(BaseEstimator, MetaEstimatorMixin, RegressorMixin):
    """RANSAC (RANdom SAmple Consensus) algorithm.
    RANSAC is an iterative algorithm for the robust estimation of parameters
    from a subset of inliers from the complete data set. More information can
    be found in the general documentation of linear models.
    A detailed description of the algorithm can be found in the documentation
    of the ``linear_model`` sub-package.
    Read more in the :ref:`User Guide <RansacRegression>`.
    Parameters
    ----------
    base_estimator : object, optional
        Base estimator object which implements the following methods:
         * `fit(X, y)`: Fit model to given training data and target values.
         * `score(X, y)`: Returns the mean accuracy on the given test data,
           which is used for the stop criterion defined by `stop_score`.
           Additionally, the score is used to decide which of two equally
           large consensus sets is chosen as the better one.
        If `base_estimator` is None, then
        ``base_estimator=sklearn.linear_model.LinearRegression()`` is used for
        target values of dtype float.
        Note that the current implementation only supports regression
        estimators.
    min_samples : int (>= 1) or float ([0, 1]), optional
        Minimum number of samples chosen randomly from original data. Treated
        as an absolute number of samples for `min_samples >= 1`, treated as a
        relative number `ceil(min_samples * X.shape[0]`) for
        `min_samples < 1`. This is typically chosen as the minimal number of
        samples necessary to estimate the given `base_estimator`. By default a
        ``sklearn.linear_model.LinearRegression()`` estimator is assumed and
        `min_samples` is chosen as ``X.shape[1] + 1``.
    residual_threshold : float, optional
        Maximum residual for a data sample to be classified as an inlier.
        By default the threshold is chosen as the MAD (median absolute
        deviation) of the target values `y`.
    is_data_valid : callable, optional
        This function is called with the randomly selected data before the
        model is fitted to it: `is_data_valid(X, y)`. If its return value is
        False the current randomly chosen sub-sample is skipped.
    is_model_valid : callable, optional
        This function is called with the estimated model and the randomly
        selected data: `is_model_valid(model, X, y)`. If its return value is
        False the current randomly chosen sub-sample is skipped.
        Rejecting samples with this function is computationally costlier than
        with `is_data_valid`. `is_model_valid` should therefore only be used if
        the estimated model is needed for making the rejection decision.
    max_trials : int, optional
        Maximum number of iterations for random sample selection.
    stop_n_inliers : int, optional
        Stop iteration if at least this number of inliers are found.
    stop_score : float, optional
        Stop iteration if score is greater equal than this threshold.
    stop_probability : float in range [0, 1], optional
        RANSAC iteration stops if at least one outlier-free set of the training
        data is sampled in RANSAC. This requires to generate at least N
        samples (iterations)::
            N >= log(1 - probability) / log(1 - e**m)
        where the probability (confidence) is typically set to high value such
        as 0.99 (the default) and e is the current fraction of inliers w.r.t.
        the total number of samples.
    residual_metric : callable, optional
        Metric to reduce the dimensionality of the residuals to 1 for
        multi-dimensional target values ``y.shape[1] > 1``. By default the sum
        of absolute differences is used::
            lambda dy: np.sum(np.abs(dy), axis=1)
    random_state : integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.
    Attributes
    ----------
    estimator_ : object
        Best fitted model (copy of the `base_estimator` object).
    n_trials_ : int
        Number of random selection trials until one of the stop criteria is
        met. It is always ``<= max_trials``.
    inlier_mask_ : bool array of shape [n_samples]
        Boolean mask of inliers classified as ``True``.
    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/RANSAC
    .. [2] http://www.cs.columbia.edu/~belhumeur/courses/compPhoto/ransac.pdf
    .. [3] http://www.bmva.org/bmvc/2009/Papers/Paper355/Paper355.pdf
    """
    def __init__(self, base_estimator=None, min_samples=None,
                 residual_threshold=None, is_data_valid=None,
                 is_model_valid=None, max_trials=100,
                 stop_n_inliers=np.inf, stop_score=np.inf,
                 stop_probability=0.99, residual_metric=None,
                 random_state=None):
        # Parameters are only stored here; all validation and defaulting
        # happens in fit(), per the checks visible there.
        self.base_estimator = base_estimator
        self.min_samples = min_samples
        self.residual_threshold = residual_threshold
        self.is_data_valid = is_data_valid
        self.is_model_valid = is_model_valid
        self.max_trials = max_trials
        self.stop_n_inliers = stop_n_inliers
        self.stop_score = stop_score
        self.stop_probability = stop_probability
        self.residual_metric = residual_metric
        self.random_state = random_state
    def fit(self, X, y):
        """Fit estimator using RANSAC algorithm.
        Parameters
        ----------
        X : array-like or sparse matrix, shape [n_samples, n_features]
            Training data.
        y : array-like, shape = [n_samples] or [n_samples, n_targets]
            Target values.
        Raises
        ------
        ValueError
            If no valid consensus set could be found. This occurs if
            `is_data_valid` and `is_model_valid` return False for all
            `max_trials` randomly chosen sub-samples.
        """
        X = check_array(X, accept_sparse='csr')
        y = check_array(y, ensure_2d=False)
        check_consistent_length(X, y)
        if self.base_estimator is not None:
            base_estimator = clone(self.base_estimator)
        else:
            base_estimator = LinearRegression()
        if self.min_samples is None:
            # assume linear model by default
            min_samples = X.shape[1] + 1
        elif 0 < self.min_samples < 1:
            min_samples = np.ceil(self.min_samples * X.shape[0])
        elif self.min_samples >= 1:
            if self.min_samples % 1 != 0:
                raise ValueError("Absolute number of samples must be an "
                                 "integer value.")
            min_samples = self.min_samples
        else:
            raise ValueError("Value for `min_samples` must be scalar and "
                             "positive.")
        if min_samples > X.shape[0]:
            raise ValueError("`min_samples` may not be larger than number "
                             "of samples ``X.shape[0]``.")
        if self.stop_probability < 0 or self.stop_probability > 1:
            raise ValueError("`stop_probability` must be in range [0, 1].")
        if self.residual_threshold is None:
            # MAD (median absolute deviation)
            residual_threshold = np.median(np.abs(y - np.median(y)))
        else:
            residual_threshold = self.residual_threshold
        if self.residual_metric is None:
            residual_metric = lambda dy: np.sum(np.abs(dy), axis=1)
        else:
            residual_metric = self.residual_metric
        random_state = check_random_state(self.random_state)
        try: # Not all estimator accept a random_state
            base_estimator.set_params(random_state=random_state)
        except ValueError:
            pass
        # Best-consensus-set bookkeeping.  NOTE: score_best starts at np.inf
        # but is never read before its first assignment: the tie branch below
        # requires n_inliers_subset == n_inliers_best, and a zero-inlier
        # subset raises ValueError before reaching that comparison.
        n_inliers_best = 0
        score_best = np.inf
        inlier_mask_best = None
        X_inlier_best = None
        y_inlier_best = None
        # number of data samples
        n_samples = X.shape[0]
        sample_idxs = np.arange(n_samples)
        # NOTE(review): redundant; n_samples was already set two lines above.
        n_samples, _ = X.shape
        # The loop variable is stored on self so the public n_trials_
        # attribute reflects the number of trials actually performed.
        for self.n_trials_ in range(1, self.max_trials + 1):
            # choose random sample set
            subset_idxs = sample_without_replacement(n_samples, min_samples,
                                                     random_state=random_state)
            X_subset = X[subset_idxs]
            y_subset = y[subset_idxs]
            # check if random sample set is valid
            if (self.is_data_valid is not None
                    and not self.is_data_valid(X_subset, y_subset)):
                continue
            # fit model for current random sample set
            base_estimator.fit(X_subset, y_subset)
            # check if estimated model is valid
            if (self.is_model_valid is not None and not
                    self.is_model_valid(base_estimator, X_subset, y_subset)):
                continue
            # residuals of all data for current random sample model
            y_pred = base_estimator.predict(X)
            diff = y_pred - y
            if diff.ndim == 1:
                diff = diff.reshape(-1, 1)
            residuals_subset = residual_metric(diff)
            # classify data into inliers and outliers
            inlier_mask_subset = residuals_subset < residual_threshold
            n_inliers_subset = np.sum(inlier_mask_subset)
            # less inliers -> skip current random sample
            if n_inliers_subset < n_inliers_best:
                continue
            if n_inliers_subset == 0:
                raise ValueError("No inliers found, possible cause is "
                    "setting residual_threshold ({0}) too low.".format(
                    self.residual_threshold))
            # extract inlier data set
            inlier_idxs_subset = sample_idxs[inlier_mask_subset]
            X_inlier_subset = X[inlier_idxs_subset]
            y_inlier_subset = y[inlier_idxs_subset]
            # score of inlier data set
            score_subset = base_estimator.score(X_inlier_subset,
                                                y_inlier_subset)
            # same number of inliers but worse score -> skip current random
            # sample
            if (n_inliers_subset == n_inliers_best
                    and score_subset < score_best):
                continue
            # save current random sample as best sample
            n_inliers_best = n_inliers_subset
            score_best = score_subset
            inlier_mask_best = inlier_mask_subset
            X_inlier_best = X_inlier_subset
            y_inlier_best = y_inlier_subset
            # break if sufficient number of inliers or score is reached
            if (n_inliers_best >= self.stop_n_inliers
                    or score_best >= self.stop_score
                    or self.n_trials_
                       >= _dynamic_max_trials(n_inliers_best, n_samples,
                                              min_samples,
                                              self.stop_probability)):
                break
        # if none of the iterations met the required criteria
        if inlier_mask_best is None:
            raise ValueError(
                "RANSAC could not find valid consensus set, because"
                " either the `residual_threshold` rejected all the samples or"
                " `is_data_valid` and `is_model_valid` returned False for all"
                " `max_trials` randomly ""chosen sub-samples. Consider "
                "relaxing the ""constraints.")
        # estimate final model using all inliers
        base_estimator.fit(X_inlier_best, y_inlier_best)
        self.estimator_ = base_estimator
        self.inlier_mask_ = inlier_mask_best
        return self
    def predict(self, X):
        """Predict using the estimated model.
        This is a wrapper for `estimator_.predict(X)`.
        Parameters
        ----------
        X : numpy array of shape [n_samples, n_features]
        Returns
        -------
        y : array, shape = [n_samples] or [n_samples, n_targets]
            Returns predicted values.
        """
        check_is_fitted(self, 'estimator_')
        return self.estimator_.predict(X)
    def score(self, X, y):
        """Returns the score of the prediction.
        This is a wrapper for `estimator_.score(X, y)`.
        Parameters
        ----------
        X : numpy array or sparse matrix of shape [n_samples, n_features]
            Training data.
        y : array, shape = [n_samples] or [n_samples, n_targets]
            Target values.
        Returns
        -------
        z : float
            Score of the prediction.
        """
        check_is_fitted(self, 'estimator_')
        return self.estimator_.score(X, y)
| bsd-3-clause |
JulienMcJay/eclock | windows/Python27/Lib/site-packages/requests-2.2.1-py2.7.egg/requests/packages/urllib3/connectionpool.py | 223 | 25767 | # urllib3/connectionpool.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import errno
import logging
from socket import error as SocketError, timeout as SocketTimeout
import socket
try: # Python 3
from queue import LifoQueue, Empty, Full
except ImportError:
from Queue import LifoQueue, Empty, Full
import Queue as _ # Platform-specific: Windows
from .exceptions import (
ClosedPoolError,
ConnectTimeoutError,
EmptyPoolError,
HostChangedError,
MaxRetryError,
SSLError,
TimeoutError,
ReadTimeoutError,
ProxyError,
)
from .packages.ssl_match_hostname import CertificateError
from .packages import six
from .connection import (
port_by_scheme,
DummyConnection,
HTTPConnection, HTTPSConnection, VerifiedHTTPSConnection,
HTTPException, BaseSSLError,
)
from .request import RequestMethods
from .response import HTTPResponse
from .util import (
assert_fingerprint,
get_host,
is_connection_dropped,
Timeout,
)
xrange = six.moves.xrange
log = logging.getLogger(__name__)
_Default = object()
## Pool objects
class ConnectionPool(object):
    """
    Abstract parent of all connection pools
    (:class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`):
    stores the target host/port and declares the queue class subclasses use.
    """
    scheme = None
    QueueCls = LifoQueue
    def __init__(self, host, port=None):
        # Strip IPv6 brackets ("[::1]" -> "::1"); httplib rejects them.
        self.host = host.strip('[]')
        self.port = port
    def __str__(self):
        cls_name = type(self).__name__
        return '%s(host=%r, port=%r)' % (cls_name, self.host, self.port)
# This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252
# errno values that mean "no data yet" on a non-blocking socket rather than a
# real failure; _make_request() translates these into ReadTimeoutError.
_blocking_errnos = set([errno.EAGAIN, errno.EWOULDBLOCK])
class HTTPConnectionPool(ConnectionPool, RequestMethods):
    """
    Thread-safe connection pool for one host.
    :param host:
        Host used for this HTTP Connection (e.g. "localhost"), passed into
        :class:`httplib.HTTPConnection`.
    :param port:
        Port used for this HTTP Connection (None is equivalent to 80), passed
        into :class:`httplib.HTTPConnection`.
    :param strict:
        Causes BadStatusLine to be raised if the status line can't be parsed
        as a valid HTTP/1.0 or 1.1 status line, passed into
        :class:`httplib.HTTPConnection`.
        .. note::
           Only works in Python 2. This parameter is ignored in Python 3.
    :param timeout:
        Socket timeout in seconds for each individual connection. This can
        be a float or integer, which sets the timeout for the HTTP request,
        or an instance of :class:`urllib3.util.Timeout` which gives you more
        fine-grained control over request timeouts. After the constructor has
        been parsed, this is always a `urllib3.util.Timeout` object.
    :param maxsize:
        Number of connections to save that can be reused. More than 1 is useful
        in multithreaded situations. If ``block`` is set to false, more
        connections will be created but they will not be saved once they've
        been used.
    :param block:
        If set to True, no more than ``maxsize`` connections will be used at
        a time. When no free connections are available, the call will block
        until a connection has been released. This is a useful side effect for
        particular multithreaded situations where one does not want to use more
        than maxsize connections per host to prevent flooding.
    :param headers:
        Headers to include with all requests, unless other headers are given
        explicitly.
    :param _proxy:
        Parsed proxy URL, should not be used directly, instead, see
        :class:`urllib3.connectionpool.ProxyManager`"
    :param _proxy_headers:
        A dictionary with proxy headers, should not be used directly,
        instead, see :class:`urllib3.connectionpool.ProxyManager`"
    """
    scheme = 'http'
    # Overridden by HTTPSConnectionPool below (scheme='https', HTTPSConnection).
    ConnectionCls = HTTPConnection
    def __init__(self, host, port=None, strict=False,
                 timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1, block=False,
                 headers=None, _proxy=None, _proxy_headers=None):
        ConnectionPool.__init__(self, host, port)
        RequestMethods.__init__(self, headers)
        self.strict = strict
        # This is for backwards compatibility and can be removed once a timeout
        # can only be set to a Timeout object
        if not isinstance(timeout, Timeout):
            timeout = Timeout.from_float(timeout)
        self.timeout = timeout
        self.pool = self.QueueCls(maxsize)
        self.block = block
        self.proxy = _proxy
        self.proxy_headers = _proxy_headers or {}
        # Fill the queue up so that doing get() on it will block properly
        for _ in xrange(maxsize):
            self.pool.put(None)
        # These are mostly for testing and debugging purposes.
        self.num_connections = 0
        self.num_requests = 0
    def _new_conn(self):
        """
        Return a fresh :class:`HTTPConnection`.
        """
        self.num_connections += 1
        log.info("Starting new HTTP connection (%d): %s" %
                 (self.num_connections, self.host))
        extra_params = {}
        if not six.PY3:  # Python 2
            extra_params['strict'] = self.strict
        conn = self.ConnectionCls(host=self.host, port=self.port,
                                  timeout=self.timeout.connect_timeout,
                                  **extra_params)
        if self.proxy is not None:
            # Enable Nagle's algorithm for proxies, to avoid packet
            # fragmentation.
            conn.tcp_nodelay = 0
        return conn
    def _get_conn(self, timeout=None):
        """
        Get a connection. Will return a pooled connection if one is available.
        If no connections are available and :prop:`.block` is ``False``, then a
        fresh connection is returned.
        :param timeout:
            Seconds to wait before giving up and raising
            :class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and
            :prop:`.block` is ``True``.
        """
        conn = None
        try:
            conn = self.pool.get(block=self.block, timeout=timeout)
        except AttributeError: # self.pool is None
            raise ClosedPoolError(self, "Pool is closed.")
        except Empty:
            if self.block:
                raise EmptyPoolError(self,
                                     "Pool reached maximum size and no more "
                                     "connections are allowed.")
            pass  # Oh well, we'll create a new connection then
        # If this is a persistent connection, check if it got disconnected
        if conn and is_connection_dropped(conn):
            log.info("Resetting dropped connection: %s" % self.host)
            conn.close()
        return conn or self._new_conn()
    def _put_conn(self, conn):
        """
        Put a connection back into the pool.
        :param conn:
            Connection object for the current host and port as returned by
            :meth:`._new_conn` or :meth:`._get_conn`.
        If the pool is already full, the connection is closed and discarded
        because we exceeded maxsize. If connections are discarded frequently,
        then maxsize should be increased.
        If the pool is closed, then the connection will be closed and discarded.
        """
        try:
            self.pool.put(conn, block=False)
            return # Everything is dandy, done.
        except AttributeError:
            # self.pool is None.
            pass
        except Full:
            # This should never happen if self.block == True
            log.warning("HttpConnectionPool is full, discarding connection: %s"
                        % self.host)
        # Connection never got put back into the pool, close it.
        if conn:
            conn.close()
    def _get_timeout(self, timeout):
        """ Helper that always returns a :class:`urllib3.util.Timeout` """
        if timeout is _Default:
            return self.timeout.clone()
        if isinstance(timeout, Timeout):
            return timeout.clone()
        else:
            # User passed us an int/float. This is for backwards compatibility,
            # can be removed later
            return Timeout.from_float(timeout)
    def _make_request(self, conn, method, url, timeout=_Default,
                      **httplib_request_kw):
        """
        Perform a request on a given urllib connection object taken from our
        pool.
        :param conn:
            a connection from one of our connection pools
        :param timeout:
            Socket timeout in seconds for the request. This can be a
            float or integer, which will set the same timeout value for
            the socket connect and the socket read, or an instance of
            :class:`urllib3.util.Timeout`, which gives you more fine-grained
            control over your timeouts.
        """
        self.num_requests += 1
        timeout_obj = self._get_timeout(timeout)
        try:
            timeout_obj.start_connect()
            conn.timeout = timeout_obj.connect_timeout
            # conn.request() calls httplib.*.request, not the method in
            # urllib3.request. It also calls makefile (recv) on the socket.
            conn.request(method, url, **httplib_request_kw)
        except SocketTimeout:
            raise ConnectTimeoutError(
                self, "Connection to %s timed out. (connect timeout=%s)" %
                (self.host, timeout_obj.connect_timeout))
        # Reset the timeout for the recv() on the socket
        read_timeout = timeout_obj.read_timeout
        # App Engine doesn't have a sock attr
        if hasattr(conn, 'sock'):
            # In Python 3 socket.py will catch EAGAIN and return None when you
            # try and read into the file pointer created by http.client, which
            # instead raises a BadStatusLine exception. Instead of catching
            # the exception and assuming all BadStatusLine exceptions are read
            # timeouts, check for a zero timeout before making the request.
            if read_timeout == 0:
                raise ReadTimeoutError(
                    self, url,
                    "Read timed out. (read timeout=%s)" % read_timeout)
            if read_timeout is Timeout.DEFAULT_TIMEOUT:
                conn.sock.settimeout(socket.getdefaulttimeout())
            else: # None or a value
                conn.sock.settimeout(read_timeout)
        # Receive the response from the server
        try:
            try: # Python 2.7+, use buffering of HTTP responses
                httplib_response = conn.getresponse(buffering=True)
            except TypeError: # Python 2.6 and older
                httplib_response = conn.getresponse()
        except SocketTimeout:
            raise ReadTimeoutError(
                self, url, "Read timed out. (read timeout=%s)" % read_timeout)
        except BaseSSLError as e:
            # Catch possible read timeouts thrown as SSL errors. If not the
            # case, rethrow the original. We need to do this because of:
            # http://bugs.python.org/issue10272
            if 'timed out' in str(e) or \
               'did not complete (read)' in str(e):  # Python 2.6
                raise ReadTimeoutError(self, url, "Read timed out.")
            raise
        except SocketError as e: # Platform-specific: Python 2
            # See the above comment about EAGAIN in Python 3. In Python 2 we
            # have to specifically catch it and throw the timeout error
            if e.errno in _blocking_errnos:
                raise ReadTimeoutError(
                    self, url,
                    "Read timed out. (read timeout=%s)" % read_timeout)
            raise
        # AppEngine doesn't have a version attr.
        http_version = getattr(conn, '_http_vsn_str', 'HTTP/?')
        log.debug("\"%s %s %s\" %s %s" % (method, url, http_version,
                                          httplib_response.status,
                                          httplib_response.length))
        return httplib_response
    def close(self):
        """
        Close all pooled connections and disable the pool.
        """
        # Disable access to the pool
        old_pool, self.pool = self.pool, None
        try:
            while True:
                conn = old_pool.get(block=False)
                if conn:
                    conn.close()
        except Empty:
            pass # Done.
    def is_same_host(self, url):
        """
        Check if the given ``url`` is a member of the same host as this
        connection pool.
        """
        if url.startswith('/'):
            return True
        # TODO: Add optional support for socket.gethostbyname checking.
        scheme, host, port = get_host(url)
        # Use explicit default port for comparison when none is given
        if self.port and not port:
            port = port_by_scheme.get(scheme)
        elif not self.port and port == port_by_scheme.get(scheme):
            port = None
        return (scheme, host, port) == (self.scheme, self.host, self.port)
    def urlopen(self, method, url, body=None, headers=None, retries=3,
                redirect=True, assert_same_host=True, timeout=_Default,
                pool_timeout=None, release_conn=None, **response_kw):
        """
        Get a connection from the pool and perform an HTTP request. This is the
        lowest level call for making a request, so you'll need to specify all
        the raw details.
        .. note::
           More commonly, it's appropriate to use a convenience method provided
           by :class:`.RequestMethods`, such as :meth:`request`.
        .. note::
           `release_conn` will only behave as expected if
           `preload_content=False` because we want to make
           `preload_content=False` the default behaviour someday soon without
           breaking backwards compatibility.
        :param method:
            HTTP request method (such as GET, POST, PUT, etc.)
        :param body:
            Data to send in the request body (useful for creating
            POST requests, see HTTPConnectionPool.post_url for
            more convenience).
        :param headers:
            Dictionary of custom headers to send, such as User-Agent,
            If-None-Match, etc. If None, pool headers are used. If provided,
            these headers completely replace any pool-specific headers.
        :param retries:
            Number of retries to allow before raising a MaxRetryError exception.
        :param redirect:
            If True, automatically handle redirects (status codes 301, 302,
            303, 307, 308). Each redirect counts as a retry.
        :param assert_same_host:
            If ``True``, will make sure that the host of the pool requests is
            consistent else will raise HostChangedError. When False, you can
            use the pool on an HTTP proxy and request foreign hosts.
        :param timeout:
            If specified, overrides the default timeout for this one
            request. It may be a float (in seconds) or an instance of
            :class:`urllib3.util.Timeout`.
        :param pool_timeout:
            If set and the pool is set to block=True, then this method will
            block for ``pool_timeout`` seconds and raise EmptyPoolError if no
            connection is available within the time period.
        :param release_conn:
            If False, then the urlopen call will not release the connection
            back into the pool once a response is received (but will release if
            you read the entire contents of the response such as when
            `preload_content=True`). This is useful if you're not preloading
            the response's content immediately. You will need to call
            ``r.release_conn()`` on the response ``r`` to return the connection
            back into the pool. If None, it takes the value of
            ``response_kw.get('preload_content', True)``.
        :param \**response_kw:
            Additional parameters are passed to
            :meth:`urllib3.response.HTTPResponse.from_httplib`
        """
        if headers is None:
            headers = self.headers
        if retries < 0:
            raise MaxRetryError(self, url)
        if release_conn is None:
            release_conn = response_kw.get('preload_content', True)
        # Check host
        if assert_same_host and not self.is_same_host(url):
            raise HostChangedError(self, url, retries - 1)
        conn = None
        # Merge the proxy headers. Only do this in HTTP. We have to copy the
        # headers dict so we can safely change it without those changes being
        # reflected in anyone else's copy.
        if self.scheme == 'http':
            headers = headers.copy()
            headers.update(self.proxy_headers)
        try:
            # Request a connection from the queue
            conn = self._get_conn(timeout=pool_timeout)
            # Make the request on the httplib connection object
            httplib_response = self._make_request(conn, method, url,
                                                  timeout=timeout,
                                                  body=body, headers=headers)
            # If we're going to release the connection in ``finally:``, then
            # the request doesn't need to know about the connection. Otherwise
            # it will also try to release it and we'll have a double-release
            # mess.
            # (i.e. response_conn is conn when release_conn is False, else False)
            response_conn = not release_conn and conn
            # Import httplib's response into our own wrapper object
            response = HTTPResponse.from_httplib(httplib_response,
                                                 pool=self,
                                                 connection=response_conn,
                                                 **response_kw)
            # else:
            #     The connection will be put back into the pool when
            #     ``response.release_conn()`` is called (implicitly by
            #     ``response.read()``)
        except Empty:
            # Timed out by queue
            raise EmptyPoolError(self, "No pool connections are available.")
        except BaseSSLError as e:
            raise SSLError(e)
        except CertificateError as e:
            # Name mismatch
            raise SSLError(e)
        except TimeoutError as e:
            # Connection broken, discard.
            conn = None
            # Save the error off for retry logic.
            err = e
            if retries == 0:
                raise
        except (HTTPException, SocketError) as e:
            # Connection broken, discard. It will be replaced next _get_conn().
            conn = None
            # This is necessary so we can access e below
            err = e
            if retries == 0:
                if isinstance(e, SocketError) and self.proxy is not None:
                    raise ProxyError('Cannot connect to proxy. '
                                     'Socket error: %s.' % e)
                else:
                    raise MaxRetryError(self, url, e)
        finally:
            if release_conn:
                # Put the connection back to be reused. If the connection is
                # expired then it will be None, which will get replaced with a
                # fresh connection during _get_conn.
                self._put_conn(conn)
        if not conn:
            # Try again
            log.warn("Retrying (%d attempts remain) after connection "
                     "broken by '%r': %s" % (retries, err, url))
            return self.urlopen(method, url, body, headers, retries - 1,
                                redirect, assert_same_host,
                                timeout=timeout, pool_timeout=pool_timeout,
                                release_conn=release_conn, **response_kw)
        # Handle redirect?
        redirect_location = redirect and response.get_redirect_location()
        if redirect_location:
            if response.status == 303:
                # See Other: the redirected request must be a GET.
                method = 'GET'
            log.info("Redirecting %s -> %s" % (url, redirect_location))
            return self.urlopen(method, redirect_location, body, headers,
                                retries - 1, redirect, assert_same_host,
                                timeout=timeout, pool_timeout=pool_timeout,
                                release_conn=release_conn, **response_kw)
        return response
class HTTPSConnectionPool(HTTPConnectionPool):
    """
    Same as :class:`.HTTPConnectionPool`, but HTTPS.
    When Python is compiled with the :mod:`ssl` module, then
    :class:`.VerifiedHTTPSConnection` is used, which *can* verify certificates,
    instead of :class:`.HTTPSConnection`.
    :class:`.VerifiedHTTPSConnection` uses one of ``assert_fingerprint``,
    ``assert_hostname`` and ``host`` in this order to verify connections.
    If ``assert_hostname`` is False, no verification is done.
    The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs`` and
    ``ssl_version`` are only used if :mod:`ssl` is available and are fed into
    :meth:`urllib3.util.ssl_wrap_socket` to upgrade the connection socket
    into an SSL socket.
    """
    scheme = 'https'
    ConnectionCls = HTTPSConnection
    def __init__(self, host, port=None,
                 strict=False, timeout=None, maxsize=1,
                 block=False, headers=None,
                 _proxy=None, _proxy_headers=None,
                 key_file=None, cert_file=None, cert_reqs=None,
                 ca_certs=None, ssl_version=None,
                 assert_hostname=None, assert_fingerprint=None):
        HTTPConnectionPool.__init__(self, host, port, strict, timeout, maxsize,
                                    block, headers, _proxy, _proxy_headers)
        self.key_file = key_file
        self.cert_file = cert_file
        self.cert_reqs = cert_reqs
        self.ca_certs = ca_certs
        self.ssl_version = ssl_version
        self.assert_hostname = assert_hostname
        self.assert_fingerprint = assert_fingerprint
    def _prepare_conn(self, conn):
        """
        Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket`
        and establish the tunnel if proxy is used.
        """
        if isinstance(conn, VerifiedHTTPSConnection):
            conn.set_cert(key_file=self.key_file,
                          cert_file=self.cert_file,
                          cert_reqs=self.cert_reqs,
                          ca_certs=self.ca_certs,
                          assert_hostname=self.assert_hostname,
                          assert_fingerprint=self.assert_fingerprint)
            conn.ssl_version = self.ssl_version
        if self.proxy is not None:
            # Python 2.7+
            try:
                set_tunnel = conn.set_tunnel
            except AttributeError: # Platform-specific: Python 2.6
                set_tunnel = conn._set_tunnel
            set_tunnel(self.host, self.port, self.proxy_headers)
            # Establish tunnel connection early, because otherwise httplib
            # would improperly set Host: header to proxy's IP:port.
            conn.connect()
        return conn
    def _new_conn(self):
        """
        Return a fresh :class:`httplib.HTTPSConnection`.
        """
        self.num_connections += 1
        log.info("Starting new HTTPS connection (%d): %s"
                 % (self.num_connections, self.host))
        if not self.ConnectionCls or self.ConnectionCls is DummyConnection:
            # Platform-specific: Python without ssl
            raise SSLError("Can't connect to HTTPS URL because the SSL "
                           "module is not available.")
        # When a proxy is configured, TCP-connect to the proxy itself;
        # _prepare_conn() then tunnels on to the real host.
        actual_host = self.host
        actual_port = self.port
        if self.proxy is not None:
            actual_host = self.proxy.host
            actual_port = self.proxy.port
        extra_params = {}
        if not six.PY3:  # Python 2
            extra_params['strict'] = self.strict
        conn = self.ConnectionCls(host=actual_host, port=actual_port,
                                  timeout=self.timeout.connect_timeout,
                                  **extra_params)
        if self.proxy is not None:
            # Enable Nagle's algorithm for proxies, to avoid packet
            # fragmentation.
            conn.tcp_nodelay = 0
        return self._prepare_conn(conn)
def connection_from_url(url, **kw):
    """
    Given a url, return an :class:`.ConnectionPool` instance of its host.

    This is a shortcut for not having to parse out the scheme, host, and port
    of the url before creating an :class:`.ConnectionPool` instance.

    :param url:
        Absolute URL string that must include the scheme. Port is optional.

    :param kw:
        Additional parameters forwarded to the pool constructor
        (timeout, maxsize, headers, etc.).

    Example: ::

        >>> conn = connection_from_url('http://google.com/')
        >>> r = conn.request('GET', '/')
    """
    scheme, host, port = get_host(url)
    # https gets the TLS-capable pool; everything else falls back to plain HTTP.
    pool_cls = HTTPSConnectionPool if scheme == 'https' else HTTPConnectionPool
    return pool_cls(host, port=port, **kw)
| gpl-2.0 |
kenglishhi/gae-django-sandbox | django/contrib/gis/geos/tests/test_geos_mutation.py | 68 | 5446 | # Copyright (c) 2008-2009 Aryeh Leib Taurog, all rights reserved.
# Modified from original contribution by Aryeh Leib Taurog, which was
# released under the New BSD license.
import unittest
import django.utils.copycompat as copy
from django.contrib.gis.geos import *
from django.contrib.gis.geos.error import GEOSIndexError
def getItem(o, i):
    """Return ``o[i]`` (first-class wrapper around indexing)."""
    return o[i]


def delItem(o, i):
    """Delete ``o[i]`` (first-class wrapper around ``del``)."""
    del o[i]


def setItem(o, i, v):
    """Assign ``o[i] = v`` (first-class wrapper around item assignment)."""
    o[i] = v
# First-class accessors over the read-only GEOS geometry API.  They exist so
# geos_function_tests (below) can collect them by the ``api_get_`` prefix and
# apply each one to a pair of geometries that should compare equal.

def api_get_distance(x):
    return x.distance(Point(-200, -200))


def api_get_buffer(x):
    return x.buffer(10)


def api_get_geom_typeid(x):
    return x.geom_typeid


def api_get_num_coords(x):
    return x.num_coords


def api_get_centroid(x):
    return x.centroid


def api_get_empty(x):
    return x.empty


def api_get_valid(x):
    return x.valid


def api_get_simple(x):
    return x.simple


def api_get_ring(x):
    return x.ring


def api_get_boundary(x):
    return x.boundary


def api_get_convex_hull(x):
    return x.convex_hull


def api_get_extent(x):
    return x.extent


def api_get_area(x):
    return x.area


def api_get_length(x):
    return x.length
# Collect every api_get_* helper defined above into one list, so each test
# case can sweep the full read-only GEOS API across two geometries that are
# expected to be equivalent.  The filter runs once, at module import time,
# over the module namespace.
geos_function_tests = [ val for name, val in vars().items()
                        if hasattr(val, '__call__')
                        and name.startswith('api_get_') ]
class GEOSMutationTest(unittest.TestCase):
    """
    Tests Pythonic Mutability of Python GEOS geometry wrappers
    get/set/delitem on a slice, normal list methods

    Each mutation test exercises both a directly-constructed geometry and a
    WKT-parsed one (``fromstr``), then verifies the mutated coordinates.
    """
    def test00_GEOSIndexException(self):
        'Testing Geometry GEOSIndexError'
        p = Point(1,2)
        # Indices -2..1 are in range for a 2D point; 2 and -3 must raise.
        for i in range(-2,2): p._checkindex(i)
        self.assertRaises(GEOSIndexError, p._checkindex, 2)
        self.assertRaises(GEOSIndexError, p._checkindex, -3)

    def test01_PointMutations(self):
        'Testing Point mutations'
        for p in (Point(1,2,3), fromstr('POINT (1 2 3)')):
            self.assertEqual(p._get_single_external(1), 2.0, 'Point _get_single_external')
            # _set_single: replace one ordinate in place
            p._set_single(0,100)
            self.assertEqual(p.coords, (100.0,2.0,3.0), 'Point _set_single')
            # _set_list: replace the whole coordinate list (drops to 2D here)
            p._set_list(2,(50,3141))
            self.assertEqual(p.coords, (50.0,3141.0), 'Point _set_list')

    def test02_PointExceptions(self):
        'Testing Point exceptions'
        # Points accept only 2 or 3 ordinates.
        self.assertRaises(TypeError, Point, range(1))
        self.assertRaises(TypeError, Point, range(4))

    def test03_PointApi(self):
        'Testing Point API'
        q = Point(4,5,3)
        for p in (Point(1,2,3), fromstr('POINT (1 2 3)')):
            # Slice-assign the first two ordinates, then p should equal q
            # under every read-only API function.
            p[0:2] = [4,5]
            for f in geos_function_tests:
                self.assertEqual(f(q), f(p), 'Point ' + f.__name__)

    def test04_LineStringMutations(self):
        'Testing LineString mutations'
        for ls in (LineString((1,0),(4,1),(6,-1)),
                   fromstr('LINESTRING (1 0,4 1,6 -1)')):
            self.assertEqual(ls._get_single_external(1), (4.0,1.0), 'LineString _get_single_external')
            # _set_single: replace one vertex
            ls._set_single(0,(-50,25))
            self.assertEqual(ls.coords, ((-50.0,25.0),(4.0,1.0),(6.0,-1.0)), 'LineString _set_single')
            # _set_list: replace the whole vertex list
            ls._set_list(2, ((-50.0,25.0),(6.0,-1.0)))
            self.assertEqual(ls.coords, ((-50.0,25.0),(6.0,-1.0)), 'LineString _set_list')
            # A fresh LineString built from the mutated coords must agree
            # with the mutated original under the whole read-only API.
            lsa = LineString(ls.coords)
            for f in geos_function_tests:
                self.assertEqual(f(lsa), f(ls), 'LineString ' + f.__name__)

    def test05_Polygon(self):
        'Testing Polygon mutations'
        for pg in (Polygon(((1,0),(4,1),(6,-1),(8,10),(1,0)),
                           ((5,4),(6,4),(6,3),(5,4))),
                   fromstr('POLYGON ((1 0,4 1,6 -1,8 10,1 0),(5 4,6 4,6 3,5 4))')):
            # Index 0 is the exterior ring, index 1 the first interior ring.
            self.assertEqual(pg._get_single_external(0),
                             LinearRing((1,0),(4,1),(6,-1),(8,10),(1,0)),
                             'Polygon _get_single_external(0)')
            self.assertEqual(pg._get_single_external(1),
                             LinearRing((5,4),(6,4),(6,3),(5,4)),
                             'Polygon _get_single_external(1)')
            # _set_list: replace both rings at once
            pg._set_list(2, (((1,2),(10,0),(12,9),(-1,15),(1,2)),
                             ((4,2),(5,2),(5,3),(4,2))))
            self.assertEqual(pg.coords,
                             (((1.0,2.0),(10.0,0.0),(12.0,9.0),(-1.0,15.0),(1.0,2.0)),
                              ((4.0,2.0),(5.0,2.0),(5.0,3.0),(4.0,2.0))),
                             'Polygon _set_list')
            lsa = Polygon(*pg.coords)
            for f in geos_function_tests:
                self.assertEqual(f(lsa), f(pg), 'Polygon ' + f.__name__)

    def test06_Collection(self):
        'Testing Collection mutations'
        for mp in (MultiPoint(*map(Point,((3,4),(-1,2),(5,-4),(2,8)))),
                   fromstr('MULTIPOINT (3 4,-1 2,5 -4,2 8)')):
            self.assertEqual(mp._get_single_external(2), Point(5,-4), 'Collection _get_single_external')
            # _set_list: replace all member points (shrinks from 4 to 3)
            mp._set_list(3, map(Point,((5,5),(3,-2),(8,1))))
            self.assertEqual(mp.coords, ((5.0,5.0),(3.0,-2.0),(8.0,1.0)), 'Collection _set_list')
            lsa = MultiPoint(*map(Point,((5,5),(3,-2),(8,1))))
            for f in geos_function_tests:
                self.assertEqual(f(lsa), f(mp), 'MultiPoint ' + f.__name__)
def suite():
    """Build a TestSuite holding every GEOSMutationTest case."""
    return unittest.TestSuite([unittest.makeSuite(GEOSMutationTest)])


def run(verbosity=2):
    """Run the mutation suite under a text runner (verbose by default)."""
    unittest.TextTestRunner(verbosity=verbosity).run(suite())


if __name__ == '__main__':
    run()
| apache-2.0 |
katchengli/tech-interview-prep | interview_cake/ic3.py | 1 | 1451 | #constraint: list_of_ints will always have at least 3 integers
#can have negative numbers
def highest_product_three_ints(list_of_ints):
    """Return the highest product obtainable from exactly three of the ints.

    Constraints: ``list_of_ints`` always has at least 3 integers and may
    contain negatives.

    The previous implementation destructively ``.remove()``d elements from
    the caller's list; this version works on a sorted copy, so the input is
    left untouched.

    :param list_of_ints: sequence of at least three integers
    :return: the maximum product of any three distinct elements
    """
    s = sorted(list_of_ints)
    # The best triple is either the three largest values, or the single
    # largest value times the two smallest (two big negatives multiply into
    # a big positive).  With exactly 3 elements both candidates coincide.
    return max(s[-1] * s[-2] * s[-3], s[-1] * s[0] * s[1])
# Smoke checks: print the computed highest product for each sample input.
# Expected values are recorded alongside each case for eyeballing the output.
_SAMPLE_CASES = (
    ([3, 4, 5, 6], 120),
    ([-10, -10, 5, 6], 600),
    ([-60, -100, -1, -2], -120),
    ([600, 200, -1, -2], 1200),
    ([1000, -1000, -1, 1], 1000000),
    ([1000, -1000, -1, 1, 800], 1000000),
    ([1000, -1000, -1, 1, -800], 800000000),
)
for _ints, _expected in _SAMPLE_CASES:
    print(highest_product_three_ints(_ints))
| apache-2.0 |
harshita-gupta/Harvard-FRSEM-Catalog-2016-17 | flask/lib/python2.7/site-packages/requests/packages/chardet/langthaimodel.py | 2930 | 11275 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually does not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# The following result for thai was collected from a limited sample (1M).
# Character Mapping Table:
# Machine-generated table: maps each TIS-620 byte value (index) to its
# frequency-order rank, as collected from a ~1M Thai sample.  Special ranks:
# 255 = control char, 254 = CR/LF, 253 = punctuation/symbol, 252 = digit.
# Do not hand-edit the numbers.
TIS620CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255,  # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,  # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253,  # 30
253,182,106,107,100,183,184,185,101, 94,186,187,108,109,110,111,  # 40
188,189,190, 89, 95,112,113,191,192,193,194,253,253,253,253,253,  # 50
253, 64, 72, 73,114, 74,115,116,102, 81,201,117, 90,103, 78, 82,  # 60
 96,202, 91, 79, 84,104,105, 97, 98, 92,203,253,253,253,253,253,  # 70
209,210,211,212,213, 88,214,215,216,217,218,219,220,118,221,222,
223,224, 99, 85, 83,225,226,227,228,229,230,231,232,233,234,235,
236,  5, 30,237, 24,238, 75,  8, 26, 52, 34, 51,119, 47, 58, 57,
 49, 53, 55, 43, 20, 19, 44, 14, 48,  3, 17, 25, 39, 62, 31, 54,
 45,  9, 16,  2, 61, 15,239, 12, 42, 46, 18, 21, 76,  4, 66, 63,
 22, 10,  1, 36, 23, 13, 40, 27, 32, 35, 86,240,241,242,243,244,
 11, 28, 41, 29, 33,245, 50, 37,  6,  7, 67, 77, 38, 93,246,247,
 68, 56, 59, 65, 69, 60, 70, 80, 71, 87,248,249,250,251,252,253,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 92.6386%
# first 1024 sequences:7.3177%
# rest sequences: 1.0230%
# negative sequences: 0.0436%
ThaiLangModel = (
0,1,3,3,3,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,0,0,3,3,3,0,3,3,3,3,
0,3,3,0,0,0,1,3,0,3,3,2,3,3,0,1,2,3,3,3,3,0,2,0,2,0,0,3,2,1,2,2,
3,0,3,3,2,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,0,3,2,3,0,2,2,2,3,
0,2,3,0,0,0,0,1,0,1,2,3,1,1,3,2,2,0,1,1,0,0,1,0,0,0,0,0,0,0,1,1,
3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2,3,3,2,3,2,3,3,2,2,2,
3,1,2,3,0,3,3,2,2,1,2,3,3,1,2,0,1,3,0,1,0,0,1,0,0,0,0,0,0,0,1,1,
3,3,2,2,3,3,3,3,1,2,3,3,3,3,3,2,2,2,2,3,3,2,2,3,3,2,2,3,2,3,2,2,
3,3,1,2,3,1,2,2,3,3,1,0,2,1,0,0,3,1,2,1,0,0,1,0,0,0,0,0,0,1,0,1,
3,3,3,3,3,3,2,2,3,3,3,3,2,3,2,2,3,3,2,2,3,2,2,2,2,1,1,3,1,2,1,1,
3,2,1,0,2,1,0,1,0,1,1,0,1,1,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,3,2,3,2,3,3,2,2,3,2,3,3,2,3,1,1,2,3,2,2,2,3,2,2,2,2,2,1,2,1,
2,2,1,1,3,3,2,1,0,1,2,2,0,1,3,0,0,0,1,1,0,0,0,0,0,2,3,0,0,2,1,1,
3,3,2,3,3,2,0,0,3,3,0,3,3,0,2,2,3,1,2,2,1,1,1,0,2,2,2,0,2,2,1,1,
0,2,1,0,2,0,0,2,0,1,0,0,1,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,2,3,3,2,0,0,3,3,0,2,3,0,2,1,2,2,2,2,1,2,0,0,2,2,2,0,2,2,1,1,
0,2,1,0,2,0,0,2,0,1,1,0,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,2,3,2,3,2,0,2,2,1,3,2,1,3,2,1,2,3,2,2,3,0,2,3,2,2,1,2,2,2,2,
1,2,2,0,0,0,0,2,0,1,2,0,1,1,1,0,1,0,3,1,1,0,0,0,0,0,0,0,0,0,1,0,
3,3,2,3,3,2,3,2,2,2,3,2,2,3,2,2,1,2,3,2,2,3,1,3,2,2,2,3,2,2,2,3,
3,2,1,3,0,1,1,1,0,2,1,1,1,1,1,0,1,0,1,1,0,0,0,0,0,0,0,0,0,2,0,0,
1,0,0,3,0,3,3,3,3,3,0,0,3,0,2,2,3,3,3,3,3,0,0,0,1,1,3,0,0,0,0,2,
0,0,1,0,0,0,0,0,0,0,2,3,0,0,0,3,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
2,0,3,3,3,3,0,0,2,3,0,0,3,0,3,3,2,3,3,3,3,3,0,0,3,3,3,0,0,0,3,3,
0,0,3,0,0,0,0,2,0,0,2,1,1,3,0,0,1,0,0,2,3,0,1,0,0,0,0,0,0,0,1,0,
3,3,3,3,2,3,3,3,3,3,3,3,1,2,1,3,3,2,2,1,2,2,2,3,1,1,2,0,2,1,2,1,
2,2,1,0,0,0,1,1,0,1,0,1,1,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,
3,0,2,1,2,3,3,3,0,2,0,2,2,0,2,1,3,2,2,1,2,1,0,0,2,2,1,0,2,1,2,2,
0,1,1,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,3,3,1,1,3,0,2,3,1,1,3,2,1,1,2,0,2,2,3,2,1,1,1,1,1,2,
3,0,0,1,3,1,2,1,2,0,3,0,0,0,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,
3,3,1,1,3,2,3,3,3,1,3,2,1,3,2,1,3,2,2,2,2,1,3,3,1,2,1,3,1,2,3,0,
2,1,1,3,2,2,2,1,2,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,
3,3,2,3,2,3,3,2,3,2,3,2,3,3,2,1,0,3,2,2,2,1,2,2,2,1,2,2,1,2,1,1,
2,2,2,3,0,1,3,1,1,1,1,0,1,1,0,2,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,3,2,2,1,1,3,2,3,2,3,2,0,3,2,2,1,2,0,2,2,2,1,2,2,2,2,1,
3,2,1,2,2,1,0,2,0,1,0,0,1,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,2,3,1,2,3,3,2,2,3,0,1,1,2,0,3,3,2,2,3,0,1,1,3,0,0,0,0,
3,1,0,3,3,0,2,0,2,1,0,0,3,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,2,3,2,3,3,0,1,3,1,1,2,1,2,1,1,3,1,1,0,2,3,1,1,1,1,1,1,1,1,
3,1,1,2,2,2,2,1,1,1,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,2,2,1,1,2,1,3,3,2,3,2,2,3,2,2,3,1,2,2,1,2,0,3,2,1,2,2,2,2,2,1,
3,2,1,2,2,2,1,1,1,1,0,0,1,1,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,1,3,3,0,2,1,0,3,2,0,0,3,1,0,1,1,0,1,0,0,0,0,0,1,
1,0,0,1,0,3,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,2,2,2,3,0,0,1,3,0,3,2,0,3,2,2,3,3,3,3,3,1,0,2,2,2,0,2,2,1,2,
0,2,3,0,0,0,0,1,0,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,0,2,3,1,3,3,2,3,3,0,3,3,0,3,2,2,3,2,3,3,3,0,0,2,2,3,0,1,1,1,3,
0,0,3,0,0,0,2,2,0,1,3,0,1,2,2,2,3,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,
3,2,3,3,2,0,3,3,2,2,3,1,3,2,1,3,2,0,1,2,2,0,2,3,2,1,0,3,0,0,0,0,
3,0,0,2,3,1,3,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,3,2,2,2,1,2,0,1,3,1,1,3,1,3,0,0,2,1,1,1,1,2,1,1,1,0,2,1,0,1,
1,2,0,0,0,3,1,1,0,0,0,0,1,0,1,0,0,1,0,1,0,0,0,0,0,3,1,0,0,0,1,0,
3,3,3,3,2,2,2,2,2,1,3,1,1,1,2,0,1,1,2,1,2,1,3,2,0,0,3,1,1,1,1,1,
3,1,0,2,3,0,0,0,3,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,2,3,0,3,3,0,2,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,3,1,3,0,0,1,2,0,0,2,0,3,3,2,3,3,3,2,3,0,0,2,2,2,0,0,0,2,2,
0,0,1,0,0,0,0,3,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
0,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,1,2,3,1,3,3,0,0,1,0,3,0,0,0,0,0,
0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,1,2,3,1,2,3,1,0,3,0,2,2,1,0,2,1,1,2,0,1,0,0,1,1,1,1,0,1,0,0,
1,0,0,0,0,1,1,0,3,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,0,1,1,1,3,1,2,2,2,2,2,2,1,1,1,1,0,3,1,0,1,3,1,1,1,1,
1,1,0,2,0,1,3,1,1,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,1,
3,0,2,2,1,3,3,2,3,3,0,1,1,0,2,2,1,2,1,3,3,1,0,0,3,2,0,0,0,0,2,1,
0,1,0,0,0,0,1,2,0,1,1,3,1,1,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,3,0,0,1,0,0,0,3,0,0,3,0,3,1,0,1,1,1,3,2,0,0,0,3,0,0,0,0,2,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,1,3,2,1,3,3,1,2,2,0,1,2,1,0,1,2,0,0,0,0,0,3,0,0,0,3,0,0,0,0,
3,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,2,0,3,3,3,2,2,0,1,1,0,1,3,0,0,0,2,2,0,0,0,0,3,1,0,1,0,0,0,
0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,2,3,1,2,0,0,2,1,0,3,1,0,1,2,0,1,1,1,1,3,0,0,3,1,1,0,2,2,1,1,
0,2,0,0,0,0,0,1,0,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,3,1,2,0,0,2,2,0,1,2,0,1,0,1,3,1,2,1,0,0,0,2,0,3,0,0,0,1,0,
0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,1,2,2,0,0,0,2,0,2,1,0,1,1,0,1,1,1,2,1,0,0,1,1,1,0,2,1,1,1,
0,1,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,1,
0,0,0,2,0,1,3,1,1,1,1,0,0,0,0,3,2,0,1,0,0,0,1,2,0,0,0,1,0,0,0,0,
0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,3,3,3,3,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,2,3,2,2,0,0,0,1,0,0,0,0,2,3,2,1,2,2,3,0,0,0,2,3,1,0,0,0,1,1,
0,0,1,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0,
3,3,2,2,0,1,0,0,0,0,2,0,2,0,1,0,0,0,1,1,0,0,0,2,1,0,1,0,1,1,0,0,
0,1,0,2,0,0,1,0,3,0,1,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,1,0,0,1,0,0,0,0,0,1,1,2,0,0,0,0,1,0,0,1,3,1,0,0,0,0,1,1,0,0,
0,1,0,0,0,0,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,
3,3,1,1,1,1,2,3,0,0,2,1,1,1,1,1,0,2,1,1,0,0,0,2,1,0,1,2,1,1,0,1,
2,1,0,3,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,3,1,0,0,0,0,0,0,0,3,0,0,0,3,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,
0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,2,0,0,0,0,0,0,1,2,1,0,1,1,0,2,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,2,0,0,0,1,3,0,1,0,0,0,2,0,0,0,0,0,0,0,1,2,0,0,0,0,0,
3,3,0,0,1,1,2,0,0,1,2,1,0,1,1,1,0,1,1,0,0,2,1,1,0,1,0,0,1,1,1,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,1,0,0,0,0,1,0,0,0,0,3,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,0,0,1,1,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,0,1,2,0,1,2,0,0,1,1,0,2,0,1,0,0,1,0,0,0,0,1,0,0,0,2,0,0,0,0,
1,0,0,1,0,1,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,0,0,0,0,0,0,0,1,1,0,1,1,0,2,1,3,0,0,0,0,1,1,0,0,0,0,0,0,0,3,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,1,0,0,2,0,0,2,0,0,1,1,2,0,0,1,1,0,0,0,1,0,0,0,1,1,0,0,0,
1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,1,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,3,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,
1,0,0,0,0,0,0,0,0,1,0,0,0,0,2,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,1,0,0,2,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
)
# Model descriptor consumed by chardet's single-byte charset prober: bundles
# the byte->frequency-order map and the bigram precedence matrix above under
# the "TIS-620" charset name.  mTypicalPositiveRatio is the fraction of the
# sample covered by the most frequent sequences (see stats comments above).
TIS620ThaiModel = {
    'charToOrderMap': TIS620CharToOrderMap,
    'precedenceMatrix': ThaiLangModel,
    'mTypicalPositiveRatio': 0.926386,
    'keepEnglishLetter': False,
    'charsetName': "TIS-620"
}
# flake8: noqa
| mit |
alexryndin/ambari | ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEYARN/package/scripts/yarn_client.py | 3 | 1123 | """
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ambari Agent
"""
# Python Imports
# Local Imports
from resource_management.libraries.script.dummy import Dummy
class YarnClient(Dummy):
    """
    Dummy script that simulates a client component.
    """
    def __init__(self):
        super(YarnClient, self).__init__()
        # Must match the client component name declared in the FAKEYARN
        # stack definition so Ambari routes commands to this script.
        self.component_name = "FAKEYARN_CLIENT"


if __name__ == "__main__":
    # Dummy.execute() dispatches the command Ambari passes on the CLI.
    YarnClient().execute()
| apache-2.0 |
mitocw/latex2edx | latex2edx/test/test_custom_html.py | 1 | 2044 | import os
import unittest
from lxml import etree
from io import StringIO
from latex2edx.main import latex2edx
from latex2edx.test.util import make_temp_directory
class MakeTeX(object):
    """Wrap a LaTeX fragment in a minimal edX-ready document.

    The fragment is sandwiched between a fixed preamble (12pt article class
    plus the edXpsl package) and ``\\end{document}``; :attr:`fp` exposes the
    assembled source as a file-like object for feeding into latex2edx.
    """
    def __init__(self, tex):
        preamble = ("\\documentclass[12pt]{article}\n"
                    "\\usepackage{edXpsl}\n"
                    "\n"
                    "\\begin{document}")
        self.buf = preamble + tex + "\\end{document}"

    @property
    def fp(self):
        """File-like (StringIO) view of the assembled LaTeX document."""
        return StringIO(self.buf)
class TestCustomHtml(unittest.TestCase):
    # Verifies that a \begin{html}{span}[...] environment inside edXtext is
    # translated into a literal <span> element with its attributes preserved
    # in the generated xbundle.
    def test_custom_html1(self):
        tex = ('\\begin{edXcourse}{1.00x}{1.00x Fall 2013}[url_name=2013_Fall]\n'
               '\n'
               '\\begin{edXchapter}{Unit 1}[start="2013-11-22"]\n'
               '\n'
               '\\begin{edXsection}{Introduction}\n'
               '\n'
               '\\begin{edXtext}{My Name}[url_name=text_url_name]\n'
               'Hello world!\n\n'
               '\n'
               '\\begin{html}{span}[style="display:none;color:red;border-style:solid" data-x=3]\n'
               'this is red text with a border\n'
               '\\end{html}\n\n'
               '\n'
               '\\end{edXtext}\n'
               '\\end{edXsection}\n'
               '\\end{edXchapter}\n'
               '\\end{edXcourse}\n'
               )
        with make_temp_directory() as tmdir:
            # latex2edx resolves relative paths against the CWD, so move
            # into the scratch directory for the duration of the run.
            os.chdir(tmdir)
            fp = MakeTeX(tex).fp
            l2e = latex2edx(tmdir + '/test.tex', fp=fp, do_images=False, output_dir=tmdir)
            l2e.xhtml2xbundle()
            print("xbundle = ")
            print(str(l2e.xb))
            print()
            # self.assertIn(r'<html display_name="My Name" url_name="text_url_name">', str(l2e.xb))
            xml = etree.fromstring(str(l2e.xb))
            html = xml.find('.//html')
            self.assertTrue(html.get('display_name') == 'My Name')
            self.assertIn('<span style="display:none;color:red;border-style:solid" data-x="3">this is red text with a border </span>', str(l2e.xb))


if __name__ == '__main__':
    unittest.main()
| agpl-3.0 |
jaberg/nengo | examples/question.py | 2 | 2681 | D=16
subdim=4
N=100
seed=7
import nef.nef_theano as nef
import nef.convolution
import hrr
import math
import random
random.seed(seed)
vocab=hrr.Vocabulary(D,max_similarity=0.1)
net=nef.Network('Question Answering') #Create the network object
net.make('A',1,D,mode='direct') #Make some pseudo populations (so they
#run well on less powerful machines):
#1 neuron, 16 dimensions, direct mode
net.make('B',1,D,mode='direct')
net.make_array('C',N,D/subdim,dimensions=subdim,quick=True,radius=1.0/math.sqrt(D))
#Make a real population, with 100 neurons per
#array element and D/subdim elements in the array
#each with subdim dimensions, set the radius as
#appropriate for multiplying things of this
#dimension
net.make('E',1,D,mode='direct')
net.make('F',1,D,mode='direct')
conv1=nef.convolution.make_convolution(net,'*','A','B','C',N,
quick=True) #Make a convolution network using the construct populations
conv2=nef.convolution.make_convolution(net,'/','C','E','F',N,
invert_second=True,quick=True) #Make a 'correlation' network (by using
#convolution, but inverting the second
#input)
CIRCLE=vocab.parse('CIRCLE').v #Add elements to the vocabulary to use
BLUE=vocab.parse('BLUE').v
RED=vocab.parse('RED').v
SQUARE=vocab.parse('SQUARE').v
ZERO=[0]*D
# Create the inputs
inputA={}
inputA[0.0]=RED
inputA[0.5]=BLUE
inputA[1.0]=RED
inputA[1.5]=BLUE
inputA[2.0]=RED
inputA[2.5]=BLUE
inputA[3.0]=RED
inputA[3.5]=BLUE
inputA[4.0]=RED
inputA[4.5]=BLUE
net.make_input('inputA',inputA)
net.connect('inputA','A')
inputB={}
inputB[0.0]=CIRCLE
inputB[0.5]=SQUARE
inputB[1.0]=CIRCLE
inputB[1.5]=SQUARE
inputB[2.0]=CIRCLE
inputB[2.5]=SQUARE
inputB[3.0]=CIRCLE
inputB[3.5]=SQUARE
inputB[4.0]=CIRCLE
inputB[4.5]=SQUARE
net.make_input('inputB',inputB)
net.connect('inputB','B')
inputE={}
inputE[0.0]=ZERO
inputE[0.2]=CIRCLE
inputE[0.35]=RED
inputE[0.5]=ZERO
inputE[0.7]=SQUARE
inputE[0.85]=BLUE
inputE[1.0]=ZERO
inputE[1.2]=CIRCLE
inputE[1.35]=RED
inputE[1.5]=ZERO
inputE[1.7]=SQUARE
inputE[1.85]=BLUE
inputE[2.0]=ZERO
inputE[2.2]=CIRCLE
inputE[2.35]=RED
inputE[2.5]=ZERO
inputE[2.7]=SQUARE
inputE[2.85]=BLUE
inputE[3.0]=ZERO
inputE[3.2]=CIRCLE
inputE[3.35]=RED
inputE[3.5]=ZERO
inputE[3.7]=SQUARE
inputE[3.85]=BLUE
inputE[4.0]=ZERO
inputE[4.2]=CIRCLE
inputE[4.35]=RED
inputE[4.5]=ZERO
inputE[4.7]=SQUARE
inputE[4.85]=BLUE
net.make_input('inputE',inputE)
net.connect('inputE','E')
net.add_to_nengo()
| mit |
huguesv/PTVS | Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/chardet/universaldetector.py | 244 | 12485 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
"""
Module containing the UniversalDetector detector class, which is the primary
class a user of ``chardet`` should use.
:author: Mark Pilgrim (initial port to Python)
:author: Shy Shalom (original C code)
:author: Dan Blanchard (major refactoring for 3.0)
:author: Ian Cordasco
"""
import codecs
import logging
import re
from .charsetgroupprober import CharSetGroupProber
from .enums import InputState, LanguageFilter, ProbingState
from .escprober import EscCharSetProber
from .latin1prober import Latin1Prober
from .mbcsgroupprober import MBCSGroupProber
from .sbcsgroupprober import SBCSGroupProber
class UniversalDetector(object):
    """
    The ``UniversalDetector`` class underlies the ``chardet.detect`` function
    and coordinates all of the different charset probers.

    To get a ``dict`` containing an encoding and its confidence, you can simply
    run:

    .. code::

            u = UniversalDetector()
            u.feed(some_bytes)
            u.close()
            detected = u.result
    """
    # Probers scoring at or below this confidence are ignored by close().
    MINIMUM_THRESHOLD = 0.20
    HIGH_BYTE_DETECTOR = re.compile(b'[\x80-\xFF]')
    ESC_DETECTOR = re.compile(b'(\033|~{)')
    WIN_BYTE_DETECTOR = re.compile(b'[\x80-\x9F]')
    # ISO-8859 results are upgraded to their Windows superset codepage when
    # Windows-specific bytes (0x80-0x9F) were seen in the stream.
    ISO_WIN_MAP = {'iso-8859-1': 'Windows-1252',
                   'iso-8859-2': 'Windows-1250',
                   'iso-8859-5': 'Windows-1251',
                   'iso-8859-6': 'Windows-1256',
                   'iso-8859-7': 'Windows-1253',
                   'iso-8859-8': 'Windows-1255',
                   'iso-8859-9': 'Windows-1254',
                   'iso-8859-13': 'Windows-1257'}

    def __init__(self, lang_filter=LanguageFilter.ALL):
        # Probers are created lazily in feed(); reset() gives every
        # attribute its real initial value.
        self._esc_charset_prober = None
        self._charset_probers = []
        self.result = None
        self.done = None
        self._got_data = None
        self._input_state = None
        self._last_char = None
        self.lang_filter = lang_filter
        self.logger = logging.getLogger(__name__)
        self._has_win_bytes = None
        self.reset()

    def reset(self):
        """
        Reset the UniversalDetector and all of its probers back to their
        initial states.  This is called by ``__init__``, so you only need to
        call this directly in between analyses of different documents.
        """
        self.result = {'encoding': None, 'confidence': 0.0, 'language': None}
        self.done = False
        self._got_data = False
        self._has_win_bytes = False
        self._input_state = InputState.PURE_ASCII
        self._last_char = b''
        if self._esc_charset_prober:
            self._esc_charset_prober.reset()
        for prober in self._charset_probers:
            prober.reset()

    def feed(self, byte_str):
        """
        Takes a chunk of a document and feeds it through all of the relevant
        charset probers.

        After calling ``feed``, you can check the value of the ``done``
        attribute to see if you need to continue feeding the
        ``UniversalDetector`` more data, or if it has made a prediction
        (in the ``result`` attribute).

        .. note::
           You should always call ``close`` when you're done feeding in your
           document if ``done`` is not already ``True``.
        """
        if self.done:
            return
        if not len(byte_str):
            return
        if not isinstance(byte_str, bytearray):
            byte_str = bytearray(byte_str)
        # First check for known BOMs, since these are guaranteed to be correct
        if not self._got_data:
            # If the data starts with BOM, we know it is UTF
            if byte_str.startswith(codecs.BOM_UTF8):
                # EF BB BF  UTF-8 with BOM
                self.result = {'encoding': "UTF-8-SIG",
                               'confidence': 1.0,
                               'language': ''}
            elif byte_str.startswith((codecs.BOM_UTF32_LE,
                                      codecs.BOM_UTF32_BE)):
                # FF FE 00 00  UTF-32, little-endian BOM
                # 00 00 FE FF  UTF-32, big-endian BOM
                self.result = {'encoding': "UTF-32",
                               'confidence': 1.0,
                               'language': ''}
            elif byte_str.startswith(b'\xFE\xFF\x00\x00'):
                # FE FF 00 00  UCS-4, unusual octet order BOM (3412)
                self.result = {'encoding': "X-ISO-10646-UCS-4-3412",
                               'confidence': 1.0,
                               'language': ''}
            elif byte_str.startswith(b'\x00\x00\xFF\xFE'):
                # 00 00 FF FE  UCS-4, unusual octet order BOM (2143)
                self.result = {'encoding': "X-ISO-10646-UCS-4-2143",
                               'confidence': 1.0,
                               'language': ''}
            elif byte_str.startswith((codecs.BOM_LE, codecs.BOM_BE)):
                # FF FE  UTF-16, little endian BOM
                # FE FF  UTF-16, big endian BOM
                self.result = {'encoding': "UTF-16",
                               'confidence': 1.0,
                               'language': ''}
            self._got_data = True
            if self.result['encoding'] is not None:
                self.done = True
                return
        # If none of those matched and we've only see ASCII so far, check
        # for high bytes and escape sequences
        if self._input_state == InputState.PURE_ASCII:
            if self.HIGH_BYTE_DETECTOR.search(byte_str):
                self._input_state = InputState.HIGH_BYTE
            elif self._input_state == InputState.PURE_ASCII and \
                    self.ESC_DETECTOR.search(self._last_char + byte_str):
                self._input_state = InputState.ESC_ASCII
        self._last_char = byte_str[-1:]
        # If we've seen escape sequences, use the EscCharSetProber, which
        # uses a simple state machine to check for known escape sequences in
        # HZ and ISO-2022 encodings, since those are the only encodings that
        # use such sequences.
        if self._input_state == InputState.ESC_ASCII:
            if not self._esc_charset_prober:
                self._esc_charset_prober = EscCharSetProber(self.lang_filter)
            if self._esc_charset_prober.feed(byte_str) == ProbingState.FOUND_IT:
                self.result = {'encoding':
                               self._esc_charset_prober.charset_name,
                               'confidence':
                               self._esc_charset_prober.get_confidence(),
                               'language':
                               self._esc_charset_prober.language}
                self.done = True
        # If we've seen high bytes (i.e., those with values greater than 127),
        # we need to do more complicated checks using all our multi-byte and
        # single-byte probers that are left.  The single-byte probers
        # use character bigram distributions to determine the encoding, whereas
        # the multi-byte probers use a combination of character unigram and
        # bigram distributions.
        elif self._input_state == InputState.HIGH_BYTE:
            if not self._charset_probers:
                self._charset_probers = [MBCSGroupProber(self.lang_filter)]
                # If we're checking non-CJK encodings, use single-byte prober
                if self.lang_filter & LanguageFilter.NON_CJK:
                    self._charset_probers.append(SBCSGroupProber())
                self._charset_probers.append(Latin1Prober())
            for prober in self._charset_probers:
                if prober.feed(byte_str) == ProbingState.FOUND_IT:
                    self.result = {'encoding': prober.charset_name,
                                   'confidence': prober.get_confidence(),
                                   'language': prober.language}
                    self.done = True
                    break
            if self.WIN_BYTE_DETECTOR.search(byte_str):
                self._has_win_bytes = True

    def close(self):
        """
        Stop analyzing the current document and come up with a final
        prediction.

        :returns:  The ``result`` attribute, a ``dict`` with the keys
                   `encoding`, `confidence`, and `language`.
        """
        # Don't bother with checks if we're already done
        if self.done:
            return self.result
        self.done = True
        if not self._got_data:
            self.logger.debug('no data received!')
        # Default to ASCII if it is all we've seen so far
        elif self._input_state == InputState.PURE_ASCII:
            self.result = {'encoding': 'ascii',
                           'confidence': 1.0,
                           'language': ''}
        # If we have seen non-ASCII, return the best that met MINIMUM_THRESHOLD
        elif self._input_state == InputState.HIGH_BYTE:
            prober_confidence = None
            max_prober_confidence = 0.0
            max_prober = None
            for prober in self._charset_probers:
                if not prober:
                    continue
                prober_confidence = prober.get_confidence()
                if prober_confidence > max_prober_confidence:
                    max_prober_confidence = prober_confidence
                    max_prober = prober
            if max_prober and (max_prober_confidence > self.MINIMUM_THRESHOLD):
                charset_name = max_prober.charset_name
                lower_charset_name = max_prober.charset_name.lower()
                confidence = max_prober.get_confidence()
                # Use Windows encoding name instead of ISO-8859 if we saw any
                # extra Windows-specific bytes
                if lower_charset_name.startswith('iso-8859'):
                    if self._has_win_bytes:
                        charset_name = self.ISO_WIN_MAP.get(lower_charset_name,
                                                            charset_name)
                self.result = {'encoding': charset_name,
                               'confidence': confidence,
                               'language': max_prober.language}
        # Log all prober confidences if none met MINIMUM_THRESHOLD
        if self.logger.getEffectiveLevel() == logging.DEBUG:
            if self.result['encoding'] is None:
                self.logger.debug('no probers hit minimum threshold')
                for group_prober in self._charset_probers:
                    if not group_prober:
                        continue
                    if isinstance(group_prober, CharSetGroupProber):
                        for prober in group_prober.probers:
                            self.logger.debug('%s %s confidence = %s',
                                              prober.charset_name,
                                              prober.language,
                                              prober.get_confidence())
                    else:
                        self.logger.debug('%s %s confidence = %s',
                                          prober.charset_name,
                                          prober.language,
                                          prober.get_confidence())
        return self.result
| apache-2.0 |
MattCrystal/yolo-computing-machine | Documentation/target/tcm_mod_builder.py | 4981 | 41422 | #!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: nab@kernel.org
#
import os, sys
import subprocess as sub
import string
import re
import optparse
tcm_dir = ""
fabric_ops = []
fabric_mod_dir = ""
fabric_mod_port = ""
fabric_mod_init_port = ""
def tcm_mod_err(msg):
	"""Print an error message to stdout and abort the script with exit status 1."""
	# Parenthesized form works under both Python 2 and Python 3, unlike the
	# original 'print msg' statement.
	print(msg)
	sys.exit(1)
def tcm_mod_create_module_subdir(fabric_mod_dir_var):
	"""Create the fabric module source directory.

	Returns 1 if the directory already exists; otherwise creates it and
	returns None, aborting via tcm_mod_err() if the mkdir fails.
	"""
	if os.path.isdir(fabric_mod_dir_var):
		return 1
	print("Creating fabric_mod_dir: " + fabric_mod_dir_var)
	# os.mkdir() returns None and reports failure by raising OSError, so the
	# original 'ret = os.mkdir(...); if ret:' check could never fire.
	try:
		os.mkdir(fabric_mod_dir_var)
	except OSError:
		tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)
	return
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
	"""Generate <fabric_mod_name>_base.h with Fibre Channel naming.

	Emits the nacl/tpg/lport struct definitions and sets the module-level
	fabric_mod_port = "lport" / fabric_mod_init_port = "nport" names used
	by the later generator stages.
	"""
	global fabric_mod_port
	global fabric_mod_init_port
	buf = ""
	f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
	print("Writing file: " + f)
	buf = "#define " + fabric_mod_name.upper() + "_VERSION	\"v0.1\"\n"
	buf += "#define " + fabric_mod_name.upper() + "_NAMELEN	32\n"
	buf += "\n"
	buf += "struct " + fabric_mod_name + "_nacl {\n"
	buf += "	/* Binary World Wide unique Port Name for FC Initiator Nport */\n"
	buf += "	u64 nport_wwpn;\n"
	buf += "	/* ASCII formatted WWPN for FC Initiator Nport */\n"
	buf += "	char nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
	buf += "	/* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
	buf += "	struct se_node_acl se_node_acl;\n"
	buf += "};\n"
	buf += "\n"
	buf += "struct " + fabric_mod_name + "_tpg {\n"
	buf += "	/* FC lport target portal group tag for TCM */\n"
	buf += "	u16 lport_tpgt;\n"
	buf += "	/* Pointer back to " + fabric_mod_name + "_lport */\n"
	buf += "	struct " + fabric_mod_name + "_lport *lport;\n"
	buf += "	/* Returned by " + fabric_mod_name + "_make_tpg() */\n"
	buf += "	struct se_portal_group se_tpg;\n"
	buf += "};\n"
	buf += "\n"
	buf += "struct " + fabric_mod_name + "_lport {\n"
	buf += "	/* SCSI protocol the lport is providing */\n"
	buf += "	u8 lport_proto_id;\n"
	buf += "	/* Binary World Wide unique Port Name for FC Target Lport */\n"
	buf += "	u64 lport_wwpn;\n"
	buf += "	/* ASCII formatted WWPN for FC Target Lport */\n"
	buf += "	char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
	buf += "	/* Returned by " + fabric_mod_name + "_make_lport() */\n"
	buf += "	struct se_wwn lport_wwn;\n"
	buf += "};\n"
	# open() raises IOError on failure and file.write() returns no useful
	# status, so the original 'if not p' / 'if ret' checks were dead code.
	# The context manager also guarantees the file is closed.
	with open(f, 'w') as p:
		p.write(buf)
	fabric_mod_port = "lport"
	fabric_mod_init_port = "nport"
	return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
	"""Generate <fabric_mod_name>_base.h with SAS naming.

	Emits the nacl/tpg/tport struct definitions and sets the module-level
	fabric_mod_port = "tport" / fabric_mod_init_port = "iport" names used
	by the later generator stages.
	"""
	global fabric_mod_port
	global fabric_mod_init_port
	buf = ""
	f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
	print("Writing file: " + f)
	buf = "#define " + fabric_mod_name.upper() + "_VERSION	\"v0.1\"\n"
	buf += "#define " + fabric_mod_name.upper() + "_NAMELEN	32\n"
	buf += "\n"
	buf += "struct " + fabric_mod_name + "_nacl {\n"
	buf += "	/* Binary World Wide unique Port Name for SAS Initiator port */\n"
	buf += "	u64 iport_wwpn;\n"
	buf += "	/* ASCII formatted WWPN for Sas Initiator port */\n"
	buf += "	char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
	buf += "	/* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
	buf += "	struct se_node_acl se_node_acl;\n"
	buf += "};\n\n"
	buf += "struct " + fabric_mod_name + "_tpg {\n"
	buf += "	/* SAS port target portal group tag for TCM */\n"
	buf += "	u16 tport_tpgt;\n"
	buf += "	/* Pointer back to " + fabric_mod_name + "_tport */\n"
	buf += "	struct " + fabric_mod_name + "_tport *tport;\n"
	buf += "	/* Returned by " + fabric_mod_name + "_make_tpg() */\n"
	buf += "	struct se_portal_group se_tpg;\n"
	buf += "};\n\n"
	buf += "struct " + fabric_mod_name + "_tport {\n"
	buf += "	/* SCSI protocol the tport is providing */\n"
	buf += "	u8 tport_proto_id;\n"
	buf += "	/* Binary World Wide unique Port Name for SAS Target port */\n"
	buf += "	u64 tport_wwpn;\n"
	buf += "	/* ASCII formatted WWPN for SAS Target port */\n"
	buf += "	char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
	buf += "	/* Returned by " + fabric_mod_name + "_make_tport() */\n"
	buf += "	struct se_wwn tport_wwn;\n"
	buf += "};\n"
	# open() raises on failure and file.write() returns no useful status in
	# Python 2, so the original 'if not p' / 'if ret' checks were dead code.
	with open(f, 'w') as p:
		p.write(buf)
	fabric_mod_port = "tport"
	fabric_mod_init_port = "iport"
	return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
	"""Generate <fabric_mod_name>_base.h with iSCSI naming.

	Unlike FC/SAS there are no binary WWPN members -- only ASCII
	InitiatorName/TargetName strings.  Sets the module-level
	fabric_mod_port = "tport" / fabric_mod_init_port = "iport".
	"""
	global fabric_mod_port
	global fabric_mod_init_port
	buf = ""
	f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
	print("Writing file: " + f)
	buf = "#define " + fabric_mod_name.upper() + "_VERSION	\"v0.1\"\n"
	buf += "#define " + fabric_mod_name.upper() + "_NAMELEN	32\n"
	buf += "\n"
	buf += "struct " + fabric_mod_name + "_nacl {\n"
	buf += "	/* ASCII formatted InitiatorName */\n"
	buf += "	char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
	buf += "	/* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
	buf += "	struct se_node_acl se_node_acl;\n"
	buf += "};\n\n"
	buf += "struct " + fabric_mod_name + "_tpg {\n"
	buf += "	/* iSCSI target portal group tag for TCM */\n"
	buf += "	u16 tport_tpgt;\n"
	buf += "	/* Pointer back to " + fabric_mod_name + "_tport */\n"
	buf += "	struct " + fabric_mod_name + "_tport *tport;\n"
	buf += "	/* Returned by " + fabric_mod_name + "_make_tpg() */\n"
	buf += "	struct se_portal_group se_tpg;\n"
	buf += "};\n\n"
	buf += "struct " + fabric_mod_name + "_tport {\n"
	buf += "	/* SCSI protocol the tport is providing */\n"
	buf += "	u8 tport_proto_id;\n"
	buf += "	/* ASCII formatted TargetName for IQN */\n"
	buf += "	char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
	buf += "	/* Returned by " + fabric_mod_name + "_make_tport() */\n"
	buf += "	struct se_wwn tport_wwn;\n"
	buf += "};\n"
	# open() raises on failure and file.write() returns no useful status in
	# Python 2, so the original 'if not p' / 'if ret' checks were dead code.
	with open(f, 'w') as p:
		p.write(buf)
	fabric_mod_port = "tport"
	fabric_mod_init_port = "iport"
	return
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
	"""Dispatch to the protocol-specific _base.h generator.

	proto_ident must be one of "FC", "SAS" or "iSCSI"; any other value
	prints a diagnostic and terminates the script with status 1.
	"""
	if proto_ident == "FC":
		tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
	elif proto_ident == "SAS":
		tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
	elif proto_ident == "iSCSI":
		tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
	else:
		print("Unsupported proto_ident: " + proto_ident)
		sys.exit(1)
	return
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
	"""Generate <fabric_mod_name>_configfs.c, the configfs glue for the module.

	Emits the NodeACL/TPG/WWN make+drop callbacks, the
	target_core_fabric_ops table, configfs (de)registration and module
	init/exit.  proto_ident ("FC"/"SAS"/"iSCSI") selects whether the
	WWPN-parsing stubs are included.  Relies on the module-level
	fabric_mod_port / fabric_mod_init_port names set by the
	tcm_mod_build_*_include() helpers.
	"""
	buf = ""
	f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
	print("Writing file: " + f)
	buf = "#include <linux/module.h>\n"
	buf += "#include <linux/moduleparam.h>\n"
	buf += "#include <linux/version.h>\n"
	buf += "#include <generated/utsrelease.h>\n"
	buf += "#include <linux/utsname.h>\n"
	buf += "#include <linux/init.h>\n"
	buf += "#include <linux/slab.h>\n"
	buf += "#include <linux/kthread.h>\n"
	buf += "#include <linux/types.h>\n"
	buf += "#include <linux/string.h>\n"
	buf += "#include <linux/configfs.h>\n"
	buf += "#include <linux/ctype.h>\n"
	buf += "#include <asm/unaligned.h>\n\n"
	buf += "#include <target/target_core_base.h>\n"
	buf += "#include <target/target_core_fabric.h>\n"
	buf += "#include <target/target_core_fabric_configfs.h>\n"
	buf += "#include <target/target_core_configfs.h>\n"
	buf += "#include <target/configfs_macros.h>\n\n"
	buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
	buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
	buf += "/* Local pointer to allocated TCM configfs fabric module */\n"
	buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n"
	buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n"
	buf += "	struct se_portal_group *se_tpg,\n"
	buf += "	struct config_group *group,\n"
	buf += "	const char *name)\n"
	buf += "{\n"
	buf += "	struct se_node_acl *se_nacl, *se_nacl_new;\n"
	buf += "	struct " + fabric_mod_name + "_nacl *nacl;\n"
	if proto_ident == "FC" or proto_ident == "SAS":
		buf += "	u64 wwpn = 0;\n"
	buf += "	u32 nexus_depth;\n\n"
	buf += "	/* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
	buf += "		return ERR_PTR(-EINVAL); */\n"
	buf += "	se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
	buf += "	if (!se_nacl_new)\n"
	buf += "		return ERR_PTR(-ENOMEM);\n"
	buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
	buf += "	nexus_depth = 1;\n"
	buf += "	/*\n"
	buf += "	 * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n"
	buf += "	 * when converting a NodeACL from demo mode -> explict\n"
	buf += "	 */\n"
	buf += "	se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n"
	buf += "				name, nexus_depth);\n"
	buf += "	if (IS_ERR(se_nacl)) {\n"
	buf += "		" + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n"
	buf += "		return se_nacl;\n"
	buf += "	}\n"
	buf += "	/*\n"
	buf += "	 * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n"
	buf += "	 */\n"
	buf += "	nacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
	if proto_ident == "FC" or proto_ident == "SAS":
		buf += "	nacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n"
	buf += "	/* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
	buf += "	return se_nacl;\n"
	buf += "}\n\n"
	buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n"
	buf += "{\n"
	buf += "	struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n"
	buf += "				struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
	buf += "	core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);\n"
	buf += "	kfree(nacl);\n"
	buf += "}\n\n"
	buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
	buf += "	struct se_wwn *wwn,\n"
	buf += "	struct config_group *group,\n"
	buf += "	const char *name)\n"
	buf += "{\n"
	buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
	buf += "			struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
	buf += "	struct " + fabric_mod_name + "_tpg *tpg;\n"
	buf += "	unsigned long tpgt;\n"
	buf += "	int ret;\n\n"
	buf += "	if (strstr(name, \"tpgt_\") != name)\n"
	buf += "		return ERR_PTR(-EINVAL);\n"
	buf += "	if (strict_strtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
	buf += "		return ERR_PTR(-EINVAL);\n\n"
	buf += "	tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
	buf += "	if (!tpg) {\n"
	buf += "		printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
	buf += "		return ERR_PTR(-ENOMEM);\n"
	buf += "	}\n"
	buf += "	tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
	buf += "	tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
	buf += "	ret = core_tpg_register(&" + fabric_mod_name + "_fabric_configfs->tf_ops, wwn,\n"
	buf += "				&tpg->se_tpg, (void *)tpg,\n"
	buf += "				TRANSPORT_TPG_TYPE_NORMAL);\n"
	buf += "	if (ret < 0) {\n"
	buf += "		kfree(tpg);\n"
	buf += "		return NULL;\n"
	buf += "	}\n"
	buf += "	return &tpg->se_tpg;\n"
	buf += "}\n\n"
	buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
	buf += "{\n"
	buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
	buf += "				struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
	buf += "	core_tpg_deregister(se_tpg);\n"
	buf += "	kfree(tpg);\n"
	buf += "}\n\n"
	buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
	buf += "	struct target_fabric_configfs *tf,\n"
	buf += "	struct config_group *group,\n"
	buf += "	const char *name)\n"
	buf += "{\n"
	buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
	if proto_ident == "FC" or proto_ident == "SAS":
		buf += "	u64 wwpn = 0;\n\n"
	buf += "	/* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
	buf += "		return ERR_PTR(-EINVAL); */\n\n"
	buf += "	" + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
	buf += "	if (!" + fabric_mod_port + ") {\n"
	buf += "		printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
	buf += "		return ERR_PTR(-ENOMEM);\n"
	buf += "	}\n"
	if proto_ident == "FC" or proto_ident == "SAS":
		buf += "	" + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
	buf += "	/* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
	buf += "	return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
	buf += "}\n\n"
	buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
	buf += "{\n"
	buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
	buf += "				struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
	buf += "	kfree(" + fabric_mod_port + ");\n"
	buf += "}\n\n"
	buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
	buf += "	struct target_fabric_configfs *tf,\n"
	buf += "	char *page)\n"
	buf += "{\n"
	buf += "	return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
	buf += "		\"on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
	buf += "		utsname()->machine);\n"
	buf += "}\n\n"
	buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"
	buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
	buf += "	&" + fabric_mod_name + "_wwn_version.attr,\n"
	buf += "	NULL,\n"
	buf += "};\n\n"
	buf += "static struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
	buf += "	.get_fabric_name		= " + fabric_mod_name + "_get_fabric_name,\n"
	buf += "	.get_fabric_proto_ident		= " + fabric_mod_name + "_get_fabric_proto_ident,\n"
	buf += "	.tpg_get_wwn			= " + fabric_mod_name + "_get_fabric_wwn,\n"
	buf += "	.tpg_get_tag			= " + fabric_mod_name + "_get_tag,\n"
	buf += "	.tpg_get_default_depth		= " + fabric_mod_name + "_get_default_depth,\n"
	buf += "	.tpg_get_pr_transport_id	= " + fabric_mod_name + "_get_pr_transport_id,\n"
	buf += "	.tpg_get_pr_transport_id_len	= " + fabric_mod_name + "_get_pr_transport_id_len,\n"
	buf += "	.tpg_parse_pr_out_transport_id	= " + fabric_mod_name + "_parse_pr_out_transport_id,\n"
	buf += "	.tpg_check_demo_mode		= " + fabric_mod_name + "_check_false,\n"
	buf += "	.tpg_check_demo_mode_cache	= " + fabric_mod_name + "_check_true,\n"
	buf += "	.tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
	buf += "	.tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
	buf += "	.tpg_alloc_fabric_acl		= " + fabric_mod_name + "_alloc_fabric_acl,\n"
	buf += "	.tpg_release_fabric_acl		= " + fabric_mod_name + "_release_fabric_acl,\n"
	buf += "	.tpg_get_inst_index		= " + fabric_mod_name + "_tpg_get_inst_index,\n"
	buf += "	.release_cmd			= " + fabric_mod_name + "_release_cmd,\n"
	buf += "	.shutdown_session		= " + fabric_mod_name + "_shutdown_session,\n"
	buf += "	.close_session			= " + fabric_mod_name + "_close_session,\n"
	buf += "	.stop_session			= " + fabric_mod_name + "_stop_session,\n"
	buf += "	.fall_back_to_erl0		= " + fabric_mod_name + "_reset_nexus,\n"
	buf += "	.sess_logged_in			= " + fabric_mod_name + "_sess_logged_in,\n"
	buf += "	.sess_get_index			= " + fabric_mod_name + "_sess_get_index,\n"
	buf += "	.sess_get_initiator_sid		= NULL,\n"
	buf += "	.write_pending			= " + fabric_mod_name + "_write_pending,\n"
	buf += "	.write_pending_status		= " + fabric_mod_name + "_write_pending_status,\n"
	buf += "	.set_default_node_attributes	= " + fabric_mod_name + "_set_default_node_attrs,\n"
	buf += "	.get_task_tag			= " + fabric_mod_name + "_get_task_tag,\n"
	buf += "	.get_cmd_state			= " + fabric_mod_name + "_get_cmd_state,\n"
	buf += "	.queue_data_in			= " + fabric_mod_name + "_queue_data_in,\n"
	buf += "	.queue_status			= " + fabric_mod_name + "_queue_status,\n"
	buf += "	.queue_tm_rsp			= " + fabric_mod_name + "_queue_tm_rsp,\n"
	buf += "	.get_fabric_sense_len		= " + fabric_mod_name + "_get_fabric_sense_len,\n"
	buf += "	.set_fabric_sense_len		= " + fabric_mod_name + "_set_fabric_sense_len,\n"
	buf += "	.is_state_remove		= " + fabric_mod_name + "_is_state_remove,\n"
	buf += "	/*\n"
	buf += "	 * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
	buf += "	 */\n"
	buf += "	.fabric_make_wwn		= " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
	buf += "	.fabric_drop_wwn		= " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
	buf += "	.fabric_make_tpg		= " + fabric_mod_name + "_make_tpg,\n"
	buf += "	.fabric_drop_tpg		= " + fabric_mod_name + "_drop_tpg,\n"
	buf += "	.fabric_post_link		= NULL,\n"
	buf += "	.fabric_pre_unlink		= NULL,\n"
	buf += "	.fabric_make_np			= NULL,\n"
	buf += "	.fabric_drop_np			= NULL,\n"
	buf += "	.fabric_make_nodeacl		= " + fabric_mod_name + "_make_nodeacl,\n"
	buf += "	.fabric_drop_nodeacl		= " + fabric_mod_name + "_drop_nodeacl,\n"
	buf += "};\n\n"
	buf += "static int " + fabric_mod_name + "_register_configfs(void)\n"
	buf += "{\n"
	buf += "	struct target_fabric_configfs *fabric;\n"
	buf += "	int ret;\n\n"
	buf += "	printk(KERN_INFO \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
	buf += "		\" on \"UTS_RELEASE\"\\n\"," + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
	buf += "		utsname()->machine);\n"
	buf += "	/*\n"
	buf += "	 * Register the top level struct config_item_type with TCM core\n"
	buf += "	 */\n"
	buf += "	fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
	buf += "	if (IS_ERR(fabric)) {\n"
	buf += "		printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
	buf += "		return PTR_ERR(fabric);\n"
	buf += "	}\n"
	buf += "	/*\n"
	buf += "	 * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n"
	buf += "	 */\n"
	buf += "	fabric->tf_ops = " + fabric_mod_name + "_ops;\n"
	buf += "	/*\n"
	buf += "	 * Setup default attribute lists for various fabric->tf_cit_tmpl\n"
	buf += "	 */\n"
	buf += "	TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
	buf += "	TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;\n"
	buf += "	TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;\n"
	buf += "	TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;\n"
	buf += "	TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;\n"
	buf += "	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n"
	buf += "	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n"
	buf += "	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n"
	buf += "	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n"
	buf += "	/*\n"
	buf += "	 * Register the fabric for use within TCM\n"
	buf += "	 */\n"
	buf += "	ret = target_fabric_configfs_register(fabric);\n"
	buf += "	if (ret < 0) {\n"
	buf += "		printk(KERN_ERR \"target_fabric_configfs_register() failed\"\n"
	buf += "				\" for " + fabric_mod_name.upper() + "\\n\");\n"
	buf += "		return ret;\n"
	buf += "	}\n"
	buf += "	/*\n"
	buf += "	 * Setup our local pointer to *fabric\n"
	buf += "	 */\n"
	buf += "	" + fabric_mod_name + "_fabric_configfs = fabric;\n"
	buf += "	printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n"
	buf += "	return 0;\n"
	buf += "};\n\n"
	buf += "static void __exit " + fabric_mod_name + "_deregister_configfs(void)\n"
	buf += "{\n"
	buf += "	if (!" + fabric_mod_name + "_fabric_configfs)\n"
	buf += "		return;\n\n"
	buf += "	target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n"
	buf += "	" + fabric_mod_name + "_fabric_configfs = NULL;\n"
	buf += "	printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Cleared " + fabric_mod_name + "_fabric_configfs\\n\");\n"
	buf += "};\n\n"
	buf += "static int __init " + fabric_mod_name + "_init(void)\n"
	buf += "{\n"
	buf += "	int ret;\n\n"
	buf += "	ret = " + fabric_mod_name + "_register_configfs();\n"
	buf += "	if (ret < 0)\n"
	buf += "		return ret;\n\n"
	buf += "	return 0;\n"
	buf += "};\n\n"
	buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
	buf += "{\n"
	buf += "	" + fabric_mod_name + "_deregister_configfs();\n"
	buf += "};\n\n"
	buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
	buf += "MODULE_LICENSE(\"GPL\");\n"
	buf += "module_init(" + fabric_mod_name + "_init);\n"
	buf += "module_exit(" + fabric_mod_name + "_exit);\n"
	# open() raises on failure and file.write() returns no useful status in
	# Python 2, so the original 'if not p' / 'if ret' checks were dead code.
	# The context manager also guarantees the file is closed.
	with open(f, 'w') as p:
		p.write(buf)
	return
def tcm_mod_scan_fabric_ops(tcm_dir):
	"""Collect function-pointer member lines of struct target_core_fabric_ops.

	Reads include/target/target_core_fabric.h under tcm_dir and appends
	each raw source line containing a '(*' function-pointer declaration
	to the module-level fabric_ops list.
	"""
	fabric_ops_api = tcm_dir + "include/target/target_core_fabric.h"
	print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
	# State flag: 0 until scanning of member lines starts, 1 afterwards.
	process_fo = 0;
	p = open(fabric_ops_api, 'r')
	line = p.readline()
	while line:
		# Skip the struct opening line itself (only checked before the flag flips).
		if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
			line = p.readline()
			continue
		if process_fo == 0:
			# First non-header line flips the flag and immediately advances
			# to the next line before testing it.
			# NOTE(review): this means the whole file after that point is
			# scanned for '(*' patterns, not just the struct body, and the
			# line that triggered the flip is never itself tested -- confirm
			# this matches the header layout it was written against.
			process_fo = 1;
			line = p.readline()
			# Search for function pointer
			if not re.search('\(\*', line):
				continue
			fabric_ops.append(line.rstrip())
			continue
		line = p.readline()
		# Search for function pointer
		if not re.search('\(\*', line):
			continue
		fabric_ops.append(line.rstrip())
	p.close()
	return
def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
	"""Generate <name>_fabric.c / <name>_fabric.h stub implementations.

	For every function pointer collected in the module-level fabric_ops
	list, emits a no-op C stub into <name>_fabric.c (buf) and a matching
	prototype into <name>_fabric.h (bufi).  proto_ident selects the
	FC/SAS/iSCSI transport-ID helper calls; fabric_mod_port is the port
	struct suffix set by the tcm_mod_build_*_include() helpers.
	"""
	buf = ""
	bufi = ""
	f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
	print("Writing file: " + f)
	fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
	print("Writing file: " + fi)
	buf = "#include <linux/slab.h>\n"
	buf += "#include <linux/kthread.h>\n"
	buf += "#include <linux/types.h>\n"
	buf += "#include <linux/list.h>\n"
	buf += "#include <linux/types.h>\n"
	buf += "#include <linux/string.h>\n"
	buf += "#include <linux/ctype.h>\n"
	buf += "#include <asm/unaligned.h>\n"
	buf += "#include <scsi/scsi.h>\n"
	buf += "#include <scsi/scsi_host.h>\n"
	buf += "#include <scsi/scsi_device.h>\n"
	buf += "#include <scsi/scsi_cmnd.h>\n"
	buf += "#include <scsi/libfc.h>\n\n"
	buf += "#include <target/target_core_base.h>\n"
	buf += "#include <target/target_core_fabric.h>\n"
	buf += "#include <target/target_core_configfs.h>\n\n"
	buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
	buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
	buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
	buf += "{\n"
	buf += "	return 1;\n"
	buf += "}\n\n"
	bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
	buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
	buf += "{\n"
	buf += "	return 0;\n"
	buf += "}\n\n"
	bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"
	# Iterate the collected lines directly; the original manual index
	# bookkeeping (i / total_fabric_ops) was equivalent but noisier.
	# Regex patterns are raw strings to silence py3 invalid-escape warnings.
	for fo in fabric_ops:
		# print("fabric_ops: " + fo)
		if re.search(r'get_fabric_name', fo):
			buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
			buf += "{\n"
			buf += "	return \"" + fabric_mod_name[4:] + "\";\n"
			buf += "}\n\n"
			bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
			continue
		if re.search(r'get_fabric_proto_ident', fo):
			buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n"
			buf += "{\n"
			buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
			buf += "				struct " + fabric_mod_name + "_tpg, se_tpg);\n"
			buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
			buf += "	u8 proto_id;\n\n"
			buf += "	switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
			if proto_ident == "FC":
				buf += "	case SCSI_PROTOCOL_FCP:\n"
				buf += "	default:\n"
				buf += "		proto_id = fc_get_fabric_proto_ident(se_tpg);\n"
				buf += "		break;\n"
			elif proto_ident == "SAS":
				buf += "	case SCSI_PROTOCOL_SAS:\n"
				buf += "	default:\n"
				buf += "		proto_id = sas_get_fabric_proto_ident(se_tpg);\n"
				buf += "		break;\n"
			elif proto_ident == "iSCSI":
				buf += "	case SCSI_PROTOCOL_ISCSI:\n"
				buf += "	default:\n"
				buf += "		proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n"
				buf += "		break;\n"
			buf += "	}\n\n"
			buf += "	return proto_id;\n"
			buf += "}\n\n"
			bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n"
		if re.search(r'get_wwn', fo):
			buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
			buf += "{\n"
			buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
			buf += "				struct " + fabric_mod_name + "_tpg, se_tpg);\n"
			buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
			buf += "	return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
			buf += "}\n\n"
			bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"
		if re.search(r'get_tag', fo):
			buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
			buf += "{\n"
			buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
			buf += "				struct " + fabric_mod_name + "_tpg, se_tpg);\n"
			buf += "	return tpg->" + fabric_mod_port + "_tpgt;\n"
			buf += "}\n\n"
			bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
		if re.search(r'get_default_depth', fo):
			buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n"
			buf += "{\n"
			buf += "	return 1;\n"
			buf += "}\n\n"
			bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n"
		if re.search(r'get_pr_transport_id\)\(', fo):
			buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n"
			buf += "	struct se_portal_group *se_tpg,\n"
			buf += "	struct se_node_acl *se_nacl,\n"
			buf += "	struct t10_pr_registration *pr_reg,\n"
			buf += "	int *format_code,\n"
			buf += "	unsigned char *buf)\n"
			buf += "{\n"
			buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
			buf += "				struct " + fabric_mod_name + "_tpg, se_tpg);\n"
			buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
			buf += "	int ret = 0;\n\n"
			buf += "	switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
			if proto_ident == "FC":
				buf += "	case SCSI_PROTOCOL_FCP:\n"
				buf += "	default:\n"
				buf += "		ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
				buf += "					format_code, buf);\n"
				buf += "		break;\n"
			elif proto_ident == "SAS":
				buf += "	case SCSI_PROTOCOL_SAS:\n"
				buf += "	default:\n"
				buf += "		ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
				buf += "					format_code, buf);\n"
				buf += "		break;\n"
			elif proto_ident == "iSCSI":
				buf += "	case SCSI_PROTOCOL_ISCSI:\n"
				buf += "	default:\n"
				buf += "		ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
				buf += "					format_code, buf);\n"
				buf += "		break;\n"
			buf += "	}\n\n"
			buf += "	return ret;\n"
			buf += "}\n\n"
			bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n"
			bufi += "			struct se_node_acl *, struct t10_pr_registration *,\n"
			bufi += "			int *, unsigned char *);\n"
		if re.search(r'get_pr_transport_id_len\)\(', fo):
			buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n"
			buf += "	struct se_portal_group *se_tpg,\n"
			buf += "	struct se_node_acl *se_nacl,\n"
			buf += "	struct t10_pr_registration *pr_reg,\n"
			buf += "	int *format_code)\n"
			buf += "{\n"
			buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
			buf += "				struct " + fabric_mod_name + "_tpg, se_tpg);\n"
			buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
			buf += "	int ret = 0;\n\n"
			buf += "	switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
			if proto_ident == "FC":
				buf += "	case SCSI_PROTOCOL_FCP:\n"
				buf += "	default:\n"
				buf += "		ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
				buf += "					format_code);\n"
				buf += "		break;\n"
			elif proto_ident == "SAS":
				buf += "	case SCSI_PROTOCOL_SAS:\n"
				buf += "	default:\n"
				buf += "		ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
				buf += "					format_code);\n"
				buf += "		break;\n"
			elif proto_ident == "iSCSI":
				buf += "	case SCSI_PROTOCOL_ISCSI:\n"
				buf += "	default:\n"
				buf += "		ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
				buf += "					format_code);\n"
				buf += "		break;\n"
			buf += "	}\n\n"
			buf += "	return ret;\n"
			buf += "}\n\n"
			bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n"
			bufi += "			struct se_node_acl *, struct t10_pr_registration *,\n"
			bufi += "			int *);\n"
		if re.search(r'parse_pr_out_transport_id\)\(', fo):
			buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n"
			buf += "	struct se_portal_group *se_tpg,\n"
			buf += "	const char *buf,\n"
			buf += "	u32 *out_tid_len,\n"
			buf += "	char **port_nexus_ptr)\n"
			buf += "{\n"
			buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
			buf += "				struct " + fabric_mod_name + "_tpg, se_tpg);\n"
			buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
			buf += "	char *tid = NULL;\n\n"
			buf += "	switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
			if proto_ident == "FC":
				buf += "	case SCSI_PROTOCOL_FCP:\n"
				buf += "	default:\n"
				buf += "		tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
				buf += "					port_nexus_ptr);\n"
			elif proto_ident == "SAS":
				buf += "	case SCSI_PROTOCOL_SAS:\n"
				buf += "	default:\n"
				buf += "		tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
				buf += "					port_nexus_ptr);\n"
			elif proto_ident == "iSCSI":
				buf += "	case SCSI_PROTOCOL_ISCSI:\n"
				buf += "	default:\n"
				buf += "		tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
				buf += "					port_nexus_ptr);\n"
			buf += "	}\n\n"
			buf += "	return tid;\n"
			buf += "}\n\n"
			bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n"
			bufi += "			const char *, u32 *, char **);\n"
		if re.search(r'alloc_fabric_acl\)\(', fo):
			buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n"
			buf += "{\n"
			buf += "	struct " + fabric_mod_name + "_nacl *nacl;\n\n"
			buf += "	nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
			buf += "	if (!nacl) {\n"
			buf += "		printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_nacl\\n\");\n"
			buf += "		return NULL;\n"
			buf += "	}\n\n"
			buf += "	return &nacl->se_node_acl;\n"
			buf += "}\n\n"
			bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n"
		if re.search(r'release_fabric_acl\)\(', fo):
			buf += "void " + fabric_mod_name + "_release_fabric_acl(\n"
			buf += "	struct se_portal_group *se_tpg,\n"
			buf += "	struct se_node_acl *se_nacl)\n"
			buf += "{\n"
			buf += "	struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n"
			buf += "			struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
			buf += "	kfree(nacl);\n"
			buf += "}\n\n"
			bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n"
			bufi += "			struct se_node_acl *);\n"
		if re.search(r'tpg_get_inst_index\)\(', fo):
			buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
			buf += "{\n"
			buf += "	return 1;\n"
			buf += "}\n\n"
			bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
		if re.search(r'\*release_cmd\)\(', fo):
			buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += "	return;\n"
			buf += "}\n\n"
			bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
		if re.search(r'shutdown_session\)\(', fo):
			buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
			buf += "{\n"
			buf += "	return 0;\n"
			buf += "}\n\n"
			bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"
		if re.search(r'close_session\)\(', fo):
			buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
			buf += "{\n"
			buf += "	return;\n"
			buf += "}\n\n"
			bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
		if re.search(r'stop_session\)\(', fo):
			buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
			buf += "{\n"
			buf += "	return;\n"
			buf += "}\n\n"
			bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"
		if re.search(r'fall_back_to_erl0\)\(', fo):
			buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
			buf += "{\n"
			buf += "	return;\n"
			buf += "}\n\n"
			bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"
		if re.search(r'sess_logged_in\)\(', fo):
			buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
			buf += "{\n"
			buf += "	return 0;\n"
			buf += "}\n\n"
			bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"
		if re.search(r'sess_get_index\)\(', fo):
			buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
			buf += "{\n"
			buf += "	return 0;\n"
			buf += "}\n\n"
			bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
		if re.search(r'write_pending\)\(', fo):
			buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += "	return 0;\n"
			buf += "}\n\n"
			bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
		if re.search(r'write_pending_status\)\(', fo):
			buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += "	return 0;\n"
			buf += "}\n\n"
			bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"
		if re.search(r'set_default_node_attributes\)\(', fo):
			buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
			buf += "{\n"
			buf += "	return;\n"
			buf += "}\n\n"
			bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
		if re.search(r'get_task_tag\)\(', fo):
			buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += "	return 0;\n"
			buf += "}\n\n"
			bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n"
		if re.search(r'get_cmd_state\)\(', fo):
			buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += "	return 0;\n"
			buf += "}\n\n"
			bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
		if re.search(r'queue_data_in\)\(', fo):
			buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += "	return 0;\n"
			buf += "}\n\n"
			bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
		if re.search(r'queue_status\)\(', fo):
			buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += "	return 0;\n"
			buf += "}\n\n"
			bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
		if re.search(r'queue_tm_rsp\)\(', fo):
			buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += "	return 0;\n"
			buf += "}\n\n"
			bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
		if re.search(r'get_fabric_sense_len\)\(', fo):
			buf += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void)\n"
			buf += "{\n"
			buf += "	return 0;\n"
			buf += "}\n\n"
			bufi += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void);\n"
		if re.search(r'set_fabric_sense_len\)\(', fo):
			buf += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)\n"
			buf += "{\n"
			buf += "	return 0;\n"
			buf += "}\n\n"
			bufi += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *, u32);\n"
		if re.search(r'is_state_remove\)\(', fo):
			buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += "	return 0;\n"
			buf += "}\n\n"
			bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
	# open() raises on failure and file.write() returns no useful status in
	# Python 2, so the original 'if not p' / 'if ret' checks were dead code.
	with open(f, 'w') as p:
		p.write(buf)
	with open(fi, 'w') as pi:
		pi.write(bufi)
	return
def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
	"""Write the kbuild Makefile for a newly generated fabric module.

	fabric_mod_dir_var: directory the skeleton module lives in.
	fabric_mod_name: module name; used for the object list and the
	CONFIG_ symbol (upper-cased).
	"""
	f = fabric_mod_dir_var + "/Makefile"
	# Single-argument print() behaves identically on Python 2 and 3.
	print("Writing file: " + f)
	# open() raises on failure; the original `if not p:` check could
	# never fire, so report the failure explicitly instead.
	try:
		p = open(f, 'w')
	except IOError:
		tcm_mod_err("Unable to open file: " + f)
		return
	buf = fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
	buf += "	" + fabric_mod_name + "_configfs.o\n"
	buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"
	# file.write() returns None on Python 2, so the original
	# `if ret:` error check was dead code; write errors propagate.
	p.write(buf)
	p.close()
	return
def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
	"""Write the Kconfig entry for a newly generated fabric module.

	fabric_mod_dir_var: directory the skeleton module lives in.
	fabric_mod_name: module name; the CONFIG_ symbol is its upper-case form.
	"""
	f = fabric_mod_dir_var + "/Kconfig"
	print("Writing file: " + f)
	# open() raises on failure; report it via tcm_mod_err instead of the
	# original dead `if not p:` check.
	try:
		p = open(f, 'w')
	except IOError:
		tcm_mod_err("Unable to open file: " + f)
		return
	# (The original also assigned buf = "" first, which was immediately
	# overwritten — dropped.)
	buf = "config " + fabric_mod_name.upper() + "\n"
	buf += "	tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
	buf += "	depends on TARGET_CORE && CONFIGFS_FS\n"
	buf += "	default n\n"
	buf += "	---help---\n"
	buf += "	Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"
	p.write(buf)
	p.close()
	return
def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
	"""Append the new module's obj-$(CONFIG_...) rule to drivers/target/Makefile."""
	entry = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n"
	makefile_path = tcm_dir + "/drivers/target/Makefile"
	# Append-only: existing Makefile content is preserved.
	with open(makefile_path, 'a') as makefile:
		makefile.write(entry)
	return
def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
	"""Hook the module's Kconfig into drivers/target/Kconfig via a source line."""
	entry = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n"
	kconfig_path = tcm_dir + "/drivers/target/Kconfig"
	# Append-only: existing Kconfig content is preserved.
	with open(kconfig_path, 'a') as kconfig:
		kconfig.write(entry)
	return
def main(modname, proto_ident):
	"""Generate a skeleton TCM fabric module under drivers/target/<modname>.

	modname: name for the new fabric module directory and symbols.
	proto_ident: protocol identifier; must be "FC", "SAS" or "iSCSI".

	Exits with status 1 on an unsupported protocol or if the module
	directory already exists.  Interactively offers to wire the module
	into the kernel Makefile/Kconfig.
	"""
	# Script is expected to run from drivers/target/, so the kernel tree
	# root is two levels up.
	tcm_dir = os.getcwd()
	tcm_dir += "/../../"
	print("tcm_dir: " + tcm_dir)
	fabric_mod_name = modname
	fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
	print("Set fabric_mod_name: " + fabric_mod_name)
	print("Set fabric_mod_dir: " + fabric_mod_dir)
	print("Using proto_ident: " + proto_ident)
	if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
		print("Unsupported proto_ident: " + proto_ident)
		sys.exit(1)
	ret = tcm_mod_create_module_subdir(fabric_mod_dir)
	if ret:
		print("tcm_mod_create_module_subdir() failed because module already exists!")
		sys.exit(1)
	tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
	tcm_mod_scan_fabric_ops(tcm_dir)
	tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
	tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
	tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
	tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
	# BUGFIX: the original prompts were missing the space before "to",
	# printing e.g. "add tcm_footo drivers/target/Makefile".
	input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Makefile..? [yes,no]: ")
	if input == "yes" or input == "y":
		tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
	input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Kconfig..? [yes,no]: ")
	if input == "yes" or input == "y":
		tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
	return
# Command-line entry point: parse -m/--modulename and -p/--protoident,
# require both, then generate the skeleton fabric module via main().
parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
		action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
		action='store', nargs=1, type='string')

(opts, args) = parser.parse_args()

# Both options are mandatory; bail out with usage help if either is absent.
mandatories = ['modname', 'protoident']
for m in mandatories:
	if not opts.__dict__[m]:
		print("mandatory option is missing\n")
		parser.print_help()
		exit(-1)

if __name__ == "__main__":
	main(str(opts.modname), opts.protoident)
| gpl-2.0 |
Twistbioscience/incubator-airflow | airflow/contrib/operators/emr_terminate_job_flow_operator.py | 16 | 1879 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from airflow.models import BaseOperator
from airflow.utils import apply_defaults
from airflow.exceptions import AirflowException
from airflow.contrib.hooks.emr_hook import EmrHook
class EmrTerminateJobFlowOperator(BaseOperator):
    """
    Operator to terminate EMR JobFlows.

    :param job_flow_id: id of the JobFlow to terminate
    :type job_flow_id: str
    :param aws_conn_id: aws connection to use
    :type aws_conn_id: str
    """
    # job_flow_id may be templated (e.g. pulled from XCom via Jinja).
    template_fields = ['job_flow_id']
    template_ext = ()
    ui_color = '#f9c915'

    @apply_defaults
    def __init__(
            self,
            job_flow_id,
            aws_conn_id='s3_default',
            *args, **kwargs):
        super(EmrTerminateJobFlowOperator, self).__init__(*args, **kwargs)
        self.job_flow_id = job_flow_id
        self.aws_conn_id = aws_conn_id

    def execute(self, context):
        """Terminate the JobFlow; raise AirflowException on a non-200 response."""
        emr = EmrHook(aws_conn_id=self.aws_conn_id).get_conn()

        self.log.info('Terminating JobFlow %s', self.job_flow_id)
        response = emr.terminate_job_flows(JobFlowIds=[self.job_flow_id])

        if not response['ResponseMetadata']['HTTPStatusCode'] == 200:
            raise AirflowException('JobFlow termination failed: %s' % response)
        else:
            self.log.info('JobFlow with id %s terminated', self.job_flow_id)
| apache-2.0 |
jendap/tensorflow | tensorflow/python/ops/tensor_array_grad.py | 32 | 9133 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradients for operators defined in tensor_array_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import tensor_array_ops
# TODO(b/31222613): These ops may be differentiable, and there may be
# latent bugs here.
# Mark the TensorArray handle-management ops (creation, grad-accumulator
# creation, sizing and closing) as non-differentiable, across all three op
# versions.  Gradients flow through the read/write/gather/scatter/concat/
# split ops registered below, never through these.
ops.NotDifferentiable("TensorArray")
ops.NotDifferentiable("TensorArrayGrad")
ops.NotDifferentiable("TensorArraySize")
ops.NotDifferentiable("TensorArrayClose")

ops.NotDifferentiable("TensorArrayV2")
ops.NotDifferentiable("TensorArrayGradV2")
ops.NotDifferentiable("TensorArraySizeV2")
ops.NotDifferentiable("TensorArrayCloseV2")

ops.NotDifferentiable("TensorArrayV3")
ops.NotDifferentiable("TensorArrayGradV3")
ops.NotDifferentiable("TensorArrayGradWithShape")
ops.NotDifferentiable("TensorArraySizeV3")
ops.NotDifferentiable("TensorArrayCloseV3")
def _GetGradSource(op_or_tensor):
"""Identify which call to tf.gradients created this gradient op or tensor.
TensorArray gradient calls use an accumulator TensorArray object. If
multiple gradients are calculated and run in the same session, the multiple
gradient nodes may accidentally flow throuth the same accumulator TensorArray.
This double counting breaks the TensorArray gradient flow.
The solution is to identify which gradient call this particular
TensorArray*Grad is being called in, by looking at the input gradient
tensor's name, and create or lookup an accumulator gradient TensorArray
associated with this specific call. This solves any confusion and ensures
different gradients from the same forward graph get their own accumulators.
This function creates the unique label associated with the tf.gradients call
that is used to create the gradient TensorArray.
Args:
op_or_tensor: `Tensor` or `Operation` which is an input to a
TensorArray*Grad call.
Returns:
A python string, the unique label associated with this particular
gradients calculation.
Raises:
ValueError: If not called within a gradients calculation.
"""
name_tokens = op_or_tensor.name.split("/")
grad_pos = [i for i, x in enumerate(name_tokens) if x.startswith("gradients")]
if not grad_pos:
raise ValueError(
"Expected op/tensor name to start with gradients (excluding scope)"
", got: %s" % op_or_tensor.name)
return "/".join(name_tokens[:grad_pos[-1] + 1])
@ops.RegisterGradient("TensorArrayRead")
@ops.RegisterGradient("TensorArrayReadV2")
@ops.RegisterGradient("TensorArrayReadV3")
def _TensorArrayReadGrad(op, grad):
"""Gradient for TensorArrayRead.
Args:
op: Forward TensorArrayRead op.
grad: Gradient `Tensor` to TensorArrayRead.
Returns:
A flow `Tensor`, which can be used in control dependencies to
force the write of `grad` to the gradient `TensorArray`.
"""
# Note: the forward flow dependency in the call to grad() is necessary for
# the case of dynamic sized TensorArrays. When creating the gradient
# TensorArray, the final size of the forward array must be known.
# For this we need to wait until it has been created by depending on
# the input flow of the original op.
handle = op.inputs[0]
index = op.inputs[1]
flow = op.inputs[2]
dtype = op.get_attr("dtype")
grad_source = _GetGradSource(grad)
g = (tensor_array_ops.TensorArray(dtype=dtype, handle=handle, flow=flow,
colocate_with_first_write_call=False)
.grad(source=grad_source, flow=flow))
w_g = g.write(index, grad)
return [None, None, w_g.flow]
@ops.RegisterGradient("TensorArrayWrite")
@ops.RegisterGradient("TensorArrayWriteV2")
@ops.RegisterGradient("TensorArrayWriteV3")
def _TensorArrayWriteGrad(op, flow):
"""Gradient for TensorArrayWrite.
Args:
op: Forward TensorArrayWrite op.
flow: Gradient `Tensor` flow to TensorArrayWrite.
Returns:
A grad `Tensor`, the gradient created in an upstream ReadGrad or PackGrad.
"""
# handle is the output store_handle of TensorArrayReadGrad or
# the handle output of TensorArrayWriteGrad. we must use this one.
handle = op.inputs[0]
index = op.inputs[1]
dtype = op.get_attr("T")
grad_source = _GetGradSource(flow)
g = (tensor_array_ops.TensorArray(dtype=dtype, handle=handle, flow=flow,
colocate_with_first_write_call=False)
.grad(source=grad_source, flow=flow))
grad = g.read(index)
return [None, None, grad, flow]
@ops.RegisterGradient("TensorArrayGather")
@ops.RegisterGradient("TensorArrayGatherV2")
@ops.RegisterGradient("TensorArrayGatherV3")
def _TensorArrayGatherGrad(op, grad):
"""Gradient for TensorArrayGather.
Args:
op: Forward TensorArrayGather op.
grad: Gradient `Tensor` to TensorArrayGather.
Returns:
A flow `Tensor`, which can be used in control dependencies to
force the write of `grad` to the gradient `TensorArray`.
"""
# Note: the forward flow dependency in the call to grad() is necessary for
# the case of dynamic sized TensorArrays. When creating the gradient
# TensorArray, the final size of the forward array must be known.
# For this we need to wait until it has been created by depending on
# the input flow of the original op.
handle = op.inputs[0]
indices = op.inputs[1]
flow = op.inputs[2]
dtype = op.get_attr("dtype")
grad_source = _GetGradSource(grad)
g = (tensor_array_ops.TensorArray(dtype=dtype, handle=handle, flow=flow,
colocate_with_first_write_call=False)
.grad(source=grad_source, flow=flow))
u_g = g.scatter(indices, grad)
return [None, None, u_g.flow]
@ops.RegisterGradient("TensorArrayScatter")
@ops.RegisterGradient("TensorArrayScatterV2")
@ops.RegisterGradient("TensorArrayScatterV3")
def _TensorArrayScatterGrad(op, flow):
"""Gradient for TensorArrayScatter.
Args:
op: Forward TensorArrayScatter op.
flow: Gradient `Tensor` flow to TensorArrayScatter.
Returns:
A grad `Tensor`, the gradient created in upstream ReadGrads or PackGrad.
"""
handle = op.inputs[0]
indices = op.inputs[1]
dtype = op.get_attr("T")
grad_source = _GetGradSource(flow)
g = (tensor_array_ops.TensorArray(dtype=dtype, handle=handle, flow=flow,
colocate_with_first_write_call=False)
.grad(source=grad_source, flow=flow))
grad = g.gather(indices)
return [None, None, grad, flow]
@ops.RegisterGradient("TensorArrayConcat")
@ops.RegisterGradient("TensorArrayConcatV2")
@ops.RegisterGradient("TensorArrayConcatV3")
def _TensorArrayConcatGrad(op, grad, unused_lengths_grad):
"""Gradient for TensorArrayConcat.
Args:
op: Forward TensorArrayConcat op.
grad: Gradient `Tensor` to TensorArrayConcat.
Returns:
A flow `Tensor`, which can be used in control dependencies to
force the write of `grad` to the gradient `TensorArray`.
"""
# Note: the forward flow dependency in the call to grad() is necessary for
# the case of dynamic sized TensorArrays. When creating the gradient
# TensorArray, the final size of the forward array must be known.
# For this we need to wait until it has been created by depending on
# the input flow of the original op.
handle = op.inputs[0]
flow = op.inputs[1]
lengths = op.outputs[1]
dtype = op.get_attr("dtype")
grad_source = _GetGradSource(grad)
g = (tensor_array_ops.TensorArray(dtype=dtype, handle=handle, flow=flow,
colocate_with_first_write_call=False)
.grad(source=grad_source, flow=flow))
u_g = g.split(grad, lengths=lengths)
# handle, flow_in
return [None, u_g.flow]
@ops.RegisterGradient("TensorArraySplit")
@ops.RegisterGradient("TensorArraySplitV2")
@ops.RegisterGradient("TensorArraySplitV3")
def _TensorArraySplitGrad(op, flow):
"""Gradient for TensorArraySplit.
Args:
op: Forward TensorArraySplit op.
flow: Gradient `Tensor` flow to TensorArraySplit.
Returns:
A grad `Tensor`, the gradient created in upstream ReadGrads or PackGrad.
"""
handle = op.inputs[0]
dtype = op.get_attr("T")
grad_source = _GetGradSource(flow)
g = (tensor_array_ops.TensorArray(dtype=dtype, handle=handle, flow=flow,
colocate_with_first_write_call=False)
.grad(source=grad_source, flow=flow))
grad = g.concat()
# handle, value, lengths, flow_in
return [None, grad, None, flow]
| apache-2.0 |
ville-k/tensorflow | tensorflow/contrib/layers/python/layers/feature_column_test.py | 22 | 45974 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for layers.feature_column."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import itertools
import os
import tempfile
import numpy as np
from tensorflow.contrib.layers.python.layers import feature_column as fc
from tensorflow.contrib.layers.python.layers import feature_column_ops
from tensorflow.python.feature_column import feature_column as fc_core
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import saver
def _sparse_id_tensor(shape, vocab_size, seed=112123):
  """Return an arbitrary `SparseTensor` with the given shape and vocab size.

  Deterministic for a fixed seed.  Values are drawn uniformly from
  [0, vocab_size]; draws equal to vocab_size fall outside the vocabulary
  and are dropped, which introduces some sparsity.
  """
  np.random.seed(seed)
  all_indices = np.array(list(itertools.product(*[range(s) for s in shape])))
  # Draw from [0, vocab_size] so some values land outside the vocab...
  drawn = np.random.randint(0, vocab_size + 1, size=np.prod(shape))
  # ...then drop those entries to create sparsity.
  in_vocab = drawn < vocab_size
  return sparse_tensor_lib.SparseTensor(
      indices=all_indices[in_vocab], values=drawn[in_vocab],
      dense_shape=shape)
class FeatureColumnTest(test.TestCase):
def testImmutability(self):
a = fc.sparse_column_with_hash_bucket("aaa", hash_bucket_size=100)
with self.assertRaises(AttributeError):
a.column_name = "bbb"
def testSparseColumnWithHashBucket(self):
a = fc.sparse_column_with_hash_bucket("aaa", hash_bucket_size=100)
self.assertEqual(a.name, "aaa")
self.assertEqual(a.dtype, dtypes.string)
a = fc.sparse_column_with_hash_bucket(
"aaa", hash_bucket_size=100, dtype=dtypes.int64)
self.assertEqual(a.name, "aaa")
self.assertEqual(a.dtype, dtypes.int64)
with self.assertRaisesRegexp(ValueError, "dtype must be string or integer"):
a = fc.sparse_column_with_hash_bucket(
"aaa", hash_bucket_size=100, dtype=dtypes.float32)
def testSparseColumnWithVocabularyFile(self):
b = fc.sparse_column_with_vocabulary_file(
"bbb", vocabulary_file="a_file", vocab_size=454)
self.assertEqual(b.dtype, dtypes.string)
self.assertEqual(b.lookup_config.vocab_size, 454)
self.assertEqual(b.lookup_config.vocabulary_file, "a_file")
with self.assertRaises(ValueError):
# Vocabulary size should be defined if vocabulary_file is used.
fc.sparse_column_with_vocabulary_file("bbb", vocabulary_file="somefile")
b = fc.sparse_column_with_vocabulary_file(
"bbb", vocabulary_file="a_file", vocab_size=454, dtype=dtypes.int64)
self.assertEqual(b.dtype, dtypes.int64)
with self.assertRaisesRegexp(ValueError, "dtype must be string or integer"):
b = fc.sparse_column_with_vocabulary_file(
"bbb", vocabulary_file="a_file", vocab_size=454, dtype=dtypes.float32)
def testWeightedSparseColumn(self):
ids = fc.sparse_column_with_keys("ids", ["marlo", "omar", "stringer"])
weighted_ids = fc.weighted_sparse_column(ids, "weights")
self.assertEqual(weighted_ids.name, "ids_weighted_by_weights")
def testWeightedSparseColumnDeepCopy(self):
ids = fc.sparse_column_with_keys("ids", ["marlo", "omar", "stringer"])
weighted = fc.weighted_sparse_column(ids, "weights")
weighted_copy = copy.deepcopy(weighted)
self.assertEqual(weighted_copy.sparse_id_column.name, "ids")
self.assertEqual(weighted_copy.weight_column_name, "weights")
self.assertEqual(weighted_copy.name, "ids_weighted_by_weights")
def testEmbeddingColumn(self):
a = fc.sparse_column_with_hash_bucket(
"aaa", hash_bucket_size=100, combiner="sum")
b = fc.embedding_column(a, dimension=4, combiner="mean")
self.assertEqual(b.sparse_id_column.name, "aaa")
self.assertEqual(b.dimension, 4)
self.assertEqual(b.combiner, "mean")
def testEmbeddingColumnDeepCopy(self):
a = fc.sparse_column_with_hash_bucket(
"aaa", hash_bucket_size=100, combiner="sum")
column = fc.embedding_column(a, dimension=4, combiner="mean")
column_copy = copy.deepcopy(column)
self.assertEqual(column_copy.name, "aaa_embedding")
self.assertEqual(column_copy.sparse_id_column.name, "aaa")
self.assertEqual(column_copy.dimension, 4)
self.assertEqual(column_copy.combiner, "mean")
def testScatteredEmbeddingColumn(self):
column = fc.scattered_embedding_column(
"aaa", size=100, dimension=10, hash_key=1)
self.assertEqual(column.column_name, "aaa")
self.assertEqual(column.size, 100)
self.assertEqual(column.dimension, 10)
self.assertEqual(column.hash_key, 1)
self.assertEqual(column.name, "aaa_scattered_embedding")
def testScatteredEmbeddingColumnDeepCopy(self):
column = fc.scattered_embedding_column(
"aaa", size=100, dimension=10, hash_key=1)
column_copy = copy.deepcopy(column)
self.assertEqual(column_copy.column_name, "aaa")
self.assertEqual(column_copy.size, 100)
self.assertEqual(column_copy.dimension, 10)
self.assertEqual(column_copy.hash_key, 1)
self.assertEqual(column_copy.name, "aaa_scattered_embedding")
def testSharedEmbeddingColumn(self):
a1 = fc.sparse_column_with_keys("a1", ["marlo", "omar", "stringer"])
a2 = fc.sparse_column_with_keys("a2", ["marlo", "omar", "stringer"])
b = fc.shared_embedding_columns([a1, a2], dimension=4, combiner="mean")
self.assertEqual(len(b), 2)
self.assertEqual(b[0].shared_embedding_name, "a1_a2_shared_embedding")
self.assertEqual(b[1].shared_embedding_name, "a1_a2_shared_embedding")
# Create a sparse id tensor for a1.
input_tensor_c1 = sparse_tensor_lib.SparseTensor(
indices=[[0, 0], [1, 1], [2, 2]], values=[0, 1, 2], dense_shape=[3, 3])
# Create a sparse id tensor for a2.
input_tensor_c2 = sparse_tensor_lib.SparseTensor(
indices=[[0, 0], [1, 1], [2, 2]], values=[0, 1, 2], dense_shape=[3, 3])
with variable_scope.variable_scope("run_1"):
b1 = feature_column_ops.input_from_feature_columns({
b[0]: input_tensor_c1
}, [b[0]])
b2 = feature_column_ops.input_from_feature_columns({
b[1]: input_tensor_c2
}, [b[1]])
with self.test_session() as sess:
sess.run(variables.global_variables_initializer())
b1_value = b1.eval()
b2_value = b2.eval()
for i in range(len(b1_value)):
self.assertAllClose(b1_value[i], b2_value[i])
# Test the case when a shared_embedding_name is explicitly specified.
d = fc.shared_embedding_columns(
[a1, a2],
dimension=4,
combiner="mean",
shared_embedding_name="my_shared_embedding")
# a3 is a completely different sparse column with a1 and a2, but since the
# same shared_embedding_name is passed in, a3 will have the same embedding
# as a1 and a2
a3 = fc.sparse_column_with_keys("a3", [42, 1, -1000], dtype=dtypes.int32)
e = fc.shared_embedding_columns(
[a3],
dimension=4,
combiner="mean",
shared_embedding_name="my_shared_embedding")
with variable_scope.variable_scope("run_2"):
d1 = feature_column_ops.input_from_feature_columns({
d[0]: input_tensor_c1
}, [d[0]])
e1 = feature_column_ops.input_from_feature_columns({
e[0]: input_tensor_c1
}, [e[0]])
with self.test_session() as sess:
sess.run(variables.global_variables_initializer())
d1_value = d1.eval()
e1_value = e1.eval()
for i in range(len(d1_value)):
self.assertAllClose(d1_value[i], e1_value[i])
def testSharedEmbeddingColumnDeterminism(self):
# Tests determinism in auto-generated shared_embedding_name.
sparse_id_columns = tuple([
fc.sparse_column_with_keys(k, ["foo", "bar"])
for k in ["07", "02", "00", "03", "05", "01", "09", "06", "04", "08"]
])
output = fc.shared_embedding_columns(
sparse_id_columns, dimension=2, combiner="mean")
self.assertEqual(len(output), 10)
for x in output:
self.assertEqual(x.shared_embedding_name,
"00_01_02_plus_7_others_shared_embedding")
def testSharedEmbeddingColumnErrors(self):
# Tries passing in a string.
with self.assertRaises(TypeError):
invalid_string = "Invalid string."
fc.shared_embedding_columns(invalid_string, dimension=2, combiner="mean")
# Tries passing in a set of sparse columns.
with self.assertRaises(TypeError):
invalid_set = set([
fc.sparse_column_with_keys("a", ["foo", "bar"]),
fc.sparse_column_with_keys("b", ["foo", "bar"]),
])
fc.shared_embedding_columns(invalid_set, dimension=2, combiner="mean")
def testSharedEmbeddingColumnDeepCopy(self):
a1 = fc.sparse_column_with_keys("a1", ["marlo", "omar", "stringer"])
a2 = fc.sparse_column_with_keys("a2", ["marlo", "omar", "stringer"])
columns = fc.shared_embedding_columns(
[a1, a2], dimension=4, combiner="mean")
columns_copy = copy.deepcopy(columns)
self.assertEqual(
columns_copy[0].shared_embedding_name, "a1_a2_shared_embedding")
self.assertEqual(
columns_copy[1].shared_embedding_name, "a1_a2_shared_embedding")
def testOneHotColumn(self):
a = fc.sparse_column_with_keys("a", ["a", "b", "c", "d"])
onehot_a = fc.one_hot_column(a)
self.assertEqual(onehot_a.sparse_id_column.name, "a")
self.assertEqual(onehot_a.length, 4)
b = fc.sparse_column_with_hash_bucket(
"b", hash_bucket_size=100, combiner="sum")
onehot_b = fc.one_hot_column(b)
self.assertEqual(onehot_b.sparse_id_column.name, "b")
self.assertEqual(onehot_b.length, 100)
def testOneHotReshaping(self):
"""Tests reshaping behavior of `OneHotColumn`."""
id_tensor_shape = [3, 2, 4, 5]
sparse_column = fc.sparse_column_with_keys(
"animals", ["squirrel", "moose", "dragon", "octopus"])
one_hot = fc.one_hot_column(sparse_column)
vocab_size = len(sparse_column.lookup_config.keys)
id_tensor = _sparse_id_tensor(id_tensor_shape, vocab_size)
for output_rank in range(1, len(id_tensor_shape) + 1):
with variable_scope.variable_scope("output_rank_{}".format(output_rank)):
one_hot_output = one_hot._to_dnn_input_layer(
id_tensor, output_rank=output_rank)
with self.test_session() as sess:
one_hot_value = sess.run(one_hot_output)
expected_shape = (id_tensor_shape[:output_rank - 1] + [vocab_size])
self.assertEquals(expected_shape, list(one_hot_value.shape))
def testOneHotColumnForWeightedSparseColumn(self):
ids = fc.sparse_column_with_keys("ids", ["marlo", "omar", "stringer"])
weighted_ids = fc.weighted_sparse_column(ids, "weights")
one_hot = fc.one_hot_column(weighted_ids)
self.assertEqual(one_hot.sparse_id_column.name, "ids_weighted_by_weights")
self.assertEqual(one_hot.length, 3)
def testOneHotColumnDeepCopy(self):
a = fc.sparse_column_with_keys("a", ["a", "b", "c", "d"])
column = fc.one_hot_column(a)
column_copy = copy.deepcopy(column)
self.assertEqual(column_copy.sparse_id_column.name, "a")
self.assertEqual(column.name, "a_one_hot")
self.assertEqual(column.length, 4)
def testRealValuedVarLenColumn(self):
c = fc._real_valued_var_len_column("ccc", is_sparse=True)
self.assertTrue(c.is_sparse)
self.assertTrue(c.default_value is None)
# default_value is an integer.
c5 = fc._real_valued_var_len_column("c5", default_value=2)
self.assertEqual(c5.default_value, 2)
# default_value is a float.
d4 = fc._real_valued_var_len_column("d4", is_sparse=True)
self.assertEqual(d4.default_value, None)
self.assertEqual(d4.is_sparse, True)
# Default value is a list but dimension is None.
with self.assertRaisesRegexp(ValueError,
"Only scalar default value.*"):
fc._real_valued_var_len_column("g5", default_value=[2., 3.])
def testRealValuedVarLenColumnDtypes(self):
rvc = fc._real_valued_var_len_column("rvc", is_sparse=True)
self.assertDictEqual(
{
"rvc": parsing_ops.VarLenFeature(dtype=dtypes.float32)
}, rvc.config)
rvc = fc._real_valued_var_len_column("rvc", default_value=0,
is_sparse=False)
self.assertDictEqual(
{
"rvc": parsing_ops.FixedLenSequenceFeature(shape=[],
dtype=dtypes.float32,
allow_missing=True,
default_value=0.0)
}, rvc.config)
rvc = fc._real_valued_var_len_column("rvc", dtype=dtypes.int32,
default_value=0, is_sparse=True)
self.assertDictEqual(
{
"rvc": parsing_ops.VarLenFeature(dtype=dtypes.int32)
}, rvc.config)
with self.assertRaisesRegexp(TypeError,
"dtype must be convertible to float"):
fc._real_valued_var_len_column("rvc", dtype=dtypes.string,
default_value="", is_sparse=True)
def testRealValuedColumn(self):
a = fc.real_valued_column("aaa")
self.assertEqual(a.name, "aaa")
self.assertEqual(a.dimension, 1)
b = fc.real_valued_column("bbb", 10)
self.assertEqual(b.dimension, 10)
self.assertTrue(b.default_value is None)
with self.assertRaisesRegexp(TypeError, "dimension must be an integer"):
fc.real_valued_column("d3", dimension=1.0)
with self.assertRaisesRegexp(ValueError,
"dimension must be greater than 0"):
fc.real_valued_column("d3", dimension=0)
with self.assertRaisesRegexp(ValueError,
"dtype must be convertible to float"):
fc.real_valued_column("d3", dtype=dtypes.string)
# default_value is an integer.
c1 = fc.real_valued_column("c1", default_value=2)
self.assertListEqual(list(c1.default_value), [2.])
c2 = fc.real_valued_column("c2", default_value=2, dtype=dtypes.int32)
self.assertListEqual(list(c2.default_value), [2])
c3 = fc.real_valued_column("c3", dimension=4, default_value=2)
self.assertListEqual(list(c3.default_value), [2, 2, 2, 2])
c4 = fc.real_valued_column(
"c4", dimension=4, default_value=2, dtype=dtypes.int32)
self.assertListEqual(list(c4.default_value), [2, 2, 2, 2])
# default_value is a float.
d1 = fc.real_valued_column("d1", default_value=2.)
self.assertListEqual(list(d1.default_value), [2.])
d2 = fc.real_valued_column("d2", dimension=4, default_value=2.)
self.assertListEqual(list(d2.default_value), [2., 2., 2., 2.])
with self.assertRaisesRegexp(TypeError,
"default_value must be compatible with dtype"):
fc.real_valued_column("d3", default_value=2., dtype=dtypes.int32)
# default_value is neither integer nor float.
with self.assertRaisesRegexp(TypeError,
"default_value must be compatible with dtype"):
fc.real_valued_column("e1", default_value="string")
with self.assertRaisesRegexp(TypeError,
"default_value must be compatible with dtype"):
fc.real_valued_column("e1", dimension=3, default_value=[1, 3., "string"])
# default_value is a list of integers.
f1 = fc.real_valued_column("f1", default_value=[2])
self.assertListEqual(list(f1.default_value), [2])
f2 = fc.real_valued_column("f2", dimension=3, default_value=[2, 2, 2])
self.assertListEqual(list(f2.default_value), [2., 2., 2.])
f3 = fc.real_valued_column(
"f3", dimension=3, default_value=[2, 2, 2], dtype=dtypes.int32)
self.assertListEqual(list(f3.default_value), [2, 2, 2])
# default_value is a list of floats.
g1 = fc.real_valued_column("g1", default_value=[2.])
self.assertListEqual(list(g1.default_value), [2.])
g2 = fc.real_valued_column("g2", dimension=3, default_value=[2., 2, 2])
self.assertListEqual(list(g2.default_value), [2., 2., 2.])
with self.assertRaisesRegexp(TypeError,
"default_value must be compatible with dtype"):
fc.real_valued_column("g3", default_value=[2.], dtype=dtypes.int32)
with self.assertRaisesRegexp(
ValueError, "The length of default_value must be equal to dimension"):
fc.real_valued_column("g4", dimension=3, default_value=[2.])
# Test that the normalizer_fn gets stored for a real_valued_column
normalizer = lambda x: x - 1
h1 = fc.real_valued_column("h1", normalizer=normalizer)
self.assertEqual(normalizer(10), h1.normalizer_fn(10))
# Test that normalizer is not stored within key
self.assertFalse("normalizer" in g1.key)
self.assertFalse("normalizer" in g2.key)
self.assertFalse("normalizer" in h1.key)
def testRealValuedColumnReshaping(self):
"""Tests reshaping behavior of `RealValuedColumn`."""
batch_size = 4
sequence_length = 8
dimensions = [3, 4, 5]
np.random.seed(2222)
input_shape = [batch_size, sequence_length] + dimensions
real_valued_input = np.random.rand(*input_shape)
real_valued_column = fc.real_valued_column("values")
for output_rank in range(1, 3 + len(dimensions)):
with variable_scope.variable_scope("output_rank_{}".format(output_rank)):
real_valued_output = real_valued_column._to_dnn_input_layer(
constant_op.constant(
real_valued_input, dtype=dtypes.float32),
output_rank=output_rank)
with self.test_session() as sess:
real_valued_eval = sess.run(real_valued_output)
expected_shape = (input_shape[:output_rank - 1] +
[np.prod(input_shape[output_rank - 1:])])
self.assertEquals(expected_shape, list(real_valued_eval.shape))
def testRealValuedColumnDensification(self):
"""Tests densification behavior of `RealValuedColumn`."""
# No default value, dimension 1 float.
real_valued_column = fc._real_valued_var_len_column(
"sparse_real_valued1", is_sparse=True)
sparse_tensor = sparse_tensor_lib.SparseTensor(
values=[2.0, 5.0], indices=[[0, 0], [2, 0]], dense_shape=[3, 1])
with self.assertRaisesRegexp(
ValueError, "Set is_sparse to False"):
real_valued_column._to_dnn_input_layer(sparse_tensor)
def testRealValuedColumnDeepCopy(self):
column = fc.real_valued_column(
"aaa", dimension=3, default_value=[1, 2, 3], dtype=dtypes.int32)
column_copy = copy.deepcopy(column)
self.assertEqual(column_copy.name, "aaa")
self.assertEqual(column_copy.dimension, 3)
self.assertEqual(column_copy.default_value, (1, 2, 3))
def testBucketizedColumnNameEndsWithUnderscoreBucketized(self):
a = fc.bucketized_column(fc.real_valued_column("aaa"), [0, 4])
self.assertEqual(a.name, "aaa_bucketized")
def testBucketizedColumnRequiresRealValuedColumn(self):
with self.assertRaisesRegexp(
TypeError, "source_column must be an instance of _RealValuedColumn"):
fc.bucketized_column("bbb", [0])
with self.assertRaisesRegexp(
TypeError, "source_column must be an instance of _RealValuedColumn"):
fc.bucketized_column(
fc.sparse_column_with_integerized_feature(
column_name="bbb", bucket_size=10), [0])
def testBucketizedColumnRequiresRealValuedColumnDimension(self):
  """A variable-length (sparse) real-valued column cannot be bucketized."""
  source = fc._real_valued_var_len_column("bbb", is_sparse=True)
  with self.assertRaisesRegexp(
      TypeError, "source_column must be an instance of _RealValuedColumn.*"):
    fc.bucketized_column(source, [0])
def testBucketizedColumnRequiresSortedBuckets(self):
  """Unsorted bucket boundaries are rejected."""
  unsorted_boundaries = [5, 0, 4]
  with self.assertRaisesRegexp(ValueError,
                               "boundaries must be a sorted list"):
    fc.bucketized_column(fc.real_valued_column("ccc"), unsorted_boundaries)
def testBucketizedColumnWithSameBucketBoundaries(self):
  """Duplicate boundary values are collapsed into a sorted unique tuple."""
  bucketized = fc.bucketized_column(
      fc.real_valued_column("a"), [1., 2., 2., 3., 3.])
  self.assertEqual(bucketized.name, "a_bucketized")
  self.assertTupleEqual(bucketized.boundaries, (1., 2., 3.))
def testBucketizedColumnDeepCopy(self):
  """Tests that we can do a deepcopy of a bucketized column.

  This test requires that the bucketized column also accept boundaries
  as tuples.
  """
  original = fc.bucketized_column(
      fc.real_valued_column("a"), [1., 2., 2., 3., 3.])
  # Both the original and its deep copy must expose the same name and
  # de-duplicated boundaries.
  for column in (original, copy.deepcopy(original)):
    self.assertEqual(column.name, "a_bucketized")
    self.assertTupleEqual(column.boundaries, (1., 2., 3.))
def testCrossedColumnNameCreatesSortedNames(self):
  """A crossed column's name is built from its sorted component names."""
  a = fc.sparse_column_with_hash_bucket("aaa", hash_bucket_size=100)
  b = fc.sparse_column_with_hash_bucket("bbb", hash_bucket_size=100)
  bucket = fc.bucketized_column(fc.real_valued_column("cost"), [0, 4])
  # Deliberately pass the columns out of order; the cross must sort them.
  crossed = fc.crossed_column(set([b, bucket, a]), hash_bucket_size=10000)
  self.assertEqual("aaa_X_bbb_X_cost_bucketized", crossed.name,
                   "name should be generated by sorted column names")
  for i, expected_name in enumerate(["aaa", "bbb", "cost_bucketized"]):
    self.assertEqual(expected_name, crossed.columns[i].name)
def testCrossedColumnNotSupportRealValuedColumn(self):
  """Real-valued columns may not participate in a cross."""
  b = fc.sparse_column_with_hash_bucket("bbb", hash_bucket_size=100)
  candidates = set([b, fc.real_valued_column("real")])
  with self.assertRaisesRegexp(
      TypeError, "columns must be a set of _SparseColumn, _CrossedColumn, "
      "or _BucketizedColumn instances"):
    fc.crossed_column(candidates, hash_bucket_size=10000)
def testCrossedColumnDeepCopy(self):
  """Deep-copying a crossed column preserves its name and components."""
  a = fc.sparse_column_with_hash_bucket("aaa", hash_bucket_size=100)
  b = fc.sparse_column_with_hash_bucket("bbb", hash_bucket_size=100)
  bucket = fc.bucketized_column(fc.real_valued_column("cost"), [0, 4])
  clone = copy.deepcopy(
      fc.crossed_column(set([b, bucket, a]), hash_bucket_size=10000))
  self.assertEqual("aaa_X_bbb_X_cost_bucketized", clone.name,
                   "name should be generated by sorted column names")
  for i, expected_name in enumerate(["aaa", "bbb", "cost_bucketized"]):
    self.assertEqual(expected_name, clone.columns[i].name)
def testFloat32WeightedSparseInt32ColumnDtypes(self):
  """Weights default to float32 when the id column is int32."""
  ids = fc.sparse_column_with_keys("ids", [42, 1, -1000], dtype=dtypes.int32)
  weighted = fc.weighted_sparse_column(ids, "weights")
  expected = {
      "ids": parsing_ops.VarLenFeature(dtypes.int32),
      "weights": parsing_ops.VarLenFeature(dtypes.float32),
  }
  self.assertDictEqual(expected, weighted.config)
def testFloat32WeightedSparseStringColumnDtypes(self):
  """Weights default to float32 when the id column is string-keyed."""
  ids = fc.sparse_column_with_keys("ids", ["marlo", "omar", "stringer"])
  weighted = fc.weighted_sparse_column(ids, "weights")
  expected = {
      "ids": parsing_ops.VarLenFeature(dtypes.string),
      "weights": parsing_ops.VarLenFeature(dtypes.float32),
  }
  self.assertDictEqual(expected, weighted.config)
def testInt32WeightedSparseStringColumnDtypes(self):
  """An explicit int32 weight dtype is honored; string weights fail."""
  ids = fc.sparse_column_with_keys("ids", ["marlo", "omar", "stringer"])
  weighted = fc.weighted_sparse_column(ids, "weights", dtype=dtypes.int32)
  expected = {
      "ids": parsing_ops.VarLenFeature(dtypes.string),
      "weights": parsing_ops.VarLenFeature(dtypes.int32),
  }
  self.assertDictEqual(expected, weighted.config)
  # Weights must be convertible to float; strings are not.
  with self.assertRaisesRegexp(ValueError,
                               "dtype is not convertible to float"):
    fc.weighted_sparse_column(ids, "weights", dtype=dtypes.string)
def testInt32WeightedSparseInt64ColumnDtypes(self):
  """int32 weights pair with int64 ids; string weights fail."""
  ids = fc.sparse_column_with_keys("ids", [42, 1, -1000], dtype=dtypes.int64)
  weighted = fc.weighted_sparse_column(ids, "weights", dtype=dtypes.int32)
  expected = {
      "ids": parsing_ops.VarLenFeature(dtypes.int64),
      "weights": parsing_ops.VarLenFeature(dtypes.int32),
  }
  self.assertDictEqual(expected, weighted.config)
  # Weights must be convertible to float; strings are not.
  with self.assertRaisesRegexp(ValueError,
                               "dtype is not convertible to float"):
    fc.weighted_sparse_column(ids, "weights", dtype=dtypes.string)
def testRealValuedColumnDtypes(self):
  """The parsing config dtype follows the column dtype; non-float fails."""
  # Default dtype is float32; an explicit integer dtype is preserved.
  for expected_dtype, kwargs in ((dtypes.float32, {}),
                                 (dtypes.int32, {"dtype": dtypes.int32})):
    rvc = fc.real_valued_column("rvc", **kwargs)
    self.assertDictEqual(
        {"rvc": parsing_ops.FixedLenFeature([1], dtype=expected_dtype)},
        rvc.config)
  # Strings cannot be converted to float and are rejected.
  with self.assertRaisesRegexp(ValueError,
                               "dtype must be convertible to float"):
    fc.real_valued_column("rvc", dtype=dtypes.string)
def testSparseColumnDtypes(self):
  """Integerized sparse columns default to int64 and must be integral."""
  for expected_dtype, kwargs in ((dtypes.int64, {}),
                                 (dtypes.int32, {"dtype": dtypes.int32})):
    sc = fc.sparse_column_with_integerized_feature("sc", 10, **kwargs)
    self.assertDictEqual(
        {"sc": parsing_ops.VarLenFeature(dtype=expected_dtype)}, sc.config)
  # Floating-point dtypes are not valid for integerized features.
  with self.assertRaisesRegexp(ValueError, "dtype must be an integer"):
    fc.sparse_column_with_integerized_feature("sc", 10, dtype=dtypes.float32)
def testSparseColumnSingleBucket(self):
  """A single-bucket integerized column reports vocab_size == 1."""
  column = fc.sparse_column_with_integerized_feature("sc", 1)
  self.assertDictEqual(
      {"sc": parsing_ops.VarLenFeature(dtype=dtypes.int64)}, column.config)
  self.assertEqual(
      1, column._wide_embedding_lookup_arguments(None).vocab_size)
def testSparseColumnAcceptsDenseScalar(self):
  """Tests that `SparseColumn`s accept dense scalar inputs.

  A rank-1 dense input of shape [batch_size] must be transformed into a
  SparseTensor whose dense shape is [batch_size, 1].
  """
  batch_size = 4
  dense_scalar_input = [1, 2, 3, 4]
  sparse_column = fc.sparse_column_with_integerized_feature("values", 10)
  features = {"values":
                  constant_op.constant(dense_scalar_input, dtype=dtypes.int64)}
  # The transformation is inserted into `features` keyed by the column.
  sparse_column.insert_transformed_feature(features)
  sparse_output = features[sparse_column]
  expected_shape = [batch_size, 1]
  with self.test_session() as sess:
    sparse_result = sess.run(sparse_output)
  # assertEquals is a deprecated unittest alias; use assertEqual.
  self.assertEqual(expected_shape, list(sparse_result.dense_shape))
def testSparseColumnIntegerizedDeepCopy(self):
  """Tests deepcopy of sparse_column_with_integerized_feature."""
  original = fc.sparse_column_with_integerized_feature("a", 10)
  self.assertEqual("a", original.name)
  clone = copy.deepcopy(original)
  self.assertEqual("a", clone.name)
  self.assertEqual(10, clone.bucket_size)
  self.assertTrue(clone.is_integerized)
def testSparseColumnHashBucketDeepCopy(self):
  """Tests deepcopy of sparse_column_with_hash_bucket."""
  original = fc.sparse_column_with_hash_bucket("a", 10)
  self.assertEqual("a", original.name)
  clone = copy.deepcopy(original)
  self.assertEqual("a", clone.name)
  self.assertEqual(10, clone.bucket_size)
  # Hash-bucket columns are not integerized.
  self.assertFalse(clone.is_integerized)
def testSparseColumnKeysDeepCopy(self):
  """Tests deepcopy of sparse_column_with_keys."""
  original = fc.sparse_column_with_keys("a", keys=["key0", "key1", "key2"])
  self.assertEqual("a", original.name)
  clone = copy.deepcopy(original)
  self.assertEqual("a", clone.name)
  expected_lookup = fc._SparseIdLookupConfig(  # pylint: disable=protected-access
      keys=("key0", "key1", "key2"),
      vocab_size=3,
      default_value=-1)
  self.assertEqual(expected_lookup, clone.lookup_config)
  self.assertFalse(clone.is_integerized)
def testSparseColumnVocabularyDeepCopy(self):
  """Tests deepcopy of sparse_column_with_vocabulary_file."""
  original = fc.sparse_column_with_vocabulary_file(
      "a", vocabulary_file="path_to_file", vocab_size=3)
  self.assertEqual("a", original.name)
  clone = copy.deepcopy(original)
  self.assertEqual("a", clone.name)
  expected_lookup = fc._SparseIdLookupConfig(  # pylint: disable=protected-access
      vocabulary_file="path_to_file",
      num_oov_buckets=0,
      vocab_size=3,
      default_value=-1)
  self.assertEqual(expected_lookup, clone.lookup_config)
  self.assertFalse(clone.is_integerized)
def testCreateFeatureSpec(self):
  """Builds one of every column kind and checks the derived parsing spec.

  Verifies fc.create_feature_spec_for_parsing against an explicit
  expected config, that the core-library equivalent produces the same
  spec, and that passing a dict of columns works like passing a set.
  """
  sparse_col = fc.sparse_column_with_hash_bucket(
      "sparse_column", hash_bucket_size=100)
  embedding_col = fc.embedding_column(
      fc.sparse_column_with_hash_bucket(
          "sparse_column_for_embedding", hash_bucket_size=10),
      dimension=4)
  str_sparse_id_col = fc.sparse_column_with_keys(
      "str_id_column", ["marlo", "omar", "stringer"])
  int32_sparse_id_col = fc.sparse_column_with_keys(
      "int32_id_column", [42, 1, -1000], dtype=dtypes.int32)
  int64_sparse_id_col = fc.sparse_column_with_keys(
      "int64_id_column", [42, 1, -1000], dtype=dtypes.int64)
  weighted_id_col = fc.weighted_sparse_column(str_sparse_id_col,
                                              "str_id_weights_column")
  real_valued_col1 = fc.real_valued_column("real_valued_column1")
  real_valued_col2 = fc.real_valued_column("real_valued_column2", 5)
  bucketized_col1 = fc.bucketized_column(
      fc.real_valued_column("real_valued_column_for_bucketization1"), [0, 4])
  bucketized_col2 = fc.bucketized_column(
      fc.real_valued_column("real_valued_column_for_bucketization2", 4),
      [0, 4])
  a = fc.sparse_column_with_hash_bucket("cross_aaa", hash_bucket_size=100)
  b = fc.sparse_column_with_hash_bucket("cross_bbb", hash_bucket_size=100)
  cross_col = fc.crossed_column(set([a, b]), hash_bucket_size=10000)
  one_hot_col = fc.one_hot_column(fc.sparse_column_with_hash_bucket(
      "sparse_column_for_one_hot", hash_bucket_size=100))
  scattered_embedding_col = fc.scattered_embedding_column(
      "scattered_embedding_column", size=100, dimension=10, hash_key=1)
  # Note: embedding_col and str_sparse_id_col contribute their underlying
  # sparse columns to the spec; derived columns map back to base features.
  feature_columns = set([
      sparse_col, embedding_col, weighted_id_col, int32_sparse_id_col,
      int64_sparse_id_col, real_valued_col1, real_valued_col2,
      bucketized_col1, bucketized_col2, cross_col, one_hot_col,
      scattered_embedding_col
  ])
  expected_config = {
      "sparse_column":
          parsing_ops.VarLenFeature(dtypes.string),
      "sparse_column_for_embedding":
          parsing_ops.VarLenFeature(dtypes.string),
      "str_id_column":
          parsing_ops.VarLenFeature(dtypes.string),
      "int32_id_column":
          parsing_ops.VarLenFeature(dtypes.int32),
      "int64_id_column":
          parsing_ops.VarLenFeature(dtypes.int64),
      "str_id_weights_column":
          parsing_ops.VarLenFeature(dtypes.float32),
      "real_valued_column1":
          parsing_ops.FixedLenFeature(
              [1], dtype=dtypes.float32),
      "real_valued_column2":
          parsing_ops.FixedLenFeature(
              [5], dtype=dtypes.float32),
      "real_valued_column_for_bucketization1":
          parsing_ops.FixedLenFeature(
              [1], dtype=dtypes.float32),
      "real_valued_column_for_bucketization2":
          parsing_ops.FixedLenFeature(
              [4], dtype=dtypes.float32),
      "cross_aaa":
          parsing_ops.VarLenFeature(dtypes.string),
      "cross_bbb":
          parsing_ops.VarLenFeature(dtypes.string),
      "sparse_column_for_one_hot":
          parsing_ops.VarLenFeature(dtypes.string),
      "scattered_embedding_column":
          parsing_ops.VarLenFeature(dtypes.string),
  }
  config = fc.create_feature_spec_for_parsing(feature_columns)
  self.assertDictEqual(expected_config, config)
  # Tests that contrib feature columns work with core library:
  config_core = fc_core.make_parse_example_spec(feature_columns)
  self.assertDictEqual(expected_config, config_core)
  # Test that the same config is parsed out if we pass a dictionary.
  feature_columns_dict = {
      str(i): val
      for i, val in enumerate(feature_columns)
  }
  config = fc.create_feature_spec_for_parsing(feature_columns_dict)
  self.assertDictEqual(expected_config, config)
def testCreateFeatureSpec_ExperimentalColumns(self):
  """Var-len columns parse as VarLenFeature (sparse) or fixed-len sequence."""
  sparse_col = fc._real_valued_var_len_column(
      "real_valued_column0", is_sparse=True)
  dense_col = fc._real_valued_var_len_column(
      "real_valued_column1", dtype=dtypes.int64, default_value=0,
      is_sparse=False)
  config = fc.create_feature_spec_for_parsing(set([sparse_col, dense_col]))
  self.assertDictEqual(
      {
          "real_valued_column0":
              parsing_ops.VarLenFeature(dtype=dtypes.float32),
          "real_valued_column1":
              parsing_ops.FixedLenSequenceFeature(
                  [], dtype=dtypes.int64, allow_missing=True,
                  default_value=0),
      },
      config)
def testCreateFeatureSpec_RealValuedColumnWithDefaultValue(self):
  """Default values are broadcast to the column dimension and cast to float.

  Scalar defaults become a per-dimension list; var-len sparse columns drop
  the default entirely (VarLenFeature carries none), while var-len dense
  columns keep it on the FixedLenSequenceFeature.
  """
  real_valued_col1 = fc.real_valued_column(
      "real_valued_column1", default_value=2)
  real_valued_col2 = fc.real_valued_column(
      "real_valued_column2", 5, default_value=4)
  real_valued_col3 = fc.real_valued_column(
      "real_valued_column3", default_value=[8])
  real_valued_col4 = fc.real_valued_column(
      "real_valued_column4", 3, default_value=[1, 0, 6])
  real_valued_col5 = fc._real_valued_var_len_column(
      "real_valued_column5", default_value=2, is_sparse=True)
  real_valued_col6 = fc._real_valued_var_len_column(
      "real_valued_column6", dtype=dtypes.int64, default_value=1,
      is_sparse=False)
  feature_columns = [
      real_valued_col1, real_valued_col2, real_valued_col3, real_valued_col4,
      real_valued_col5, real_valued_col6
  ]
  config = fc.create_feature_spec_for_parsing(feature_columns)
  self.assertEqual(6, len(config))
  self.assertDictEqual(
      {
          "real_valued_column1":
              parsing_ops.FixedLenFeature(
                  [1], dtype=dtypes.float32, default_value=[2.]),
          "real_valued_column2":
              parsing_ops.FixedLenFeature(
                  [5],
                  dtype=dtypes.float32,
                  default_value=[4., 4., 4., 4., 4.]),
          "real_valued_column3":
              parsing_ops.FixedLenFeature(
                  [1], dtype=dtypes.float32, default_value=[8.]),
          "real_valued_column4":
              parsing_ops.FixedLenFeature(
                  [3], dtype=dtypes.float32, default_value=[1., 0., 6.]),
          "real_valued_column5":
              parsing_ops.VarLenFeature(dtype=dtypes.float32),
          "real_valued_column6":
              parsing_ops.FixedLenSequenceFeature(
                  [], dtype=dtypes.int64, allow_missing=True,
                  default_value=1)
      },
      config)
def testCreateSequenceFeatureSpec(self):
  """Checks the sequence (per-timestep) parsing spec for each column kind.

  Sparse-style columns map to VarLenFeature; fixed-dimension real-valued
  columns map to FixedLenSequenceFeature, where a column default makes the
  feature allow_missing=True.
  """
  sparse_col = fc.sparse_column_with_hash_bucket(
      "sparse_column", hash_bucket_size=100)
  embedding_col = fc.embedding_column(
      fc.sparse_column_with_hash_bucket(
          "sparse_column_for_embedding", hash_bucket_size=10),
      dimension=4)
  sparse_id_col = fc.sparse_column_with_keys("id_column",
                                             ["marlo", "omar", "stringer"])
  weighted_id_col = fc.weighted_sparse_column(sparse_id_col,
                                              "id_weights_column")
  real_valued_col1 = fc.real_valued_column("real_valued_column", dimension=2)
  real_valued_col2 = fc.real_valued_column(
      "real_valued_default_column", dimension=5, default_value=3.0)
  real_valued_col3 = fc._real_valued_var_len_column(
      "real_valued_var_len_column", default_value=3.0, is_sparse=True)
  real_valued_col4 = fc._real_valued_var_len_column(
      "real_valued_var_len_dense_column", default_value=4.0, is_sparse=False)
  feature_columns = set([
      sparse_col, embedding_col, weighted_id_col, real_valued_col1,
      real_valued_col2, real_valued_col3, real_valued_col4
  ])
  feature_spec = fc._create_sequence_feature_spec_for_parsing(feature_columns)
  expected_feature_spec = {
      "sparse_column":
          parsing_ops.VarLenFeature(dtypes.string),
      "sparse_column_for_embedding":
          parsing_ops.VarLenFeature(dtypes.string),
      "id_column":
          parsing_ops.VarLenFeature(dtypes.string),
      "id_weights_column":
          parsing_ops.VarLenFeature(dtypes.float32),
      "real_valued_column":
          parsing_ops.FixedLenSequenceFeature(
              shape=[2], dtype=dtypes.float32, allow_missing=False),
      "real_valued_default_column":
          parsing_ops.FixedLenSequenceFeature(
              shape=[5], dtype=dtypes.float32, allow_missing=True),
      "real_valued_var_len_column":
          parsing_ops.VarLenFeature(dtype=dtypes.float32),
      "real_valued_var_len_dense_column":
          parsing_ops.FixedLenSequenceFeature(
              shape=[], dtype=dtypes.float32, allow_missing=True,
              default_value=4.0),
  }
  self.assertDictEqual(expected_feature_spec, feature_spec)
def testMakePlaceHolderTensorsForBaseFeatures(self):
  """Placeholders are created per base feature with the right kind/shape.

  Sparse and var-len columns yield SparseTensor placeholders; real-valued
  columns yield dense float placeholders shaped [None, dimension].
  """
  sparse_col = fc.sparse_column_with_hash_bucket(
      "sparse_column", hash_bucket_size=100)
  real_valued_col = fc.real_valued_column("real_valued_column", 5)
  vlen_real_valued_col = fc._real_valued_var_len_column(
      "vlen_real_valued_column", is_sparse=True)
  bucketized_col = fc.bucketized_column(
      fc.real_valued_column("real_valued_column_for_bucketization"), [0, 4])
  feature_columns = set(
      [sparse_col, real_valued_col, vlen_real_valued_col, bucketized_col])
  placeholders = (
      fc.make_place_holder_tensors_for_base_features(feature_columns))
  self.assertEqual(4, len(placeholders))
  self.assertTrue(
      isinstance(placeholders["sparse_column"],
                 sparse_tensor_lib.SparseTensor))
  self.assertTrue(
      isinstance(placeholders["vlen_real_valued_column"],
                 sparse_tensor_lib.SparseTensor))
  placeholder = placeholders["real_valued_column"]
  # Placeholder op names embed the column name.
  self.assertGreaterEqual(
      placeholder.name.find(u"Placeholder_real_valued_column"), 0)
  self.assertEqual(dtypes.float32, placeholder.dtype)
  self.assertEqual([None, 5], placeholder.get_shape().as_list())
  placeholder = placeholders["real_valued_column_for_bucketization"]
  self.assertGreaterEqual(
      placeholder.name.find(
          u"Placeholder_real_valued_column_for_bucketization"), 0)
  self.assertEqual(dtypes.float32, placeholder.dtype)
  # Bucketization sources are scalar-per-example: shape [None, 1].
  self.assertEqual([None, 1], placeholder.get_shape().as_list())
def testInitEmbeddingColumnWeightsFromCkpt(self):
  """Embedding weights restored via ckpt_to_load_from match the saved ones.

  Trains nothing: saves a randomly initialized embedding under scope
  "run_1", then builds a second embedding column that loads that tensor by
  its checkpoint name and verifies both lookups produce identical values.
  NOTE(review): the tensor_name_in_ckpt string is coupled to the variable
  naming scheme of input_from_feature_columns — keep them in sync.
  """
  sparse_col = fc.sparse_column_with_hash_bucket(
      column_name="object_in_image", hash_bucket_size=4)
  # Create _EmbeddingColumn which randomly initializes embedding of size
  # [4, 16].
  embedding_col = fc.embedding_column(sparse_col, dimension=16)
  # Creating a SparseTensor which has all the ids possible for the given
  # vocab.
  input_tensor = sparse_tensor_lib.SparseTensor(
      indices=[[0, 0], [1, 1], [2, 2], [3, 3]],
      values=[0, 1, 2, 3],
      dense_shape=[4, 4])
  # Invoking 'layers.input_from_feature_columns' will create the embedding
  # variable. Creating under scope 'run_1' so as to prevent name conflicts
  # when creating embedding variable for 'embedding_column_pretrained'.
  with variable_scope.variable_scope("run_1"):
    with variable_scope.variable_scope(embedding_col.name):
      # This will return a [4, 16] tensor which is same as embedding variable.
      embeddings = feature_column_ops.input_from_feature_columns({
          embedding_col: input_tensor
      }, [embedding_col])
  save = saver.Saver()
  ckpt_dir_prefix = os.path.join(self.get_temp_dir(),
                                 "init_embedding_col_w_from_ckpt")
  ckpt_dir = tempfile.mkdtemp(prefix=ckpt_dir_prefix)
  checkpoint_path = os.path.join(ckpt_dir, "model.ckpt")
  with self.test_session() as sess:
    sess.run(variables.global_variables_initializer())
    saved_embedding = embeddings.eval()
    save.save(sess, checkpoint_path)
  embedding_col_initialized = fc.embedding_column(
      sparse_id_column=sparse_col,
      dimension=16,
      ckpt_to_load_from=checkpoint_path,
      tensor_name_in_ckpt=("run_1/object_in_image_embedding/"
                           "input_from_feature_columns/object"
                           "_in_image_embedding/weights"))
  with variable_scope.variable_scope("run_2"):
    # This will initialize the embedding from provided checkpoint and return a
    # [4, 16] tensor which is same as embedding variable. Since we didn't
    # modify embeddings, this should be same as 'saved_embedding'.
    pretrained_embeddings = feature_column_ops.input_from_feature_columns({
        embedding_col_initialized: input_tensor
    }, [embedding_col_initialized])
  with self.test_session() as sess:
    sess.run(variables.global_variables_initializer())
    loaded_embedding = pretrained_embeddings.eval()
  self.assertAllClose(saved_embedding, loaded_embedding)
def testInitCrossedColumnWeightsFromCkpt(self):
  """Crossed-column weights restored via ckpt_to_load_from match saved ones.

  Saves non-zero weights for a three-way cross under scope "run_1", then
  builds a second crossed column that loads that weights tensor by its
  checkpoint name and verifies the restored values are identical.
  NOTE(review): tensor_name_in_ckpt is coupled to the variable naming of
  weighted_sum_from_feature_columns — keep them in sync.
  """
  sparse_col_1 = fc.sparse_column_with_hash_bucket(
      column_name="col_1", hash_bucket_size=4)
  sparse_col_2 = fc.sparse_column_with_keys(
      column_name="col_2", keys=("foo", "bar", "baz"))
  sparse_col_3 = fc.sparse_column_with_keys(
      column_name="col_3", keys=(42, 1, -1000), dtype=dtypes.int64)
  crossed_col = fc.crossed_column(
      columns=[sparse_col_1, sparse_col_2, sparse_col_3], hash_bucket_size=4)
  input_tensor = sparse_tensor_lib.SparseTensor(
      indices=[[0, 0], [1, 1], [2, 2], [3, 3]],
      values=[0, 1, 2, 3],
      dense_shape=[4, 4])
  # Invoking 'weighted_sum_from_feature_columns' will create the crossed
  # column weights variable.
  with variable_scope.variable_scope("run_1"):
    with variable_scope.variable_scope(crossed_col.name):
      # Returns looked up column weights which is same as crossed column
      # weights as well as actual references to weights variables.
      _, col_weights, _ = (
          feature_column_ops.weighted_sum_from_feature_columns({
              sparse_col_1.name: input_tensor,
              sparse_col_2.name: input_tensor,
              sparse_col_3.name: input_tensor
          }, [crossed_col], 1))
      # Update the weights since default initializer initializes all weights
      # to 0.0.
      for weight in col_weights.values():
        assign_op = state_ops.assign(weight[0], weight[0] + 0.5)
  save = saver.Saver()
  ckpt_dir_prefix = os.path.join(self.get_temp_dir(),
                                 "init_crossed_col_w_from_ckpt")
  ckpt_dir = tempfile.mkdtemp(prefix=ckpt_dir_prefix)
  checkpoint_path = os.path.join(ckpt_dir, "model.ckpt")
  with self.test_session() as sess:
    sess.run(variables.global_variables_initializer())
    sess.run(assign_op)
    saved_col_weights = col_weights[crossed_col][0].eval()
    save.save(sess, checkpoint_path)
  crossed_col_initialized = fc.crossed_column(
      columns=[sparse_col_1, sparse_col_2],
      hash_bucket_size=4,
      ckpt_to_load_from=checkpoint_path,
      tensor_name_in_ckpt=("run_1/col_1_X_col_2_X_col_3/"
                           "weighted_sum_from_feature_columns/"
                           "col_1_X_col_2_X_col_3/weights"))
  with variable_scope.variable_scope("run_2"):
    # This will initialize the crossed column weights from provided checkpoint
    # and return a [4, 1] tensor which is same as weights variable. Since we
    # won't modify weights, this should be same as 'saved_col_weights'.
    _, col_weights, _ = (feature_column_ops.weighted_sum_from_feature_columns(
        {
            sparse_col_1.name: input_tensor,
            sparse_col_2.name: input_tensor
        }, [crossed_col_initialized], 1))
    col_weights_from_ckpt = col_weights[crossed_col_initialized][0]
  with self.test_session() as sess:
    sess.run(variables.global_variables_initializer())
    loaded_col_weights = col_weights_from_ckpt.eval()
  self.assertAllClose(saved_col_weights, loaded_col_weights)
# Standard TensorFlow test entry point: run all test cases in this module.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
faridani/pyDoc | Unidecode/unidecode/x012.py | 252 | 4318 | data = (
'ha', # 0x00
'hu', # 0x01
'hi', # 0x02
'haa', # 0x03
'hee', # 0x04
'he', # 0x05
'ho', # 0x06
'[?]', # 0x07
'la', # 0x08
'lu', # 0x09
'li', # 0x0a
'laa', # 0x0b
'lee', # 0x0c
'le', # 0x0d
'lo', # 0x0e
'lwa', # 0x0f
'hha', # 0x10
'hhu', # 0x11
'hhi', # 0x12
'hhaa', # 0x13
'hhee', # 0x14
'hhe', # 0x15
'hho', # 0x16
'hhwa', # 0x17
'ma', # 0x18
'mu', # 0x19
'mi', # 0x1a
'maa', # 0x1b
'mee', # 0x1c
'me', # 0x1d
'mo', # 0x1e
'mwa', # 0x1f
'sza', # 0x20
'szu', # 0x21
'szi', # 0x22
'szaa', # 0x23
'szee', # 0x24
'sze', # 0x25
'szo', # 0x26
'szwa', # 0x27
'ra', # 0x28
'ru', # 0x29
'ri', # 0x2a
'raa', # 0x2b
'ree', # 0x2c
're', # 0x2d
'ro', # 0x2e
'rwa', # 0x2f
'sa', # 0x30
'su', # 0x31
'si', # 0x32
'saa', # 0x33
'see', # 0x34
'se', # 0x35
'so', # 0x36
'swa', # 0x37
'sha', # 0x38
'shu', # 0x39
'shi', # 0x3a
'shaa', # 0x3b
'shee', # 0x3c
'she', # 0x3d
'sho', # 0x3e
'shwa', # 0x3f
'qa', # 0x40
'qu', # 0x41
'qi', # 0x42
'qaa', # 0x43
'qee', # 0x44
'qe', # 0x45
'qo', # 0x46
'[?]', # 0x47
'qwa', # 0x48
'[?]', # 0x49
'qwi', # 0x4a
'qwaa', # 0x4b
'qwee', # 0x4c
'qwe', # 0x4d
'[?]', # 0x4e
'[?]', # 0x4f
'qha', # 0x50
'qhu', # 0x51
'qhi', # 0x52
'qhaa', # 0x53
'qhee', # 0x54
'qhe', # 0x55
'qho', # 0x56
'[?]', # 0x57
'qhwa', # 0x58
'[?]', # 0x59
'qhwi', # 0x5a
'qhwaa', # 0x5b
'qhwee', # 0x5c
'qhwe', # 0x5d
'[?]', # 0x5e
'[?]', # 0x5f
'ba', # 0x60
'bu', # 0x61
'bi', # 0x62
'baa', # 0x63
'bee', # 0x64
'be', # 0x65
'bo', # 0x66
'bwa', # 0x67
'va', # 0x68
'vu', # 0x69
'vi', # 0x6a
'vaa', # 0x6b
'vee', # 0x6c
've', # 0x6d
'vo', # 0x6e
'vwa', # 0x6f
'ta', # 0x70
'tu', # 0x71
'ti', # 0x72
'taa', # 0x73
'tee', # 0x74
'te', # 0x75
'to', # 0x76
'twa', # 0x77
'ca', # 0x78
'cu', # 0x79
'ci', # 0x7a
'caa', # 0x7b
'cee', # 0x7c
'ce', # 0x7d
'co', # 0x7e
'cwa', # 0x7f
'xa', # 0x80
'xu', # 0x81
'xi', # 0x82
'xaa', # 0x83
'xee', # 0x84
'xe', # 0x85
'xo', # 0x86
'[?]', # 0x87
'xwa', # 0x88
'[?]', # 0x89
'xwi', # 0x8a
'xwaa', # 0x8b
'xwee', # 0x8c
'xwe', # 0x8d
'[?]', # 0x8e
'[?]', # 0x8f
'na', # 0x90
'nu', # 0x91
'ni', # 0x92
'naa', # 0x93
'nee', # 0x94
'ne', # 0x95
'no', # 0x96
'nwa', # 0x97
'nya', # 0x98
'nyu', # 0x99
'nyi', # 0x9a
'nyaa', # 0x9b
'nyee', # 0x9c
'nye', # 0x9d
'nyo', # 0x9e
'nywa', # 0x9f
'\'a', # 0xa0
'\'u', # 0xa1
'[?]', # 0xa2
'\'aa', # 0xa3
'\'ee', # 0xa4
'\'e', # 0xa5
'\'o', # 0xa6
'\'wa', # 0xa7
'ka', # 0xa8
'ku', # 0xa9
'ki', # 0xaa
'kaa', # 0xab
'kee', # 0xac
'ke', # 0xad
'ko', # 0xae
'[?]', # 0xaf
'kwa', # 0xb0
'[?]', # 0xb1
'kwi', # 0xb2
'kwaa', # 0xb3
'kwee', # 0xb4
'kwe', # 0xb5
'[?]', # 0xb6
'[?]', # 0xb7
'kxa', # 0xb8
'kxu', # 0xb9
'kxi', # 0xba
'kxaa', # 0xbb
'kxee', # 0xbc
'kxe', # 0xbd
'kxo', # 0xbe
'[?]', # 0xbf
'kxwa', # 0xc0
'[?]', # 0xc1
'kxwi', # 0xc2
'kxwaa', # 0xc3
'kxwee', # 0xc4
'kxwe', # 0xc5
'[?]', # 0xc6
'[?]', # 0xc7
'wa', # 0xc8
'wu', # 0xc9
'wi', # 0xca
'waa', # 0xcb
'wee', # 0xcc
'we', # 0xcd
'wo', # 0xce
'[?]', # 0xcf
'`a', # 0xd0
'`u', # 0xd1
'`i', # 0xd2
'`aa', # 0xd3
'`ee', # 0xd4
'`e', # 0xd5
'`o', # 0xd6
'[?]', # 0xd7
'za', # 0xd8
'zu', # 0xd9
'zi', # 0xda
'zaa', # 0xdb
'zee', # 0xdc
'ze', # 0xdd
'zo', # 0xde
'zwa', # 0xdf
'zha', # 0xe0
'zhu', # 0xe1
'zhi', # 0xe2
'zhaa', # 0xe3
'zhee', # 0xe4
'zhe', # 0xe5
'zho', # 0xe6
'zhwa', # 0xe7
'ya', # 0xe8
'yu', # 0xe9
'yi', # 0xea
'yaa', # 0xeb
'yee', # 0xec
'ye', # 0xed
'yo', # 0xee
'[?]', # 0xef
'da', # 0xf0
'du', # 0xf1
'di', # 0xf2
'daa', # 0xf3
'dee', # 0xf4
'de', # 0xf5
'do', # 0xf6
'dwa', # 0xf7
'dda', # 0xf8
'ddu', # 0xf9
'ddi', # 0xfa
'ddaa', # 0xfb
'ddee', # 0xfc
'dde', # 0xfd
'ddo', # 0xfe
'ddwa', # 0xff
)
| mit |
cloudera/hue | desktop/core/ext-py/Babel-2.5.1/tests/test_localedata.py | 2 | 3832 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2011 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://babel.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://babel.edgewall.org/log/.
import unittest
import random
from operator import methodcaller
import sys
from babel import localedata, numbers
class MergeResolveTestCase(unittest.TestCase):
    """Tests for localedata.merge and alias resolution."""

    def test_merge_items(self):
        """Values from the second dict win; untouched keys survive."""
        base = {1: 'foo', 3: 'baz'}
        localedata.merge(base, {1: 'Foo', 2: 'Bar'})
        self.assertEqual({1: 'Foo', 2: 'Bar', 3: 'baz'}, base)

    def test_merge_nested_dict(self):
        """Nested dicts are merged recursively, key by key."""
        target = {'x': {'a': 1, 'b': 2, 'c': 3}}
        localedata.merge(target, {'x': {'a': 1, 'b': 12, 'd': 14}})
        self.assertEqual({'x': {'a': 1, 'b': 12, 'c': 3, 'd': 14}}, target)

    def test_merge_nested_dict_no_overlap(self):
        """Disjoint nested dicts are simply combined."""
        target = {'x': {'a': 1, 'b': 2}}
        localedata.merge(target, {'y': {'a': 11, 'b': 12}})
        self.assertEqual({'x': {'a': 1, 'b': 2},
                          'y': {'a': 11, 'b': 12}}, target)

    def test_merge_with_alias_and_resolve(self):
        """Aliased entries become (alias, overrides) pairs, resolved lazily."""
        alias = localedata.Alias('x')
        target = {'x': {'a': 1, 'b': 2, 'c': 3}, 'y': alias}
        localedata.merge(target, {'x': {'a': 1, 'b': 12, 'd': 14},
                                  'y': {'b': 22, 'e': 25}})
        # After merging, the alias carries its overrides unresolved.
        self.assertEqual({
            'x': {'a': 1, 'b': 12, 'c': 3, 'd': 14},
            'y': (alias, {'b': 22, 'e': 25})
        }, target)
        # LocaleDataDict resolves the alias against 'x', applying overrides.
        resolved = localedata.LocaleDataDict(target)
        self.assertEqual({
            'x': {'a': 1, 'b': 12, 'c': 3, 'd': 14},
            'y': {'a': 1, 'b': 22, 'c': 3, 'd': 14, 'e': 25}
        }, dict(resolved.items()))
def test_load():
    """Loaded locale data is correct, and load() memoizes per locale."""
    en_us = localedata.load('en_US')
    assert en_us['languages']['sv'] == 'Swedish'
    # The exact same object must be returned on a second call.
    assert localedata.load('en_US') is en_us
def test_merge():
    """merge() overwrites existing keys and adds new ones in place."""
    target = {1: 'foo', 3: 'baz'}
    localedata.merge(target, {1: 'Foo', 2: 'Bar'})
    assert target == {1: 'Foo', 2: 'Bar', 3: 'baz'}
def test_locale_identification():
    """Every advertised locale identifier is reported as existing."""
    for locale_id in localedata.locale_identifiers():
        assert localedata.exists(locale_id)
def test_unique_ids():
    """Locale IDs are unique, even after lower-case normalization."""
    all_ids = localedata.locale_identifiers()
    assert len(all_ids) == len(set(all_ids))
    # Lower-casing must not make two distinct IDs collide.
    lower_case_ids = [locale_id.lower() for locale_id in all_ids]
    assert len(lower_case_ids) == len(set(lower_case_ids))
def test_mixedcased_locale():
    """Locale lookup tolerates arbitrary per-character casing."""
    for locale_id in localedata.locale_identifiers():
        scrambled = ''.join(
            getattr(c, random.choice(['lower', 'upper']))() for c in locale_id)
        assert localedata.exists(scrambled)
def test_pi_support_frozen(monkeypatch):
    """When frozen (PyInstaller-style), base dir comes from sys._MEIPASS."""
    monkeypatch.setattr(sys, 'frozen', True, raising=False)
    monkeypatch.setattr(sys, '_MEIPASS', 'testdir', raising=False)
    assert localedata.get_base_dir() == 'testdir'
def test_pi_support_not_frozen():
    """Outside a frozen app the base dir lives inside the babel package."""
    assert not getattr(sys, 'frozen', False)
    assert localedata.get_base_dir().endswith('babel')
def test_locale_argument_acceptance():
    """normalize_locale/exists tolerate None and non-string inputs.

    Both functions must return a falsy result rather than raising when
    handed something that is not a locale string.
    """
    # None input: no normalization, and the locale does not exist.
    # PEP 8: compare to singletons with `is`, not `==`.
    assert localedata.normalize_locale(None) is None
    assert localedata.exists(None) is False

    # List input is likewise rejected without raising.
    assert localedata.normalize_locale(['en_us', None]) is None
    assert localedata.exists(['en_us', None]) is False
| apache-2.0 |
jxta/cc | vendor/Twisted-10.0.0/twisted/cred/util.py | 4 | 1284 | # -*- test-case-name: twisted.test.test_newcred -*-
# Copyright (c) 2001-2008 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Outdated, deprecated functionality related to challenge-based authentication.
Seek a solution to your problem elsewhere. This module is deprecated.
"""
# System Imports
import random, warnings
from twisted.python.hashlib import md5
from twisted.cred.error import Unauthorized
def respond(challenge, password):
    """Respond to a challenge.

    This is useful for challenge/response authentication.

    The response is md5(md5(password) + challenge), as raw digest bytes.
    """
    warnings.warn(
        "twisted.cred.util.respond is deprecated since Twisted 8.3.",
        category=PendingDeprecationWarning,
        stacklevel=2)
    hashed_password = md5(password).digest()
    responder = md5(hashed_password)
    responder.update(challenge)
    return responder.digest()
def challenge():
    """I return some random data.

    Generates 15-24 random uppercase ASCII letters and returns their md5
    digest as raw bytes.
    """
    warnings.warn(
        "twisted.cred.util.challenge is deprecated since Twisted 8.3.",
        category=PendingDeprecationWarning,
        stacklevel=2)
    length = random.randrange(15, 25)
    crap = ''.join(chr(random.randint(65, 90)) for _ in range(length))
    # Bug fix: md5() requires bytes on Python 3; the random text is pure
    # ASCII, so encoding is lossless (and a no-op on Python 2 str).
    return md5(crap.encode('ascii')).digest()
| apache-2.0 |
Bismarrck/tensorflow | tensorflow/contrib/constrained_optimization/python/candidates.py | 26 | 13286 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Code for optimizing over a set of candidate solutions.
The functions in this file deal with the constrained problem:
> minimize f(w)
> s.t. g_i(w) <= 0 for all i in {0,1,...,m-1}
Here, f(w) is the "objective function", and g_i(w) is the ith (of m) "constraint
function". Given the values of the objective and constraint functions for a set
of n "candidate solutions" {w_0,w_1,...,w_{n-1}} (for a total of n objective
function values, and n*m constraint function values), the
`find_best_candidate_distribution` function finds the best DISTRIBUTION over
these candidates, while `find_best_candidate_index' heuristically finds the
single best candidate.
Both of these functions have dependencies on `scipy`, so if you want to call
them, then you must make sure that `scipy` is available. The imports are
performed inside the functions themselves, so if they're not actually called,
then `scipy` is not needed.
For more specifics, please refer to:
> Cotter, Jiang and Sridharan. "Two-Player Games for Efficient Non-Convex
> Constrained Optimization".
> [https://arxiv.org/abs/1804.06500](https://arxiv.org/abs/1804.06500)
The `find_best_candidate_distribution` function implements the approach
described in Lemma 3, while `find_best_candidate_index` implements the heuristic
used for hyperparameter search in the experiments of Section 5.2.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
def _find_best_candidate_distribution_helper(objective_vector,
constraints_matrix,
maximum_violation=0.0):
"""Finds a distribution minimizing an objective subject to constraints.
This function deals with the constrained problem:
> minimize f(w)
> s.t. g_i(w) <= 0 for all i in {0,1,...,m-1}
Here, f(w) is the "objective function", and g_i(w) is the ith (of m)
"constraint function". Given a set of n "candidate solutions"
{w_0,w_1,...,w_{n-1}}, this function finds a distribution over these n
candidates that, in expectation, minimizes the objective while violating
the constraints by no more than `maximum_violation`. If no such distribution
exists, it returns an error (using Go-style error reporting).
The `objective_vector` parameter should be a numpy array with shape (n,), for
which objective_vector[i] = f(w_i). Likewise, `constraints_matrix` should be a
numpy array with shape (m,n), for which constraints_matrix[i,j] = g_i(w_j).
This function will return a distribution for which at most m+1 probabilities,
and often fewer, are nonzero.
Args:
objective_vector: numpy array of shape (n,), where n is the number of
"candidate solutions". Contains the objective function values.
constraints_matrix: numpy array of shape (m,n), where m is the number of
constraints and n is the number of "candidate solutions". Contains the
constraint violation magnitudes.
maximum_violation: nonnegative float, the maximum amount by which any
constraint may be violated, in expectation.
Returns:
A pair (`result`, `message`), exactly one of which is None. If `message` is
None, then the `result` contains the optimal distribution as a numpy array
of shape (n,). If `result` is None, then `message` contains an error
message.
Raises:
ValueError: If `objective_vector` and `constraints_matrix` have inconsistent
shapes, or if `maximum_violation` is negative.
ImportError: If we're unable to import `scipy.optimize`.
"""
if maximum_violation < 0.0:
raise ValueError("maximum_violation must be nonnegative")
mm, nn = np.shape(constraints_matrix)
if (nn,) != np.shape(objective_vector):
raise ValueError(
"objective_vector must have shape (n,), and constraints_matrix (m, n),"
" where n is the number of candidates, and m is the number of "
"constraints")
# We import scipy inline, instead of at the top of the file, so that a scipy
# dependency is only introduced if either find_best_candidate_distribution()
# or find_best_candidate_index() are actually called.
import scipy.optimize # pylint: disable=g-import-not-at-top
# Feasibility (within maximum_violation) constraints.
a_ub = constraints_matrix
b_ub = np.full((mm, 1), maximum_violation)
# Sum-to-one constraint.
a_eq = np.ones((1, nn))
b_eq = np.ones((1, 1))
# Nonnegativity constraints.
bounds = (0, None)
result = scipy.optimize.linprog(
objective_vector,
A_ub=a_ub,
b_ub=b_ub,
A_eq=a_eq,
b_eq=b_eq,
bounds=bounds)
# Go-style error reporting. We don't raise on error, since
# find_best_candidate_distribution() needs to handle the failure case, and we
# shouldn't use exceptions as flow-control.
if not result.success:
return (None, result.message)
else:
return (result.x, None)
def find_best_candidate_distribution(objective_vector,
                                     constraints_matrix,
                                     epsilon=0.0):
  """Finds a distribution minimizing an objective subject to constraints.

  This function deals with the constrained problem:

  > minimize f(w)
  > s.t. g_i(w) <= 0 for all i in {0,1,...,m-1}

  Given a set of n "candidate solutions" {w_0,w_1,...,w_{n-1}}, it returns a
  distribution over these candidates that, in expectation, minimizes the
  objective while violating the constraints by the smallest possible amount
  (found by bisection search). At most m+1 probabilities, and often fewer,
  will be nonzero.

  `objective_vector` has shape (n,) with objective_vector[i] = f(w_i), and
  `constraints_matrix` has shape (m,n) with constraints_matrix[i,j] = g_i(w_j).

  For more specifics, please refer to:

  > Cotter, Jiang and Sridharan. "Two-Player Games for Efficient Non-Convex
  > Constrained Optimization".
  > [https://arxiv.org/abs/1804.06500](https://arxiv.org/abs/1804.06500)

  This function implements the approach described in Lemma 3.

  Args:
    objective_vector: numpy array of shape (n,), the objective function values.
    constraints_matrix: numpy array of shape (m,n), the constraint violation
      magnitudes.
    epsilon: nonnegative float, the threshold at which to terminate the binary
      search while searching for the minimal expected constraint violation
      magnitude.

  Returns:
    The optimal distribution, as a numpy array of shape (n,).

  Raises:
    ValueError: If `objective_vector` and `constraints_matrix` have
      inconsistent shapes, or if `epsilon` is negative.
    ImportError: If we're unable to import `scipy.optimize`.
  """
  if epsilon < 0.0:
    raise ValueError("epsilon must be nonnegative")

  # Fast path: if a distribution violating no constraint at all exists, it is
  # the answer, and no bisection is needed.
  distribution, _ = _find_best_candidate_distribution_helper(
      objective_vector, constraints_matrix)
  if distribution is not None:
    return distribution

  # Upper bound on the achievable violation: the minimum, over all candidates,
  # of that candidate's worst per-constraint violation.
  lower = 0.0
  upper = np.min(np.amax(constraints_matrix, axis=0))
  best_distribution, _ = _find_best_candidate_distribution_helper(
      objective_vector, constraints_matrix, maximum_violation=upper)
  assert best_distribution is not None

  # Bisection invariant: "lower" is an unachievable maximum_violation, while
  # "upper" is achievable (best_distribution achieves it).
  middle = 0.5 * (lower + upper)
  while (middle - lower > epsilon) and (upper - middle > epsilon):
    candidate, _ = _find_best_candidate_distribution_helper(
        objective_vector, constraints_matrix, maximum_violation=middle)
    if candidate is None:
      lower = middle
    else:
      best_distribution = candidate
      upper = middle
    middle = 0.5 * (lower + upper)

  return best_distribution
def find_best_candidate_index(objective_vector,
                              constraints_matrix,
                              rank_objectives=False):
  """Heuristically finds the best candidate solution to a constrained problem.

  This function deals with the constrained problem:

  > minimize f(w)
  > s.t. g_i(w) <= 0 for all i in {0,1,...,m-1}

  Here, f(w) is the "objective function", and g_i(w) is the ith (of m)
  "constraint function". Given a set of n "candidate solutions"
  {w_0,w_1,...,w_{n-1}}, this function finds the "best" solution according
  to the following heuristic:

    1. Across all models, the ith constraint violations (i.e. max{0, g_i(w)})
       are ranked, as are the objectives (if rank_objectives=True).
    2. Each model is then associated its MAXIMUM rank across all m constraints
       (and the objective, if rank_objectives=True).
    3. The model with the minimal maximum rank is then identified. Ties are
       broken using the objective function value.
    4. The index of this "best" model is returned.

  The `objective_vector` parameter should be a numpy array with shape (n,), for
  which objective_vector[i] = f(w_i). Likewise, `constraints_matrix` should be
  a numpy array with shape (m,n), for which constraints_matrix[i,j] = g_i(w_j).

  For more specifics, please refer to:

  > Cotter, Jiang and Sridharan. "Two-Player Games for Efficient Non-Convex
  > Constrained Optimization".
  > [https://arxiv.org/abs/1804.06500](https://arxiv.org/abs/1804.06500)

  This function implements the heuristic used for hyperparameter search in the
  experiments of Section 5.2.

  Args:
    objective_vector: numpy array of shape (n,), where n is the number of
      "candidate solutions". Contains the objective function values.
    constraints_matrix: numpy array of shape (m,n), where m is the number of
      constraints and n is the number of "candidate solutions". Contains the
      constraint violation magnitudes.
    rank_objectives: bool, whether the objective function values should be
      included in the initial ranking step. If True, both the objective and
      constraints will be ranked. If False, only the constraints will be
      ranked. In either case, the objective function values will be used for
      tiebreaking.

  Returns:
    The index (in {0,1,...,n-1}) of the "best" model according to the above
    heuristic.

  Raises:
    ValueError: If `objective_vector` and `constraints_matrix` have
      inconsistent shapes.
    ImportError: If we're unable to import `scipy.stats`.
  """
  mm, nn = np.shape(constraints_matrix)
  if (nn,) != np.shape(objective_vector):
    raise ValueError(
        "objective_vector must have shape (n,), and constraints_matrix (m, n),"
        " where n is the number of candidates, and m is the number of "
        "constraints")

  # We import scipy inline, instead of at the top of the file, so that a scipy
  # dependency is only introduced if either find_best_candidate_distribution()
  # or find_best_candidate_index() are actually called.
  import scipy.stats  # pylint: disable=g-import-not-at-top

  if rank_objectives:
    maximum_ranks = scipy.stats.rankdata(objective_vector, method="min")
  else:
    maximum_ranks = np.zeros(nn, dtype=np.int64)
  # `range` (instead of the Python-2-only xrange) works on both versions.
  for ii in range(mm):
    # Take the maximum of the constraint functions with zero, since we want to
    # rank the magnitude of constraint *violations*. If the constraint is
    # satisfied, then we don't care how much it's satisfied by (as a result, we
    # expect all models satisfying a constraint to be tied at rank 1).
    ranks = scipy.stats.rankdata(
        np.maximum(0.0, constraints_matrix[ii, :]), method="min")
    maximum_ranks = np.maximum(maximum_ranks, ranks)

  # Linear scan for the minimal maximum rank, breaking ties in favor of the
  # smallest objective value (the `<=` means the last index wins among exact
  # objective ties, preserving the original selection order).
  best_index = None
  best_rank = float("Inf")
  best_objective = float("Inf")
  for ii in range(nn):
    if maximum_ranks[ii] < best_rank:
      best_index = ii
      best_rank = maximum_ranks[ii]
      best_objective = objective_vector[ii]
    elif (maximum_ranks[ii] == best_rank) and (objective_vector[ii] <=
                                               best_objective):
      best_index = ii
      best_objective = objective_vector[ii]

  return best_index
| apache-2.0 |
hemidactylus/flaskbiblio | config.py | 1 | 1074 | import os
# Filesystem layout.
basedir = os.path.abspath(os.path.dirname(__file__))
DB_DIRECTORY = os.path.join(basedir, 'app/database')
DB_NAME = 'biblio.db'
# Flask / WTForms settings; the secret key lives outside version control.
WTF_CSRF_ENABLED = True
from sensible_config import SECRET_KEY
# Date/time formats and the timezone used to localize user timestamps.
DATETIME_STR_FORMAT = '%Y-%m-%d %H:%M:%S'
SHORT_DATETIME_STR_FORMAT = '%d/%m/%y'
FILENAME_DATETIME_STR_FORMAT = '%Y_%m_%d'
USERS_TIMEZONE = 'Europe/Rome'
# Similarity search: vectorize names as digrams (True) or single letters.
SIMILAR_USE_DIGRAMS = True
# The thresholds depend on which vectoring scheme is in use.
SIMILAR_AUTHOR_THRESHOLD = 0.7 if SIMILAR_USE_DIGRAMS else 0.90
SIMILAR_BOOK_THRESHOLD = 0.7 if SIMILAR_USE_DIGRAMS else 0.93
# Smallest token length employed when similar-searching book titles.
MINIMUM_SIMILAR_BOOK_TOKEN_SIZE = 4
# Whether multiple books may share the same title (suggested: yes).
ALLOW_DUPLICATE_BOOKS = True
# Temporary directory for storing import-related files.
TEMP_DIRECTORY = os.path.join(basedir, 'app/temp')
| gpl-3.0 |
xflows/textflows | workflows/management/commands/auto_import_packages.py | 4 | 6445 | from datetime import datetime
import os
import sys
from django.core.management.base import BaseCommand, CommandError
from workflows import module_importer
from workflows.management.commands import export_package_old as export_package
from workflows.management.commands import import_package_old as import_package
from optparse import make_option
class Command(BaseCommand):
    # Django management command: auto-import database entries for every
    # installed workflows package. All real work is delegated to
    # auto_import_all_packages() below.
    help = 'Automatically iterates through all installed workflows sub-applications/projects/packages and imports their database entires. ' \
           'Note: Installed workflows packages are defined in mothra/settings.py via variable INSTALLED_APPS and begin with the string "workflows.". ' \
           'Auto import procedure does the following:\n' \
           '    - Creates database export of all definition objects using export_package command.\n'\
           '    - Export file goes to folder specified in mothra/settings.py/BACKUP_DIR and is timestamped\n'\
           '    For each installed package:\n' \
           '    - Loads package settings from "workflows/<package_name>/settings.py\n' \
           '    - If settings do not exist or settings.py/AUTO_IMPORT_DB == False then exit\n' \
           '    - Else tries to import all the files specified in settings.py/AUTO_IMPORT_DB_FILES list\n' \
           '    - If some files are missing skip them.\n' \
           '    - Imports are done using import_package command using -r option if settings.py/AUTO_IMPORT_DB_REPLACE_OPTION == True'
    # Command-line flags (optparse style, pre-Django-1.8 convention).
    option_list = BaseCommand.option_list + (
        make_option('-n', '--nobackup',
            action="store_true",
            dest='nobackup',
            default=False,
            help='No backup is created prior starting the import process.'
        ),
        make_option('-a', '--ask',
            action="store_true",
            dest='ask',
            default=False,
            help='Ask to import packages which are marked not to be imported.'
        ),
    )

    def handle(self, *args, **options):
        """Entry point invoked by Django's manage.py."""
        auto_import_all_packages(self.stdout.write, options['nobackup'], options['ask'])
        self.stdout.write('Auto import procedure finished.\n')
def auto_import_all_packages(writeFunc, nobackup, ask):
    """Back up the current definitions (unless `nobackup`), then walk every
    installed workflows package and import its declared data files.

    Args:
        writeFunc: callable used for progress output (e.g. self.stdout.write).
        nobackup: bool, skip the pre-import database export when True.
        ask: bool, interactively offer to import packages whose settings mark
            them as not auto-importable.
    """
    if ask:
        writeFunc('The procedure will interactively ask to import packages marked as not to be auto imported due to "--ask" option.\n')
    if nobackup:
        writeFunc('No backup will be created due to "--nobackup" option.\n')
    else:
        # Before touching anything, export everything to a timestamped JSON
        # file under BACKUP_DIR so a failed import can be rolled back.
        try:
            from mothra.settings import BACKUP_DIR
        except:
            raise CommandError('Do not know where to backup existing database: BACKUP_DIR variable not found in mothra/settings.py. Consider using "--nobackup" option.')
        if not os.path.exists(BACKUP_DIR): os.makedirs(BACKUP_DIR)
        timeStamp = datetime.now().strftime('_%Y%m%d_%H%M%S.json')
        backupDir = os.path.join(BACKUP_DIR,"db_backup"+timeStamp)
        writeFunc('Exporting to backup...\n')
        result = export_package.export_package_string(lambda text: writeFunc('    '+text), ('all',), False, False, True, 1)
        try:
            f = open(backupDir, 'w')
            f.write(result.encode('utf-8'))
            f.close()
            writeFunc('Backup successfully written.\n')
        except Exception as e:
            raise CommandError('There was a problem with writing to the given backup file "%s". Problem: %s'%(backupDir, e))
        writeFunc('Export procedure successfully finished. Results written to the file "%s".\n' %backupDir)
    #get all relevant package settings:
    packageSetts = module_importer.import_all_packages_libs_as_dict("settings")
    for pckSett in packageSetts:
        writeFunc('--------------------------------------------------------------------------------\n')
        writeFunc('Auto importing package "%s":\n'%pckSett)
        sett = packageSetts[pckSett]
        if sett is None:
            writeFunc('    No settings found for this package.\n')
            continue
        # A package opts in via AUTO_IMPORT_DB and lists its data files in
        # AUTO_IMPORT_DB_FILES; missing attributes mean "skip this package".
        try:
            imp = sett.AUTO_IMPORT_DB
            files = sett.AUTO_IMPORT_DB_FILES
        except:
            writeFunc('    Either AUTO_IMPORT_DB or AUTO_IMPORT_DB_FILES not found in package\'s settings.\n')
            continue
        # AUTO_IMPORT_DB_REPLACE_OPTION is optional and defaults to False.
        replace = False
        try:
            replace = sett.AUTO_IMPORT_DB_REPLACE_OPTION
        except:
            pass
        if not imp:
            writeFunc('    AUTO_IMPORT_DB set to false in package\'s settings.\n')
            # With --ask, offer the user a chance to import anyway.
            if not ask or not query_yes_no('    Do you want to import this package anyway?\n'):
                continue
        for fileName in files:
            writeFunc('    Importing file "%s":\n' % fileName)
            try:
                fileContent = open(fileName, 'r').read()
            except:
                writeFunc('        Cannot open or read given package data file.\n')
            else:
                import_package.import_package_string(lambda text: writeFunc('        '+text), fileContent, replace)
            writeFunc('    Done with file "%s":\n' % fileName)
        writeFunc('--------------------------------------------------------------------------------\n')
    return
def query_yes_no(question, default=None):
    """Ask a yes/no question via raw_input() and return the answer as a bool.

    "question" is a string that is presented to the user.
    "default" is the presumed answer if the user just hits <Enter>.
        It must be "yes", "no" or None (meaning an answer is required
        of the user).

    Returns True for a "yes" answer, False for "no".

    Raises:
        ValueError: if `default` is not one of None, "yes" or "no".
    """
    valid = {"yes": True, "y": True, "ye": True,
             "no": False, "n": False}
    # Identity comparison for the None sentinel (PEP 8), not `==`.
    if default is None:
        prompt = " [y/n] "
    elif default == "yes":
        prompt = " [Y/n] "
    elif default == "no":
        prompt = " [y/N] "
    else:
        raise ValueError("invalid default answer: '%s'" % default)

    # Loop until we get a recognizable answer (or an empty line when a
    # default exists).
    while True:
        sys.stdout.write(question + prompt)
        choice = raw_input().lower()
        if default is not None and choice == '':
            return valid[default]
        elif choice in valid:
            return valid[choice]
        else:
            sys.stdout.write("Please respond with 'yes' or 'no' "
                             "(or 'y' or 'n').\n")
xen0l/ansible | lib/ansible/modules/storage/infinidat/infini_pool.py | 43 | 6070 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Gregory Shulov (gregory.shulov@gmail.com)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: infini_pool
version_added: 2.3
short_description: Create, Delete and Modify Pools on Infinibox
description:
- This module to creates, deletes or modifies pools on Infinibox.
author: Gregory Shulov (@GR360RY)
options:
name:
description:
- Pool Name
required: true
state:
description:
- Creates/Modifies Pool when present or removes when absent
required: false
default: present
choices: [ "present", "absent" ]
size:
description:
- Pool Physical Capacity in MB, GB or TB units.
If pool size is not set on pool creation, size will be equal to 1TB.
See examples.
required: false
vsize:
description:
- Pool Virtual Capacity in MB, GB or TB units.
If pool vsize is not set on pool creation, Virtual Capacity will be equal to Physical Capacity.
See examples.
required: false
ssd_cache:
description:
- Enable/Disable SSD Cache on Pool
required: false
default: yes
type: bool
notes:
- Infinibox Admin level access is required for pool modifications
extends_documentation_fragment:
- infinibox
requirements:
- capacity
'''
EXAMPLES = '''
- name: Make sure pool foo exists. Set pool physical capacity to 10TB
infini_pool:
name: foo
size: 10TB
vsize: 10TB
user: admin
password: secret
system: ibox001
- name: Disable SSD Cache on pool
infini_pool:
name: foo
ssd_cache: no
user: admin
password: secret
system: ibox001
'''
RETURN = '''
'''
try:
from capacity import KiB, Capacity
HAS_CAPACITY = True
except ImportError:
HAS_CAPACITY = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.infinibox import HAS_INFINISDK, api_wrapper, get_system, infinibox_argument_spec
@api_wrapper
def get_pool(module, system):
    """Return the pool named by module.params['name'], or None if not found."""
    try:
        return system.pools.get(name=module.params['name'])
    except Exception:
        # infinisdk raises when no pool matches; report that as None. A bare
        # `except:` here also swallowed SystemExit/KeyboardInterrupt, so it is
        # narrowed to Exception.
        return None
@api_wrapper
def create_pool(module, system):
    """Create a pool from the module parameters.

    Capacity defaults match the original four-branch logic: both capacities
    default to 1TB when unset; when only `size` is given, the virtual capacity
    mirrors it; `vsize` alone leaves the physical capacity at 1TB.
    """
    name = module.params['name']
    size = module.params['size']
    vsize = module.params['vsize']
    ssd_cache = module.params['ssd_cache']

    if not module.check_mode:
        physical_capacity = Capacity(size or '1TB')
        virtual_capacity = Capacity(vsize or size or '1TB')
        pool = system.pools.create(name=name,
                                   physical_capacity=physical_capacity,
                                   virtual_capacity=virtual_capacity)
        # SSD caching is enabled by default on new pools; only push an update
        # when the user asked to disable it.
        if not ssd_cache:
            pool.update_ssd_enabled(ssd_cache)
    module.exit_json(changed=True)
@api_wrapper
def update_pool(module, system, pool):
    """Reconcile an existing pool with the requested size/vsize/ssd_cache,
    reporting changed=True only when something actually differed."""
    changed = False

    size = module.params['size']
    vsize = module.params['vsize']
    ssd_cache = module.params['ssd_cache']

    # Roundup the capacity to mimic Infinibox behaviour, so that comparing
    # against the stored capacity does not flag a spurious change.
    if size:
        physical_capacity = Capacity(size).roundup(6 * 64 * KiB)
        if pool.get_physical_capacity() != physical_capacity:
            if not module.check_mode:
                pool.update_physical_capacity(physical_capacity)
            changed = True

    if vsize:
        virtual_capacity = Capacity(vsize).roundup(6 * 64 * KiB)
        if pool.get_virtual_capacity() != virtual_capacity:
            if not module.check_mode:
                pool.update_virtual_capacity(virtual_capacity)
            changed = True

    # SSD cache flag is compared directly against the pool's current state.
    if pool.get_ssd_enabled() != ssd_cache:
        if not module.check_mode:
            pool.update_ssd_enabled(ssd_cache)
        changed = True

    module.exit_json(changed=changed)
@api_wrapper
def delete_pool(module, pool):
    """Remove an existing pool; in check mode only report the would-be change."""
    dry_run = module.check_mode
    if not dry_run:
        pool.delete()
    module.exit_json(changed=True)
def main():
    """Module entry point: validate parameters, then dispatch to
    create/update/delete based on the requested state."""
    argument_spec = infinibox_argument_spec()
    argument_spec.update(
        dict(
            name=dict(required=True),
            state=dict(default='present', choices=['present', 'absent']),
            size=dict(),
            vsize=dict(),
            ssd_cache=dict(type='bool', default=True)
        )
    )

    module = AnsibleModule(argument_spec, supports_check_mode=True)

    if not HAS_INFINISDK:
        module.fail_json(msg='infinisdk is required for this module')
    if not HAS_CAPACITY:
        module.fail_json(msg='The capacity python library is required for this module')

    # Validate capacity strings up front so the user gets a clear message.
    # Narrowed from bare `except:` clauses, which also trapped SystemExit.
    if module.params['size']:
        try:
            Capacity(module.params['size'])
        except Exception:
            module.fail_json(msg='size (Physical Capacity) should be defined in MB, GB, TB or PB units')

    if module.params['vsize']:
        try:
            Capacity(module.params['vsize'])
        except Exception:
            module.fail_json(msg='vsize (Virtual Capacity) should be defined in MB, GB, TB or PB units')

    state = module.params['state']
    system = get_system(module)
    pool = get_pool(module, system)

    if state == 'present' and not pool:
        create_pool(module, system)
    elif state == 'present' and pool:
        update_pool(module, system, pool)
    elif state == 'absent' and pool:
        delete_pool(module, pool)
    elif state == 'absent' and not pool:
        module.exit_json(changed=False)
| gpl-3.0 |
evaschalde/odoo | addons/hr_holidays/hr_holidays.py | 159 | 33482 | # -*- coding: utf-8 -*-
##################################################################################
#
# Copyright (c) 2005-2006 Axelor SARL. (http://www.axelor.com)
# and 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# $Id: hr.py 4656 2006-11-24 09:58:42Z Cyp $
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import datetime
import math
import time
from operator import attrgetter
from openerp.exceptions import Warning
from openerp import tools
from openerp.osv import fields, osv
from openerp.tools.translate import _
class hr_holidays_status(osv.osv):
    # OpenERP model describing a leave *type* (e.g. legal leave, sick leave),
    # with computed counters of allocated/taken/remaining days per employee.
    _name = "hr.holidays.status"
    _description = "Leave Type"

    def get_days(self, cr, uid, ids, employee_id, context=None):
        """Return, per leave type id, the employee's leave counters.

        Counters: max_leaves (validated allocations), leaves_taken (validated
        leaves), remaining_leaves (allocated - taken) and
        virtual_remaining_leaves (also subtracting still-pending requests).
        """
        result = dict((id, dict(max_leaves=0, leaves_taken=0, remaining_leaves=0,
                                virtual_remaining_leaves=0)) for id in ids)
        # Pending and validated requests of this employee for the given types.
        holiday_ids = self.pool['hr.holidays'].search(cr, uid, [('employee_id', '=', employee_id),
                                                                ('state', 'in', ['confirm', 'validate1', 'validate']),
                                                                ('holiday_status_id', 'in', ids)
                                                                ], context=context)
        for holiday in self.pool['hr.holidays'].browse(cr, uid, holiday_ids, context=context):
            status_dict = result[holiday.holiday_status_id.id]
            if holiday.type == 'add':
                # Allocation: increases what is available.
                status_dict['virtual_remaining_leaves'] += holiday.number_of_days_temp
                if holiday.state == 'validate':
                    status_dict['max_leaves'] += holiday.number_of_days_temp
                    status_dict['remaining_leaves'] += holiday.number_of_days_temp
            elif holiday.type == 'remove':  # number of days is negative
                status_dict['virtual_remaining_leaves'] -= holiday.number_of_days_temp
                if holiday.state == 'validate':
                    status_dict['leaves_taken'] += holiday.number_of_days_temp
                    status_dict['remaining_leaves'] -= holiday.number_of_days_temp
        return result

    def _user_left_days(self, cr, uid, ids, name, args, context=None):
        """Function-field getter: leave counters for the context employee,
        falling back to the employee linked to the current user."""
        employee_id = False
        if context and 'employee_id' in context:
            employee_id = context['employee_id']
        else:
            employee_ids = self.pool.get('hr.employee').search(cr, uid, [('user_id', '=', uid)], context=context)
            if employee_ids:
                employee_id = employee_ids[0]
        if employee_id:
            res = self.get_days(cr, uid, ids, employee_id, context=context)
        else:
            # No employee in scope: all counters are zero.
            res = dict((res_id, {'leaves_taken': 0, 'remaining_leaves': 0, 'max_leaves': 0}) for res_id in ids)
        return res

    _columns = {
        'name': fields.char('Leave Type', size=64, required=True, translate=True),
        'categ_id': fields.many2one('calendar.event.type', 'Meeting Type',
            help='Once a leave is validated, Odoo will create a corresponding meeting of this type in the calendar.'),
        'color_name': fields.selection([('red', 'Red'),('blue','Blue'), ('lightgreen', 'Light Green'), ('lightblue','Light Blue'), ('lightyellow', 'Light Yellow'), ('magenta', 'Magenta'),('lightcyan', 'Light Cyan'),('black', 'Black'),('lightpink', 'Light Pink'),('brown', 'Brown'),('violet', 'Violet'),('lightcoral', 'Light Coral'),('lightsalmon', 'Light Salmon'),('lavender', 'Lavender'),('wheat', 'Wheat'),('ivory', 'Ivory')],'Color in Report', required=True, help='This color will be used in the leaves summary located in Reporting\Leaves by Department.'),
        'limit': fields.boolean('Allow to Override Limit', help='If you select this check box, the system allows the employees to take more leaves than the available ones for this type and will not take them into account for the "Remaining Legal Leaves" defined on the employee form.'),
        'active': fields.boolean('Active', help="If the active field is set to false, it will allow you to hide the leave type without removing it."),
        # Computed counters; all share the _user_left_days getter via multi=.
        'max_leaves': fields.function(_user_left_days, string='Maximum Allowed', help='This value is given by the sum of all holidays requests with a positive value.', multi='user_left_days'),
        'leaves_taken': fields.function(_user_left_days, string='Leaves Already Taken', help='This value is given by the sum of all holidays requests with a negative value.', multi='user_left_days'),
        'remaining_leaves': fields.function(_user_left_days, string='Remaining Leaves', help='Maximum Leaves Allowed - Leaves Already Taken', multi='user_left_days'),
        'virtual_remaining_leaves': fields.function(_user_left_days, string='Virtual Remaining Leaves', help='Maximum Leaves Allowed - Leaves Already Taken - Leaves Waiting Approval', multi='user_left_days'),
        'double_validation': fields.boolean('Apply Double Validation', help="When selected, the Allocation/Leave Requests for this type require a second validation to be approved."),
    }
    _defaults = {
        'color_name': 'red',
        'active': True,
    }

    def name_get(self, cr, uid, ids, context=None):
        """Append "(taken/max)" to the display name for limited leave types,
        when an employee is in context to compute the counts against."""
        if context is None:
            context = {}
        if not context.get('employee_id',False):
            # leave counts is based on employee_id, would be inaccurate if not based on correct employee
            return super(hr_holidays_status, self).name_get(cr, uid, ids, context=context)
        res = []
        for record in self.browse(cr, uid, ids, context=context):
            name = record.name
            if not record.limit:
                name = name + ('  (%g/%g)' % (record.leaves_taken or 0.0, record.max_leaves or 0.0))
            res.append((record.id, name))
        return res
class hr_holidays(osv.osv):
_name = "hr.holidays"
_description = "Leave"
_order = "type desc, date_from asc"
_inherit = ['mail.thread', 'ir.needaction_mixin']
_track = {
'state': {
'hr_holidays.mt_holidays_approved': lambda self, cr, uid, obj, ctx=None: obj.state == 'validate',
'hr_holidays.mt_holidays_refused': lambda self, cr, uid, obj, ctx=None: obj.state == 'refuse',
'hr_holidays.mt_holidays_confirmed': lambda self, cr, uid, obj, ctx=None: obj.state == 'confirm',
},
}
def _employee_get(self, cr, uid, context=None):
emp_id = context.get('default_employee_id', False)
if emp_id:
return emp_id
ids = self.pool.get('hr.employee').search(cr, uid, [('user_id', '=', uid)], context=context)
if ids:
return ids[0]
return False
def _compute_number_of_days(self, cr, uid, ids, name, args, context=None):
result = {}
for hol in self.browse(cr, uid, ids, context=context):
if hol.type=='remove':
result[hol.id] = -hol.number_of_days_temp
else:
result[hol.id] = hol.number_of_days_temp
return result
    def _get_can_reset(self, cr, uid, ids, name, arg, context=None):
        """User can reset a leave request if it is its own leave request or if
        he is an Hr Manager. """
        user = self.pool['res.users'].browse(cr, uid, uid, context=context)
        group_hr_manager_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'base', 'group_hr_manager')[1]
        # HR managers may reset any request.
        if group_hr_manager_id in [g.id for g in user.groups_id]:
            return dict.fromkeys(ids, True)
        # Everyone else may only reset requests attached to their own employee.
        result = dict.fromkeys(ids, False)
        for holiday in self.browse(cr, uid, ids, context=context):
            if holiday.employee_id and holiday.employee_id.user_id and holiday.employee_id.user_id.id == uid:
                result[holiday.id] = True
        return result
    def _check_date(self, cr, uid, ids, context=None):
        """Constraint: an employee may not have two overlapping leaves.

        Returns False (constraint violated) as soon as any of the given
        holidays overlaps another non-cancelled, non-refused holiday of the
        same employee.
        """
        for holiday in self.browse(cr, uid, ids, context=context):
            # Two intervals overlap iff each one starts before the other ends.
            domain = [
                ('date_from', '<=', holiday.date_to),
                ('date_to', '>=', holiday.date_from),
                ('employee_id', '=', holiday.employee_id.id),
                ('id', '!=', holiday.id),
                ('state', 'not in', ['cancel', 'refuse']),
            ]
            nholidays = self.search_count(cr, uid, domain, context=context)
            if nholidays:
                return False
        return True
_check_holidays = lambda self, cr, uid, ids, context=None: self.check_holidays(cr, uid, ids, context=context)
_columns = {
'name': fields.char('Description', size=64),
'state': fields.selection([('draft', 'To Submit'), ('cancel', 'Cancelled'),('confirm', 'To Approve'), ('refuse', 'Refused'), ('validate1', 'Second Approval'), ('validate', 'Approved')],
'Status', readonly=True, track_visibility='onchange', copy=False,
help='The status is set to \'To Submit\', when a holiday request is created.\
\nThe status is \'To Approve\', when holiday request is confirmed by user.\
\nThe status is \'Refused\', when holiday request is refused by manager.\
\nThe status is \'Approved\', when holiday request is approved by manager.'),
'user_id':fields.related('employee_id', 'user_id', type='many2one', relation='res.users', string='User', store=True),
'date_from': fields.datetime('Start Date', readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}, select=True, copy=False),
'date_to': fields.datetime('End Date', readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}, copy=False),
'holiday_status_id': fields.many2one("hr.holidays.status", "Leave Type", required=True,readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}),
'employee_id': fields.many2one('hr.employee', "Employee", select=True, invisible=False, readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}),
'manager_id': fields.many2one('hr.employee', 'First Approval', invisible=False, readonly=True, copy=False,
help='This area is automatically filled by the user who validate the leave'),
'notes': fields.text('Reasons',readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}),
'number_of_days_temp': fields.float('Allocation', readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}, copy=False),
'number_of_days': fields.function(_compute_number_of_days, string='Number of Days', store=True),
'meeting_id': fields.many2one('calendar.event', 'Meeting'),
'type': fields.selection([('remove','Leave Request'),('add','Allocation Request')], 'Request Type', required=True, readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}, help="Choose 'Leave Request' if someone wants to take an off-day. \nChoose 'Allocation Request' if you want to increase the number of leaves available for someone", select=True),
'parent_id': fields.many2one('hr.holidays', 'Parent'),
'linked_request_ids': fields.one2many('hr.holidays', 'parent_id', 'Linked Requests',),
'department_id':fields.related('employee_id', 'department_id', string='Department', type='many2one', relation='hr.department', readonly=True, store=True),
'category_id': fields.many2one('hr.employee.category', "Employee Tag", help='Category of Employee', readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}),
'holiday_type': fields.selection([('employee','By Employee'),('category','By Employee Tag')], 'Allocation Mode', readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}, help='By Employee: Allocation/Request for individual Employee, By Employee Tag: Allocation/Request for group of employees in category', required=True),
'manager_id2': fields.many2one('hr.employee', 'Second Approval', readonly=True, copy=False,
help='This area is automaticly filled by the user who validate the leave with second level (If Leave type need second validation)'),
'double_validation': fields.related('holiday_status_id', 'double_validation', type='boolean', relation='hr.holidays.status', string='Apply Double Validation'),
'can_reset': fields.function(
_get_can_reset,
type='boolean'),
}
_defaults = {
'employee_id': _employee_get,
'state': 'confirm',
'type': 'remove',
'user_id': lambda obj, cr, uid, context: uid,
'holiday_type': 'employee'
}
_constraints = [
(_check_date, 'You can not have 2 leaves that overlaps on same day!', ['date_from','date_to']),
(_check_holidays, 'The number of remaining leaves is not sufficient for this leave type', ['state','number_of_days_temp'])
]
_sql_constraints = [
('type_value', "CHECK( (holiday_type='employee' AND employee_id IS NOT NULL) or (holiday_type='category' AND category_id IS NOT NULL))",
"The employee or employee category of this request is missing. Please make sure that your user login is linked to an employee."),
('date_check2', "CHECK ( (type='add') OR (date_from <= date_to))", "The start date must be anterior to the end date."),
('date_check', "CHECK ( number_of_days_temp >= 0 )", "The number of days must be greater than 0."),
]
def _create_resource_leave(self, cr, uid, leaves, context=None):
'''This method will create entry in resource calendar leave object at the time of holidays validated '''
obj_res_leave = self.pool.get('resource.calendar.leaves')
for leave in leaves:
vals = {
'name': leave.name,
'date_from': leave.date_from,
'holiday_id': leave.id,
'date_to': leave.date_to,
'resource_id': leave.employee_id.resource_id.id,
'calendar_id': leave.employee_id.resource_id.calendar_id.id
}
obj_res_leave.create(cr, uid, vals, context=context)
return True
    def _remove_resource_leave(self, cr, uid, ids, context=None):
        '''This method will create entry in resource calendar leave object at the time of holidays cancel/removed'''
        # Drop every resource.calendar.leaves entry linked to the given holidays
        # (counterpart of _create_resource_leave, used on cancel/refuse).
        obj_res_leave = self.pool.get('resource.calendar.leaves')
        leave_ids = obj_res_leave.search(cr, uid, [('holiday_id', 'in', ids)], context=context)
        return obj_res_leave.unlink(cr, uid, leave_ids, context=context)
def onchange_type(self, cr, uid, ids, holiday_type, employee_id=False, context=None):
result = {}
if holiday_type == 'employee' and not employee_id:
ids_employee = self.pool.get('hr.employee').search(cr, uid, [('user_id','=', uid)])
if ids_employee:
result['value'] = {
'employee_id': ids_employee[0]
}
elif holiday_type != 'employee':
result['value'] = {
'employee_id': False
}
return result
def onchange_employee(self, cr, uid, ids, employee_id):
result = {'value': {'department_id': False}}
if employee_id:
employee = self.pool.get('hr.employee').browse(cr, uid, employee_id)
result['value'] = {'department_id': employee.department_id.id}
return result
# TODO: can be improved using resource calendar method
def _get_number_of_days(self, date_from, date_to):
"""Returns a float equals to the timedelta between two dates given as string."""
DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S"
from_dt = datetime.datetime.strptime(date_from, DATETIME_FORMAT)
to_dt = datetime.datetime.strptime(date_to, DATETIME_FORMAT)
timedelta = to_dt - from_dt
diff_day = timedelta.days + float(timedelta.seconds) / 86400
return diff_day
    def unlink(self, cr, uid, ids, context=None):
        # Only requests that have not yet been (even partially) approved may be
        # deleted; anything further along the workflow must be refused instead.
        for rec in self.browse(cr, uid, ids, context=context):
            if rec.state not in ['draft', 'cancel', 'confirm']:
                raise osv.except_osv(_('Warning!'),_('You cannot delete a leave which is in %s state.')%(rec.state))
        return super(hr_holidays, self).unlink(cr, uid, ids, context)
    def onchange_date_from(self, cr, uid, ids, date_to, date_from):
        """
        If there are no date set for date_to, automatically set one 8 hours later than
        the date_from.
        Also update the number_of_days.
        """
        # date_to has to be greater than date_from
        if (date_from and date_to) and (date_from > date_to):
            raise osv.except_osv(_('Warning!'),_('The start date must be anterior to the end date.'))
        result = {'value': {}}
        # No date_to set so far: automatically compute one 8 hours later
        if date_from and not date_to:
            date_to_with_delta = datetime.datetime.strptime(date_from, tools.DEFAULT_SERVER_DATETIME_FORMAT) + datetime.timedelta(hours=8)
            result['value']['date_to'] = str(date_to_with_delta)
        # Compute and update the number of days
        if (date_to and date_from) and (date_from <= date_to):
            diff_day = self._get_number_of_days(date_from, date_to)
            # floor(...)+1 counts calendar days inclusive of the starting day
            result['value']['number_of_days_temp'] = round(math.floor(diff_day))+1
        else:
            result['value']['number_of_days_temp'] = 0
        return result
    def onchange_date_to(self, cr, uid, ids, date_to, date_from):
        """
        Update the number_of_days.
        """
        # date_to has to be greater than date_from
        if (date_from and date_to) and (date_from > date_to):
            raise osv.except_osv(_('Warning!'),_('The start date must be anterior to the end date.'))
        result = {'value': {}}
        # Compute and update the number of days
        if (date_to and date_from) and (date_from <= date_to):
            diff_day = self._get_number_of_days(date_from, date_to)
            # floor(...)+1 counts calendar days inclusive of the starting day
            result['value']['number_of_days_temp'] = round(math.floor(diff_day))+1
        else:
            result['value']['number_of_days_temp'] = 0
        return result
    def create(self, cr, uid, values, context=None):
        """ Override to avoid automatic logging of creation """
        if context is None:
            context = {}
        context = dict(context, mail_create_nolog=True)
        # Only HR officers may create a request directly in an approved/validated state.
        if values.get('state') and values['state'] not in ['draft', 'confirm', 'cancel'] and not self.pool['res.users'].has_group(cr, uid, 'base.group_hr_user'):
            raise osv.except_osv(_('Warning!'), _('You cannot set a leave request as \'%s\'. Contact a human resource manager.') % values.get('state'))
        return super(hr_holidays, self).create(cr, uid, values, context=context)
    def write(self, cr, uid, ids, vals, context=None):
        # Same access rule as create(): only HR officers may move a request
        # into an approved/validated state by direct write.
        if vals.get('state') and vals['state'] not in ['draft', 'confirm', 'cancel'] and not self.pool['res.users'].has_group(cr, uid, 'base.group_hr_user'):
            raise osv.except_osv(_('Warning!'), _('You cannot set a leave request as \'%s\'. Contact a human resource manager.') % vals.get('state'))
        return super(hr_holidays, self).write(cr, uid, ids, vals, context=context)
    def holidays_reset(self, cr, uid, ids, context=None):
        # Send the requests back to draft, clearing both approval managers,
        # then recursively reset and delete any child requests that were
        # generated from a category-mode allocation.
        self.write(cr, uid, ids, {
            'state': 'draft',
            'manager_id': False,
            'manager_id2': False,
        })
        to_unlink = []
        for record in self.browse(cr, uid, ids, context=context):
            for record2 in record.linked_request_ids:
                self.holidays_reset(cr, uid, [record2.id], context=context)
                to_unlink.append(record2.id)
        if to_unlink:
            self.unlink(cr, uid, to_unlink, context=context)
        return True
    def holidays_first_validate(self, cr, uid, ids, context=None):
        # First-level approval: record the current user's employee as the
        # first manager, notify followers, and move to 'validate1'.
        obj_emp = self.pool.get('hr.employee')
        ids2 = obj_emp.search(cr, uid, [('user_id', '=', uid)])
        manager = ids2 and ids2[0] or False
        self.holidays_first_validate_notificate(cr, uid, ids, context=context)
        return self.write(cr, uid, ids, {'state':'validate1', 'manager_id': manager})
def holidays_validate(self, cr, uid, ids, context=None):
obj_emp = self.pool.get('hr.employee')
ids2 = obj_emp.search(cr, uid, [('user_id', '=', uid)])
manager = ids2 and ids2[0] or False
self.write(cr, uid, ids, {'state':'validate'})
data_holiday = self.browse(cr, uid, ids)
for record in data_holiday:
if record.double_validation:
self.write(cr, uid, [record.id], {'manager_id2': manager})
else:
self.write(cr, uid, [record.id], {'manager_id': manager})
if record.holiday_type == 'employee' and record.type == 'remove':
meeting_obj = self.pool.get('calendar.event')
meeting_vals = {
'name': record.name or _('Leave Request'),
'categ_ids': record.holiday_status_id.categ_id and [(6,0,[record.holiday_status_id.categ_id.id])] or [],
'duration': record.number_of_days_temp * 8,
'description': record.notes,
'user_id': record.user_id.id,
'start': record.date_from,
'stop': record.date_to,
'allday': False,
'state': 'open', # to block that meeting date in the calendar
'class': 'confidential'
}
#Add the partner_id (if exist) as an attendee
if record.user_id and record.user_id.partner_id:
meeting_vals['partner_ids'] = [(4,record.user_id.partner_id.id)]
ctx_no_email = dict(context or {}, no_email=True)
meeting_id = meeting_obj.create(cr, uid, meeting_vals, context=ctx_no_email)
self._create_resource_leave(cr, uid, [record], context=context)
self.write(cr, uid, ids, {'meeting_id': meeting_id})
elif record.holiday_type == 'category':
emp_ids = obj_emp.search(cr, uid, [('category_ids', 'child_of', [record.category_id.id])])
leave_ids = []
for emp in obj_emp.browse(cr, uid, emp_ids):
vals = {
'name': record.name,
'type': record.type,
'holiday_type': 'employee',
'holiday_status_id': record.holiday_status_id.id,
'date_from': record.date_from,
'date_to': record.date_to,
'notes': record.notes,
'number_of_days_temp': record.number_of_days_temp,
'parent_id': record.id,
'employee_id': emp.id
}
leave_ids.append(self.create(cr, uid, vals, context=None))
for leave_id in leave_ids:
# TODO is it necessary to interleave the calls?
for sig in ('confirm', 'validate', 'second_validate'):
self.signal_workflow(cr, uid, [leave_id], sig)
return True
    def holidays_confirm(self, cr, uid, ids, context=None):
        # Subscribe the employee's direct manager (if any, and linked to a
        # user) so they are notified, then move the request to 'confirm'.
        for record in self.browse(cr, uid, ids, context=context):
            if record.employee_id and record.employee_id.parent_id and record.employee_id.parent_id.user_id:
                self.message_subscribe_users(cr, uid, [record.id], user_ids=[record.employee_id.parent_id.user_id.id], context=context)
        return self.write(cr, uid, ids, {'state': 'confirm'})
    def holidays_refuse(self, cr, uid, ids, context=None):
        # Record which approval level refused the request: first manager when
        # it was waiting second validation, second manager otherwise; then run
        # the cancellation side effects (meeting + resource leave cleanup).
        obj_emp = self.pool.get('hr.employee')
        ids2 = obj_emp.search(cr, uid, [('user_id', '=', uid)])
        manager = ids2 and ids2[0] or False
        for holiday in self.browse(cr, uid, ids, context=context):
            if holiday.state == 'validate1':
                self.write(cr, uid, [holiday.id], {'state': 'refuse', 'manager_id': manager})
            else:
                self.write(cr, uid, [holiday.id], {'state': 'refuse', 'manager_id2': manager})
        self.holidays_cancel(cr, uid, ids, context=context)
        return True
    def holidays_cancel(self, cr, uid, ids, context=None):
        for record in self.browse(cr, uid, ids):
            # Delete the meeting
            if record.meeting_id:
                record.meeting_id.unlink()
            # If a category that created several holidays, cancel all related
            self.signal_workflow(cr, uid, map(attrgetter('id'), record.linked_request_ids or []), 'refuse')
        # Remove the matching resource.calendar.leaves entries as well.
        self._remove_resource_leave(cr, uid, ids, context=context)
        return True
    def check_holidays(self, cr, uid, ids, context=None):
        # Verify the employee still has enough allocated days for each leave
        # removal request on a limited leave type; limitless types are skipped.
        for record in self.browse(cr, uid, ids, context=context):
            if record.holiday_type != 'employee' or record.type != 'remove' or not record.employee_id or record.holiday_status_id.limit:
                continue
            leave_days = self.pool.get('hr.holidays.status').get_days(cr, uid, [record.holiday_status_id.id], record.employee_id.id, context=context)[record.holiday_status_id.id]
            if leave_days['remaining_leaves'] < 0 or leave_days['virtual_remaining_leaves'] < 0:
                # Raising a warning gives a more user-friendly feedback than the default constraint error
                raise Warning(_('The number of remaining leaves is not sufficient for this leave type.\n'
                                'Please verify also the leaves waiting for validation.'))
        return True
# -----------------------------
# OpenChatter and notifications
# -----------------------------
    def _needaction_domain_get(self, cr, uid, context=None):
        # Domain of requests the current user must act on: requests of direct
        # subordinates waiting approval, plus (for HR managers) requests
        # waiting second validation.
        emp_obj = self.pool.get('hr.employee')
        empids = emp_obj.search(cr, uid, [('parent_id.user_id', '=', uid)], context=context)
        dom = ['&', ('state', '=', 'confirm'), ('employee_id', 'in', empids)]
        # if this user is a hr.manager, he should do second validations
        if self.pool.get('res.users').has_group(cr, uid, 'base.group_hr_manager'):
            dom = ['|'] + dom + [('state', '=', 'validate1')]
        return dom
    def holidays_first_validate_notificate(self, cr, uid, ids, context=None):
        # Post a chatter message on each request after first-level approval.
        for obj in self.browse(cr, uid, ids, context=context):
            self.message_post(cr, uid, [obj.id],
                _("Request approved, waiting second validation."), context=context)
class resource_calendar_leaves(osv.osv):
    """Extend resource calendar leaves with a back-link to the originating holiday."""
    _inherit = "resource.calendar.leaves"
    _description = "Leave Detail"
    _columns = {
        # Holiday request that generated this calendar leave entry
        # (set by hr_holidays._create_resource_leave).
        'holiday_id': fields.many2one("hr.holidays", "Leave Request"),
    }
class hr_employee(osv.osv):
    """Extend hr.employee with leave counters and current-leave status fields."""
    _inherit="hr.employee"
    def create(self, cr, uid, vals, context=None):
        # don't pass the value of remaining leave if it's 0 at the creation time, otherwise it will trigger the inverse
        # function _set_remaining_days and the system may not be configured for. Note that we don't have this problem on
        # the write because the clients only send the fields that have been modified.
        if 'remaining_leaves' in vals and not vals['remaining_leaves']:
            del(vals['remaining_leaves'])
        return super(hr_employee, self).create(cr, uid, vals, context=context)
    def _set_remaining_days(self, cr, uid, empl_id, name, value, arg, context=None):
        # Inverse of _get_remaining_days: increasing the value creates and
        # auto-approves an allocation request; decreasing is forbidden.
        employee = self.browse(cr, uid, empl_id, context=context)
        diff = value - employee.remaining_leaves
        type_obj = self.pool.get('hr.holidays.status')
        holiday_obj = self.pool.get('hr.holidays')
        # Find for holidays status
        status_ids = type_obj.search(cr, uid, [('limit', '=', False)], context=context)
        if len(status_ids) != 1 :
            raise osv.except_osv(_('Warning!'),_("The feature behind the field 'Remaining Legal Leaves' can only be used when there is only one leave type with the option 'Allow to Override Limit' unchecked. (%s Found). Otherwise, the update is ambiguous as we cannot decide on which leave type the update has to be done. \nYou may prefer to use the classic menus 'Leave Requests' and 'Allocation Requests' located in 'Human Resources \ Leaves' to manage the leave days of the employees if the configuration does not allow to use this field.") % (len(status_ids)))
        status_id = status_ids and status_ids[0] or False
        if not status_id:
            return False
        if diff > 0:
            leave_id = holiday_obj.create(cr, uid, {'name': _('Allocation for %s') % employee.name, 'employee_id': employee.id, 'holiday_status_id': status_id, 'type': 'add', 'holiday_type': 'employee', 'number_of_days_temp': diff}, context=context)
        elif diff < 0:
            raise osv.except_osv(_('Warning!'), _('You cannot reduce validated allocation requests'))
        else:
            return False
        # Push the generated allocation through the full approval workflow.
        for sig in ('confirm', 'validate', 'second_validate'):
            holiday_obj.signal_workflow(cr, uid, [leave_id], sig)
        return True
    def _get_remaining_days(self, cr, uid, ids, name, args, context=None):
        # Sum validated leave days (limited types only) per employee directly
        # in SQL; employees with no leaves default to 0.0.
        cr.execute("""SELECT
                sum(h.number_of_days) as days,
                h.employee_id
            from
                hr_holidays h
                join hr_holidays_status s on (s.id=h.holiday_status_id)
            where
                h.state='validate' and
                s.limit=False and
                h.employee_id in %s
            group by h.employee_id""", (tuple(ids),))
        res = cr.dictfetchall()
        remaining = {}
        for r in res:
            remaining[r['employee_id']] = r['days']
        for employee_id in ids:
            if not remaining.get(employee_id):
                remaining[employee_id] = 0.0
        return remaining
    def _get_leave_status(self, cr, uid, ids, name, args, context=None):
        # Multi-field function: for each employee, expose the leave (if any)
        # covering the current day, excluding cancelled/refused requests.
        holidays_obj = self.pool.get('hr.holidays')
        holidays_id = holidays_obj.search(cr, uid,
            [('employee_id', 'in', ids), ('date_from','<=',time.strftime('%Y-%m-%d %H:%M:%S')),
            ('date_to','>=',time.strftime('%Y-%m-%d 23:59:59')),('type','=','remove'),('state','not in',('cancel','refuse'))],
            context=context)
        result = {}
        for id in ids:
            result[id] = {
                'current_leave_state': False,
                'current_leave_id': False,
                'leave_date_from':False,
                'leave_date_to':False,
            }
        for holiday in self.pool.get('hr.holidays').browse(cr, uid, holidays_id, context=context):
            result[holiday.employee_id.id]['leave_date_from'] = holiday.date_from
            result[holiday.employee_id.id]['leave_date_to'] = holiday.date_to
            result[holiday.employee_id.id]['current_leave_state'] = holiday.state
            result[holiday.employee_id.id]['current_leave_id'] = holiday.holiday_status_id.id
        return result
    def _leaves_count(self, cr, uid, ids, field_name, arg, context=None):
        # Number of leave-removal requests per employee (any state).
        Holidays = self.pool['hr.holidays']
        return {
            employee_id: Holidays.search_count(cr,uid, [('employee_id', '=', employee_id), ('type', '=', 'remove')], context=context)
            for employee_id in ids
        }
    _columns = {
        'remaining_leaves': fields.function(_get_remaining_days, string='Remaining Legal Leaves', fnct_inv=_set_remaining_days, type="float", help='Total number of legal leaves allocated to this employee, change this value to create allocation/leave request. Total based on all the leave types without overriding limit.'),
        'current_leave_state': fields.function(_get_leave_status, multi="leave_status", string="Current Leave Status", type="selection",
            selection=[('draft', 'New'), ('confirm', 'Waiting Approval'), ('refuse', 'Refused'),
            ('validate1', 'Waiting Second Approval'), ('validate', 'Approved'), ('cancel', 'Cancelled')]),
        'current_leave_id': fields.function(_get_leave_status, multi="leave_status", string="Current Leave Type",type='many2one', relation='hr.holidays.status'),
        'leave_date_from': fields.function(_get_leave_status, multi='leave_status', type='date', string='From Date'),
        'leave_date_to': fields.function(_get_leave_status, multi='leave_status', type='date', string='To Date'),
        'leaves_count': fields.function(_leaves_count, type='integer', string='Leaves'),
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
UXE/local-edx | common/lib/xmodule/xmodule/randomize_module.py | 54 | 3690 | import logging
import random
from xmodule.x_module import XModule, STUDENT_VIEW
from xmodule.seq_module import SequenceDescriptor
from lxml import etree
from xblock.fields import Scope, Integer
from xblock.fragment import Fragment
log = logging.getLogger('edx.' + __name__)
class RandomizeFields(object):
    """Field mixin shared by RandomizeModule and RandomizeDescriptor."""
    # Index of the randomly selected child, persisted per student so the
    # same child is shown on every visit.
    choice = Integer(help="Which random child was chosen", scope=Scope.user_state)
class RandomizeModule(RandomizeFields, XModule):
    """
    Chooses a random child module. Chooses the same one every time for each student.
    Example:
    <randomize>
    <problem url_name="problem1" />
    <problem url_name="problem2" />
    <problem url_name="problem3" />
    </randomize>
    User notes:
    - If you're randomizing amongst graded modules, each of them MUST be worth the same
    number of points. Otherwise, the earth will be overrun by monsters from the
    deeps. You have been warned.
    Technical notes:
    - There is more dark magic in this code than I'd like. The whole varying-children +
    grading interaction is a tangle between super and subclasses of descriptors and
    modules.
    """
    def __init__(self, *args, **kwargs):
        super(RandomizeModule, self).__init__(*args, **kwargs)
        # NOTE: calling self.get_children() creates a circular reference--
        # it calls get_child_descriptors() internally, but that doesn't work until
        # we've picked a choice
        num_choices = len(self.descriptor.get_children())
        if self.choice > num_choices:
            # Oops. Children changed. Reset.
            self.choice = None
        if self.choice is None:
            # choose one based on the system seed, or randomly if that's not available
            if num_choices > 0:
                if self.system.seed is not None:
                    self.choice = self.system.seed % num_choices
                else:
                    self.choice = random.randrange(0, num_choices)
        if self.choice is not None:
            self.child_descriptor = self.descriptor.get_children()[self.choice]
            # Now get_children() should return a list with one element
            log.debug("children of randomize module (should be only 1): %s",
                self.get_children())
            self.child = self.get_children()[0]
        else:
            # No children to choose from: leave both references unset.
            self.child_descriptor = None
            self.child = None
    def get_child_descriptors(self):
        """
        For grading--return just the chosen child.
        """
        if self.child_descriptor is None:
            return []
        return [self.child_descriptor]
    def student_view(self, context):
        # Render only the chosen child; placeholder fragment when empty.
        if self.child is None:
            # raise error instead? In fact, could complain on descriptor load...
            return Fragment(content=u"<div>Nothing to randomize between</div>")
        return self.child.render(STUDENT_VIEW, context)
    def get_icon_class(self):
        # Delegate the icon to the chosen child, falling back to a generic one.
        return self.child.get_icon_class() if self.child else 'other'
class RandomizeDescriptor(RandomizeFields, SequenceDescriptor):
    """Descriptor for <randomize>: a plain container persisted as XML."""
    # the editing interface can be the same as for sequences -- just a container
    module_class = RandomizeModule
    filename_extension = "xml"
    def definition_to_xml(self, resource_fs):
        # Serialize as a <randomize> element containing all children.
        xml_object = etree.Element('randomize')
        for child in self.get_children():
            self.runtime.add_block_as_child_node(child, xml_object)
        return xml_object
    def has_dynamic_children(self):
        """
        Grading needs to know that only one of the children is actually "real". This
        makes it use module.get_child_descriptors().
        """
        return True
| agpl-3.0 |
danilito19/django | tests/model_meta/test_legacy.py | 199 | 7556 | import warnings
from django import test
from django.contrib.contenttypes.fields import GenericRelation
from django.core.exceptions import FieldDoesNotExist
from django.db.models.fields import CharField, related
from django.utils.deprecation import RemovedInDjango110Warning
from .models import BasePerson, Person
from .results import TEST_RESULTS
class OptionsBaseTests(test.SimpleTestCase):
    """Shared helpers that normalize (related object, model) pairs for comparison."""
    def _map_related_query_names(self, res):
        # Pair each related object's query name with its model.
        return tuple((rel.field.related_query_name(), model) for rel, model in res)
    def _map_names(self, res):
        # Pair each field's name with its model.
        return tuple((field.name, model) for field, model in res)
class M2MTests(OptionsBaseTests):
    def test_many_to_many_with_model(self):
        # The legacy get_m2m_with_model() API must still return the expected
        # models while emitting exactly one deprecation warning per call.
        for model, expected_result in TEST_RESULTS['many_to_many_with_model'].items():
            with warnings.catch_warnings(record=True) as warning:
                warnings.simplefilter("always")
                models = [model for field, model in model._meta.get_m2m_with_model()]
            self.assertEqual([RemovedInDjango110Warning], [w.message.__class__ for w in warning])
            self.assertEqual(models, expected_result)
@test.ignore_warnings(category=RemovedInDjango110Warning)
class RelatedObjectsTests(OptionsBaseTests):
    """Legacy get_all_related_objects_with_model() API tests."""
    def key_name(self, r):
        # Sort key for (name, model) pairs. Was a lambda assignment, which
        # PEP 8 (E731) discourages; a def gives a proper name in tracebacks.
        return r[0]
    def test_related_objects(self):
        result_key = 'get_all_related_objects_with_model_legacy'
        for model, expected in TEST_RESULTS[result_key].items():
            objects = model._meta.get_all_related_objects_with_model()
            self.assertEqual(self._map_related_query_names(objects), expected)
    def test_related_objects_local(self):
        result_key = 'get_all_related_objects_with_model_local_legacy'
        for model, expected in TEST_RESULTS[result_key].items():
            objects = model._meta.get_all_related_objects_with_model(local_only=True)
            self.assertEqual(self._map_related_query_names(objects), expected)
    def test_related_objects_include_hidden(self):
        result_key = 'get_all_related_objects_with_model_hidden_legacy'
        for model, expected in TEST_RESULTS[result_key].items():
            objects = model._meta.get_all_related_objects_with_model(include_hidden=True)
            self.assertEqual(
                sorted(self._map_names(objects), key=self.key_name),
                sorted(expected, key=self.key_name)
            )
    def test_related_objects_include_hidden_local_only(self):
        result_key = 'get_all_related_objects_with_model_hidden_local_legacy'
        for model, expected in TEST_RESULTS[result_key].items():
            objects = model._meta.get_all_related_objects_with_model(
                include_hidden=True, local_only=True)
            self.assertEqual(
                sorted(self._map_names(objects), key=self.key_name),
                sorted(expected, key=self.key_name)
            )
    def test_related_objects_proxy(self):
        result_key = 'get_all_related_objects_with_model_proxy_legacy'
        for model, expected in TEST_RESULTS[result_key].items():
            objects = model._meta.get_all_related_objects_with_model(
                include_proxy_eq=True)
            self.assertEqual(self._map_related_query_names(objects), expected)
    def test_related_objects_proxy_hidden(self):
        result_key = 'get_all_related_objects_with_model_proxy_hidden_legacy'
        for model, expected in TEST_RESULTS[result_key].items():
            objects = model._meta.get_all_related_objects_with_model(
                include_proxy_eq=True, include_hidden=True)
            self.assertEqual(
                sorted(self._map_names(objects), key=self.key_name),
                sorted(expected, key=self.key_name)
            )
@test.ignore_warnings(category=RemovedInDjango110Warning)
class RelatedM2MTests(OptionsBaseTests):
    """Legacy related many-to-many Options API tests."""
    def test_related_m2m_with_model(self):
        result_key = 'get_all_related_many_to_many_with_model_legacy'
        for model, expected in TEST_RESULTS[result_key].items():
            objects = model._meta.get_all_related_m2m_objects_with_model()
            self.assertEqual(self._map_related_query_names(objects), expected)
    def test_related_m2m_local_only(self):
        result_key = 'get_all_related_many_to_many_local_legacy'
        for model, expected in TEST_RESULTS[result_key].items():
            objects = model._meta.get_all_related_many_to_many_objects(local_only=True)
            self.assertEqual([o.field.related_query_name() for o in objects], expected)
    def test_related_m2m_asymmetrical(self):
        # Use assertIn (not assertTrue(x in y)) for consistency with
        # test_related_m2m_symmetrical and clearer failure messages.
        m2m = Person._meta.many_to_many
        self.assertIn('following_base', [f.attname for f in m2m])
        related_m2m = Person._meta.get_all_related_many_to_many_objects()
        self.assertIn('followers_base', [o.field.related_query_name() for o in related_m2m])
    def test_related_m2m_symmetrical(self):
        m2m = Person._meta.many_to_many
        self.assertIn('friends_base', [f.attname for f in m2m])
        related_m2m = Person._meta.get_all_related_many_to_many_objects()
        self.assertIn('friends_inherited_rel_+', [o.field.related_query_name() for o in related_m2m])
@test.ignore_warnings(category=RemovedInDjango110Warning)
class GetFieldByNameTests(OptionsBaseTests):
    """Legacy get_field_by_name() returns (field, model, direct, m2m) tuples."""
    def test_get_data_field(self):
        field_info = Person._meta.get_field_by_name('data_abstract')
        self.assertEqual(field_info[1:], (BasePerson, True, False))
        self.assertIsInstance(field_info[0], CharField)
    def test_get_m2m_field(self):
        field_info = Person._meta.get_field_by_name('m2m_base')
        self.assertEqual(field_info[1:], (BasePerson, True, True))
        self.assertIsInstance(field_info[0], related.ManyToManyField)
    def test_get_related_object(self):
        field_info = Person._meta.get_field_by_name('relating_baseperson')
        self.assertEqual(field_info[1:], (BasePerson, False, False))
        self.assertTrue(field_info[0].auto_created)
    def test_get_related_m2m(self):
        field_info = Person._meta.get_field_by_name('relating_people')
        self.assertEqual(field_info[1:], (None, False, True))
        self.assertTrue(field_info[0].auto_created)
    def test_get_generic_relation(self):
        field_info = Person._meta.get_field_by_name('generic_relation_base')
        self.assertEqual(field_info[1:], (None, True, False))
        self.assertIsInstance(field_info[0], GenericRelation)
    def test_get_m2m_field_invalid(self):
        # get_field() with the deprecated 'many_to_many' kwarg must both work
        # and warn once per call.
        with warnings.catch_warnings(record=True) as warning:
            warnings.simplefilter("always")
            self.assertRaises(
                FieldDoesNotExist,
                Person._meta.get_field,
                **{'field_name': 'm2m_base', 'many_to_many': False}
            )
            self.assertEqual(Person._meta.get_field('m2m_base', many_to_many=True).name, 'm2m_base')
            # 2 RemovedInDjango110Warning messages should be raised, one for each call of get_field()
            # with the 'many_to_many' argument.
            self.assertEqual(
                [RemovedInDjango110Warning, RemovedInDjango110Warning],
                [w.message.__class__ for w in warning]
            )
@test.ignore_warnings(category=RemovedInDjango110Warning)
class GetAllFieldNamesTestCase(OptionsBaseTests):
    """Legacy get_all_field_names() must match the recorded expectations."""
    def test_get_all_field_names(self):
        for model, expected_names in TEST_RESULTS['get_all_field_names'].items():
            field_names = model._meta.get_all_field_names()
            self.assertEqual(sorted(str(name) for name in field_names), sorted(expected_names))
| bsd-3-clause |
valmynd/MediaFetcher | src/plugins/youtube_dl/youtube_dl/extractor/francetv.py | 1 | 15999 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_str,
compat_urlparse,
)
from ..utils import (
clean_html,
determine_ext,
ExtractorError,
int_or_none,
parse_duration,
try_get,
url_or_none,
)
from .dailymotion import DailymotionIE
class FranceTVBaseInfoExtractor(InfoExtractor):
	def _make_url_result(self, video_or_full_id, catalog=None):
		"""Wrap an id (optionally 'id@catalog') into a francetv: URL result.

		The catalog suffix is appended only when the id does not already
		carry one.
		"""
		video_id = video_or_full_id.split('@')[0]
		full_id = 'francetv:%s' % video_or_full_id
		if catalog and '@' not in video_or_full_id:
			full_id += '@%s' % catalog
		return self.url_result(full_id, ie=FranceTVIE.ie_key(), video_id=video_id)
class FranceTVIE(InfoExtractor):
	"""Core extractor resolving a France TV idDiffusion (plus optional catalog)."""
	_VALID_URL = r'''(?x)
					(?:
						https?://
							sivideo\.webservices\.francetelevisions\.fr/tools/getInfosOeuvre/v2/\?
							.*?\bidDiffusion=[^&]+|
						(?:
							https?://videos\.francetv\.fr/video/|
							francetv:
						)
						(?P<id>[^@]+)(?:@(?P<catalog>.+))?
					)
				'''
	_TESTS = [{
		# without catalog
		'url': 'https://sivideo.webservices.francetelevisions.fr/tools/getInfosOeuvre/v2/?idDiffusion=162311093&callback=_jsonp_loader_callback_request_0',
		'md5': 'c2248a8de38c4e65ea8fae7b5df2d84f',
		'info_dict': {
			'id': '162311093',
			'ext': 'mp4',
			'title': '13h15, le dimanche... - Les mystères de Jésus',
			'description': 'md5:75efe8d4c0a8205e5904498ffe1e1a42',
			'timestamp': 1502623500,
			'upload_date': '20170813',
		},
	}, {
		# with catalog
		'url': 'https://sivideo.webservices.francetelevisions.fr/tools/getInfosOeuvre/v2/?idDiffusion=NI_1004933&catalogue=Zouzous&callback=_jsonp_loader_callback_request_4',
		'only_matching': True,
	}, {
		'url': 'http://videos.francetv.fr/video/NI_657393@Regions',
		'only_matching': True,
	}, {
		'url': 'francetv:162311093',
		'only_matching': True,
	}, {
		'url': 'francetv:NI_1004933@Zouzous',
		'only_matching': True,
	}, {
		'url': 'francetv:NI_983319@Info-web',
		'only_matching': True,
	}, {
		'url': 'francetv:NI_983319',
		'only_matching': True,
	}, {
		'url': 'francetv:NI_657393@Regions',
		'only_matching': True,
	}, {
		# france-3 live
		'url': 'francetv:SIM_France3',
		'only_matching': True,
	}]
	def _extract_video(self, video_id, catalogue=None):
		# Videos are identified by idDiffusion so catalogue part is optional.
		# However when provided, some extra formats may be returned so we pass
		# it if available.
		info = self._download_json(
			'https://sivideo.webservices.francetelevisions.fr/tools/getInfosOeuvre/v2/',
			video_id, 'Downloading video JSON', query={
				'idDiffusion': video_id,
				'catalogue': catalogue or '',
			})
		if info.get('status') == 'NOK':
			raise ExtractorError(
				'%s returned error: %s' % (self.IE_NAME, info['message']),
				expected=True)
		# Geo-restriction: compare the caller's country against the allowed list.
		allowed_countries = info['videos'][0].get('geoblocage')
		if allowed_countries:
			georestricted = True
			geo_info = self._download_json(
				'http://geo.francetv.fr/ws/edgescape.json', video_id,
				'Downloading geo restriction info')
			country = geo_info['reponse']['geo_info']['country_code']
			if country not in allowed_countries:
				raise ExtractorError(
					'The video is not available from your location',
					expected=True)
		else:
			georestricted = False
		def sign(manifest_url, manifest_id):
			# Try each token host in turn; fall back to the unsigned URL.
			for host in ('hdfauthftv-a.akamaihd.net', 'hdfauth.francetv.fr'):
				signed_url = url_or_none(self._download_webpage(
					'https://%s/esi/TA' % host, video_id,
					'Downloading signed %s manifest URL' % manifest_id,
					fatal=False, query={
						'url': manifest_url,
					}))
				if signed_url:
					return signed_url
			return manifest_url
		is_live = None
		formats = []
		for video in info['videos']:
			if video['statut'] != 'ONLINE':
				continue
			video_url = video['url']
			if not video_url:
				continue
			if is_live is None:
				is_live = (try_get(
					video, lambda x: x['plages_ouverture'][0]['direct'],
					bool) is True) or '/live.francetv.fr/' in video_url
			format_id = video['format']
			ext = determine_ext(video_url)
			if ext == 'f4m':
				if georestricted:
					# See https://github.com/rg3/youtube-dl/issues/3963
					# m3u8 urls work fine
					continue
				formats.extend(self._extract_f4m_formats(
					sign(video_url, format_id) + '&hdcore=3.7.0&plugin=aasp-3.7.0.39.44',
					video_id, f4m_id=format_id, fatal=False))
			elif ext == 'm3u8':
				formats.extend(self._extract_m3u8_formats(
					sign(video_url, format_id), video_id, 'mp4',
					entry_protocol='m3u8_native', m3u8_id=format_id,
					fatal=False))
			elif video_url.startswith('rtmp'):
				formats.append({
					'url': video_url,
					'format_id': 'rtmp-%s' % format_id,
					'ext': 'flv',
				})
			else:
				if self._is_valid_url(video_url, video_id, format_id):
					formats.append({
						'url': video_url,
						'format_id': format_id,
					})
		self._sort_formats(formats)
		title = info['titre']
		subtitle = info.get('sous_titre')
		if subtitle:
			title += ' - %s' % subtitle
		title = title.strip()
		subtitles = {}
		subtitles_list = [{
			'url': subformat['url'],
			'ext': subformat.get('format'),
		} for subformat in info.get('subtitles', []) if subformat.get('url')]
		if subtitles_list:
			subtitles['fr'] = subtitles_list
		return {
			'id': video_id,
			'title': self._live_title(title) if is_live else title,
			'description': clean_html(info['synopsis']),
			'thumbnail': compat_urlparse.urljoin('http://pluzz.francetv.fr', info['image']),
			'duration': int_or_none(info.get('real_duration')) or parse_duration(info['duree']),
			'timestamp': int_or_none(info['diffusion']['timestamp']),
			'is_live': is_live,
			'formats': formats,
			'subtitles': subtitles,
		}
	def _real_extract(self, url):
		mobj = re.match(self._VALID_URL, url)
		video_id = mobj.group('id')
		catalog = mobj.group('catalog')
		# Webservice form of the URL: the id/catalog live in the query string.
		if not video_id:
			qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
			video_id = qs.get('idDiffusion', [None])[0]
			catalog = qs.get('catalogue', [None])[0]
			if not video_id:
				raise ExtractorError('Invalid URL', expected=True)
		return self._extract_video(video_id, catalog)
class FranceTVSiteIE(FranceTVBaseInfoExtractor):
	"""Extractor for france.tv pages: scrapes the video id and delegates to FranceTVIE."""
	_VALID_URL = r'https?://(?:(?:www\.)?france\.tv|mobile\.france\.tv)/(?:[^/]+/)*(?P<id>[^/]+)\.html'
	_TESTS = [{
		'url': 'https://www.france.tv/france-2/13h15-le-dimanche/140921-les-mysteres-de-jesus.html',
		'info_dict': {
			'id': '162311093',
			'ext': 'mp4',
			'title': '13h15, le dimanche... - Les mystères de Jésus',
			'description': 'md5:75efe8d4c0a8205e5904498ffe1e1a42',
			'timestamp': 1502623500,
			'upload_date': '20170813',
		},
		'params': {
			'skip_download': True,
		},
		'add_ie': [FranceTVIE.ie_key()],
	}, {
		# france3
		'url': 'https://www.france.tv/france-3/des-chiffres-et-des-lettres/139063-emission-du-mardi-9-mai-2017.html',
		'only_matching': True,
	}, {
		# france4
		'url': 'https://www.france.tv/france-4/hero-corp/saison-1/134151-apres-le-calme.html',
		'only_matching': True,
	}, {
		# france5
		'url': 'https://www.france.tv/france-5/c-a-dire/saison-10/137013-c-a-dire.html',
		'only_matching': True,
	}, {
		# franceo
		'url': 'https://www.france.tv/france-o/archipels/132249-mon-ancetre-l-esclave.html',
		'only_matching': True,
	}, {
		# france2 live
		'url': 'https://www.france.tv/france-2/direct.html',
		'only_matching': True,
	}, {
		'url': 'https://www.france.tv/documentaires/histoire/136517-argentine-les-500-bebes-voles-de-la-dictature.html',
		'only_matching': True,
	}, {
		'url': 'https://www.france.tv/jeux-et-divertissements/divertissements/133965-le-web-contre-attaque.html',
		'only_matching': True,
	}, {
		'url': 'https://mobile.france.tv/france-5/c-dans-l-air/137347-emission-du-vendredi-12-mai-2017.html',
		'only_matching': True,
	}, {
		'url': 'https://www.france.tv/142749-rouge-sang.html',
		'only_matching': True,
	}, {
		# france-3 live
		'url': 'https://www.france.tv/france-3/direct.html',
		'only_matching': True,
	}]
	def _real_extract(self, url):
		display_id = self._match_id(url)
		webpage = self._download_webpage(url, display_id)
		catalogue = None
		# Preferred: the id embedded in the data-main-video attribute.
		video_id = self._search_regex(
			r'data-main-video=(["\'])(?P<id>(?:(?!\1).)+)\1',
			webpage, 'video id', default=None, group='id')
		if not video_id:
			# Fallback: legacy "id@catalogue" reference in links/player setup.
			video_id, catalogue = self._html_search_regex(
				r'(?:href=|player\.setVideo\(\s*)"http://videos?\.francetv\.fr/video/([^@]+@[^"]+)"',
				webpage, 'video ID').split('@')
		return self._make_url_result(video_id, catalogue)
class FranceTVEmbedIE(FranceTVBaseInfoExtractor):
    """Extractor for embed.francetv.fr iframes (``?ue=<key>`` URLs).

    The opaque embed key is resolved to the real video id through a JSON
    web service; extraction is then delegated via _make_url_result.
    """

    _VALID_URL = r'https?://embed\.francetv\.fr/*\?.*?\bue=(?P<id>[^&]+)'

    _TESTS = [{
        'url': 'http://embed.francetv.fr/?ue=7fd581a2ccf59d2fc5719c5c13cf6961',
        'info_dict': {
            'id': 'NI_983319',
            'ext': 'mp4',
            'title': 'Le Pen Reims',
            'upload_date': '20170505',
            'timestamp': 1493981780,
            'duration': 16,
        },
        'params': {
            'skip_download': True,
        },
        'add_ie': [FranceTVIE.ie_key()],
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        # map the embed key to the canonical video id and catalog
        video = self._download_json(
            'http://api-embed.webservices.francetelevisions.fr/key/%s' % video_id,
            video_id)

        return self._make_url_result(video['video_id'], video.get('catalog'))
class FranceTVInfoIE(FranceTVBaseInfoExtractor):
    """Extractor for francetvinfo.fr articles (www, mobile and the
    france3-regions subdomain).

    Pages may embed either Dailymotion players (handled first) or native
    francetv videos referenced as "<video_id>@<catalogue>".
    """

    IE_NAME = 'francetvinfo.fr'
    _VALID_URL = r'https?://(?:www|mobile|france3-regions)\.francetvinfo\.fr/(?:[^/]+/)*(?P<id>[^/?#&.]+)'

    _TESTS = [{
        'url': 'http://www.francetvinfo.fr/replay-jt/france-3/soir-3/jt-grand-soir-3-lundi-26-aout-2013_393427.html',
        'info_dict': {
            'id': '84981923',
            'ext': 'mp4',
            'title': 'Soir 3',
            'upload_date': '20130826',
            'timestamp': 1377548400,
            'subtitles': {
                'fr': 'mincount:2',
            },
        },
        'params': {
            'skip_download': True,
        },
        'add_ie': [FranceTVIE.ie_key()],
    }, {
        'url': 'http://www.francetvinfo.fr/elections/europeennes/direct-europeennes-regardez-le-debat-entre-les-candidats-a-la-presidence-de-la-commission_600639.html',
        'only_matching': True,
    }, {
        'url': 'http://www.francetvinfo.fr/economie/entreprises/les-entreprises-familiales-le-secret-de-la-reussite_933271.html',
        'only_matching': True,
    }, {
        'url': 'http://france3-regions.francetvinfo.fr/bretagne/cotes-d-armor/thalassa-echappee-breizh-ce-venredi-dans-les-cotes-d-armor-954961.html',
        'only_matching': True,
    }, {
        # Dailymotion embed
        'url': 'http://www.francetvinfo.fr/politique/notre-dame-des-landes/video-sur-france-inter-cecile-duflot-denonce-le-regard-meprisant-de-patrick-cohen_1520091.html',
        'md5': 'ee7f1828f25a648addc90cb2687b1f12',
        'info_dict': {
            'id': 'x4iiko0',
            'ext': 'mp4',
            'title': 'NDDL, référendum, Brexit : Cécile Duflot répond à Patrick Cohen',
            'description': 'Au lendemain de la victoire du "oui" au référendum sur l\'aéroport de Notre-Dame-des-Landes, l\'ancienne ministre écologiste est l\'invitée de Patrick Cohen. Plus d\'info : https://www.franceinter.fr/emissions/le-7-9/le-7-9-27-juin-2016',
            'timestamp': 1467011958,
            'upload_date': '20160627',
            'uploader': 'France Inter',
            'uploader_id': 'x2q2ez',
        },
        'add_ie': ['Dailymotion'],
    }, {
        'url': 'http://france3-regions.francetvinfo.fr/limousin/emissions/jt-1213-limousin',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        display_id = self._match_id(url)

        webpage = self._download_webpage(url, display_id)

        # prefer embedded Dailymotion players when present
        dailymotion_urls = DailymotionIE._extract_urls(webpage)
        if dailymotion_urls:
            return self.playlist_result([
                self.url_result(dailymotion_url, DailymotionIE.ie_key())
                for dailymotion_url in dailymotion_urls])

        # otherwise look for a native "<id>@<catalogue>" reference
        video_id, catalogue = self._search_regex(
            (r'id-video=([^@]+@[^"]+)',
             r'<a[^>]+href="(?:https?:)?//videos\.francetv\.fr/video/([^@]+@[^"]+)"'),
            webpage, 'video id').split('@')

        return self._make_url_result(video_id, catalogue)
class FranceTVInfoSportIE(FranceTVBaseInfoExtractor):
    """Extractor for sport.francetvinfo.fr articles.

    The video id is read from a data-video attribute; the catalogue for
    this site is always 'Sport-web'.
    """

    IE_NAME = 'sport.francetvinfo.fr'
    _VALID_URL = r'https?://sport\.francetvinfo\.fr/(?:[^/]+/)*(?P<id>[^/?#&]+)'

    _TESTS = [{
        'url': 'https://sport.francetvinfo.fr/les-jeux-olympiques/retour-sur-les-meilleurs-moments-de-pyeongchang-2018',
        'info_dict': {
            'id': '6e49080e-3f45-11e8-b459-000d3a2439ea',
            'ext': 'mp4',
            'title': 'Retour sur les meilleurs moments de Pyeongchang 2018',
            'timestamp': 1523639962,
            'upload_date': '20180413',
        },
        'params': {
            'skip_download': True,
        },
        'add_ie': [FranceTVIE.ie_key()],
    }]

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        video_id = self._search_regex(r'data-video="([^"]+)"', webpage, 'video_id')
        # this site's videos all live in the fixed 'Sport-web' catalogue
        return self._make_url_result(video_id, 'Sport-web')
class GenerationWhatIE(InfoExtractor):
    """Extractor for generation-what.francetv.fr.

    These pages host their media on YouTube, so the page is only scraped
    for an 11-character YouTube id and the result is handed off to the
    Youtube extractor.
    """

    IE_NAME = 'france2.fr:generation-what'
    _VALID_URL = r'https?://generation-what\.francetv\.fr/[^/]+/video/(?P<id>[^/?#&]+)'

    _TESTS = [{
        'url': 'http://generation-what.francetv.fr/portrait/video/present-arms',
        'info_dict': {
            'id': 'wtvKYUG45iw',
            'ext': 'mp4',
            'title': 'Generation What - Garde à vous - FRA',
            'uploader': 'Generation What',
            'uploader_id': 'UCHH9p1eetWCgt4kXBYCb3_w',
            'upload_date': '20160411',
        },
        'params': {
            'skip_download': True,
        },
        'add_ie': ['Youtube'],
    }, {
        'url': 'http://generation-what.francetv.fr/europe/video/present-arms',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        display_id = self._match_id(url)

        webpage = self._download_webpage(url, display_id)

        # the page assigns the YouTube id to window.videoURL
        youtube_id = self._search_regex(
            r"window\.videoURL\s*=\s*'([0-9A-Za-z_-]{11})';",
            webpage, 'youtube id')

        return self.url_result(youtube_id, ie='Youtube', video_id=youtube_id)
class CultureboxIE(FranceTVBaseInfoExtractor):
    """Extractor for culturebox.francetvinfo.fr pages.

    Expired live replays are detected from a marker string in the page;
    otherwise the "<video_id>@<catalogue>" reference is extracted and
    delegated via _make_url_result.
    """

    _VALID_URL = r'https?://(?:m\.)?culturebox\.francetvinfo\.fr/(?:[^/]+/)*(?P<id>[^/?#&]+)'

    _TESTS = [{
        'url': 'https://culturebox.francetvinfo.fr/opera-classique/musique-classique/c-est-baroque/concerts/cantates-bwv-4-106-et-131-de-bach-par-raphael-pichon-57-268689',
        'info_dict': {
            'id': 'EV_134885',
            'ext': 'mp4',
            'title': 'Cantates BWV 4, 106 et 131 de Bach par Raphaël Pichon 5/7',
            'description': 'md5:19c44af004b88219f4daa50fa9a351d4',
            'upload_date': '20180206',
            'timestamp': 1517945220,
            'duration': 5981,
        },
        'params': {
            'skip_download': True,
        },
        'add_ie': [FranceTVIE.ie_key()],
    }]

    def _real_extract(self, url):
        display_id = self._match_id(url)

        webpage = self._download_webpage(url, display_id)

        # expired live replays render a fixed French notice in the markup
        if ">Ce live n'est plus disponible en replay<" in webpage:
            raise ExtractorError(
                'Video %s is not available' % display_id, expected=True)

        video_id, catalogue = self._search_regex(
            r'["\'>]https?://videos\.francetv\.fr/video/([^@]+@.+?)["\'<]',
            webpage, 'video id').split('@')

        return self._make_url_result(video_id, catalogue)
class FranceTVJeunesseIE(FranceTVBaseInfoExtractor):
    """Extractor for the kids sites zouzous.fr and ludo.fr.

    A hero page maps to a JSON playlist ("<page url>/playlist"); each
    playlist item's 'identity' is resolved via _make_url_result and the
    whole set is returned as a playlist.
    """

    _VALID_URL = r'(?P<url>https?://(?:www\.)?(?:zouzous|ludo)\.fr/heros/(?P<id>[^/?#&]+))'

    _TESTS = [{
        'url': 'https://www.zouzous.fr/heros/simon',
        'info_dict': {
            'id': 'simon',
        },
        'playlist_count': 9,
    }, {
        'url': 'https://www.ludo.fr/heros/ninjago',
        'info_dict': {
            'id': 'ninjago',
        },
        'playlist_count': 10,
    }, {
        'url': 'https://www.zouzous.fr/heros/simon?abc',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        playlist_id = mobj.group('id')

        playlist = self._download_json(
            '%s/%s' % (mobj.group('url'), 'playlist'), playlist_id)

        # 'count' doubles as an availability flag: absent/zero means the
        # hero page has no playable episodes
        if not playlist.get('count'):
            raise ExtractorError(
                '%s is not available' % playlist_id, expected=True)

        entries = []
        for item in playlist['items']:
            identity = item.get('identity')
            # only string identities are resolvable video references
            if identity and isinstance(identity, compat_str):
                entries.append(self._make_url_result(identity))

        return self.playlist_result(entries, playlist_id)
| gpl-3.0 |
kevinlondon/glances | glances/core/glances_snmp.py | 12 | 4873 | # -*- coding: utf-8 -*-
#
# This file is part of Glances.
#
# Copyright (C) 2015 Nicolargo <nicolas@nicolargo.com>
#
# Glances is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Glances is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
# Import Glances libs
from glances.core.glances_logging import logger
# Import mandatory PySNMP lib
try:
from pysnmp.entity.rfc3413.oneliner import cmdgen
except ImportError:
logger.critical("PySNMP library not found. To install it: pip install pysnmp")
sys.exit(2)
class GlancesSNMPClient(object):
    """SNMP client class (based on pysnmp library).

    Wraps pysnmp's one-liner CommandGenerator for simple GET and
    GETBULK requests over SNMP v1, v2c and v3.
    """

    def __init__(self, host='localhost', port=161, version='2c',
                 community='public', user='private', auth=''):
        """Store the connection settings and build a command generator.

        version is '1', '2c' or '3'; community is used for v1/v2c,
        user/auth for v3.
        """
        super(GlancesSNMPClient, self).__init__()
        self.cmdGen = cmdgen.CommandGenerator()
        self.version = version
        self.host = host
        self.port = port
        self.community = community
        self.user = user
        self.auth = auth

    def __buid_result(self, varBinds):
        """Build a {oid: value} dict (both pretty-printed) from varBinds."""
        ret = {}
        for name, val in varBinds:
            if str(val) == '':
                ret[name.prettyPrint()] = ''
            else:
                ret[name.prettyPrint()] = val.prettyPrint()
                # In Python 3, prettyPrint() return 'b'linux'' instead of 'linux'
                if ret[name.prettyPrint()].startswith('b\''):
                    ret[name.prettyPrint()] = ret[name.prettyPrint()][2:-1]
        return ret

    def __get_result__(self, errorIndication, errorStatus, errorIndex, varBinds):
        """Return the result dict, or {} when the request reported an error."""
        ret = {}
        if not errorIndication or not errorStatus:
            ret = self.__buid_result(varBinds)
        return ret

    def get_by_oid(self, *oid):
        """SNMP simple request (list of OID).

        One request per OID list.

        * oid: oid list
        > Return a dict
        """
        if self.version == '3':
            errorIndication, errorStatus, errorIndex, varBinds = self.cmdGen.getCmd(
                cmdgen.UsmUserData(self.user, self.auth),
                cmdgen.UdpTransportTarget((self.host, self.port)),
                *oid
            )
        else:
            errorIndication, errorStatus, errorIndex, varBinds = self.cmdGen.getCmd(
                cmdgen.CommunityData(self.community),
                cmdgen.UdpTransportTarget((self.host, self.port)),
                *oid
            )
        return self.__get_result__(errorIndication, errorStatus, errorIndex, varBinds)

    def __bulk_result__(self, errorIndication, errorStatus, errorIndex, varBindTable):
        """Return a list of result dicts (one per table row), or [] on error."""
        ret = []
        if not errorIndication or not errorStatus:
            for varBindTableRow in varBindTable:
                ret.append(self.__buid_result(varBindTableRow))
        return ret

    def getbulk_by_oid(self, non_repeaters, max_repetitions, *oid):
        """SNMP getbulk request.

        In contrast to snmpwalk, this information will typically be gathered in
        a single transaction with the agent, rather than one transaction per
        variable found.

        * non_repeaters: This specifies the number of supplied variables that
          should not be iterated over.
        * max_repetitions: This specifies the maximum number of iterations over
          the repeating variables.
        * oid: oid list
        > Return a list of dicts
        """
        # BUGFIX: the v3 branch used getCmd (a plain GET) and bound the
        # result to a name the code never read, then fell through to the
        # 'else' below and always returned [] for SNMPv3.  Use bulkCmd and
        # chain the version checks with elif instead.
        if self.version.startswith('3'):
            errorIndication, errorStatus, errorIndex, varBindTable = self.cmdGen.bulkCmd(
                cmdgen.UsmUserData(self.user, self.auth),
                cmdgen.UdpTransportTarget((self.host, self.port)),
                non_repeaters,
                max_repetitions,
                *oid
            )
        elif self.version.startswith('2'):
            errorIndication, errorStatus, errorIndex, varBindTable = self.cmdGen.bulkCmd(
                cmdgen.CommunityData(self.community),
                cmdgen.UdpTransportTarget((self.host, self.port)),
                non_repeaters,
                max_repetitions,
                *oid
            )
        else:
            # Bulk request are not available with SNMP version 1
            return []
        return self.__bulk_result__(errorIndication, errorStatus, errorIndex, varBindTable)
| lgpl-3.0 |
fureszpeter/a2billing | CallBack/callback-daemon-py/build/lib/callback_daemon/manager.py | 14 | 19498 | #!/usr/bin/env python
# vim: set expandtab shiftwidth=4:
"""
Python Interface for Asterisk Manager
This module provides a Python API for interfacing with the asterisk manager.
import asterisk.manager
import sys
def handle_shutdown(event, manager):
    print "Received shutdown event"
    manager.close()
    # we could analyze the event and reconnect here

def handle_event(event, manager):
    print "Received event: %s" % event.name
manager = asterisk.manager.Manager()
try:
# connect to the manager
try:
manager.connect('host')
manager.login('user', 'secret')
# register some callbacks
manager.register_event('Shutdown', handle_shutdown) # shutdown
manager.register_event('*', handle_event) # catch all
# get a status report
response = manager.status()
manager.logoff()
except asterisk.manager.ManagerSocketException, (errno, reason):
print "Error connecting to the manager: %s" % reason
sys.exit(1)
except asterisk.manager.ManagerAuthException, reason:
print "Error logging in to the manager: %s" % reason
sys.exit(1)
except asterisk.manager.ManagerException, reason:
print "Error: %s" % reason
sys.exit(1)
finally:
# remember to clean up
manager.close()
Remember all header, response, and event names are case sensitive.
Not all manager actions are implemented as of yet; feel free to add them
and submit patches.
"""
import sys,os
import socket
import threading
import Queue
import re
from cStringIO import StringIO
from types import *
from time import sleep
EOL = '\r\n'
class ManagerMsg(object):
    """A manager interface message: parsed 'Header: value' pairs plus any
    free-form data lines."""

    def __init__(self, response):
        self.response = response  # the raw response, straight from the horse's mouth
        self.data = ''
        self.headers = {}

        # parse the response
        self.parse(response)

        if not self.headers:
            # Bad app not returning any headers.  Let's fake it
            # this could also be the inital greeting
            self.headers['Response'] = 'Generated Header'

    def parse(self, response):
        """Parse a manager message"""
        response.seek(0)

        data = []

        # read the response line by line
        for line in response.readlines():
            line = line.rstrip()  # strip trailing whitespace

            if not line: continue  # don't process if this is not a message

            # locate the ':' in our message, if there is one
            if line.find(':') > -1:
                item = [x.strip() for x in line.split(':', 1)]

                # if this is a header
                if len(item) == 2:
                    # store the header
                    self.headers[item[0]] = item[1]
                # otherwise it is just plain data
                else:
                    data.append(line)
            # if there was no ':', then we have data
            else:
                data.append(line)

        # store the data
        self.data = '%s\n' % '\n'.join(data)

    def has_header(self, hname):
        """Check for a header"""
        # 'in' instead of dict.has_key(): same behaviour on Python 2,
        # and has_key() no longer exists on Python 3
        return hname in self.headers

    def get_header(self, hname):
        """Return the specified header"""
        return self.headers[hname]

    def __getitem__(self, hname):
        """Return the specified header"""
        return self.headers[hname]

    def __repr__(self):
        return self.headers['Response']
class Event(object):
    """Manager interface Event; __init__ expects a message carrying an
    'Event' header."""

    def __init__(self, message):
        # store all of the event data
        self.message = message
        self.data = message.data
        self.headers = message.headers

        # if this is not an event message we have a problem
        if not message.has_header('Event'):
            raise ManagerException('Trying to create event from non event message')

        # get the event name
        self.name = message.get_header('Event')

    def has_header(self, hname):
        """Check for a header"""
        # 'in' instead of dict.has_key(): same behaviour on Python 2,
        # and has_key() no longer exists on Python 3
        return hname in self.headers

    def get_header(self, hname):
        """Return the specified header"""
        return self.headers[hname]

    def __getitem__(self, hname):
        """Return the specified header"""
        return self.headers[hname]

    def __repr__(self):
        return self.headers['Event']

    def get_action_id(self):
        """Return the ActionID header, or 0 when the event carries none."""
        return self.headers.get('ActionID', 0000)
class Manager(object):
def __init__(self):
self._sock = None # our socket
self._connected = threading.Event()
self._running = threading.Event()
# our hostname
self.hostname = socket.gethostname()
# our queues
self._message_queue = Queue.Queue()
self._response_queue = Queue.Queue()
self._event_queue = Queue.Queue()
# callbacks for events
self._event_callbacks = {}
self._reswaiting = [] # who is waiting for a response
# sequence stuff
self._seqlock = threading.Lock()
self._seq = 0
# some threads
self.message_thread = threading.Thread(target=self.message_loop)
self.event_dispatch_thread = threading.Thread(target=self.event_dispatch)
self.message_thread.setDaemon(True)
self.event_dispatch_thread.setDaemon(True)
def __del__(self):
self.close()
def connected(self):
"""
Check if we are connected or not.
"""
return self._connected.isSet()
def next_seq(self):
"""Return the next number in the sequence, this is used for ActionID"""
self._seqlock.acquire()
try:
return self._seq
finally:
self._seq += 1
self._seqlock.release()
def send_action(self, cdict={}, **kwargs):
"""
Send a command to the manager
If a list is passed to the cdict argument, each item in the list will
be sent to asterisk under the same header in the following manner:
cdict = {"Action": "Originate",
"Variable": ["var1=value", "var2=value"]}
send_action(cdict)
...
Action: Originate
Variable: var1=value
Variable: var2=value
"""
if not self._connected.isSet():
raise ManagerException("Not connected")
# fill in our args
cdict.update(kwargs)
# set the action id
if not cdict.has_key('ActionID'): cdict['ActionID'] = '%s-%08x' % (self.hostname, self.next_seq())
clist = []
# generate the command
for key, value in cdict.items():
if isinstance(value, list):
for item in value:
item = tuple([key, item])
clist.append('%s: %s' % item)
else:
item = tuple([key, value])
clist.append('%s: %s' % item)
clist.append(EOL)
command = EOL.join(clist)
# lock the socket and send our command
try:
self._sock.sendall(command)
except socket.error, (errno, reason):
raise ManagerSocketException(errno, reason)
self._reswaiting.insert(0,1)
response = self._response_queue.get()
self._reswaiting.pop(0)
if not response:
raise ManagerSocketException(0, 'Connection Terminated')
return response
def _receive_data(self):
"""
Read the response from a command.
"""
# loop while we are sill running and connected
while self._running.isSet() and self._connected.isSet():
lines = []
try:
try:
# if there is data to be read
# read a message
while self._connected.isSet():
line = []
# read a line, one char at a time
while self._connected.isSet():
c = self._sock.recv(1)
if not c: # the other end closed the connection
self._sock.close()
self._connected.clear()
break
line.append(c) # append the character to our line
# is this the end of a line?
if c == '\n':
line = ''.join(line)
break
# if we are no longer connected we probably did not
# recieve a full message, don't try to handle it
if not self._connected.isSet(): break
# make sure our line is a string
assert type(line) in StringTypes
lines.append(line) # add the line to our message
# if the line is our EOL marker we have a complete message
if line == EOL:
break
# check to see if this is the greeting line
if line.find('/') >= 0 and line.find(':') < 0:
self.title = line.split('/')[0].strip() # store the title of the manager we are connecting to
self.version = line.split('/')[1].strip() # store the version of the manager we are connecting to
break
#sleep(.001) # waste some time before reading another line
except socket.error:
self._sock.close()
self._connected.clear()
break
finally:
# if we have a message append it to our queue
if lines and self._connected.isSet():
self._message_queue.put(StringIO(''.join(lines)))
else:
self._message_queue.put(None)
def register_event(self, event, function):
"""
Register a callback for the specfied event.
If a callback function returns True, no more callbacks for that
event will be executed.
"""
# get the current value, or an empty list
# then add our new callback
current_callbacks = self._event_callbacks.get(event, [])
current_callbacks.append(function)
self._event_callbacks[event] = current_callbacks
def unregister_event(self, event, function):
"""
Unregister a callback for the specified event.
"""
current_callbacks = self._event_callbacks.get(event, [])
current_callbacks.remove(function)
self._event_callbacks[event] = current_callbacks
def message_loop(self):
"""
The method for the event thread.
This actually recieves all types of messages and places them
in the proper queues.
"""
# start a thread to recieve data
t = threading.Thread(target=self._receive_data)
t.setDaemon(True)
t.start()
try:
# loop getting messages from the queue
while self._running.isSet():
# get/wait for messages
data = self._message_queue.get()
# if we got None as our message we are done
if not data:
# notify the other queues
self._event_queue.put(None)
for waiter in self._reswaiting:
self._response_queue.put(None)
break
# parse the data
message = ManagerMsg(data)
# check if this is an event message
if message.has_header('Event'):
self._event_queue.put(Event(message))
# check if this is a response
elif message.has_header('Response'):
self._response_queue.put(message)
# this is an unknown message
else:
print 'No clue what we got\n%s' % message.data
finally:
# wait for our data receiving thread to exit
t.join()
def event_dispatch(self):
"""This thread is responsible fore dispatching events"""
# loop dispatching events
while self._running.isSet():
# get/wait for an event
ev = self._event_queue.get()
# if we got None as an event, we are finished
if not ev:
break
# dispatch our events
# first build a list of the functions to execute
callbacks = self._event_callbacks.get(ev.name, [])
callbacks.extend(self._event_callbacks.get('*', []))
# now execute the functions
for callback in callbacks:
if callback(ev, self):
break
def connect(self, host, port=5038):
"""Connect to the manager interface"""
if self._connected.isSet():
raise ManagerException('Already connected to manager')
# make sure host is a string
assert type(host) in StringTypes
port = int(port) # make sure port is an int
# create our socket and connect
try:
self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._sock.connect((host,port))
except socket.error, (errno, reason):
raise ManagerSocketException(errno, reason)
# we are connected and running
self._connected.set()
self._running.set()
# start the event thread
self.message_thread.start()
# start the event dispatching thread
self.event_dispatch_thread.start()
# get our initial connection response
return self._response_queue.get()
def close(self):
"""Shutdown the connection to the manager"""
# if we are still running, logout
if self._running.isSet() and self._connected.isSet():
self.logoff()
if self._running.isSet():
# put None in the message_queue to kill our threads
self._message_queue.put(None)
# wait for the event thread to exit
self.message_thread.join()
# make sure we do not join our self (when close is called from event handlers)
if threading.currentThread() != self.event_dispatch_thread:
# wait for the dispatch thread to exit
self.event_dispatch_thread.join()
self._running.clear()
def login(self, username, secret):
"""Login to the manager, throws ManagerAuthException when login fails"""
cdict = {'Action':'Login'}
cdict['Username'] = username
cdict['Secret'] = secret
response = self.send_action(cdict)
if response.get_header('Response') == 'Error':
raise ManagerAuthException(response.get_header('Message'))
return response
def ping(self):
"""Send a ping action to the manager"""
cdict = {'Action':'Ping'}
response = self.send_action(cdict)
return response
def logoff(self):
"""Logoff from the manager"""
cdict = {'Action':'Logoff'}
response = self.send_action(cdict)
# Clear connection
self._sock.close()
self._connected.clear()
return response
def hangup(self, channel):
"""Hanup the specfied channel"""
cdict = {'Action':'Hangup'}
cdict['Channel'] = channel
response = self.send_action(cdict)
return response
def status(self, channel = ''):
"""Get a status message from asterisk"""
cdict = {'Action':'Status'}
cdict['Channel'] = channel
response = self.send_action(cdict)
return response
def redirect(self, channel, exten, priority='1', extra_channel='', context=''):
"""Redirect a channel"""
cdict = {'Action':'Redirect'}
cdict['Channel'] = channel
cdict['Exten'] = exten
cdict['Priority'] = priority
if context: cdict['Context'] = context
if extra_channel: cdict['ExtraChannel'] = extra_channel
response = self.send_action(cdict)
return response
def originate(self, channel, exten, context='', priority='', timeout='', caller_id='', async=False, account='', application='', data='', variables={}, ActionID=''):
"""Originate a call"""
cdict = {'Action':'Originate'}
cdict['Channel'] = channel
cdict['Exten'] = exten
if context: cdict['Context'] = context
if priority: cdict['Priority'] = priority
if timeout: cdict['Timeout'] = timeout
if caller_id: cdict['CallerID'] = caller_id
if async: cdict['Async'] = 'yes'
if account: cdict['Account'] = account
if application: cdict['Application'] = application
if data: cdict['Data'] = data
if ActionID: cdict['ActionID'] = ActionID
# join dict of vairables together in a string in the form of 'key=val|key=val'
# with the latest CVS HEAD this is no longer necessary
# if variables: cdict['Variable'] = '|'.join(['='.join((str(key), str(value))) for key, value in variables.items()])
#if variables: cdict['Variable'] = ['='.join((str(key), str(value))) for key, value in variables.items()]
if variables: cdict['Variable'] = variables
response = self.send_action(cdict)
return response
def mailbox_status(self, mailbox):
"""Get the status of the specfied mailbox"""
cdict = {'Action':'MailboxStatus'}
cdict['Mailbox'] = mailbox
response = self.send_action(cdict)
return response
def command(self, command):
"""Execute a command"""
cdict = {'Action':'Command'}
cdict['Command'] = command
response = self.send_action(cdict)
return response
def extension_state(self, exten, context):
"""Get the state of an extension"""
cdict = {'Action':'ExtensionState'}
cdict['Exten'] = exten
cdict['Context'] = context
response = self.send_action(cdict)
return response
def absolute_timeout(self, channel, timeout):
"""Set an absolute timeout on a channel"""
cdict = {'Action':'AbsoluteTimeout'}
cdict['Channel'] = channel
cdict['Timeout'] = timeout
response = self.send_action(cdict)
return response
def mailbox_count(self, mailbox):
cdict = {'Action':'MailboxCount'}
cdict['Mailbox'] = mailbox
response = self.send_action(cdict)
return response
class ManagerException(Exception):
    """Base class for all errors raised by the manager interface."""


class ManagerSocketException(ManagerException):
    """Raised on socket-level failures while talking to the manager."""


class ManagerAuthException(ManagerException):
    """Raised when logging in to the manager fails."""
| agpl-3.0 |
louiskun/flaskGIT | venv/lib/python2.7/site-packages/wheel/test/test_ranking.py | 565 | 1496 | import unittest
from wheel.pep425tags import get_supported
from wheel.install import WheelFile
# Template for a standard wheel filename: name-version-pyver-abi-arch.whl
WHEELPAT = "%(name)s-%(ver)s-%(pyver)s-%(abi)s-%(arch)s.whl"


def make_wheel(name, ver, pyver, abi, arch):
    """Return a WheelFile built from the standard wheel filename fields."""
    filename = WHEELPAT % dict(name=name, ver=ver, pyver=pyver, abi=abi,
                               arch=arch)
    return WheelFile(filename)
# This relies on the fact that get_supported will always return the
# exact pyver, abi, and architecture for its first (best) match.
sup = get_supported()
pyver, abi, arch = sup[0]
genver = 'py' + pyver[2:]      # generic tag for this interpreter, e.g. 'py27'
majver = genver[:3]            # major-version-only tag, e.g. 'py2'

# (name, version, pyver, abi, arch) tuples, ordered from worst to best match
# for the running interpreter; test_ranking relies on this ordering.
COMBINATIONS = (
    ('bar', '0.9', 'py2.py3', 'none', 'any'),
    ('bar', '0.9', majver, 'none', 'any'),
    ('bar', '0.9', genver, 'none', 'any'),
    ('bar', '0.9', pyver, abi, arch),
    ('bar', '1.3.2', majver, 'none', 'any'),
    ('bar', '3.1', genver, 'none', 'any'),
    ('bar', '3.1', pyver, abi, arch),
    ('foo', '1.0', majver, 'none', 'any'),
    ('foo', '1.1', pyver, abi, arch),
    ('foo', '2.1', majver + '0', 'none', 'any'),
    # This will not be compatible for Python x.0. Beware when we hit Python
    # 4.0, and don't test with 3.0!!!
    ('foo', '2.1', majver + '1', 'none', 'any'),
    ('foo', '2.1', pyver , 'none', 'any'),
    ('foo', '2.1', pyver , abi, arch),
)

WHEELS = [ make_wheel(*args) for args in COMBINATIONS ]
class TestRanking(unittest.TestCase):
    def test_comparison(self):
        """Each wheel must rank strictly above every wheel listed before it."""
        # BUGFIX: the loop used range(len(WHEELS)-1), which skipped every
        # pair involving the last (best) wheel; iterate the full range.
        for i in range(len(WHEELS)):
            for j in range(i):
                self.assertTrue(WHEELS[j] < WHEELS[i])
| mit |
hifly/Pentaho-reports-for-OpenERP | openerp_addon/pentaho_reports/java_oe.py | 13 | 5128 | # -*- encoding: utf-8 -*-
from datetime import datetime
TYPE_STRING = 'str'
TYPE_BOOLEAN = 'bool'
TYPE_INTEGER = 'int'
TYPE_NUMBER = 'num'
TYPE_DATE = 'date'
TYPE_TIME = 'dtm'
OPENERP_DATA_TYPES = [(TYPE_STRING, 'String'),
(TYPE_BOOLEAN, 'Boolean'),
(TYPE_INTEGER, 'Integer'),
(TYPE_NUMBER, 'Number'),
(TYPE_DATE, 'Date'),
(TYPE_TIME, 'Date Time'),
]
"""
Define mappings as functions, which can be passed the data format to make them conditional.
Lists begin with '[L' and finish with ';', for example '[Ljava.lang.Integer;'
"""
JAVA_MAPPING = {
'java.lang.String': lambda x: TYPE_STRING,
'java.lang.Boolean': lambda x: TYPE_BOOLEAN,
'java.lang.Number': lambda x: TYPE_NUMBER,
'java.util.Date': lambda x: TYPE_DATE if x and not('H' in x) else TYPE_TIME,
'java.sql.Date': lambda x: TYPE_DATE if x and not('H' in x) else TYPE_TIME,
'java.sql.Time': lambda x: TYPE_TIME,
'java.sql.Timestamp': lambda x: TYPE_TIME,
'java.lang.Double': lambda x: TYPE_NUMBER,
'java.lang.Float': lambda x: TYPE_NUMBER,
'java.lang.Integer': lambda x: TYPE_INTEGER,
'java.lang.Long': lambda x: TYPE_INTEGER,
'java.lang.Short': lambda x: TYPE_INTEGER,
'java.math.BigInteger': lambda x: TYPE_INTEGER,
'java.math.BigDecimal': lambda x: TYPE_NUMBER,
}
MAX_PARAMS = 50 # Do not make this bigger than 999
PARAM_XXX_STRING_VALUE = 'param_%03i_string_value'
PARAM_XXX_BOOLEAN_VALUE = 'param_%03i_boolean_value'
PARAM_XXX_INTEGER_VALUE = 'param_%03i_integer_value'
PARAM_XXX_NUMBER_VALUE = 'param_%03i_number_value'
PARAM_XXX_DATE_VALUE = 'param_%03i_date_value'
PARAM_XXX_TIME_VALUE = 'param_%03i_time_value'
PARAM_XXX_2M_VALUE = 'param_%03i_2m_value'
PARAM_VALUES = {
TYPE_STRING: {
'value': PARAM_XXX_STRING_VALUE,
'value_list': PARAM_XXX_2M_VALUE,
'if_false': '',
'py_types': (str, unicode)},
TYPE_BOOLEAN: {
'value': PARAM_XXX_BOOLEAN_VALUE,
'if_false': False,
'py_types': (bool,)},
TYPE_INTEGER: {
'value': PARAM_XXX_INTEGER_VALUE,
'value_list': PARAM_XXX_2M_VALUE,
'if_false': 0,
'py_types': (int, long)},
TYPE_NUMBER: {
'value': PARAM_XXX_NUMBER_VALUE,
'value_list': PARAM_XXX_2M_VALUE,
'if_false': 0.0,
'py_types': (float,),
'convert': lambda x: float(x)},
TYPE_DATE: {
'value': PARAM_XXX_DATE_VALUE,
'if_false': '',
'py_types': (str, unicode),
'convert': lambda x: datetime.strptime(x, '%Y-%m-%d'),
'conv_default': lambda x: datetime.strptime(x.value, '%Y%m%dT%H:%M:%S').strftime('%Y-%m-%d')},
TYPE_TIME: {
'value': PARAM_XXX_TIME_VALUE,
'if_false': '',
'py_types': (str, unicode),
'convert': lambda x: datetime.strptime(x, '%Y-%m-%d %H:%M:%S'),
'conv_default': lambda x: datetime.strptime(x.value, '%Y%m%dT%H:%M:%S').strftime('%Y-%m-%d %H:%M:%S')},
}
def parameter_can_2m(parameters, index):
return PARAM_VALUES[parameters[index]['type']].get('value_list', False) and parameters[index].get('multi_select') or False
def parameter_resolve_column_name(parameters, index):
return parameter_can_2m(parameters, index) and PARAM_VALUES[parameters[index]['type']]['value_list'] % index or PARAM_VALUES[parameters[index]['type']]['value'] % index
# functions here will be passed a dictionary to evaluate reserved values. The dictionary should have:
#   'ids' - object ids in force
#   'uid' - the applicable user
#   'context' - the applicable context
# Each callable receives (s, cr, uid, d): s appears to be an OpenERP
# model/osv object (it exposes .pool), cr the database cursor, uid the
# calling user id and d the dictionary described above — confirm against
# the caller.
RESERVED_PARAMS = {
    'ids': lambda s, cr, uid, d: d.get('ids',[]),
    'user_id': lambda s, cr, uid, d: d.get('uid', 0),
    # user_name resolves the uid to the res.users display name; empty
    # string when no uid is supplied
    'user_name': lambda s, cr, uid, d: d.get('uid') and s.pool.get('res.users').browse(cr, uid, d['uid'], context=d.get('context')).name or '',
    'context_lang': lambda s, cr, uid, d: d.get('context', {}).get('lang', ''),
    'context_tz': lambda s, cr, uid, d: d.get('context', {}).get('tz', ''),
}
def check_java_list(type):
    """Split a Java type signature into (is_list, element_type).

    Java list signatures look like '[Ljava.lang.Integer;'; for those the
    leading '[L' and trailing ';' are stripped and True is returned as
    the first element.  Plain type names are returned unchanged.
    """
    if type.startswith('[L'):
        return True, type[2:-1]
    return False, type
| gpl-2.0 |
mharrys/sudoku | sudoku.py | 1 | 7848 | import fileinput
from dlx import DLX
from numpy import array, unique
from optparse import OptionParser
class SudokuError(Exception):
    """Raised when any error related to Sudoku is found during construction
    and validation such as unexpected values or contradictions.
    """
    def __init__(self, value):
        # value: human readable description of what went wrong
        self.value = value

    def __str__(self):
        # NOTE(review): str.encode('string_escape') is Python 2 only; this
        # would need changing for a Python 3 port.
        return self.value.encode('string_escape')
class Sudoku(object):
"""Complete all necessary steps to solve a Sudoku challenge using
Dancing Links (DLX) including validating the challenge and building and
validating the possible solution found by DLX.
The expected input is one line of 81 characters where each unknown digit
is represented as a '.' (dot).
"""
    def __init__(self, validate, pretty):
        # validate: forwarded to DLX.run() in solve(); per solve()'s
        # docstring this makes the search return at most one solution.
        self.validate = validate
        # pretty: presentation flag; unused within this class — presumably
        # consumed by the caller when printing grids (TODO confirm).
        self.pretty = pretty
def solve(self, line):
"""Return list of solutions from specified line.
Return empty list if no solutions are found and return at most
one solution if validation is enabled or all solutions if validation
is disabled. It is possible for a Sudoku challenge to have more than
one solution but such challenge is concidered to be an invalid.
"""
grid = self.build_challenge(line)
self.validate_challenge(grid)
self.grids = []
dlx = DLX.from_sudoku(grid, self.result)
dlx.run(self.validate)
return self.grids
def build_challenge(self, line):
"""Returns 9x9 numpy array from specified line.
SudokuError is raised if unexpected value is found.
"""
grid = []
for c in line:
if c != '.':
if c < '1' or c > '9':
msg = 'Unexpected value "%s" when building challenge.' % c
raise SudokuError(msg)
grid.append(int(c))
else:
grid.append(0)
return array(grid).reshape(9, 9)
def validate_challenge(self, grid):
"""Search specified grid (9x9 numpy array) for contradictions.
SudokuError is raised if a contradiction is found.
"""
# validate rows
for row in grid:
cells = []
for cell in row:
if cell != 0:
if cell in cells:
msg = 'Row digits are not unique in challenge.'
raise SudokuError(msg)
else:
cells.append(cell)
# validate columns
for column in grid.transpose():
cells = []
for cell in column:
if cell != 0:
if cell in cells:
msg = 'Column digits are not unique in challenge.'
raise SudokuError(msg)
else:
cells.append(cell)
# validate boxes
for i in range(3):
# row slice
rs = i * 3
re = i * 3 + 3
for j in range(3):
# column slice
cs = j * 3
ce = j * 3 + 3
# box slice
box = grid[rs:re, cs:ce]
cells = []
for cell in box.flatten():
if cell != 0:
if cell in cells:
msg = 'Box digits are no unique in challenge.'
raise SudokuError(msg)
else:
cells.append(cell)
def build_solution(self, s):
"""Return 9x9 grid from a solution found by DLX.
"""
rows = []
for k in s:
rows.append(k.ID)
rows.sort()
grid = []
for row in rows:
grid.append(row % 9 + 1)
return array(grid).reshape(9, 9)
def validate_solution(self, grid):
"""Search specified grid (9x9 numpy array) for contradictions.
SudokuError is raised if a contradiction is found.
"""
# validate cells
for cell in grid.flatten():
if cell not in range(1, 10):
msg = 'Cell digit is not between 1 and 9 in solution.'
raise SudokuError(msg)
# validate rows
for row in grid:
if unique(row).size != 9:
msg = 'Row digits are not unique in solution.'
raise SudokuError(msg)
# validate columns
for col in grid.transpose():
if unique(col).size != 9:
msg = 'Column digits are not unique in solution.'
raise SudokuError(msg)
# validate boxes
for i in range(3):
# row slice
rs = i * 3
re = i * 3 + 3
for j in range(3):
# column slice
cs = j * 3
ce = j * 3 + 3
# box slice
box = grid[rs:re, cs:ce]
if unique(box.flatten()).size != 9:
msg = 'Box digits are not unique in solution.'
raise SudokuError(msg)
def result(self, solutions, s):
"""Build, validate and save recieved solution.
SudokuError is raised if validation is enabled and more than one
solution exist or contradiction is found in solution.
"""
grid = self.build_solution(s)
if self.validate:
if solutions > 1:
msg = 'More than one solution exist.'
raise SudokuError(msg)
self.validate_solution(grid)
if self.pretty:
self.grids.append(self.format_pretty(grid))
else:
self.grids.append(self.format_simple(grid))
def format_simple(self, grid):
"""Return solution in the same format as expected input line.
"""
f = ''
for s in grid.ravel():
f += str(s)
return f
def format_pretty(self, grid):
"""Return solution in a more human readable format.
"""
f = '+-------+-------+-------+\n'
for i, s in enumerate(grid):
num = str(s)[1:-1].replace(',', '')
f += '| %s | %s | %s |\n' % (num[0:5], num[6:11], num[12:17])
if (i + 1) % 3 == 0:
f += '+-------+-------+-------+'
if (i + 1) < len(grid):
f += '\n'
return f
def print_error(n, msg):
    """Report an error for input line *n* with a uniform, greppable prefix."""
    print('sudoku: Error on line {0}: {1}'.format(n, msg))
def print_solutions(grids):
    """Print each solved grid to stdout, one after the other."""
    for solution in grids:
        print(solution)
def solve_line(sudoku, line, line_num):
    """Solve one raw input line, reporting any problem with its line number.

    The line must be exactly 81 puzzle characters followed by a newline.
    """
    if len(line) < 82 or line[81] != '\n':
        print_error(line_num, 'Input line must be exactly 81 chars long.')
        return
    try:
        solutions = sudoku.solve(line[:81])  # slice off '\n'
    except SudokuError as e:
        print_error(line_num, e)
    else:
        print_solutions(solutions)
def solve_line_by_line(options, args):
    """Solve every line read from the files in args (or stdin if empty)."""
    # One shared solver instance; fileinput tracks the current line number.
    sudoku = Sudoku(options.validate, options.pretty)
    for line in fileinput.input(args):
        solve_line(sudoku, line, fileinput.lineno())
if __name__ == '__main__':
    # Command-line entry point: parse flags, then solve stdin/file input
    # line by line.
    parser = OptionParser()
    parser.add_option(
        '-v',
        '--validate',
        dest='validate',
        help='validate solution (longer search time)',
        action='store_true'
    )
    parser.add_option(
        '-p',
        '--pretty',
        dest='pretty',
        help='pretty print solution',
        action='store_true'
    )
    options, args = parser.parse_args()
    try:
        solve_line_by_line(options, args)
    except IOError as e:
        # e.g. a named input file does not exist or is unreadable.
        print('sudoku: %s' % e)
    except (KeyboardInterrupt, SystemExit) as e:
        # Leave a clean line after ^C instead of a half-printed grid.
        print('')
        print('sudoku: Interrupt caught ... exiting')
| gpl-3.0 |
Plain-Andy-legacy/android_external_chromium_org | tools/telemetry/telemetry/value/list_of_scalar_values_unittest.py | 29 | 5972 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import unittest
from telemetry import value
from telemetry.page import page_set
from telemetry.value import list_of_scalar_values
from telemetry.value import none_values
class TestBase(unittest.TestCase):
    """Shared fixture: a PageSet with three pages, in name-sorted order."""

    def setUp(self):
        self.page_set = page_set.PageSet(file_path=os.path.dirname(__file__))
        self.page_set.AddPageWithDefaultRunNavigate("http://www.bar.com/")
        self.page_set.AddPageWithDefaultRunNavigate("http://www.baz.com/")
        self.page_set.AddPageWithDefaultRunNavigate("http://www.foo.com/")

    @property
    def pages(self):
        # Convenience accessor used by subclasses as self.pages[i].
        return self.page_set.pages
class ValueTest(TestBase):
    """Tests for ListOfScalarValues merging, dict round-tripping and
    None-value handling."""

    def testListSamePageMergingWithSamePageConcatenatePolicy(self):
        # CONCATENATE on the same page joins both value lists.
        page0 = self.pages[0]
        v0 = list_of_scalar_values.ListOfScalarValues(
            page0, 'x', 'unit',
            [1,2], same_page_merge_policy=value.CONCATENATE)
        v1 = list_of_scalar_values.ListOfScalarValues(
            page0, 'x', 'unit',
            [3,4], same_page_merge_policy=value.CONCATENATE)
        self.assertTrue(v1.IsMergableWith(v0))

        vM = (list_of_scalar_values.ListOfScalarValues.
              MergeLikeValuesFromSamePage([v0, v1]))
        self.assertEquals(page0, vM.page)
        self.assertEquals('x', vM.name)
        self.assertEquals('unit', vM.units)
        self.assertEquals(value.CONCATENATE, vM.same_page_merge_policy)
        self.assertEquals(True, vM.important)
        self.assertEquals([1, 2, 3, 4], vM.values)

    def testListSamePageMergingWithPickFirstPolicy(self):
        # PICK_FIRST keeps only the first value's list.
        page0 = self.pages[0]
        v0 = list_of_scalar_values.ListOfScalarValues(
            page0, 'x', 'unit',
            [1,2], same_page_merge_policy=value.PICK_FIRST)
        v1 = list_of_scalar_values.ListOfScalarValues(
            page0, 'x', 'unit',
            [3,4], same_page_merge_policy=value.PICK_FIRST)
        self.assertTrue(v1.IsMergableWith(v0))

        vM = (list_of_scalar_values.ListOfScalarValues.
              MergeLikeValuesFromSamePage([v0, v1]))
        self.assertEquals(page0, vM.page)
        self.assertEquals('x', vM.name)
        self.assertEquals('unit', vM.units)
        self.assertEquals(value.PICK_FIRST, vM.same_page_merge_policy)
        self.assertEquals(True, vM.important)
        self.assertEquals([1, 2], vM.values)

    def testListDifferentPageMerging(self):
        # Cross-page merge concatenates and drops the page association.
        page0 = self.pages[0]
        page1 = self.pages[1]
        v0 = list_of_scalar_values.ListOfScalarValues(
            page0, 'x', 'unit',
            [1, 2], same_page_merge_policy=value.CONCATENATE)
        v1 = list_of_scalar_values.ListOfScalarValues(
            page1, 'x', 'unit',
            [3, 4], same_page_merge_policy=value.CONCATENATE)
        self.assertTrue(v1.IsMergableWith(v0))

        vM = (list_of_scalar_values.ListOfScalarValues.
              MergeLikeValuesFromDifferentPages([v0, v1]))
        self.assertEquals(None, vM.page)
        self.assertEquals('x', vM.name)
        self.assertEquals('unit', vM.units)
        self.assertEquals(value.CONCATENATE, vM.same_page_merge_policy)
        self.assertEquals(True, vM.important)
        self.assertEquals([1, 2, 3, 4], vM.values)

    def testListWithNoneValueMerging(self):
        # Merging a real list with a None value yields None plus a
        # merge-failure reason.
        page0 = self.pages[0]
        v0 = list_of_scalar_values.ListOfScalarValues(
            page0, 'x', 'unit',
            [1, 2], same_page_merge_policy=value.CONCATENATE)
        v1 = list_of_scalar_values.ListOfScalarValues(
            page0, 'x', 'unit',
            None, same_page_merge_policy=value.CONCATENATE, none_value_reason='n')
        self.assertTrue(v1.IsMergableWith(v0))

        vM = (list_of_scalar_values.ListOfScalarValues.
              MergeLikeValuesFromSamePage([v0, v1]))
        self.assertEquals(None, vM.values)
        self.assertEquals(none_values.MERGE_FAILURE_REASON,
                          vM.none_value_reason)

    def testListWithNoneValueMustHaveNoneReason(self):
        # None values require an explanation string.
        page0 = self.pages[0]
        self.assertRaises(none_values.NoneValueMissingReason,
                          lambda: list_of_scalar_values.ListOfScalarValues(
                              page0, 'x', 'unit', None))

    def testListWithNoneReasonMustHaveNoneValue(self):
        # Conversely, a reason is only legal when the value is None.
        page0 = self.pages[0]
        self.assertRaises(none_values.ValueMustHaveNoneValue,
                          lambda: list_of_scalar_values.ListOfScalarValues(
                              page0, 'x', 'unit', [1, 2],
                              none_value_reason='n'))

    def testAsDict(self):
        v = list_of_scalar_values.ListOfScalarValues(
            None, 'x', 'unit', [1, 2],
            same_page_merge_policy=value.PICK_FIRST, important=False)
        d = v.AsDictWithoutBaseClassEntries()

        self.assertEquals(d, {
            'values': [1, 2]
        })

    def testNoneValueAsDict(self):
        v = list_of_scalar_values.ListOfScalarValues(
            None, 'x', 'unit', None, same_page_merge_policy=value.PICK_FIRST,
            important=False, none_value_reason='n')
        d = v.AsDictWithoutBaseClassEntries()

        self.assertEquals(d, {
            'values': None,
            'none_value_reason': 'n'
        })

    def testFromDictInts(self):
        d = {
            'type': 'list_of_scalar_values',
            'name': 'x',
            'units': 'unit',
            'values': [1, 2]
        }
        v = value.Value.FromDict(d, {})

        self.assertTrue(isinstance(v, list_of_scalar_values.ListOfScalarValues))
        self.assertEquals(v.values, [1, 2])

    def testFromDictFloats(self):
        d = {
            'type': 'list_of_scalar_values',
            'name': 'x',
            'units': 'unit',
            'values': [1.3, 2.7]
        }
        v = value.Value.FromDict(d, {})

        self.assertTrue(isinstance(v, list_of_scalar_values.ListOfScalarValues))
        self.assertEquals(v.values, [1.3, 2.7])

    def testFromDictNoneValue(self):
        d = {
            'type': 'list_of_scalar_values',
            'name': 'x',
            'units': 'unit',
            'values': None,
            'none_value_reason': 'n'
        }
        v = value.Value.FromDict(d, {})

        self.assertTrue(isinstance(v, list_of_scalar_values.ListOfScalarValues))
        self.assertEquals(v.values, None)
        self.assertEquals(v.none_value_reason, 'n')
| bsd-3-clause |
defcello/Children-of-Eden-Synth-Server | src/data/webpages/rolandfantomxr/PRA.py | 4 | 7924 | ####################################################################################################
# Copyright 2013 John Crawford
#
# This file is part of PatchCorral.
#
# PatchCorral is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PatchCorral is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PatchCorral. If not, see <http://www.gnu.org/licenses/>.
####################################################################################################
## @file
# Module Information.
# @date 3/10/2013 Created file. -jc
# @author John Crawford
NAME = 'PR-A'
PATCHES = [
('So true...', 87, 64, 0, 'AC.PIANO', 'PR-A 001'),
('ConcertPiano', 87, 64, 1, 'AC.PIANO', 'PR-A 002'),
('Warm Piano', 87, 64, 2, 'AC.PIANO', 'PR-A 003'),
('Warm Pad Pno', 87, 64, 3, 'AC.PIANO', 'PR-A 004'),
('Warm Str Pno', 87, 64, 4, 'AC.PIANO', 'PR-A 005'),
('BealeSt Walk', 87, 64, 5, 'AC.PIANO', 'PR-A 006'),
('Rapsody', 87, 64, 6, 'AC.PIANO', 'PR-A 007'),
('JD-800 Piano', 87, 64, 7, 'AC.PIANO', 'PR-A 008'),
('SA Dance Pno', 87, 64, 8, 'AC.PIANO', 'PR-A 009'),
('FS E-Grand', 87, 64, 9, 'AC.PIANO', 'PR-A 010'),
('FS Blend Pno', 87, 64, 10, 'AC.PIANO', 'PR-A 011'),
('LA Piano', 87, 64, 11, 'AC.PIANO', 'PR-A 012'),
('FS 70\'EP', 87, 64, 12, 'EL.PIANO', 'PR-A 013'),
('StageEP Trem', 87, 64, 13, 'EL.PIANO', 'PR-A 014'),
('Back2the60s', 87, 64, 14, 'EL.PIANO', 'PR-A 015'),
('Tine EP', 87, 64, 15, 'EL.PIANO', 'PR-A 016'),
('LEO EP', 87, 64, 16, 'EL.PIANO', 'PR-A 017'),
('LonesomeRoad', 87, 64, 17, 'EL.PIANO', 'PR-A 018'),
('Age\'n\'Tines', 87, 64, 18, 'EL.PIANO', 'PR-A 019'),
('Brill TremEP', 87, 64, 19, 'EL.PIANO', 'PR-A 020'),
('Crystal EP', 87, 64, 20, 'EL.PIANO', 'PR-A 021'),
('Celestial EP', 87, 64, 21, 'EL.PIANO', 'PR-A 022'),
('Spirit Tines', 87, 64, 22, 'EL.PIANO', 'PR-A 023'),
('Psycho EP', 87, 64, 23, 'EL.PIANO', 'PR-A 024'),
('Mk2 Stg phsr', 87, 64, 24, 'EL.PIANO', 'PR-A 025'),
('SA Stacks', 87, 64, 25, 'EL.PIANO', 'PR-A 026'),
('Backing PhEP', 87, 64, 26, 'EL.PIANO', 'PR-A 027'),
('Balladeer', 87, 64, 27, 'EL.PIANO', 'PR-A 028'),
('Remember', 87, 64, 28, 'EL.PIANO', 'PR-A 029'),
('FS Wurly', 87, 64, 29, 'EL.PIANO', 'PR-A 030'),
('Wurly Trem', 87, 64, 30, 'EL.PIANO', 'PR-A 031'),
('Super Wurly', 87, 64, 31, 'EL.PIANO', 'PR-A 032'),
('Pulse EPno', 87, 64, 32, 'EL.PIANO', 'PR-A 033'),
('Fonky Fonky', 87, 64, 33, 'EL.PIANO', 'PR-A 034'),
('FM EP', 87, 64, 34, 'EL.PIANO', 'PR-A 035'),
('FM-777', 87, 64, 35, 'EL.PIANO', 'PR-A 036'),
('FM EPad', 87, 64, 36, 'EL.PIANO', 'PR-A 037'),
('D6 Clavi', 87, 64, 37, 'KEYBOARDS', 'PR-A 038'),
('Cutter Clavi', 87, 64, 38, 'KEYBOARDS', 'PR-A 039'),
('FS Clavi', 87, 64, 39, 'KEYBOARDS', 'PR-A 040'),
('Funky D', 87, 64, 40, 'KEYBOARDS', 'PR-A 041'),
('Phase Clavi', 87, 64, 41, 'KEYBOARDS', 'PR-A 042'),
('BPF Clavi Ph', 87, 64, 42, 'KEYBOARDS', 'PR-A 043'),
('Pulse Clavi', 87, 64, 43, 'KEYBOARDS', 'PR-A 044'),
('Analog Clavi', 87, 64, 44, 'KEYBOARDS', 'PR-A 045'),
('Reso Clavi', 87, 64, 45, 'KEYBOARDS', 'PR-A 046'),
('Harpsy Clavi', 87, 64, 46, 'KEYBOARDS', 'PR-A 047'),
('FS Harpsi', 87, 64, 47, 'KEYBOARDS', 'PR-A 048'),
('Amadeus', 87, 64, 48, 'KEYBOARDS', 'PR-A 049'),
('FS Celesta', 87, 64, 49, 'KEYBOARDS', 'PR-A 050'),
('FS Glocken', 87, 64, 50, 'BELL', 'PR-A 051'),
('Music Bells', 87, 64, 51, 'BELL', 'PR-A 052'),
('FS Musicbox', 87, 64, 52, 'BELL', 'PR-A 053'),
('MuBox Pad', 87, 64, 53, 'BELL', 'PR-A 054'),
('Kalimbells', 87, 64, 54, 'BELL', 'PR-A 055'),
('Himalaya Ice', 87, 64, 55, 'BELL', 'PR-A 056'),
('Dreaming Box', 87, 64, 56, 'BELL', 'PR-A 057'),
('Step Ice', 87, 64, 57, 'BELL', 'PR-A 058'),
('FS Bell 1', 87, 64, 58, 'BELL', 'PR-A 059'),
('FS Bell 2', 87, 64, 59, 'BELL', 'PR-A 060'),
('Candy Bell', 87, 64, 60, 'BELL', 'PR-A 061'),
('FS Chime', 87, 64, 61, 'BELL', 'PR-A 062'),
('Bell Ring', 87, 64, 62, 'BELL', 'PR-A 063'),
('Tubular Bell', 87, 64, 63, 'BELL', 'PR-A 064'),
('5th Key', 87, 64, 64, 'BELL', 'PR-A 065'),
('Vibrations', 87, 64, 65, 'MALLET', 'PR-A 066'),
('FS Vibe', 87, 64, 66, 'MALLET', 'PR-A 067'),
('FS Marimba', 87, 64, 67, 'MALLET', 'PR-A 068'),
('FS Xylo', 87, 64, 68, 'MALLET', 'PR-A 069'),
('Ethno Keys', 87, 64, 69, 'MALLET', 'PR-A 070'),
('Synergy MLT', 87, 64, 70, 'MALLET', 'PR-A 071'),
('Steel Drums', 87, 64, 71, 'MALLET', 'PR-A 072'),
('Xylosizer', 87, 64, 72, 'MALLET', 'PR-A 073'),
('Toy Box', 87, 64, 73, 'MALLET', 'PR-A 074'),
('FullDraw Org', 87, 64, 74, 'ORGAN', 'PR-A 075'),
('StakDraw Org', 87, 64, 75, 'ORGAN', 'PR-A 076'),
('FullStop Org', 87, 64, 76, 'ORGAN', 'PR-A 077'),
('FS Perc Org', 87, 64, 77, 'ORGAN', 'PR-A 078'),
('Euro Organ', 87, 64, 78, 'ORGAN', 'PR-A 079'),
('Perky Organ', 87, 64, 79, 'ORGAN', 'PR-A 080'),
('LoFi PercOrg', 87, 64, 80, 'ORGAN', 'PR-A 081'),
('Rochno Org', 87, 64, 81, 'ORGAN', 'PR-A 082'),
('R&B Organ 1', 87, 64, 82, 'ORGAN', 'PR-A 083'),
('R&B Organ 2', 87, 64, 83, 'ORGAN', 'PR-A 084'),
('Zepix Organ', 87, 64, 84, 'ORGAN', 'PR-A 085'),
('Peep Durple', 87, 64, 85, 'ORGAN', 'PR-A 086'),
('FS Dist Bee', 87, 64, 86, 'ORGAN', 'PR-A 087'),
('60\'s Org 1', 87, 64, 87, 'ORGAN', 'PR-A 088'),
('60\'s Org 2', 87, 64, 88, 'ORGAN', 'PR-A 089'),
('FS SoapOpera', 87, 64, 89, 'ORGAN', 'PR-A 090'),
('Chapel Organ', 87, 64, 90, 'ORGAN', 'PR-A 091'),
('Grand Pipe', 87, 64, 91, 'ORGAN', 'PR-A 092'),
('Masked Opera', 87, 64, 92, 'ORGAN', 'PR-A 093'),
('Pipe Org/Mod', 87, 64, 93, 'ORGAN', 'PR-A 094'),
('Vodkakordion', 87, 64, 94, 'ACCORDION', 'PR-A 095'),
('Squeeze Me!', 87, 64, 95, 'ACCORDION', 'PR-A 096'),
('Guinguette', 87, 64, 96, 'ACCORDION', 'PR-A 097'),
('Harmonderca', 87, 64, 97, 'HARMONICA', 'PR-A 098'),
('BluesHrp V/S', 87, 64, 98, 'HARMONICA', 'PR-A 099'),
('Green Bullet', 87, 64, 99, 'HARMONICA', 'PR-A 100'),
('SoftNyln Gtr', 87, 64, 100, 'AC.GUITAR', 'PR-A 101'),
('FS Nylon Gt', 87, 64, 101, 'AC.GUITAR', 'PR-A 102'),
('Wet Nyln Gtr', 87, 64, 102, 'AC.GUITAR', 'PR-A 103'),
('Pre Mass Hum', 87, 64, 103, 'AC.GUITAR', 'PR-A 104'),
('Thick Steel', 87, 64, 104, 'AC.GUITAR', 'PR-A 105'),
('Uncle Martin', 87, 64, 105, 'AC.GUITAR', 'PR-A 106'),
('Wide Ac Gtr', 87, 64, 106, 'AC.GUITAR', 'PR-A 107'),
('Comp Stl Gtr', 87, 64, 107, 'AC.GUITAR', 'PR-A 108'),
('Stl Gtr Duo', 87, 64, 108, 'AC.GUITAR', 'PR-A 109'),
('FS 12str Gtr', 87, 64, 109, 'AC.GUITAR', 'PR-A 110'),
('So good !', 87, 64, 110, 'AC.GUITAR', 'PR-A 111'),
('Muted Gtr Pk', 87, 64, 111, 'EL.GUITAR', 'PR-A 112'),
('StratSeq\'nce', 87, 64, 112, 'EL.GUITAR', 'PR-A 113'),
('Fixx it', 87, 64, 113, 'EL.GUITAR', 'PR-A 114'),
('Jazz Guitar', 87, 64, 114, 'EL.GUITAR', 'PR-A 115'),
('DynoJazz Gtr', 87, 64, 115, 'EL.GUITAR', 'PR-A 116'),
('Wet TC', 87, 64, 116, 'EL.GUITAR', 'PR-A 117'),
('Clean Gtr', 87, 64, 117, 'EL.GUITAR', 'PR-A 118'),
('Crimson Gtr', 87, 64, 118, 'EL.GUITAR', 'PR-A 119'),
('Touchee Funk', 87, 64, 119, 'EL.GUITAR', 'PR-A 120'),
('Plug n\' Gig', 87, 64, 120, 'EL.GUITAR', 'PR-A 121'),
('Kinda Kurt', 87, 64, 121, 'EL.GUITAR', 'PR-A 122'),
('Nice Oct Gtr', 87, 64, 122, 'EL.GUITAR', 'PR-A 123'),
('Strat Gtr', 87, 64, 123, 'EL.GUITAR', 'PR-A 124'),
('JC Strat Bdy', 87, 64, 124, 'EL.GUITAR', 'PR-A 125'),
('Twin StratsB', 87, 64, 125, 'EL.GUITAR', 'PR-A 126'),
('BluNoteStrat', 87, 64, 126, 'EL.GUITAR', 'PR-A 127'),
('FS Funk Gtr', 87, 64, 127, 'EL.GUITAR', 'PR-A 128'),
] | gpl-3.0 |
behzadnouri/scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_I.py | 10 | 1175 | # -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
from numpy import sin, sum
from .go_benchmark import Benchmark
class Infinity(Benchmark):

    r"""
    Infinity objective function.

    This class defines the Infinity [1]_ global optimization problem. This
    is a multimodal minimization problem defined as follows:

    .. math::

        f_{\text{Infinity}}(x) = \sum_{i=1}^{n} x_i^{6}
        \left [ \sin\left ( \frac{1}{x_i} \right ) + 2 \right ]


    Here, :math:`n` represents the number of dimensions and
    :math:`x_i \in [-1, 1]` for :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = 0` for :math:`x_i = 0` for
    :math:`i = 1, ..., n`

    .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        # zip() returns a one-shot iterator on Python 3; materialise it so
        # the bounds can be iterated more than once by the benchmark harness.
        self._bounds = list(zip([-1.0] * self.N, [1.0] * self.N))

        # Optimum is reported slightly off zero (1e-16) to avoid the 1/x
        # singularity at exactly x = 0.
        self.global_optimum = [[1e-16 for _ in range(self.N)]]
        self.fglob = 0.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1

        return sum(x ** 6.0 * (sin(1.0 / x) + 2.0))
| bsd-3-clause |
Bekt/tweetement | src/service.py | 1 | 3578 | import logging
import string
import tweepy
from credentials import (consumer_key, consumer_secret)
from models import Stopword
from collections import Counter
class Service(object):
    """Thin wrapper around the Twitter search API plus pure helpers that
    extract top hashtags/keywords from fetched tweets."""

    # Map uppercase to lowercase, and deletes any punctuation
    # (translation table usable by str.translate).
    trans = {ord(string.ascii_uppercase[i]): ord(string.ascii_lowercase[i])
             for i in range(26)}
    trans.update({ord(c): None for c in string.punctuation})

    def __init__(self, access_token='', access_token_secret=''):
        # API client is created lazily on first tw_api access.
        self._tw_api = None
        self._access_token = access_token
        self._access_token_secret = access_token_secret

    @property
    def tw_api(self):
        """Tweepy API client (lazily constructed, then cached)."""
        if self._tw_api is None:
            auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
            auth.set_access_token(self._access_token, self._access_token_secret)
            self._tw_api = tweepy.API(auth)
        return self._tw_api

    def fetch(self, query, limit=100):
        """Fetches search results for the given query.

        Pages through results manually, preferring 'popular' tweets and
        falling back to 'mixed' when Twitter returns too few.
        """
        # Cursor doesn't work with dev_appserver.py :(
        # return list(tweepy.Cursor(self.tw_api.search, q=query, lang='en',
        #                           result_type='popular').items(limit))
        query += ' -filter:retweets'
        # Try to get as many 'popular' posts as possible.
        # Twitter limits this really hard.
        res_type = 'popular'
        last_id = -1
        tweets = []
        while len(tweets) < limit:
            count = limit - len(tweets)
            try:
                t = self.tw_api.search(q=query, count=count, result_type=res_type,
                                       lang='en', max_id=str(last_id - 1))
                if len(t) < 3 and res_type == 'popular':
                    # 'popular' well ran dry: switch to 'mixed' and restart paging.
                    tweets.extend(t)
                    res_type = 'mixed'
                    last_id = -1
                    continue
                if len(t) < 3 and res_type == 'mixed':
                    tweets.extend(t)
                    break
                tweets.extend(t)
                last_id = t[-1].id
            except tweepy.TweepError as e:
                # Best-effort: log and return what we have so far.
                logging.exception(e)
                break
        return tweets

    @staticmethod
    def top_hashtags(tweets, limit=5):
        """Extracts the most frequent hashtags (lowercased, '#'-prefixed)
        from given tweets."""
        hashtags = Counter()
        for t in tweets:
            for h in t.entities['hashtags']:
                if 'text' in h:
                    hashtags[h['text'].lower()] += 1
        top = hashtags.most_common(limit)
        return ['#' + t[0] for t in top]

    @staticmethod
    def top_keywords(tweets, limit=5, exclude=frozenset()):
        """Extracts the most frequent keywords from given tweets.

        Words listed in *exclude* (after normalisation) are skipped.
        The default was changed from a mutable ``set()`` to an immutable
        ``frozenset()``; behaviour for callers is unchanged.
        """
        # Normalise the exclusion words with the same filter applied to tokens.
        exc = set()
        for w in exclude:
            ok, text = _token_okay(w)
            if ok:
                exc.add(text)
        words = Counter()
        for t in tweets:
            # set() so a word repeated inside one tweet counts only once.
            for token in set(t.text.split()):
                ok, text = _token_okay(token)
                if ok and text not in exc:
                    words[text] += 1
        top = words.most_common(limit)
        return [t[0] for t in top]
def _token_okay(text):
    """Decide whether the given token is a valid expandable query.

    Returns a (is_valid, normalized_text) pair; normalisation keeps
    printable ASCII, lowercases, and strips punctuation.
    """
    text = ''.join(c for c in text if 31 < ord(c) < 127)
    try:
        text = text.translate(Service.trans)
    except TypeError:
        # Python 2 byte strings reject a dict translation table.
        return False, text
    if len(text) < 2 or text.isdigit():
        return False, text
    # Only consult the datastore for tokens that passed the cheap checks.
    if Stopword.gql('WHERE token = :1', text).get() is not None:
        return False, text
    return True, text
| mit |
tersmitten/ansible | lib/ansible/modules/cloud/azure/azure_rm_mysqlconfiguration.py | 13 | 7982 | #!/usr/bin/python
#
# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_mysqlconfiguration
version_added: "2.8"
short_description: Manage Configuration instance.
description:
- Create, update and delete instance of Configuration.
options:
resource_group:
description:
- The name of the resource group that contains the resource.
required: True
server_name:
description:
- The name of the server.
required: True
name:
description:
- The name of the server configuration.
required: True
value:
description:
- Value of the configuration.
state:
description:
- Assert the state of the MySQL configuration. Use C(present) to update setting, or
C(absent) to reset to default value.
default: present
choices:
- absent
- present
extends_documentation_fragment:
- azure
author:
- "Zim Kalinowski (@zikalino)"
'''
EXAMPLES = '''
- name: Update SQL Server setting
azure_rm_mysqlconfiguration:
resource_group: myResourceGroup
server_name: myServer
name: event_scheduler
value: "ON"
'''
RETURN = '''
id:
description:
- Resource ID
returned: always
type: str
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMySQL/servers/myServer/confi
gurations/event_scheduler"
'''
import time
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller
from azure.mgmt.rdbms.mysql import MySQLManagementClient
from msrest.serialization import Model
except ImportError:
# This is handled in azure_rm_common
pass
class Actions:
    """Symbolic codes for the action decided on by exec_module()."""
    NoAction = 0
    Create = 1
    Update = 2
    Delete = 3
class AzureRMMySqlConfiguration(AzureRMModuleBase):
    """Ansible module: set (state=present) or reset to default
    (state=absent) a single Azure MySQL server configuration value."""

    def __init__(self):
        # Argument spec mirrors the DOCUMENTATION block above.
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str',
                required=True
            ),
            server_name=dict(
                type='str',
                required=True
            ),
            name=dict(
                type='str',
                required=True
            ),
            value=dict(
                type='str'
            ),
            state=dict(
                type='str',
                default='present',
                choices=['present', 'absent']
            )
        )

        # Filled in from module parameters inside exec_module().
        self.resource_group = None
        self.server_name = None
        self.name = None
        self.value = None

        self.results = dict(changed=False)
        self.state = None
        self.to_do = Actions.NoAction

        super(AzureRMMySqlConfiguration, self).__init__(derived_arg_spec=self.module_arg_spec,
                                                        supports_check_mode=True,
                                                        supports_tags=False)

    def exec_module(self, **kwargs):
        """Entry point: compare current setting with the requested one and
        create/update/delete accordingly.  Returns the results dict."""

        # Copy recognised module parameters onto matching attributes.
        for key in list(self.module_arg_spec.keys()):
            if hasattr(self, key):
                setattr(self, key, kwargs[key])

        old_response = None
        response = None

        old_response = self.get_configuration()

        # Decide which action is required.
        if not old_response:
            self.log("Configuration instance doesn't exist")
            if self.state == 'absent':
                self.log("Old instance didn't exist")
            else:
                self.to_do = Actions.Create
        else:
            self.log("Configuration instance already exists")
            # Only user-overridden values need an explicit reset on 'absent'.
            if self.state == 'absent' and old_response['source'] == 'user-override':
                self.to_do = Actions.Delete
            elif self.state == 'present':
                self.log("Need to check if Configuration instance has to be deleted or may be updated")
                if self.value != old_response.get('value'):
                    self.to_do = Actions.Update

        if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
            self.log("Need to Create / Update the Configuration instance")

            # Check mode: report the pending change without applying it.
            if self.check_mode:
                self.results['changed'] = True
                return self.results

            response = self.create_update_configuration()
            self.results['changed'] = True
            self.log("Creation / Update done")
        elif self.to_do == Actions.Delete:
            self.log("Configuration instance deleted")
            self.results['changed'] = True

            if self.check_mode:
                return self.results

            self.delete_configuration()
        else:
            self.log("Configuration instance unchanged")
            self.results['changed'] = False
            response = old_response

        if response:
            self.results["id"] = response["id"]

        return self.results

    def create_update_configuration(self):
        """Apply self.value to the named configuration (source
        'user-override'); returns the resulting resource as a dict."""
        self.log("Creating / Updating the Configuration instance {0}".format(self.name))

        try:
            response = self.mysql_client.configurations.create_or_update(resource_group_name=self.resource_group,
                                                                         server_name=self.server_name,
                                                                         configuration_name=self.name,
                                                                         value=self.value,
                                                                         source='user-override')
            # Long-running operation: wait for completion.
            if isinstance(response, LROPoller):
                response = self.get_poller_result(response)

        except CloudError as exc:
            self.log('Error attempting to create the Configuration instance.')
            self.fail("Error creating the Configuration instance: {0}".format(str(exc)))
        return response.as_dict()

    def delete_configuration(self):
        """Reset the named configuration to its system default (Azure has no
        real delete for configurations; 'system-default' source is used)."""
        self.log("Deleting the Configuration instance {0}".format(self.name))
        try:
            response = self.mysql_client.configurations.create_or_update(resource_group_name=self.resource_group,
                                                                         server_name=self.server_name,
                                                                         configuration_name=self.name,
                                                                         source='system-default')
        except CloudError as e:
            self.log('Error attempting to delete the Configuration instance.')
            self.fail("Error deleting the Configuration instance: {0}".format(str(e)))

        return True

    def get_configuration(self):
        """Return the current configuration as a dict, or False when the
        resource does not exist (CloudError is treated as 'not found')."""
        self.log("Checking if the Configuration instance {0} is present".format(self.name))
        found = False
        try:
            response = self.mysql_client.configurations.get(resource_group_name=self.resource_group,
                                                            server_name=self.server_name,
                                                            configuration_name=self.name)
            found = True
            self.log("Response : {0}".format(response))
            self.log("Configuration instance : {0} found".format(response.name))
        except CloudError as e:
            self.log('Did not find the Configuration instance.')
        if found is True:
            return response.as_dict()

        return False
def main():
    """Main execution: instantiating the module runs it (all work happens
    in AzureRMMySqlConfiguration.__init__ via the base class)."""
    AzureRMMySqlConfiguration()


if __name__ == '__main__':
    main()
| gpl-3.0 |
ptitjano/bokeh | examples/compat/mpl_contour.py | 7 | 1028 | # demo inspired by: http://matplotlib.org/examples/pylab_examples/contour_demo.html
from bokeh import mpl
from bokeh.plotting import output_file, show
import matplotlib
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import numpy as np
matplotlib.rcParams['xtick.direction'] = 'out'
matplotlib.rcParams['ytick.direction'] = 'out'
delta = 0.025
x = np.arange(-3.0, 3.0, delta)
y = np.arange(-2.0, 2.0, delta)
X, Y = np.meshgrid(x, y)
Z1 = mlab.bivariate_normal(X, Y, 1.0, 1.0, 0.0, 0.0)
Z2 = mlab.bivariate_normal(X, Y, 1.5, 0.5, 1, 1)
# difference of Gaussians
Z = 10.0 * (Z2 - Z1)
# Create a simple contour plot with labels using default colors. The
# inline argument to clabel will control whether the labels are draw
# over the line segments of the contour, removing the lines beneath
# the label
plt.figure()
CS = plt.contour(X, Y, Z)
plt.clabel(CS, inline=1, fontsize=10)
plt.title('Simplest default with labels')
output_file("mpl_contour.html", title="mpl_contour.py example")
show(mpl.to_bokeh())
| bsd-3-clause |
StefanRijnhart/OpenUpgrade | addons/l10n_cn/__init__.py | 102 | 1055 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
srm912/servo | tests/wpt/css-tests/tools/manifest/update.py | 230 | 3336 | #!/usr/bin/env python
import argparse
import imp
import os
import sys
import manifest
import vcs
from log import get_logger
from tree import GitTree, NoVCSTree
here = os.path.dirname(__file__)
localpaths = imp.load_source("localpaths", os.path.abspath(os.path.join(here, os.pardir, "localpaths.py")))
def update(tests_root, url_base, manifest, ignore_local=False):
    """Refresh *manifest* in place from the tests under *tests_root*.

    Uses git metadata when available; otherwise falls back to a plain
    filesystem walk.
    """
    in_git = vcs.is_git_repo(tests_root)
    if in_git:
        tests_tree = GitTree(tests_root, url_base)
    else:
        tests_tree = NoVCSTree(tests_root, url_base)
    # Without git we cannot distinguish deletions, so prune missing local
    # entries unless the caller asked us to ignore local state entirely.
    remove_missing_local = (not in_git) and (not ignore_local)

    local_changes = None if ignore_local else tests_tree.local_changes()

    manifest.update(tests_root,
                    url_base,
                    tests_tree.current_rev(),
                    tests_tree.committed_changes(manifest.rev),
                    local_changes,
                    remove_missing_local=remove_missing_local)
def update_from_cli(**kwargs):
    """Load (or rebuild) the manifest per CLI options, update it, write it."""
    tests_root = kwargs["tests_root"]
    path = kwargs["path"]
    assert tests_root is not None

    m = None
    logger = get_logger()

    if not kwargs.get("rebuild", False):
        try:
            m = manifest.load(tests_root, path)
        except manifest.ManifestVersionMismatch:
            # Incompatible on-disk format: fall through to a full rebuild.
            logger.info("Manifest version changed, rebuilding")
            m = None
        else:
            logger.info("Updating manifest")

    if m is None:
        m = manifest.Manifest(None)

    update(tests_root,
           kwargs["url_base"],
           m,
           ignore_local=kwargs.get("ignore_local", False))
    manifest.write(m, path)
def abs_path(path):
    """Expand a leading '~' and return the absolute form of *path*."""
    expanded = os.path.expanduser(path)
    return os.path.abspath(expanded)
def create_parser():
    """Build the argument parser for the manifest-update command line."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-p", "--path", type=abs_path,
                        help="Path to manifest file.")
    parser.add_argument("--tests-root", type=abs_path,
                        help="Path to root of tests.")
    parser.add_argument("-r", "--rebuild", action="store_true", default=False,
                        help="Force a full rebuild of the manifest.")
    parser.add_argument("--ignore-local", action="store_true", default=False,
                        help="Don't include uncommitted local changes in the manifest.")
    parser.add_argument("--url-base", action="store", default="/",
                        help="Base url to use as the mount point for tests in this manifest.")
    return parser
def find_top_repo():
    """Return the top-most (outermost) git repository containing this file.

    Walks from this script's directory up toward the filesystem root,
    remembering the last directory that is a git repo.  Returns None when
    no enclosing git repository exists.
    """
    path = here
    rv = None
    while True:
        if vcs.is_git_repo(path):
            rv = path
        parent = os.path.abspath(os.path.join(path, os.pardir))
        if parent == path:
            # Reached the filesystem root: os.pardir no longer moves up.
            # (The original ``while path != "/"`` test never terminates on
            # non-POSIX roots such as "C:\\".)
            break
        path = parent
    return rv
def main(default_tests_root=None):
    """CLI entry point: locate the test root and update its MANIFEST.json."""
    opts = create_parser().parse_args()
    if opts.tests_root is None:
        # Fall back to the caller-supplied default, then to the enclosing
        # git repository of this script.
        tests_root = None
        if default_tests_root is not None:
            tests_root = default_tests_root
        else:
            tests_root = find_top_repo()
        if tests_root is None:
            # Python 2 print-redirection syntax; this script predates py3.
            print >> sys.stderr, """No git repo found; could not determine test root.
Run again with --test-root"""
            sys.exit(1)
        opts.tests_root = tests_root

    if opts.path is None:
        # Default manifest location: the root of the test tree.
        opts.path = os.path.join(opts.tests_root, "MANIFEST.json")

    update_from_cli(**vars(opts))


if __name__ == "__main__":
    main()
| mpl-2.0 |
mgamer/gyp | test/lib/TestWin.py | 90 | 3168 | # Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
TestWin.py: a collection of helpers for testing on Windows.
"""
import errno
import os
import re
import sys
import subprocess
class Registry(object):
    """Read-only access to the Windows registry via the reg.exe tool."""

    def _QueryBase(self, sysdir, key, value):
        """Use reg.exe to read a particular key.

        While ideally we might use the win32 module, we would like gyp to be
        python neutral, so for instance cygwin python lacks this module.

        Arguments:
          sysdir: The system subdirectory to attempt to launch reg.exe from.
          key: The registry key to read from.
          value: The particular value to read.
        Return:
          stdout from reg.exe, or None for failure.
        """
        # Skip if not on Windows or Python Win32 setup issue
        if sys.platform not in ('win32', 'cygwin'):
            return None
        # Setup params to pass to and attempt to launch reg.exe
        cmd = [os.path.join(os.environ.get('WINDIR', ''), sysdir, 'reg.exe'),
               'query', key]
        if value:
            cmd.extend(['/v', value])
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        # Get the stdout from reg.exe, reading to the end so p.returncode is valid
        # Note that the error text may be in [1] in some cases
        text = p.communicate()[0]
        # Check return code from reg.exe; officially 0==success and 1==error
        if p.returncode:
            return None
        return text

    def Query(self, key, value=None):
        r"""Use reg.exe to read a particular key through _QueryBase.

        First tries to launch from %WinDir%\Sysnative to avoid WoW64 redirection. If
        that fails, it falls back to System32. Sysnative is available on Vista and
        up and available on Windows Server 2003 and XP through KB patch 942589. Note
        that Sysnative will always fail if using 64-bit python due to it being a
        virtual directory and System32 will work correctly in the first place.
        KB 942589 - http://support.microsoft.com/kb/942589/en-us.

        Arguments:
          key: The registry key.
          value: The particular registry value to read (optional).
        Return:
          stdout from reg.exe, or None for failure.
        """
        text = None
        try:
            text = self._QueryBase('Sysnative', key, value)
        except OSError, e:  # Python 2 except syntax; this file targets py2.
            if e.errno == errno.ENOENT:
                # Sysnative itself is missing (pre-Vista / no KB942589):
                # retry through the normal System32 directory.
                text = self._QueryBase('System32', key, value)
            else:
                raise
        return text

    def GetValue(self, key, value):
        """Use reg.exe to obtain the value of a registry key.

        Args:
          key: The registry key.
          value: The particular registry value to read.
        Return:
          contents of the registry key's value, or None on failure.
        """
        text = self.Query(key, value)
        if not text:
            return None
        # Extract value from reg.exe's "NAME  REG_TYPE  VALUE\r\n" output.
        match = re.search(r'REG_\w+\s+([^\r]+)\r\n', text)
        if not match:
            return None
        return match.group(1)

    def KeyExists(self, key):
        """Use reg.exe to see if a key exists.

        Args:
          key: The registry key to check.
        Return:
          True if the key exists
        """
        if not self.Query(key):
            return False
        return True
| bsd-3-clause |
dengit/shadowsocks | shadowsocks/lru_cache.py | 11 | 4274 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2014 clowwindy
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import, division, print_function, \
with_statement
import collections
import logging
import time
# this LRUCache is optimized for concurrency, not QPS
# n: concurrency, keys stored in the cache
# m: visits not timed out, proportional to QPS * timeout
# get & set is O(1), not O(n). thus we can support very large n
# TODO: if timeout or QPS is too large, then this cache is not very efficient,
# as sweep() causes long pause
class LRUCache(collections.MutableMapping):
    """This class is not thread safe"""
    # NOTE(review): collections.MutableMapping moved to collections.abc in
    # Python 3.3 and was removed from collections in 3.10 -- this spelling
    # works on the py2-era interpreters this project targeted.

    def __init__(self, timeout=60, close_callback=None, *args, **kwargs):
        # timeout: seconds since a key's last visit before it may be swept.
        # close_callback: called with the stored value just before eviction.
        self.timeout = timeout
        self.close_callback = close_callback
        self._store = {}  # key -> value
        self._time_to_keys = collections.defaultdict(list)  # visit time -> keys
        self._keys_to_last_time = {}  # key -> time of most recent visit
        self._last_visits = collections.deque()  # all visit times, oldest first
        self.update(dict(*args, **kwargs))  # use the free update to set keys

    def __getitem__(self, key):
        # O(1) -- a read counts as a visit and refreshes the key's lifetime.
        t = time.time()
        self._keys_to_last_time[key] = t
        self._time_to_keys[t].append(key)
        self._last_visits.append(t)
        return self._store[key]

    def __setitem__(self, key, value):
        # O(1)
        t = time.time()
        self._keys_to_last_time[key] = t
        self._store[key] = value
        self._time_to_keys[t].append(key)
        self._last_visits.append(t)

    def __delitem__(self, key):
        # O(1)
        # NOTE(review): stale visit records stay behind in _time_to_keys and
        # _last_visits; sweep() tolerates keys no longer in _store.
        del self._store[key]
        del self._keys_to_last_time[key]

    def __iter__(self):
        return iter(self._store)

    def __len__(self):
        return len(self._store)

    def sweep(self):
        # O(m) where m is the number of recorded visits, not stored keys.
        now = time.time()
        c = 0
        # Walk visit times oldest-first; stop at the first one still fresh.
        while len(self._last_visits) > 0:
            least = self._last_visits[0]
            if now - least <= self.timeout:
                break
            if self.close_callback is not None:
                # First pass: let the callback release resources for every
                # key whose *latest* visit (not just this one) has expired.
                for key in self._time_to_keys[least]:
                    if key in self._store:
                        if now - self._keys_to_last_time[key] > self.timeout:
                            value = self._store[key]
                            self.close_callback(value)
            for key in self._time_to_keys[least]:
                # One popleft per key recorded at this timestamp keeps
                # _last_visits and _time_to_keys in step.
                self._last_visits.popleft()
                if key in self._store:
                    if now - self._keys_to_last_time[key] > self.timeout:
                        del self._store[key]
                        del self._keys_to_last_time[key]
                        c += 1
            del self._time_to_keys[least]
        if c:
            logging.debug('%d keys swept' % c)
def test():
    """Smoke-test LRUCache expiry: visited keys survive, idle keys are swept."""
    cache = LRUCache(timeout=0.3)
    cache['a'] = 1
    assert cache['a'] == 1

    time.sleep(0.5)
    cache.sweep()
    assert 'a' not in cache

    cache['a'] = 2
    cache['b'] = 3
    time.sleep(0.2)
    cache.sweep()
    assert cache['a'] == 2
    assert cache['b'] == 3

    time.sleep(0.2)
    cache.sweep()
    cache['b']  # touch 'b' so only 'a' times out below
    time.sleep(0.2)
    cache.sweep()
    assert 'a' not in cache
    assert cache['b'] == 3

    time.sleep(0.5)
    cache.sweep()
    assert 'a' not in cache
    assert 'b' not in cache


if __name__ == '__main__':
    test()
| mit |
echodaemon/Malfunction | malfunction/disassembler.py | 3 | 2079 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------
# disassembler.py
#
# Authors: James Brahm, Matthew Rogers, Morgan Wagner, Jeramy Lochner,
# Donte Brock
# -----------------------------------------------------------------------
# Copyright 2015 Dynetics, Inc.
#
# This file is a part of Malfunction
#
# Malfunction is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Malfunction is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# -----------------------------------------------------------------------
import os
import subprocess
def get_data(binary):
    """Extract function byte-snippets from *binary* using radare2.

    Runs ``r2`` to analyze the binary (``af``), print the base address
    (``?p``) and list functions (``afl``), then reads each function's
    bytes directly out of the file on disk.

    :param binary: path of the binary to disassemble
    :return: list of ``[buf, size]`` pairs, one per function whose size
             exceeds 20 bytes
    """
    print("Disassembling " + binary + "...")
    functions = []
    # Pass argv as a list with no shell: avoids shell-injection and quoting
    # problems when the file name contains spaces or metacharacters.
    cmd = ["r2", binary, "-c", "af", "-c", "?p", "-c", "afl", "-q"]
    with open(os.devnull, "w") as devnull:  # closed even if r2 fails
        output = subprocess.check_output(cmd, stderr=devnull)
    output = output.splitlines()
    # First line is the printed base address (?p); the rest are
    # "address size ..." rows from afl.
    pma = int(output.pop(0), 16)
    flist = [line.decode("utf-8").split(" ") for line in output]
    # Difference between r2's mapped address and the raw file offset.
    offset = int(flist[0][0], 16) - pma
    # Open the binary with a context manager so the handle is not leaked.
    with open(binary, "rb") as f:
        for entry in flist:
            size = int(entry[1])
            if size > 20:  # skip tiny stubs/thunks
                f.seek(int(entry[0], 16) - offset, 0)
                buf = f.read(size)
                functions.append([buf, size])
    print("Found {0} functions".format(len(functions)))
    return functions
| lgpl-2.1 |
mcanthony/rethinkdb | external/v8_3.30.33.16/build/gyp/test/hard_dependency/gyptest-no-exported-hard-dependency.py | 350 | 1226 | #!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verify that a hard_dependency that is not exported is not pulled in as a
dependency for a target if the target does not explicitly specify a dependency
and none of its dependencies export the hard_dependency.
"""
import TestGyp
test = TestGyp.TestGyp()

# The JSON dump generator does not model static-library adjustment, so the
# assertions below would be meaningless there.
if test.format == 'dump_dependency_json':
  test.skip_test('Skipping test; dependency JSON does not adjust ' \
                 'static libaries.\n')

test.run_gyp('hard_dependency.gyp', chdir='src')

chdir = 'relocate/src'
test.relocate('src', chdir)

# Build only the 'd' target; what else gets built is decided purely by the
# (non-)exported hard_dependency settings under test.
test.build('hard_dependency.gyp', 'd', chdir=chdir)

# Because 'c' does not export a hard_dependency, only the target 'd' should
# be built. This is because the 'd' target does not need the generated headers
# in order to be compiled.
test.built_file_must_not_exist('a', type=test.STATIC_LIB, chdir=chdir)
test.built_file_must_not_exist('b', type=test.STATIC_LIB, chdir=chdir)
test.built_file_must_not_exist('c', type=test.STATIC_LIB, chdir=chdir)
test.built_file_must_exist('d', type=test.STATIC_LIB, chdir=chdir)

test.pass_test()
| agpl-3.0 |
mahmutf/dupeguru | qt/problem_dialog.py | 3 | 2842 | # Created By: Virgil Dupras
# Created On: 2010-04-12
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import (
QDialog,
QVBoxLayout,
QHBoxLayout,
QPushButton,
QSpacerItem,
QSizePolicy,
QLabel,
QTableView,
QAbstractItemView,
)
from hscommon.trans import trget
from .problem_table import ProblemTable
tr = trget("ui")
class ProblemDialog(QDialog):
    """Dialog listing files that could not be processed.

    Binds the cross-toolkit ``model``'s problem table to a Qt table view
    and offers "Reveal Selected" / "Close" actions.
    """

    def __init__(self, parent, model, **kwargs):
        # Title-bar-only window: no minimize/maximize/help buttons.
        flags = Qt.CustomizeWindowHint | Qt.WindowTitleHint | Qt.WindowSystemMenuHint
        super().__init__(parent, flags, **kwargs)
        self._setupUi()
        self.model = model
        self.model.view = self
        # The ProblemTable adapter wires model rows into the Qt view.
        self.table = ProblemTable(self.model.problem_table, view=self.tableView)
        self.revealButton.clicked.connect(self.model.reveal_selected_dupe)
        self.closeButton.clicked.connect(self.accept)

    def _setupUi(self):
        """Build the dialog's widgets: explanatory label, table, button row."""
        self.setWindowTitle(tr("Problems!"))
        self.resize(413, 323)
        self.verticalLayout = QVBoxLayout(self)
        self.label = QLabel(self)
        msg = tr(
            "There were problems processing some (or all) of the files. The cause of "
            "these problems are described in the table below. Those files were not "
            "removed from your results."
        )
        self.label.setText(msg)
        self.label.setWordWrap(True)
        self.verticalLayout.addWidget(self.label)
        # Read-only, row-based single selection table of problems.
        self.tableView = QTableView(self)
        self.tableView.setEditTriggers(QAbstractItemView.NoEditTriggers)
        self.tableView.setSelectionMode(QAbstractItemView.SingleSelection)
        self.tableView.setSelectionBehavior(QAbstractItemView.SelectRows)
        self.tableView.setShowGrid(False)
        self.tableView.horizontalHeader().setStretchLastSection(True)
        self.tableView.verticalHeader().setDefaultSectionSize(18)
        self.tableView.verticalHeader().setHighlightSections(False)
        self.verticalLayout.addWidget(self.tableView)
        # Button row: Reveal Selected on the left, Close pushed right.
        self.horizontalLayout = QHBoxLayout()
        self.revealButton = QPushButton(self)
        self.revealButton.setText(tr("Reveal Selected"))
        self.horizontalLayout.addWidget(self.revealButton)
        spacerItem = QSpacerItem(40, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem)
        self.closeButton = QPushButton(self)
        self.closeButton.setText(tr("Close"))
        self.closeButton.setDefault(True)
        self.horizontalLayout.addWidget(self.closeButton)
        self.verticalLayout.addLayout(self.horizontalLayout)
| gpl-3.0 |
cryptickp/heat | heat/tests/ceilometer/test_gnocchi_alarm.py | 4 | 14472 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from ceilometerclient import exc as ceilometerclient_exc
import mock
import mox
from heat.common import exception
from heat.common import template_format
from heat.engine.clients.os import ceilometer
from heat.engine.resources.openstack.ceilometer import gnocchi_alarm as gnocchi
from heat.engine import scheduler
from heat.tests import common
from heat.tests import utils
gnocchi_resources_alarm_template = '''
heat_template_version: 2013-05-23
description: Gnocchi Resources Alarm Test
resources:
GnoResAlarm:
type: OS::Ceilometer::GnocchiResourcesAlarm
properties:
description: Do stuff with gnocchi
metric: cpu_util
aggregation_method: mean
granularity: 60
evaluation_periods: 1
threshold: 50
alarm_actions: []
resource_type: instance
resource_id: 5a517ceb-b068-4aca-9eb9-3e4eb9b90d9a
comparison_operator: gt
'''
gnocchi_aggregation_by_metrics_alarm_template = '''
heat_template_version: 2013-05-23
description: Gnocchi Aggregation by Metrics Alarm Test
resources:
GnoAggregationByMetricsAlarm:
type: OS::Ceilometer::GnocchiAggregationByMetricsAlarm
properties:
description: Do stuff with gnocchi metrics
metrics: ["911fce07-e0d7-4210-8c8c-4a9d811fcabc",
"2543d435-fe93-4443-9351-fb0156930f94"]
aggregation_method: mean
granularity: 60
evaluation_periods: 1
threshold: 50
alarm_actions: []
comparison_operator: gt
'''
gnocchi_aggregation_by_resources_alarm_template = '''
heat_template_version: 2013-05-23
description: Gnocchi Aggregation by Resources Alarm Test
resources:
GnoAggregationByResourcesAlarm:
type: OS::Ceilometer::GnocchiAggregationByResourcesAlarm
properties:
description: Do stuff with gnocchi aggregation by resource
aggregation_method: mean
granularity: 60
evaluation_periods: 1
threshold: 50
metric: cpu_util
alarm_actions: []
resource_type: instance
query: '{"=": {"server_group": "my_autoscaling_group"}}'
comparison_operator: gt
'''
class FakeCeilometerAlarm(object):
    """Tiny stand-in for a ceilometerclient alarm object."""

    alarm_id = 'foo'

    def __init__(self):
        # Mimic the client's to_dict() accessor with a fixed payload,
        # installed as an instance attribute like the original.
        def _as_dict():
            return {'attr': 'val'}
        self.to_dict = _as_dict
class GnocchiResourcesAlarmTest(common.HeatTestCase):
    """Tests for the OS::Ceilometer::GnocchiResourcesAlarm resource."""

    def setUp(self):
        super(GnocchiResourcesAlarmTest, self).setUp()
        # Fake ceilometer client; each test stubs the alarm calls it needs.
        self.fc = mock.Mock()

    def create_alarm(self):
        """Record the expected alarm-create call and return a fresh resource."""
        self.m.StubOutWithMock(ceilometer.CeilometerClientPlugin, '_create')
        ceilometer.CeilometerClientPlugin._create().AndReturn(
            self.fc)
        self.m.StubOutWithMock(self.fc.alarms, 'create')
        # Expected payload mirrors the properties in the template above.
        self.fc.alarms.create(
            alarm_actions=[],
            description=u'Do stuff with gnocchi',
            enabled=True,
            insufficient_data_actions=None,
            ok_actions=None,
            name=mox.IgnoreArg(), type='gnocchi_resources_threshold',
            repeat_actions=True,
            gnocchi_resources_threshold_rule={
                "metric": "cpu_util",
                "aggregation_method": "mean",
                "granularity": 60,
                "evaluation_periods": 1,
                "threshold": 50,
                "resource_type": "instance",
                "resource_id": "5a517ceb-b068-4aca-9eb9-3e4eb9b90d9a",
                "comparison_operator": "gt",
            },
            time_constraints=[],
            severity='low',
        ).AndReturn(FakeCeilometerAlarm())
        snippet = template_format.parse(gnocchi_resources_alarm_template)
        self.stack = utils.parse_stack(snippet)
        resource_defns = self.stack.t.resource_definitions(self.stack)
        return gnocchi.CeilometerGnocchiResourcesAlarm(
            'GnoResAlarm', resource_defns['GnoResAlarm'], self.stack)

    def test_update(self):
        """Changing resource_id must be pushed to ceilometer as an update."""
        rsrc = self.create_alarm()
        self.m.StubOutWithMock(self.fc.alarms, 'update')
        self.fc.alarms.update(
            alarm_id='foo',
            gnocchi_resources_threshold_rule={
                'resource_id': 'd3d6c642-921e-4fc2-9c5f-15d9a5afb598'})
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        update_template = copy.deepcopy(rsrc.t)
        update_template['Properties']['resource_id'] = (
            'd3d6c642-921e-4fc2-9c5f-15d9a5afb598')
        scheduler.TaskRunner(rsrc.update, update_template)()
        self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
        self.m.VerifyAll()

    def _prepare_check_resource(self):
        """Return the alarm resource with a mocked client for check tests."""
        snippet = template_format.parse(gnocchi_resources_alarm_template)
        self.stack = utils.parse_stack(snippet)
        res = self.stack['GnoResAlarm']
        res.client = mock.Mock()
        mock_alarm = mock.Mock(enabled=True, state='ok')
        res.client().alarms.get.return_value = mock_alarm
        return res

    def test_create(self):
        rsrc = self.create_alarm()
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
        # resource_id must be the alarm_id returned by the fake client.
        self.assertEqual('foo', rsrc.resource_id)
        self.m.VerifyAll()

    def test_suspend(self):
        """Suspending the resource disables the alarm in ceilometer."""
        rsrc = self.create_alarm()
        self.m.StubOutWithMock(self.fc.alarms, 'update')
        self.fc.alarms.update(alarm_id='foo', enabled=False)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        scheduler.TaskRunner(rsrc.suspend)()
        self.assertEqual((rsrc.SUSPEND, rsrc.COMPLETE), rsrc.state)
        self.m.VerifyAll()

    def test_resume(self):
        """Resuming the resource re-enables the alarm in ceilometer."""
        rsrc = self.create_alarm()
        self.m.StubOutWithMock(self.fc.alarms, 'update')
        self.fc.alarms.update(alarm_id='foo', enabled=True)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        rsrc.state_set(rsrc.SUSPEND, rsrc.COMPLETE)
        scheduler.TaskRunner(rsrc.resume)()
        self.assertEqual((rsrc.RESUME, rsrc.COMPLETE), rsrc.state)
        self.m.VerifyAll()

    def test_delete(self):
        rsrc = self.create_alarm()
        self.m.StubOutWithMock(self.fc.alarms, 'delete')
        self.fc.alarms.delete('foo')
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        scheduler.TaskRunner(rsrc.delete)()
        self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
        self.m.VerifyAll()

    def test_delete_not_found(self):
        """A 404 from ceilometer on delete still ends in DELETE/COMPLETE."""
        rsrc = self.create_alarm()
        self.m.StubOutWithMock(self.fc.alarms, 'delete')
        self.fc.alarms.delete('foo').AndRaise(
            ceilometerclient_exc.HTTPNotFound())
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        scheduler.TaskRunner(rsrc.delete)()
        self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
        self.m.VerifyAll()

    def test_check(self):
        res = self._prepare_check_resource()
        scheduler.TaskRunner(res.check)()
        self.assertEqual((res.CHECK, res.COMPLETE), res.state)

    def test_check_failure(self):
        """A client error during check yields CHECK/FAILED with the message."""
        res = self._prepare_check_resource()
        res.client().alarms.get.side_effect = Exception('Boom')
        self.assertRaises(exception.ResourceFailure,
                          scheduler.TaskRunner(res.check))
        self.assertEqual((res.CHECK, res.FAILED), res.state)
        self.assertIn('Boom', res.status_reason)

    def test_show_resource(self):
        """FnGetAtt('show') returns the alarm's to_dict() payload."""
        res = self._prepare_check_resource()
        res.client().alarms.create.return_value = mock.MagicMock(
            alarm_id='2')
        res.client().alarms.get.return_value = FakeCeilometerAlarm()
        scheduler.TaskRunner(res.create)()
        self.assertEqual({'attr': 'val'}, res.FnGetAtt('show'))
class GnocchiAggregationByMetricsAlarmTest(GnocchiResourcesAlarmTest):
    """Tests for OS::Ceilometer::GnocchiAggregationByMetricsAlarm.

    Inherits the create/suspend/resume/delete/check tests from
    GnocchiResourcesAlarmTest and overrides the fixtures to target the
    aggregation-by-metrics alarm type.
    """

    def create_alarm(self):
        """Record the expected alarm-create call and return a fresh resource."""
        self.m.StubOutWithMock(ceilometer.CeilometerClientPlugin, '_create')
        ceilometer.CeilometerClientPlugin._create().AndReturn(
            self.fc)
        self.m.StubOutWithMock(self.fc.alarms, 'create')
        self.fc.alarms.create(
            alarm_actions=[],
            description=u'Do stuff with gnocchi metrics',
            enabled=True,
            insufficient_data_actions=None,
            ok_actions=None,
            name=mox.IgnoreArg(),
            type='gnocchi_aggregation_by_metrics_threshold',
            repeat_actions=True,
            gnocchi_aggregation_by_metrics_threshold_rule={
                "aggregation_method": "mean",
                "granularity": 60,
                "evaluation_periods": 1,
                "threshold": 50,
                "comparison_operator": "gt",
                "metrics": ["911fce07-e0d7-4210-8c8c-4a9d811fcabc",
                            "2543d435-fe93-4443-9351-fb0156930f94"],
            },
            time_constraints=[],
            severity='low',
        ).AndReturn(FakeCeilometerAlarm())
        snippet = template_format.parse(
            gnocchi_aggregation_by_metrics_alarm_template)
        self.stack = utils.parse_stack(snippet)
        resource_defns = self.stack.t.resource_definitions(self.stack)
        return gnocchi.CeilometerGnocchiAggregationByMetricsAlarm(
            'GnoAggregationByMetricsAlarm',
            resource_defns['GnoAggregationByMetricsAlarm'], self.stack)

    def test_update(self):
        """Replacing the metrics list must be pushed to ceilometer."""
        rsrc = self.create_alarm()
        self.m.StubOutWithMock(self.fc.alarms, 'update')
        self.fc.alarms.update(
            alarm_id='foo',
            gnocchi_aggregation_by_metrics_threshold_rule={
                'metrics': ['d3d6c642-921e-4fc2-9c5f-15d9a5afb598',
                            'bc60f822-18a0-4a0c-94e7-94c554b00901']})
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        update_template = copy.deepcopy(rsrc.t)
        update_template['Properties']['metrics'] = [
            'd3d6c642-921e-4fc2-9c5f-15d9a5afb598',
            'bc60f822-18a0-4a0c-94e7-94c554b00901']
        scheduler.TaskRunner(rsrc.update, update_template)()
        self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
        self.m.VerifyAll()

    def _prepare_check_resource(self):
        """Return the alarm resource with a mocked client for check tests."""
        snippet = template_format.parse(
            gnocchi_aggregation_by_metrics_alarm_template)
        self.stack = utils.parse_stack(snippet)
        res = self.stack['GnoAggregationByMetricsAlarm']
        res.client = mock.Mock()
        mock_alarm = mock.Mock(enabled=True, state='ok')
        res.client().alarms.get.return_value = mock_alarm
        return res

    def test_show_resource(self):
        """FnGetAtt('show') returns the alarm's to_dict() payload."""
        res = self._prepare_check_resource()
        res.client().alarms.create.return_value = mock.MagicMock(
            alarm_id='2')
        res.client().alarms.get.return_value = FakeCeilometerAlarm()
        scheduler.TaskRunner(res.create)()
        self.assertEqual({'attr': 'val'}, res.FnGetAtt('show'))
class GnocchiAggregationByResourcesAlarmTest(GnocchiResourcesAlarmTest):
    """Tests for OS::Ceilometer::GnocchiAggregationByResourcesAlarm.

    Inherits the create/suspend/resume/delete/check tests from
    GnocchiResourcesAlarmTest and overrides the fixtures to target the
    aggregation-by-resources alarm type.
    """

    def create_alarm(self):
        """Record the expected alarm-create call and return a fresh resource."""
        self.m.StubOutWithMock(ceilometer.CeilometerClientPlugin, '_create')
        ceilometer.CeilometerClientPlugin._create().AndReturn(
            self.fc)
        self.m.StubOutWithMock(self.fc.alarms, 'create')
        self.fc.alarms.create(
            alarm_actions=[],
            description=u'Do stuff with gnocchi aggregation by resource',
            enabled=True,
            insufficient_data_actions=None,
            ok_actions=None,
            name=mox.IgnoreArg(),
            type='gnocchi_aggregation_by_resources_threshold',
            repeat_actions=True,
            gnocchi_aggregation_by_resources_threshold_rule={
                "aggregation_method": "mean",
                "granularity": 60,
                "evaluation_periods": 1,
                "threshold": 50,
                "comparison_operator": "gt",
                "metric": "cpu_util",
                "resource_type": "instance",
                "query": '{"=": {"server_group": "my_autoscaling_group"}}',
            },
            time_constraints=[],
            severity='low',
        ).AndReturn(FakeCeilometerAlarm())
        snippet = template_format.parse(
            gnocchi_aggregation_by_resources_alarm_template)
        self.stack = utils.parse_stack(snippet)
        resource_defns = self.stack.t.resource_definitions(self.stack)
        return gnocchi.CeilometerGnocchiAggregationByResourcesAlarm(
            'GnoAggregationByResourcesAlarm',
            resource_defns['GnoAggregationByResourcesAlarm'], self.stack)

    def test_update(self):
        """Changing the resource query must be pushed to ceilometer."""
        rsrc = self.create_alarm()
        self.m.StubOutWithMock(self.fc.alarms, 'update')
        self.fc.alarms.update(
            alarm_id='foo',
            gnocchi_aggregation_by_resources_threshold_rule={
                'query': '{"=": {"server_group": "my_new_group"}}'})
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        update_template = copy.deepcopy(rsrc.t)
        update_template['Properties']['query'] = (
            '{"=": {"server_group": "my_new_group"}}')
        scheduler.TaskRunner(rsrc.update, update_template)()
        self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
        self.m.VerifyAll()

    def _prepare_check_resource(self):
        """Return the alarm resource with a mocked client for check tests."""
        snippet = template_format.parse(
            gnocchi_aggregation_by_resources_alarm_template)
        self.stack = utils.parse_stack(snippet)
        res = self.stack['GnoAggregationByResourcesAlarm']
        res.client = mock.Mock()
        mock_alarm = mock.Mock(enabled=True, state='ok')
        res.client().alarms.get.return_value = mock_alarm
        return res

    def test_show_resource(self):
        """FnGetAtt('show') returns the alarm's to_dict() payload."""
        res = self._prepare_check_resource()
        res.client().alarms.create.return_value = mock.MagicMock(
            alarm_id='2')
        res.client().alarms.get.return_value = FakeCeilometerAlarm()
        scheduler.TaskRunner(res.create)()
        self.assertEqual({'attr': 'val'}, res.FnGetAtt('show'))
| apache-2.0 |
akashlevy/Yaklient | yaklient/objects/message.py | 3 | 1985 | # -*- coding: utf-8 -*-
"""Abstract class for a post on Yik Yak"""
from abc import abstractmethod
from yaklient import helper
class Message(object):
    """An abstract class for a postable object on Yik Yak (Comment or Yak)"""
    # NOTE(review): with a plain ``object`` base, @abstractmethod is NOT
    # enforced at instantiation time (that requires the abc.ABCMeta
    # metaclass) -- confirm whether enforcement was intended.

    def __init__(self, raw, user):
        """Initialize message from raw JSON dict and user"""
        self.delivery_id = raw["deliveryID"]
        self.liked = raw["liked"]
        self.likes = raw["numberOfLikes"]
        # Server-supplied id is cleaned through helper.backslash_remove.
        self.message_id = helper.backslash_remove(raw["messageID"])
        self.poster_id = raw["posterID"]
        self.time = raw["time"]
        self.user = user
        try:
            self.reyaked = raw["reyaked"]
        except KeyError:
            # "reyaked" is absent from some payloads -- presumably only
            # yaks carry it, not comments; TODO confirm against the API.
            self.reyaked = None

    @abstractmethod
    def __str__(self):
        """Return message as string"""
        pass

    def delete(self):
        """Delete message from Yik Yak. Return True if successful, False if
        unsuccessful"""
        return self.user.delete(self)

    def downvote(self):
        """Downvote the message. Return True if successful, False if
        unsuccessful"""
        if self.user.downvote(self):
            # Keep the local like counter in sync with the server action.
            self.likes -= 1
            return True
        else:
            return False

    def get_comments(self):
        """Get comments on the message"""
        return self.user.get_comments(self)

    def post_comment(self, comment):
        """Post a comment on the message. Return True if successful, False if
        unsuccessful"""
        return self.user.post_comment(comment, self.message_id)

    def report(self):
        """Report a message to Yik Yak"""
        self.user.report(self)

    @abstractmethod
    def update(self):
        """Update properties from Yik Yak"""
        pass

    def upvote(self):
        """Upvote the message. Return True if successful, False if
        unsuccessful"""
        if self.user.upvote(self):
            # Keep the local like counter in sync with the server action.
            self.likes += 1
            return True
        else:
            return False
| mit |
nbessi/pyhiccup | pyhiccup/page.py | 1 | 3037 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Nicolas Bessi
# Copyright 2014
# Original concept by James Reeves
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License 3
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from __future__ import unicode_literals
# Supported DOCTYPE declarations, keyed by the short names accepted by
# get_doc_type().
DOC_TYPES = {
    'html4': "<!DOCTYPE html PUBLIC \"-//W3C//DTD HTML 4.01//EN\" "
             "\"http://www.w3.org/TR/html4/strict.dtd\">\n",
    'xhtml-strict': "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 ""Strict//EN\" "
                    "\"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd\">\n",
    'xhtml-transitional': "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\" "
                          "\"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">\n",
    'html5': "<!DOCTYPE html>\n",
}

DEFAULT_XMLNS = 'http://www.w3.org/1999/xhtml'
# NOTE: name keeps its original (typo'd) capitalisation for compatibility.
XMl_DECLARATION = '<?xml version="1.0" encoding="UTF-8"?>'


def get_doc_type(doc_type):
    """Look up and return a DOCTYPE declaration.

    :param doc_type: doc type string must be in ``page.DOC_TYPES``
    :type doc_type: str
    :return: DOCTYPE declaration
    :rtype: str
    """
    try:
        return DOC_TYPES[doc_type]
    except KeyError:
        raise ValueError(
            'Invalid DOCTYPE %s available values are %s' %
            (doc_type, DOC_TYPES.keys())
        )
def build_html_enclosing_tag(etype, **kwargs):
    """Generate html tag list representation

    :param etype: html doc type `html5, html4, xhtml-strict,
                  xhtml-transitional`
    :type etype: str
    :param kwargs: dict of attribute for HTML tag will override defaults
    :type kwargs: dict
    :return: html tag list representation ['html', {'xmlns': ...}]
    :rtype: list
    """
    attrs = {}
    if etype in DOC_TYPES:
        # Default attributes for a recognised doc type.
        # NOTE(review): 'dir' defaults to right-to-left -- confirm intended.
        attrs['lang'] = 'en'
        attrs['dir'] = 'rtl'
        attrs['xml:lang'] = 'en'
    if 'xhtml' in etype:
        attrs[u'xmlns'] = DEFAULT_XMLNS
    # Caller-supplied attributes win over the defaults above.
    attrs.update(kwargs)
    return ['html', attrs]
def build_xml_enclosing_tag(etype, **kwargs):
    """Generate the XML root tag list representation.

    :param etype: root tag name
    :type etype: str
    :param kwargs: attributes for the root tag
    :type kwargs: dict
    :return: root xml tag list representation ['atag', {'attr': ...}]
    :rtype: list
    """
    return [etype, dict(kwargs)]
| agpl-3.0 |
docusign/docusign-python-client | docusign_esign/models/external_file.py | 1 | 7550 | # coding: utf-8
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign. # noqa: E501
OpenAPI spec version: v2.1
Contact: devcenter@docusign.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ExternalFile(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'_date': 'str',
'id': 'str',
'img': 'str',
'name': 'str',
'size': 'str',
'supported': 'str',
'type': 'str',
'uri': 'str'
}
attribute_map = {
'_date': 'date',
'id': 'id',
'img': 'img',
'name': 'name',
'size': 'size',
'supported': 'supported',
'type': 'type',
'uri': 'uri'
}
def __init__(self, _date=None, id=None, img=None, name=None, size=None, supported=None, type=None, uri=None): # noqa: E501
"""ExternalFile - a model defined in Swagger""" # noqa: E501
self.__date = None
self._id = None
self._img = None
self._name = None
self._size = None
self._supported = None
self._type = None
self._uri = None
self.discriminator = None
if _date is not None:
self._date = _date
if id is not None:
self.id = id
if img is not None:
self.img = img
if name is not None:
self.name = name
if size is not None:
self.size = size
if supported is not None:
self.supported = supported
if type is not None:
self.type = type
if uri is not None:
self.uri = uri
@property
def _date(self):
"""Gets the _date of this ExternalFile. # noqa: E501
# noqa: E501
:return: The _date of this ExternalFile. # noqa: E501
:rtype: str
"""
return self.__date
@_date.setter
def _date(self, _date):
"""Sets the _date of this ExternalFile.
# noqa: E501
:param _date: The _date of this ExternalFile. # noqa: E501
:type: str
"""
self.__date = _date
@property
def id(self):
"""Gets the id of this ExternalFile. # noqa: E501
# noqa: E501
:return: The id of this ExternalFile. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this ExternalFile.
# noqa: E501
:param id: The id of this ExternalFile. # noqa: E501
:type: str
"""
self._id = id
@property
def img(self):
"""Gets the img of this ExternalFile. # noqa: E501
# noqa: E501
:return: The img of this ExternalFile. # noqa: E501
:rtype: str
"""
return self._img
@img.setter
def img(self, img):
"""Sets the img of this ExternalFile.
# noqa: E501
:param img: The img of this ExternalFile. # noqa: E501
:type: str
"""
self._img = img
@property
def name(self):
"""Gets the name of this ExternalFile. # noqa: E501
# noqa: E501
:return: The name of this ExternalFile. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this ExternalFile.
# noqa: E501
:param name: The name of this ExternalFile. # noqa: E501
:type: str
"""
self._name = name
@property
def size(self):
"""Gets the size of this ExternalFile. # noqa: E501
Reserved: TBD # noqa: E501
:return: The size of this ExternalFile. # noqa: E501
:rtype: str
"""
return self._size
@size.setter
def size(self, size):
"""Sets the size of this ExternalFile.
Reserved: TBD # noqa: E501
:param size: The size of this ExternalFile. # noqa: E501
:type: str
"""
self._size = size
@property
def supported(self):
"""Gets the supported of this ExternalFile. # noqa: E501
# noqa: E501
:return: The supported of this ExternalFile. # noqa: E501
:rtype: str
"""
return self._supported
@supported.setter
def supported(self, supported):
"""Sets the supported of this ExternalFile.
# noqa: E501
:param supported: The supported of this ExternalFile. # noqa: E501
:type: str
"""
self._supported = supported
@property
def type(self):
"""Gets the type of this ExternalFile. # noqa: E501
# noqa: E501
:return: The type of this ExternalFile. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this ExternalFile.
# noqa: E501
:param type: The type of this ExternalFile. # noqa: E501
:type: str
"""
self._type = type
@property
def uri(self):
"""Gets the uri of this ExternalFile. # noqa: E501
# noqa: E501
:return: The uri of this ExternalFile. # noqa: E501
:rtype: str
"""
return self._uri
@uri.setter
def uri(self, uri):
"""Sets the uri of this ExternalFile.
# noqa: E501
:param uri: The uri of this ExternalFile. # noqa: E501
:type: str
"""
self._uri = uri
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ExternalFile, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
    """Render the model as a pretty-printed string of its dict form."""
    as_dict = self.to_dict()
    return pprint.pformat(as_dict)
def __repr__(self):
    """Delegate to to_str() so `print` and `pprint` show the dict form."""
    return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ExternalFile):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| mit |
ravibhure/ansible | lib/ansible/modules/windows/win_tempfile.py | 47 | 2164 | #!/usr/bin/python
# coding: utf-8 -*-
# (c) 2017 Dag Wieers <dag@wieers.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Metadata consumed by Ansible's tooling: this module's interface is still
# 'preview' (not yet stable) and it is community-supported.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_tempfile
version_added: "2.3"
author: Dag Wieers (@dagwieers)
short_description: Creates temporary files and directories.
description:
- Creates temporary files and directories.
- For non-Windows targets, please use the M(tempfile) module instead.
options:
state:
description:
- Whether to create file or directory.
choices: [ file, directory ]
default: file
path:
description:
- Location where temporary file or directory should be created.
- If path is not specified default system temporary directory (%TEMP%) will be used.
default: '%TEMP%'
prefix:
description:
- Prefix of file/directory name created by module.
default: ansible.
suffix:
description:
- Suffix of file/directory name created by module.
default: ''
notes:
- For non-Windows targets, please use the M(tempfile) module instead.
'''
EXAMPLES = r"""
- name: Create temporary build directory
win_tempfile:
state: directory
suffix: build
- name: Create temporary file
win_tempfile:
state: file
suffix: temp
"""
RETURN = r'''
path:
description: Path to created file or directory
returned: success
type: string
sample: C:\Users\Administrator\AppData\Local\Temp\ansible.bMlvdk
'''
| gpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.